Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Makefile | 2
-rw-r--r--  drivers/acpi/Kconfig | 6
-rw-r--r--  drivers/acpi/Makefile | 1
-rw-r--r--  drivers/acpi/acpi_dbg.c | 7
-rw-r--r--  drivers/acpi/acpi_pnp.c | 3
-rw-r--r--  drivers/acpi/acpi_watchdog.c | 6
-rw-r--r--  drivers/acpi/acpica/accommon.h | 2
-rw-r--r--  drivers/acpi/acpica/evregion.c | 54
-rw-r--r--  drivers/acpi/acpica/nspredef.c | 10
-rw-r--r--  drivers/acpi/acpica/nsprepkg.c | 38
-rw-r--r--  drivers/acpi/acpica/nsrepair2.c | 39
-rw-r--r--  drivers/acpi/apei/apei-base.c | 2
-rw-r--r--  drivers/acpi/arm64/iort.c | 14
-rw-r--r--  drivers/acpi/cppc_acpi.c | 141
-rw-r--r--  drivers/acpi/device_sysfs.c | 20
-rw-r--r--  drivers/acpi/ec.c | 117
-rw-r--r--  drivers/acpi/internal.h | 5
-rw-r--r--  drivers/acpi/nfit/core.c | 90
-rw-r--r--  drivers/acpi/pci_root.c | 4
-rw-r--r--  drivers/acpi/power.c | 2
-rw-r--r--  drivers/acpi/processor_idle.c | 1
-rw-r--r--  drivers/acpi/processor_perflib.c | 4
-rw-r--r--  drivers/acpi/processor_thermal.c | 2
-rw-r--r--  drivers/acpi/processor_throttling.c | 1
-rw-r--r--  drivers/acpi/resource.c | 2
-rw-r--r--  drivers/acpi/sbs.c | 24
-rw-r--r--  drivers/acpi/scan.c | 244
-rw-r--r--  drivers/acpi/sleep.c | 305
-rw-r--r--  drivers/acpi/sleep.h | 16
-rw-r--r--  drivers/acpi/thermal.c | 46
-rw-r--r--  drivers/acpi/tiny-power-button.c | 4
-rw-r--r--  drivers/acpi/video_detect.c | 7
-rw-r--r--  drivers/acpi/x86/s2idle.c | 448
-rw-r--r--  drivers/android/binder.c | 2
-rw-r--r--  drivers/atm/idt77252.c | 2
-rw-r--r--  drivers/base/core.c | 53
-rw-r--r--  drivers/base/dd.c | 9
-rw-r--r--  drivers/base/platform.c | 2
-rw-r--r--  drivers/base/power/clock_ops.c | 223
-rw-r--r--  drivers/base/power/domain.c | 87
-rw-r--r--  drivers/base/power/domain_governor.c | 102
-rw-r--r--  drivers/base/power/main.c | 9
-rw-r--r--  drivers/base/power/runtime.c | 2
-rw-r--r--  drivers/base/regmap/regmap-debugfs.c | 9
-rw-r--r--  drivers/block/Kconfig | 9
-rw-r--r--  drivers/block/Makefile | 7
-rw-r--r--  drivers/block/amiflop.c | 98
-rw-r--r--  drivers/block/aoe/aoecmd.c | 15
-rw-r--r--  drivers/block/ataflop.c | 135
-rw-r--r--  drivers/block/brd.c | 39
-rw-r--r--  drivers/block/drbd/drbd_main.c | 6
-rw-r--r--  drivers/block/drbd/drbd_receiver.c | 2
-rw-r--r--  drivers/block/drbd/drbd_worker.c | 3
-rw-r--r--  drivers/block/floppy.c | 154
-rw-r--r--  drivers/block/loop.c | 72
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.c | 15
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.h | 2
-rw-r--r--  drivers/block/nbd.c | 109
-rw-r--r--  drivers/block/null_blk/Kconfig | 12
-rw-r--r--  drivers/block/null_blk/Makefile | 11
-rw-r--r--  drivers/block/null_blk/main.c (renamed from drivers/block/null_blk_main.c) | 63
-rw-r--r--  drivers/block/null_blk/null_blk.h (renamed from drivers/block/null_blk.h) | 32
-rw-r--r--  drivers/block/null_blk/trace.c (renamed from drivers/block/null_blk_trace.c) | 2
-rw-r--r--  drivers/block/null_blk/trace.h (renamed from drivers/block/null_blk_trace.h) | 2
-rw-r--r--  drivers/block/null_blk/zoned.c (renamed from drivers/block/null_blk_zoned.c) | 335
-rw-r--r--  drivers/block/pktcdvd.c | 9
-rw-r--r--  drivers/block/ps3disk.c | 3
-rw-r--r--  drivers/block/ps3vram.c | 3
-rw-r--r--  drivers/block/rbd.c | 51
-rw-r--r--  drivers/block/rnbd/Kconfig | 1
-rw-r--r--  drivers/block/rnbd/README | 1
-rw-r--r--  drivers/block/rnbd/rnbd-clt-sysfs.c | 26
-rw-r--r--  drivers/block/rnbd/rnbd-clt.c | 132
-rw-r--r--  drivers/block/rnbd/rnbd-clt.h | 16
-rw-r--r--  drivers/block/rnbd/rnbd-proto.h | 9
-rw-r--r--  drivers/block/rnbd/rnbd-srv-sysfs.c | 66
-rw-r--r--  drivers/block/rnbd/rnbd-srv.c | 31
-rw-r--r--  drivers/block/rnbd/rnbd-srv.h | 4
-rw-r--r--  drivers/block/swim.c | 17
-rw-r--r--  drivers/block/virtio_blk.c | 3
-rw-r--r--  drivers/block/xen-blkback/common.h | 4
-rw-r--r--  drivers/block/xen-blkback/xenbus.c | 4
-rw-r--r--  drivers/block/xen-blkfront.c | 43
-rw-r--r--  drivers/block/z2ram.c | 547
-rw-r--r--  drivers/block/zram/zram_drv.c | 34
-rw-r--r--  drivers/block/zram/zram_drv.h | 1
-rw-r--r--  drivers/bus/arm-integrator-lm.c | 1
-rw-r--r--  drivers/bus/mips_cdmm.c | 4
-rw-r--r--  drivers/bus/simple-pm-bus.c | 3
-rw-r--r--  drivers/bus/ti-sysc.c | 41
-rw-r--r--  drivers/cdrom/cdrom.c | 12
-rw-r--r--  drivers/char/agp/Makefile | 6
-rw-r--r--  drivers/char/agp/agp.h | 5
-rw-r--r--  drivers/char/hw_random/Kconfig | 16
-rw-r--r--  drivers/char/ipmi/bt-bmc.c | 6
-rw-r--r--  drivers/char/ipmi/ipmi_devintf.c | 1
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c | 3
-rw-r--r--  drivers/char/ipmi/ipmi_watchdog.c | 8
-rw-r--r--  drivers/char/ps3flash.c | 3
-rw-r--r--  drivers/clk/Kconfig | 11
-rw-r--r--  drivers/clk/Makefile | 1
-rw-r--r--  drivers/clk/at91/at91rm9200.c | 21
-rw-r--r--  drivers/clk/at91/at91sam9260.c | 26
-rw-r--r--  drivers/clk/at91/at91sam9g45.c | 32
-rw-r--r--  drivers/clk/at91/at91sam9n12.c | 36
-rw-r--r--  drivers/clk/at91/at91sam9rl.c | 23
-rw-r--r--  drivers/clk/at91/at91sam9x5.c | 28
-rw-r--r--  drivers/clk/at91/clk-master.c | 337
-rw-r--r--  drivers/clk/at91/clk-sam9x60-pll.c | 145
-rw-r--r--  drivers/clk/at91/dt-compat.c | 15
-rw-r--r--  drivers/clk/at91/pmc.h | 22
-rw-r--r--  drivers/clk/at91/sam9x60.c | 51
-rw-r--r--  drivers/clk/at91/sama5d2.c | 42
-rw-r--r--  drivers/clk/at91/sama5d3.c | 38
-rw-r--r--  drivers/clk/at91/sama5d4.c | 40
-rw-r--r--  drivers/clk/at91/sama7g5.c | 223
-rw-r--r--  drivers/clk/bcm/clk-bcm2711-dvp.c | 4
-rw-r--r--  drivers/clk/clk-axi-clkgen.c | 64
-rw-r--r--  drivers/clk/clk-composite.c | 50
-rw-r--r--  drivers/clk/clk-divider.c | 34
-rw-r--r--  drivers/clk/clk-fsl-flexspi.c | 106
-rw-r--r--  drivers/clk/clk-fsl-sai.c | 14
-rw-r--r--  drivers/clk/clk-pwm.c | 2
-rw-r--r--  drivers/clk/clk-qoriq.c | 13
-rw-r--r--  drivers/clk/clk-s2mps11.c | 1
-rw-r--r--  drivers/clk/clk-scpi.c | 2
-rw-r--r--  drivers/clk/clk-si5351.c | 13
-rw-r--r--  drivers/clk/clk-versaclock5.c | 4
-rw-r--r--  drivers/clk/clk.c | 153
-rw-r--r--  drivers/clk/imx/Kconfig | 2
-rw-r--r--  drivers/clk/imx/clk-gate2.c | 68
-rw-r--r--  drivers/clk/imx/clk-imx8mm.c | 2
-rw-r--r--  drivers/clk/imx/clk-imx8mn.c | 2
-rw-r--r--  drivers/clk/imx/clk-imx8mp.c | 4
-rw-r--r--  drivers/clk/imx/clk-imx8mq.c | 2
-rw-r--r--  drivers/clk/imx/clk-imx8qxp-lpcg.c | 139
-rw-r--r--  drivers/clk/imx/clk-imx8qxp.c | 136
-rw-r--r--  drivers/clk/imx/clk-lpcg-scu.c | 53
-rw-r--r--  drivers/clk/imx/clk-pll14xx.c | 2
-rw-r--r--  drivers/clk/imx/clk-scu.c | 227
-rw-r--r--  drivers/clk/imx/clk-scu.h | 56
-rw-r--r--  drivers/clk/imx/clk.h | 27
-rw-r--r--  drivers/clk/ingenic/cgu.c | 14
-rw-r--r--  drivers/clk/mediatek/clk-mux.c | 2
-rw-r--r--  drivers/clk/mediatek/clk-mux.h | 4
-rw-r--r--  drivers/clk/meson/Kconfig | 7
-rw-r--r--  drivers/clk/meson/axg-aoclk.c | 5
-rw-r--r--  drivers/clk/meson/axg.c | 824
-rw-r--r--  drivers/clk/meson/axg.h | 23
-rw-r--r--  drivers/clk/meson/g12a-aoclk.c | 5
-rw-r--r--  drivers/clk/meson/g12a.c | 181
-rw-r--r--  drivers/clk/meson/g12a.h | 3
-rw-r--r--  drivers/clk/meson/gxbb-aoclk.c | 5
-rw-r--r--  drivers/clk/meson/gxbb.c | 5
-rw-r--r--  drivers/clk/meson/meson-aoclk.c | 4
-rw-r--r--  drivers/clk/meson/meson-eeclk.c | 3
-rw-r--r--  drivers/clk/mmp/clk-audio.c | 6
-rw-r--r--  drivers/clk/mvebu/armada-37xx-xtal.c | 4
-rw-r--r--  drivers/clk/qcom/Kconfig | 25
-rw-r--r--  drivers/clk/qcom/Makefile | 3
-rw-r--r--  drivers/clk/qcom/camcc-sc7180.c | 1732
-rw-r--r--  drivers/clk/qcom/clk-alpha-pll.c | 217
-rw-r--r--  drivers/clk/qcom/clk-alpha-pll.h | 4
-rw-r--r--  drivers/clk/qcom/clk-rpmh.c | 56
-rw-r--r--  drivers/clk/qcom/dispcc-sm8250.c | 1
-rw-r--r--  drivers/clk/qcom/gcc-sc7180.c | 26
-rw-r--r--  drivers/clk/qcom/gcc-sdx55.c | 1659
-rw-r--r--  drivers/clk/qcom/gcc-sm8250.c | 4
-rw-r--r--  drivers/clk/qcom/lpass-gfm-sm8250.c | 320
-rw-r--r--  drivers/clk/qcom/lpasscorecc-sc7180.c | 135
-rw-r--r--  drivers/clk/renesas/clk-sh73a0.c | 2
-rw-r--r--  drivers/clk/renesas/r8a774a1-cpg-mssr.c | 8
-rw-r--r--  drivers/clk/renesas/r8a774b1-cpg-mssr.c | 8
-rw-r--r--  drivers/clk/renesas/r8a774c0-cpg-mssr.c | 9
-rw-r--r--  drivers/clk/renesas/r8a779a0-cpg-mssr.c | 51
-rw-r--r--  drivers/clk/renesas/rcar-gen3-cpg.c | 79
-rw-r--r--  drivers/clk/renesas/rcar-gen3-cpg.h | 5
-rw-r--r--  drivers/clk/renesas/rcar-usb2-clock-sel.c | 2
-rw-r--r--  drivers/clk/renesas/renesas-cpg-mssr.c | 3
-rw-r--r--  drivers/clk/rockchip/Kconfig | 12
-rw-r--r--  drivers/clk/rockchip/clk-rk3188.c | 35
-rw-r--r--  drivers/clk/rockchip/clk.c | 3
-rw-r--r--  drivers/clk/samsung/Kconfig | 77
-rw-r--r--  drivers/clk/samsung/Makefile | 24
-rw-r--r--  drivers/clk/samsung/clk-exynos-clkout.c | 199
-rw-r--r--  drivers/clk/samsung/clk-pll.c | 147
-rw-r--r--  drivers/clk/sifive/Kconfig | 8
-rw-r--r--  drivers/clk/sifive/Makefile | 2
-rw-r--r--  drivers/clk/sifive/fu540-prci.c | 599
-rw-r--r--  drivers/clk/sifive/fu540-prci.h | 21
-rw-r--r--  drivers/clk/sifive/fu740-prci.c | 123
-rw-r--r--  drivers/clk/sifive/fu740-prci.h | 21
-rw-r--r--  drivers/clk/sifive/sifive-prci.c | 574
-rw-r--r--  drivers/clk/sifive/sifive-prci.h | 299
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun50i-a64.c | 1
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun8i-h3.c | 1
-rw-r--r--  drivers/clk/tegra/clk-bpmp.c | 6
-rw-r--r--  drivers/clk/tegra/clk-dfll.c | 4
-rw-r--r--  drivers/clk/tegra/clk-id.h | 1
-rw-r--r--  drivers/clk/tegra/clk-tegra-periph.c | 2
-rw-r--r--  drivers/clk/tegra/clk-tegra30.c | 2
-rw-r--r--  drivers/clk/ti/clk-33xx.c | 2
-rw-r--r--  drivers/clk/ti/clk-43xx.c | 8
-rw-r--r--  drivers/clk/ti/clk-44xx.c | 2
-rw-r--r--  drivers/clk/ti/clk-54xx.c | 12
-rw-r--r--  drivers/clk/ti/clk-7xx.c | 7
-rw-r--r--  drivers/clk/ti/fapll.c | 11
-rw-r--r--  drivers/clocksource/Kconfig | 2
-rw-r--r--  drivers/connector/cn_queue.c | 8
-rw-r--r--  drivers/connector/connector.c | 4
-rw-r--r--  drivers/counter/ti-eqep.c | 35
-rw-r--r--  drivers/cpufreq/Kconfig.arm | 5
-rw-r--r--  drivers/cpufreq/Makefile | 1
-rw-r--r--  drivers/cpufreq/acpi-cpufreq.c | 115
-rw-r--r--  drivers/cpufreq/brcmstb-avs-cpufreq.c | 24
-rw-r--r--  drivers/cpufreq/cppc_cpufreq.c | 204
-rw-r--r--  drivers/cpufreq/cpufreq-dt-platdev.c | 2
-rw-r--r--  drivers/cpufreq/cpufreq-dt.c | 2
-rw-r--r--  drivers/cpufreq/cpufreq.c | 43
-rw-r--r--  drivers/cpufreq/davinci-cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 134
-rw-r--r--  drivers/cpufreq/loongson1-cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/mediatek-cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/omap-cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/pmac32-cpufreq.c | 3
-rw-r--r--  drivers/cpufreq/powernow-k8.c | 9
-rw-r--r--  drivers/cpufreq/qcom-cpufreq-hw.c | 10
-rw-r--r--  drivers/cpufreq/s3c24xx-cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/s5pv210-cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/sa1100-cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/sa1110-cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/scmi-cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/scpi-cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/spear-cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/tango-cpufreq.c | 38
-rw-r--r--  drivers/cpufreq/tegra186-cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/tegra194-cpufreq.c | 3
-rw-r--r--  drivers/cpufreq/tegra20-cpufreq.c | 45
-rw-r--r--  drivers/cpufreq/vexpress-spc-cpufreq.c | 3
-rw-r--r--  drivers/cpuidle/cpuidle-big_little.c | 3
-rw-r--r--  drivers/crypto/Kconfig | 1
-rw-r--r--  drivers/crypto/keembay/Kconfig | 5
-rw-r--r--  drivers/crypto/marvell/cesa/cesa.h | 4
-rw-r--r--  drivers/crypto/qat/Kconfig | 1
-rw-r--r--  drivers/dax/bus.c | 71
-rw-r--r--  drivers/dax/pmem/core.c | 2
-rw-r--r--  drivers/dax/super.c | 1
-rw-r--r--  drivers/devfreq/devfreq.c | 11
-rw-r--r--  drivers/devfreq/governor.h | 2
-rw-r--r--  drivers/devfreq/governor_passive.c | 44
-rw-r--r--  drivers/devfreq/rk3399_dmc.c | 2
-rw-r--r--  drivers/devfreq/tegra30-devfreq.c | 4
-rw-r--r--  drivers/dma-buf/dma-buf.c | 40
-rw-r--r--  drivers/dma-buf/dma-resv.c | 2
-rw-r--r--  drivers/dma-buf/heaps/Makefile | 1
-rw-r--r--  drivers/dma-buf/heaps/cma_heap.c | 327
-rw-r--r--  drivers/dma-buf/heaps/heap-helpers.c | 274
-rw-r--r--  drivers/dma-buf/heaps/heap-helpers.h | 53
-rw-r--r--  drivers/dma-buf/heaps/system_heap.c | 414
-rw-r--r--  drivers/dma/Kconfig | 10
-rw-r--r--  drivers/dma/at_xdmac.c | 163
-rw-r--r--  drivers/dma/dma-jz4780.c | 2
-rw-r--r--  drivers/dma/dmatest.c | 13
-rw-r--r--  drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c | 2
-rw-r--r--  drivers/dma/dw-edma/dw-edma-core.c | 4
-rw-r--r--  drivers/dma/dw/core.c | 6
-rw-r--r--  drivers/dma/hisi_dma.c | 5
-rw-r--r--  drivers/dma/idma64.c | 8
-rw-r--r--  drivers/dma/idxd/cdev.c | 50
-rw-r--r--  drivers/dma/idxd/device.c | 143
-rw-r--r--  drivers/dma/idxd/dma.c | 9
-rw-r--r--  drivers/dma/idxd/idxd.h | 58
-rw-r--r--  drivers/dma/idxd/init.c | 123
-rw-r--r--  drivers/dma/idxd/irq.c | 146
-rw-r--r--  drivers/dma/idxd/registers.h | 28
-rw-r--r--  drivers/dma/idxd/submit.c | 37
-rw-r--r--  drivers/dma/idxd/sysfs.c | 211
-rw-r--r--  drivers/dma/imx-dma.c | 33
-rw-r--r--  drivers/dma/imx-sdma.c | 38
-rw-r--r--  drivers/dma/ipu/ipu_idmac.c | 11
-rw-r--r--  drivers/dma/k3dma.c | 9
-rw-r--r--  drivers/dma/mediatek/mtk-hsdma.c | 1
-rw-r--r--  drivers/dma/milbeaut-xdmac.c | 9
-rw-r--r--  drivers/dma/moxart-dma.c | 5
-rw-r--r--  drivers/dma/mv_xor.c | 2
-rw-r--r--  drivers/dma/mv_xor_v2.c | 4
-rw-r--r--  drivers/dma/mxs-dma.c | 37
-rw-r--r--  drivers/dma/of-dma.c | 10
-rw-r--r--  drivers/dma/pl330.c | 2
-rw-r--r--  drivers/dma/ppc4xx/adma.c | 4
-rw-r--r--  drivers/dma/pxa_dma.c | 5
-rw-r--r--  drivers/dma/qcom/Kconfig | 23
-rw-r--r--  drivers/dma/qcom/Makefile | 2
-rw-r--r--  drivers/dma/qcom/bam_dma.c | 8
-rw-r--r--  drivers/dma/qcom/gpi.c | 2303
-rw-r--r--  drivers/dma/qcom/qcom_adm.c | 905
-rw-r--r--  drivers/dma/sf-pdma/sf-pdma.c | 12
-rw-r--r--  drivers/dma/ste_dma40.c | 5
-rw-r--r--  drivers/dma/stm32-dma.c | 47
-rw-r--r--  drivers/dma/stm32-dmamux.c | 2
-rw-r--r--  drivers/dma/stm32-mdma.c | 68
-rw-r--r--  drivers/dma/sun6i-dma.c | 25
-rw-r--r--  drivers/dma/tegra210-adma.c | 7
-rw-r--r--  drivers/dma/ti/Makefile | 3
-rw-r--r--  drivers/dma/ti/dma-crossbar.c | 6
-rw-r--r--  drivers/dma/ti/k3-psil-am64.c | 158
-rw-r--r--  drivers/dma/ti/k3-psil-priv.h | 1
-rw-r--r--  drivers/dma/ti/k3-psil.c | 1
-rw-r--r--  drivers/dma/ti/k3-udma-glue.c | 383
-rw-r--r--  drivers/dma/ti/k3-udma-private.c | 45
-rw-r--r--  drivers/dma/ti/k3-udma.c | 1934
-rw-r--r--  drivers/dma/ti/k3-udma.h | 28
-rw-r--r--  drivers/dma/xilinx/xilinx_dma.c | 11
-rw-r--r--  drivers/firmware/arm_scmi/notify.c | 10
-rw-r--r--  drivers/firmware/arm_scmi/sensors.c | 720
-rw-r--r--  drivers/firmware/efi/Kconfig | 4
-rw-r--r--  drivers/firmware/efi/Makefile | 5
-rw-r--r--  drivers/firmware/efi/apple-properties.c | 13
-rw-r--r--  drivers/firmware/efi/capsule.c | 16
-rw-r--r--  drivers/firmware/efi/libstub/efi-stub.c | 1
-rw-r--r--  drivers/firmware/efi/libstub/efistub.h | 3
-rw-r--r--  drivers/firmware/efi/libstub/fdt.c | 3
-rw-r--r--  drivers/firmware/efi/libstub/secureboot.c | 44
-rw-r--r--  drivers/firmware/efi/libstub/x86-stub.c | 5
-rw-r--r--  drivers/firmware/efi/test/efi_test.c | 16
-rw-r--r--  drivers/firmware/efi/test/efi_test.h | 3
-rw-r--r--  drivers/firmware/imx/Kconfig | 1
-rw-r--r--  drivers/firmware/imx/imx-dsp.c | 72
-rw-r--r--  drivers/firmware/imx/scu-pd.c | 12
-rw-r--r--  drivers/firmware/meson/Kconfig | 5
-rw-r--r--  drivers/firmware/meson/meson_sm.c | 1
-rw-r--r--  drivers/firmware/psci/psci.c | 126
-rw-r--r--  drivers/firmware/tegra/bpmp-debugfs.c | 6
-rw-r--r--  drivers/firmware/ti_sci.c | 213
-rw-r--r--  drivers/firmware/ti_sci.h | 72
-rw-r--r--  drivers/firmware/xilinx/zynqmp.c | 46
-rw-r--r--  drivers/gpio/Kconfig | 35
-rw-r--r--  drivers/gpio/Makefile | 2
-rw-r--r--  drivers/gpio/TODO | 85
-rw-r--r--  drivers/gpio/gpio-104-idi-48.c | 6
-rw-r--r--  drivers/gpio/gpio-amd8111.c | 11
-rw-r--r--  drivers/gpio/gpio-ath79.c | 1
-rw-r--r--  drivers/gpio/gpio-bt8xx.c | 8
-rw-r--r--  drivers/gpio/gpio-cs5535.c | 8
-rw-r--r--  drivers/gpio/gpio-dwapb.c | 7
-rw-r--r--  drivers/gpio/gpio-exar.c | 155
-rw-r--r--  drivers/gpio/gpio-hisi.c | 323
-rw-r--r--  drivers/gpio/gpio-mockup.c | 11
-rw-r--r--  drivers/gpio/gpio-msc313.c | 460
-rw-r--r--  drivers/gpio/gpio-mvebu.c | 86
-rw-r--r--  drivers/gpio/gpio-mxc.c | 102
-rw-r--r--  drivers/gpio/gpio-mxs.c | 14
-rw-r--r--  drivers/gpio/gpio-omap.c | 7
-rw-r--r--  drivers/gpio/gpio-rcar.c | 87
-rw-r--r--  drivers/gpio/gpio-sifive.c | 25
-rw-r--r--  drivers/gpio/gpio-stmpe.c | 10
-rw-r--r--  drivers/gpio/gpio-tegra.c | 22
-rw-r--r--  drivers/gpio/gpio-tegra186.c | 11
-rw-r--r--  drivers/gpio/gpio-tps65910.c | 12
-rw-r--r--  drivers/gpio/gpio-xilinx.c | 49
-rw-r--r--  drivers/gpio/gpio-xra1403.c | 10
-rw-r--r--  drivers/gpio/gpiolib-acpi.c | 139
-rw-r--r--  drivers/gpio/gpiolib-acpi.h | 2
-rw-r--r--  drivers/gpio/gpiolib-cdev.c | 226
-rw-r--r--  drivers/gpio/gpiolib-devres.c | 27
-rw-r--r--  drivers/gpio/gpiolib-of.c | 14
-rw-r--r--  drivers/gpio/gpiolib-sysfs.c | 2
-rw-r--r--  drivers/gpio/gpiolib.c | 316
-rw-r--r--  drivers/gpio/gpiolib.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c | 53
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 53
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 27
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 70
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 21
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c | 86
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nv.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c | 42
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 216
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 19
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c | 45
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/calcs/Makefile | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile | 21
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c | 36
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c | 27
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 29
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_stream.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c | 44
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/Makefile | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 20
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c | 15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/Makefile | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 48
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/Makefile | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/Makefile | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn301/Makefile | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn302/Makefile | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/Makefile | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/Makefile | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/os_types.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 62
-rw-r--r--  drivers/gpu/drm/amd/display/modules/color/color_gamma.c | 22
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/modules/power/power_helpers.c | 35
-rw-r--r--  drivers/gpu/drm/amd/display/modules/power/power_helpers.h | 1
-rw-r--r--  drivers/gpu/drm/amd/include/atomfirmware.h | 1
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu_types.h | 4
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu_v11_0.h | 5
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu_v11_0_7_ppsmc.h | 6
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu_v11_5_ppsmc.h | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c | 166
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.h | 3
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 19
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 1
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 1
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 290
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h | 4
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c | 77
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c | 20
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.h | 4
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c | 3
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c | 1
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_dev.c | 1
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_kms.c | 4
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c | 3
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c | 4
-rw-r--r--  drivers/gpu/drm/bridge/lontium-lt9611uxc.c | 57
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c | 10
-rw-r--r--  drivers/gpu/drm/drm_blend.c | 2
-rw-r--r--  drivers/gpu/drm/drm_bufs.c | 1
-rw-r--r--  drivers/gpu/drm/drm_client.c | 4
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 12
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c | 24
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 4
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 155
-rw-r--r--  drivers/gpu/drm/drm_gem_shmem_helper.c | 40
-rw-r--r--  drivers/gpu/drm/drm_gem_vram_helper.c | 14
-rw-r--r--  drivers/gpu/drm/drm_plane.c | 9
-rw-r--r--  drivers/gpu/drm/drm_syncobj.c | 8
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem.c | 4
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 1
-rw-r--r--  drivers/gpu/drm/i915/display/icl_dsi.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.c | 52
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c | 9
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_types.h | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.c | 37
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.h | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_link_training.c | 25
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_link_training.h | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_mst.c | 11
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdcp.c | 9
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lpe_audio.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_overlay.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_panel.c | 9
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sprite.c | 65
-rw-r--r--  drivers/gpu/drm/i915/display/vlv_dsi.c | 16
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_domain.c | 45
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_mman.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.h | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/gen7_renderclear.c | 169
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_breadcrumbs.c | 56
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ggtt.c | 47
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_lrc.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ring_submission.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_timeline.c | 10
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_log.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gvt/display.c | 81
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_active.c | 28
-rw-r--r--  drivers/gpu/drm/i915/i915_cmd_parser.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 9
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 17
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 61
-rw-r--r--  drivers/gpu/drm/i915/i915_mitigations.c | 146
-rw-r--r--  drivers/gpu/drm/i915/i915_mitigations.h | 13
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.c | 49
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.h | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_request.h | 37
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 2
-rw-r--r--  drivers/gpu/drm/imx/dcss/dcss-dev.h | 3
-rw-r--r--  drivers/gpu/drm/imx/dcss/dcss-plane.c | 24
-rw-r--r--  drivers/gpu/drm/imx/dcss/dcss-scaler.c | 47
-rw-r--r--  drivers/gpu/drm/lima/lima_gem.c | 2
-rw-r--r--  drivers/gpu/drm/mcde/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/mcde/Makefile | 2
-rw-r--r--  drivers/gpu/drm/mcde/mcde_clk_div.c | 192
-rw-r--r--  drivers/gpu/drm/mcde/mcde_display.c | 456
-rw-r--r--  drivers/gpu/drm/mcde/mcde_display_regs.h | 91
-rw-r--r--  drivers/gpu/drm/mcde/mcde_drm.h | 10
-rw-r--r--  drivers/gpu/drm/mcde/mcde_drv.c | 46
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 3
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h | 34
-rw-r--r--  drivers/gpu/drm/meson/meson_dw_hdmi.c | 51
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.c | 1
-rw-r--r--  drivers/gpu/drm/msm/adreno/a2xx_gpu.c | 6
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx_gpu.c | 6
-rw-r--r--  drivers/gpu/drm/msm/adreno/a4xx_gpu.c | 6
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 8
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_device.c | 4
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c | 21
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.h | 6
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_display.c | 7
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_panel.c | 12
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 8
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 17
-rw-r--r--  drivers/gpu/drm/msm/msm_iommu.c | 1
-rw-r--r--  drivers/gpu/drm/mxsfb/mxsfb_drv.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/Kbuild | 1
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/base507c.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/base827c.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/core.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/curs.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/disp.c | 17
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/disp.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/head917d.c | 28
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/wimm.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/wndw.c | 18
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/wndw.h | 8
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/wndwc67e.c | 106
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvhw/class/cl917d.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvif/cl0080.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvif/class.h | 5
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvif/push.h | 216
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/device.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_backlight.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 66
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_svm.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvif/disp.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | 87
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/user.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c | 33
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/ga102.c | 46
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/rootga102.c | 52
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/sorga102.c | 140
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/devinit/ga100.c | 76
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c | 40
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c | 40
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramga102.c | 40
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gpio/ga102.c | 118
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c | 17
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk110.c | 45
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mc/ga100.c | 74
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c | 6
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem.c | 3
-rw-r--r--  drivers/gpu/drm/panel/panel-samsung-s6e63m0.c | 345
-rw-r--r--  drivers/gpu/drm/panel/panel-simple.c | 28
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_gem.c | 2
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_mmu.c | 7
-rw-r--r--  drivers/gpu/drm/qxl/qxl_ttm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_uvd.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vce.c | 2
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_backend.c | 19
-rw-r--r--  drivers/gpu/drm/ttm/ttm_pool.c | 86
-rw-r--r--  drivers/gpu/drm/udl/udl_drv.c | 2
-rw-r--r--  drivers/gpu/drm/v3d/v3d_bo.c | 2
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hdmi.c | 1
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hvs.c | 8
-rw-r--r--  drivers/gpu/drm/vc4/vc4_plane.c | 11
-rw-r--r--  drivers/gpu/drm/vc4/vc4_txp.c | 4
-rw-r--r--  drivers/gpu/drm/vgem/vgem_drv.c | 3
-rw-r--r--  drivers/gpu/drm/via/via_irq.c | 1
-rw-r--r--  drivers/gpu/drm/via/via_verifier.c | 7
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_debugfs.c | 4
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.h | 8
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_fence.c | 13
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_ioctl.c | 7
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_object.c | 1
-rw-r--r--  drivers/gpu/drm/vkms/vkms_drv.c | 1
-rw-r--r--  drivers/gpu/drm/vkms/vkms_writeback.c | 7
-rw-r--r--  drivers/gpu/vga/vga_switcheroo.c | 2
-rw-r--r--  drivers/hid/Kconfig | 4
-rw-r--r--  drivers/hid/Makefile | 2
-rw-r--r--  drivers/hid/amd-sfh-hid/Kconfig | 18
-rw-r--r--  drivers/hid/amd-sfh-hid/Makefile | 13
-rw-r--r--  drivers/hid/amd-sfh-hid/amd_sfh_client.c | 246
-rw-r--r--  drivers/hid/amd-sfh-hid/amd_sfh_hid.c | 174
-rw-r--r--  drivers/hid/amd-sfh-hid/amd_sfh_hid.h | 67
-rw-r--r--  drivers/hid/amd-sfh-hid/amd_sfh_pcie.c | 152
-rw-r--r--  drivers/hid/amd-sfh-hid/amd_sfh_pcie.h | 79
-rw-r--r--  drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c | 224
-rw-r--r--  drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.h | 107
-rw-r--r--  drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_report_desc.h | 645
-rw-r--r--  drivers/hid/hid-asus.c | 123
-rw-r--r--  drivers/hid/hid-elecom.c | 51
-rw-r--r--  drivers/hid/hid-ids.h | 8
-rw-r--r--  drivers/hid/hid-input.c | 8
-rw-r--r--  drivers/hid/hid-ite.c | 13
-rw-r--r--  drivers/hid/hid-logitech-dj.c | 4
-rw-r--r--  drivers/hid/hid-logitech-hidpp.c | 4
-rw-r--r--  drivers/hid/hid-mf.c | 2
-rw-r--r--  drivers/hid/hid-multitouch.c | 7
-rw-r--r--  drivers/hid/hid-quirks.c | 3
-rw-r--r--  drivers/hid/hid-sony.c | 247
-rw-r--r--  drivers/hid/hid-uclogic-params.c | 2
-rw-r--r--  drivers/hid/hid-wiimote-core.c | 2
-rw-r--r--  drivers/hid/hidraw.c | 24
-rw-r--r--  drivers/hid/i2c-hid/i2c-hid-core.c | 5
-rw-r--r--  drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c | 8
-rw-r--r--  drivers/hid/intel-ish-hid/ishtp-hid.c | 6
-rw-r--r--  drivers/hid/usbhid/hid-core.c | 2
-rw-r--r--  drivers/hid/wacom_sys.c | 56
-rw-r--r--  drivers/hid/wacom_wac.h | 2
-rw-r--r--  drivers/hsi/controllers/omap_ssi_core.c | 2
-rw-r--r--  drivers/hsi/hsi_core.c | 2
-rw-r--r--  drivers/hv/channel.c | 174
-rw-r--r--  drivers/hv/hv_balloon.c | 5
-rw-r--r--  drivers/hv/hyperv_vmbus.h | 6
-rw-r--r--  drivers/hv/ring_buffer.c | 29
-rw-r--r--  drivers/hv/vmbus_drv.c | 54
-rw-r--r--  drivers/hwmon/amd_energy.c | 3
-rw-r--r--  drivers/hwmon/k10temp.c | 98
-rw-r--r--  drivers/hwmon/pwm-fan.c | 12
-rw-r--r--  drivers/hwmon/scmi-hwmon.c | 2
-rw-r--r--  drivers/hwspinlock/sirf_hwspinlock.c | 2
-rw-r--r--  drivers/hwspinlock/sprd_hwspinlock.c | 17
-rw-r--r--  drivers/hwtracing/intel_th/pci.c | 5
-rw-r--r--  drivers/hwtracing/stm/heartbeat.c | 6
-rw-r--r--  drivers/i2c/busses/Kconfig | 1
-rw-r--r--  drivers/i2c/busses/i2c-i801.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-imx.c | 20
-rw-r--r--  drivers/i2c/busses/i2c-mt65xx.c | 46
-rw-r--r--  drivers/i2c/busses/i2c-octeon-core.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-sprd.c | 8
-rw-r--r--  drivers/i2c/busses/i2c-tegra-bpmp.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-tegra.c | 24
-rw-r--r--  drivers/i3c/master.c | 5
-rw-r--r--  drivers/i3c/master/Kconfig | 13
-rw-r--r--  drivers/i3c/master/Makefile | 1
-rw-r--r--  drivers/i3c/master/mipi-i3c-hci/Makefile | 6
-rw-r--r--  drivers/i3c/master/mipi-i3c-hci/cmd.h | 67
-rw-r--r--  drivers/i3c/master/mipi-i3c-hci/cmd_v1.c | 378
-rw-r--r--  drivers/i3c/master/mipi-i3c-hci/cmd_v2.c | 316
-rw-r--r--  drivers/i3c/master/mipi-i3c-hci/core.c | 798
-rw-r--r--  drivers/i3c/master/mipi-i3c-hci/dat.h | 32
-rw-r--r--  drivers/i3c/master/mipi-i3c-hci/dat_v1.c | 184
-rw-r--r--  drivers/i3c/master/mipi-i3c-hci/dct.h | 16
-rw-r--r--  drivers/i3c/master/mipi-i3c-hci/dct_v1.c | 36
-rw-r--r--  drivers/i3c/master/mipi-i3c-hci/dma.c | 784
-rw-r--r--  drivers/i3c/master/mipi-i3c-hci/ext_caps.c | 308
-rw-r--r--  drivers/i3c/master/mipi-i3c-hci/ext_caps.h | 19
-rw-r--r--  drivers/i3c/master/mipi-i3c-hci/hci.h | 144
-rw-r--r--  drivers/i3c/master/mipi-i3c-hci/ibi.h | 42
-rw-r--r--  drivers/i3c/master/mipi-i3c-hci/pio.c | 1041
-rw-r--r--  drivers/i3c/master/mipi-i3c-hci/xfer_mode_rate.h | 79
-rw-r--r--  drivers/ide/ide-atapi.c | 1
-rw-r--r--  drivers/ide/ide-io.c | 7
-rw-r--r--  drivers/ide/ide-pm.c | 2
-rw-r--r--  drivers/ide/ide-probe.c | 66
-rw-r--r--  drivers/ide/ide-tape.c | 2
-rw-r--r--  drivers/idle/intel_idle.c | 43
-rw-r--r--  drivers/iio/adc/ti_am335x_adc.c | 6
-rw-r--r--  drivers/iio/common/st_sensors/st_sensors_trigger.c | 31
-rw-r--r--  drivers/iio/dac/ad5504.c | 4
-rw-r--r--  drivers/iio/proximity/sx9310.c | 5
-rw-r--r--  drivers/iio/temperature/mlx90632.c | 6
-rw-r--r--  drivers/infiniband/core/cm.c | 9
-rw-r--r--  drivers/infiniband/core/cma.c | 195
-rw-r--r--  drivers/infiniband/core/cma_configfs.c | 8
-rw-r--r--  drivers/infiniband/core/core_priv.h | 28
-rw-r--r--  drivers/infiniband/core/counters.c | 138
-rw-r--r--  drivers/infiniband/core/cq.c | 16
-rw-r--r--  drivers/infiniband/core/device.c | 92
-rw-r--r--  drivers/infiniband/core/iwpm_util.h | 2
-rw-r--r--  drivers/infiniband/core/rdma_core.c | 101
-rw-r--r--  drivers/infiniband/core/restrack.c | 24
-rw-r--r--  drivers/infiniband/core/rw.c | 5
-rw-r--r--  drivers/infiniband/core/sa_query.c | 3
-rw-r--r--  drivers/infiniband/core/sysfs.c | 166
-rw-r--r--  drivers/infiniband/core/ucma.c | 137
-rw-r--r--  drivers/infiniband/core/umem.c | 19
-rw-r--r--  drivers/infiniband/core/user_mad.c | 6
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c | 149
-rw-r--r--  drivers/infiniband/core/uverbs_main.c | 4
-rw-r--r--  drivers/infiniband/core/uverbs_std_types.c | 18
-rw-r--r--  drivers/infiniband/core/uverbs_std_types_async_fd.c | 5
-rw-r--r--  drivers/infiniband/core/uverbs_std_types_counters.c | 5
-rw-r--r--  drivers/infiniband/core/uverbs_std_types_cq.c | 4
-rw-r--r--  drivers/infiniband/core/uverbs_std_types_device.c | 14
-rw-r--r--  drivers/infiniband/core/uverbs_std_types_dm.c | 6
-rw-r--r--  drivers/infiniband/core/uverbs_std_types_flow_action.c | 6
-rw-r--r--  drivers/infiniband/core/uverbs_std_types_mr.c | 6
-rw-r--r--  drivers/infiniband/core/uverbs_std_types_qp.c | 8
-rw-r--r--  drivers/infiniband/core/uverbs_std_types_srq.c | 4
-rw-r--r--  drivers/infiniband/core/uverbs_std_types_wq.c | 4
-rw-r--r--  drivers/infiniband/core/uverbs_uapi.c | 5
-rw-r--r--  drivers/infiniband/core/verbs.c | 27
-rw-r--r--  drivers/infiniband/hw/bnxt_re/ib_verbs.c | 15
-rw-r--r--  drivers/infiniband/hw/bnxt_re/main.c | 34
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_sp.c | 2
-rw-r--r--  drivers/infiniband/hw/cxgb4/cq.c | 3
-rw-r--r--  drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 2
-rw-r--r--  drivers/infiniband/hw/cxgb4/mem.c | 84
-rw-r--r--  drivers/infiniband/hw/cxgb4/provider.c | 35
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c | 10
-rw-r--r--  drivers/infiniband/hw/efa/efa_main.c | 34
-rw-r--r--  drivers/infiniband/hw/efa/efa_verbs.c | 6
-rw-r--r--  drivers/infiniband/hw/hfi1/qp.c | 1
-rw-r--r--  drivers/infiniband/hw/hfi1/sysfs.c | 62
-rw-r--r--  drivers/infiniband/hw/hfi1/tid_rdma.c | 5
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_ah.c | 55
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_alloc.c | 132
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_cmd.c | 37
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_cmd.h | 6
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_common.h | 26
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_cq.c | 46
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_db.c | 8
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_device.h | 178
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hem.c | 59
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hem.h | 3
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 50
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v1.h | 2
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 554
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 265
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_main.c | 82
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_mr.c | 79
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_pd.c | 14
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_qp.c | 301
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_srq.c | 53
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw.h | 1
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_cm.c | 6
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_ctrl.c | 72
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_d.h | 35
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_status.h | 1
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_type.h | 38
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_uk.c | 41
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_user.h | 8
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_verbs.c | 121
-rw-r--r--  drivers/infiniband/hw/mlx4/mad.c | 1
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c | 64
-rw-r--r--  drivers/infiniband/hw/mlx4/mcg.c | 82
-rw-r--r--  drivers/infiniband/hw/mlx4/mlx4_ib.h | 8
-rw-r--r--  drivers/infiniband/hw/mlx4/mr.c | 16
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c | 14
-rw-r--r--  drivers/infiniband/hw/mlx4/srq.c | 4
-rw-r--r--  drivers/infiniband/hw/mlx4/sysfs.c | 66
-rw-r--r--  drivers/infiniband/hw/mlx5/cq.c | 77
-rw-r--r--  drivers/infiniband/hw/mlx5/devx.c | 90
-rw-r--r--  drivers/infiniband/hw/mlx5/fs.c | 6
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 115
-rw-r--r--  drivers/infiniband/hw/mlx5/mem.c | 192
-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h | 102
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c | 960
-rw-r--r--  drivers/infiniband/hw/mlx5/odp.c | 56
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c | 197
-rw-r--r--  drivers/infiniband/hw/mlx5/restrack.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx5/srq.c | 34
-rw-r--r--  drivers/infiniband/hw/mlx5/srq.h | 1
-rw-r--r--  drivers/infiniband/hw/mlx5/srq_cmd.c | 80
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cq.c | 2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_dev.h | 1
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.c | 61
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c | 3
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_main.c | 42
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 13
-rw-r--r--  drivers/infiniband/hw/qedr/main.c | 39
-rw-r--r--  drivers/infiniband/hw/qedr/verbs.c | 13
-rw-r--r--  drivers/infiniband/hw/qib/qib_pcie.c | 11
-rw-r--r--  drivers/infiniband/hw/qib/qib_sysfs.c | 96
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_ib_main.c | 19
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_ib_sysfs.c | 99
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_ib_verbs.c | 10
-rw-r--r--  drivers/infiniband/hw/vmw_pvrdma/pvrdma.h | 14
-rw-r--r--  drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c | 5
-rw-r--r--  drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c | 34
-rw-r--r--  drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c | 5
-rw-r--r--  drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c | 2
-rw-r--r--  drivers/infiniband/sw/rdmavt/Kconfig | 1
-rw-r--r--  drivers/infiniband/sw/rdmavt/ah.c | 3
-rw-r--r--  drivers/infiniband/sw/rdmavt/cq.c | 2
-rw-r--r--  drivers/infiniband/sw/rdmavt/mcast.c | 12
-rw-r--r--  drivers/infiniband/sw/rdmavt/mr.c | 6
-rw-r--r--  drivers/infiniband/sw/rdmavt/qp.c | 18
-rw-r--r--  drivers/infiniband/sw/rdmavt/vt.c | 36
-rw-r--r--  drivers/infiniband/sw/rxe/Kconfig | 1
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_cq.c | 5
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_mr.c | 1
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_net.c | 12
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_queue.h | 94
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_req.c | 3
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_verbs.c | 67
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_verbs.h | 2
-rw-r--r--  drivers/infiniband/sw/siw/Kconfig | 1
-rw-r--r--  drivers/infiniband/sw/siw/siw.h | 1
-rw-r--r--  drivers/infiniband/sw/siw/siw_cm.c | 2
-rw-r--r--  drivers/infiniband/sw/siw/siw_main.c | 52
-rw-r--r--  drivers/infiniband/sw/siw/siw_verbs.c | 12
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c | 4
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ethtool.c | 4
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c | 7
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_verbs.c | 4
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_vlan.c | 2
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.c | 24
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c | 35
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.h | 6
-rw-r--r--  drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h | 2
-rw-r--r--  drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c | 2
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c | 62
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs-clt.c | 80
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs-clt.h | 1
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs-pri.h | 3
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c | 21
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs-srv.c | 119
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs-srv.h | 2
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs.c | 61
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs.h | 7
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c | 48
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.c | 16
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.h | 2
-rw-r--r--  drivers/input/joystick/xpad.c | 17
-rw-r--r--  drivers/input/misc/ariel-pwrbutton.c | 6
-rw-r--r--  drivers/input/serio/i8042-x86ia64io.h | 2
-rw-r--r--  drivers/input/touchscreen/goodix.c | 2
-rw-r--r--  drivers/input/touchscreen/ili210x.c | 26
-rw-r--r--  drivers/input/touchscreen/st1232.c | 48
-rw-r--r--  drivers/interconnect/imx/imx.c | 3
-rw-r--r--  drivers/interconnect/imx/imx8mq.c | 2
-rw-r--r--  drivers/interconnect/qcom/Kconfig | 23
-rw-r--r--  drivers/interconnect/qcom/sdm845.c | 3
-rw-r--r--  drivers/iommu/Kconfig | 7
-rw-r--r--  drivers/iommu/Makefile | 1
-rw-r--r--  drivers/iommu/amd/amd_iommu.h | 7
-rw-r--r--  drivers/iommu/amd/amd_iommu_types.h | 23
-rw-r--r--  drivers/iommu/amd/init.c | 65
-rw-r--r--  drivers/iommu/amd/iommu.c | 5
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c | 244
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 59
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h | 30
-rw-r--r--  drivers/iommu/arm/arm-smmu/arm-smmu-impl.c | 13
-rw-r--r--  drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c | 17
-rw-r--r--  drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c | 274
-rw-r--r--  drivers/iommu/arm/arm-smmu/arm-smmu.c | 76
-rw-r--r--  drivers/iommu/arm/arm-smmu/arm-smmu.h | 5
-rw-r--r--  drivers/iommu/arm/arm-smmu/qcom_iommu.c | 8
-rw-r--r--  drivers/iommu/dma-iommu.c | 217
-rw-r--r--  drivers/iommu/intel/Kconfig | 1
-rw-r--r--  drivers/iommu/intel/dmar.c | 6
-rw-r--r--  drivers/iommu/intel/iommu.c | 1095
-rw-r--r--  drivers/iommu/intel/irq_remapping.c | 2
-rw-r--r--  drivers/iommu/intel/svm.c | 51
-rw-r--r--  drivers/iommu/io-pgtable-arm-v7s.c | 11
-rw-r--r--  drivers/iommu/io-pgtable-arm.c | 19
-rw-r--r--  drivers/iommu/ioasid.c | 38
-rw-r--r--  drivers/iommu/iommu-sva-lib.c | 86
-rw-r--r--  drivers/iommu/iommu-sva-lib.h | 15
-rw-r--r--  drivers/iommu/iommu.c | 276
-rw-r--r--  drivers/iommu/iova.c | 108
-rw-r--r--  drivers/iommu/ipmmu-vmsa.c | 1
-rw-r--r--  drivers/iommu/msm_iommu.c | 7
-rw-r--r--  drivers/iommu/mtk_iommu.c | 1
-rw-r--r--  drivers/iommu/tegra-smmu.c | 240
-rw-r--r--  drivers/irqchip/Kconfig | 5
-rw-r--r--  drivers/irqchip/irq-bcm2836.c | 4
-rw-r--r--  drivers/irqchip/irq-gic-v3-its.c | 12
-rw-r--r--  drivers/irqchip/irq-gic-v4.c | 19
-rw-r--r--  drivers/irqchip/irq-loongson-liointc.c | 4
-rw-r--r--  drivers/irqchip/irq-mips-cpu.c | 7
-rw-r--r--  drivers/irqchip/irq-sl28cpld.c | 2
-rw-r--r--  drivers/isdn/mISDN/Kconfig | 1
-rw-r--r--  drivers/leds/Kconfig | 3
-rw-r--r--  drivers/leds/Makefile | 3
-rw-r--r--  drivers/leds/flash/Kconfig | 15
-rw-r--r--  drivers/leds/flash/Makefile | 3
-rw-r--r--  drivers/leds/flash/leds-rt8515.c | 397
-rw-r--r--  drivers/leds/led-triggers.c | 10
-rw-r--r--  drivers/leds/leds-ariel.c | 6
-rw-r--r--  drivers/leds/leds-lm3533.c | 2
-rw-r--r--  drivers/leds/leds-lp50xx.c | 6
-rw-r--r--  drivers/leds/leds-netxbig.c | 35
-rw-r--r--  drivers/leds/leds-turris-omnia.c | 85
-rw-r--r--  drivers/lightnvm/Kconfig | 1
-rw-r--r--  drivers/lightnvm/core.c | 3
-rw-r--r--  drivers/lightnvm/pblk-core.c | 4
-rw-r--r--  drivers/mailbox/Kconfig | 9
-rw-r--r--  drivers/mailbox/Makefile | 2
-rw-r--r--  drivers/mailbox/arm_mhu_db.c | 2
-rw-r--r--  drivers/mailbox/arm_mhuv2.c | 1136
-rw-r--r--  drivers/mailbox/stm32-ipcc.c | 15
-rw-r--r--  drivers/md/Kconfig | 24
-rw-r--r--  drivers/md/Makefile | 20
-rw-r--r--  drivers/md/bcache/features.c | 2
-rw-r--r--  drivers/md/bcache/features.h | 36
-rw-r--r--  drivers/md/bcache/request.c | 9
-rw-r--r--  drivers/md/bcache/super.c | 93
-rw-r--r--  drivers/md/bcache/sysfs.c | 2
-rw-r--r--  drivers/md/bcache/writeback.c | 9
-rw-r--r--  drivers/md/dm-bufio.c | 6
-rw-r--r--  drivers/md/dm-cache-target.c | 7
-rw-r--r--  drivers/md/dm-core.h | 7
-rw-r--r--  drivers/md/dm-crypt.c | 176
-rw-r--r--  drivers/md/dm-ebs-target.c | 2
-rw-r--r--  drivers/md/dm-integrity.c | 92
-rw-r--r--  drivers/md/dm-ioctl.c | 1
-rw-r--r--  drivers/md/dm-ps-historical-service-time.c (renamed from drivers/md/dm-historical-service-time.c) | 0
-rw-r--r--  drivers/md/dm-ps-io-affinity.c | 272
-rw-r--r--  drivers/md/dm-ps-queue-length.c (renamed from drivers/md/dm-queue-length.c) | 0
-rw-r--r--  drivers/md/dm-ps-round-robin.c (renamed from drivers/md/dm-round-robin.c) | 0
-rw-r--r--  drivers/md/dm-ps-service-time.c (renamed from drivers/md/dm-service-time.c) | 0
-rw-r--r--  drivers/md/dm-raid.c | 9
-rw-r--r--  drivers/md/dm-rq.c | 2
-rw-r--r--  drivers/md/dm-snap.c | 24
-rw-r--r--  drivers/md/dm-stripe.c | 2
-rw-r--r--  drivers/md/dm-switch.c | 1
-rw-r--r--  drivers/md/dm-table.c | 24
-rw-r--r--  drivers/md/dm-unstripe.c | 1
-rw-r--r--  drivers/md/dm-verity-target.c | 12
-rw-r--r--  drivers/md/dm-verity-verify-sig.c | 9
-rw-r--r--  drivers/md/dm-zero.c | 1
-rw-r--r--  drivers/md/dm.c | 62
-rw-r--r--  drivers/md/md-cluster.c | 75
-rw-r--r--  drivers/md/md-linear.c | 6
-rw-r--r--  drivers/md/md.c | 150
-rw-r--r--  drivers/md/md.h | 6
-rw-r--r--  drivers/md/raid0.c | 8
-rw-r--r--  drivers/md/raid1.c | 7
-rw-r--r--  drivers/md/raid10.c | 9
-rw-r--r--  drivers/md/raid5.c | 15
-rw-r--r--  drivers/media/cec/platform/Makefile | 1
-rw-r--r--  drivers/media/common/videobuf2/videobuf2-v4l2.c | 3
-rw-r--r--  drivers/media/i2c/ccs-pll.c | 8
-rw-r--r--  drivers/media/i2c/ccs/ccs-data.c | 2
-rw-r--r--  drivers/media/pci/intel/ipu3/ipu3-cio2.c | 2
-rw-r--r--  drivers/media/platform/qcom/venus/core.c | 2
-rw-r--r--  drivers/media/platform/rcar-vin/rcar-core.c | 2
-rw-r--r--  drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c | 21
-rw-r--r--  drivers/media/platform/rockchip/rkisp1/rkisp1-params.c | 5
-rw-r--r--  drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h | 1
-rw-r--r--  drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c | 11
-rw-r--r--  drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c | 27
-rw-r--r--  drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c | 17
-rw-r--r--  drivers/media/platform/sunxi/sun8i-di/sun8i-di.c | 4
-rw-r--r--  drivers/media/rc/ir-mce_kbd-decoder.c | 2
-rw-r--r--  drivers/media/rc/ite-cir.c | 2
-rw-r--r--  drivers/media/rc/rc-main.c | 8
-rw-r--r--  drivers/media/rc/serial_ir.c | 2
-rw-r--r--  drivers/media/v4l2-core/v4l2-common.c | 4
-rw-r--r--  drivers/memory/Kconfig | 6
-rw-r--r--  drivers/memory/jz4780-nemc.c | 6
-rw-r--r--  drivers/memory/mtk-smi.c | 19
-rw-r--r--  drivers/memory/renesas-rpc-if.c | 18
-rw-r--r--  drivers/memory/tegra/Kconfig | 10
-rw-r--r--  drivers/memory/tegra/mc.c | 155
-rw-r--r--  drivers/memory/tegra/mc.h | 22
-rw-r--r--  drivers/memory/tegra/tegra114.c | 6
-rw-r--r--  drivers/memory/tegra/tegra124-emc.c | 22
-rw-r--r--  drivers/memory/tegra/tegra124.c | 6
-rw-r--r--  drivers/memory/tegra/tegra20-emc.c | 520
-rw-r--r--  drivers/memory/tegra/tegra20.c | 77
-rw-r--r--  drivers/memory/tegra/tegra210-emc-core.c | 39
-rw-r--r--  drivers/memory/tegra/tegra210.c | 60
-rw-r--r--  drivers/memory/tegra/tegra30-emc.c | 411
-rw-r--r--  drivers/memory/tegra/tegra30.c | 245
-rw-r--r--  drivers/message/fusion/mptbase.c | 15
-rw-r--r--  drivers/message/fusion/mptfc.c | 2
-rw-r--r--  drivers/message/fusion/mptsas.c | 45
-rw-r--r--  drivers/message/fusion/mptsas.h | 1
-rw-r--r--  drivers/message/fusion/mptscsih.c | 2
-rw-r--r--  drivers/message/fusion/mptspi.c | 2
-rw-r--r--  drivers/mfd/88pm800.c | 2
-rw-r--r--  drivers/mfd/88pm860x-core.c | 50
-rw-r--r--  drivers/mfd/Kconfig | 32
-rw-r--r--  drivers/mfd/ab8500-debugfs.c | 18
-rw-r--r--  drivers/mfd/altera-sysmgr.c | 2
-rw-r--r--  drivers/mfd/at91-usart.c | 12
-rw-r--r--  drivers/mfd/axp20x-i2c.c | 2
-rw-r--r--  drivers/mfd/bcm590xx.c | 2
-rw-r--r--  drivers/mfd/da9055-core.c | 8
-rw-r--r--  drivers/mfd/da9055-i2c.c | 2
-rw-r--r--  drivers/mfd/da9062-core.c | 26
-rw-r--r--  drivers/mfd/da9063-core.c | 8
-rw-r--r--  drivers/mfd/da9063-i2c.c | 2
-rw-r--r--  drivers/mfd/da9150-core.c | 8
-rw-r--r--  drivers/mfd/ene-kb3930.c | 2
-rw-r--r--  drivers/mfd/fsl-imx25-tsadc.c | 2
-rw-r--r--  drivers/mfd/hi655x-pmic.c | 2
-rw-r--r--  drivers/mfd/htc-i2cpld.c | 2
-rw-r--r--  drivers/mfd/intel_msic.c | 18
-rw-r--r--  drivers/mfd/intel_quark_i2c_gpio.c | 4
-rw-r--r--  drivers/mfd/intel_soc_pmic_bxtwc.c | 14
-rw-r--r--  drivers/mfd/intel_soc_pmic_chtdc_ti.c | 10
-rw-r--r--  drivers/mfd/intel_soc_pmic_chtwc.c | 4
-rw-r--r--  drivers/mfd/intel_soc_pmic_crc.c | 10
-rw-r--r--  drivers/mfd/ioc3.c | 16
-rw-r--r--  drivers/mfd/kempld-core.c | 100
-rw-r--r--  drivers/mfd/lp8788.c | 4
-rw-r--r--  drivers/mfd/madera-core.c | 35
-rw-r--r--  drivers/mfd/max77650.c | 2
-rw-r--r--  drivers/mfd/max77686.c | 2
-rw-r--r--  drivers/mfd/max8925-core.c | 56
-rw-r--r--  drivers/mfd/menelaus.c | 2
-rw-r--r--  drivers/mfd/motorola-cpcap.c | 6
-rw-r--r--  drivers/mfd/mt6397-core.c | 2
-rw-r--r--  drivers/mfd/qcom-spmi-pmic.c | 4
-rw-r--r--  drivers/mfd/rdc321x-southbridge.c | 4
-rw-r--r--  drivers/mfd/retu-mfd.c | 4
-rw-r--r--  drivers/mfd/rk808.c | 8
-rw-r--r--  drivers/mfd/rt5033.c | 2
-rw-r--r--  drivers/mfd/stmfx.c | 12
-rw-r--r--  drivers/mfd/stmpe.c | 10
-rw-r--r--  drivers/mfd/sun4i-gpadc.c | 4
-rw-r--r--  drivers/mfd/syscon.c | 18
-rw-r--r--  drivers/mfd/tc3589x.c | 4
-rw-r--r--  drivers/mfd/tc6387xb.c | 2
-rw-r--r--  drivers/mfd/tc6393xb.c | 6
-rw-r--r--  drivers/mfd/tps65090.c | 2
-rw-r--r--  drivers/mfd/tps65217.c | 4
-rw-r--r--  drivers/mfd/tps6586x.c | 2
-rw-r--r--  drivers/mfd/tps65910.c | 46
-rw-r--r--  drivers/mfd/tps65911-comparator.c | 4
-rw-r--r--  drivers/mfd/tps80031.c | 2
-rw-r--r--  drivers/mfd/twl6030-irq.c | 2
-rw-r--r--  drivers/mfd/wm831x-core.c | 52
-rw-r--r--  drivers/mfd/wm8994-core.c | 6
-rw-r--r--  drivers/misc/cardreader/rtsx_pcr.c | 7
-rw-r--r--  drivers/misc/habanalabs/common/command_submission.c | 77
-rw-r--r--  drivers/misc/habanalabs/common/device.c | 19
-rw-r--r--  drivers/misc/habanalabs/common/firmware_if.c | 65
-rw-r--r--  drivers/misc/habanalabs/common/habanalabs.h | 5
-rw-r--r--  drivers/misc/habanalabs/common/habanalabs_drv.c | 1
-rw-r--r--  drivers/misc/habanalabs/common/habanalabs_ioctl.c | 9
-rw-r--r--  drivers/misc/habanalabs/common/hw_queue.c | 5
-rw-r--r--  drivers/misc/habanalabs/common/memory.c | 10
-rw-r--r--  drivers/misc/habanalabs/common/mmu.c | 6
-rw-r--r--  drivers/misc/habanalabs/common/mmu_v1.c | 12
-rw-r--r--  drivers/misc/habanalabs/common/pci.c | 28
-rw-r--r--  drivers/misc/habanalabs/gaudi/gaudi.c | 194
-rw-r--r--  drivers/misc/habanalabs/gaudi/gaudiP.h | 7
-rw-r--r--  drivers/misc/habanalabs/gaudi/gaudi_coresight.c | 3
-rw-r--r--  drivers/misc/habanalabs/goya/goya.c | 78
-rw-r--r--  drivers/misc/habanalabs/include/common/hl_boot_if.h | 9
-rw-r--r--  drivers/misc/lkdtm/Makefile | 2
-rw-r--r--  drivers/misc/lkdtm/bugs.c | 50
-rw-r--r--  drivers/misc/lkdtm/core.c | 6
-rw-r--r--  drivers/misc/lkdtm/fortify.c | 82
-rw-r--r--  drivers/misc/lkdtm/lkdtm.h | 22
-rw-r--r--  drivers/misc/lkdtm/powerpc.c | 120
-rw-r--r--  drivers/misc/ocxl/context.c | 4
-rw-r--r--  drivers/misc/ocxl/link.c | 70
-rw-r--r--  drivers/misc/ocxl/ocxl_internal.h | 9
-rw-r--r--  drivers/misc/ocxl/trace.h | 64
-rw-r--r--  drivers/misc/pvpanic.c | 19
-rw-r--r--  drivers/mmc/core/queue.c | 4
-rw-r--r--  drivers/mmc/core/sdio_cis.c | 6
-rw-r--r--  drivers/mmc/host/sdhci-brcmstb.c | 6
-rw-r--r--  drivers/mmc/host/sdhci-of-dwcmshc.c | 27
-rw-r--r--  drivers/mmc/host/sdhci-pltfm.h | 7
-rw-r--r--  drivers/mmc/host/sdhci-xenon.c | 7
-rw-r--r--  drivers/mtd/Kconfig | 1
-rw-r--r--  drivers/mtd/devices/docg3.c | 5
-rw-r--r--  drivers/mtd/devices/phram.c | 54
-rw-r--r--  drivers/mtd/devices/powernv_flash.c | 5
-rw-r--r--  drivers/mtd/maps/physmap-bt1-rom.c | 8
-rw-r--r--  drivers/mtd/maps/plat-ram.c | 11
-rw-r--r--  drivers/mtd/mtd_blkdevs.c | 28
-rw-r--r--  drivers/mtd/mtdchar.c | 2
-rw-r--r--  drivers/mtd/mtdcore.c | 6
-rw-r--r--  drivers/mtd/mtdpart.c | 2
-rw-r--r--  drivers/mtd/mtdsuper.c | 17
-rw-r--r--  drivers/mtd/nand/Kconfig | 33
-rw-r--r--  drivers/mtd/nand/Makefile | 2
-rw-r--r--  drivers/mtd/nand/core.c | 124
-rw-r--r--  drivers/mtd/nand/ecc-sw-bch.c | 406
-rw-r--r--  drivers/mtd/nand/ecc-sw-hamming.c (renamed from drivers/mtd/nand/raw/nand_ecc.c) | 311
-rw-r--r--  drivers/mtd/nand/ecc.c | 140
-rw-r--r--  drivers/mtd/nand/onenand/onenand_base.c | 444
-rw-r--r--  drivers/mtd/nand/onenand/onenand_bbt.c | 32
-rw-r--r--  drivers/mtd/nand/onenand/onenand_omap2.c | 16
-rw-r--r--  drivers/mtd/nand/raw/Kconfig | 45
-rw-r--r--  drivers/mtd/nand/raw/Makefile | 4
-rw-r--r--  drivers/mtd/nand/raw/arasan-nand-controller.c | 1
-rw-r--r--  drivers/mtd/nand/raw/au1550nd.c | 1
-rw-r--r--  drivers/mtd/nand/raw/brcmnand/brcmnand.c | 6
-rw-r--r--  drivers/mtd/nand/raw/cafe_nand.c | 2
-rw-r--r--  drivers/mtd/nand/raw/cs553x_nand.c | 3
-rw-r--r--  drivers/mtd/nand/raw/davinci_nand.c | 38
-rw-r--r--  drivers/mtd/nand/raw/diskonchip.c | 4
-rw-r--r--  drivers/mtd/nand/raw/fsl_elbc_nand.c | 1
-rw-r--r--  drivers/mtd/nand/raw/fsl_ifc_nand.c | 1
-rw-r--r--  drivers/mtd/nand/raw/fsl_upm.c | 1
-rw-r--r--  drivers/mtd/nand/raw/fsmc_nand.c | 5
-rw-r--r--  drivers/mtd/nand/raw/gpmi-nand/Makefile | 3
-rw-r--r--  drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c | 78
-rw-r--r--  drivers/mtd/nand/raw/gpmi-nand/gpmi-regs.h | 1
-rw-r--r--  drivers/mtd/nand/raw/ingenic/ingenic_ecc.c | 2
-rw-r--r--  drivers/mtd/nand/raw/intel-nand-controller.c | 722
-rw-r--r--  drivers/mtd/nand/raw/lpc32xx_mlc.c | 1
-rw-r--r--  drivers/mtd/nand/raw/lpc32xx_slc.c | 3
-rw-r--r--  drivers/mtd/nand/raw/marvell_nand.c | 6
-rw-r--r--  drivers/mtd/nand/raw/meson_nand.c | 7
-rw-r--r--  drivers/mtd/nand/raw/mxc_nand.c | 94
-rw-r--r--  drivers/mtd/nand/raw/mxic_nand.c | 2
-rw-r--r--  drivers/mtd/nand/raw/nand_base.c | 204
-rw-r--r--  drivers/mtd/nand/raw/nand_bbt.c | 2
-rw-r--r--  drivers/mtd/nand/raw/nand_bch.c | 219
-rw-r--r--  drivers/mtd/nand/raw/nand_legacy.c | 9
-rw-r--r--  drivers/mtd/nand/raw/nandsim.c | 10
-rw-r--r--  drivers/mtd/nand/raw/ndfc.c | 3
-rw-r--r--  drivers/mtd/nand/raw/omap2.c | 64
-rw-r--r--  drivers/mtd/nand/raw/omap_elm.c | 7
-rw-r--r--  drivers/mtd/nand/raw/pasemi_nand.c | 1
-rw-r--r--  drivers/mtd/nand/raw/qcom_nandc.c | 74
-rw-r--r--  drivers/mtd/nand/raw/rockchip-nand-controller.c | 1495
-rw-r--r--  drivers/mtd/nand/raw/s3c2410.c | 5
-rw-r--r--  drivers/mtd/nand/raw/sharpsl.c | 3
-rw-r--r--  drivers/mtd/nand/raw/sunxi_nand.c | 149
-rw-r--r--  drivers/mtd/nand/raw/tmio_nand.c | 7
-rw-r--r--  drivers/mtd/nand/raw/txx9ndfmc.c | 5
-rw-r--r--  drivers/mtd/nand/spi/Kconfig | 1
-rw-r--r--  drivers/mtd/nand/spi/core.c | 274
-rw-r--r--  drivers/mtd/nand/spi/macronix.c | 47
-rw-r--r--  drivers/mtd/nand/spi/micron.c | 124
-rw-r--r--  drivers/mtd/nand/spi/toshiba.c | 2
-rw-r--r--  drivers/mtd/parsers/cmdlinepart.c | 14
-rw-r--r--  drivers/mtd/sm_ftl.c | 30
-rw-r--r--  drivers/mtd/spi-nor/Kconfig | 44
-rw-r--r--  drivers/mtd/spi-nor/atmel.c | 191
-rw-r--r--  drivers/mtd/spi-nor/controllers/hisi-sfc.c | 2
-rw-r--r--  drivers/mtd/spi-nor/core.c | 595
-rw-r--r--  drivers/mtd/spi-nor/core.h | 38
-rw-r--r--  drivers/mtd/spi-nor/esmt.c | 2
-rw-r--r--  drivers/mtd/spi-nor/intel.c | 19
-rw-r--r--  drivers/mtd/spi-nor/micron-st.c | 115
-rw-r--r--  drivers/mtd/spi-nor/sfdp.c | 172
-rw-r--r--  drivers/mtd/spi-nor/sfdp.h | 8
-rw-r--r--  drivers/mtd/spi-nor/spansion.c | 172
-rw-r--r--  drivers/mtd/spi-nor/sst.c | 32
-rw-r--r--  drivers/mtd/tests/mtd_nandecctest.c | 31
-rw-r--r--  drivers/mtd/ubi/build.c | 7
-rw-r--r--  drivers/mtd/ubi/eba.c | 3
-rw-r--r--  drivers/mtd/ubi/gluebi.c | 2
-rw-r--r--  drivers/mtd/ubi/io.c | 9
-rw-r--r--  drivers/mtd/ubi/kapi.c | 2
-rw-r--r--  drivers/mtd/ubi/wl.c | 3
-rw-r--r--  drivers/net/arcnet/arc-rimi.c | 4
-rw-r--r--  drivers/net/arcnet/arcdevice.h | 6
-rw-r--r--  drivers/net/arcnet/arcnet.c | 66
-rw-r--r--  drivers/net/arcnet/com20020-isa.c | 4
-rw-r--r--  drivers/net/arcnet/com20020-pci.c | 2
-rw-r--r--  drivers/net/arcnet/com20020_cs.c | 2
-rw-r--r--  drivers/net/arcnet/com90io.c | 4
-rw-r--r--  drivers/net/arcnet/com90xx.c | 4
-rw-r--r--  drivers/net/bareudp.c | 25
-rw-r--r--  drivers/net/can/Kconfig | 1
-rw-r--r--  drivers/net/can/dev.c | 6
-rw-r--r--  drivers/net/can/m_can/m_can.c | 2
-rw-r--r--  drivers/net/can/m_can/tcan4x5x.c | 26
-rw-r--r--  drivers/net/can/rcar/Kconfig | 4
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c | 19
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb_fd.c | 8
-rw-r--r--  drivers/net/can/vxcan.c | 6
-rw-r--r--  drivers/net/dsa/b53/b53_common.c | 2
-rw-r--r--  drivers/net/dsa/bcm_sf2.c | 8
-rw-r--r--  drivers/net/dsa/hirschmann/Kconfig | 1
-rw-r--r--  drivers/net/dsa/lantiq_gswip.c | 34
-rw-r--r--  drivers/net/dsa/microchip/ksz8795.c | 30
-rw-r--r--  drivers/net/dsa/microchip/ksz_common.c | 4
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c | 6
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global1_vtu.c | 4
-rw-r--r--  drivers/net/dsa/qca/ar9331.c | 33
-rw-r--r--  drivers/net/ethernet/allwinner/sun4i-emac.c | 7
-rw-r--r--  drivers/net/ethernet/amd/Kconfig | 10
-rw-r--r--  drivers/net/ethernet/amd/Makefile | 1
-rw-r--r--  drivers/net/ethernet/amd/am79c961a.c | 763
-rw-r--r--  drivers/net/ethernet/amd/am79c961a.h | 143
-rw-r--r--  drivers/net/ethernet/aquantia/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 7
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 38
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 7
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | 8
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 4
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h | 7
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h | 4
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c | 110
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c | 41
-rw-r--r--  drivers/net/ethernet/ethoc.c | 3
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/fec.h | 5
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 9
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c | 1
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/mii-fec.c | 1
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth.c | 3
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth.h | 9
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 9
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 9
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 2
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 30
-rw-r--r--  drivers/net/ethernet/intel/e1000e/e1000.h | 1
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ethtool.c | 46
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c | 17
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 59
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 3
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 10
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c28
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c7
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c14
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c16
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c5
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c27
-rw-r--r--drivers/net/ethernet/intel/igc/igc_i225.c3
-rw-r--r--drivers/net/ethernet/intel/igc/igc_mac.c2
-rw-r--r--drivers/net/ethernet/korina.c2
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c4
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c29
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c48
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c14
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c6
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/health.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c97
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c50
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c58
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/rdma.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_thermal.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h1
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c43
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c23
-rw-r--r--drivers/net/ethernet/mscc/ocelot_net.c4
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vsc7514.c8
-rw-r--r--drivers/net/ethernet/natsemi/macsonic.c12
-rw-r--r--drivers/net/ethernet/natsemi/xtsonic.c7
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.c6
-rw-r--r--drivers/net/ethernet/ni/Kconfig2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c2
-rw-r--r--drivers/net/ethernet/qlogic/Kconfig1
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c7
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_fp.c5
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c1
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c81
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c30
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c129
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac5.c52
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c20
-rw-r--r--drivers/net/ethernet/ti/cpts.c2
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c3
-rw-r--r--drivers/net/hyperv/hyperv_net.h13
-rw-r--r--drivers/net/hyperv/netvsc.c22
-rw-r--r--drivers/net/hyperv/netvsc_drv.c11
-rw-r--r--drivers/net/hyperv/rndis_filter.c6
-rw-r--r--drivers/net/ipa/gsi.c131
-rw-r--r--drivers/net/ipa/ipa_clock.c4
-rw-r--r--drivers/net/ipa/ipa_endpoint.c6
-rw-r--r--drivers/net/ipa/ipa_mem.c4
-rw-r--r--drivers/net/ipa/ipa_modem.c1
-rw-r--r--drivers/net/mdio/mdio-bitbang.c6
-rw-r--r--drivers/net/phy/smsc.c3
-rw-r--r--drivers/net/ppp/ppp_generic.c12
-rw-r--r--drivers/net/team/team.c6
-rw-r--r--drivers/net/tun.c2
-rw-r--r--drivers/net/usb/Kconfig1
-rw-r--r--drivers/net/usb/cdc_ether.c13
-rw-r--r--drivers/net/usb/cdc_ncm.c23
-rw-r--r--drivers/net/usb/qmi_wwan.c2
-rw-r--r--drivers/net/usb/r8152.c1
-rw-r--r--drivers/net/usb/r8153_ecm.c8
-rw-r--r--drivers/net/usb/rndis_host.c2
-rw-r--r--drivers/net/virtio_net.c13
-rw-r--r--drivers/net/wan/Kconfig1
-rw-r--r--drivers/net/wan/hdlc_ppp.c7
-rw-r--r--drivers/net/wireguard/socket.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/spectral.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.c10
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c8
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.c44
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/peer.c17
-rw-r--r--drivers/net/wireless/ath/ath11k/peer.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.c24
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/spectral.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/common-spectral.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/Kconfig1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c65
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/pnvm.c56
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c53
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/queue/tx.c55
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/init.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.c10
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio.c19
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c9
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/dma.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c8
-rw-r--r--drivers/net/xen-netback/xenbus.c6
-rw-r--r--drivers/nfc/s3fwrn5/nci.c25
-rw-r--r--drivers/nfc/s3fwrn5/nci.h22
-rw-r--r--drivers/nfc/s3fwrn5/phy_common.c3
-rw-r--r--drivers/ntb/hw/idt/ntb_hw_idt.c4
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen1.h1
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen4.c27
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen4.h15
-rw-r--r--drivers/ntb/msi.c4
-rw-r--r--drivers/nvdimm/btt.h3
-rw-r--r--drivers/nvdimm/claim.c1
-rw-r--r--drivers/nvdimm/core.c1
-rw-r--r--drivers/nvdimm/dimm_devs.c18
-rw-r--r--drivers/nvdimm/label.c13
-rw-r--r--drivers/nvdimm/namespace_devs.c10
-rw-r--r--drivers/nvdimm/pmem.c1
-rw-r--r--drivers/nvme/host/core.c202
-rw-r--r--drivers/nvme/host/fabrics.c25
-rw-r--r--drivers/nvme/host/fabrics.h5
-rw-r--r--drivers/nvme/host/fc.c17
-rw-r--r--drivers/nvme/host/lightnvm.c8
-rw-r--r--drivers/nvme/host/multipath.c7
-rw-r--r--drivers/nvme/host/nvme.h18
-rw-r--r--drivers/nvme/host/pci.c162
-rw-r--r--drivers/nvme/host/rdma.c19
-rw-r--r--drivers/nvme/host/tcp.c32
-rw-r--r--drivers/nvme/host/zns.c13
-rw-r--r--drivers/nvme/target/Kconfig2
-rw-r--r--drivers/nvme/target/admin-cmd.c28
-rw-r--r--drivers/nvme/target/configfs.c40
-rw-r--r--drivers/nvme/target/core.c15
-rw-r--r--drivers/nvme/target/discovery.c1
-rw-r--r--drivers/nvme/target/fcloop.c86
-rw-r--r--drivers/nvme/target/loop.c12
-rw-r--r--drivers/nvme/target/nvmet.h4
-rw-r--r--drivers/nvme/target/passthru.c37
-rw-r--r--drivers/nvme/target/rdma.c29
-rw-r--r--drivers/nvme/target/tcp.c3
-rw-r--r--drivers/of/device.c10
-rw-r--r--drivers/opp/core.c797
-rw-r--r--drivers/opp/of.c230
-rw-r--r--drivers/opp/opp.h17
-rw-r--r--drivers/pci/Makefile3
-rw-r--r--drivers/pci/controller/cadence/pci-j721e.c28
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-ep.c9
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence.h1
-rw-r--r--drivers/pci/controller/dwc/Kconfig14
-rw-r--r--drivers/pci/controller/dwc/pci-dra7xx.c141
-rw-r--r--drivers/pci/controller/dwc/pci-exynos.c389
-rw-r--r--drivers/pci/controller/dwc/pci-imx6.c39
-rw-r--r--drivers/pci/controller/dwc/pci-keystone.c79
-rw-r--r--drivers/pci/controller/dwc/pci-layerscape-ep.c37
-rw-r--r--drivers/pci/controller/dwc/pci-layerscape.c67
-rw-r--r--drivers/pci/controller/dwc/pci-meson.c53
-rw-r--r--drivers/pci/controller/dwc/pcie-al.c41
-rw-r--r--drivers/pci/controller/dwc/pcie-armada8k.c37
-rw-r--r--drivers/pci/controller/dwc/pcie-artpec6.c76
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-ep.c58
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-host.c143
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-plat.c70
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.c105
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.h27
-rw-r--r--drivers/pci/controller/dwc/pcie-hisi.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-histb.c37
-rw-r--r--drivers/pci/controller/dwc/pcie-intel-gw.c67
-rw-r--r--drivers/pci/controller/dwc/pcie-kirin.c62
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom.c127
-rw-r--r--drivers/pci/controller/dwc/pcie-spear13xx.c62
-rw-r--r--drivers/pci/controller/dwc/pcie-tegra194.c184
-rw-r--r--drivers/pci/controller/dwc/pcie-uniphier-ep.c38
-rw-r--r--drivers/pci/controller/dwc/pcie-uniphier.c51
-rw-r--r--drivers/pci/controller/mobiveil/pcie-mobiveil-host.c8
-rw-r--r--drivers/pci/controller/pci-aardvark.c22
-rw-r--r--drivers/pci/controller/pci-host-generic.c1
-rw-r--r--drivers/pci/controller/pci-thunder-ecam.c1
-rw-r--r--drivers/pci/controller/pci-thunder-pem.c13
-rw-r--r--drivers/pci/controller/pci-xgene.c2
-rw-r--r--drivers/pci/controller/pcie-brcmstb.c17
-rw-r--r--drivers/pci/controller/pcie-iproc.c60
-rw-r--r--drivers/pci/controller/pcie-rcar-host.c2
-rw-r--r--drivers/pci/controller/pcie-rockchip-host.c27
-rw-r--r--drivers/pci/controller/pcie-rockchip.h8
-rw-r--r--drivers/pci/controller/pcie-tango.c1
-rw-r--r--drivers/pci/controller/pcie-xilinx-nwl.c17
-rw-r--r--drivers/pci/controller/pcie-xilinx.c11
-rw-r--r--drivers/pci/controller/vmd.c56
-rw-r--r--drivers/pci/ecam.c32
-rw-r--r--drivers/pci/hotplug/ibmphp_pci.c2
-rw-r--r--drivers/pci/msi.c70
-rw-r--r--drivers/pci/of.c5
-rw-r--r--drivers/pci/p2pdma.c35
-rw-r--r--drivers/pci/pci-acpi.c22
-rw-r--r--drivers/pci/pci-driver.c151
-rw-r--r--drivers/pci/pci-sysfs.c10
-rw-r--r--drivers/pci/pci.c118
-rw-r--r--drivers/pci/pci.h68
-rw-r--r--drivers/pci/pcie/Makefile2
-rw-r--r--drivers/pci/pcie/aer.c101
-rw-r--r--drivers/pci/pcie/aer_inject.c5
-rw-r--r--drivers/pci/pcie/err.c95
-rw-r--r--drivers/pci/pcie/pme.c16
-rw-r--r--drivers/pci/pcie/portdrv_core.c9
-rw-r--r--drivers/pci/pcie/portdrv_pci.c13
-rw-r--r--drivers/pci/pcie/ptm.c60
-rw-r--r--drivers/pci/pcie/rcec.c190
-rw-r--r--drivers/pci/probe.c30
-rw-r--r--drivers/pci/quirks.c41
-rw-r--r--drivers/pci/slot.c11
-rw-r--r--drivers/pcmcia/Kconfig5
-rw-r--r--drivers/pcmcia/Makefile1
-rw-r--r--drivers/pcmcia/at91_cf.c50
-rw-r--r--drivers/pcmcia/db1xxx_ss.c2
-rw-r--r--drivers/pcmcia/electra_cf.c2
-rw-r--r--drivers/pcmcia/omap_cf.c8
-rw-r--r--drivers/pcmcia/vrc4173_cardu.c591
-rw-r--r--drivers/pcmcia/vrc4173_cardu.h247
-rw-r--r--drivers/perf/arm_pmu.c5
-rw-r--r--drivers/phy/ingenic/Makefile2
-rw-r--r--drivers/phy/mediatek/Kconfig4
-rw-r--r--drivers/phy/motorola/phy-cpcap-usb.c19
-rw-r--r--drivers/pinctrl/Kconfig20
-rw-r--r--drivers/pinctrl/Makefile1
-rw-r--r--drivers/pinctrl/actions/pinctrl-s500.c2
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c2
-rw-r--r--drivers/pinctrl/core.c2
-rw-r--r--drivers/pinctrl/freescale/Kconfig7
-rw-r--r--drivers/pinctrl/freescale/Makefile1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx21.c330
-rw-r--r--drivers/pinctrl/intel/Kconfig25
-rw-r--r--drivers/pinctrl/intel/Makefile3
-rw-r--r--drivers/pinctrl/intel/pinctrl-alderlake.c437
-rw-r--r--drivers/pinctrl/intel/pinctrl-elkhartlake.c513
-rw-r--r--drivers/pinctrl/intel/pinctrl-lakefield.c375
-rw-r--r--drivers/pinctrl/intel/pinctrl-lynxpoint.c10
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7622.c13
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c14
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-paris.c8
-rw-r--r--drivers/pinctrl/meson/Kconfig17
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-a1.c4
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-axg-pmx.c3
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-axg.c4
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-g12a.c4
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-gxbb.c4
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-gxl.c4
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.c8
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.h1
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8-pmx.c2
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c10
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c10
-rw-r--r--drivers/pinctrl/pinctrl-amd.c43
-rw-r--r--drivers/pinctrl/pinctrl-at91-pio4.c22
-rw-r--r--drivers/pinctrl/pinctrl-falcon.c14
-rw-r--r--drivers/pinctrl/pinctrl-ingenic.c1347
-rw-r--r--drivers/pinctrl/pinctrl-microchip-sgpio.c892
-rw-r--r--drivers/pinctrl/pinctrl-ocelot.c186
-rw-r--r--drivers/pinctrl/qcom/Kconfig88
-rw-r--r--drivers/pinctrl/qcom/Makefile4
-rw-r--r--drivers/pinctrl/qcom/pinctrl-lpass-lpi.c695
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c98
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.h2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8953.c1844
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc7280.c1495
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdx55.c1018
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c2
-rw-r--r--drivers/pinctrl/renesas/core.c2
-rw-r--r--drivers/pinctrl/renesas/core.h4
-rw-r--r--drivers/pinctrl/renesas/gpio.c2
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a7778.c55
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a7790.c146
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a7791.c18
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77950.c45
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77951.c134
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a7796.c132
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77965.c132
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77990.c132
-rw-r--r--drivers/pinctrl/renesas/pfc-sh73a0.c2
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rza1.c1
-rw-r--r--drivers/pinctrl/renesas/pinctrl.c68
-rw-r--r--drivers/pinctrl/renesas/sh_pfc.h12
-rw-r--r--drivers/pinctrl/samsung/pinctrl-s3c24xx.c5
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear300.c8
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-a100.c2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c9
-rw-r--r--drivers/platform/chrome/cros_ec_proto.c14
-rw-r--r--drivers/platform/chrome/cros_ec_typec.c336
-rw-r--r--drivers/platform/surface/Kconfig8
-rw-r--r--drivers/platform/surface/surface_gpe.c4
-rw-r--r--drivers/platform/x86/amd-pmc.c2
-rw-r--r--drivers/platform/x86/dell-wmi-sysman/sysman.c6
-rw-r--r--drivers/platform/x86/hp-wmi.c17
-rw-r--r--drivers/platform/x86/i2c-multi-instantiate.c31
-rw-r--r--drivers/platform/x86/ideapad-laptop.c15
-rw-r--r--drivers/platform/x86/intel-vbtn.c10
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c5
-rw-r--r--drivers/platform/x86/touchscreen_dmi.c18
-rw-r--r--drivers/power/reset/Kconfig7
-rw-r--r--drivers/power/reset/Makefile1
-rw-r--r--drivers/power/reset/ocelot-reset.c30
-rw-r--r--drivers/power/reset/qnap-poweroff.c8
-rw-r--r--drivers/power/reset/regulator-poweroff.c82
-rw-r--r--drivers/power/reset/syscon-poweroff.c8
-rw-r--r--drivers/power/supply/ab8500_btemp.c68
-rw-r--r--drivers/power/supply/ab8500_charger.c99
-rw-r--r--drivers/power/supply/ab8500_fg.c106
-rw-r--r--drivers/power/supply/abx500_chargalg.c19
-rw-r--r--drivers/power/supply/axp20x_usb_power.c10
-rw-r--r--drivers/power/supply/axp288_charger.c28
-rw-r--r--drivers/power/supply/bq24190_charger.c21
-rw-r--r--drivers/power/supply/bq24735-charger.c1
-rw-r--r--drivers/power/supply/bq25890_charger.c2
-rw-r--r--drivers/power/supply/collie_battery.c151
-rw-r--r--drivers/power/supply/generic-adc-battery.c31
-rw-r--r--drivers/power/supply/max17042_battery.c23
-rw-r--r--drivers/power/supply/max8997_charger.c67
-rw-r--r--drivers/power/supply/pm2301_charger.c3
-rw-r--r--drivers/power/supply/power_supply_sysfs.c2
-rw-r--r--drivers/power/supply/s3c_adc_battery.c57
-rw-r--r--drivers/power/supply/wm831x_power.c1
-rw-r--r--drivers/ps3/ps3-lpm.c3
-rw-r--r--drivers/ps3/ps3-vuart.c10
-rw-r--r--drivers/ps3/ps3stor_lib.c2
-rw-r--r--drivers/ptp/Kconfig2
-rw-r--r--drivers/pwm/Kconfig71
-rw-r--r--drivers/pwm/Makefile3
-rw-r--r--drivers/pwm/core.c2
-rw-r--r--drivers/pwm/pwm-ab8500.c4
-rw-r--r--drivers/pwm/pwm-atmel-tcb.c264
-rw-r--r--drivers/pwm/pwm-atmel.c4
-rw-r--r--drivers/pwm/pwm-bcm-iproc.c4
-rw-r--r--drivers/pwm/pwm-bcm-kona.c4
-rw-r--r--drivers/pwm/pwm-bcm2835.c73
-rw-r--r--drivers/pwm/pwm-berlin.c4
-rw-r--r--drivers/pwm/pwm-brcmstb.c4
-rw-r--r--drivers/pwm/pwm-clps711x.c4
-rw-r--r--drivers/pwm/pwm-crc.c2
-rw-r--r--drivers/pwm/pwm-dwc.c319
-rw-r--r--drivers/pwm/pwm-ep93xx.c4
-rw-r--r--drivers/pwm/pwm-fsl-ftm.c4
-rw-r--r--drivers/pwm/pwm-hibvt.c7
-rw-r--r--drivers/pwm/pwm-img.c4
-rw-r--r--drivers/pwm/pwm-imx-tpm.c10
-rw-r--r--drivers/pwm/pwm-imx1.c25
-rw-r--r--drivers/pwm/pwm-imx27.c28
-rw-r--r--drivers/pwm/pwm-intel-lgm.c244
-rw-r--r--drivers/pwm/pwm-iqs620a.c2
-rw-r--r--drivers/pwm/pwm-keembay.c245
-rw-r--r--drivers/pwm/pwm-lp3943.c1
-rw-r--r--drivers/pwm/pwm-lpc18xx-sct.c4
-rw-r--r--drivers/pwm/pwm-lpc32xx.c4
-rw-r--r--drivers/pwm/pwm-lpss-platform.c39
-rw-r--r--drivers/pwm/pwm-lpss.c7
-rw-r--r--drivers/pwm/pwm-mediatek.c23
-rw-r--r--drivers/pwm/pwm-meson.c4
-rw-r--r--drivers/pwm/pwm-mtk-disp.c4
-rw-r--r--drivers/pwm/pwm-pxa.c4
-rw-r--r--drivers/pwm/pwm-rcar.c6
-rw-r--r--drivers/pwm/pwm-renesas-tpu.c4
-rw-r--r--drivers/pwm/pwm-rockchip.c4
-rw-r--r--drivers/pwm/pwm-samsung.c4
-rw-r--r--drivers/pwm/pwm-sifive.c4
-rw-r--r--drivers/pwm/pwm-sl28cpld.c4
-rw-r--r--drivers/pwm/pwm-spear.c4
-rw-r--r--drivers/pwm/pwm-sti.c54
-rw-r--r--drivers/pwm/pwm-sun4i.c10
-rw-r--r--drivers/pwm/pwm-tegra.c4
-rw-r--r--drivers/pwm/pwm-tiecap.c4
-rw-r--r--drivers/pwm/pwm-tiehrpwm.c16
-rw-r--r--drivers/pwm/pwm-vt8500.c4
-rw-r--r--drivers/pwm/pwm-zx.c5
-rw-r--r--drivers/rapidio/rio.c81
-rw-r--r--drivers/regulator/Kconfig1
-rw-r--r--drivers/regulator/bd718x7-regulator.c57
-rw-r--r--drivers/regulator/core.c44
-rw-r--r--drivers/regulator/pf8x00-regulator.c8
-rw-r--r--drivers/regulator/qcom-rpmh-regulator.c2
-rw-r--r--drivers/remoteproc/Kconfig14
-rw-r--r--drivers/remoteproc/Makefile1
-rw-r--r--drivers/remoteproc/ingenic_rproc.c2
-rw-r--r--drivers/remoteproc/mtk_common.h30
-rw-r--r--drivers/remoteproc/mtk_scp.c116
-rw-r--r--drivers/remoteproc/pru_rproc.c875
-rw-r--r--drivers/remoteproc/pru_rproc.h46
-rw-r--r--drivers/remoteproc/qcom_common.c146
-rw-r--r--drivers/remoteproc/qcom_common.h8
-rw-r--r--drivers/remoteproc/qcom_q6v5.c8
-rw-r--r--drivers/remoteproc/qcom_q6v5.h3
-rw-r--r--drivers/remoteproc/qcom_q6v5_adsp.c15
-rw-r--r--drivers/remoteproc/qcom_q6v5_mss.c124
-rw-r--r--drivers/remoteproc/qcom_q6v5_pas.c35
-rw-r--r--drivers/remoteproc/qcom_q6v5_wcss.c2
-rw-r--r--drivers/remoteproc/qcom_sysmon.c118
-rw-r--r--drivers/remoteproc/qcom_wcnss.c109
-rw-r--r--drivers/remoteproc/remoteproc_core.c69
-rw-r--r--drivers/remoteproc/remoteproc_coredump.c140
-rw-r--r--drivers/remoteproc/remoteproc_elf_helpers.h26
-rw-r--r--drivers/remoteproc/remoteproc_sysfs.c33
-rw-r--r--drivers/remoteproc/stm32_rproc.c2
-rw-r--r--drivers/remoteproc/ti_k3_dsp_remoteproc.c4
-rw-r--r--drivers/remoteproc/ti_k3_r5_remoteproc.c113
-rw-r--r--drivers/remoteproc/wkup_m3_rproc.c41
-rw-r--r--drivers/reset/Kconfig10
-rw-r--r--drivers/reset/Makefile1
-rw-r--r--drivers/reset/core.c73
-rw-r--r--drivers/reset/reset-bcm6345.c135
-rw-r--r--drivers/reset/reset-meson.c8
-rw-r--r--drivers/reset/reset-socfpga.c11
-rw-r--r--drivers/reset/reset-ti-syscon.c4
-rw-r--r--drivers/rpmsg/Kconfig9
-rw-r--r--drivers/rpmsg/Makefile1
-rw-r--r--drivers/rpmsg/rpmsg_core.c44
-rw-r--r--drivers/rpmsg/rpmsg_internal.h14
-rw-r--r--drivers/rpmsg/rpmsg_ns.c126
-rw-r--r--drivers/rpmsg/virtio_rpmsg_bus.c186
-rw-r--r--drivers/rtc/Kconfig33
-rw-r--r--drivers/rtc/class.c52
-rw-r--r--drivers/rtc/nvmem.c91
-rw-r--r--drivers/rtc/rtc-88pm80x.c2
-rw-r--r--drivers/rtc/rtc-88pm860x.c2
-rw-r--r--drivers/rtc/rtc-ab-b5ze-s3.c2
-rw-r--r--drivers/rtc/rtc-ab-eoz9.c2
-rw-r--r--drivers/rtc/rtc-ab3100.c2
-rw-r--r--drivers/rtc/rtc-ab8500.c2
-rw-r--r--drivers/rtc/rtc-abx80x.c2
-rw-r--r--drivers/rtc/rtc-ac100.c2
-rw-r--r--drivers/rtc/rtc-armada38x.c2
-rw-r--r--drivers/rtc/rtc-aspeed.c2
-rw-r--r--drivers/rtc/rtc-at91rm9200.c108
-rw-r--r--drivers/rtc/rtc-at91sam9.c2
-rw-r--r--drivers/rtc/rtc-au1xxx.c2
-rw-r--r--drivers/rtc/rtc-bd70528.c2
-rw-r--r--drivers/rtc/rtc-brcmstb-waketimer.c5
-rw-r--r--drivers/rtc/rtc-cadence.c2
-rw-r--r--drivers/rtc/rtc-cmos.c14
-rw-r--r--drivers/rtc/rtc-coh901331.c2
-rw-r--r--drivers/rtc/rtc-cpcap.c8
-rw-r--r--drivers/rtc/rtc-cros-ec.c2
-rw-r--r--drivers/rtc/rtc-da9052.c2
-rw-r--r--drivers/rtc/rtc-da9063.c4
-rw-r--r--drivers/rtc/rtc-davinci.c2
-rw-r--r--drivers/rtc/rtc-digicolor.c2
-rw-r--r--drivers/rtc/rtc-dm355evm.c2
-rw-r--r--drivers/rtc/rtc-ds1305.c5
-rw-r--r--drivers/rtc/rtc-ds1307.c83
-rw-r--r--drivers/rtc/rtc-ds1343.c5
-rw-r--r--drivers/rtc/rtc-ds1347.c2
-rw-r--r--drivers/rtc/rtc-ds1374.c2
-rw-r--r--drivers/rtc/rtc-ds1511.c6
-rw-r--r--drivers/rtc/rtc-ds1553.c6
-rw-r--r--drivers/rtc/rtc-ds1672.c2
-rw-r--r--drivers/rtc/rtc-ds1685.c5
-rw-r--r--drivers/rtc/rtc-ds1742.c6
-rw-r--r--drivers/rtc/rtc-ds2404.c2
-rw-r--r--drivers/rtc/rtc-ds3232.c2
-rw-r--r--drivers/rtc/rtc-ep93xx.c8
-rw-r--r--drivers/rtc/rtc-fsl-ftm-alarm.c2
-rw-r--r--drivers/rtc/rtc-ftrtc010.c2
-rw-r--r--drivers/rtc/rtc-goldfish.c2
-rw-r--r--drivers/rtc/rtc-hym8563.c7
-rw-r--r--drivers/rtc/rtc-imx-sc.c2
-rw-r--r--drivers/rtc/rtc-imxdi.c2
-rw-r--r--drivers/rtc/rtc-isl12026.c4
-rw-r--r--drivers/rtc/rtc-isl1208.c4
-rw-r--r--drivers/rtc/rtc-jz4740.c2
-rw-r--r--drivers/rtc/rtc-lpc32xx.c2
-rw-r--r--drivers/rtc/rtc-ls1x.c2
-rw-r--r--drivers/rtc/rtc-m41t80.c2
-rw-r--r--drivers/rtc/rtc-m48t59.c5
-rw-r--r--drivers/rtc/rtc-m48t86.c5
-rw-r--r--drivers/rtc/rtc-mc13xxx.c2
-rw-r--r--drivers/rtc/rtc-mc146818-lib.c7
-rw-r--r--drivers/rtc/rtc-meson-vrtc.c2
-rw-r--r--drivers/rtc/rtc-meson.c4
-rw-r--r--drivers/rtc/rtc-mpc5121.c2
-rw-r--r--drivers/rtc/rtc-mrst.c2
-rw-r--r--drivers/rtc/rtc-mt2712.c2
-rw-r--r--drivers/rtc/rtc-mt6397.c2
-rw-r--r--drivers/rtc/rtc-mv.c2
-rw-r--r--drivers/rtc/rtc-mxc.c25
-rw-r--r--drivers/rtc/rtc-mxc_v2.c2
-rw-r--r--drivers/rtc/rtc-omap.c13
-rw-r--r--drivers/rtc/rtc-pcap.c2
-rw-r--r--drivers/rtc/rtc-pcf2123.c2
-rw-r--r--drivers/rtc/rtc-pcf2127.c73
-rw-r--r--drivers/rtc/rtc-pcf85063.c4
-rw-r--r--drivers/rtc/rtc-pcf8523.c34
-rw-r--r--drivers/rtc/rtc-pcf85363.c4
-rw-r--r--drivers/rtc/rtc-pcf8563.c2
-rw-r--r--drivers/rtc/rtc-pic32.c2
-rw-r--r--drivers/rtc/rtc-pl030.c2
-rw-r--r--drivers/rtc/rtc-pl031.c8
-rw-r--r--drivers/rtc/rtc-pm8xxx.c2
-rw-r--r--drivers/rtc/rtc-ps3.c2
-rw-r--r--drivers/rtc/rtc-r9701.c2
-rw-r--r--drivers/rtc/rtc-rc5t619.c2
-rw-r--r--drivers/rtc/rtc-rk808.c2
-rw-r--r--drivers/rtc/rtc-rp5c01.c5
-rw-r--r--drivers/rtc/rtc-rs5c348.c2
-rw-r--r--drivers/rtc/rtc-rv3028.c6
-rw-r--r--drivers/rtc/rtc-rv3029c2.c4
-rw-r--r--drivers/rtc/rtc-rv3032.c8
-rw-r--r--drivers/rtc/rtc-rv8803.c5
-rw-r--r--drivers/rtc/rtc-rx6110.c165
-rw-r--r--drivers/rtc/rtc-rx8010.c2
-rw-r--r--drivers/rtc/rtc-rx8581.c4
-rw-r--r--drivers/rtc/rtc-s35390a.c2
-rw-r--r--drivers/rtc/rtc-s3c.c230
-rw-r--r--drivers/rtc/rtc-sa1100.c2
-rw-r--r--drivers/rtc/rtc-sc27xx.c42
-rw-r--r--drivers/rtc/rtc-sd3078.c2
-rw-r--r--drivers/rtc/rtc-sh.c2
-rw-r--r--drivers/rtc/rtc-sirfsoc.c2
-rw-r--r--drivers/rtc/rtc-snvs.c69
-rw-r--r--drivers/rtc/rtc-st-lpc.c2
-rw-r--r--drivers/rtc/rtc-starfire.c2
-rw-r--r--drivers/rtc/rtc-stk17ta8.c5
-rw-r--r--drivers/rtc/rtc-stmp3xxx.c2
-rw-r--r--drivers/rtc/rtc-sun4v.c2
-rw-r--r--drivers/rtc/rtc-sun6i.c10
-rw-r--r--drivers/rtc/rtc-sunxi.c2
-rw-r--r--drivers/rtc/rtc-tegra.c2
-rw-r--r--drivers/rtc/rtc-test.c3
-rw-r--r--drivers/rtc/rtc-tps6586x.c2
-rw-r--r--drivers/rtc/rtc-tps65910.c2
-rw-r--r--drivers/rtc/rtc-tx4939.c5
-rw-r--r--drivers/rtc/rtc-vr41xx.c2
-rw-r--r--drivers/rtc/rtc-vt8500.c2
-rw-r--r--drivers/rtc/rtc-wilco-ec.c2
-rw-r--r--drivers/rtc/rtc-wm831x.c2
-rw-r--r--drivers/rtc/rtc-xgene.c2
-rw-r--r--drivers/rtc/rtc-zynqmp.c2
-rw-r--r--drivers/rtc/sysfs.c2
-rw-r--r--drivers/s390/block/dasd.c31
-rw-r--r--drivers/s390/block/dasd_alias.c22
-rw-r--r--drivers/s390/block/dasd_devmap.c117
-rw-r--r--drivers/s390/block/dasd_eckd.c176
-rw-r--r--drivers/s390/block/dasd_fba.c2
-rw-r--r--drivers/s390/block/dasd_int.h114
-rw-r--r--drivers/s390/block/dasd_ioctl.c36
-rw-r--r--drivers/s390/cio/chp.c15
-rw-r--r--drivers/s390/cio/chp.h1
-rw-r--r--drivers/s390/cio/chsc.c145
-rw-r--r--drivers/s390/cio/chsc.h3
-rw-r--r--drivers/s390/cio/device.c17
-rw-r--r--drivers/s390/cio/vfio_ccw_ops.c26
-rw-r--r--drivers/s390/cio/vfio_ccw_private.h4
-rw-r--r--drivers/s390/crypto/vfio_ap_drv.c6
-rw-r--r--drivers/s390/crypto/vfio_ap_ops.c149
-rw-r--r--drivers/s390/crypto/vfio_ap_private.h12
-rw-r--r--drivers/s390/crypto/zcrypt_cex2a.c2
-rw-r--r--drivers/s390/crypto/zcrypt_cex4.c2
-rw-r--r--drivers/s390/net/qeth_core.h3
-rw-r--r--drivers/s390/net/qeth_core_main.c38
-rw-r--r--drivers/s390/net/qeth_l2_main.c2
-rw-r--r--drivers/s390/net/qeth_l3_main.c4
-rw-r--r--drivers/s390/scsi/zfcp_aux.c11
-rw-r--r--drivers/s390/scsi/zfcp_def.h1
-rw-r--r--drivers/s390/scsi/zfcp_ext.h2
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c22
-rw-r--r--drivers/s390/scsi/zfcp_fsf.h11
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c47
-rw-r--r--drivers/s390/scsi/zfcp_qdio.h2
-rw-r--r--drivers/scsi/3w-9xxx.c30
-rw-r--r--drivers/scsi/3w-sas.c32
-rw-r--r--drivers/scsi/NCR5380.c92
-rw-r--r--drivers/scsi/NCR5380.h3
-rw-r--r--drivers/scsi/aacraid/commctrl.c22
-rw-r--r--drivers/scsi/aacraid/commsup.c1
-rw-r--r--drivers/scsi/aacraid/linit.c95
-rw-r--r--drivers/scsi/advansys.c16
-rw-r--r--drivers/scsi/aha1740.c1
-rw-r--r--drivers/scsi/aic7xxx/aic79xx.h12
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_core.c12
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm.c3
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm_pci.c43
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_pci.c6
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx.h10
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_core.c10
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm_pci.c46
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_pci.c4
-rw-r--r--drivers/scsi/aic94xx/aic94xx_scb.c2
-rw-r--r--drivers/scsi/aic94xx/aic94xx_task.c3
-rw-r--r--drivers/scsi/arcmsr/arcmsr.h1
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c55
-rw-r--r--drivers/scsi/atari_scsi.c10
-rw-r--r--drivers/scsi/be2iscsi/be_main.c2
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c4
-rw-r--r--drivers/scsi/bfa/bfa_fcs_lport.c2
-rw-r--r--drivers/scsi/bfa/bfa_ioc.c6
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc.h1
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c6
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c1
-rw-r--r--drivers/scsi/csiostor/csio_wr.c1
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/Kconfig1
-rw-r--r--drivers/scsi/cxlflash/ocxl_hw.c6
-rw-r--r--drivers/scsi/dc395x.c15
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c10
-rw-r--r--drivers/scsi/esas2r/esas2r.h5
-rw-r--r--drivers/scsi/esas2r/esas2r_disc.c3
-rw-r--r--drivers/scsi/esas2r/esas2r_init.c53
-rw-r--r--drivers/scsi/esas2r/esas2r_int.c8
-rw-r--r--drivers/scsi/esas2r/esas2r_main.c11
-rw-r--r--drivers/scsi/fcoe/fcoe.c3
-rw-r--r--drivers/scsi/fcoe/fcoe_sysfs.c4
-rw-r--r--drivers/scsi/fnic/fnic.h3
-rw-r--r--drivers/scsi/fnic/fnic_fcs.c10
-rw-r--r--drivers/scsi/fnic/fnic_main.c3
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c17
-rw-r--r--drivers/scsi/fnic/vnic_dev.c8
-rw-r--r--drivers/scsi/g_NCR5380.c12
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h32
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c1365
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c66
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c1295
-rw-r--r--drivers/scsi/hpsa.c38
-rw-r--r--drivers/scsi/hptiop.c1
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c199
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.h28
-rw-r--r--drivers/scsi/ipr.c1
-rw-r--r--drivers/scsi/isci/init.c18
-rw-r--r--drivers/scsi/isci/phy.c2
-rw-r--r--drivers/scsi/iscsi_tcp.c4
-rw-r--r--drivers/scsi/libfc/fc_elsct.c2
-rw-r--r--drivers/scsi/libfc/fc_encode.h707
-rw-r--r--drivers/scsi/libfc/fc_exch.c19
-rw-r--r--drivers/scsi/libfc/fc_fcp.c2
-rw-r--r--drivers/scsi/libfc/fc_libfc.c2
-rw-r--r--drivers/scsi/libfc/fc_lport.c2
-rw-r--r--drivers/scsi/libfc/fc_rport.c2
-rw-r--r--drivers/scsi/libiscsi.c2
-rw-r--r--drivers/scsi/lpfc/lpfc.h15
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c89
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c139
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h18
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c452
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c23
-rw-r--r--drivers/scsi/lpfc/lpfc_disc.h47
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c1296
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c797
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h7
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h22
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c290
-rw-r--r--drivers/scsi/lpfc/lpfc_mem.c11
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c215
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c384
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c77
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c1095
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c744
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h7
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h14
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c128
-rw-r--r--drivers/scsi/mac_scsi.c10
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c229
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c29
-rw-r--r--drivers/scsi/mpt3sas/Kconfig2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c437
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h145
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_config.c760
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c6
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c1285
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_transport.c312
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_trigger_pages.h94
-rw-r--r--drivers/scsi/mvumi.c50
-rw-r--r--drivers/scsi/myrb.c1
-rw-r--r--drivers/scsi/myrs.c1
-rw-r--r--drivers/scsi/pcmcia/nsp_cs.c2
-rw-r--r--drivers/scsi/pm8001/pm8001_ctl.c7
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c1456
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c164
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c149
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h47
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c1728
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.h6
-rw-r--r--drivers/scsi/pmcraid.c44
-rw-r--r--drivers/scsi/ps3rom.c3
-rw-r--r--drivers/scsi/qedf/qedf.h1
-rw-r--r--drivers/scsi/qedi/qedi_main.c8
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h10
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c8
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c77
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c34
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c10
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.c14
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c27
-rw-r--r--drivers/scsi/qla2xxx/qla_nx2.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c55
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c10
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c24
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h1
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.c9
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h4
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c72
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h4
-rw-r--r--drivers/scsi/qla4xxx/ql4_glbl.h1
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.c95
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c3
-rw-r--r--drivers/scsi/scsi_debug.c5
-rw-r--r--drivers/scsi/scsi_devinfo.c3
-rw-r--r--drivers/scsi/scsi_lib.c176
-rw-r--r--drivers/scsi/scsi_sysfs.c2
-rw-r--r--drivers/scsi/scsi_transport_fc.c417
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c4
-rw-r--r--drivers/scsi/scsi_transport_spi.c27
-rw-r--r--drivers/scsi/scsi_transport_srp.c9
-rw-r--r--drivers/scsi/scsicam.c2
-rw-r--r--drivers/scsi/sd.c34
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c14
-rw-r--r--drivers/scsi/smartpqi/smartpqi_sas_transport.c1
-rw-r--r--drivers/scsi/sr.c17
-rw-r--r--drivers/scsi/sr_ioctl.c2
-rw-r--r--drivers/scsi/st.c1
-rw-r--r--drivers/scsi/stex.c1
-rw-r--r--drivers/scsi/storvsc_drv.c26
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_hipd.c1
-rw-r--r--drivers/scsi/ufs/Kconfig3
-rw-r--r--drivers/scsi/ufs/cdns-pltfrm.c3
-rw-r--r--drivers/scsi/ufs/ufs-exynos.c15
-rw-r--r--drivers/scsi/ufs/ufs-exynos.h13
-rw-r--r--drivers/scsi/ufs/ufs-hisi.c13
-rw-r--r--drivers/scsi/ufs/ufs-hisi.h13
-rw-r--r--drivers/scsi/ufs/ufs-mediatek-trace.h36
-rw-r--r--drivers/scsi/ufs/ufs-mediatek.c286
-rw-r--r--drivers/scsi/ufs/ufs-mediatek.h35
-rw-r--r--drivers/scsi/ufs/ufs-qcom.c40
-rw-r--r--drivers/scsi/ufs/ufs-qcom.h11
-rw-r--r--drivers/scsi/ufs/ufs-sysfs.c7
-rw-r--r--drivers/scsi/ufs/ufs.h3
-rw-r--r--drivers/scsi/ufs/ufshcd-dwc.c11
-rw-r--r--drivers/scsi/ufs/ufshcd-pci.c73
-rw-r--r--drivers/scsi/ufs/ufshcd-pltfrm.c38
-rw-r--r--drivers/scsi/ufs/ufshcd-pltfrm.h1
-rw-r--r--drivers/scsi/ufs/ufshcd.c592
-rw-r--r--drivers/scsi/ufs/ufshcd.h153
-rw-r--r--drivers/scsi/ufs/unipro.h6
-rw-r--r--drivers/sh/intc/core.c2
-rw-r--r--drivers/sh/intc/virq-debugfs.c14
-rw-r--r--drivers/soc/Kconfig1
-rw-r--r--drivers/soc/Makefile3
-rw-r--r--drivers/soc/amlogic/Kconfig12
-rw-r--r--drivers/soc/amlogic/meson-canvas.c4
-rw-r--r--drivers/soc/amlogic/meson-clk-measure.c5
-rw-r--r--drivers/soc/amlogic/meson-ee-pwrc.c8
-rw-r--r--drivers/soc/amlogic/meson-gx-pwrc-vpu.c7
-rw-r--r--drivers/soc/amlogic/meson-secure-pwrc.c5
-rw-r--r--drivers/soc/aspeed/Kconfig47
-rw-r--r--drivers/soc/aspeed/Makefile1
-rw-r--r--drivers/soc/aspeed/aspeed-lpc-ctrl.c37
-rw-r--r--drivers/soc/aspeed/aspeed-lpc-snoop.c2
-rw-r--r--drivers/soc/aspeed/aspeed-socinfo.c135
-rw-r--r--drivers/soc/atmel/soc.c19
-rw-r--r--drivers/soc/atmel/soc.h3
-rw-r--r--drivers/soc/bcm/brcmstb/pm/pm-arm.c2
-rw-r--r--drivers/soc/fsl/dpio/qbman-portal.c18
-rw-r--r--drivers/soc/fsl/qbman/qman.c8
-rw-r--r--drivers/soc/fsl/qe/qe_common.c2
-rw-r--r--drivers/soc/fsl/rcpm.c35
-rw-r--r--drivers/soc/imx/Kconfig2
-rw-r--r--drivers/soc/litex/Kconfig20
-rw-r--r--drivers/soc/litex/Makefile3
-rw-r--r--drivers/soc/litex/litex_soc_ctrl.c177
-rw-r--r--drivers/soc/mediatek/Kconfig22
-rw-r--r--drivers/soc/mediatek/Makefile2
-rw-r--r--drivers/soc/mediatek/mt8173-pm-domains.h94
-rw-r--r--drivers/soc/mediatek/mt8183-pm-domains.h221
-rw-r--r--drivers/soc/mediatek/mt8192-pm-domains.h292
-rw-r--r--drivers/soc/mediatek/mtk-cmdq-helper.c41
-rw-r--r--drivers/soc/mediatek/mtk-devapc.c308
-rw-r--r--drivers/soc/mediatek/mtk-infracfg.c5
-rw-r--r--drivers/soc/mediatek/mtk-mmsys.c11
-rw-r--r--drivers/soc/mediatek/mtk-pm-domains.c614
-rw-r--r--drivers/soc/mediatek/mtk-pm-domains.h102
-rw-r--r--drivers/soc/mediatek/mtk-scpsys.c5
-rw-r--r--drivers/soc/qcom/Kconfig5
-rw-r--r--drivers/soc/qcom/cmd-db.c8
-rw-r--r--drivers/soc/qcom/kryo-l2-accessors.c2
-rw-r--r--drivers/soc/qcom/llcc-qcom.c147
-rw-r--r--drivers/soc/qcom/pdr_interface.c8
-rw-r--r--drivers/soc/qcom/qcom-geni-se.c9
-rw-r--r--drivers/soc/qcom/qcom_aoss.c4
-rw-r--r--drivers/soc/qcom/rpmh-rsc.c7
-rw-r--r--drivers/soc/qcom/rpmh.c14
-rw-r--r--drivers/soc/qcom/rpmhpd.c16
-rw-r--r--drivers/soc/qcom/rpmpd.c85
-rw-r--r--drivers/soc/qcom/smem.c3
-rw-r--r--drivers/soc/qcom/smp2p.c6
-rw-r--r--drivers/soc/qcom/smsm.c4
-rw-r--r--drivers/soc/qcom/socinfo.c6
-rw-r--r--drivers/soc/qcom/wcnss_ctrl.c8
-rw-r--r--drivers/soc/renesas/rmobile-sysc.c17
-rw-r--r--drivers/soc/rockchip/io-domain.c4
-rw-r--r--drivers/soc/samsung/exynos-chipid.c11
-rw-r--r--drivers/soc/samsung/exynos-pmu.c11
-rw-r--r--drivers/soc/samsung/exynos5422-asv.c2
-rw-r--r--drivers/soc/samsung/s3c-pm-check.c2
-rw-r--r--drivers/soc/sunxi/Kconfig8
-rw-r--r--drivers/soc/sunxi/Makefile1
-rw-r--r--drivers/soc/sunxi/sunxi_mbus.c127
-rw-r--r--drivers/soc/tegra/fuse/speedo-tegra124.c21
-rw-r--r--drivers/soc/tegra/fuse/speedo-tegra210.c8
-rw-r--r--drivers/soc/ti/Kconfig18
-rw-r--r--drivers/soc/ti/k3-ringacc.c423
-rw-r--r--drivers/soc/ti/k3-socinfo.c1
-rw-r--r--drivers/soc/ti/knav_dma.c15
-rw-r--r--drivers/soc/ti/knav_qmss_queue.c66
-rw-r--r--drivers/soc/ti/omap_prm.c369
-rw-r--r--drivers/soc/ti/pm33xx.c21
-rw-r--r--drivers/soc/ti/pruss.c6
-rw-r--r--drivers/soc/ti/ti_sci_inta_msi.c12
-rw-r--r--drivers/soc/ti/wkup_m3_ipc.c8
-rw-r--r--drivers/soc/xilinx/Kconfig1
-rw-r--r--drivers/soc/xilinx/xlnx_vcu.c96
-rw-r--r--drivers/spi/spi-altera.c29
-rw-r--r--drivers/spi/spi-cadence.c6
-rw-r--r--drivers/spi/spi-fsl-spi.c5
-rw-r--r--drivers/spi/spi-geni-qcom.c84
-rw-r--r--drivers/spi/spi-stm32.c4
-rw-r--r--drivers/spi/spi.c11
-rw-r--r--drivers/spi/spidev.c1
-rw-r--r--drivers/staging/android/ashmem.c6
-rw-r--r--drivers/staging/comedi/comedi_fops.c4
-rw-r--r--drivers/staging/hikey9xx/hisi-spmi-controller.c21
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_subdev.c20
-rw-r--r--drivers/staging/media/hantro/hantro_v4l2.c2
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.c1
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.h3
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_h264.c2
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_hw.c18
-rw-r--r--drivers/staging/mt7621-dma/mtk-hsdma.c4
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_wifi_regd.h6
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c6
-rw-r--r--drivers/staging/rtl8723bs/os_dep/sdio_intf.c4
-rw-r--r--drivers/staging/rtl8723bs/os_dep/wifi_regd.c10
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c2
-rw-r--r--drivers/target/loopback/tcm_loop.c14
-rw-r--r--drivers/target/target_core_device.c59
-rw-r--r--drivers/target/target_core_file.c6
-rw-r--r--drivers/target/target_core_iblock.c1
-rw-r--r--drivers/target/target_core_pr.c1
-rw-r--r--drivers/target/target_core_pscsi.c5
-rw-r--r--drivers/target/target_core_sbc.c139
-rw-r--r--drivers/target/target_core_tmr.c166
-rw-r--r--drivers/target/target_core_tpg.c2
-rw-r--r--drivers/target/target_core_transport.c170
-rw-r--r--drivers/target/target_core_user.c175
-rw-r--r--drivers/target/target_core_xcopy.c119
-rw-r--r--drivers/target/target_core_xcopy.h1
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c3
-rw-r--r--drivers/target/tcm_fc/tfc_io.c1
-rw-r--r--drivers/target/tcm_fc/tfc_sess.c2
-rw-r--r--drivers/tee/optee/call.c4
-rw-r--r--drivers/tee/optee/device.c2
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3400_thermal.c1
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3403_thermal.c1
-rw-r--r--drivers/thunderbolt/acpi.c2
-rw-r--r--drivers/thunderbolt/icm.c2
-rw-r--r--drivers/tty/n_tty.c7
-rw-r--r--drivers/tty/serial/Kconfig32
-rw-r--r--drivers/tty/serial/Makefile1
-rw-r--r--drivers/tty/serial/liteuart.c404
-rw-r--r--drivers/tty/serial/mvebu-uart.c10
-rw-r--r--drivers/tty/serial/sifive.c1
-rw-r--r--drivers/tty/tty_io.c65
-rw-r--r--drivers/usb/cdns3/cdns3-imx.c22
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c6
-rw-r--r--drivers/usb/class/cdc-acm.c4
-rw-r--r--drivers/usb/class/cdc-wdm.c16
-rw-r--r--drivers/usb/class/usblp.c40
-rw-r--r--drivers/usb/core/hcd.c8
-rw-r--r--drivers/usb/dwc2/gadget.c8
-rw-r--r--drivers/usb/dwc3/core.c2
-rw-r--r--drivers/usb/dwc3/core.h1
-rw-r--r--drivers/usb/dwc3/dwc3-meson-g12a.c2
-rw-r--r--drivers/usb/dwc3/gadget.c16
-rw-r--r--drivers/usb/dwc3/ulpi.c38
-rw-r--r--drivers/usb/gadget/Kconfig2
-rw-r--r--drivers/usb/gadget/composite.c10
-rw-r--r--drivers/usb/gadget/configfs.c19
-rw-r--r--drivers/usb/gadget/function/f_printer.c1
-rw-r--r--drivers/usb/gadget/function/f_uac2.c69
-rw-r--r--drivers/usb/gadget/function/storage_common.c8
-rw-r--r--drivers/usb/gadget/function/u_ether.c9
-rw-r--r--drivers/usb/gadget/legacy/acm_ms.c4
-rw-r--r--drivers/usb/gadget/legacy/ether.c4
-rw-r--r--drivers/usb/gadget/udc/Kconfig2
-rw-r--r--drivers/usb/gadget/udc/Makefile1
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/epn.c5
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/hub.c4
-rw-r--r--drivers/usb/gadget/udc/bdc/Kconfig2
-rw-r--r--drivers/usb/gadget/udc/core.c16
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c45
-rw-r--r--drivers/usb/gadget/udc/fsl_mxc_udc.c122
-rw-r--r--drivers/usb/host/ehci-hcd.c12
-rw-r--r--drivers/usb/host/ehci-hub.c3
-rw-r--r--drivers/usb/host/ehci-ps3.c4
-rw-r--r--drivers/usb/host/ohci-ps3.c4
-rw-r--r--drivers/usb/host/xhci-mtk-sch.c130
-rw-r--r--drivers/usb/host/xhci-mtk.c2
-rw-r--r--drivers/usb/host/xhci-mtk.h15
-rw-r--r--drivers/usb/host/xhci-mvebu.c42
-rw-r--r--drivers/usb/host/xhci-mvebu.h6
-rw-r--r--drivers/usb/host/xhci-plat.c20
-rw-r--r--drivers/usb/host/xhci-plat.h1
-rw-r--r--drivers/usb/host/xhci-ring.c33
-rw-r--r--drivers/usb/host/xhci-tegra.c7
-rw-r--r--drivers/usb/host/xhci.c32
-rw-r--r--drivers/usb/host/xhci.h4
-rw-r--r--drivers/usb/misc/yurex.c3
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c1
-rw-r--r--drivers/usb/serial/cp210x.c2
-rw-r--r--drivers/usb/serial/iuu_phoenix.c20
-rw-r--r--drivers/usb/serial/option.c9
-rw-r--r--drivers/usb/storage/unusual_uas.h7
-rw-r--r--drivers/usb/typec/altmodes/Kconfig2
-rw-r--r--drivers/usb/typec/class.c2
-rw-r--r--drivers/usb/typec/mux/intel_pmc_mux.c11
-rw-r--r--drivers/usb/usbip/vhci_hcd.c2
-rw-r--r--drivers/vdpa/Kconfig16
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_main.c11
-rw-r--r--drivers/vdpa/mlx5/core/mlx5_vdpa.h1
-rw-r--r--drivers/vdpa/mlx5/core/mr.c28
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.c23
-rw-r--r--drivers/vdpa/vdpa.c2
-rw-r--r--drivers/vdpa/vdpa_sim/Makefile1
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim.c298
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim.h105
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim_net.c177
-rw-r--r--drivers/vfio/mdev/mdev_core.c4
-rw-r--r--drivers/vfio/mdev/vfio_mdev.c13
-rw-r--r--drivers/vfio/pci/vfio_pci.c7
-rw-r--r--drivers/vfio/pci/vfio_pci_nvlink2.c7
-rw-r--r--drivers/vfio/vfio.c18
-rw-r--r--drivers/vfio/vfio_iommu_type1.c24
-rw-r--r--drivers/vfio/virqfd.c3
-rw-r--r--drivers/vhost/net.c6
-rw-r--r--drivers/vhost/scsi.c3
-rw-r--r--drivers/vhost/vdpa.c10
-rw-r--r--drivers/vhost/vsock.c68
-rw-r--r--drivers/video/backlight/pwm_bl.c70
-rw-r--r--drivers/video/fbdev/geode/lxfb_ops.c1
-rw-r--r--drivers/video/fbdev/pm2fb.c1
-rw-r--r--drivers/video/fbdev/ps3fb.c4
-rw-r--r--drivers/virtio/virtio_mem.c1789
-rw-r--r--drivers/virtio/virtio_ring.c8
-rw-r--r--drivers/watchdog/Kconfig16
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/geodewdt.c2
-rw-r--r--drivers/watchdog/hpwdt.c8
-rw-r--r--drivers/watchdog/iTCO_wdt.c34
-rw-r--r--drivers/watchdog/mpc8xxx_wdt.c4
-rw-r--r--drivers/watchdog/pnx833x_wdt.c277
-rw-r--r--drivers/watchdog/qcom-wdt.c20
-rw-r--r--drivers/watchdog/rti_wdt.c4
-rw-r--r--drivers/watchdog/sbc_fitpc2_wdt.c12
-rw-r--r--drivers/watchdog/sp805_wdt.c1
-rw-r--r--drivers/watchdog/sprd_wdt.c43
-rw-r--r--drivers/watchdog/stm32_iwdg.c13
-rw-r--r--drivers/watchdog/watchdog_core.c22
-rw-r--r--drivers/watchdog/wdat_wdt.c6
-rw-r--r--drivers/xen/Makefile2
-rw-r--r--drivers/xen/events/events_base.c192
-rw-r--r--drivers/xen/evtchn.c34
-rw-r--r--drivers/xen/manage.c1
-rw-r--r--drivers/xen/platform-pci.c8
-rw-r--r--drivers/xen/privcmd.c25
-rw-r--r--drivers/xen/xen-pciback/xenbus.c2
-rw-r--r--drivers/xen/xenbus/xenbus.h3
-rw-r--r--drivers/xen/xenbus/xenbus_client.c8
-rw-r--r--drivers/xen/xenbus/xenbus_comms.c8
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c111
-rw-r--r--drivers/xen/xenbus/xenbus_probe_backend.c7
-rw-r--r--drivers/xen/xenbus/xenbus_xs.c34
2319 files changed, 88156 insertions, 35082 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index 576228037718..fd11b9ac4cc3 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -135,9 +135,7 @@ obj-$(CONFIG_INFINIBAND) += infiniband/
obj-y += firmware/
obj-$(CONFIG_CRYPTO) += crypto/
obj-$(CONFIG_SUPERH) += sh/
-ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
obj-y += clocksource/
-endif
obj-$(CONFIG_DCA) += dca/
obj-$(CONFIG_HID) += hid/
obj-$(CONFIG_PPC_PS3) += ps3/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index edf1558c1105..ebcf534514be 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -395,9 +395,6 @@ config ACPI_CONTAINER
This helps support hotplug of nodes, CPUs, and memory.
- To compile this driver as a module, choose M here:
- the module will be called container.
-
config ACPI_HOTPLUG_MEMORY
bool "Memory Hotplug"
depends on MEMORY_HOTPLUG
@@ -411,9 +408,6 @@ config ACPI_HOTPLUG_MEMORY
removing memory devices at runtime, you need not enable
this driver.
- To compile this driver as a module, choose M here:
- the module will be called acpi_memhotplug.
-
config ACPI_HOTPLUG_IOAPIC
bool
depends on PCI
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 44e412506317..076894a3330f 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -54,6 +54,7 @@ acpi-y += property.o
acpi-$(CONFIG_X86) += acpi_cmos_rtc.o
acpi-$(CONFIG_X86) += x86/apple.o
acpi-$(CONFIG_X86) += x86/utils.o
+acpi-$(CONFIG_X86) += x86/s2idle.o
acpi-$(CONFIG_DEBUG_FS) += debugfs.o
acpi-y += acpi_lpat.o
acpi-$(CONFIG_ACPI_LPIT) += acpi_lpit.o
diff --git a/drivers/acpi/acpi_dbg.c b/drivers/acpi/acpi_dbg.c
index fb7290338593..d50261d05f3a 100644
--- a/drivers/acpi/acpi_dbg.c
+++ b/drivers/acpi/acpi_dbg.c
@@ -117,13 +117,6 @@ static inline bool __acpi_aml_busy(void)
return false;
}
-static inline bool __acpi_aml_opened(void)
-{
- if (acpi_aml_io.flags & ACPI_AML_OPEN)
- return true;
- return false;
-}
-
static inline bool __acpi_aml_used(void)
{
return acpi_aml_io.usages ? true : false;
diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c
index 4ed755a963aa..8f2dc176bb41 100644
--- a/drivers/acpi/acpi_pnp.c
+++ b/drivers/acpi/acpi_pnp.c
@@ -319,6 +319,9 @@ static bool matching_id(const char *idstr, const char *list_id)
{
int i;
+ if (strlen(idstr) != strlen(list_id))
+ return false;
+
if (memcmp(idstr, list_id, 3))
return false;
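
The strlen() guard added above closes a prefix-matching hole. A minimal stand-alone sketch of the failure mode (the ID strings and the plain memcmp() comparison are illustrative simplifications; the real matching_id() also does per-character wildcard matching after the 3-byte vendor compare):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-in for matching_id(): compare only as far as list_id. */
static bool match_prefix_only(const char *idstr, const char *list_id)
{
	return memcmp(idstr, list_id, strlen(list_id)) == 0;
}

/* With the patch's length guard, IDs of different length never match. */
static bool match_with_length_guard(const char *idstr, const char *list_id)
{
	if (strlen(idstr) != strlen(list_id))
		return false;
	return memcmp(idstr, list_id, strlen(list_id)) == 0;
}

int main(void)
{
	/* Hypothetical IDs sharing a prefix but differing in length. */
	printf("%d\n", match_prefix_only("ABCD0001X", "ABCD0001"));       /* 1: bogus match */
	printf("%d\n", match_with_length_guard("ABCD0001X", "ABCD0001")); /* 0: rejected */
	return 0;
}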
diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c
index 5c1e9ea43123..ca28183f4d13 100644
--- a/drivers/acpi/acpi_watchdog.c
+++ b/drivers/acpi/acpi_watchdog.c
@@ -151,11 +151,7 @@ void __init acpi_watchdog_init(void)
found = false;
resource_list_for_each_entry(rentry, &resource_list) {
if (rentry->res->flags == res.flags &&
- resource_overlaps(rentry->res, &res)) {
- if (res.start < rentry->res->start)
- rentry->res->start = res.start;
- if (res.end > rentry->res->end)
- rentry->res->end = res.end;
+ resource_union(rentry->res, &res, rentry->res)) {
found = true;
break;
}
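
The replaced open-coded min/max merge and the new resource_union() call do the same job: grow rentry->res in place so it covers both ranges. A self-contained sketch of the helper's semantics (simplified struct resource stand-in; the kernel's version lives in include/linux/ioport.h):

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's struct resource. */
struct resource {
	unsigned long start;
	unsigned long end;
};

static bool resource_overlaps(const struct resource *r1, const struct resource *r2)
{
	return r1->start <= r2->end && r2->start <= r1->end;
}

/* Mirror of resource_union(): on overlap, store the merged range in *r
 * and return true; otherwise leave *r untouched and return false. */
static bool resource_union(const struct resource *r1, const struct resource *r2,
			   struct resource *r)
{
	if (!resource_overlaps(r1, r2))
		return false;
	r->start = r1->start < r2->start ? r1->start : r2->start;
	r->end = r1->end > r2->end ? r1->end : r2->end;
	return true;
}

int main(void)
{
	struct resource a = { 0x100, 0x1ff };
	struct resource b = { 0x180, 0x2ff };

	/* Passing &a as both input and output merges in place, as the
	 * watchdog code does with rentry->res. */
	if (resource_union(&a, &b, &a))
		printf("[%#lx, %#lx]\n", a.start, a.end); /* [0x100, 0x2ff] */
	return 0;
}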
diff --git a/drivers/acpi/acpica/accommon.h b/drivers/acpi/acpica/accommon.h
index 89101e53324b..94e18bb76556 100644
--- a/drivers/acpi/acpica/accommon.h
+++ b/drivers/acpi/acpica/accommon.h
@@ -13,7 +13,7 @@
/*
* Common set of includes for all ACPICA source files.
* We put them here because we don't want to duplicate them
- * in the the source code again and again.
+ * in the source code again and again.
*
* Note: The order of these include files is important.
*/
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 738d4b231f34..a8a4c8c9b9ef 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -21,7 +21,8 @@ extern u8 acpi_gbl_default_address_spaces[];
/* Local prototypes */
static void
-acpi_ev_orphan_ec_reg_method(struct acpi_namespace_node *ec_device_node);
+acpi_ev_execute_orphan_reg_method(struct acpi_namespace_node *device_node,
+ acpi_adr_space_type space_id);
static acpi_status
acpi_ev_reg_run(acpi_handle obj_handle,
@@ -684,10 +685,12 @@ acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
ACPI_NS_WALK_UNLOCK, acpi_ev_reg_run, NULL,
&info, NULL);
- /* Special case for EC: handle "orphan" _REG methods with no region */
-
- if (space_id == ACPI_ADR_SPACE_EC) {
- acpi_ev_orphan_ec_reg_method(node);
+ /*
+ * Special case for EC and GPIO: handle "orphan" _REG methods with
+ * no region.
+ */
+ if (space_id == ACPI_ADR_SPACE_EC || space_id == ACPI_ADR_SPACE_GPIO) {
+ acpi_ev_execute_orphan_reg_method(node, space_id);
}
ACPI_DEBUG_PRINT_RAW((ACPI_DB_NAMES,
@@ -760,31 +763,28 @@ acpi_ev_reg_run(acpi_handle obj_handle,
/*******************************************************************************
*
- * FUNCTION: acpi_ev_orphan_ec_reg_method
+ * FUNCTION: acpi_ev_execute_orphan_reg_method
*
- * PARAMETERS: ec_device_node - Namespace node for an EC device
+ * PARAMETERS: device_node - Namespace node for an ACPI device
+ * space_id - The address space ID
*
* RETURN: None
*
- * DESCRIPTION: Execute an "orphan" _REG method that appears under the EC
+ * DESCRIPTION: Execute an "orphan" _REG method that appears under an ACPI
* device. This is a _REG method that has no corresponding region
- * within the EC device scope. The orphan _REG method appears to
- * have been enabled by the description of the ECDT in the ACPI
- * specification: "The availability of the region space can be
- * detected by providing a _REG method object underneath the
- * Embedded Controller device."
- *
- * To quickly access the EC device, we use the ec_device_node used
- * during EC handler installation. Otherwise, we would need to
- * perform a time consuming namespace walk, executing _HID
- * methods to find the EC device.
+ * within the device's scope. ACPI tables depending on these
+ * "orphan" _REG methods have been seen for both EC and GPIO
+ * Operation Regions. Presumably the Windows ACPI implementation
+ * always calls the _REG method independent of the presence of
+ * an actual Operation Region with the correct address space ID.
*
* MUTEX: Assumes the namespace is locked
*
******************************************************************************/
static void
-acpi_ev_orphan_ec_reg_method(struct acpi_namespace_node *ec_device_node)
+acpi_ev_execute_orphan_reg_method(struct acpi_namespace_node *device_node,
+ acpi_adr_space_type space_id)
{
acpi_handle reg_method;
struct acpi_namespace_node *next_node;
@@ -792,9 +792,9 @@ acpi_ev_orphan_ec_reg_method(struct acpi_namespace_node *ec_device_node)
struct acpi_object_list args;
union acpi_object objects[2];
- ACPI_FUNCTION_TRACE(ev_orphan_ec_reg_method);
+ ACPI_FUNCTION_TRACE(ev_execute_orphan_reg_method);
- if (!ec_device_node) {
+ if (!device_node) {
return_VOID;
}
@@ -804,7 +804,7 @@ acpi_ev_orphan_ec_reg_method(struct acpi_namespace_node *ec_device_node)
/* Get a handle to a _REG method immediately under the EC device */
- status = acpi_get_handle(ec_device_node, METHOD_NAME__REG, &reg_method);
+ status = acpi_get_handle(device_node, METHOD_NAME__REG, &reg_method);
if (ACPI_FAILURE(status)) {
goto exit; /* There is no _REG method present */
}
@@ -816,23 +816,23 @@ acpi_ev_orphan_ec_reg_method(struct acpi_namespace_node *ec_device_node)
* with other space IDs to be present; but the code below will then
* execute the _REG method with the embedded_control space_ID argument.
*/
- next_node = acpi_ns_get_next_node(ec_device_node, NULL);
+ next_node = acpi_ns_get_next_node(device_node, NULL);
while (next_node) {
if ((next_node->type == ACPI_TYPE_REGION) &&
(next_node->object) &&
- (next_node->object->region.space_id == ACPI_ADR_SPACE_EC)) {
+ (next_node->object->region.space_id == space_id)) {
goto exit; /* Do not execute the _REG */
}
- next_node = acpi_ns_get_next_node(ec_device_node, next_node);
+ next_node = acpi_ns_get_next_node(device_node, next_node);
}
- /* Evaluate the _REG(embedded_control,Connect) method */
+ /* Evaluate the _REG(space_id,Connect) method */
args.count = 2;
args.pointer = objects;
objects[0].type = ACPI_TYPE_INTEGER;
- objects[0].integer.value = ACPI_ADR_SPACE_EC;
+ objects[0].integer.value = space_id;
objects[1].type = ACPI_TYPE_INTEGER;
objects[1].integer.value = ACPI_REG_CONNECT;
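
The argument assembly at the end of this hunk is the entire payload of a _REG(space_id, Connect) call. For illustration, the same invocation expressed through the public acpi_evaluate_object() interface; the patch itself evaluates through the internal namespace layer, so treat this as a sketch, not the code path ACPICA takes:

#include <acpi/acpi.h>	/* illustrative kernel-side sketch */

static acpi_status connect_orphan_reg(acpi_handle device,
				      acpi_adr_space_type space_id)
{
	union acpi_object objects[2];
	struct acpi_object_list args;

	objects[0].type = ACPI_TYPE_INTEGER;
	objects[0].integer.value = space_id;		/* e.g. ACPI_ADR_SPACE_GPIO */
	objects[1].type = ACPI_TYPE_INTEGER;
	objects[1].integer.value = ACPI_REG_CONNECT;	/* connect = 1 */
	args.count = 2;
	args.pointer = objects;

	/* _REG is looked up directly under the device's scope. */
	return acpi_evaluate_object(device, "_REG", &args, NULL);
}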
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index 0cea9c363ace..167a1c2495ab 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -71,11 +71,13 @@ acpi_ns_check_return_value(struct acpi_namespace_node *node,
acpi_status status;
const union acpi_predefined_info *predefined;
+ ACPI_FUNCTION_TRACE(ns_check_return_value);
+
/* If not a predefined name, we cannot validate the return object */
predefined = info->predefined;
if (!predefined) {
- return (AE_OK);
+ return_ACPI_STATUS(AE_OK);
}
/*
@@ -83,7 +85,7 @@ acpi_ns_check_return_value(struct acpi_namespace_node *node,
* validate the return object
*/
if ((return_status != AE_OK) && (return_status != AE_CTRL_RETURN_VALUE)) {
- return (AE_OK);
+ return_ACPI_STATUS(AE_OK);
}
/*
@@ -102,7 +104,7 @@ acpi_ns_check_return_value(struct acpi_namespace_node *node,
if (acpi_gbl_disable_auto_repair ||
(!predefined->info.expected_btypes) ||
(predefined->info.expected_btypes == ACPI_RTYPE_ALL)) {
- return (AE_OK);
+ return_ACPI_STATUS(AE_OK);
}
/*
@@ -163,7 +165,7 @@ exit:
node->flags |= ANOBJ_EVALUATED;
}
- return (status);
+ return_ACPI_STATUS(status);
}
/*******************************************************************************
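
These conversions pair ACPI_FUNCTION_TRACE() with return_ACPI_STATUS() so
that function exit is logged at the same nesting level as the entry in
ACPICA's debug trace. The pattern, as a sketch (function name illustrative):

	static acpi_status example_traced_helper(struct acpi_namespace_node *node)
	{
		ACPI_FUNCTION_TRACE(example_traced_helper);

		/* Every exit path must use return_ACPI_STATUS()/return_VOID. */
		if (!node) {
			return_ACPI_STATUS(AE_BAD_PARAMETER);
		}

		return_ACPI_STATUS(AE_OK);
	}

A plain "return" in a traced function would leave the trace nesting
unbalanced, which is why every return site in the hunks above is converted.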
diff --git a/drivers/acpi/acpica/nsprepkg.c b/drivers/acpi/acpica/nsprepkg.c
index 237b3ddeb075..1875b1cba202 100644
--- a/drivers/acpi/acpica/nsprepkg.c
+++ b/drivers/acpi/acpica/nsprepkg.c
@@ -59,7 +59,7 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
u32 count;
u32 i;
- ACPI_FUNCTION_NAME(ns_check_package);
+ ACPI_FUNCTION_TRACE(ns_check_package);
/* The package info for this name is in the next table entry */
@@ -88,14 +88,14 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
*/
if (!count) {
if (package->ret_info.type == ACPI_PTYPE1_VAR) {
- return (AE_OK);
+ return_ACPI_STATUS(AE_OK);
}
ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname,
info->node_flags,
"Return Package has no elements (empty)"));
- return (AE_AML_OPERAND_VALUE);
+ return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
}
/*
@@ -152,7 +152,7 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
package->ret_info.
object_type1, i);
if (ACPI_FAILURE(status)) {
- return (status);
+ return_ACPI_STATUS(status);
}
elements++;
@@ -186,7 +186,7 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
object_type[i],
i);
if (ACPI_FAILURE(status)) {
- return (status);
+ return_ACPI_STATUS(status);
}
} else {
/* These are the optional package elements */
@@ -198,7 +198,7 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
tail_object_type,
i);
if (ACPI_FAILURE(status)) {
- return (status);
+ return_ACPI_STATUS(status);
}
}
@@ -214,7 +214,7 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
acpi_ns_check_object_type(info, elements,
ACPI_RTYPE_INTEGER, 0);
if (ACPI_FAILURE(status)) {
- return (status);
+ return_ACPI_STATUS(status);
}
elements++;
@@ -234,7 +234,7 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
acpi_ns_check_object_type(info, elements,
ACPI_RTYPE_INTEGER, 0);
if (ACPI_FAILURE(status)) {
- return (status);
+ return_ACPI_STATUS(status);
}
/*
@@ -279,7 +279,7 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
acpi_ns_wrap_with_package(info, return_object,
return_object_ptr);
if (ACPI_FAILURE(status)) {
- return (status);
+ return_ACPI_STATUS(status);
}
/* Update locals to point to the new package (of 1 element) */
@@ -316,7 +316,7 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
package->ret_info.
object_type1, 0);
if (ACPI_FAILURE(status)) {
- return (status);
+ return_ACPI_STATUS(status);
}
/* Validate length of the UUID buffer */
@@ -326,14 +326,14 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
info->full_pathname,
info->node_flags,
"Invalid length for UUID Buffer"));
- return (AE_AML_OPERAND_VALUE);
+ return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
}
status = acpi_ns_check_object_type(info, elements + 1,
package->ret_info.
object_type2, 0);
if (ACPI_FAILURE(status)) {
- return (status);
+ return_ACPI_STATUS(status);
}
elements += 2;
@@ -350,10 +350,10 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
"Invalid internal return type in table entry: %X",
package->ret_info.type));
- return (AE_AML_INTERNAL);
+ return_ACPI_STATUS(AE_AML_INTERNAL);
}
- return (status);
+ return_ACPI_STATUS(status);
package_too_small:
@@ -363,7 +363,7 @@ package_too_small:
"Return Package is too small - found %u elements, expected %u",
count, expected_count));
- return (AE_AML_OPERAND_VALUE);
+ return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
}
/*******************************************************************************
@@ -708,6 +708,8 @@ acpi_ns_check_package_elements(struct acpi_evaluate_info *info,
acpi_status status;
u32 i;
+ ACPI_FUNCTION_TRACE(ns_check_package_elements);
+
/*
* Up to two groups of package elements are supported by the data
* structure. All elements in each group must be of the same type.
@@ -717,7 +719,7 @@ acpi_ns_check_package_elements(struct acpi_evaluate_info *info,
status = acpi_ns_check_object_type(info, this_element,
type1, i + start_index);
if (ACPI_FAILURE(status)) {
- return (status);
+ return_ACPI_STATUS(status);
}
this_element++;
@@ -728,11 +730,11 @@ acpi_ns_check_package_elements(struct acpi_evaluate_info *info,
type2,
(i + count1 + start_index));
if (ACPI_FAILURE(status)) {
- return (status);
+ return_ACPI_STATUS(status);
}
this_element++;
}
- return (AE_OK);
+ return_ACPI_STATUS(AE_OK);
}
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index 125143c41bb8..d2c8d8279e7a 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -155,15 +155,17 @@ acpi_ns_complex_repairs(struct acpi_evaluate_info *info,
const struct acpi_repair_info *predefined;
acpi_status status;
+ ACPI_FUNCTION_TRACE(ns_complex_repairs);
+
/* Check if this name is in the list of repairable names */
predefined = acpi_ns_match_complex_repair(node);
if (!predefined) {
- return (validate_status);
+ return_ACPI_STATUS(validate_status);
}
status = predefined->repair_function(info, return_object_ptr);
- return (status);
+ return_ACPI_STATUS(status);
}
/******************************************************************************
@@ -344,17 +346,19 @@ acpi_ns_repair_CID(struct acpi_evaluate_info *info,
u16 original_ref_count;
u32 i;
+ ACPI_FUNCTION_TRACE(ns_repair_CID);
+
/* Check for _CID as a simple string */
if (return_object->common.type == ACPI_TYPE_STRING) {
status = acpi_ns_repair_HID(info, return_object_ptr);
- return (status);
+ return_ACPI_STATUS(status);
}
/* Exit if not a Package */
if (return_object->common.type != ACPI_TYPE_PACKAGE) {
- return (AE_OK);
+ return_ACPI_STATUS(AE_OK);
}
/* Examine each element of the _CID package */
@@ -366,7 +370,7 @@ acpi_ns_repair_CID(struct acpi_evaluate_info *info,
status = acpi_ns_repair_HID(info, element_ptr);
if (ACPI_FAILURE(status)) {
- return (status);
+ return_ACPI_STATUS(status);
}
if (original_element != *element_ptr) {
@@ -380,7 +384,7 @@ acpi_ns_repair_CID(struct acpi_evaluate_info *info,
element_ptr++;
}
- return (AE_OK);
+ return_ACPI_STATUS(AE_OK);
}
/******************************************************************************
@@ -491,16 +495,15 @@ acpi_ns_repair_HID(struct acpi_evaluate_info *info,
union acpi_operand_object **return_object_ptr)
{
union acpi_operand_object *return_object = *return_object_ptr;
- union acpi_operand_object *new_string;
- char *source;
char *dest;
+ char *source;
ACPI_FUNCTION_NAME(ns_repair_HID);
/* We only care about string _HID objects (not integers) */
if (return_object->common.type != ACPI_TYPE_STRING) {
- return (AE_OK);
+ return_ACPI_STATUS(AE_OK);
}
if (return_object->string.length == 0) {
@@ -511,14 +514,7 @@ acpi_ns_repair_HID(struct acpi_evaluate_info *info,
/* Return AE_OK anyway, let driver handle it */
info->return_flags |= ACPI_OBJECT_REPAIRED;
- return (AE_OK);
- }
-
- /* It is simplest to always create a new string object */
-
- new_string = acpi_ut_create_string_object(return_object->string.length);
- if (!new_string) {
- return (AE_NO_MEMORY);
+ return_ACPI_STATUS(AE_OK);
}
/*
@@ -530,7 +526,7 @@ acpi_ns_repair_HID(struct acpi_evaluate_info *info,
source = return_object->string.pointer;
if (*source == '*') {
source++;
- new_string->string.length--;
+ return_object->string.length--;
ACPI_DEBUG_PRINT((ACPI_DB_REPAIR,
"%s: Removed invalid leading asterisk\n",
@@ -545,13 +541,12 @@ acpi_ns_repair_HID(struct acpi_evaluate_info *info,
* "NNNN####" where N is an uppercase letter or decimal digit, and
* # is a hex digit.
*/
- for (dest = new_string->string.pointer; *source; dest++, source++) {
+ for (dest = return_object->string.pointer; *source; dest++, source++) {
*dest = (char)toupper((int)*source);
}
+ return_object->string.pointer[return_object->string.length] = 0;
- acpi_ut_remove_reference(return_object);
- *return_object_ptr = new_string;
- return (AE_OK);
+ return_ACPI_STATUS(AE_OK);
}
/******************************************************************************
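
The repair now edits the returned string object in place instead of
allocating a replacement. The transform itself is small; a standalone sketch
of the same logic in plain C (names illustrative):

	#include <ctype.h>

	/* Uppercase a _HID string and drop one invalid leading '*', in place. */
	static void repair_hid_string(char *hid)
	{
		char *src = hid;
		char *dst = hid;

		if (*src == '*')
			src++;	/* skip the invalid leading asterisk */

		for (; *src; src++, dst++)
			*dst = (char)toupper((unsigned char)*src);

		*dst = '\0';	/* shrinks by one if the asterisk was dropped */
	}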
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index 3294cc8dc073..c7fdb12c3310 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -287,7 +287,7 @@ struct apei_res {
};
/* Collect all resources requested, to avoid conflict */
-struct apei_resources apei_resources_all = {
+static struct apei_resources apei_resources_all = {
.iomem = LIST_HEAD_INIT(apei_resources_all.iomem),
.ioport = LIST_HEAD_INIT(apei_resources_all.ioport),
};
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index d4eac6d7e9fb..2494138a6905 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -1107,6 +1107,11 @@ static int nc_dma_get_range(struct device *dev, u64 *size)
ncomp = (struct acpi_iort_named_component *)node->node_data;
+ if (!ncomp->memory_address_limit) {
+ pr_warn(FW_BUG "Named component missing memory address limit\n");
+ return -EINVAL;
+ }
+
*size = ncomp->memory_address_limit >= 64 ? U64_MAX :
1ULL<<ncomp->memory_address_limit;
@@ -1126,6 +1131,11 @@ static int rc_dma_get_range(struct device *dev, u64 *size)
rc = (struct acpi_iort_root_complex *)node->node_data;
+ if (!rc->memory_address_limit) {
+ pr_warn(FW_BUG "Root complex missing memory address limit\n");
+ return -EINVAL;
+ }
+
*size = rc->memory_address_limit >= 64 ? U64_MAX :
1ULL<<rc->memory_address_limit;
@@ -1173,8 +1183,8 @@ void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
end = dmaaddr + size - 1;
mask = DMA_BIT_MASK(ilog2(end) + 1);
dev->bus_dma_limit = end;
- dev->coherent_dma_mask = mask;
- *dev->dma_mask = mask;
+ dev->coherent_dma_mask = min(dev->coherent_dma_mask, mask);
+ *dev->dma_mask = min(*dev->dma_mask, mask);
}
*dma_addr = dmaaddr;
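
The limit-to-size conversion and mask clamping are the subtle parts here: a
memory_address_limit of 64 or more bits must saturate (1ULL << 64 is
undefined behavior), and a firmware-supplied mask may only ever narrow an
existing DMA mask. A standalone sketch of the arithmetic (illustrative):

	#include <stdint.h>

	/* Convert an IORT memory_address_limit (in bits) to a byte size. */
	static uint64_t limit_to_size(uint8_t limit_bits)
	{
		return limit_bits >= 64 ? UINT64_MAX : 1ULL << limit_bits;
	}

	/* Firmware can only narrow a DMA mask, never widen it. */
	static uint64_t clamp_dma_mask(uint64_t cur_mask, uint64_t fw_mask)
	{
		return fw_mask < cur_mask ? fw_mask : cur_mask;
	}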
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index a852dc4927f7..75aaf94ae0a9 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -414,109 +414,88 @@ end:
return result;
}
+bool acpi_cpc_valid(void)
+{
+ struct cpc_desc *cpc_ptr;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
+ if (!cpc_ptr)
+ return false;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(acpi_cpc_valid);
+
/**
- * acpi_get_psd_map - Map the CPUs in a common freq domain.
- * @all_cpu_data: Ptrs to CPU specific CPPC data including PSD info.
+ * acpi_get_psd_map - Map the CPUs in the freq domain of a given cpu
+ * @cpu: Find all CPUs that share a domain with cpu.
+ * @cpu_data: Pointer to CPU specific CPPC data including PSD info.
*
* Return: 0 for success or negative value for err.
*/
-int acpi_get_psd_map(struct cppc_cpudata **all_cpu_data)
+int acpi_get_psd_map(unsigned int cpu, struct cppc_cpudata *cpu_data)
{
- int count_target;
- int retval = 0;
- unsigned int i, j;
- cpumask_var_t covered_cpus;
- struct cppc_cpudata *pr, *match_pr;
- struct acpi_psd_package *pdomain;
- struct acpi_psd_package *match_pdomain;
struct cpc_desc *cpc_ptr, *match_cpc_ptr;
-
- if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
- return -ENOMEM;
+ struct acpi_psd_package *match_pdomain;
+ struct acpi_psd_package *pdomain;
+ int count_target, i;
/*
* Now that we have _PSD data from all CPUs, let's setup P-state
* domain info.
*/
- for_each_possible_cpu(i) {
- if (cpumask_test_cpu(i, covered_cpus))
- continue;
-
- pr = all_cpu_data[i];
- cpc_ptr = per_cpu(cpc_desc_ptr, i);
- if (!cpc_ptr) {
- retval = -EFAULT;
- goto err_ret;
- }
+ cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
+ if (!cpc_ptr)
+ return -EFAULT;
- pdomain = &(cpc_ptr->domain_info);
- cpumask_set_cpu(i, pr->shared_cpu_map);
- cpumask_set_cpu(i, covered_cpus);
- if (pdomain->num_processors <= 1)
- continue;
+ pdomain = &(cpc_ptr->domain_info);
+ cpumask_set_cpu(cpu, cpu_data->shared_cpu_map);
+ if (pdomain->num_processors <= 1)
+ return 0;
- /* Validate the Domain info */
- count_target = pdomain->num_processors;
- if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
- pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
- else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
- pr->shared_type = CPUFREQ_SHARED_TYPE_HW;
- else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
- pr->shared_type = CPUFREQ_SHARED_TYPE_ANY;
-
- for_each_possible_cpu(j) {
- if (i == j)
- continue;
-
- match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
- if (!match_cpc_ptr) {
- retval = -EFAULT;
- goto err_ret;
- }
+ /* Validate the Domain info */
+ count_target = pdomain->num_processors;
+ if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
+ cpu_data->shared_type = CPUFREQ_SHARED_TYPE_ALL;
+ else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
+ cpu_data->shared_type = CPUFREQ_SHARED_TYPE_HW;
+ else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
+ cpu_data->shared_type = CPUFREQ_SHARED_TYPE_ANY;
- match_pdomain = &(match_cpc_ptr->domain_info);
- if (match_pdomain->domain != pdomain->domain)
- continue;
+ for_each_possible_cpu(i) {
+ if (i == cpu)
+ continue;
- /* Here i and j are in the same domain */
- if (match_pdomain->num_processors != count_target) {
- retval = -EFAULT;
- goto err_ret;
- }
+ match_cpc_ptr = per_cpu(cpc_desc_ptr, i);
+ if (!match_cpc_ptr)
+ goto err_fault;
- if (pdomain->coord_type != match_pdomain->coord_type) {
- retval = -EFAULT;
- goto err_ret;
- }
+ match_pdomain = &(match_cpc_ptr->domain_info);
+ if (match_pdomain->domain != pdomain->domain)
+ continue;
- cpumask_set_cpu(j, covered_cpus);
- cpumask_set_cpu(j, pr->shared_cpu_map);
- }
+ /* Here i and cpu are in the same domain */
+ if (match_pdomain->num_processors != count_target)
+ goto err_fault;
- for_each_cpu(j, pr->shared_cpu_map) {
- if (i == j)
- continue;
+ if (pdomain->coord_type != match_pdomain->coord_type)
+ goto err_fault;
- match_pr = all_cpu_data[j];
- match_pr->shared_type = pr->shared_type;
- cpumask_copy(match_pr->shared_cpu_map,
- pr->shared_cpu_map);
- }
+ cpumask_set_cpu(i, cpu_data->shared_cpu_map);
}
- goto out;
-err_ret:
- for_each_possible_cpu(i) {
- pr = all_cpu_data[i];
+ return 0;
- /* Assume no coordination on any error parsing domain info */
- cpumask_clear(pr->shared_cpu_map);
- cpumask_set_cpu(i, pr->shared_cpu_map);
- pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
- }
-out:
- free_cpumask_var(covered_cpus);
- return retval;
+err_fault:
+ /* Assume no coordination on any error parsing domain info */
+ cpumask_clear(cpu_data->shared_cpu_map);
+ cpumask_set_cpu(cpu, cpu_data->shared_cpu_map);
+ cpu_data->shared_type = CPUFREQ_SHARED_TYPE_NONE;
+
+ return -EFAULT;
}
EXPORT_SYMBOL_GPL(acpi_get_psd_map);
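
With the rework, callers own one cppc_cpudata per policy and query only that
CPU's domain. A sketch of the new calling convention (the surrounding driver
context and error handling are assumptions for illustration):

	static int example_init_domain(unsigned int cpu)
	{
		struct cppc_cpudata *cpu_data;
		int ret;

		if (!acpi_cpc_valid())
			return -ENODEV;	/* some CPU lacks a _CPC entry */

		cpu_data = kzalloc(sizeof(*cpu_data), GFP_KERNEL);
		if (!cpu_data)
			return -ENOMEM;

		if (!zalloc_cpumask_var(&cpu_data->shared_cpu_map, GFP_KERNEL)) {
			kfree(cpu_data);
			return -ENOMEM;
		}

		/* Fills shared_cpu_map and shared_type for this domain only. */
		ret = acpi_get_psd_map(cpu, cpu_data);
		if (ret) {
			free_cpumask_var(cpu_data->shared_cpu_map);
			kfree(cpu_data);
		}
		return ret;
	}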
diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
index 96869f1538b9..bfca116482b8 100644
--- a/drivers/acpi/device_sysfs.c
+++ b/drivers/acpi/device_sysfs.c
@@ -251,20 +251,12 @@ int __acpi_device_uevent_modalias(struct acpi_device *adev,
if (add_uevent_var(env, "MODALIAS="))
return -ENOMEM;
- len = create_pnp_modalias(adev, &env->buf[env->buflen - 1],
- sizeof(env->buf) - env->buflen);
- if (len < 0)
- return len;
-
- env->buflen += len;
- if (!adev->data.of_compatible)
- return 0;
-
- if (len > 0 && add_uevent_var(env, "MODALIAS="))
- return -ENOMEM;
-
- len = create_of_modalias(adev, &env->buf[env->buflen - 1],
- sizeof(env->buf) - env->buflen);
+ if (adev->data.of_compatible)
+ len = create_of_modalias(adev, &env->buf[env->buflen - 1],
+ sizeof(env->buf) - env->buflen);
+ else
+ len = create_pnp_modalias(adev, &env->buf[env->buflen - 1],
+ sizeof(env->buf) - env->buflen);
if (len < 0)
return len;
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index e0cb1bcfffb2..13565629ce0a 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -169,7 +169,7 @@ struct acpi_ec_query {
};
static int acpi_ec_query(struct acpi_ec *ec, u8 *data);
-static void advance_transaction(struct acpi_ec *ec);
+static void advance_transaction(struct acpi_ec *ec, bool interrupt);
static void acpi_ec_event_handler(struct work_struct *work);
static void acpi_ec_event_processor(struct work_struct *work);
@@ -335,12 +335,12 @@ static const char *acpi_ec_cmd_string(u8 cmd)
* GPE Registers
* -------------------------------------------------------------------------- */
-static inline bool acpi_ec_is_gpe_raised(struct acpi_ec *ec)
+static inline bool acpi_ec_gpe_status_set(struct acpi_ec *ec)
{
acpi_event_status gpe_status = 0;
(void)acpi_get_gpe_status(NULL, ec->gpe, &gpe_status);
- return (gpe_status & ACPI_EVENT_FLAG_STATUS_SET) ? true : false;
+ return !!(gpe_status & ACPI_EVENT_FLAG_STATUS_SET);
}
static inline void acpi_ec_enable_gpe(struct acpi_ec *ec, bool open)
@@ -351,14 +351,14 @@ static inline void acpi_ec_enable_gpe(struct acpi_ec *ec, bool open)
BUG_ON(ec->reference_count < 1);
acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
}
- if (acpi_ec_is_gpe_raised(ec)) {
+ if (acpi_ec_gpe_status_set(ec)) {
/*
* On some platforms, EN=1 writes cannot trigger GPE. So
* software needs to manually trigger a pseudo GPE event on
* EN=1 writes.
*/
ec_dbg_raw("Polling quirk");
- advance_transaction(ec);
+ advance_transaction(ec, false);
}
}
@@ -372,23 +372,6 @@ static inline void acpi_ec_disable_gpe(struct acpi_ec *ec, bool close)
}
}
-static inline void acpi_ec_clear_gpe(struct acpi_ec *ec)
-{
- /*
- * GPE STS is a W1C register, which means:
- * 1. Software can clear it without worrying about clearing other
- * GPEs' STS bits when the hardware sets them in parallel.
- * 2. As long as software can ensure only clearing it when it is
- * set, hardware won't set it in parallel.
- * So software can clear GPE in any contexts.
- * Warning: do not move the check into advance_transaction() as the
- * EC commands will be sent without GPE raised.
- */
- if (!acpi_ec_is_gpe_raised(ec))
- return;
- acpi_clear_gpe(NULL, ec->gpe);
-}
-
/* --------------------------------------------------------------------------
* Transaction Management
* -------------------------------------------------------------------------- */
@@ -488,7 +471,7 @@ static inline void __acpi_ec_enable_event(struct acpi_ec *ec)
* Unconditionally invoke this once after enabling the event
* handling mechanism to detect the pending events.
*/
- advance_transaction(ec);
+ advance_transaction(ec, false);
}
static inline void __acpi_ec_disable_event(struct acpi_ec *ec)
@@ -632,24 +615,41 @@ static inline void ec_transaction_transition(struct acpi_ec *ec, unsigned long f
}
}
-static void advance_transaction(struct acpi_ec *ec)
+static void acpi_ec_spurious_interrupt(struct acpi_ec *ec, struct transaction *t)
{
- struct transaction *t;
- u8 status;
+ if (t->irq_count < ec_storm_threshold)
+ ++t->irq_count;
+
+ /* Trigger if the threshold is 0 too. */
+ if (t->irq_count == ec_storm_threshold)
+ acpi_ec_mask_events(ec);
+}
+
+static void advance_transaction(struct acpi_ec *ec, bool interrupt)
+{
+ struct transaction *t = ec->curr;
bool wakeup = false;
+ u8 status;
+
+ ec_dbg_stm("%s (%d)", interrupt ? "IRQ" : "TASK", smp_processor_id());
- ec_dbg_stm("%s (%d)", in_interrupt() ? "IRQ" : "TASK",
- smp_processor_id());
/*
- * By always clearing STS before handling all indications, we can
- * ensure a hardware STS 0->1 change after this clearing can always
- * trigger a GPE interrupt.
+ * Clear GPE_STS upfront to allow subsequent hardware GPE_STS 0->1
+ * changes to always trigger a GPE interrupt.
+ *
+ * GPE STS is a W1C register, which means:
+ *
+ * 1. Software can clear it without worrying about clearing the other
+ * GPEs' STS bits when the hardware sets them in parallel.
+ *
+ * 2. As long as software can ensure only clearing it when it is set,
+ * hardware won't set it in parallel.
*/
- if (ec->gpe >= 0)
- acpi_ec_clear_gpe(ec);
+ if (ec->gpe >= 0 && acpi_ec_gpe_status_set(ec))
+ acpi_clear_gpe(NULL, ec->gpe);
status = acpi_ec_read_status(ec);
- t = ec->curr;
+
/*
* Another IRQ or a guarded polling mode advancement is detected,
* the next QR_EC submission is then allowed.
@@ -661,56 +661,43 @@ static void advance_transaction(struct acpi_ec *ec)
clear_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags);
acpi_ec_complete_query(ec);
}
+ if (!t)
+ goto out;
}
- if (!t)
- goto err;
+
if (t->flags & ACPI_EC_COMMAND_POLL) {
if (t->wlen > t->wi) {
- if ((status & ACPI_EC_FLAG_IBF) == 0)
+ if (!(status & ACPI_EC_FLAG_IBF))
acpi_ec_write_data(ec, t->wdata[t->wi++]);
- else
- goto err;
+ else if (interrupt && !(status & ACPI_EC_FLAG_SCI))
+ acpi_ec_spurious_interrupt(ec, t);
} else if (t->rlen > t->ri) {
- if ((status & ACPI_EC_FLAG_OBF) == 1) {
+ if (status & ACPI_EC_FLAG_OBF) {
t->rdata[t->ri++] = acpi_ec_read_data(ec);
if (t->rlen == t->ri) {
ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
+ wakeup = true;
if (t->command == ACPI_EC_COMMAND_QUERY)
ec_dbg_evt("Command(%s) completed by hardware",
acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
- wakeup = true;
}
- } else
- goto err;
- } else if (t->wlen == t->wi &&
- (status & ACPI_EC_FLAG_IBF) == 0) {
+ } else if (interrupt && !(status & ACPI_EC_FLAG_SCI)) {
+ acpi_ec_spurious_interrupt(ec, t);
+ }
+ } else if (t->wlen == t->wi && !(status & ACPI_EC_FLAG_IBF)) {
ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
wakeup = true;
}
- goto out;
} else if (!(status & ACPI_EC_FLAG_IBF)) {
acpi_ec_write_cmd(ec, t->command);
ec_transaction_transition(ec, ACPI_EC_COMMAND_POLL);
- goto out;
- }
-err:
- /*
- * If SCI bit is set, then don't think it's a false IRQ
- * otherwise will take a not handled IRQ as a false one.
- */
- if (!(status & ACPI_EC_FLAG_SCI)) {
- if (in_interrupt() && t) {
- if (t->irq_count < ec_storm_threshold)
- ++t->irq_count;
- /* Allow triggering on 0 threshold */
- if (t->irq_count == ec_storm_threshold)
- acpi_ec_mask_events(ec);
- }
}
+
out:
if (status & ACPI_EC_FLAG_SCI)
acpi_ec_submit_query(ec);
- if (wakeup && in_interrupt())
+
+ if (wakeup && interrupt)
wake_up(&ec->wait);
}
@@ -767,7 +754,7 @@ static int ec_poll(struct acpi_ec *ec)
if (!ec_guard(ec))
return 0;
spin_lock_irqsave(&ec->lock, flags);
- advance_transaction(ec);
+ advance_transaction(ec, false);
spin_unlock_irqrestore(&ec->lock, flags);
} while (time_before(jiffies, delay));
pr_debug("controller reset, restart transaction\n");
@@ -1216,7 +1203,7 @@ static void acpi_ec_check_event(struct acpi_ec *ec)
* taking care of it.
*/
if (!ec->curr)
- advance_transaction(ec);
+ advance_transaction(ec, false);
spin_unlock_irqrestore(&ec->lock, flags);
}
}
@@ -1259,7 +1246,7 @@ static void acpi_ec_handle_interrupt(struct acpi_ec *ec)
unsigned long flags;
spin_lock_irqsave(&ec->lock, flags);
- advance_transaction(ec);
+ advance_transaction(ec, true);
spin_unlock_irqrestore(&ec->lock, flags);
}
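
The W1C (write-1-to-clear) semantics described in the comment above are a
common status-register convention; a generic sketch of the rule, independent
of the EC code (register layout illustrative):

	#include <stdint.h>

	/* W1C: writing 1 clears a set bit; writing 0 leaves bits untouched. */
	static void clear_w1c_bit(volatile uint32_t *sts, uint32_t bit)
	{
		if (*sts & bit)		/* only clear it when it is set */
			*sts = bit;	/* other bits see 0 and stay intact */
	}

This is why the code can clear GPE_STS from any context without racing other
GPEs' status bits.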
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index e3638bafb941..e6a5d997241c 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -97,7 +97,7 @@ void acpi_scan_table_handler(u32 event, void *table, void *context);
extern struct list_head acpi_bus_id_list;
struct acpi_device_bus_id {
- char bus_id[15];
+ const char *bus_id;
unsigned int instance_no;
struct list_head node;
};
@@ -105,7 +105,8 @@ struct acpi_device_bus_id {
int acpi_device_add(struct acpi_device *device,
void (*release)(struct device *));
void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
- int type, unsigned long long sta);
+ int type, unsigned long long sta,
+ struct acpi_device_info *info);
int acpi_device_setup_files(struct acpi_device *dev);
void acpi_device_remove_files(struct acpi_device *dev);
void acpi_device_add_finalize(struct acpi_device *device);
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 442608220b5c..8c5dde628405 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -5,6 +5,7 @@
#include <linux/list_sort.h>
#include <linux/libnvdimm.h>
#include <linux/module.h>
+#include <linux/nospec.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/sysfs.h>
@@ -282,18 +283,19 @@ err:
static union acpi_object *int_to_buf(union acpi_object *integer)
{
- union acpi_object *buf = ACPI_ALLOCATE(sizeof(*buf) + 4);
+ union acpi_object *buf = NULL;
void *dst = NULL;
- if (!buf)
- goto err;
-
if (integer->type != ACPI_TYPE_INTEGER) {
WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
integer->type);
goto err;
}
+ buf = ACPI_ALLOCATE(sizeof(*buf) + 4);
+ if (!buf)
+ goto err;
+
dst = buf + 1;
buf->type = ACPI_TYPE_BUFFER;
buf->buffer.length = 4;
@@ -478,8 +480,11 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
cmd_mask = nd_desc->cmd_mask;
if (cmd == ND_CMD_CALL && call_pkg->nd_family) {
family = call_pkg->nd_family;
- if (!test_bit(family, &nd_desc->bus_family_mask))
+ if (family > NVDIMM_BUS_FAMILY_MAX ||
+ !test_bit(family, &nd_desc->bus_family_mask))
return -EINVAL;
+ family = array_index_nospec(family,
+ NVDIMM_BUS_FAMILY_MAX + 1);
dsm_mask = acpi_desc->family_dsm_mask[family];
guid = to_nfit_bus_uuid(family);
} else {
@@ -2264,40 +2269,24 @@ static const struct attribute_group *acpi_nfit_region_attribute_groups[] = {
/* enough info to uniquely specify an interleave set */
struct nfit_set_info {
- struct nfit_set_info_map {
- u64 region_offset;
- u32 serial_number;
- u32 pad;
- } mapping[0];
+ u64 region_offset;
+ u32 serial_number;
+ u32 pad;
};
struct nfit_set_info2 {
- struct nfit_set_info_map2 {
- u64 region_offset;
- u32 serial_number;
- u16 vendor_id;
- u16 manufacturing_date;
- u8 manufacturing_location;
- u8 reserved[31];
- } mapping[0];
+ u64 region_offset;
+ u32 serial_number;
+ u16 vendor_id;
+ u16 manufacturing_date;
+ u8 manufacturing_location;
+ u8 reserved[31];
};
-static size_t sizeof_nfit_set_info(int num_mappings)
-{
- return sizeof(struct nfit_set_info)
- + num_mappings * sizeof(struct nfit_set_info_map);
-}
-
-static size_t sizeof_nfit_set_info2(int num_mappings)
-{
- return sizeof(struct nfit_set_info2)
- + num_mappings * sizeof(struct nfit_set_info_map2);
-}
-
static int cmp_map_compat(const void *m0, const void *m1)
{
- const struct nfit_set_info_map *map0 = m0;
- const struct nfit_set_info_map *map1 = m1;
+ const struct nfit_set_info *map0 = m0;
+ const struct nfit_set_info *map1 = m1;
return memcmp(&map0->region_offset, &map1->region_offset,
sizeof(u64));
@@ -2305,8 +2294,8 @@ static int cmp_map_compat(const void *m0, const void *m1)
static int cmp_map(const void *m0, const void *m1)
{
- const struct nfit_set_info_map *map0 = m0;
- const struct nfit_set_info_map *map1 = m1;
+ const struct nfit_set_info *map0 = m0;
+ const struct nfit_set_info *map1 = m1;
if (map0->region_offset < map1->region_offset)
return -1;
@@ -2317,8 +2306,8 @@ static int cmp_map(const void *m0, const void *m1)
static int cmp_map2(const void *m0, const void *m1)
{
- const struct nfit_set_info_map2 *map0 = m0;
- const struct nfit_set_info_map2 *map1 = m1;
+ const struct nfit_set_info2 *map0 = m0;
+ const struct nfit_set_info2 *map1 = m1;
if (map0->region_offset < map1->region_offset)
return -1;
@@ -2356,22 +2345,22 @@ static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
return -ENOMEM;
import_guid(&nd_set->type_guid, spa->range_guid);
- info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL);
+ info = devm_kcalloc(dev, nr, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
- info2 = devm_kzalloc(dev, sizeof_nfit_set_info2(nr), GFP_KERNEL);
+ info2 = devm_kcalloc(dev, nr, sizeof(*info2), GFP_KERNEL);
if (!info2)
return -ENOMEM;
for (i = 0; i < nr; i++) {
struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
- struct nfit_set_info_map *map = &info->mapping[i];
- struct nfit_set_info_map2 *map2 = &info2->mapping[i];
struct nvdimm *nvdimm = mapping->nvdimm;
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
- struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc,
- spa->range_index, i);
+ struct nfit_set_info *map = &info[i];
+ struct nfit_set_info2 *map2 = &info2[i];
+ struct acpi_nfit_memory_map *memdev =
+ memdev_from_spa(acpi_desc, spa->range_index, i);
struct acpi_nfit_control_region *dcr = nfit_mem->dcr;
if (!memdev || !nfit_mem->dcr) {
@@ -2390,23 +2379,20 @@ static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
}
/* v1.1 namespaces */
- sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
- cmp_map, NULL);
- nd_set->cookie1 = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);
+ sort(info, nr, sizeof(*info), cmp_map, NULL);
+ nd_set->cookie1 = nd_fletcher64(info, sizeof(*info) * nr, 0);
/* v1.2 namespaces */
- sort(&info2->mapping[0], nr, sizeof(struct nfit_set_info_map2),
- cmp_map2, NULL);
- nd_set->cookie2 = nd_fletcher64(info2, sizeof_nfit_set_info2(nr), 0);
+ sort(info2, nr, sizeof(*info2), cmp_map2, NULL);
+ nd_set->cookie2 = nd_fletcher64(info2, sizeof(*info2) * nr, 0);
/* support v1.1 namespaces created with the wrong sort order */
- sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
- cmp_map_compat, NULL);
- nd_set->altcookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);
+ sort(info, nr, sizeof(*info), cmp_map_compat, NULL);
+ nd_set->altcookie = nd_fletcher64(info, sizeof(*info) * nr, 0);
/* record the result of the sort for the mapping position */
for (i = 0; i < nr; i++) {
- struct nfit_set_info_map2 *map2 = &info2->mapping[i];
+ struct nfit_set_info2 *map2 = &info2[i];
int j;
for (j = 0; j < nr; j++) {
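
The family check added earlier in this file follows the standard Spectre-v1
hardening pattern: bounds-check the untrusted index, then sanitize it with
array_index_nospec() before using it to index kernel arrays. A generic
sketch (table and bounds illustrative):

	#include <linux/nospec.h>

	static int example_lookup(unsigned long idx, const int *table,
				  unsigned long max)
	{
		if (idx > max)
			return -EINVAL;

		/* Clamp speculative execution to the validated range. */
		idx = array_index_nospec(idx, max + 1);
		return table[idx];
	}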
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index c12b5fb3e8fb..0bf072cef6cf 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -722,9 +722,7 @@ static void acpi_pci_root_validate_resources(struct device *dev,
* our resources no longer match the ACPI _CRS, but
* the kernel resource tree doesn't allow overlaps.
*/
- if (resource_overlaps(res1, res2)) {
- res2->start = min(res1->start, res2->start);
- res2->end = max(res1->end, res2->end);
+ if (resource_union(res1, res2, res2)) {
dev_info(dev, "host bridge window expanded to %pR; %pR ignored\n",
res2, res1);
free = true;
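
resource_union() subsumes both the overlap test and the open-coded min/max
merge it replaces; it is roughly equivalent to this sketch:

	static bool example_resource_union(struct resource *r1, struct resource *r2,
					   struct resource *out)
	{
		if (!resource_overlaps(r1, r2))
			return false;

		out->start = min(r1->start, r2->start);
		out->end = max(r1->end, r2->end);
		return true;
	}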
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 8048da85b7e0..189a0d4c6d06 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -939,7 +939,7 @@ int acpi_add_power_resource(acpi_handle handle)
device = &resource->device;
acpi_init_device_object(device, handle, ACPI_BUS_TYPE_POWER,
- ACPI_STA_DEFAULT);
+ ACPI_STA_DEFAULT, NULL);
mutex_init(&resource->resource_lock);
INIT_LIST_HEAD(&resource->list_node);
INIT_LIST_HEAD(&resource->dependents);
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index f66236cff69b..d93e400940a3 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -31,7 +31,6 @@
#include <asm/apic.h>
#endif
-#define ACPI_PROCESSOR_CLASS "processor"
#define _COMPONENT ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_idle");
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index b04a68950ff1..32f0f554ccae 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -22,7 +22,6 @@
#define PREFIX "ACPI: "
-#define ACPI_PROCESSOR_CLASS "processor"
#define ACPI_PROCESSOR_FILE_PERFORMANCE "performance"
#define _COMPONENT ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_perflib");
@@ -616,7 +615,6 @@ int acpi_processor_preregister_performance(
continue;
pr->performance = per_cpu_ptr(performance, i);
- cpumask_set_cpu(i, pr->performance->shared_cpu_map);
pdomain = &(pr->performance->domain_info);
if (acpi_processor_get_psd(pr->handle, pdomain)) {
retval = -EINVAL;
@@ -710,7 +708,7 @@ err_ret:
if (retval) {
cpumask_clear(pr->performance->shared_cpu_map);
cpumask_set_cpu(i, pr->performance->shared_cpu_map);
- pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
+ pr->performance->shared_type = CPUFREQ_SHARED_TYPE_NONE;
}
pr->performance = NULL; /* Will be set for real in register */
}
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index 6c7d05b37c98..677a132be242 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -19,8 +19,6 @@
#define PREFIX "ACPI: "
-#define ACPI_PROCESSOR_CLASS "processor"
-
#ifdef CONFIG_CPU_FREQ
/* If a passive cooling situation is detected, primarily CPUfreq is used, as it
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index a0bd56ece3ff..b1876534324b 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -22,7 +22,6 @@
#define PREFIX "ACPI: "
-#define ACPI_PROCESSOR_CLASS "processor"
#define _COMPONENT ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_throttling");
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 58203193417e..20a7892c6d3f 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -534,7 +534,7 @@ static acpi_status acpi_dev_process_resource(struct acpi_resource *ares,
ret = c->preproc(ares, c->preproc_data);
if (ret < 0) {
c->error = ret;
- return AE_CTRL_TERMINATE;
+ return AE_ABORT_METHOD;
} else if (ret > 0) {
return AE_OK;
}
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index e6d9f4de2800..3b0b6dd34914 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -711,26 +711,4 @@ static struct acpi_driver acpi_sbs_driver = {
},
.drv.pm = &acpi_sbs_pm,
};
-
-static int __init acpi_sbs_init(void)
-{
- int result = 0;
-
- if (acpi_disabled)
- return -ENODEV;
-
- result = acpi_bus_register_driver(&acpi_sbs_driver);
- if (result < 0)
- return -ENODEV;
-
- return 0;
-}
-
-static void __exit acpi_sbs_exit(void)
-{
- acpi_bus_unregister_driver(&acpi_sbs_driver);
- return;
-}
-
-module_init(acpi_sbs_init);
-module_exit(acpi_sbs_exit);
+module_acpi_driver(acpi_sbs_driver);
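
module_acpi_driver() generates the init/exit boilerplate that the deleted
functions spelled out by hand; a simplified sketch of what the macro expands
to (the real expansion uses token pasting for the names):

	static int __init acpi_sbs_driver_init(void)
	{
		/* acpi_bus_register_driver() itself fails with -ENODEV when
		 * ACPI is disabled, so the explicit check was redundant. */
		return acpi_bus_register_driver(&acpi_sbs_driver);
	}
	module_init(acpi_sbs_driver_init);

	static void __exit acpi_sbs_driver_exit(void)
	{
		acpi_bus_unregister_driver(&acpi_sbs_driver);
	}
	module_exit(acpi_sbs_driver_exit);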
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 519963bcc047..22566b4b3150 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -51,8 +51,8 @@ static u64 spcr_uart_addr;
struct acpi_dep_data {
struct list_head node;
- acpi_handle master;
- acpi_handle slave;
+ acpi_handle supplier;
+ acpi_handle consumer;
};
void acpi_scan_lock_acquire(void)
@@ -486,6 +486,7 @@ static void acpi_device_del(struct acpi_device *device)
acpi_device_bus_id->instance_no--;
else {
list_del(&acpi_device_bus_id->node);
+ kfree_const(acpi_device_bus_id->bus_id);
kfree(acpi_device_bus_id);
}
break;
@@ -585,6 +586,8 @@ static int acpi_get_device_data(acpi_handle handle, struct acpi_device **device,
if (!device)
return -EINVAL;
+ *device = NULL;
+
status = acpi_get_data_full(handle, acpi_scan_drop_device,
(void **)device, callback);
if (ACPI_FAILURE(status) || !*device) {
@@ -674,7 +677,14 @@ int acpi_device_add(struct acpi_device *device,
}
if (!found) {
acpi_device_bus_id = new_bus_id;
- strcpy(acpi_device_bus_id->bus_id, acpi_device_hid(device));
+ acpi_device_bus_id->bus_id =
+ kstrdup_const(acpi_device_hid(device), GFP_KERNEL);
+ if (!acpi_device_bus_id->bus_id) {
+ pr_err(PREFIX "Memory allocation error for bus id\n");
+ result = -ENOMEM;
+ goto err_free_new_bus_id;
+ }
+
acpi_device_bus_id->instance_no = 0;
list_add_tail(&acpi_device_bus_id->node, &acpi_bus_id_list);
}
@@ -709,6 +719,11 @@ int acpi_device_add(struct acpi_device *device,
if (device->parent)
list_del(&device->node);
list_del(&device->wakeup_list);
+
+ err_free_new_bus_id:
+ if (!found)
+ kfree(new_bus_id);
+
mutex_unlock(&acpi_device_lock);
err_detach:
@@ -719,6 +734,43 @@ int acpi_device_add(struct acpi_device *device,
/* --------------------------------------------------------------------------
Device Enumeration
-------------------------------------------------------------------------- */
+static bool acpi_info_matches_ids(struct acpi_device_info *info,
+ const char * const ids[])
+{
+ struct acpi_pnp_device_id_list *cid_list = NULL;
+ int i;
+
+ if (!(info->valid & ACPI_VALID_HID))
+ return false;
+
+ if (info->valid & ACPI_VALID_CID)
+ cid_list = &info->compatible_id_list;
+
+ for (i = 0; ids[i]; i++) {
+ int j;
+
+ if (!strcmp(info->hardware_id.string, ids[i]))
+ return true;
+
+ if (!cid_list)
+ continue;
+
+ for (j = 0; j < cid_list->count; j++) {
+ if (!strcmp(cid_list->ids[j].string, ids[i]))
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/* List of HIDs for which we ignore matching ACPI devices, when checking _DEP lists. */
+static const char * const acpi_ignore_dep_ids[] = {
+ "PNP0D80", /* Windows-compatible System Power Management Controller */
+ "INT33BD", /* Intel Baytrail Mailbox Device */
+ NULL
+};
+
static struct acpi_device *acpi_bus_get_parent(acpi_handle handle)
{
struct acpi_device *device = NULL;
@@ -1236,10 +1288,8 @@ static bool acpi_object_is_system_bus(acpi_handle handle)
}
static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp,
- int device_type)
+ int device_type, struct acpi_device_info *info)
{
- acpi_status status;
- struct acpi_device_info *info;
struct acpi_pnp_device_id_list *cid_list;
int i;
@@ -1250,8 +1300,7 @@ static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp,
break;
}
- status = acpi_get_object_info(handle, &info);
- if (ACPI_FAILURE(status)) {
+ if (!info) {
pr_err(PREFIX "%s: Error reading device info\n",
__func__);
return;
@@ -1276,8 +1325,6 @@ static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp,
if (info->valid & ACPI_VALID_CLS)
acpi_add_id(pnp, info->class_code.string);
- kfree(info);
-
/*
* Some devices don't reliably have _HIDs & _CIDs, so add
* synthetic HIDs to make sure drivers can find them.
@@ -1583,7 +1630,8 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device)
}
void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
- int type, unsigned long long sta)
+ int type, unsigned long long sta,
+ struct acpi_device_info *info)
{
INIT_LIST_HEAD(&device->pnp.ids);
device->device_type = type;
@@ -1592,7 +1640,7 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
fwnode_init(&device->fwnode, &acpi_device_fwnode_ops);
acpi_set_device_status(device, sta);
acpi_device_get_busid(device);
- acpi_set_pnp_ids(handle, &device->pnp, type);
+ acpi_set_pnp_ids(handle, &device->pnp, type, info);
acpi_init_properties(device);
acpi_bus_get_flags(device);
device->flags.match_driver = false;
@@ -1603,8 +1651,6 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
device_initialize(&device->dev);
dev_set_uevent_suppress(&device->dev, true);
acpi_init_coherency(device);
- /* Assume there are unmet deps until acpi_device_dep_initialize() runs */
- device->dep_unmet = 1;
}
void acpi_device_add_finalize(struct acpi_device *device)
@@ -1620,14 +1666,20 @@ static int acpi_add_single_object(struct acpi_device **child,
int result;
struct acpi_device *device;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_device_info *info = NULL;
+
+ if (handle != ACPI_ROOT_OBJECT && type == ACPI_BUS_TYPE_DEVICE)
+ acpi_get_object_info(handle, &info);
device = kzalloc(sizeof(struct acpi_device), GFP_KERNEL);
if (!device) {
printk(KERN_ERR PREFIX "Memory allocation error\n");
+ kfree(info);
return -ENOMEM;
}
- acpi_init_device_object(device, handle, type, sta);
+ acpi_init_device_object(device, handle, type, sta, info);
+ kfree(info);
/*
* For ACPI_BUS_TYPE_DEVICE getting the status is delayed till here so
* that we can call acpi_bus_get_status() and use its quirk handling.
@@ -1804,67 +1856,84 @@ static void acpi_scan_init_hotplug(struct acpi_device *adev)
}
}
-static void acpi_device_dep_initialize(struct acpi_device *adev)
+static u32 acpi_scan_check_dep(acpi_handle handle)
{
- struct acpi_dep_data *dep;
struct acpi_handle_list dep_devices;
acpi_status status;
+ u32 count;
int i;
- adev->dep_unmet = 0;
-
- if (!acpi_has_method(adev->handle, "_DEP"))
- return;
+ /*
+ * Check for _HID here to avoid deferring the enumeration of:
+ * 1. PCI devices.
+ * 2. ACPI nodes describing USB ports.
+	 * Still, checking for _HID catches more than just these cases ...
+ */
+ if (!acpi_has_method(handle, "_DEP") || !acpi_has_method(handle, "_HID"))
+ return 0;
- status = acpi_evaluate_reference(adev->handle, "_DEP", NULL,
- &dep_devices);
+ status = acpi_evaluate_reference(handle, "_DEP", NULL, &dep_devices);
if (ACPI_FAILURE(status)) {
- dev_dbg(&adev->dev, "Failed to evaluate _DEP.\n");
- return;
+ acpi_handle_debug(handle, "Failed to evaluate _DEP.\n");
+ return 0;
}
- for (i = 0; i < dep_devices.count; i++) {
+ for (count = 0, i = 0; i < dep_devices.count; i++) {
struct acpi_device_info *info;
- int skip;
+ struct acpi_dep_data *dep;
+ bool skip;
status = acpi_get_object_info(dep_devices.handles[i], &info);
if (ACPI_FAILURE(status)) {
- dev_dbg(&adev->dev, "Error reading _DEP device info\n");
+ acpi_handle_debug(handle, "Error reading _DEP device info\n");
continue;
}
- /*
- * Skip the dependency of Windows System Power
- * Management Controller
- */
- skip = info->valid & ACPI_VALID_HID &&
- !strcmp(info->hardware_id.string, "INT3396");
-
+ skip = acpi_info_matches_ids(info, acpi_ignore_dep_ids);
kfree(info);
if (skip)
continue;
- dep = kzalloc(sizeof(struct acpi_dep_data), GFP_KERNEL);
+ dep = kzalloc(sizeof(*dep), GFP_KERNEL);
if (!dep)
- return;
+ continue;
- dep->master = dep_devices.handles[i];
- dep->slave = adev->handle;
- adev->dep_unmet++;
+ count++;
+
+ dep->supplier = dep_devices.handles[i];
+ dep->consumer = handle;
mutex_lock(&acpi_dep_list_lock);
list_add_tail(&dep->node, &acpi_dep_list);
mutex_unlock(&acpi_dep_list_lock);
}
+
+ return count;
+}
+
+static void acpi_scan_dep_init(struct acpi_device *adev)
+{
+ struct acpi_dep_data *dep;
+
+ mutex_lock(&acpi_dep_list_lock);
+
+ list_for_each_entry(dep, &acpi_dep_list, node) {
+ if (dep->consumer == adev->handle)
+ adev->dep_unmet++;
+ }
+
+ mutex_unlock(&acpi_dep_list_lock);
}
-static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used,
- void *not_used, void **return_value)
+static bool acpi_bus_scan_second_pass;
+
+static acpi_status acpi_bus_check_add(acpi_handle handle, bool check_dep,
+ struct acpi_device **adev_p)
{
struct acpi_device *device = NULL;
- int type;
unsigned long long sta;
+ int type;
int result;
acpi_bus_get_device(handle, &device);
@@ -1880,20 +1949,42 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used,
return AE_OK;
}
+ if (type == ACPI_BUS_TYPE_DEVICE && check_dep) {
+ u32 count = acpi_scan_check_dep(handle);
+ /* Bail out if the number of recorded dependencies is not 0. */
+ if (count > 0) {
+ acpi_bus_scan_second_pass = true;
+ return AE_CTRL_DEPTH;
+ }
+ }
+
acpi_add_single_object(&device, handle, type, sta);
if (!device)
return AE_CTRL_DEPTH;
acpi_scan_init_hotplug(device);
- acpi_device_dep_initialize(device);
+ if (!check_dep)
+ acpi_scan_dep_init(device);
- out:
- if (!*return_value)
- *return_value = device;
+out:
+ if (!*adev_p)
+ *adev_p = device;
return AE_OK;
}
+static acpi_status acpi_bus_check_add_1(acpi_handle handle, u32 lvl_not_used,
+ void *not_used, void **ret_p)
+{
+ return acpi_bus_check_add(handle, true, (struct acpi_device **)ret_p);
+}
+
+static acpi_status acpi_bus_check_add_2(acpi_handle handle, u32 lvl_not_used,
+ void *not_used, void **ret_p)
+{
+ return acpi_bus_check_add(handle, false, (struct acpi_device **)ret_p);
+}
+
static void acpi_default_enumeration(struct acpi_device *device)
{
/*
@@ -1961,12 +2052,16 @@ static int acpi_scan_attach_handler(struct acpi_device *device)
return ret;
}
-static void acpi_bus_attach(struct acpi_device *device)
+static void acpi_bus_attach(struct acpi_device *device, bool first_pass)
{
struct acpi_device *child;
+ bool skip = !first_pass && device->flags.visited;
acpi_handle ejd;
int ret;
+ if (skip)
+ goto ok;
+
if (ACPI_SUCCESS(acpi_bus_get_ejd(device->handle, &ejd)))
register_dock_dependent_device(device, ejd);
@@ -2013,9 +2108,9 @@ static void acpi_bus_attach(struct acpi_device *device)
ok:
list_for_each_entry(child, &device->children, node)
- acpi_bus_attach(child);
+ acpi_bus_attach(child, first_pass);
- if (device->handler && device->handler->hotplug.notify_online)
+ if (!skip && device->handler && device->handler->hotplug.notify_online)
device->handler->hotplug.notify_online(device);
}
@@ -2026,14 +2121,15 @@ void acpi_walk_dep_device_list(acpi_handle handle)
mutex_lock(&acpi_dep_list_lock);
list_for_each_entry_safe(dep, tmp, &acpi_dep_list, node) {
- if (dep->master == handle) {
- acpi_bus_get_device(dep->slave, &adev);
- if (!adev)
- continue;
+ if (dep->supplier == handle) {
+ acpi_bus_get_device(dep->consumer, &adev);
+
+ if (adev) {
+ adev->dep_unmet--;
+ if (!adev->dep_unmet)
+ acpi_bus_attach(adev, true);
+ }
- adev->dep_unmet--;
- if (!adev->dep_unmet)
- acpi_bus_attach(adev);
list_del(&dep->node);
kfree(dep);
}
@@ -2058,17 +2154,37 @@ EXPORT_SYMBOL_GPL(acpi_walk_dep_device_list);
*/
int acpi_bus_scan(acpi_handle handle)
{
- void *device = NULL;
+ struct acpi_device *device = NULL;
+
+ acpi_bus_scan_second_pass = false;
- if (ACPI_SUCCESS(acpi_bus_check_add(handle, 0, NULL, &device)))
+ /* Pass 1: Avoid enumerating devices with missing dependencies. */
+
+ if (ACPI_SUCCESS(acpi_bus_check_add(handle, true, &device)))
acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
- acpi_bus_check_add, NULL, NULL, &device);
+ acpi_bus_check_add_1, NULL, NULL,
+ (void **)&device);
+
+ if (!device)
+ return -ENODEV;
+
+ acpi_bus_attach(device, true);
- if (device) {
- acpi_bus_attach(device);
+ if (!acpi_bus_scan_second_pass)
return 0;
- }
- return -ENODEV;
+
+ /* Pass 2: Enumerate all of the remaining devices. */
+
+ device = NULL;
+
+ if (ACPI_SUCCESS(acpi_bus_check_add(handle, false, &device)))
+ acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
+ acpi_bus_check_add_2, NULL, NULL,
+ (void **)&device);
+
+ acpi_bus_attach(device, false);
+
+ return 0;
}
EXPORT_SYMBOL(acpi_bus_scan);
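
On the supplier side, a driver for one of the _DEP targets signals readiness
by walking the dependency list once its services are available; consumers
whose dep_unmet count drops to zero are then attached. A sketch (driver
context illustrative):

	static int example_supplier_probe(struct platform_device *pdev)
	{
		struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);

		if (!adev)
			return -ENODEV;

		/* ... bring the services consumers depend on online ... */

		/* Decrement dep_unmet on all consumers; attach those at zero. */
		acpi_walk_dep_device_list(adev->handle);
		return 0;
	}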
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index aff13bf4d947..09fd13757b65 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -92,10 +92,6 @@ bool acpi_sleep_state_supported(u8 sleep_state)
}
#ifdef CONFIG_ACPI_SLEEP
-static bool sleep_no_lps0 __read_mostly;
-module_param(sleep_no_lps0, bool, 0644);
-MODULE_PARM_DESC(sleep_no_lps0, "Do not use the special LPS0 device interface");
-
static u32 acpi_target_sleep_state = ACPI_STATE_S0;
u32 acpi_target_system_state(void)
@@ -165,7 +161,7 @@ static int __init init_nvs_nosave(const struct dmi_system_id *d)
return 0;
}
-static bool acpi_sleep_default_s3;
+bool acpi_sleep_default_s3;
static int __init init_default_s3(const struct dmi_system_id *d)
{
@@ -688,268 +684,13 @@ static const struct platform_suspend_ops acpi_suspend_ops_old = {
static bool s2idle_wakeup;
-/*
- * On platforms supporting the Low Power S0 Idle interface there is an ACPI
- * device object with the PNP0D80 compatible device ID (System Power Management
- * Controller) and a specific _DSM method under it. That method, if present,
- * can be used to indicate to the platform that the OS is transitioning into a
- * low-power state in which certain types of activity are not desirable or that
- * it is leaving such a state, which allows the platform to adjust its operation
- * mode accordingly.
- */
-static const struct acpi_device_id lps0_device_ids[] = {
- {"PNP0D80", },
- {"", },
-};
-
-#define ACPI_LPS0_DSM_UUID "c4eb40a0-6cd2-11e2-bcfd-0800200c9a66"
-
-#define ACPI_LPS0_GET_DEVICE_CONSTRAINTS 1
-#define ACPI_LPS0_SCREEN_OFF 3
-#define ACPI_LPS0_SCREEN_ON 4
-#define ACPI_LPS0_ENTRY 5
-#define ACPI_LPS0_EXIT 6
-
-static acpi_handle lps0_device_handle;
-static guid_t lps0_dsm_guid;
-static char lps0_dsm_func_mask;
-
-/* Device constraint entry structure */
-struct lpi_device_info {
- char *name;
- int enabled;
- union acpi_object *package;
-};
-
-/* Constraint package structure */
-struct lpi_device_constraint {
- int uid;
- int min_dstate;
- int function_states;
-};
-
-struct lpi_constraints {
- acpi_handle handle;
- int min_dstate;
-};
-
-static struct lpi_constraints *lpi_constraints_table;
-static int lpi_constraints_table_size;
-
-static void lpi_device_get_constraints(void)
-{
- union acpi_object *out_obj;
- int i;
-
- out_obj = acpi_evaluate_dsm_typed(lps0_device_handle, &lps0_dsm_guid,
- 1, ACPI_LPS0_GET_DEVICE_CONSTRAINTS,
- NULL, ACPI_TYPE_PACKAGE);
-
- acpi_handle_debug(lps0_device_handle, "_DSM function 1 eval %s\n",
- out_obj ? "successful" : "failed");
-
- if (!out_obj)
- return;
-
- lpi_constraints_table = kcalloc(out_obj->package.count,
- sizeof(*lpi_constraints_table),
- GFP_KERNEL);
- if (!lpi_constraints_table)
- goto free_acpi_buffer;
-
- acpi_handle_debug(lps0_device_handle, "LPI: constraints list begin:\n");
-
- for (i = 0; i < out_obj->package.count; i++) {
- struct lpi_constraints *constraint;
- acpi_status status;
- union acpi_object *package = &out_obj->package.elements[i];
- struct lpi_device_info info = { };
- int package_count = 0, j;
-
- if (!package)
- continue;
-
- for (j = 0; j < package->package.count; ++j) {
- union acpi_object *element =
- &(package->package.elements[j]);
-
- switch (element->type) {
- case ACPI_TYPE_INTEGER:
- info.enabled = element->integer.value;
- break;
- case ACPI_TYPE_STRING:
- info.name = element->string.pointer;
- break;
- case ACPI_TYPE_PACKAGE:
- package_count = element->package.count;
- info.package = element->package.elements;
- break;
- }
- }
-
- if (!info.enabled || !info.package || !info.name)
- continue;
-
- constraint = &lpi_constraints_table[lpi_constraints_table_size];
-
- status = acpi_get_handle(NULL, info.name, &constraint->handle);
- if (ACPI_FAILURE(status))
- continue;
-
- acpi_handle_debug(lps0_device_handle,
- "index:%d Name:%s\n", i, info.name);
-
- constraint->min_dstate = -1;
-
- for (j = 0; j < package_count; ++j) {
- union acpi_object *info_obj = &info.package[j];
- union acpi_object *cnstr_pkg;
- union acpi_object *obj;
- struct lpi_device_constraint dev_info;
-
- switch (info_obj->type) {
- case ACPI_TYPE_INTEGER:
- /* version */
- break;
- case ACPI_TYPE_PACKAGE:
- if (info_obj->package.count < 2)
- break;
-
- cnstr_pkg = info_obj->package.elements;
- obj = &cnstr_pkg[0];
- dev_info.uid = obj->integer.value;
- obj = &cnstr_pkg[1];
- dev_info.min_dstate = obj->integer.value;
-
- acpi_handle_debug(lps0_device_handle,
- "uid:%d min_dstate:%s\n",
- dev_info.uid,
- acpi_power_state_string(dev_info.min_dstate));
-
- constraint->min_dstate = dev_info.min_dstate;
- break;
- }
- }
-
- if (constraint->min_dstate < 0) {
- acpi_handle_debug(lps0_device_handle,
- "Incomplete constraint defined\n");
- continue;
- }
-
- lpi_constraints_table_size++;
- }
-
- acpi_handle_debug(lps0_device_handle, "LPI: constraints list end\n");
-
-free_acpi_buffer:
- ACPI_FREE(out_obj);
-}
-
-static void lpi_check_constraints(void)
-{
- int i;
-
- for (i = 0; i < lpi_constraints_table_size; ++i) {
- acpi_handle handle = lpi_constraints_table[i].handle;
- struct acpi_device *adev;
-
- if (!handle || acpi_bus_get_device(handle, &adev))
- continue;
-
- acpi_handle_debug(handle,
- "LPI: required min power state:%s current power state:%s\n",
- acpi_power_state_string(lpi_constraints_table[i].min_dstate),
- acpi_power_state_string(adev->power.state));
-
- if (!adev->flags.power_manageable) {
- acpi_handle_info(handle, "LPI: Device not power manageable\n");
- lpi_constraints_table[i].handle = NULL;
- continue;
- }
-
- if (adev->power.state < lpi_constraints_table[i].min_dstate)
- acpi_handle_info(handle,
- "LPI: Constraint not met; min power state:%s current power state:%s\n",
- acpi_power_state_string(lpi_constraints_table[i].min_dstate),
- acpi_power_state_string(adev->power.state));
- }
-}
-
-static void acpi_sleep_run_lps0_dsm(unsigned int func)
-{
- union acpi_object *out_obj;
-
- if (!(lps0_dsm_func_mask & (1 << func)))
- return;
-
- out_obj = acpi_evaluate_dsm(lps0_device_handle, &lps0_dsm_guid, 1, func, NULL);
- ACPI_FREE(out_obj);
-
- acpi_handle_debug(lps0_device_handle, "_DSM function %u evaluation %s\n",
- func, out_obj ? "successful" : "failed");
-}
-
-static int lps0_device_attach(struct acpi_device *adev,
- const struct acpi_device_id *not_used)
-{
- union acpi_object *out_obj;
-
- if (lps0_device_handle)
- return 0;
-
- if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0))
- return 0;
-
- guid_parse(ACPI_LPS0_DSM_UUID, &lps0_dsm_guid);
- /* Check if the _DSM is present and as expected. */
- out_obj = acpi_evaluate_dsm(adev->handle, &lps0_dsm_guid, 1, 0, NULL);
- if (!out_obj || out_obj->type != ACPI_TYPE_BUFFER) {
- acpi_handle_debug(adev->handle,
- "_DSM function 0 evaluation failed\n");
- return 0;
- }
-
- lps0_dsm_func_mask = *(char *)out_obj->buffer.pointer;
-
- ACPI_FREE(out_obj);
-
- acpi_handle_debug(adev->handle, "_DSM function mask: 0x%x\n",
- lps0_dsm_func_mask);
-
- lps0_device_handle = adev->handle;
-
- lpi_device_get_constraints();
-
- /*
- * Use suspend-to-idle by default if the default suspend mode was not
- * set from the command line.
- */
- if (mem_sleep_default > PM_SUSPEND_MEM && !acpi_sleep_default_s3)
- mem_sleep_current = PM_SUSPEND_TO_IDLE;
-
- /*
- * Some LPS0 systems, like ASUS Zenbook UX430UNR/i7-8550U, require the
- * EC GPE to be enabled while suspended for certain wakeup devices to
- * work, so mark it as wakeup-capable.
- */
- acpi_ec_mark_gpe_for_wake();
-
- return 0;
-}
-
-static struct acpi_scan_handler lps0_handler = {
- .ids = lps0_device_ids,
- .attach = lps0_device_attach,
-};
-
-static int acpi_s2idle_begin(void)
+int acpi_s2idle_begin(void)
{
acpi_scan_lock_acquire();
return 0;
}
-static int acpi_s2idle_prepare(void)
+int acpi_s2idle_prepare(void)
{
if (acpi_sci_irq_valid()) {
enable_irq_wake(acpi_sci_irq);
@@ -966,21 +707,7 @@ static int acpi_s2idle_prepare(void)
return 0;
}
-static int acpi_s2idle_prepare_late(void)
-{
- if (!lps0_device_handle || sleep_no_lps0)
- return 0;
-
- if (pm_debug_messages_on)
- lpi_check_constraints();
-
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF);
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY);
-
- return 0;
-}
-
-static bool acpi_s2idle_wake(void)
+bool acpi_s2idle_wake(void)
{
if (!acpi_sci_irq_valid())
return pm_wakeup_pending();
@@ -1046,16 +773,7 @@ static bool acpi_s2idle_wake(void)
return false;
}
-static void acpi_s2idle_restore_early(void)
-{
- if (!lps0_device_handle || sleep_no_lps0)
- return;
-
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT);
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON);
-}
-
-static void acpi_s2idle_restore(void)
+void acpi_s2idle_restore(void)
{
/*
* Drain pending events before restoring the working-state configuration
@@ -1077,7 +795,7 @@ static void acpi_s2idle_restore(void)
}
}
-static void acpi_s2idle_end(void)
+void acpi_s2idle_end(void)
{
acpi_scan_lock_release();
}
@@ -1085,13 +803,16 @@ static void acpi_s2idle_end(void)
static const struct platform_s2idle_ops acpi_s2idle_ops = {
.begin = acpi_s2idle_begin,
.prepare = acpi_s2idle_prepare,
- .prepare_late = acpi_s2idle_prepare_late,
.wake = acpi_s2idle_wake,
- .restore_early = acpi_s2idle_restore_early,
.restore = acpi_s2idle_restore,
.end = acpi_s2idle_end,
};
+void __weak acpi_s2idle_setup(void)
+{
+ s2idle_set_ops(&acpi_s2idle_ops);
+}
+
static void acpi_sleep_suspend_setup(void)
{
int i;
@@ -1103,13 +824,11 @@ static void acpi_sleep_suspend_setup(void)
suspend_set_ops(old_suspend_ordering ?
&acpi_suspend_ops_old : &acpi_suspend_ops);
- acpi_scan_add_handler(&lps0_handler);
- s2idle_set_ops(&acpi_s2idle_ops);
+ acpi_s2idle_setup();
}
#else /* !CONFIG_SUSPEND */
#define s2idle_wakeup (false)
-#define lps0_device_handle (NULL)
static inline void acpi_sleep_suspend_setup(void) {}
#endif /* !CONFIG_SUSPEND */
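
The __weak definition lets an architecture-specific file (the new
drivers/acpi/x86/s2idle.c in this series) override the hook at link time
with a strong symbol that registers extended callbacks. A sketch of the
override side (the ops name is an assumption for illustration):

	/* In an arch-specific file: the strong symbol replaces the weak one. */
	void acpi_s2idle_setup(void)
	{
		/* Register ops that add the LPS0 _DSM prepare/restore steps. */
		s2idle_set_ops(&acpi_s2idle_ops_lps0);
	}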
diff --git a/drivers/acpi/sleep.h b/drivers/acpi/sleep.h
index 3d90480ce1b1..1856f76ac83f 100644
--- a/drivers/acpi/sleep.h
+++ b/drivers/acpi/sleep.h
@@ -15,3 +15,19 @@ static inline acpi_status acpi_set_waking_vector(u32 wakeup_address)
return acpi_set_firmware_waking_vector(
(acpi_physical_address)wakeup_address, 0);
}
+
+extern int acpi_s2idle_begin(void);
+extern int acpi_s2idle_prepare(void);
+extern int acpi_s2idle_prepare_late(void);
+extern bool acpi_s2idle_wake(void);
+extern void acpi_s2idle_restore_early(void);
+extern void acpi_s2idle_restore(void);
+extern void acpi_s2idle_end(void);
+
+extern void acpi_s2idle_setup(void);
+
+#ifdef CONFIG_ACPI_SLEEP
+extern bool acpi_sleep_default_s3;
+#else
+#define acpi_sleep_default_s3 (1)
+#endif
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 12c0ece746f0..859b1de31ddc 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -174,6 +174,8 @@ struct acpi_thermal {
struct thermal_zone_device *thermal_zone;
int kelvin_offset; /* in millidegrees */
struct work_struct thermal_check_work;
+ struct mutex thermal_check_lock;
+ refcount_t thermal_check_count;
};
/* --------------------------------------------------------------------------
@@ -495,14 +497,6 @@ static int acpi_thermal_get_trip_points(struct acpi_thermal *tz)
return 0;
}
-static void acpi_thermal_check(void *data)
-{
- struct acpi_thermal *tz = data;
-
- thermal_zone_device_update(tz->thermal_zone,
- THERMAL_EVENT_UNSPECIFIED);
-}
-
/* sys I/F for generic thermal sysfs support */
static int thermal_get_temp(struct thermal_zone_device *thermal, int *temp)
@@ -900,6 +894,12 @@ static void acpi_thermal_unregister_thermal_zone(struct acpi_thermal *tz)
Driver Interface
-------------------------------------------------------------------------- */
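+/*
+ * Queue a thermal check unless one is already pending; concurrent runs of the
+ * check are serialized by acpi_thermal_check_fn() itself.
+ */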
+static void acpi_queue_thermal_check(struct acpi_thermal *tz)
+{
+ if (!work_pending(&tz->thermal_check_work))
+ queue_work(acpi_thermal_pm_queue, &tz->thermal_check_work);
+}
+
static void acpi_thermal_notify(struct acpi_device *device, u32 event)
{
struct acpi_thermal *tz = acpi_driver_data(device);
@@ -910,17 +910,17 @@ static void acpi_thermal_notify(struct acpi_device *device, u32 event)
switch (event) {
case ACPI_THERMAL_NOTIFY_TEMPERATURE:
- acpi_thermal_check(tz);
+ acpi_queue_thermal_check(tz);
break;
case ACPI_THERMAL_NOTIFY_THRESHOLDS:
acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_THRESHOLDS);
- acpi_thermal_check(tz);
+ acpi_queue_thermal_check(tz);
acpi_bus_generate_netlink_event(device->pnp.device_class,
dev_name(&device->dev), event, 0);
break;
case ACPI_THERMAL_NOTIFY_DEVICES:
acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_DEVICES);
- acpi_thermal_check(tz);
+ acpi_queue_thermal_check(tz);
acpi_bus_generate_netlink_event(device->pnp.device_class,
dev_name(&device->dev), event, 0);
break;
@@ -1020,7 +1020,25 @@ static void acpi_thermal_check_fn(struct work_struct *work)
{
struct acpi_thermal *tz = container_of(work, struct acpi_thermal,
thermal_check_work);
- acpi_thermal_check(tz);
+
+ /*
+ * In general, it is not sufficient to check the pending bit, because
+ * subsequent instances of this function may be queued after one of them
+ * has started running (e.g. if _TMP sleeps). Avoid bailing out if just
+ * one of them is running, though, because it may have done the actual
+ * check some time ago, so allow at least one of them to block on the
+ * mutex while another one is running the update.
+ */
+ if (!refcount_dec_not_one(&tz->thermal_check_count))
+ return;
+
+ mutex_lock(&tz->thermal_check_lock);
+
+ thermal_zone_device_update(tz->thermal_zone, THERMAL_EVENT_UNSPECIFIED);
+
+ refcount_inc(&tz->thermal_check_count);
+
+ mutex_unlock(&tz->thermal_check_lock);
}
static int acpi_thermal_add(struct acpi_device *device)
@@ -1052,6 +1070,8 @@ static int acpi_thermal_add(struct acpi_device *device)
if (result)
goto free_memory;
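+	/*
+	 * Start at 3 so that one instance of the check can run the update while
+	 * one more is allowed to block on the mutex (see acpi_thermal_check_fn()).
+	 */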
+ refcount_set(&tz->thermal_check_count, 3);
+ mutex_init(&tz->thermal_check_lock);
INIT_WORK(&tz->thermal_check_work, acpi_thermal_check_fn);
pr_info(PREFIX "%s [%s] (%ld C)\n", acpi_device_name(device),
@@ -1117,7 +1137,7 @@ static int acpi_thermal_resume(struct device *dev)
tz->state.active |= tz->trips.active[i].flags.enabled;
}
- queue_work(acpi_thermal_pm_queue, &tz->thermal_check_work);
+ acpi_queue_thermal_check(tz);
return AE_OK;
}
diff --git a/drivers/acpi/tiny-power-button.c b/drivers/acpi/tiny-power-button.c
index 420e61b8eaae..a19f0e4e69f7 100644
--- a/drivers/acpi/tiny-power-button.c
+++ b/drivers/acpi/tiny-power-button.c
@@ -40,6 +40,4 @@ static struct acpi_driver acpi_tiny_power_button_driver = {
},
};
-module_driver(acpi_tiny_power_button_driver,
- acpi_bus_register_driver,
- acpi_bus_unregister_driver);
+module_acpi_driver(acpi_tiny_power_button_driver);
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 4f5463b2a217..811d298637cb 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -140,6 +140,13 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
},
{
.callback = video_detect_force_vendor,
+ .ident = "GIGABYTE GB-BXBT-2807",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "GB-BXBT-2807"),
+ },
+ },
+ {
.ident = "Sony VPCEH3U1E",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
new file mode 100644
index 000000000000..2b69536cdccb
--- /dev/null
+++ b/drivers/acpi/x86/s2idle.c
@@ -0,0 +1,448 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Architecture-specific ACPI-based support for suspend-to-idle.
+ *
+ * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+ * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ *
+ * On platforms supporting the Low Power S0 Idle interface there is an ACPI
+ * device object with the PNP0D80 compatible device ID (System Power Management
+ * Controller) and a specific _DSM method under it. That method, if present,
+ * can be used to indicate to the platform that the OS is transitioning into a
+ * low-power state in which certain types of activity are not desirable or that
+ * it is leaving such a state, which allows the platform to adjust its operation
+ * mode accordingly.
+ */
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/suspend.h>
+
+#include "../sleep.h"
+
+#ifdef CONFIG_SUSPEND
+
+static bool sleep_no_lps0 __read_mostly;
+module_param(sleep_no_lps0, bool, 0644);
+MODULE_PARM_DESC(sleep_no_lps0, "Do not use the special LPS0 device interface");
+
+static const struct acpi_device_id lps0_device_ids[] = {
+ {"PNP0D80", },
+ {"", },
+};
+
+#define ACPI_LPS0_DSM_UUID "c4eb40a0-6cd2-11e2-bcfd-0800200c9a66"
+
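+/* _DSM function indices for the Low Power S0 Idle device interface */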
+#define ACPI_LPS0_GET_DEVICE_CONSTRAINTS 1
+#define ACPI_LPS0_SCREEN_OFF 3
+#define ACPI_LPS0_SCREEN_ON 4
+#define ACPI_LPS0_ENTRY 5
+#define ACPI_LPS0_EXIT 6
+
+/* AMD */
+#define ACPI_LPS0_DSM_UUID_AMD "e3f32452-febc-43ce-9039-932122d37721"
+#define ACPI_LPS0_SCREEN_OFF_AMD 4
+#define ACPI_LPS0_SCREEN_ON_AMD 5
+
+static acpi_handle lps0_device_handle;
+static guid_t lps0_dsm_guid;
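+/* Bitmask of supported _DSM functions, as returned by _DSM function 0 */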
+static char lps0_dsm_func_mask;
+
+/* Device constraint entry structure */
+struct lpi_device_info {
+ char *name;
+ int enabled;
+ union acpi_object *package;
+};
+
+/* Constraint package structure */
+struct lpi_device_constraint {
+ int uid;
+ int min_dstate;
+ int function_states;
+};
+
+struct lpi_constraints {
+ acpi_handle handle;
+ int min_dstate;
+};
+
+/* AMD */
+/* Device constraint entry structure */
+struct lpi_device_info_amd {
+ int revision;
+ int count;
+ union acpi_object *package;
+};
+
+/* Constraint package structure */
+struct lpi_device_constraint_amd {
+ char *name;
+ int enabled;
+ int function_states;
+ int min_dstate;
+};
+
+static struct lpi_constraints *lpi_constraints_table;
+static int lpi_constraints_table_size;
+static int rev_id;
+
+static void lpi_device_get_constraints_amd(void)
+{
+ union acpi_object *out_obj;
+ int i, j, k;
+
+ out_obj = acpi_evaluate_dsm_typed(lps0_device_handle, &lps0_dsm_guid,
+ 1, ACPI_LPS0_GET_DEVICE_CONSTRAINTS,
+ NULL, ACPI_TYPE_PACKAGE);
+
+	acpi_handle_debug(lps0_device_handle, "_DSM function 1 eval %s\n",
+			  out_obj ? "successful" : "failed");
+
+	if (!out_obj)
+		return;
+
+ for (i = 0; i < out_obj->package.count; i++) {
+ union acpi_object *package = &out_obj->package.elements[i];
+
+ if (package->type == ACPI_TYPE_PACKAGE) {
+ lpi_constraints_table = kcalloc(package->package.count,
+ sizeof(*lpi_constraints_table),
+ GFP_KERNEL);
+
+ if (!lpi_constraints_table)
+ goto free_acpi_buffer;
+
+ acpi_handle_debug(lps0_device_handle,
+ "LPI: constraints list begin:\n");
+
+ for (j = 0; j < package->package.count; ++j) {
+ union acpi_object *info_obj = &package->package.elements[j];
+ struct lpi_device_constraint_amd dev_info = {};
+ struct lpi_constraints *list;
+ acpi_status status;
+
+ for (k = 0; k < info_obj->package.count; ++k) {
+ union acpi_object *obj = &info_obj->package.elements[k];
+
+ list = &lpi_constraints_table[lpi_constraints_table_size];
+ list->min_dstate = -1;
+
+ switch (k) {
+ case 0:
+ dev_info.enabled = obj->integer.value;
+ break;
+ case 1:
+ dev_info.name = obj->string.pointer;
+ break;
+ case 2:
+ dev_info.function_states = obj->integer.value;
+ break;
+ case 3:
+ dev_info.min_dstate = obj->integer.value;
+ break;
+ }
+
+ if (!dev_info.enabled || !dev_info.name ||
+ !dev_info.min_dstate)
+ continue;
+
+ status = acpi_get_handle(NULL, dev_info.name,
+ &list->handle);
+ if (ACPI_FAILURE(status))
+ continue;
+
+ acpi_handle_debug(lps0_device_handle,
+ "Name:%s\n", dev_info.name);
+
+ list->min_dstate = dev_info.min_dstate;
+
+ if (list->min_dstate < 0) {
+ acpi_handle_debug(lps0_device_handle,
+ "Incomplete constraint defined\n");
+ continue;
+ }
+ }
+ lpi_constraints_table_size++;
+ }
+ }
+ }
+
+ acpi_handle_debug(lps0_device_handle, "LPI: constraints list end\n");
+
+free_acpi_buffer:
+ ACPI_FREE(out_obj);
+}
+
+static void lpi_device_get_constraints(void)
+{
+ union acpi_object *out_obj;
+ int i;
+
+ out_obj = acpi_evaluate_dsm_typed(lps0_device_handle, &lps0_dsm_guid,
+ 1, ACPI_LPS0_GET_DEVICE_CONSTRAINTS,
+ NULL, ACPI_TYPE_PACKAGE);
+
+ acpi_handle_debug(lps0_device_handle, "_DSM function 1 eval %s\n",
+ out_obj ? "successful" : "failed");
+
+ if (!out_obj)
+ return;
+
+ lpi_constraints_table = kcalloc(out_obj->package.count,
+ sizeof(*lpi_constraints_table),
+ GFP_KERNEL);
+ if (!lpi_constraints_table)
+ goto free_acpi_buffer;
+
+ acpi_handle_debug(lps0_device_handle, "LPI: constraints list begin:\n");
+
+ for (i = 0; i < out_obj->package.count; i++) {
+ struct lpi_constraints *constraint;
+ acpi_status status;
+ union acpi_object *package = &out_obj->package.elements[i];
+ struct lpi_device_info info = { };
+ int package_count = 0, j;
+
+ if (!package)
+ continue;
+
+ for (j = 0; j < package->package.count; ++j) {
+ union acpi_object *element =
+ &(package->package.elements[j]);
+
+ switch (element->type) {
+ case ACPI_TYPE_INTEGER:
+ info.enabled = element->integer.value;
+ break;
+ case ACPI_TYPE_STRING:
+ info.name = element->string.pointer;
+ break;
+ case ACPI_TYPE_PACKAGE:
+ package_count = element->package.count;
+ info.package = element->package.elements;
+ break;
+ }
+ }
+
+ if (!info.enabled || !info.package || !info.name)
+ continue;
+
+ constraint = &lpi_constraints_table[lpi_constraints_table_size];
+
+ status = acpi_get_handle(NULL, info.name, &constraint->handle);
+ if (ACPI_FAILURE(status))
+ continue;
+
+ acpi_handle_debug(lps0_device_handle,
+ "index:%d Name:%s\n", i, info.name);
+
+ constraint->min_dstate = -1;
+
+ for (j = 0; j < package_count; ++j) {
+ union acpi_object *info_obj = &info.package[j];
+ union acpi_object *cnstr_pkg;
+ union acpi_object *obj;
+ struct lpi_device_constraint dev_info;
+
+ switch (info_obj->type) {
+ case ACPI_TYPE_INTEGER:
+ /* version */
+ break;
+ case ACPI_TYPE_PACKAGE:
+ if (info_obj->package.count < 2)
+ break;
+
+ cnstr_pkg = info_obj->package.elements;
+ obj = &cnstr_pkg[0];
+ dev_info.uid = obj->integer.value;
+ obj = &cnstr_pkg[1];
+ dev_info.min_dstate = obj->integer.value;
+
+ acpi_handle_debug(lps0_device_handle,
+ "uid:%d min_dstate:%s\n",
+ dev_info.uid,
+ acpi_power_state_string(dev_info.min_dstate));
+
+ constraint->min_dstate = dev_info.min_dstate;
+ break;
+ }
+ }
+
+ if (constraint->min_dstate < 0) {
+ acpi_handle_debug(lps0_device_handle,
+ "Incomplete constraint defined\n");
+ continue;
+ }
+
+ lpi_constraints_table_size++;
+ }
+
+ acpi_handle_debug(lps0_device_handle, "LPI: constraints list end\n");
+
+free_acpi_buffer:
+ ACPI_FREE(out_obj);
+}
+
+static void lpi_check_constraints(void)
+{
+ int i;
+
+ for (i = 0; i < lpi_constraints_table_size; ++i) {
+ acpi_handle handle = lpi_constraints_table[i].handle;
+ struct acpi_device *adev;
+
+ if (!handle || acpi_bus_get_device(handle, &adev))
+ continue;
+
+ acpi_handle_debug(handle,
+ "LPI: required min power state:%s current power state:%s\n",
+ acpi_power_state_string(lpi_constraints_table[i].min_dstate),
+ acpi_power_state_string(adev->power.state));
+
+ if (!adev->flags.power_manageable) {
+ acpi_handle_info(handle, "LPI: Device not power manageable\n");
+ lpi_constraints_table[i].handle = NULL;
+ continue;
+ }
+
+ if (adev->power.state < lpi_constraints_table[i].min_dstate)
+ acpi_handle_info(handle,
+ "LPI: Constraint not met; min power state:%s current power state:%s\n",
+ acpi_power_state_string(lpi_constraints_table[i].min_dstate),
+ acpi_power_state_string(adev->power.state));
+ }
+}
+
+static void acpi_sleep_run_lps0_dsm(unsigned int func)
+{
+ union acpi_object *out_obj;
+
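+	/* Skip functions that the platform does not advertise as supported */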
+ if (!(lps0_dsm_func_mask & (1 << func)))
+ return;
+
+ out_obj = acpi_evaluate_dsm(lps0_device_handle, &lps0_dsm_guid, rev_id, func, NULL);
+ ACPI_FREE(out_obj);
+
+ acpi_handle_debug(lps0_device_handle, "_DSM function %u evaluation %s\n",
+ func, out_obj ? "successful" : "failed");
+}
+
+static bool acpi_s2idle_vendor_amd(void)
+{
+ return boot_cpu_data.x86_vendor == X86_VENDOR_AMD;
+}
+
+static int lps0_device_attach(struct acpi_device *adev,
+ const struct acpi_device_id *not_used)
+{
+ union acpi_object *out_obj;
+
+ if (lps0_device_handle)
+ return 0;
+
+ if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0))
+ return 0;
+
+ if (acpi_s2idle_vendor_amd()) {
+ guid_parse(ACPI_LPS0_DSM_UUID_AMD, &lps0_dsm_guid);
+ out_obj = acpi_evaluate_dsm(adev->handle, &lps0_dsm_guid, 0, 0, NULL);
+ rev_id = 0;
+ } else {
+ guid_parse(ACPI_LPS0_DSM_UUID, &lps0_dsm_guid);
+ out_obj = acpi_evaluate_dsm(adev->handle, &lps0_dsm_guid, 1, 0, NULL);
+ rev_id = 1;
+ }
+
+ /* Check if the _DSM is present and as expected. */
+ if (!out_obj || out_obj->type != ACPI_TYPE_BUFFER) {
+ acpi_handle_debug(adev->handle,
+ "_DSM function 0 evaluation failed\n");
+ return 0;
+ }
+
+ lps0_dsm_func_mask = *(char *)out_obj->buffer.pointer;
+
+ ACPI_FREE(out_obj);
+
+ acpi_handle_debug(adev->handle, "_DSM function mask: 0x%x\n",
+ lps0_dsm_func_mask);
+
+ lps0_device_handle = adev->handle;
+
+ if (acpi_s2idle_vendor_amd())
+ lpi_device_get_constraints_amd();
+ else
+ lpi_device_get_constraints();
+
+ /*
+ * Use suspend-to-idle by default if the default suspend mode was not
+ * set from the command line.
+ */
+ if (mem_sleep_default > PM_SUSPEND_MEM && !acpi_sleep_default_s3)
+ mem_sleep_current = PM_SUSPEND_TO_IDLE;
+
+ /*
+ * Some LPS0 systems, like ASUS Zenbook UX430UNR/i7-8550U, require the
+ * EC GPE to be enabled while suspended for certain wakeup devices to
+ * work, so mark it as wakeup-capable.
+ */
+ acpi_ec_mark_gpe_for_wake();
+
+ return 0;
+}
+
+static struct acpi_scan_handler lps0_handler = {
+ .ids = lps0_device_ids,
+ .attach = lps0_device_attach,
+};
+
+int acpi_s2idle_prepare_late(void)
+{
+ if (!lps0_device_handle || sleep_no_lps0)
+ return 0;
+
+ if (pm_debug_messages_on)
+ lpi_check_constraints();
+
+ if (acpi_s2idle_vendor_amd()) {
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF_AMD);
+ } else {
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF);
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY);
+ }
+
+ return 0;
+}
+
+void acpi_s2idle_restore_early(void)
+{
+ if (!lps0_device_handle || sleep_no_lps0)
+ return;
+
+ if (acpi_s2idle_vendor_amd()) {
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON_AMD);
+ } else {
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT);
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON);
+ }
+}
+
+static const struct platform_s2idle_ops acpi_s2idle_ops_lps0 = {
+ .begin = acpi_s2idle_begin,
+ .prepare = acpi_s2idle_prepare,
+ .prepare_late = acpi_s2idle_prepare_late,
+ .wake = acpi_s2idle_wake,
+ .restore_early = acpi_s2idle_restore_early,
+ .restore = acpi_s2idle_restore,
+ .end = acpi_s2idle_end,
+};
+
+void acpi_s2idle_setup(void)
+{
+ acpi_scan_add_handler(&lps0_handler);
+ s2idle_set_ops(&acpi_s2idle_ops_lps0);
+}
+
+#endif /* CONFIG_SUSPEND */
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 1338209f9f86..c119736ca56a 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -1836,7 +1836,7 @@ static void binder_deferred_fd_close(int fd)
if (!twcb)
return;
init_task_work(&twcb->twork, binder_do_fd_close);
- __close_fd_get_file(fd, &twcb->file);
+ close_fd_get_file(fd, &twcb->file);
if (twcb->file) {
filp_close(twcb->file, current->files);
task_work_add(current, &twcb->twork, TWA_RESUME);
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 65a3886f68c9..5f0472c18bcb 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -3607,7 +3607,7 @@ static int idt77252_init_one(struct pci_dev *pcidev,
if ((err = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32)))) {
printk("idt77252: can't enable DMA for PCI device at %s\n", pci_name(pcidev));
- return err;
+ goto err_out_disable_pdev;
}
card = kzalloc(sizeof(struct idt77252_dev), GFP_KERNEL);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 25e08e5f40bd..6eb4c7a904c5 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -208,6 +208,16 @@ int device_links_read_lock_held(void)
#endif
#endif /* !CONFIG_SRCU */
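+/* Return true if @dev is an ancestor of @target in the device hierarchy. */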
+static bool device_is_ancestor(struct device *dev, struct device *target)
+{
+ while (target->parent) {
+ target = target->parent;
+ if (dev == target)
+ return true;
+ }
+ return false;
+}
+
/**
* device_is_dependent - Check if one device depends on another one
* @dev: Device to check dependencies for.
@@ -221,7 +231,12 @@ int device_is_dependent(struct device *dev, void *target)
struct device_link *link;
int ret;
- if (dev == target)
+ /*
+ * The "ancestors" check is needed to catch the case when the target
+ * device has not been completely initialized yet and it is still
+ * missing from the list of children of its parent device.
+ */
+ if (dev == target || device_is_ancestor(dev, target))
return 1;
ret = device_for_each_child(dev, target, device_is_dependent);
@@ -456,7 +471,9 @@ static int devlink_add_symlinks(struct device *dev,
struct device *con = link->consumer;
char *buf;
- len = max(strlen(dev_name(sup)), strlen(dev_name(con)));
+ len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)),
+ strlen(dev_bus_name(con)) + strlen(dev_name(con)));
+ len += strlen(":");
len += strlen("supplier:") + 1;
buf = kzalloc(len, GFP_KERNEL);
if (!buf)
@@ -470,12 +487,12 @@ static int devlink_add_symlinks(struct device *dev,
if (ret)
goto err_con;
- snprintf(buf, len, "consumer:%s", dev_name(con));
+ snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
ret = sysfs_create_link(&sup->kobj, &link->link_dev.kobj, buf);
if (ret)
goto err_con_dev;
- snprintf(buf, len, "supplier:%s", dev_name(sup));
+ snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
ret = sysfs_create_link(&con->kobj, &link->link_dev.kobj, buf);
if (ret)
goto err_sup_dev;
@@ -483,7 +500,7 @@ static int devlink_add_symlinks(struct device *dev,
goto out;
err_sup_dev:
- snprintf(buf, len, "consumer:%s", dev_name(con));
+ snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
sysfs_remove_link(&sup->kobj, buf);
err_con_dev:
sysfs_remove_link(&link->link_dev.kobj, "consumer");
@@ -506,7 +523,9 @@ static void devlink_remove_symlinks(struct device *dev,
sysfs_remove_link(&link->link_dev.kobj, "consumer");
sysfs_remove_link(&link->link_dev.kobj, "supplier");
- len = max(strlen(dev_name(sup)), strlen(dev_name(con)));
+ len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)),
+ strlen(dev_bus_name(con)) + strlen(dev_name(con)));
+ len += strlen(":");
len += strlen("supplier:") + 1;
buf = kzalloc(len, GFP_KERNEL);
if (!buf) {
@@ -514,9 +533,9 @@ static void devlink_remove_symlinks(struct device *dev,
return;
}
- snprintf(buf, len, "supplier:%s", dev_name(sup));
+ snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
sysfs_remove_link(&con->kobj, buf);
- snprintf(buf, len, "consumer:%s", dev_name(con));
+ snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
sysfs_remove_link(&sup->kobj, buf);
kfree(buf);
}
@@ -737,8 +756,9 @@ struct device_link *device_link_add(struct device *consumer,
link->link_dev.class = &devlink_class;
device_set_pm_not_required(&link->link_dev);
- dev_set_name(&link->link_dev, "%s--%s",
- dev_name(supplier), dev_name(consumer));
+ dev_set_name(&link->link_dev, "%s:%s--%s:%s",
+ dev_bus_name(supplier), dev_name(supplier),
+ dev_bus_name(consumer), dev_name(consumer));
if (device_register(&link->link_dev)) {
put_device(consumer);
put_device(supplier);
@@ -1808,9 +1828,7 @@ const char *dev_driver_string(const struct device *dev)
* never change once they are set, so they don't need special care.
*/
drv = READ_ONCE(dev->driver);
- return drv ? drv->name :
- (dev->bus ? dev->bus->name :
- (dev->class ? dev->class->name : ""));
+ return drv ? drv->name : dev_bus_name(dev);
}
EXPORT_SYMBOL(dev_driver_string);
@@ -4414,6 +4432,12 @@ static inline bool fwnode_is_primary(struct fwnode_handle *fwnode)
*
* Set the device's firmware node pointer to @fwnode, but if a secondary
* firmware node of the device is present, preserve it.
+ *
+ * Valid fwnode cases are:
+ * - primary --> secondary --> -ENODEV
+ * - primary --> NULL
+ * - secondary --> -ENODEV
+ * - NULL
*/
void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
{
@@ -4432,8 +4456,9 @@ void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
} else {
if (fwnode_is_primary(fn)) {
dev->fwnode = fn->secondary;
+ /* Set fn->secondary = NULL, so fn remains the primary fwnode */
if (!(parent && fn == parent->fwnode))
- fn->secondary = ERR_PTR(-ENODEV);
+ fn->secondary = NULL;
} else {
dev->fwnode = NULL;
}
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 2f32f38a11ed..9179825ff646 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -371,13 +371,6 @@ static void driver_bound(struct device *dev)
device_pm_check_callbacks(dev);
/*
- * Reorder successfully probed devices to the end of the device list.
- * This ensures that suspend/resume order matches probe order, which
- * is usually what drivers rely on.
- */
- device_pm_move_to_tail(dev);
-
- /*
* Make sure the device is no longer in one of the deferred lists and
* kick off retrying all pending devices
*/
@@ -619,6 +612,8 @@ dev_groups_failed:
else if (drv->remove)
drv->remove(dev);
probe_failed:
+ kfree(dev->dma_range_map);
+ dev->dma_range_map = NULL;
if (dev->bus)
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 95fd1549f87d..8456d8384ac8 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -366,6 +366,8 @@ int devm_platform_get_irqs_affinity(struct platform_device *dev,
return -ERANGE;
nvec = platform_irq_count(dev);
+ if (nvec < 0)
+ return nvec;
if (nvec < minvec)
return -ENOSPC;
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index ced6863a16a5..84d5acb6301b 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -23,6 +23,7 @@
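+/*
+ * A clock entry moves NONE -> ACQUIRED (clk_get() done; clk_prepare() deferred
+ * for clocks that are enabled when prepared) -> PREPARED -> ENABLED.
+ */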
enum pce_status {
PCE_STATUS_NONE = 0,
PCE_STATUS_ACQUIRED,
+ PCE_STATUS_PREPARED,
PCE_STATUS_ENABLED,
PCE_STATUS_ERROR,
};
@@ -32,9 +33,113 @@ struct pm_clock_entry {
char *con_id;
struct clk *clk;
enum pce_status status;
+ bool enabled_when_prepared;
};
/**
+ * pm_clk_list_lock - ensure exclusive access for modifying the PM clock
+ * entry list.
+ * @psd: pm_subsys_data instance corresponding to the PM clock entry list
+ *	and clock_op_might_sleep count to be modified.
+ *
+ * Get exclusive access before modifying the PM clock entry list and the
+ * clock_op_might_sleep count to guard against concurrent modifications.
+ * This also protects against concurrent use of clock_op_might_sleep and the
+ * PM clock entry list by pm_clk_suspend()/pm_clk_resume(), which may or may
+ * not happen in atomic context, hence both the mutex and the spinlock must be
+ * taken here.
+ */
+static void pm_clk_list_lock(struct pm_subsys_data *psd)
+ __acquires(&psd->lock)
+{
+ mutex_lock(&psd->clock_mutex);
+ spin_lock_irq(&psd->lock);
+}
+
+/**
+ * pm_clk_list_unlock - counterpart to pm_clk_list_lock().
+ * @psd: the same pm_subsys_data instance previously passed to
+ * pm_clk_list_lock().
+ */
+static void pm_clk_list_unlock(struct pm_subsys_data *psd)
+ __releases(&psd->lock)
+{
+ spin_unlock_irq(&psd->lock);
+ mutex_unlock(&psd->clock_mutex);
+}
+
+/**
+ * pm_clk_op_lock - ensure exclusive access for performing clock operations.
+ * @psd: pm_subsys_data instance corresponding to the PM clock entry list
+ *	 and clock_op_might_sleep count being used.
+ * @flags: stored irq flags.
+ * @fn: string for the caller function's name.
+ *
+ * This is used by pm_clk_suspend() and pm_clk_resume() to guard
+ * against concurrent modifications to the clock entry list and the
+ * clock_op_might_sleep count. If clock_op_might_sleep is != 0 then
+ * only the mutex can be locked and those functions can only be used in
+ * non-atomic context. If clock_op_might_sleep == 0 then these functions
+ * may be used in any context and only the spinlock can be locked.
+ * Returns -EPERM if called in atomic context when clock ops might sleep.
+ */
+static int pm_clk_op_lock(struct pm_subsys_data *psd, unsigned long *flags,
+ const char *fn)
+ /* sparse annotations don't work here as exit state isn't static */
+{
+ bool atomic_context = in_atomic() || irqs_disabled();
+
+try_again:
+ spin_lock_irqsave(&psd->lock, *flags);
+ if (!psd->clock_op_might_sleep) {
+ /* the __release is there to work around sparse limitations */
+ __release(&psd->lock);
+ return 0;
+ }
+
+ /* bail out if in atomic context */
+ if (atomic_context) {
+		pr_err("%s: atomic context with clock_op_might_sleep = %d\n",
+ fn, psd->clock_op_might_sleep);
+ spin_unlock_irqrestore(&psd->lock, *flags);
+ might_sleep();
+ return -EPERM;
+ }
+
+ /* we must switch to the mutex */
+ spin_unlock_irqrestore(&psd->lock, *flags);
+ mutex_lock(&psd->clock_mutex);
+
+ /*
+	 * psd->clock_op_might_sleep may have become 0 while we were switching
+	 * locks above. Keep the mutex only if that is not the case.
+ */
+ if (likely(psd->clock_op_might_sleep))
+ return 0;
+
+ mutex_unlock(&psd->clock_mutex);
+ goto try_again;
+}
+
+/**
+ * pm_clk_op_unlock - counterpart to pm_clk_op_lock().
+ * @psd: the same pm_subsys_data instance previously passed to
+ * pm_clk_op_lock().
+ * @flags: irq flags provided by pm_clk_op_lock().
+ */
+static void pm_clk_op_unlock(struct pm_subsys_data *psd, unsigned long *flags)
+ /* sparse annotations don't work here as entry state isn't static */
+{
+ if (psd->clock_op_might_sleep) {
+ mutex_unlock(&psd->clock_mutex);
+ } else {
+ /* the __acquire is there to work around sparse limitations */
+ __acquire(&psd->lock);
+ spin_unlock_irqrestore(&psd->lock, *flags);
+ }
+}
+
+/**
* pm_clk_enable - Enable a clock, reporting any errors
* @dev: The device for the given clock
* @ce: PM clock entry corresponding to the clock.
@@ -43,14 +148,21 @@ static inline void __pm_clk_enable(struct device *dev, struct pm_clock_entry *ce
{
int ret;
- if (ce->status < PCE_STATUS_ERROR) {
+ switch (ce->status) {
+ case PCE_STATUS_ACQUIRED:
+ ret = clk_prepare_enable(ce->clk);
+ break;
+ case PCE_STATUS_PREPARED:
ret = clk_enable(ce->clk);
- if (!ret)
- ce->status = PCE_STATUS_ENABLED;
- else
- dev_err(dev, "%s: failed to enable clk %p, error %d\n",
- __func__, ce->clk, ret);
+ break;
+ default:
+ return;
}
+ if (!ret)
+ ce->status = PCE_STATUS_ENABLED;
+ else
+ dev_err(dev, "%s: failed to enable clk %p, error %d\n",
+ __func__, ce->clk, ret);
}
/**
@@ -64,17 +176,20 @@ static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce)
ce->clk = clk_get(dev, ce->con_id);
if (IS_ERR(ce->clk)) {
ce->status = PCE_STATUS_ERROR;
+ return;
+ } else if (clk_is_enabled_when_prepared(ce->clk)) {
+ /* we defer preparing the clock in that case */
+ ce->status = PCE_STATUS_ACQUIRED;
+ ce->enabled_when_prepared = true;
+ } else if (clk_prepare(ce->clk)) {
+ ce->status = PCE_STATUS_ERROR;
+ dev_err(dev, "clk_prepare() failed\n");
+ return;
} else {
- if (clk_prepare(ce->clk)) {
- ce->status = PCE_STATUS_ERROR;
- dev_err(dev, "clk_prepare() failed\n");
- } else {
- ce->status = PCE_STATUS_ACQUIRED;
- dev_dbg(dev,
- "Clock %pC con_id %s managed by runtime PM.\n",
- ce->clk, ce->con_id);
- }
+ ce->status = PCE_STATUS_PREPARED;
}
+ dev_dbg(dev, "Clock %pC con_id %s managed by runtime PM.\n",
+ ce->clk, ce->con_id);
}
static int __pm_clk_add(struct device *dev, const char *con_id,
@@ -106,9 +221,11 @@ static int __pm_clk_add(struct device *dev, const char *con_id,
pm_clk_acquire(dev, ce);
- spin_lock_irq(&psd->lock);
+ pm_clk_list_lock(psd);
list_add_tail(&ce->node, &psd->clock_list);
- spin_unlock_irq(&psd->lock);
+ if (ce->enabled_when_prepared)
+ psd->clock_op_might_sleep++;
+ pm_clk_list_unlock(psd);
return 0;
}
@@ -239,14 +356,20 @@ static void __pm_clk_remove(struct pm_clock_entry *ce)
if (!ce)
return;
- if (ce->status < PCE_STATUS_ERROR) {
- if (ce->status == PCE_STATUS_ENABLED)
- clk_disable(ce->clk);
-
- if (ce->status >= PCE_STATUS_ACQUIRED) {
- clk_unprepare(ce->clk);
+ switch (ce->status) {
+ case PCE_STATUS_ENABLED:
+ clk_disable(ce->clk);
+ fallthrough;
+ case PCE_STATUS_PREPARED:
+ clk_unprepare(ce->clk);
+ fallthrough;
+ case PCE_STATUS_ACQUIRED:
+ case PCE_STATUS_ERROR:
+ if (!IS_ERR(ce->clk))
clk_put(ce->clk);
- }
+ break;
+ default:
+ break;
}
kfree(ce->con_id);
@@ -269,7 +392,7 @@ void pm_clk_remove(struct device *dev, const char *con_id)
if (!psd)
return;
- spin_lock_irq(&psd->lock);
+ pm_clk_list_lock(psd);
list_for_each_entry(ce, &psd->clock_list, node) {
if (!con_id && !ce->con_id)
@@ -280,12 +403,14 @@ void pm_clk_remove(struct device *dev, const char *con_id)
goto remove;
}
- spin_unlock_irq(&psd->lock);
+ pm_clk_list_unlock(psd);
return;
remove:
list_del(&ce->node);
- spin_unlock_irq(&psd->lock);
+ if (ce->enabled_when_prepared)
+ psd->clock_op_might_sleep--;
+ pm_clk_list_unlock(psd);
__pm_clk_remove(ce);
}
@@ -307,19 +432,21 @@ void pm_clk_remove_clk(struct device *dev, struct clk *clk)
if (!psd || !clk)
return;
- spin_lock_irq(&psd->lock);
+ pm_clk_list_lock(psd);
list_for_each_entry(ce, &psd->clock_list, node) {
if (clk == ce->clk)
goto remove;
}
- spin_unlock_irq(&psd->lock);
+ pm_clk_list_unlock(psd);
return;
remove:
list_del(&ce->node);
- spin_unlock_irq(&psd->lock);
+ if (ce->enabled_when_prepared)
+ psd->clock_op_might_sleep--;
+ pm_clk_list_unlock(psd);
__pm_clk_remove(ce);
}
@@ -330,13 +457,16 @@ EXPORT_SYMBOL_GPL(pm_clk_remove_clk);
* @dev: Device to initialize the list of PM clocks for.
*
* Initialize the lock and clock_list members of the device's pm_subsys_data
- * object.
+ * object and set the count of clocks that might sleep to 0.
*/
void pm_clk_init(struct device *dev)
{
struct pm_subsys_data *psd = dev_to_psd(dev);
- if (psd)
+ if (psd) {
INIT_LIST_HEAD(&psd->clock_list);
+ mutex_init(&psd->clock_mutex);
+ psd->clock_op_might_sleep = 0;
+ }
}
EXPORT_SYMBOL_GPL(pm_clk_init);
@@ -372,12 +502,13 @@ void pm_clk_destroy(struct device *dev)
INIT_LIST_HEAD(&list);
- spin_lock_irq(&psd->lock);
+ pm_clk_list_lock(psd);
list_for_each_entry_safe_reverse(ce, c, &psd->clock_list, node)
list_move(&ce->node, &list);
+ psd->clock_op_might_sleep = 0;
- spin_unlock_irq(&psd->lock);
+ pm_clk_list_unlock(psd);
dev_pm_put_subsys_data(dev);
@@ -397,23 +528,30 @@ int pm_clk_suspend(struct device *dev)
struct pm_subsys_data *psd = dev_to_psd(dev);
struct pm_clock_entry *ce;
unsigned long flags;
+ int ret;
dev_dbg(dev, "%s()\n", __func__);
if (!psd)
return 0;
- spin_lock_irqsave(&psd->lock, flags);
+ ret = pm_clk_op_lock(psd, &flags, __func__);
+ if (ret)
+ return ret;
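+
+	/*
+	 * Clocks that are enabled when prepared must also be unprepared here,
+	 * hence this path may sleep and needs pm_clk_op_lock() above.
+	 */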
list_for_each_entry_reverse(ce, &psd->clock_list, node) {
- if (ce->status < PCE_STATUS_ERROR) {
- if (ce->status == PCE_STATUS_ENABLED)
+ if (ce->status == PCE_STATUS_ENABLED) {
+ if (ce->enabled_when_prepared) {
+ clk_disable_unprepare(ce->clk);
+ ce->status = PCE_STATUS_ACQUIRED;
+ } else {
clk_disable(ce->clk);
- ce->status = PCE_STATUS_ACQUIRED;
+ ce->status = PCE_STATUS_PREPARED;
+ }
}
}
- spin_unlock_irqrestore(&psd->lock, flags);
+ pm_clk_op_unlock(psd, &flags);
return 0;
}
@@ -428,18 +566,21 @@ int pm_clk_resume(struct device *dev)
struct pm_subsys_data *psd = dev_to_psd(dev);
struct pm_clock_entry *ce;
unsigned long flags;
+ int ret;
dev_dbg(dev, "%s()\n", __func__);
if (!psd)
return 0;
- spin_lock_irqsave(&psd->lock, flags);
+ ret = pm_clk_op_lock(psd, &flags, __func__);
+ if (ret)
+ return ret;
list_for_each_entry(ce, &psd->clock_list, node)
__pm_clk_enable(dev, ce);
- spin_unlock_irqrestore(&psd->lock, flags);
+ pm_clk_op_unlock(psd, &flags);
return 0;
}
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 9a14eedacb92..aaf6c83b5cf6 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -297,6 +297,18 @@ static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
return state;
}
+static int genpd_xlate_performance_state(struct generic_pm_domain *genpd,
+ struct generic_pm_domain *parent,
+ unsigned int pstate)
+{
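+	/* Without a parent-side performance hook, the state maps through 1:1 */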
+ if (!parent->set_performance_state)
+ return pstate;
+
+ return dev_pm_opp_xlate_performance_state(genpd->opp_table,
+ parent->opp_table,
+ pstate);
+}
+
static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
unsigned int state, int depth)
{
@@ -311,13 +323,8 @@ static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
list_for_each_entry(link, &genpd->child_links, child_node) {
parent = link->parent;
- if (!parent->set_performance_state)
- continue;
-
/* Find parent's performance state */
- ret = dev_pm_opp_xlate_performance_state(genpd->opp_table,
- parent->opp_table,
- state);
+ ret = genpd_xlate_performance_state(genpd, parent, state);
if (unlikely(ret < 0))
goto err;
@@ -339,9 +346,11 @@ static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
goto err;
}
- ret = genpd->set_performance_state(genpd, state);
- if (ret)
- goto err;
+ if (genpd->set_performance_state) {
+ ret = genpd->set_performance_state(genpd, state);
+ if (ret)
+ goto err;
+ }
genpd->performance_state = state;
return 0;
@@ -352,9 +361,6 @@ err:
child_node) {
parent = link->parent;
- if (!parent->set_performance_state)
- continue;
-
genpd_lock_nested(parent, depth + 1);
parent_state = link->prev_performance_state;
@@ -399,9 +405,6 @@ int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
if (!genpd)
return -ENODEV;
- if (unlikely(!genpd->set_performance_state))
- return -EINVAL;
-
if (WARN_ON(!dev->power.subsys_data ||
!dev->power.subsys_data->domain_data))
return -EINVAL;
@@ -423,6 +426,35 @@ int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
+/**
+ * dev_pm_genpd_set_next_wakeup - Notify PM framework of an impending wakeup.
+ *
+ * @dev: Device to handle
+ * @next: impending interrupt/wakeup for the device
+ *
+ * Allow devices to inform of the next wakeup. It's assumed that the users
+ * guarantee that the genpd wouldn't be detached while this routine is getting
+ * called. Additionally, it's also assumed that @dev isn't runtime suspended
+ * (RPM_SUSPENDED).
+ * Although devices are expected to update the next_wakeup after the end of
+ * their use case as well, it is possible that they may not know about it, so
+ * a stale @next will be ignored when powering off the domain.
+ */
+void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next)
+{
+ struct generic_pm_domain_data *gpd_data;
+ struct generic_pm_domain *genpd;
+
+ genpd = dev_to_genpd_safe(dev);
+ if (!genpd)
+ return;
+
+ gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
+ gpd_data->next_wakeup = next;
+}
+EXPORT_SYMBOL_GPL(dev_pm_genpd_set_next_wakeup);
+
static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
unsigned int state_idx = genpd->state_idx;
@@ -934,8 +966,7 @@ static int genpd_runtime_resume(struct device *dev)
err_stop:
genpd_stop_dev(genpd, dev);
err_poweroff:
- if (!pm_runtime_is_irq_safe(dev) ||
- (pm_runtime_is_irq_safe(dev) && genpd_is_irq_safe(genpd))) {
+ if (!pm_runtime_is_irq_safe(dev) || genpd_is_irq_safe(genpd)) {
genpd_lock(genpd);
genpd_power_off(genpd, true, 0);
genpd_unlock(genpd);
@@ -1465,6 +1496,7 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev)
gpd_data->td.constraint_changed = true;
gpd_data->td.effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
+ gpd_data->next_wakeup = KTIME_MAX;
spin_lock_irq(&dev->power.lock);
@@ -2463,7 +2495,7 @@ int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
out:
mutex_unlock(&gpd_list_lock);
- return ret;
+ return ret == -ENOENT ? -EPROBE_DEFER : ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);
@@ -2952,7 +2984,15 @@ static void rtpm_status_str(struct seq_file *s, struct device *dev)
else
WARN_ON(1);
- seq_puts(s, p);
+ seq_printf(s, "%-25s ", p);
+}
+
+static void perf_status_str(struct seq_file *s, struct device *dev)
+{
+ struct generic_pm_domain_data *gpd_data;
+
+ gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
+ seq_put_decimal_ull(s, "", gpd_data->performance_state);
}
static int genpd_summary_one(struct seq_file *s,
@@ -2980,7 +3020,7 @@ static int genpd_summary_one(struct seq_file *s,
else
snprintf(state, sizeof(state), "%s",
status_lookup[genpd->status]);
- seq_printf(s, "%-30s %-15s ", genpd->name, state);
+ seq_printf(s, "%-30s %-50s %u", genpd->name, state, genpd->performance_state);
/*
* Modifications on the list require holding locks on both
@@ -2988,6 +3028,8 @@ static int genpd_summary_one(struct seq_file *s,
* Also genpd->name is immutable.
*/
list_for_each_entry(link, &genpd->parent_links, parent_node) {
+ if (list_is_first(&link->parent_node, &genpd->parent_links))
+ seq_printf(s, "\n%48s", " ");
seq_printf(s, "%s", link->child->name);
if (!list_is_last(&link->parent_node, &genpd->parent_links))
seq_puts(s, ", ");
@@ -3002,6 +3044,7 @@ static int genpd_summary_one(struct seq_file *s,
seq_printf(s, "\n %-50s ", kobj_path);
rtpm_status_str(s, pm_data->dev);
+ perf_status_str(s, pm_data->dev);
kfree(kobj_path);
}
@@ -3017,9 +3060,9 @@ static int summary_show(struct seq_file *s, void *data)
struct generic_pm_domain *genpd;
int ret = 0;
- seq_puts(s, "domain status children\n");
+ seq_puts(s, "domain status children performance\n");
seq_puts(s, " /device runtime status\n");
- seq_puts(s, "----------------------------------------------------------------------\n");
+ seq_puts(s, "----------------------------------------------------------------------------------------------\n");
ret = mutex_lock_interruptible(&gpd_list_lock);
if (ret)
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
index 490ed7deb99a..c6c218758f0b 100644
--- a/drivers/base/power/domain_governor.c
+++ b/drivers/base/power/domain_governor.c
@@ -117,6 +117,55 @@ static bool default_suspend_ok(struct device *dev)
return td->cached_suspend_ok;
}
+static void update_domain_next_wakeup(struct generic_pm_domain *genpd, ktime_t now)
+{
+ ktime_t domain_wakeup = KTIME_MAX;
+ ktime_t next_wakeup;
+ struct pm_domain_data *pdd;
+ struct gpd_link *link;
+
+ if (!(genpd->flags & GENPD_FLAG_MIN_RESIDENCY))
+ return;
+
+ /*
+	 * Devices that have a predictable wakeup pattern may specify
+	 * their next wakeup. Let's find the next wakeup from all the
+	 * devices attached to this domain and from all the sub-domains.
+	 * It is possible that a component's next wakeup has become stale
+	 * by the time we read it here; stale values are ignored so that
+	 * the domain can still enter its optimal idle state.
+ */
+ list_for_each_entry(pdd, &genpd->dev_list, list_node) {
+ next_wakeup = to_gpd_data(pdd)->next_wakeup;
+ if (next_wakeup != KTIME_MAX && !ktime_before(next_wakeup, now))
+ if (ktime_before(next_wakeup, domain_wakeup))
+ domain_wakeup = next_wakeup;
+ }
+
+ list_for_each_entry(link, &genpd->parent_links, parent_node) {
+ next_wakeup = link->child->next_wakeup;
+ if (next_wakeup != KTIME_MAX && !ktime_before(next_wakeup, now))
+ if (ktime_before(next_wakeup, domain_wakeup))
+ domain_wakeup = next_wakeup;
+ }
+
+ genpd->next_wakeup = domain_wakeup;
+}
+
+static bool next_wakeup_allows_state(struct generic_pm_domain *genpd,
+ unsigned int state, ktime_t now)
+{
+ ktime_t domain_wakeup = genpd->next_wakeup;
+ s64 idle_time_ns, min_sleep_ns;
+
+ min_sleep_ns = genpd->states[state].power_off_latency_ns +
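+	/*
+	 * Sleeping is only worthwhile if the idle window until the next wakeup
+	 * covers the state's power-off latency plus its required residency.
+	 */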
+ genpd->states[state].residency_ns;
+
+ idle_time_ns = ktime_to_ns(ktime_sub(domain_wakeup, now));
+
+ return idle_time_ns >= min_sleep_ns;
+}
+
static bool __default_power_down_ok(struct dev_pm_domain *pd,
unsigned int state)
{
@@ -201,16 +250,41 @@ static bool __default_power_down_ok(struct dev_pm_domain *pd,
}
/**
- * default_power_down_ok - Default generic PM domain power off governor routine.
+ * _default_power_down_ok - Default generic PM domain power off governor routine.
* @pd: PM domain to check.
*
* This routine must be executed under the PM domain's lock.
*/
-static bool default_power_down_ok(struct dev_pm_domain *pd)
+static bool _default_power_down_ok(struct dev_pm_domain *pd, ktime_t now)
{
struct generic_pm_domain *genpd = pd_to_genpd(pd);
+ int state_idx = genpd->state_count - 1;
struct gpd_link *link;
+ /*
+	 * Find the next wakeup from devices that can determine their own wakeup,
+	 * for every device down the hierarchy, to work out when the domain would
+	 * wake up. It is not worthwhile to sleep if the state's residency
+ * cannot be met.
+ */
+ update_domain_next_wakeup(genpd, now);
+ if ((genpd->flags & GENPD_FLAG_MIN_RESIDENCY) && (genpd->next_wakeup != KTIME_MAX)) {
+		/* Let's find out the deepest domain idle state the devices prefer */
+ while (state_idx >= 0) {
+ if (next_wakeup_allows_state(genpd, state_idx, now)) {
+ genpd->max_off_time_changed = true;
+ break;
+ }
+ state_idx--;
+ }
+
+ if (state_idx < 0) {
+ state_idx = 0;
+ genpd->cached_power_down_ok = false;
+ goto done;
+ }
+ }
+
if (!genpd->max_off_time_changed) {
genpd->state_idx = genpd->cached_power_down_state_idx;
return genpd->cached_power_down_ok;
@@ -228,21 +302,30 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
genpd->max_off_time_ns = -1;
genpd->max_off_time_changed = false;
genpd->cached_power_down_ok = true;
- genpd->state_idx = genpd->state_count - 1;
- /* Find a state to power down to, starting from the deepest. */
- while (!__default_power_down_ok(pd, genpd->state_idx)) {
- if (genpd->state_idx == 0) {
+ /*
+ * Find a state to power down to, starting from the state
+ * determined by the next wakeup.
+ */
+ while (!__default_power_down_ok(pd, state_idx)) {
+ if (state_idx == 0) {
genpd->cached_power_down_ok = false;
break;
}
- genpd->state_idx--;
+ state_idx--;
}
+done:
+ genpd->state_idx = state_idx;
genpd->cached_power_down_state_idx = genpd->state_idx;
return genpd->cached_power_down_ok;
}
+static bool default_power_down_ok(struct dev_pm_domain *pd)
+{
+ return _default_power_down_ok(pd, ktime_get());
+}
+
static bool always_on_power_down_ok(struct dev_pm_domain *domain)
{
return false;
@@ -254,11 +337,12 @@ static bool cpu_power_down_ok(struct dev_pm_domain *pd)
struct generic_pm_domain *genpd = pd_to_genpd(pd);
struct cpuidle_device *dev;
ktime_t domain_wakeup, next_hrtimer;
+ ktime_t now = ktime_get();
s64 idle_duration_ns;
int cpu, i;
/* Validate dev PM QoS constraints. */
- if (!default_power_down_ok(pd))
+ if (!_default_power_down_ok(pd, now))
return false;
if (!(genpd->flags & GENPD_FLAG_CPU_DOMAIN))
@@ -280,7 +364,7 @@ static bool cpu_power_down_ok(struct dev_pm_domain *pd)
}
/* The minimum idle duration is from now - until the next wakeup. */
- idle_duration_ns = ktime_to_ns(ktime_sub(domain_wakeup, ktime_get()));
+ idle_duration_ns = ktime_to_ns(ktime_sub(domain_wakeup, now));
if (idle_duration_ns <= 0)
return false;
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 46793276598d..f893c3c5af07 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -16,6 +16,7 @@
*/
#define pr_fmt(fmt) "PM: " fmt
+#define dev_fmt pr_fmt
#include <linux/device.h>
#include <linux/export.h>
@@ -449,8 +450,8 @@ static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
int error)
{
- pr_err("Device %s failed to %s%s: error %d\n",
- dev_name(dev), pm_verb(state.event), info, error);
+ dev_err(dev, "failed to %s%s: error %d\n", pm_verb(state.event), info,
+ error);
}
static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
@@ -1897,8 +1898,8 @@ int dpm_prepare(pm_message_t state)
error = 0;
continue;
}
- pr_info("Device %s not prepared for power transition: code %d\n",
- dev_name(dev), error);
+ dev_info(dev, "not prepared for power transition: code %d\n",
+ error);
put_device(dev);
break;
}
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index bfda153b1a41..a46a7e30881b 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1100,7 +1100,7 @@ EXPORT_SYMBOL_GPL(__pm_runtime_resume);
* suspending the device when both its runtime PM status is %RPM_ACTIVE and its
* runtime PM usage counter is not zero.
*
- * The caller is resposible for decrementing the runtime PM usage counter of
+ * The caller is responsible for decrementing the runtime PM usage counter of
* @dev after this function has returned a positive value for it.
*/
int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count)
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 8dfac7f3ed7a..ff2ee87987c7 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -582,8 +582,12 @@ void regmap_debugfs_init(struct regmap *map)
devname = dev_name(map->dev);
if (name) {
- map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
+ if (!map->debugfs_name) {
+ map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
devname, name);
+ if (!map->debugfs_name)
+ return;
+ }
name = map->debugfs_name;
} else {
name = devname;
@@ -591,9 +595,10 @@ void regmap_debugfs_init(struct regmap *map)
if (!strcmp(name, "dummy")) {
kfree(map->debugfs_name);
-
map->debugfs_name = kasprintf(GFP_KERNEL, "dummy%d",
dummy_index);
+ if (!map->debugfs_name)
+ return;
name = map->debugfs_name;
dummy_index++;
}
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index ecceaaa1a66f..583b671b1d2d 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -16,13 +16,7 @@ menuconfig BLK_DEV
if BLK_DEV
-config BLK_DEV_NULL_BLK
- tristate "Null test block driver"
- select CONFIGFS_FS
-
-config BLK_DEV_NULL_BLK_FAULT_INJECTION
- bool "Support fault injection for Null test block driver"
- depends on BLK_DEV_NULL_BLK && FAULT_INJECTION
+source "drivers/block/null_blk/Kconfig"
config BLK_DEV_FD
tristate "Normal floppy disk support"
@@ -451,6 +445,7 @@ config BLK_DEV_RBD
config BLK_DEV_RSXX
tristate "IBM Flash Adapter 900GB Full Height PCIe Device Driver"
depends on PCI
+ select CRC32
help
Device driver for IBM's high speed PCIe SSD
storage device: Flash Adapter 900GB Full Height.
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index e1f63117ee94..a3170859e01d 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -41,12 +41,7 @@ obj-$(CONFIG_BLK_DEV_RSXX) += rsxx/
obj-$(CONFIG_ZRAM) += zram/
obj-$(CONFIG_BLK_DEV_RNBD) += rnbd/
-obj-$(CONFIG_BLK_DEV_NULL_BLK) += null_blk.o
-null_blk-objs := null_blk_main.o
-ifeq ($(CONFIG_BLK_DEV_ZONED), y)
-null_blk-$(CONFIG_TRACING) += null_blk_trace.o
-endif
-null_blk-$(CONFIG_BLK_DEV_ZONED) += null_blk_zoned.o
+obj-$(CONFIG_BLK_DEV_NULL_BLK) += null_blk/
skd-y := skd_main.o
swim_mod-y := swim.o swim_asm.o
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 71c2b1564558..9e2d0c6a3877 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -201,7 +201,7 @@ struct amiga_floppy_struct {
int busy; /* true when drive is active */
int dirty; /* true when trackbuf is not on disk */
int status; /* current error code for unit */
- struct gendisk *gendisk;
+ struct gendisk *gendisk[2];
struct blk_mq_tag_set tag_set;
};
@@ -1669,6 +1669,11 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
return -EBUSY;
}
+ if (unit[drive].type->code == FD_NODRIVE) {
+ mutex_unlock(&amiflop_mutex);
+ return -ENXIO;
+ }
+
if (mode & (FMODE_READ|FMODE_WRITE)) {
bdev_check_media_change(bdev);
if (mode & FMODE_WRITE) {
@@ -1695,7 +1700,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
unit[drive].dtype=&data_types[system];
unit[drive].blocks=unit[drive].type->heads*unit[drive].type->tracks*
data_types[system].sects*unit[drive].type->sect_mult;
- set_capacity(unit[drive].gendisk, unit[drive].blocks);
+ set_capacity(unit[drive].gendisk[system], unit[drive].blocks);
printk(KERN_INFO "fd%d: accessing %s-disk with %s-layout\n",drive,
unit[drive].type->name, data_types[system].name);
@@ -1772,36 +1777,68 @@ static const struct blk_mq_ops amiflop_mq_ops = {
.queue_rq = amiflop_queue_rq,
};
-static struct gendisk *fd_alloc_disk(int drive)
+static int fd_alloc_disk(int drive, int system)
{
struct gendisk *disk;
disk = alloc_disk(1);
if (!disk)
goto out;
-
- disk->queue = blk_mq_init_sq_queue(&unit[drive].tag_set, &amiflop_mq_ops,
- 2, BLK_MQ_F_SHOULD_MERGE);
- if (IS_ERR(disk->queue)) {
- disk->queue = NULL;
+ disk->queue = blk_mq_init_queue(&unit[drive].tag_set);
+ if (IS_ERR(disk->queue))
goto out_put_disk;
- }
+ disk->major = FLOPPY_MAJOR;
+ disk->first_minor = drive + system;
+ disk->fops = &floppy_fops;
+ disk->events = DISK_EVENT_MEDIA_CHANGE;
+ if (system)
+ sprintf(disk->disk_name, "fd%d_msdos", drive);
+ else
+ sprintf(disk->disk_name, "fd%d", drive);
+ disk->private_data = &unit[drive];
+ set_capacity(disk, 880 * 2);
+
+ unit[drive].gendisk[system] = disk;
+ add_disk(disk);
+ return 0;
+
+out_put_disk:
+ disk->queue = NULL;
+ put_disk(disk);
+out:
+ return -ENOMEM;
+}
+
+static int fd_alloc_drive(int drive)
+{
unit[drive].trackbuf = kmalloc(FLOPPY_MAX_SECTORS * 512, GFP_KERNEL);
if (!unit[drive].trackbuf)
- goto out_cleanup_queue;
+ goto out;
- return disk;
+ memset(&unit[drive].tag_set, 0, sizeof(unit[drive].tag_set));
+ unit[drive].tag_set.ops = &amiflop_mq_ops;
+ unit[drive].tag_set.nr_hw_queues = 1;
+ unit[drive].tag_set.nr_maps = 1;
+ unit[drive].tag_set.queue_depth = 2;
+ unit[drive].tag_set.numa_node = NUMA_NO_NODE;
+ unit[drive].tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+ if (blk_mq_alloc_tag_set(&unit[drive].tag_set))
+ goto out_cleanup_trackbuf;
-out_cleanup_queue:
- blk_cleanup_queue(disk->queue);
- disk->queue = NULL;
+ pr_cont(" fd%d", drive);
+
+ if (fd_alloc_disk(drive, 0) || fd_alloc_disk(drive, 1))
+ goto out_cleanup_tagset;
+ return 0;
+
+out_cleanup_tagset:
blk_mq_free_tag_set(&unit[drive].tag_set);
-out_put_disk:
- put_disk(disk);
+out_cleanup_trackbuf:
+ kfree(unit[drive].trackbuf);
out:
unit[drive].type->code = FD_NODRIVE;
- return NULL;
+ return -ENOMEM;
}
static int __init fd_probe_drives(void)
@@ -1812,29 +1849,16 @@ static int __init fd_probe_drives(void)
drives=0;
nomem=0;
for(drive=0;drive<FD_MAX_UNITS;drive++) {
- struct gendisk *disk;
fd_probe(drive);
if (unit[drive].type->code == FD_NODRIVE)
continue;
- disk = fd_alloc_disk(drive);
- if (!disk) {
+ if (fd_alloc_drive(drive) < 0) {
pr_cont(" no mem for fd%d", drive);
nomem = 1;
continue;
}
- unit[drive].gendisk = disk;
drives++;
-
- pr_cont(" fd%d",drive);
- disk->major = FLOPPY_MAJOR;
- disk->first_minor = drive;
- disk->fops = &floppy_fops;
- disk->events = DISK_EVENT_MEDIA_CHANGE;
- sprintf(disk->disk_name, "fd%d", drive);
- disk->private_data = &unit[drive];
- set_capacity(disk, 880*2);
- add_disk(disk);
}
if ((drives > 0) || (nomem == 0)) {
if (drives == 0)
@@ -1846,15 +1870,6 @@ static int __init fd_probe_drives(void)
return -ENOMEM;
}
-static struct kobject *floppy_find(dev_t dev, int *part, void *data)
-{
- int drive = *part & 3;
- if (unit[drive].type->code == FD_NODRIVE)
- return NULL;
- *part = 0;
- return get_disk_and_module(unit[drive].gendisk);
-}
-
static int __init amiga_floppy_probe(struct platform_device *pdev)
{
int i, ret;
@@ -1884,9 +1899,6 @@ static int __init amiga_floppy_probe(struct platform_device *pdev)
if (fd_probe_drives() < 1) /* No usable drives */
goto out_probe;
- blk_register_region(MKDEV(FLOPPY_MAJOR, 0), 256, THIS_MODULE,
- floppy_find, NULL, NULL);
-
/* initialize variables */
timer_setup(&motor_on_timer, motor_on_callback, 0);
motor_on_timer.expires = 0;
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 313f0b946fe2..ac720bdcd983 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -890,19 +890,13 @@ void
aoecmd_sleepwork(struct work_struct *work)
{
struct aoedev *d = container_of(work, struct aoedev, work);
- struct block_device *bd;
- u64 ssize;
if (d->flags & DEVFL_GDALLOC)
aoeblk_gdalloc(d);
if (d->flags & DEVFL_NEWSIZE) {
- ssize = get_capacity(d->gd);
- bd = bdget_disk(d->gd, 0);
- if (bd) {
- bd_set_nr_sectors(bd, ssize);
- bdput(bd);
- }
+ set_capacity_and_notify(d->gd, d->ssize);
+
spin_lock_irq(&d->lock);
d->flags |= DEVFL_UP;
d->flags &= ~DEVFL_NEWSIZE;
@@ -971,10 +965,9 @@ ataid_complete(struct aoedev *d, struct aoetgt *t, unsigned char *id)
d->geo.start = 0;
if (d->flags & (DEVFL_GDALLOC|DEVFL_NEWSIZE))
return;
- if (d->gd != NULL) {
- set_capacity(d->gd, ssize);
+ if (d->gd != NULL)
d->flags |= DEVFL_NEWSIZE;
- } else
+ else
d->flags |= DEVFL_GDALLOC;
schedule_work(&d->work);
}
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 3e881fdb06e0..104b713f4055 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -297,7 +297,7 @@ static struct atari_floppy_struct {
unsigned int wpstat; /* current state of WP signal (for
disk change detection) */
int flags; /* flags */
- struct gendisk *disk;
+ struct gendisk *disk[NUM_DISK_MINORS];
int ref;
int type;
struct blk_mq_tag_set tag_set;
@@ -723,12 +723,16 @@ static void fd_error( void )
static int do_format(int drive, int type, struct atari_format_descr *desc)
{
- struct request_queue *q = unit[drive].disk->queue;
+ struct request_queue *q;
unsigned char *p;
int sect, nsect;
unsigned long flags;
int ret;
+ if (type)
+ type--;
+
+ q = unit[drive].disk[type]->queue;
blk_mq_freeze_queue(q);
blk_mq_quiesce_queue(q);
@@ -738,7 +742,7 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
local_irq_restore(flags);
if (type) {
- if (--type >= NUM_DISK_MINORS ||
+ if (type >= NUM_DISK_MINORS ||
minor2disktype[type].drive_types > DriveType) {
ret = -EINVAL;
goto out;
@@ -1154,7 +1158,7 @@ static void fd_rwsec_done1(int status)
if (SUDT[-1].blocks > ReqBlock) {
/* try another disk type */
SUDT--;
- set_capacity(unit[SelectedDrive].disk,
+ set_capacity(unit[SelectedDrive].disk[0],
SUDT->blocks);
} else
Probing = 0;
@@ -1169,7 +1173,7 @@ static void fd_rwsec_done1(int status)
/* record not found, but not probing. Maybe stretch wrong ? Restart probing */
if (SUD.autoprobe) {
SUDT = atari_disk_type + StartDiskType[DriveType];
- set_capacity(unit[SelectedDrive].disk,
+ set_capacity(unit[SelectedDrive].disk[0],
SUDT->blocks);
Probing = 1;
}
@@ -1515,7 +1519,7 @@ static blk_status_t ataflop_queue_rq(struct blk_mq_hw_ctx *hctx,
if (!UDT) {
Probing = 1;
UDT = atari_disk_type + StartDiskType[DriveType];
- set_capacity(floppy->disk, UDT->blocks);
+ set_capacity(bd->rq->rq_disk, UDT->blocks);
UD.autoprobe = 1;
}
}
@@ -1533,7 +1537,7 @@ static blk_status_t ataflop_queue_rq(struct blk_mq_hw_ctx *hctx,
}
type = minor2disktype[type].index;
UDT = &atari_disk_type[type];
- set_capacity(floppy->disk, UDT->blocks);
+ set_capacity(bd->rq->rq_disk, UDT->blocks);
UD.autoprobe = 0;
}
@@ -1658,7 +1662,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode,
printk (KERN_INFO "floppy%d: setting %s %p!\n",
drive, dtp->name, dtp);
UDT = dtp;
- set_capacity(floppy->disk, UDT->blocks);
+ set_capacity(disk, UDT->blocks);
if (cmd == FDDEFPRM) {
/* save settings as permanent default type */
@@ -1702,7 +1706,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode,
return -EINVAL;
UDT = dtp;
- set_capacity(floppy->disk, UDT->blocks);
+ set_capacity(disk, UDT->blocks);
return 0;
case FDMSGON:
@@ -1725,7 +1729,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode,
UDT = NULL;
/* MSch: invalidate default_params */
default_params[drive].blocks = 0;
- set_capacity(floppy->disk, MAX_DISK_SIZE * 2);
+ set_capacity(disk, MAX_DISK_SIZE * 2);
fallthrough;
case FDFMTEND:
case FDFLUSH:
@@ -1962,14 +1966,50 @@ static const struct blk_mq_ops ataflop_mq_ops = {
.commit_rqs = ataflop_commit_rqs,
};
-static struct kobject *floppy_find(dev_t dev, int *part, void *data)
+static int ataflop_alloc_disk(unsigned int drive, unsigned int type)
{
- int drive = *part & 3;
- int type = *part >> 2;
+ struct gendisk *disk;
+ int ret;
+
+ disk = alloc_disk(1);
+ if (!disk)
+ return -ENOMEM;
+
+ disk->queue = blk_mq_init_queue(&unit[drive].tag_set);
+ if (IS_ERR(disk->queue)) {
+ ret = PTR_ERR(disk->queue);
+ disk->queue = NULL;
+ put_disk(disk);
+ return ret;
+ }
+
+ disk->major = FLOPPY_MAJOR;
+ disk->first_minor = drive + (type << 2);
+ sprintf(disk->disk_name, "fd%d", drive);
+ disk->fops = &floppy_fops;
+ disk->events = DISK_EVENT_MEDIA_CHANGE;
+ disk->private_data = &unit[drive];
+ set_capacity(disk, MAX_DISK_SIZE * 2);
+
+ unit[drive].disk[type] = disk;
+ return 0;
+}
+
+static DEFINE_MUTEX(ataflop_probe_lock);
+
+static void ataflop_probe(dev_t dev)
+{
+ int drive = MINOR(dev) & 3;
+ int type = MINOR(dev) >> 2;
+
if (drive >= FD_MAX_UNITS || type >= NUM_DISK_MINORS)
- return NULL;
- *part = 0;
- return get_disk_and_module(unit[drive].disk);
+ return;
+ mutex_lock(&ataflop_probe_lock);
+ if (!unit[drive].disk[type]) {
+ if (ataflop_alloc_disk(drive, type) == 0)
+ add_disk(unit[drive].disk[type]);
+ }
+ mutex_unlock(&ataflop_probe_lock);
}
static int __init atari_floppy_init (void)
@@ -1981,23 +2021,26 @@ static int __init atari_floppy_init (void)
/* Amiga, Mac, ... don't have Atari-compatible floppy :-) */
return -ENODEV;
- if (register_blkdev(FLOPPY_MAJOR,"fd"))
- return -EBUSY;
+ mutex_lock(&ataflop_probe_lock);
+ ret = __register_blkdev(FLOPPY_MAJOR, "fd", ataflop_probe);
+ if (ret)
+ goto out_unlock;
for (i = 0; i < FD_MAX_UNITS; i++) {
- unit[i].disk = alloc_disk(1);
- if (!unit[i].disk) {
- ret = -ENOMEM;
+ memset(&unit[i].tag_set, 0, sizeof(unit[i].tag_set));
+ unit[i].tag_set.ops = &ataflop_mq_ops;
+ unit[i].tag_set.nr_hw_queues = 1;
+ unit[i].tag_set.nr_maps = 1;
+ unit[i].tag_set.queue_depth = 2;
+ unit[i].tag_set.numa_node = NUMA_NO_NODE;
+ unit[i].tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+ ret = blk_mq_alloc_tag_set(&unit[i].tag_set);
+ if (ret)
goto err;
- }
- unit[i].disk->queue = blk_mq_init_sq_queue(&unit[i].tag_set,
- &ataflop_mq_ops, 2,
- BLK_MQ_F_SHOULD_MERGE);
- if (IS_ERR(unit[i].disk->queue)) {
- put_disk(unit[i].disk);
- ret = PTR_ERR(unit[i].disk->queue);
- unit[i].disk->queue = NULL;
+ ret = ataflop_alloc_disk(i, 0);
+ if (ret) {
+ blk_mq_free_tag_set(&unit[i].tag_set);
goto err;
}
}
@@ -2027,19 +2070,9 @@ static int __init atari_floppy_init (void)
for (i = 0; i < FD_MAX_UNITS; i++) {
unit[i].track = -1;
unit[i].flags = 0;
- unit[i].disk->major = FLOPPY_MAJOR;
- unit[i].disk->first_minor = i;
- sprintf(unit[i].disk->disk_name, "fd%d", i);
- unit[i].disk->fops = &floppy_fops;
- unit[i].disk->events = DISK_EVENT_MEDIA_CHANGE;
- unit[i].disk->private_data = &unit[i];
- set_capacity(unit[i].disk, MAX_DISK_SIZE * 2);
- add_disk(unit[i].disk);
+ add_disk(unit[i].disk[0]);
}
- blk_register_region(MKDEV(FLOPPY_MAJOR, 0), 256, THIS_MODULE,
- floppy_find, NULL, NULL);
-
printk(KERN_INFO "Atari floppy driver: max. %cD, %strack buffering\n",
DriveType == 0 ? 'D' : DriveType == 1 ? 'H' : 'E',
UseTrackbuffer ? "" : "no ");
@@ -2049,14 +2082,14 @@ static int __init atari_floppy_init (void)
err:
while (--i >= 0) {
- struct gendisk *disk = unit[i].disk;
-
- blk_cleanup_queue(disk->queue);
+ blk_cleanup_queue(unit[i].disk[0]->queue);
+ put_disk(unit[i].disk[0]);
blk_mq_free_tag_set(&unit[i].tag_set);
- put_disk(unit[i].disk);
}
unregister_blkdev(FLOPPY_MAJOR, "fd");
+out_unlock:
+ mutex_unlock(&ataflop_probe_lock);
return ret;
}
@@ -2101,13 +2134,17 @@ __setup("floppy=", atari_floppy_setup);
static void __exit atari_floppy_exit(void)
{
- int i;
- blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
+ int i, type;
+
for (i = 0; i < FD_MAX_UNITS; i++) {
- del_gendisk(unit[i].disk);
- blk_cleanup_queue(unit[i].disk->queue);
+ for (type = 0; type < NUM_DISK_MINORS; type++) {
+ if (!unit[i].disk[type])
+ continue;
+ del_gendisk(unit[i].disk[type]);
+ blk_cleanup_queue(unit[i].disk[type]->queue);
+ put_disk(unit[i].disk[type]);
+ }
blk_mq_free_tag_set(&unit[i].tag_set);
- put_disk(unit[i].disk);
}
unregister_blkdev(FLOPPY_MAJOR, "fd");
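
Note: the ataflop hunks above are scattered across the file; read together, they replace the single per-drive gendisk and the blk_register_region()/floppy_find() lookup with one gendisk per media format, created lazily from a probe callback. A minimal consolidated sketch of that pattern (array and helper names mirror the driver above; example_probe/example_init are illustrative wrappers, error handling trimmed):

        /* called by the block core on first open of an unbound fd minor */
        static void example_probe(dev_t dev)
        {
                int drive = MINOR(dev) & 3;     /* low minor bits: unit */
                int type = MINOR(dev) >> 2;     /* high minor bits: media format */

                if (drive >= FD_MAX_UNITS || type >= NUM_DISK_MINORS)
                        return;
                mutex_lock(&ataflop_probe_lock);
                /* allocate and register the (unit, format) disk on demand */
                if (!unit[drive].disk[type] &&
                    ataflop_alloc_disk(drive, type) == 0)
                        add_disk(unit[drive].disk[type]);
                mutex_unlock(&ataflop_probe_lock);
        }

        static int __init example_init(void)
        {
                /* the probe callback supersedes blk_register_region() */
                return __register_blkdev(FLOPPY_MAJOR, "fd", example_probe);
        }
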
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index cc49a921339f..c43a6ab4b1f3 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -426,14 +426,15 @@ static void brd_free(struct brd_device *brd)
kfree(brd);
}
-static struct brd_device *brd_init_one(int i, bool *new)
+static void brd_probe(dev_t dev)
{
struct brd_device *brd;
+ int i = MINOR(dev) / max_part;
- *new = false;
+ mutex_lock(&brd_devices_mutex);
list_for_each_entry(brd, &brd_devices, brd_list) {
if (brd->brd_number == i)
- goto out;
+ goto out_unlock;
}
brd = brd_alloc(i);
@@ -442,9 +443,9 @@ static struct brd_device *brd_init_one(int i, bool *new)
add_disk(brd->brd_disk);
list_add_tail(&brd->brd_list, &brd_devices);
}
- *new = true;
-out:
- return brd;
+
+out_unlock:
+ mutex_unlock(&brd_devices_mutex);
}
static void brd_del_one(struct brd_device *brd)
@@ -454,23 +455,6 @@ static void brd_del_one(struct brd_device *brd)
brd_free(brd);
}
-static struct kobject *brd_probe(dev_t dev, int *part, void *data)
-{
- struct brd_device *brd;
- struct kobject *kobj;
- bool new;
-
- mutex_lock(&brd_devices_mutex);
- brd = brd_init_one(MINOR(dev) / max_part, &new);
- kobj = brd ? get_disk_and_module(brd->brd_disk) : NULL;
- mutex_unlock(&brd_devices_mutex);
-
- if (new)
- *part = 0;
-
- return kobj;
-}
-
static inline void brd_check_and_reset_par(void)
{
if (unlikely(!max_part))
@@ -510,11 +494,12 @@ static int __init brd_init(void)
* dynamically.
*/
- if (register_blkdev(RAMDISK_MAJOR, "ramdisk"))
+ if (__register_blkdev(RAMDISK_MAJOR, "ramdisk", brd_probe))
return -EIO;
brd_check_and_reset_par();
+ mutex_lock(&brd_devices_mutex);
for (i = 0; i < rd_nr; i++) {
brd = brd_alloc(i);
if (!brd)
@@ -532,9 +517,7 @@ static int __init brd_init(void)
brd->brd_disk->queue = brd->brd_queue;
add_disk(brd->brd_disk);
}
-
- blk_register_region(MKDEV(RAMDISK_MAJOR, 0), 1UL << MINORBITS,
- THIS_MODULE, brd_probe, NULL, NULL);
+ mutex_unlock(&brd_devices_mutex);
pr_info("brd: module loaded\n");
return 0;
@@ -544,6 +527,7 @@ out_free:
list_del(&brd->brd_list);
brd_free(brd);
}
+ mutex_unlock(&brd_devices_mutex);
unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
pr_info("brd: module NOT loaded !!!\n");
@@ -557,7 +541,6 @@ static void __exit brd_exit(void)
list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
brd_del_one(brd);
- blk_unregister_region(MKDEV(RAMDISK_MAJOR, 0), 1UL << MINORBITS);
unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
pr_info("brd: module unloaded\n");
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 65b95aef8dbc..1c8c18b2a25f 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2036,8 +2036,7 @@ void drbd_set_my_capacity(struct drbd_device *device, sector_t size)
{
char ppb[10];
- set_capacity(device->vdisk, size);
- revalidate_disk_size(device->vdisk, false);
+ set_capacity_and_notify(device->vdisk, size);
drbd_info(device, "size = %s (%llu KB)\n",
ppsize(ppb, size>>1), (unsigned long long)size>>1);
@@ -2068,8 +2067,7 @@ void drbd_device_cleanup(struct drbd_device *device)
}
D_ASSERT(device, first_peer_device(device)->connection->net_conf == NULL);
- set_capacity(device->vdisk, 0);
- revalidate_disk_size(device->vdisk, false);
+ set_capacity_and_notify(device->vdisk, 0);
if (device->bitmap) {
/* maybe never allocated. */
drbd_bm_resize(device, 0, 1);
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index dc333dbe5232..09c86ef3f0fd 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -2802,7 +2802,7 @@ bool drbd_rs_c_min_rate_throttle(struct drbd_device *device)
if (c_min_rate == 0)
return false;
- curr_events = (int)part_stat_read_accum(&disk->part0, sectors) -
+ curr_events = (int)part_stat_read_accum(disk->part0, sectors) -
atomic_read(&device->rs_sect_ev);
if (atomic_read(&device->ap_actlog_cnt)
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index ba56f3f05312..02044ab7f767 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -1678,7 +1678,8 @@ void drbd_rs_controller_reset(struct drbd_device *device)
atomic_set(&device->rs_sect_in, 0);
atomic_set(&device->rs_sect_ev, 0);
device->rs_in_flight = 0;
- device->rs_last_events = (int)part_stat_read_accum(&disk->part0, sectors);
+ device->rs_last_events =
+ (int)part_stat_read_accum(disk->part0, sectors);
/* Updating the RCU protected object in place is necessary since
this function gets called from atomic context.
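
Note: two API shifts appear in the drbd hunks. set_capacity_and_notify() folds the old set_capacity() + revalidate_disk_size() pair into one call that emits the resize uevent itself when the capacity actually changed, and disk->part0 is now a struct block_device pointer, so part_stat_read_accum() takes it directly instead of by address. Sketch with illustrative helper names:

        static void example_set_size(struct gendisk *disk, sector_t size)
        {
                /* true return means the size changed and a uevent was sent */
                if (set_capacity_and_notify(disk, size))
                        pr_info("%s: resized to %llu sectors\n",
                                disk->disk_name, (unsigned long long)size);
        }

        static int example_sectors_completed(struct gendisk *disk)
        {
                /* part0 is a struct block_device * after the bdev rework */
                return (int)part_stat_read_accum(disk->part0, sectors);
        }
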
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 7df79ae6b0a1..dfe1dfc901cc 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -402,7 +402,6 @@ static struct floppy_drive_params drive_params[N_DRIVE];
static struct floppy_drive_struct drive_state[N_DRIVE];
static struct floppy_write_errors write_errors[N_DRIVE];
static struct timer_list motor_off_timer[N_DRIVE];
-static struct gendisk *disks[N_DRIVE];
static struct blk_mq_tag_set tag_sets[N_DRIVE];
static struct block_device *opened_bdev[N_DRIVE];
static DEFINE_MUTEX(open_lock);
@@ -477,6 +476,8 @@ static struct floppy_struct floppy_type[32] = {
{ 3200,20,2,80,0,0x1C,0x00,0xCF,0x2C,"H1600" }, /* 31 1.6MB 3.5" */
};
+static struct gendisk *disks[N_DRIVE][ARRAY_SIZE(floppy_type)];
+
#define SECTSIZE (_FD_SECTSIZE(*floppy))
/* Auto-detection: Disk type used until the next media change occurs. */
@@ -4111,7 +4112,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
new_dev = MINOR(bdev->bd_dev);
drive_state[drive].fd_device = new_dev;
- set_capacity(disks[drive], floppy_sizes[new_dev]);
+ set_capacity(disks[drive][ITYPE(new_dev)], floppy_sizes[new_dev]);
if (old_dev != -1 && old_dev != new_dev) {
if (buffer_drive == drive)
buffer_track = -1;
@@ -4579,15 +4580,58 @@ static bool floppy_available(int drive)
return true;
}
-static struct kobject *floppy_find(dev_t dev, int *part, void *data)
+static int floppy_alloc_disk(unsigned int drive, unsigned int type)
{
- int drive = (*part & 3) | ((*part & 0x80) >> 5);
- if (drive >= N_DRIVE || !floppy_available(drive))
- return NULL;
- if (((*part >> 2) & 0x1f) >= ARRAY_SIZE(floppy_type))
- return NULL;
- *part = 0;
- return get_disk_and_module(disks[drive]);
+ struct gendisk *disk;
+ int err;
+
+ disk = alloc_disk(1);
+ if (!disk)
+ return -ENOMEM;
+
+ disk->queue = blk_mq_init_queue(&tag_sets[drive]);
+ if (IS_ERR(disk->queue)) {
+ err = PTR_ERR(disk->queue);
+ disk->queue = NULL;
+ put_disk(disk);
+ return err;
+ }
+
+ blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH);
+ blk_queue_max_hw_sectors(disk->queue, 64);
+ disk->major = FLOPPY_MAJOR;
+ disk->first_minor = TOMINOR(drive) | (type << 2);
+ disk->fops = &floppy_fops;
+ disk->events = DISK_EVENT_MEDIA_CHANGE;
+ if (type)
+ sprintf(disk->disk_name, "fd%d_type%d", drive, type);
+ else
+ sprintf(disk->disk_name, "fd%d", drive);
+ /* to be cleaned up... */
+ disk->private_data = (void *)(long)drive;
+ disk->flags |= GENHD_FL_REMOVABLE;
+
+ disks[drive][type] = disk;
+ return 0;
+}
+
+static DEFINE_MUTEX(floppy_probe_lock);
+
+static void floppy_probe(dev_t dev)
+{
+ unsigned int drive = (MINOR(dev) & 3) | ((MINOR(dev) & 0x80) >> 5);
+ unsigned int type = (MINOR(dev) >> 2) & 0x1f;
+
+ if (drive >= N_DRIVE || !floppy_available(drive) ||
+ type >= ARRAY_SIZE(floppy_type))
+ return;
+
+ mutex_lock(&floppy_probe_lock);
+ if (!disks[drive][type]) {
+ if (floppy_alloc_disk(drive, type) == 0)
+ add_disk(disks[drive][type]);
+ }
+ mutex_unlock(&floppy_probe_lock);
}
static int __init do_floppy_init(void)
@@ -4609,33 +4653,25 @@ static int __init do_floppy_init(void)
return -ENOMEM;
for (drive = 0; drive < N_DRIVE; drive++) {
- disks[drive] = alloc_disk(1);
- if (!disks[drive]) {
- err = -ENOMEM;
+ memset(&tag_sets[drive], 0, sizeof(tag_sets[drive]));
+ tag_sets[drive].ops = &floppy_mq_ops;
+ tag_sets[drive].nr_hw_queues = 1;
+ tag_sets[drive].nr_maps = 1;
+ tag_sets[drive].queue_depth = 2;
+ tag_sets[drive].numa_node = NUMA_NO_NODE;
+ tag_sets[drive].flags = BLK_MQ_F_SHOULD_MERGE;
+ err = blk_mq_alloc_tag_set(&tag_sets[drive]);
+ if (err)
goto out_put_disk;
- }
- disks[drive]->queue = blk_mq_init_sq_queue(&tag_sets[drive],
- &floppy_mq_ops, 2,
- BLK_MQ_F_SHOULD_MERGE);
- if (IS_ERR(disks[drive]->queue)) {
- err = PTR_ERR(disks[drive]->queue);
- disks[drive]->queue = NULL;
+ err = floppy_alloc_disk(drive, 0);
+ if (err)
goto out_put_disk;
- }
-
- blk_queue_bounce_limit(disks[drive]->queue, BLK_BOUNCE_HIGH);
- blk_queue_max_hw_sectors(disks[drive]->queue, 64);
- disks[drive]->major = FLOPPY_MAJOR;
- disks[drive]->first_minor = TOMINOR(drive);
- disks[drive]->fops = &floppy_fops;
- disks[drive]->events = DISK_EVENT_MEDIA_CHANGE;
- sprintf(disks[drive]->disk_name, "fd%d", drive);
timer_setup(&motor_off_timer[drive], motor_off_callback, 0);
}
- err = register_blkdev(FLOPPY_MAJOR, "fd");
+ err = __register_blkdev(FLOPPY_MAJOR, "fd", floppy_probe);
if (err)
goto out_put_disk;
@@ -4643,9 +4679,6 @@ static int __init do_floppy_init(void)
if (err)
goto out_unreg_blkdev;
- blk_register_region(MKDEV(FLOPPY_MAJOR, 0), 256, THIS_MODULE,
- floppy_find, NULL, NULL);
-
for (i = 0; i < 256; i++)
if (ITYPE(i))
floppy_sizes[i] = floppy_type[ITYPE(i)].size;
@@ -4673,7 +4706,7 @@ static int __init do_floppy_init(void)
if (fdc_state[0].address == -1) {
cancel_delayed_work(&fd_timeout);
err = -ENODEV;
- goto out_unreg_region;
+ goto out_unreg_driver;
}
#if N_FDC > 1
fdc_state[1].address = FDC2;
@@ -4684,7 +4717,7 @@ static int __init do_floppy_init(void)
if (err) {
cancel_delayed_work(&fd_timeout);
err = -EBUSY;
- goto out_unreg_region;
+ goto out_unreg_driver;
}
/* initialise drive state */
@@ -4761,10 +4794,8 @@ static int __init do_floppy_init(void)
if (err)
goto out_remove_drives;
- /* to be cleaned up... */
- disks[drive]->private_data = (void *)(long)drive;
- disks[drive]->flags |= GENHD_FL_REMOVABLE;
- device_add_disk(&floppy_device[drive].dev, disks[drive], NULL);
+ device_add_disk(&floppy_device[drive].dev, disks[drive][0],
+ NULL);
}
return 0;
@@ -4772,30 +4803,27 @@ static int __init do_floppy_init(void)
out_remove_drives:
while (drive--) {
if (floppy_available(drive)) {
- del_gendisk(disks[drive]);
+ del_gendisk(disks[drive][0]);
platform_device_unregister(&floppy_device[drive]);
}
}
out_release_dma:
if (atomic_read(&usage_count))
floppy_release_irq_and_dma();
-out_unreg_region:
- blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
+out_unreg_driver:
platform_driver_unregister(&floppy_driver);
out_unreg_blkdev:
unregister_blkdev(FLOPPY_MAJOR, "fd");
out_put_disk:
destroy_workqueue(floppy_wq);
for (drive = 0; drive < N_DRIVE; drive++) {
- if (!disks[drive])
+ if (!disks[drive][0])
break;
- if (disks[drive]->queue) {
- del_timer_sync(&motor_off_timer[drive]);
- blk_cleanup_queue(disks[drive]->queue);
- disks[drive]->queue = NULL;
- blk_mq_free_tag_set(&tag_sets[drive]);
- }
- put_disk(disks[drive]);
+ del_timer_sync(&motor_off_timer[drive]);
+ blk_cleanup_queue(disks[drive][0]->queue);
+ disks[drive][0]->queue = NULL;
+ blk_mq_free_tag_set(&tag_sets[drive]);
+ put_disk(disks[drive][0]);
}
return err;
}
@@ -5006,9 +5034,8 @@ module_init(floppy_module_init);
static void __exit floppy_module_exit(void)
{
- int drive;
+ int drive, i;
- blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
unregister_blkdev(FLOPPY_MAJOR, "fd");
platform_driver_unregister(&floppy_driver);
@@ -5018,10 +5045,16 @@ static void __exit floppy_module_exit(void)
del_timer_sync(&motor_off_timer[drive]);
if (floppy_available(drive)) {
- del_gendisk(disks[drive]);
+ for (i = 0; i < ARRAY_SIZE(floppy_type); i++) {
+ if (disks[drive][i])
+ del_gendisk(disks[drive][i]);
+ }
platform_device_unregister(&floppy_device[drive]);
}
- blk_cleanup_queue(disks[drive]->queue);
+ for (i = 0; i < ARRAY_SIZE(floppy_type); i++) {
+ if (disks[drive][i])
+ blk_cleanup_queue(disks[drive][i]->queue);
+ }
blk_mq_free_tag_set(&tag_sets[drive]);
/*
@@ -5029,10 +5062,17 @@ static void __exit floppy_module_exit(void)
* queue reference in put_disk().
*/
if (!(allowed_drive_mask & (1 << drive)) ||
- fdc_state[FDC(drive)].version == FDC_NONE)
- disks[drive]->queue = NULL;
+ fdc_state[FDC(drive)].version == FDC_NONE) {
+ for (i = 0; i < ARRAY_SIZE(floppy_type); i++) {
+ if (disks[drive][i])
+ disks[drive][i]->queue = NULL;
+ }
+ }
- put_disk(disks[drive]);
+ for (i = 0; i < ARRAY_SIZE(floppy_type); i++) {
+ if (disks[drive][i])
+ put_disk(disks[drive][i]);
+ }
}
cancel_delayed_work_sync(&fd_timeout);
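
Note: floppy and ataflop both open-code what blk_mq_init_sq_queue() used to do, because each drive's tag set is now shared by several per-format gendisks and must be allocated once, up front, rather than inside a single-queue convenience helper. The expanded shape, as a sketch:

        static struct request_queue *
        example_init_queue(struct blk_mq_tag_set *set, const struct blk_mq_ops *ops)
        {
                int ret;

                memset(set, 0, sizeof(*set));
                set->ops = ops;
                set->nr_hw_queues = 1;
                set->nr_maps = 1;
                set->queue_depth = 2;
                set->numa_node = NUMA_NO_NODE;
                set->flags = BLK_MQ_F_SHOULD_MERGE;

                ret = blk_mq_alloc_tag_set(set);
                if (ret)
                        return ERR_PTR(ret);
                /* each per-format gendisk gets its own queue on this set */
                return blk_mq_init_queue(set);
        }
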
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index a58084c2ed7c..e5ff328f0917 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -251,12 +251,8 @@ loop_validate_block_size(unsigned short bsize)
*/
static void loop_set_size(struct loop_device *lo, loff_t size)
{
- struct block_device *bdev = lo->lo_device;
-
- bd_set_nr_sectors(bdev, size);
-
- if (!set_capacity_revalidate_and_notify(lo->lo_disk, size, false))
- kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
+ if (!set_capacity_and_notify(lo->lo_disk, size))
+ kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
}
static inline int
@@ -679,10 +675,10 @@ static int loop_validate_file(struct file *file, struct block_device *bdev)
while (is_loop_device(f)) {
struct loop_device *l;
- if (f->f_mapping->host->i_bdev == bdev)
+ if (f->f_mapping->host->i_rdev == bdev->bd_dev)
return -EBADF;
- l = f->f_mapping->host->i_bdev->bd_disk->private_data;
+ l = I_BDEV(f->f_mapping->host)->bd_disk->private_data;
if (l->lo_state != Lo_bound) {
return -EINVAL;
}
@@ -889,9 +885,7 @@ static void loop_config_discard(struct loop_device *lo)
* file-backed loop devices: discarded regions read back as zero.
*/
if (S_ISBLK(inode->i_mode) && !lo->lo_encrypt_key_size) {
- struct request_queue *backingq;
-
- backingq = bdev_get_queue(inode->i_bdev);
+ struct request_queue *backingq = bdev_get_queue(I_BDEV(inode));
max_discard_sectors = backingq->limits.max_write_zeroes_sectors;
granularity = backingq->limits.discard_granularity ?:
@@ -1075,7 +1069,6 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
struct file *file;
struct inode *inode;
struct address_space *mapping;
- struct block_device *claimed_bdev = NULL;
int error;
loff_t size;
bool partscan;
@@ -1094,8 +1087,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
* here to avoid changing device under exclusive owner.
*/
if (!(mode & FMODE_EXCL)) {
- claimed_bdev = bdev->bd_contains;
- error = bd_prepare_to_claim(bdev, claimed_bdev, loop_configure);
+ error = bd_prepare_to_claim(bdev, loop_configure);
if (error)
goto out_putf;
}
@@ -1138,7 +1130,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
if (error)
goto out_unlock;
- set_device_ro(bdev, (lo->lo_flags & LO_FLAGS_READ_ONLY) != 0);
+ set_disk_ro(lo->lo_disk, (lo->lo_flags & LO_FLAGS_READ_ONLY) != 0);
lo->use_dio = lo->lo_flags & LO_FLAGS_DIRECT_IO;
lo->lo_device = bdev;
@@ -1168,9 +1160,6 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
size = get_loop_size(lo, file);
loop_set_size(lo, size);
- set_blocksize(bdev, S_ISBLK(inode->i_mode) ?
- block_size(inode->i_bdev) : PAGE_SIZE);
-
lo->lo_state = Lo_bound;
if (part_shift)
lo->lo_flags |= LO_FLAGS_PARTSCAN;
@@ -1185,15 +1174,15 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
mutex_unlock(&loop_ctl_mutex);
if (partscan)
loop_reread_partitions(lo, bdev);
- if (claimed_bdev)
- bd_abort_claiming(bdev, claimed_bdev, loop_configure);
+ if (!(mode & FMODE_EXCL))
+ bd_abort_claiming(bdev, loop_configure);
return 0;
out_unlock:
mutex_unlock(&loop_ctl_mutex);
out_bdev:
- if (claimed_bdev)
- bd_abort_claiming(bdev, claimed_bdev, loop_configure);
+ if (!(mode & FMODE_EXCL))
+ bd_abort_claiming(bdev, loop_configure);
out_putf:
fput(file);
out:
@@ -1252,7 +1241,6 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
set_capacity(lo->lo_disk, 0);
loop_sysfs_exit(lo);
if (bdev) {
- bd_set_nr_sectors(bdev, 0);
/* let user-space know about this change */
kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
}
@@ -2235,24 +2223,18 @@ out:
return ret;
}
-static struct kobject *loop_probe(dev_t dev, int *part, void *data)
+static void loop_probe(dev_t dev)
{
+ int idx = MINOR(dev) >> part_shift;
struct loop_device *lo;
- struct kobject *kobj;
- int err;
+
+ if (max_loop && idx >= max_loop)
+ return;
mutex_lock(&loop_ctl_mutex);
- err = loop_lookup(&lo, MINOR(dev) >> part_shift);
- if (err < 0)
- err = loop_add(&lo, MINOR(dev) >> part_shift);
- if (err < 0)
- kobj = NULL;
- else
- kobj = get_disk_and_module(lo->lo_disk);
+ if (loop_lookup(&lo, idx) < 0)
+ loop_add(&lo, idx);
mutex_unlock(&loop_ctl_mutex);
-
- *part = 0;
- return kobj;
}
static long loop_control_ioctl(struct file *file, unsigned int cmd,
@@ -2322,7 +2304,6 @@ MODULE_ALIAS("devname:loop-control");
static int __init loop_init(void)
{
int i, nr;
- unsigned long range;
struct loop_device *lo;
int err;
@@ -2359,27 +2340,21 @@ static int __init loop_init(void)
* /dev/loop-control interface, or be instantiated by accessing
* a 'dead' device node.
*/
- if (max_loop) {
+ if (max_loop)
nr = max_loop;
- range = max_loop << part_shift;
- } else {
+ else
nr = CONFIG_BLK_DEV_LOOP_MIN_COUNT;
- range = 1UL << MINORBITS;
- }
err = misc_register(&loop_misc);
if (err < 0)
goto err_out;
- if (register_blkdev(LOOP_MAJOR, "loop")) {
+ if (__register_blkdev(LOOP_MAJOR, "loop", loop_probe)) {
err = -EIO;
goto misc_out;
}
- blk_register_region(MKDEV(LOOP_MAJOR, 0), range,
- THIS_MODULE, loop_probe, NULL, NULL);
-
/* pre-create number of devices given by config or max_loop */
mutex_lock(&loop_ctl_mutex);
for (i = 0; i < nr; i++)
@@ -2405,16 +2380,11 @@ static int loop_exit_cb(int id, void *ptr, void *data)
static void __exit loop_exit(void)
{
- unsigned long range;
-
- range = max_loop ? max_loop << part_shift : 1UL << MINORBITS;
-
mutex_lock(&loop_ctl_mutex);
idr_for_each(&loop_index_idr, &loop_exit_cb, NULL);
idr_destroy(&loop_index_idr);
- blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range);
unregister_blkdev(LOOP_MAJOR, "loop");
misc_deregister(&loop_misc);
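
Note: the loop conversion shows the caller side of the simplified claiming interface: bd_prepare_to_claim() and bd_abort_claiming() now take only the bdev and a holder cookie, so loop_configure() keys the cleanup off (mode & FMODE_EXCL) instead of a saved claimed_bdev pointer. The sequence, sketched:

        static int example_configure(struct block_device *bdev, fmode_t mode,
                                     void *holder)
        {
                int error = 0;

                if (!(mode & FMODE_EXCL)) {
                        error = bd_prepare_to_claim(bdev, holder);
                        if (error)
                                return error;
                }
                /* ... work that must not race with an exclusive open ... */
                if (!(mode & FMODE_EXCL))
                        bd_abort_claiming(bdev, holder);
                return error;
        }
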
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 153e2cdecb4d..53ac59d19ae5 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3687,7 +3687,6 @@ skip_create_disk:
/* Enable the block device and add it to /dev */
device_add_disk(&dd->pdev->dev, dd->disk, NULL);
- dd->bdev = bdget_disk(dd->disk, 0);
/*
* Now that the disk is active, initialize any sysfs attributes
* managed by the protocol layer.
@@ -3721,9 +3720,6 @@ start_service_thread:
return rv;
kthread_run_error:
- bdput(dd->bdev);
- dd->bdev = NULL;
-
/* Delete our gendisk. This also removes the device from /dev */
del_gendisk(dd->disk);
@@ -3804,14 +3800,6 @@ static int mtip_block_remove(struct driver_data *dd)
blk_mq_tagset_busy_iter(&dd->tags, mtip_no_dev_cleanup, dd);
blk_mq_unquiesce_queue(dd->queue);
- /*
- * Delete our gendisk structure. This also removes the device
- * from /dev
- */
- if (dd->bdev) {
- bdput(dd->bdev);
- dd->bdev = NULL;
- }
if (dd->disk) {
if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
del_gendisk(dd->disk);
@@ -4206,9 +4194,6 @@ static void mtip_pci_remove(struct pci_dev *pdev)
} while (atomic_read(&dd->irq_workers_active) != 0 &&
time_before(jiffies, to));
- if (!dd->sr)
- fsync_bdev(dd->bdev);
-
if (atomic_read(&dd->irq_workers_active) != 0) {
dev_warn(&dd->pdev->dev,
"Completion workers still active!\n");
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index e22a7f0523bf..88f4206310e4 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -463,8 +463,6 @@ struct driver_data {
int isr_binding;
- struct block_device *bdev;
-
struct list_head online_list; /* linkage for online list */
struct list_head remove_list; /* linkage for removing list */
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index aaae9220f3a0..e6ea5d344f87 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -296,40 +296,33 @@ static void nbd_size_clear(struct nbd_device *nbd)
}
}
-static void nbd_size_update(struct nbd_device *nbd, bool start)
+static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
+ loff_t blksize)
{
- struct nbd_config *config = nbd->config;
- struct block_device *bdev = bdget_disk(nbd->disk, 0);
- sector_t nr_sectors = config->bytesize >> 9;
+ if (!blksize)
+ blksize = NBD_DEF_BLKSIZE;
+ if (blksize < 512 || blksize > PAGE_SIZE || !is_power_of_2(blksize))
+ return -EINVAL;
- if (config->flags & NBD_FLAG_SEND_TRIM) {
- nbd->disk->queue->limits.discard_granularity = config->blksize;
- nbd->disk->queue->limits.discard_alignment = config->blksize;
+ nbd->config->bytesize = bytesize;
+ nbd->config->blksize = blksize;
+
+ if (!nbd->task_recv)
+ return 0;
+
+ if (nbd->config->flags & NBD_FLAG_SEND_TRIM) {
+ nbd->disk->queue->limits.discard_granularity = blksize;
+ nbd->disk->queue->limits.discard_alignment = blksize;
blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
}
- blk_queue_logical_block_size(nbd->disk->queue, config->blksize);
- blk_queue_physical_block_size(nbd->disk->queue, config->blksize);
- set_capacity(nbd->disk, nr_sectors);
- if (bdev) {
- if (bdev->bd_disk) {
- bd_set_nr_sectors(bdev, nr_sectors);
- if (start)
- set_blocksize(bdev, config->blksize);
- } else
- set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
- bdput(bdev);
- }
- kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
-}
+ blk_queue_logical_block_size(nbd->disk->queue, blksize);
+ blk_queue_physical_block_size(nbd->disk->queue, blksize);
-static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize,
- loff_t nr_blocks)
-{
- struct nbd_config *config = nbd->config;
- config->blksize = blocksize;
- config->bytesize = blocksize * nr_blocks;
- if (nbd->task_recv != NULL)
- nbd_size_update(nbd, false);
+ if (max_part)
+ set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
+ if (!set_capacity_and_notify(nbd->disk, bytesize >> 9))
+ kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
+ return 0;
}
static void nbd_complete_rq(struct request *req)
@@ -1029,6 +1022,12 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
if (!sock)
return err;
+ /*
+ * We need to make sure we don't get any errant requests while we're
+ * reallocating the ->socks array.
+ */
+ blk_mq_freeze_queue(nbd->disk->queue);
+
if (!netlink && !nbd->task_setup &&
!test_bit(NBD_RT_BOUND, &config->runtime_flags))
nbd->task_setup = current;
@@ -1067,10 +1066,12 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
nsock->cookie = 0;
socks[config->num_connections++] = nsock;
atomic_inc(&config->live_connections);
+ blk_mq_unfreeze_queue(nbd->disk->queue);
return 0;
put_socket:
+ blk_mq_unfreeze_queue(nbd->disk->queue);
sockfd_put(sock);
return err;
}
@@ -1140,7 +1141,7 @@ static void nbd_bdev_reset(struct block_device *bdev)
{
if (bdev->bd_openers > 1)
return;
- bd_set_nr_sectors(bdev, 0);
+ set_capacity(bdev->bd_disk, 0);
}
static void nbd_parse_flags(struct nbd_device *nbd)
@@ -1309,8 +1310,7 @@ static int nbd_start_device(struct nbd_device *nbd)
args->index = i;
queue_work(nbd->recv_workq, &args->work);
}
- nbd_size_update(nbd, true);
- return error;
+ return nbd_set_size(nbd, config->bytesize, config->blksize);
}
static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *bdev)
@@ -1352,14 +1352,6 @@ static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
nbd_config_put(nbd);
}
-static bool nbd_is_valid_blksize(unsigned long blksize)
-{
- if (!blksize || !is_power_of_2(blksize) || blksize < 512 ||
- blksize > PAGE_SIZE)
- return false;
- return true;
-}
-
static void nbd_set_cmd_timeout(struct nbd_device *nbd, u64 timeout)
{
nbd->tag_set.timeout = timeout * HZ;
@@ -1384,20 +1376,12 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
case NBD_SET_SOCK:
return nbd_add_socket(nbd, arg, false);
case NBD_SET_BLKSIZE:
- if (!arg)
- arg = NBD_DEF_BLKSIZE;
- if (!nbd_is_valid_blksize(arg))
- return -EINVAL;
- nbd_size_set(nbd, arg,
- div_s64(config->bytesize, arg));
- return 0;
+ return nbd_set_size(nbd, config->bytesize, arg);
case NBD_SET_SIZE:
- nbd_size_set(nbd, config->blksize,
- div_s64(arg, config->blksize));
- return 0;
+ return nbd_set_size(nbd, arg, config->blksize);
case NBD_SET_SIZE_BLOCKS:
- nbd_size_set(nbd, config->blksize, arg);
- return 0;
+ return nbd_set_size(nbd, arg * config->blksize,
+ config->blksize);
case NBD_SET_TIMEOUT:
nbd_set_cmd_timeout(nbd, arg);
return 0;
@@ -1501,9 +1485,11 @@ static int nbd_open(struct block_device *bdev, fmode_t mode)
refcount_set(&nbd->config_refs, 1);
refcount_inc(&nbd->refs);
mutex_unlock(&nbd->config_lock);
- set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
+ if (max_part)
+ set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
} else if (nbd_disconnected(nbd->config)) {
- set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
+ if (max_part)
+ set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
}
out:
mutex_unlock(&nbd_index_mutex);
@@ -1513,12 +1499,10 @@ out:
static void nbd_release(struct gendisk *disk, fmode_t mode)
{
struct nbd_device *nbd = disk->private_data;
- struct block_device *bdev = bdget_disk(disk, 0);
if (test_bit(NBD_RT_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) &&
- bdev->bd_openers == 0)
+ disk->part0->bd_openers == 0)
nbd_disconnect_and_put(nbd);
- bdput(bdev);
nbd_config_put(nbd);
nbd_put(nbd);
@@ -1815,18 +1799,11 @@ static int nbd_genl_size_set(struct genl_info *info, struct nbd_device *nbd)
if (info->attrs[NBD_ATTR_SIZE_BYTES])
bytes = nla_get_u64(info->attrs[NBD_ATTR_SIZE_BYTES]);
- if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]) {
+ if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES])
bsize = nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]);
- if (!bsize)
- bsize = NBD_DEF_BLKSIZE;
- if (!nbd_is_valid_blksize(bsize)) {
- printk(KERN_ERR "Invalid block size %llu\n", bsize);
- return -EINVAL;
- }
- }
if (bytes != config->bytesize || bsize != config->blksize)
- nbd_size_set(nbd, bsize, div64_u64(bytes, bsize));
+ return nbd_set_size(nbd, bytes, bsize);
return 0;
}
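
Note: every size-setting path in nbd (the NBD_SET_BLKSIZE, NBD_SET_SIZE and NBD_SET_SIZE_BLOCKS ioctls plus the netlink attribute) now funnels into nbd_set_size(), which owns the validation formerly done by nbd_is_valid_blksize(). The accepted block sizes reduce to this predicate (a restatement, not new logic):

        static bool example_valid_blksize(loff_t blksize)
        {
                /* power-of-two sizes between 512 bytes and the page size */
                return blksize >= 512 && blksize <= PAGE_SIZE &&
                       is_power_of_2(blksize);
        }
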
diff --git a/drivers/block/null_blk/Kconfig b/drivers/block/null_blk/Kconfig
new file mode 100644
index 000000000000..6bf1f8ca20a2
--- /dev/null
+++ b/drivers/block/null_blk/Kconfig
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Null block device driver configuration
+#
+
+config BLK_DEV_NULL_BLK
+ tristate "Null test block driver"
+ select CONFIGFS_FS
+
+config BLK_DEV_NULL_BLK_FAULT_INJECTION
+ bool "Support fault injection for Null test block driver"
+ depends on BLK_DEV_NULL_BLK && FAULT_INJECTION
diff --git a/drivers/block/null_blk/Makefile b/drivers/block/null_blk/Makefile
new file mode 100644
index 000000000000..84c36e512ab8
--- /dev/null
+++ b/drivers/block/null_blk/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+
+# needed for trace events
+ccflags-y += -I$(src)
+
+obj-$(CONFIG_BLK_DEV_NULL_BLK) += null_blk.o
+null_blk-objs := main.o
+ifeq ($(CONFIG_BLK_DEV_ZONED), y)
+null_blk-$(CONFIG_TRACING) += trace.o
+endif
+null_blk-$(CONFIG_BLK_DEV_ZONED) += zoned.o
diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk/main.c
index 4685ea401d5b..5357c3a4a36f 100644
--- a/drivers/block/null_blk_main.c
+++ b/drivers/block/null_blk/main.c
@@ -152,6 +152,10 @@ static int g_bs = 512;
module_param_named(bs, g_bs, int, 0444);
MODULE_PARM_DESC(bs, "Block size (in bytes)");
+static int g_max_sectors;
+module_param_named(max_sectors, g_max_sectors, int, 0444);
+MODULE_PARM_DESC(max_sectors, "Maximum size of a command (in 512B sectors)");
+
static unsigned int nr_devices = 1;
module_param(nr_devices, uint, 0444);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");
@@ -346,6 +350,7 @@ NULLB_DEVICE_ATTR(submit_queues, uint, nullb_apply_submit_queues);
NULLB_DEVICE_ATTR(home_node, uint, NULL);
NULLB_DEVICE_ATTR(queue_mode, uint, NULL);
NULLB_DEVICE_ATTR(blocksize, uint, NULL);
+NULLB_DEVICE_ATTR(max_sectors, uint, NULL);
NULLB_DEVICE_ATTR(irqmode, uint, NULL);
NULLB_DEVICE_ATTR(hw_queue_depth, uint, NULL);
NULLB_DEVICE_ATTR(index, uint, NULL);
@@ -463,6 +468,7 @@ static struct configfs_attribute *nullb_device_attrs[] = {
&nullb_device_attr_home_node,
&nullb_device_attr_queue_mode,
&nullb_device_attr_blocksize,
+ &nullb_device_attr_max_sectors,
&nullb_device_attr_irqmode,
&nullb_device_attr_hw_queue_depth,
&nullb_device_attr_index,
@@ -533,7 +539,7 @@ nullb_group_drop_item(struct config_group *group, struct config_item *item)
static ssize_t memb_group_features_show(struct config_item *item, char *page)
{
return snprintf(page, PAGE_SIZE,
- "memory_backed,discard,bandwidth,cache,badblocks,zoned,zone_size,zone_capacity,zone_nr_conv,zone_max_open,zone_max_active\n");
+ "memory_backed,discard,bandwidth,cache,badblocks,zoned,zone_size,zone_capacity,zone_nr_conv,zone_max_open,zone_max_active,blocksize,max_sectors\n");
}
CONFIGFS_ATTR_RO(memb_group_, features);
@@ -588,6 +594,7 @@ static struct nullb_device *null_alloc_dev(void)
dev->home_node = g_home_node;
dev->queue_mode = g_queue_mode;
dev->blocksize = g_bs;
+ dev->max_sectors = g_max_sectors;
dev->irqmode = g_irqmode;
dev->hw_queue_depth = g_hw_queue_depth;
dev->blocking = g_blocking;
@@ -1076,13 +1083,16 @@ static void nullb_fill_pattern(struct nullb *nullb, struct page *page,
kunmap_atomic(dst);
}
-static void null_handle_discard(struct nullb *nullb, sector_t sector, size_t n)
+blk_status_t null_handle_discard(struct nullb_device *dev,
+ sector_t sector, sector_t nr_sectors)
{
+ struct nullb *nullb = dev->nullb;
+ size_t n = nr_sectors << SECTOR_SHIFT;
size_t temp;
spin_lock_irq(&nullb->lock);
while (n > 0) {
- temp = min_t(size_t, n, nullb->dev->blocksize);
+ temp = min_t(size_t, n, dev->blocksize);
null_free_sector(nullb, sector, false);
if (null_cache_active(nullb))
null_free_sector(nullb, sector, true);
@@ -1090,6 +1100,8 @@ static void null_handle_discard(struct nullb *nullb, sector_t sector, size_t n)
n -= temp;
}
spin_unlock_irq(&nullb->lock);
+
+ return BLK_STS_OK;
}
static int null_handle_flush(struct nullb *nullb)
@@ -1149,17 +1161,10 @@ static int null_handle_rq(struct nullb_cmd *cmd)
struct nullb *nullb = cmd->nq->dev->nullb;
int err;
unsigned int len;
- sector_t sector;
+ sector_t sector = blk_rq_pos(rq);
struct req_iterator iter;
struct bio_vec bvec;
- sector = blk_rq_pos(rq);
-
- if (req_op(rq) == REQ_OP_DISCARD) {
- null_handle_discard(nullb, sector, blk_rq_bytes(rq));
- return 0;
- }
-
spin_lock_irq(&nullb->lock);
rq_for_each_segment(bvec, rq, iter) {
len = bvec.bv_len;
@@ -1183,18 +1188,10 @@ static int null_handle_bio(struct nullb_cmd *cmd)
struct nullb *nullb = cmd->nq->dev->nullb;
int err;
unsigned int len;
- sector_t sector;
+ sector_t sector = bio->bi_iter.bi_sector;
struct bio_vec bvec;
struct bvec_iter iter;
- sector = bio->bi_iter.bi_sector;
-
- if (bio_op(bio) == REQ_OP_DISCARD) {
- null_handle_discard(nullb, sector,
- bio_sectors(bio) << SECTOR_SHIFT);
- return 0;
- }
-
spin_lock_irq(&nullb->lock);
bio_for_each_segment(bvec, bio, iter) {
len = bvec.bv_len;
@@ -1263,11 +1260,16 @@ static inline blk_status_t null_handle_badblocks(struct nullb_cmd *cmd,
}
static inline blk_status_t null_handle_memory_backed(struct nullb_cmd *cmd,
- enum req_opf op)
+ enum req_opf op,
+ sector_t sector,
+ sector_t nr_sectors)
{
struct nullb_device *dev = cmd->nq->dev;
int err;
+ if (op == REQ_OP_DISCARD)
+ return null_handle_discard(dev, sector, nr_sectors);
+
if (dev->queue_mode == NULL_Q_BIO)
err = null_handle_bio(cmd);
else
@@ -1343,7 +1345,7 @@ blk_status_t null_process_cmd(struct nullb_cmd *cmd,
}
if (dev->memory_backed)
- return null_handle_memory_backed(cmd, op);
+ return null_handle_memory_backed(cmd, op, sector, nr_sectors);
return BLK_STS_OK;
}
@@ -1589,6 +1591,12 @@ static void null_config_discard(struct nullb *nullb)
if (nullb->dev->discard == false)
return;
+ if (!nullb->dev->memory_backed) {
+ nullb->dev->discard = false;
+ pr_info("discard option is ignored without memory backing\n");
+ return;
+ }
+
if (nullb->dev->zoned) {
nullb->dev->discard = false;
pr_info("discard option is ignored in zoned mode\n");
@@ -1866,6 +1874,11 @@ static int null_add_dev(struct nullb_device *dev)
blk_queue_logical_block_size(nullb->q, dev->blocksize);
blk_queue_physical_block_size(nullb->q, dev->blocksize);
+ if (!dev->max_sectors)
+ dev->max_sectors = queue_max_hw_sectors(nullb->q);
+ dev->max_sectors = min_t(unsigned int, dev->max_sectors,
+ BLK_DEF_MAX_SECTORS);
+ blk_queue_max_hw_sectors(nullb->q, dev->max_sectors);
null_config_discard(nullb);
@@ -1909,6 +1922,12 @@ static int __init null_init(void)
g_bs = PAGE_SIZE;
}
+ if (g_max_sectors > BLK_DEF_MAX_SECTORS) {
+ pr_warn("invalid max sectors\n");
+ pr_warn("defaults max sectors to %u\n", BLK_DEF_MAX_SECTORS);
+ g_max_sectors = BLK_DEF_MAX_SECTORS;
+ }
+
if (g_home_node != NUMA_NO_NODE && g_home_node >= nr_online_nodes) {
pr_err("invalid home_node value\n");
g_home_node = NUMA_NO_NODE;
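
Note: the new max_sectors knob is applied in two stages: module-load validation clamps the global default to BLK_DEF_MAX_SECTORS, and per-device setup falls back to the queue's hardware limit when the value is zero. Condensed into one illustrative helper:

        static void example_apply_max_sectors(struct nullb_device *dev,
                                              struct request_queue *q)
        {
                if (!dev->max_sectors)
                        dev->max_sectors = queue_max_hw_sectors(q);
                dev->max_sectors = min_t(unsigned int, dev->max_sectors,
                                         BLK_DEF_MAX_SECTORS);
                blk_queue_max_hw_sectors(q, dev->max_sectors);
        }
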
diff --git a/drivers/block/null_blk.h b/drivers/block/null_blk/null_blk.h
index c24d9b5ad81a..83504f3cc9d6 100644
--- a/drivers/block/null_blk.h
+++ b/drivers/block/null_blk/null_blk.h
@@ -12,6 +12,8 @@
#include <linux/configfs.h>
#include <linux/badblocks.h>
#include <linux/fault-inject.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
struct nullb_cmd {
struct request *rq;
@@ -32,6 +34,26 @@ struct nullb_queue {
struct nullb_cmd *cmds;
};
+struct nullb_zone {
+ /*
+ * Zone lock to prevent concurrent modification of a zone write
+ * pointer position and condition: with memory backing, a write
+ * command execution may sleep on memory allocation. For this case,
+ * use mutex as the zone lock. Otherwise, use the spinlock for
+ * locking the zone.
+ */
+ union {
+ spinlock_t spinlock;
+ struct mutex mutex;
+ };
+ enum blk_zone_type type;
+ enum blk_zone_cond cond;
+ sector_t start;
+ sector_t wp;
+ unsigned int len;
+ unsigned int capacity;
+};
+
struct nullb_device {
struct nullb *nullb;
struct config_item item;
@@ -45,10 +67,11 @@ struct nullb_device {
unsigned int nr_zones_imp_open;
unsigned int nr_zones_exp_open;
unsigned int nr_zones_closed;
- struct blk_zone *zones;
+ unsigned int imp_close_zone_no;
+ struct nullb_zone *zones;
sector_t zone_size_sects;
- spinlock_t zone_lock;
- unsigned long *zone_locks;
+ bool need_zone_res_mgmt;
+ spinlock_t zone_res_lock;
unsigned long size; /* device size in MB */
unsigned long completion_nsec; /* time in ns to complete a request */
@@ -62,6 +85,7 @@ struct nullb_device {
unsigned int home_node; /* home node for the device */
unsigned int queue_mode; /* block interface */
unsigned int blocksize; /* block size */
+ unsigned int max_sectors; /* Max sectors per command */
unsigned int irqmode; /* IRQ completion handler */
unsigned int hw_queue_depth; /* queue depth */
unsigned int index; /* index of the disk, only valid with a disk */
@@ -93,6 +117,8 @@ struct nullb {
char disk_name[DISK_NAME_LEN];
};
+blk_status_t null_handle_discard(struct nullb_device *dev, sector_t sector,
+ sector_t nr_sectors);
blk_status_t null_process_cmd(struct nullb_cmd *cmd,
enum req_opf op, sector_t sector,
unsigned int nr_sectors);
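
Note: the union in struct nullb_zone is safe because a device's backing mode is fixed at creation time. Per the comment above, memory-backed devices run with BLK_MQ_F_BLOCKING and may sleep under the zone lock, so they initialize and take the mutex member; all other devices use the spinlock member with interrupts disabled. The dispatch helpers defined in zoned.c below have this shape (sketch):

        static inline void example_lock_zone(struct nullb_device *dev,
                                             struct nullb_zone *zone)
        {
                if (dev->memory_backed)
                        mutex_lock(&zone->mutex);       /* may sleep */
                else
                        spin_lock_irq(&zone->spinlock);
        }
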
diff --git a/drivers/block/null_blk_trace.c b/drivers/block/null_blk/trace.c
index f246e7bff698..3711cba16071 100644
--- a/drivers/block/null_blk_trace.c
+++ b/drivers/block/null_blk/trace.c
@@ -4,7 +4,7 @@
*
* Copyright (C) 2020 Western Digital Corporation or its affiliates.
*/
-#include "null_blk_trace.h"
+#include "trace.h"
/*
* Helper to use for all null_blk traces to extract disk name.
diff --git a/drivers/block/null_blk_trace.h b/drivers/block/null_blk/trace.h
index 4f83032eb544..ce3b430e88c5 100644
--- a/drivers/block/null_blk_trace.h
+++ b/drivers/block/null_blk/trace.h
@@ -73,7 +73,7 @@ TRACE_EVENT(nullb_report_zones,
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_FILE null_blk_trace
+#define TRACE_INCLUDE_FILE trace
/* This part must be outside protection */
#include <trace/define_trace.h>
diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk/zoned.c
index beb34b4f76b0..fce0a54df0e5 100644
--- a/drivers/block/null_blk_zoned.c
+++ b/drivers/block/null_blk/zoned.c
@@ -4,19 +4,61 @@
#include "null_blk.h"
#define CREATE_TRACE_POINTS
-#include "null_blk_trace.h"
+#include "trace.h"
-/* zone_size in MBs to sectors. */
-#define ZONE_SIZE_SHIFT 11
+static inline sector_t mb_to_sects(unsigned long mb)
+{
+ return ((sector_t)mb * SZ_1M) >> SECTOR_SHIFT;
+}
static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect)
{
return sect >> ilog2(dev->zone_size_sects);
}
+static inline void null_lock_zone_res(struct nullb_device *dev)
+{
+ if (dev->need_zone_res_mgmt)
+ spin_lock_irq(&dev->zone_res_lock);
+}
+
+static inline void null_unlock_zone_res(struct nullb_device *dev)
+{
+ if (dev->need_zone_res_mgmt)
+ spin_unlock_irq(&dev->zone_res_lock);
+}
+
+static inline void null_init_zone_lock(struct nullb_device *dev,
+ struct nullb_zone *zone)
+{
+ if (!dev->memory_backed)
+ spin_lock_init(&zone->spinlock);
+ else
+ mutex_init(&zone->mutex);
+}
+
+static inline void null_lock_zone(struct nullb_device *dev,
+ struct nullb_zone *zone)
+{
+ if (!dev->memory_backed)
+ spin_lock_irq(&zone->spinlock);
+ else
+ mutex_lock(&zone->mutex);
+}
+
+static inline void null_unlock_zone(struct nullb_device *dev,
+ struct nullb_zone *zone)
+{
+ if (!dev->memory_backed)
+ spin_unlock_irq(&zone->spinlock);
+ else
+ mutex_unlock(&zone->mutex);
+}
+
int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
{
- sector_t dev_size = (sector_t)dev->size * 1024 * 1024;
+ sector_t dev_capacity_sects, zone_capacity_sects;
+ struct nullb_zone *zone;
sector_t sector = 0;
unsigned int i;
@@ -38,29 +80,18 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
return -EINVAL;
}
- dev->zone_size_sects = dev->zone_size << ZONE_SIZE_SHIFT;
- dev->nr_zones = dev_size >>
- (SECTOR_SHIFT + ilog2(dev->zone_size_sects));
- dev->zones = kvmalloc_array(dev->nr_zones, sizeof(struct blk_zone),
- GFP_KERNEL | __GFP_ZERO);
+ zone_capacity_sects = mb_to_sects(dev->zone_capacity);
+ dev_capacity_sects = mb_to_sects(dev->size);
+ dev->zone_size_sects = mb_to_sects(dev->zone_size);
+ dev->nr_zones = round_up(dev_capacity_sects, dev->zone_size_sects)
+ >> ilog2(dev->zone_size_sects);
+
+ dev->zones = kvmalloc_array(dev->nr_zones, sizeof(struct nullb_zone),
+ GFP_KERNEL | __GFP_ZERO);
if (!dev->zones)
return -ENOMEM;
- /*
- * With memory backing, the zone_lock spinlock needs to be temporarily
- * released to avoid scheduling in atomic context. To guarantee zone
- * information protection, use a bitmap to lock zones with
- * wait_on_bit_lock_io(). Sleeping on the lock is OK as memory backing
- * implies that the queue is marked with BLK_MQ_F_BLOCKING.
- */
- spin_lock_init(&dev->zone_lock);
- if (dev->memory_backed) {
- dev->zone_locks = bitmap_zalloc(dev->nr_zones, GFP_KERNEL);
- if (!dev->zone_locks) {
- kvfree(dev->zones);
- return -ENOMEM;
- }
- }
+ spin_lock_init(&dev->zone_res_lock);
if (dev->zone_nr_conv >= dev->nr_zones) {
dev->zone_nr_conv = dev->nr_zones - 1;
@@ -83,10 +114,13 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
dev->zone_max_open = 0;
pr_info("zone_max_open limit disabled, limit >= zone count\n");
}
+ dev->need_zone_res_mgmt = dev->zone_max_active || dev->zone_max_open;
+ dev->imp_close_zone_no = dev->zone_nr_conv;
for (i = 0; i < dev->zone_nr_conv; i++) {
- struct blk_zone *zone = &dev->zones[i];
+ zone = &dev->zones[i];
+ null_init_zone_lock(dev, zone);
zone->start = sector;
zone->len = dev->zone_size_sects;
zone->capacity = zone->len;
@@ -98,11 +132,16 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
}
for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
- struct blk_zone *zone = &dev->zones[i];
+ zone = &dev->zones[i];
+ null_init_zone_lock(dev, zone);
zone->start = zone->wp = sector;
- zone->len = dev->zone_size_sects;
- zone->capacity = dev->zone_capacity << ZONE_SIZE_SHIFT;
+ if (zone->start + dev->zone_size_sects > dev_capacity_sects)
+ zone->len = dev_capacity_sects - zone->start;
+ else
+ zone->len = dev->zone_size_sects;
+ zone->capacity =
+ min_t(sector_t, zone->len, zone_capacity_sects);
zone->type = BLK_ZONE_TYPE_SEQWRITE_REQ;
zone->cond = BLK_ZONE_COND_EMPTY;
@@ -140,32 +179,17 @@ int null_register_zoned_dev(struct nullb *nullb)
void null_free_zoned_dev(struct nullb_device *dev)
{
- bitmap_free(dev->zone_locks);
kvfree(dev->zones);
}
-static inline void null_lock_zone(struct nullb_device *dev, unsigned int zno)
-{
- if (dev->memory_backed)
- wait_on_bit_lock_io(dev->zone_locks, zno, TASK_UNINTERRUPTIBLE);
- spin_lock_irq(&dev->zone_lock);
-}
-
-static inline void null_unlock_zone(struct nullb_device *dev, unsigned int zno)
-{
- spin_unlock_irq(&dev->zone_lock);
-
- if (dev->memory_backed)
- clear_and_wake_up_bit(zno, dev->zone_locks);
-}
-
int null_report_zones(struct gendisk *disk, sector_t sector,
unsigned int nr_zones, report_zones_cb cb, void *data)
{
struct nullb *nullb = disk->private_data;
struct nullb_device *dev = nullb->dev;
- unsigned int first_zone, i, zno;
- struct blk_zone zone;
+ unsigned int first_zone, i;
+ struct nullb_zone *zone;
+ struct blk_zone blkz;
int error;
first_zone = null_zone_no(dev, sector);
@@ -175,19 +199,25 @@ int null_report_zones(struct gendisk *disk, sector_t sector,
nr_zones = min(nr_zones, dev->nr_zones - first_zone);
trace_nullb_report_zones(nullb, nr_zones);
- zno = first_zone;
- for (i = 0; i < nr_zones; i++, zno++) {
+ memset(&blkz, 0, sizeof(struct blk_zone));
+ zone = &dev->zones[first_zone];
+ for (i = 0; i < nr_zones; i++, zone++) {
/*
* Stacked DM target drivers will remap the zone information by
* modifying the zone information passed to the report callback.
* So use a local copy to avoid corruption of the device zone
* array.
*/
- null_lock_zone(dev, zno);
- memcpy(&zone, &dev->zones[zno], sizeof(struct blk_zone));
- null_unlock_zone(dev, zno);
-
- error = cb(&zone, i, data);
+ null_lock_zone(dev, zone);
+ blkz.start = zone->start;
+ blkz.len = zone->len;
+ blkz.wp = zone->wp;
+ blkz.type = zone->type;
+ blkz.cond = zone->cond;
+ blkz.capacity = zone->capacity;
+ null_unlock_zone(dev, zone);
+
+ error = cb(&blkz, i, data);
if (error)
return error;
}
@@ -203,7 +233,7 @@ size_t null_zone_valid_read_len(struct nullb *nullb,
sector_t sector, unsigned int len)
{
struct nullb_device *dev = nullb->dev;
- struct blk_zone *zone = &dev->zones[null_zone_no(dev, sector)];
+ struct nullb_zone *zone = &dev->zones[null_zone_no(dev, sector)];
unsigned int nr_sectors = len >> SECTOR_SHIFT;
/* Read must be below the write pointer position */
@@ -217,11 +247,9 @@ size_t null_zone_valid_read_len(struct nullb *nullb,
return (zone->wp - sector) << SECTOR_SHIFT;
}
-static blk_status_t null_close_zone(struct nullb_device *dev, struct blk_zone *zone)
+static blk_status_t __null_close_zone(struct nullb_device *dev,
+ struct nullb_zone *zone)
{
- if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
- return BLK_STS_IOERR;
-
switch (zone->cond) {
case BLK_ZONE_COND_CLOSED:
/* close operation on closed is not an error */
@@ -248,13 +276,24 @@ static blk_status_t null_close_zone(struct nullb_device *dev, struct blk_zone *z
return BLK_STS_OK;
}
-static void null_close_first_imp_zone(struct nullb_device *dev)
+static void null_close_imp_open_zone(struct nullb_device *dev)
{
- unsigned int i;
+ struct nullb_zone *zone;
+ unsigned int zno, i;
+
+ zno = dev->imp_close_zone_no;
+ if (zno >= dev->nr_zones)
+ zno = dev->zone_nr_conv;
for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
- if (dev->zones[i].cond == BLK_ZONE_COND_IMP_OPEN) {
- null_close_zone(dev, &dev->zones[i]);
+ zone = &dev->zones[zno];
+ zno++;
+ if (zno >= dev->nr_zones)
+ zno = dev->zone_nr_conv;
+
+ if (zone->cond == BLK_ZONE_COND_IMP_OPEN) {
+ __null_close_zone(dev, zone);
+ dev->imp_close_zone_no = zno;
return;
}
}
@@ -282,7 +321,7 @@ static blk_status_t null_check_open(struct nullb_device *dev)
if (dev->nr_zones_imp_open) {
if (null_check_active(dev) == BLK_STS_OK) {
- null_close_first_imp_zone(dev);
+ null_close_imp_open_zone(dev);
return BLK_STS_OK;
}
}
@@ -303,7 +342,8 @@ static blk_status_t null_check_open(struct nullb_device *dev)
* it is not certain that closing an implicit open zone will allow a new zone
* to be opened, since we might already be at the active limit capacity.
*/
-static blk_status_t null_check_zone_resources(struct nullb_device *dev, struct blk_zone *zone)
+static blk_status_t null_check_zone_resources(struct nullb_device *dev,
+ struct nullb_zone *zone)
{
blk_status_t ret;
@@ -327,34 +367,23 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
{
struct nullb_device *dev = cmd->nq->dev;
unsigned int zno = null_zone_no(dev, sector);
- struct blk_zone *zone = &dev->zones[zno];
+ struct nullb_zone *zone = &dev->zones[zno];
blk_status_t ret;
trace_nullb_zone_op(cmd, zno, zone->cond);
- if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
+ if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) {
+ if (append)
+ return BLK_STS_IOERR;
return null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
+ }
- null_lock_zone(dev, zno);
+ null_lock_zone(dev, zone);
- switch (zone->cond) {
- case BLK_ZONE_COND_FULL:
+ if (zone->cond == BLK_ZONE_COND_FULL) {
/* Cannot write to a full zone */
ret = BLK_STS_IOERR;
goto unlock;
- case BLK_ZONE_COND_EMPTY:
- case BLK_ZONE_COND_CLOSED:
- ret = null_check_zone_resources(dev, zone);
- if (ret != BLK_STS_OK)
- goto unlock;
- break;
- case BLK_ZONE_COND_IMP_OPEN:
- case BLK_ZONE_COND_EXP_OPEN:
- break;
- default:
- /* Invalid zone condition */
- ret = BLK_STS_IOERR;
- goto unlock;
}
/*
@@ -379,60 +408,69 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
goto unlock;
}
- if (zone->cond == BLK_ZONE_COND_CLOSED) {
- dev->nr_zones_closed--;
- dev->nr_zones_imp_open++;
- } else if (zone->cond == BLK_ZONE_COND_EMPTY) {
- dev->nr_zones_imp_open++;
+ if (zone->cond == BLK_ZONE_COND_CLOSED ||
+ zone->cond == BLK_ZONE_COND_EMPTY) {
+ null_lock_zone_res(dev);
+
+ ret = null_check_zone_resources(dev, zone);
+ if (ret != BLK_STS_OK) {
+ null_unlock_zone_res(dev);
+ goto unlock;
+ }
+ if (zone->cond == BLK_ZONE_COND_CLOSED) {
+ dev->nr_zones_closed--;
+ dev->nr_zones_imp_open++;
+ } else if (zone->cond == BLK_ZONE_COND_EMPTY) {
+ dev->nr_zones_imp_open++;
+ }
+
+ if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
+ zone->cond = BLK_ZONE_COND_IMP_OPEN;
+
+ null_unlock_zone_res(dev);
}
- if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
- zone->cond = BLK_ZONE_COND_IMP_OPEN;
- /*
- * Memory backing allocation may sleep: release the zone_lock spinlock
- * to avoid scheduling in atomic context. Zone operation atomicity is
- * still guaranteed through the zone_locks bitmap.
- */
- if (dev->memory_backed)
- spin_unlock_irq(&dev->zone_lock);
ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
- if (dev->memory_backed)
- spin_lock_irq(&dev->zone_lock);
-
if (ret != BLK_STS_OK)
goto unlock;
zone->wp += nr_sectors;
if (zone->wp == zone->start + zone->capacity) {
+ null_lock_zone_res(dev);
if (zone->cond == BLK_ZONE_COND_EXP_OPEN)
dev->nr_zones_exp_open--;
else if (zone->cond == BLK_ZONE_COND_IMP_OPEN)
dev->nr_zones_imp_open--;
zone->cond = BLK_ZONE_COND_FULL;
+ null_unlock_zone_res(dev);
}
+
ret = BLK_STS_OK;
unlock:
- null_unlock_zone(dev, zno);
+ null_unlock_zone(dev, zone);
return ret;
}
-static blk_status_t null_open_zone(struct nullb_device *dev, struct blk_zone *zone)
+static blk_status_t null_open_zone(struct nullb_device *dev,
+ struct nullb_zone *zone)
{
- blk_status_t ret;
+ blk_status_t ret = BLK_STS_OK;
if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
return BLK_STS_IOERR;
+ null_lock_zone_res(dev);
+
switch (zone->cond) {
case BLK_ZONE_COND_EXP_OPEN:
/* open operation on exp open is not an error */
- return BLK_STS_OK;
+ goto unlock;
case BLK_ZONE_COND_EMPTY:
ret = null_check_zone_resources(dev, zone);
if (ret != BLK_STS_OK)
- return ret;
+ goto unlock;
break;
case BLK_ZONE_COND_IMP_OPEN:
dev->nr_zones_imp_open--;
@@ -440,35 +478,57 @@ static blk_status_t null_open_zone(struct nullb_device *dev, struct blk_zone *zo
case BLK_ZONE_COND_CLOSED:
ret = null_check_zone_resources(dev, zone);
if (ret != BLK_STS_OK)
- return ret;
+ goto unlock;
dev->nr_zones_closed--;
break;
case BLK_ZONE_COND_FULL:
default:
- return BLK_STS_IOERR;
+ ret = BLK_STS_IOERR;
+ goto unlock;
}
zone->cond = BLK_ZONE_COND_EXP_OPEN;
dev->nr_zones_exp_open++;
- return BLK_STS_OK;
+unlock:
+ null_unlock_zone_res(dev);
+
+ return ret;
}
-static blk_status_t null_finish_zone(struct nullb_device *dev, struct blk_zone *zone)
+static blk_status_t null_close_zone(struct nullb_device *dev,
+ struct nullb_zone *zone)
{
blk_status_t ret;
if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
return BLK_STS_IOERR;
+ null_lock_zone_res(dev);
+ ret = __null_close_zone(dev, zone);
+ null_unlock_zone_res(dev);
+
+ return ret;
+}
+
+static blk_status_t null_finish_zone(struct nullb_device *dev,
+ struct nullb_zone *zone)
+{
+ blk_status_t ret = BLK_STS_OK;
+
+ if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
+ return BLK_STS_IOERR;
+
+ null_lock_zone_res(dev);
+
switch (zone->cond) {
case BLK_ZONE_COND_FULL:
/* finish operation on full is not an error */
- return BLK_STS_OK;
+ goto unlock;
case BLK_ZONE_COND_EMPTY:
ret = null_check_zone_resources(dev, zone);
if (ret != BLK_STS_OK)
- return ret;
+ goto unlock;
break;
case BLK_ZONE_COND_IMP_OPEN:
dev->nr_zones_imp_open--;
@@ -479,27 +539,35 @@ static blk_status_t null_finish_zone(struct nullb_device *dev, struct blk_zone *
case BLK_ZONE_COND_CLOSED:
ret = null_check_zone_resources(dev, zone);
if (ret != BLK_STS_OK)
- return ret;
+ goto unlock;
dev->nr_zones_closed--;
break;
default:
- return BLK_STS_IOERR;
+ ret = BLK_STS_IOERR;
+ goto unlock;
}
zone->cond = BLK_ZONE_COND_FULL;
zone->wp = zone->start + zone->len;
- return BLK_STS_OK;
+unlock:
+ null_unlock_zone_res(dev);
+
+ return ret;
}
-static blk_status_t null_reset_zone(struct nullb_device *dev, struct blk_zone *zone)
+static blk_status_t null_reset_zone(struct nullb_device *dev,
+ struct nullb_zone *zone)
{
if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
return BLK_STS_IOERR;
+ null_lock_zone_res(dev);
+
switch (zone->cond) {
case BLK_ZONE_COND_EMPTY:
/* reset operation on empty is not an error */
+ null_unlock_zone_res(dev);
return BLK_STS_OK;
case BLK_ZONE_COND_IMP_OPEN:
dev->nr_zones_imp_open--;
@@ -513,12 +581,18 @@ static blk_status_t null_reset_zone(struct nullb_device *dev, struct blk_zone *z
case BLK_ZONE_COND_FULL:
break;
default:
+ null_unlock_zone_res(dev);
return BLK_STS_IOERR;
}
zone->cond = BLK_ZONE_COND_EMPTY;
zone->wp = zone->start;
+ null_unlock_zone_res(dev);
+
+ if (dev->memory_backed)
+ return null_handle_discard(dev, zone->start, zone->len);
+
return BLK_STS_OK;
}
@@ -527,19 +601,19 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
{
struct nullb_device *dev = cmd->nq->dev;
unsigned int zone_no;
- struct blk_zone *zone;
+ struct nullb_zone *zone;
blk_status_t ret;
size_t i;
if (op == REQ_OP_ZONE_RESET_ALL) {
for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
- null_lock_zone(dev, i);
zone = &dev->zones[i];
+ null_lock_zone(dev, zone);
if (zone->cond != BLK_ZONE_COND_EMPTY) {
null_reset_zone(dev, zone);
trace_nullb_zone_op(cmd, i, zone->cond);
}
- null_unlock_zone(dev, i);
+ null_unlock_zone(dev, zone);
}
return BLK_STS_OK;
}
@@ -547,7 +621,7 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
zone_no = null_zone_no(dev, sector);
zone = &dev->zones[zone_no];
- null_lock_zone(dev, zone_no);
+ null_lock_zone(dev, zone);
switch (op) {
case REQ_OP_ZONE_RESET:
@@ -570,7 +644,7 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
if (ret == BLK_STS_OK)
trace_nullb_zone_op(cmd, zone_no, zone->cond);
- null_unlock_zone(dev, zone_no);
+ null_unlock_zone(dev, zone);
return ret;
}
@@ -578,29 +652,28 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_opf op,
sector_t sector, sector_t nr_sectors)
{
- struct nullb_device *dev = cmd->nq->dev;
- unsigned int zno = null_zone_no(dev, sector);
+ struct nullb_device *dev;
+ struct nullb_zone *zone;
blk_status_t sts;
switch (op) {
case REQ_OP_WRITE:
- sts = null_zone_write(cmd, sector, nr_sectors, false);
- break;
+ return null_zone_write(cmd, sector, nr_sectors, false);
case REQ_OP_ZONE_APPEND:
- sts = null_zone_write(cmd, sector, nr_sectors, true);
- break;
+ return null_zone_write(cmd, sector, nr_sectors, true);
case REQ_OP_ZONE_RESET:
case REQ_OP_ZONE_RESET_ALL:
case REQ_OP_ZONE_OPEN:
case REQ_OP_ZONE_CLOSE:
case REQ_OP_ZONE_FINISH:
- sts = null_zone_mgmt(cmd, op, sector);
- break;
+ return null_zone_mgmt(cmd, op, sector);
default:
- null_lock_zone(dev, zno);
+ dev = cmd->nq->dev;
+ zone = &dev->zones[null_zone_no(dev, sector)];
+
+ null_lock_zone(dev, zone);
sts = null_process_cmd(cmd, op, sector, nr_sectors);
- null_unlock_zone(dev, zno);
+ null_unlock_zone(dev, zone);
+ return sts;
}
-
- return sts;
}
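
The null_finish_zone() hunk above converts every early return into a jump to a single unlock label, so the new zone-resource lock is always dropped on the path it was taken. A minimal sketch of the idiom, with hypothetical names (res_lock, check_resources) standing in for the driver's helpers:

    static blk_status_t finish_zone(struct nullb_device *dev,
                                    struct nullb_zone *zone)
    {
            blk_status_t ret = BLK_STS_OK;

            spin_lock(&dev->res_lock);      /* assumption: a spinlock guards the counters */
            switch (zone->cond) {
            case BLK_ZONE_COND_FULL:
                    goto unlock;            /* finishing a full zone is a no-op */
            case BLK_ZONE_COND_EMPTY:
                    ret = check_resources(dev, zone);   /* hypothetical helper */
                    if (ret != BLK_STS_OK)
                            goto unlock;
                    break;
            default:
                    ret = BLK_STS_IOERR;
                    goto unlock;
            }
            zone->cond = BLK_ZONE_COND_FULL;
            zone->wp = zone->start + zone->len;
    unlock:
            spin_unlock(&dev->res_lock);
            return ret;
    }
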
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 467dbd06b7cd..b8bb8ec7538d 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2130,8 +2130,7 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
}
set_capacity(pd->disk, lba << 2);
- set_capacity(pd->bdev->bd_disk, lba << 2);
- bd_set_nr_sectors(pd->bdev, lba << 2);
+ set_capacity_and_notify(pd->bdev->bd_disk, lba << 2);
q = bdev_get_queue(pd->bdev);
if (write) {
@@ -2584,9 +2583,11 @@ static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
case CDROM_LAST_WRITTEN:
case CDROM_SEND_PACKET:
case SCSI_IOCTL_SEND_COMMAND:
- ret = __blkdev_driver_ioctl(pd->bdev, mode, cmd, arg);
+ if (!bdev->bd_disk->fops->ioctl)
+ ret = -ENOTTY;
+ else
+ ret = bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg);
break;
-
default:
pkt_dbg(2, pd, "Unknown ioctl (%x)\n", cmd);
ret = -ENOTTY;
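
Both pktcdvd call sites collapse into set_capacity_and_notify(), which pairs the capacity update with the RESIZE uevent that userspace listens for. A condensed sketch of what the 5.11-era helper does (simplified; the real one also handles hidden disks and logs the change):

    static bool sketch_set_capacity_and_notify(struct gendisk *disk,
                                               sector_t size)
    {
            sector_t old = get_capacity(disk);
            char *envp[] = { "RESIZE=1", NULL };

            set_capacity(disk, size);

            /* stay quiet during probe and when nothing changed */
            if (size == old || !(disk->flags & GENHD_FL_UP))
                    return false;

            kobject_uevent_env(&disk_to_dev(disk)->kobj, KOBJ_CHANGE, envp);
            return true;
    }
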
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index 7b55811c2a81..ba3ece56cbb3 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -507,7 +507,7 @@ fail:
return error;
}
-static int ps3disk_remove(struct ps3_system_bus_device *_dev)
+static void ps3disk_remove(struct ps3_system_bus_device *_dev)
{
struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core);
struct ps3disk_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
@@ -526,7 +526,6 @@ static int ps3disk_remove(struct ps3_system_bus_device *_dev)
kfree(dev->bounce_buf);
kfree(priv);
ps3_system_bus_set_drvdata(_dev, NULL);
- return 0;
}
static struct ps3_system_bus_driver ps3disk = {
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index 1088798c8dd0..b71d28372ef3 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -797,7 +797,7 @@ fail:
return error;
}
-static int ps3vram_remove(struct ps3_system_bus_device *dev)
+static void ps3vram_remove(struct ps3_system_bus_device *dev)
{
struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
@@ -817,7 +817,6 @@ static int ps3vram_remove(struct ps3_system_bus_device *dev)
free_pages((unsigned long) priv->xdr_buf, get_order(XDR_BUF_SIZE));
kfree(priv);
ps3_system_bus_set_drvdata(dev, NULL);
- return 0;
}
static struct ps3_system_bus_driver ps3vram = {
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index f84128abade3..59cfe71d0b3a 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -692,12 +692,9 @@ static void rbd_release(struct gendisk *disk, fmode_t mode)
put_device(&rbd_dev->dev);
}
-static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
+static int rbd_set_read_only(struct block_device *bdev, bool ro)
{
- int ro;
-
- if (get_user(ro, (int __user *)arg))
- return -EFAULT;
+ struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
/*
* Both images mapped read-only and snapshots can't be marked
@@ -710,43 +707,14 @@ static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
rbd_assert(!rbd_is_snap(rbd_dev));
}
- /* Let blkdev_roset() handle it */
- return -ENOTTY;
-}
-
-static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
-{
- struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
- int ret;
-
- switch (cmd) {
- case BLKROSET:
- ret = rbd_ioctl_set_ro(rbd_dev, arg);
- break;
- default:
- ret = -ENOTTY;
- }
-
- return ret;
-}
-
-#ifdef CONFIG_COMPAT
-static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
-{
- return rbd_ioctl(bdev, mode, cmd, arg);
+ return 0;
}
-#endif /* CONFIG_COMPAT */
static const struct block_device_operations rbd_bd_ops = {
.owner = THIS_MODULE,
.open = rbd_open,
.release = rbd_release,
- .ioctl = rbd_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = rbd_compat_ioctl,
-#endif
+ .set_read_only = rbd_set_read_only,
};
/*
@@ -3957,8 +3925,12 @@ static int find_watcher(struct rbd_device *rbd_dev,
sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie);
for (i = 0; i < num_watchers; i++) {
- if (!memcmp(&watchers[i].addr, &locker->info.addr,
- sizeof(locker->info.addr)) &&
+ /*
+ * Ignore addr->type while comparing. This mimics
+ * entity_addr_t::get_legacy_str() + strcmp().
+ */
+ if (ceph_addr_equal_no_type(&watchers[i].addr,
+ &locker->info.addr) &&
watchers[i].cookie == cookie) {
struct rbd_client_id cid = {
.gid = le64_to_cpu(watchers[i].name.num),
@@ -4920,8 +4892,7 @@ static void rbd_dev_update_size(struct rbd_device *rbd_dev)
!test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
dout("setting size to %llu sectors", (unsigned long long)size);
- set_capacity(rbd_dev->disk, size);
- revalidate_disk_size(rbd_dev->disk, true);
+ set_capacity_and_notify(rbd_dev->disk, size);
}
}
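
With the ioctl handler gone, rbd participates in BLKROSET through the new ->set_read_only() hook: the block core validates the ioctl, calls the hook, and only applies the read-only policy itself when the hook returns 0. A sketch of a driver using the hook (the mydev/mydrv names are illustrative):

    static int mydrv_set_read_only(struct block_device *bdev, bool ro)
    {
            struct mydev *d = bdev->bd_disk->private_data;  /* hypothetical */

            if (d->is_snapshot && !ro)
                    return -EROFS;  /* veto: snapshots stay read-only */
            return 0;               /* 0 lets the core apply the change */
    }

    static const struct block_device_operations mydrv_fops = {
            .owner         = THIS_MODULE,
            .set_read_only = mydrv_set_read_only,
    };
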
diff --git a/drivers/block/rnbd/Kconfig b/drivers/block/rnbd/Kconfig
index 4b6d3d816d1f..2ff05a0d2646 100644
--- a/drivers/block/rnbd/Kconfig
+++ b/drivers/block/rnbd/Kconfig
@@ -7,6 +7,7 @@ config BLK_DEV_RNBD_CLIENT
tristate "RDMA Network Block Device driver client"
depends on INFINIBAND_RTRS_CLIENT
select BLK_DEV_RNBD
+ select SG_POOL
help
RNBD client is a network block device driver using rdma transport.
diff --git a/drivers/block/rnbd/README b/drivers/block/rnbd/README
index 1773c0aa0bd4..080f58a5400a 100644
--- a/drivers/block/rnbd/README
+++ b/drivers/block/rnbd/README
@@ -90,3 +90,4 @@ Kleber Souza <kleber.souza@profitbricks.com>
Lutz Pogrell <lutz.pogrell@cloud.ionos.com>
Milind Dumbare <Milind.dumbare@gmail.com>
Roman Penyaev <roman.penyaev@profitbricks.com>
+Swapnil Ingle <ingleswapnil@gmail.com>
diff --git a/drivers/block/rnbd/rnbd-clt-sysfs.c b/drivers/block/rnbd/rnbd-clt-sysfs.c
index 4f4474eecadb..d4aa6bfc9555 100644
--- a/drivers/block/rnbd/rnbd-clt-sysfs.c
+++ b/drivers/block/rnbd/rnbd-clt-sysfs.c
@@ -37,7 +37,6 @@ enum {
};
static const unsigned int rnbd_opt_mandatory[] = {
- RNBD_OPT_PATH,
RNBD_OPT_DEV_PATH,
RNBD_OPT_SESSNAME,
};
@@ -433,8 +432,9 @@ void rnbd_clt_remove_dev_symlink(struct rnbd_clt_dev *dev)
* i.e. rnbd_clt_unmap_dev_store() leading to a sysfs warning because
* the sysfs link was already removed.
*/
- if (strlen(dev->blk_symlink_name) && try_module_get(THIS_MODULE)) {
+ if (dev->blk_symlink_name && try_module_get(THIS_MODULE)) {
sysfs_remove_link(rnbd_devs_kobj, dev->blk_symlink_name);
+ kfree(dev->blk_symlink_name);
module_put(THIS_MODULE);
}
}
@@ -451,9 +451,11 @@ static int rnbd_clt_add_dev_kobj(struct rnbd_clt_dev *dev)
ret = kobject_init_and_add(&dev->kobj, &rnbd_dev_ktype, gd_kobj, "%s",
"rnbd");
- if (ret)
+ if (ret) {
rnbd_clt_err(dev, "Failed to create device sysfs dir, err: %d\n",
ret);
+ kobject_put(&dev->kobj);
+ }
return ret;
}
@@ -481,16 +483,27 @@ static int rnbd_clt_get_path_name(struct rnbd_clt_dev *dev, char *buf,
if (ret >= len)
return -ENAMETOOLONG;
+ ret += snprintf(buf + ret, len - ret, "@%s", dev->sess->sessname);
+ if (ret >= len)
+ return -ENAMETOOLONG;
+
return 0;
}
static int rnbd_clt_add_dev_symlink(struct rnbd_clt_dev *dev)
{
struct kobject *gd_kobj = &disk_to_dev(dev->gd)->kobj;
- int ret;
+ int ret, len;
+
+ len = strlen(dev->pathname) + strlen(dev->sess->sessname) + 2;
+ dev->blk_symlink_name = kzalloc(len, GFP_KERNEL);
+ if (!dev->blk_symlink_name) {
+ rnbd_clt_err(dev, "Failed to allocate memory for blk_symlink_name\n");
+ return -ENOMEM;
+ }
ret = rnbd_clt_get_path_name(dev, dev->blk_symlink_name,
- sizeof(dev->blk_symlink_name));
+ len);
if (ret) {
rnbd_clt_err(dev, "Failed to get /sys/block symlink path, err: %d\n",
ret);
@@ -508,7 +521,8 @@ static int rnbd_clt_add_dev_symlink(struct rnbd_clt_dev *dev)
return 0;
out_err:
- dev->blk_symlink_name[0] = '\0';
+ kfree(dev->blk_symlink_name);
+ dev->blk_symlink_name = NULL;
return ret;
}
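
The symlink name moves from a fixed NAME_MAX array to an exact-size heap string: strlen(pathname) + strlen(sessname) + 2 covers the '@' separator and the trailing NUL. If rnbd_clt_get_path_name() did nothing but format the two strings, kasprintf() could fold the allocation and the formatting into one call; this is only an alternative sketch, since the real helper may also escape characters in the path:

    /* illustrative only: assumes no escaping of '/' in pathname */
    dev->blk_symlink_name = kasprintf(GFP_KERNEL, "%s@%s",
                                      dev->pathname, dev->sess->sessname);
    if (!dev->blk_symlink_name)
            return -ENOMEM;
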
diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
index 8b2411ccbda9..45a470076652 100644
--- a/drivers/block/rnbd/rnbd-clt.c
+++ b/drivers/block/rnbd/rnbd-clt.c
@@ -59,6 +59,7 @@ static void rnbd_clt_put_dev(struct rnbd_clt_dev *dev)
ida_simple_remove(&index_ida, dev->clt_device_id);
mutex_unlock(&ida_lock);
kfree(dev->hw_queues);
+ kfree(dev->pathname);
rnbd_clt_put_sess(dev->sess);
mutex_destroy(&dev->lock);
kfree(dev);
@@ -87,6 +88,8 @@ static int rnbd_clt_set_dev_attr(struct rnbd_clt_dev *dev,
dev->discard_alignment = le32_to_cpu(rsp->discard_alignment);
dev->secure_discard = le16_to_cpu(rsp->secure_discard);
dev->rotational = rsp->rotational;
+ dev->wc = !!(rsp->cache_policy & RNBD_WRITEBACK);
+ dev->fua = !!(rsp->cache_policy & RNBD_FUA);
dev->max_hw_sectors = sess->max_io_size / SECTOR_SIZE;
dev->max_segments = BMAX_SEGMENTS;
@@ -100,8 +103,7 @@ static int rnbd_clt_change_capacity(struct rnbd_clt_dev *dev,
rnbd_clt_info(dev, "Device size changed from %zu to %zu sectors\n",
dev->nsectors, new_nsectors);
dev->nsectors = new_nsectors;
- set_capacity(dev->gd, dev->nsectors);
- revalidate_disk_size(dev->gd, true);
+ set_capacity_and_notify(dev->gd, dev->nsectors);
return 0;
}
@@ -347,32 +349,48 @@ static struct rnbd_iu *rnbd_get_iu(struct rnbd_clt_session *sess,
struct rnbd_iu *iu;
struct rtrs_permit *permit;
+ iu = kzalloc(sizeof(*iu), GFP_KERNEL);
+ if (!iu)
+ return NULL;
+
permit = rnbd_get_permit(sess, con_type,
wait ? RTRS_PERMIT_WAIT :
RTRS_PERMIT_NOWAIT);
- if (unlikely(!permit))
+ if (unlikely(!permit)) {
+ kfree(iu);
return NULL;
- iu = rtrs_permit_to_pdu(permit);
+ }
+
iu->permit = permit;
/*
* 1st reference is dropped after finishing sending a "user" message,
* 2nd reference is dropped after confirmation with the response is
* returned.
* 1st and 2nd can happen in any order, so the rnbd_iu should be
- * released (rtrs_permit returned to ibbtrs) only leased after both
+ * released (rtrs_permit returned to rtrs) only after both
* are finished.
*/
atomic_set(&iu->refcount, 2);
init_waitqueue_head(&iu->comp.wait);
iu->comp.errno = INT_MAX;
+ if (sg_alloc_table(&iu->sgt, 1, GFP_KERNEL)) {
+ rnbd_put_permit(sess, permit);
+ kfree(iu);
+ return NULL;
+ }
+
return iu;
}
static void rnbd_put_iu(struct rnbd_clt_session *sess, struct rnbd_iu *iu)
{
- if (atomic_dec_and_test(&iu->refcount))
+ if (atomic_dec_and_test(&iu->refcount)) {
+ sg_free_table(&iu->sgt);
rnbd_put_permit(sess, iu->permit);
+ kfree(iu);
+ }
}
static void rnbd_softirq_done_fn(struct request *rq)
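
The refcount-of-two comment above describes a common pattern: two completion paths that can run in either order, where whichever finishes last must release the object. Stripped to its core:

    struct two_owner {
            atomic_t refcount;
            /* payload ... */
    };

    static void two_owner_arm(struct two_owner *o)
    {
            atomic_set(&o->refcount, 2);    /* one reference per path */
    }

    static void two_owner_put(struct two_owner *o)
    {
            /* whichever path gets here second does the teardown */
            if (atomic_dec_and_test(&o->refcount))
                    kfree(o);
    }
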
@@ -382,6 +400,7 @@ static void rnbd_softirq_done_fn(struct request *rq)
struct rnbd_iu *iu;
iu = blk_mq_rq_to_pdu(rq);
+ sg_free_table_chained(&iu->sgt, RNBD_INLINE_SG_CNT);
rnbd_put_permit(sess, iu->permit);
blk_mq_end_request(rq, errno_to_blk_status(iu->errno));
}
@@ -475,8 +494,6 @@ static int send_msg_close(struct rnbd_clt_dev *dev, u32 device_id, bool wait)
iu->buf = NULL;
iu->dev = dev;
- sg_mark_end(&iu->sglist[0]);
-
msg.hdr.type = cpu_to_le16(RNBD_MSG_CLOSE);
msg.device_id = cpu_to_le32(device_id);
@@ -562,7 +579,7 @@ static int send_msg_open(struct rnbd_clt_dev *dev, bool wait)
iu->buf = rsp;
iu->dev = dev;
- sg_init_one(iu->sglist, rsp, sizeof(*rsp));
+ sg_init_one(iu->sgt.sgl, rsp, sizeof(*rsp));
msg.hdr.type = cpu_to_le16(RNBD_MSG_OPEN);
msg.access_mode = dev->access_mode;
@@ -570,7 +587,7 @@ static int send_msg_open(struct rnbd_clt_dev *dev, bool wait)
WARN_ON(!rnbd_clt_get_dev(dev));
err = send_usr_msg(sess->rtrs, READ, iu,
- &vec, sizeof(*rsp), iu->sglist, 1,
+ &vec, sizeof(*rsp), iu->sgt.sgl, 1,
msg_open_conf, &errno, wait);
if (err) {
rnbd_clt_put_dev(dev);
@@ -607,8 +624,7 @@ static int send_msg_sess_info(struct rnbd_clt_session *sess, bool wait)
iu->buf = rsp;
iu->sess = sess;
-
- sg_init_one(iu->sglist, rsp, sizeof(*rsp));
+ sg_init_one(iu->sgt.sgl, rsp, sizeof(*rsp));
msg.hdr.type = cpu_to_le16(RNBD_MSG_SESS_INFO);
msg.ver = RNBD_PROTO_VER_MAJOR;
@@ -624,7 +640,7 @@ static int send_msg_sess_info(struct rnbd_clt_session *sess, bool wait)
goto put_iu;
}
err = send_usr_msg(sess->rtrs, READ, iu,
- &vec, sizeof(*rsp), iu->sglist, 1,
+ &vec, sizeof(*rsp), iu->sgt.sgl, 1,
msg_sess_info_conf, &errno, wait);
if (err) {
rnbd_clt_put_sess(sess);
@@ -634,7 +650,6 @@ put_iu:
} else {
err = errno;
}
-
rnbd_put_iu(sess, iu);
return err;
}
@@ -803,7 +818,7 @@ static struct rnbd_clt_session *alloc_sess(const char *sessname)
rnbd_init_cpu_qlists(sess->cpu_queues);
/*
- * That is simple percpu variable which stores cpu indeces, which are
+ * This is a simple percpu variable that stores cpu indices, which are
* incremented on each access. We need that for the sake of fairness
* to wake up queues in a round-robin manner.
*/
@@ -1014,11 +1029,10 @@ static int rnbd_client_xfer_request(struct rnbd_clt_dev *dev,
* See queue limits.
*/
if (req_op(rq) != REQ_OP_DISCARD)
- sg_cnt = blk_rq_map_sg(dev->queue, rq, iu->sglist);
+ sg_cnt = blk_rq_map_sg(dev->queue, rq, iu->sgt.sgl);
if (sg_cnt == 0)
- /* Do not forget to mark the end */
- sg_mark_end(&iu->sglist[0]);
+ sg_mark_end(&iu->sgt.sgl[0]);
msg.hdr.type = cpu_to_le16(RNBD_MSG_IO);
msg.device_id = cpu_to_le32(dev->device_id);
@@ -1027,13 +1041,13 @@ static int rnbd_client_xfer_request(struct rnbd_clt_dev *dev,
.iov_base = &msg,
.iov_len = sizeof(msg)
};
- size = rnbd_clt_get_sg_size(iu->sglist, sg_cnt);
+ size = rnbd_clt_get_sg_size(iu->sgt.sgl, sg_cnt);
req_ops = (struct rtrs_clt_req_ops) {
.priv = iu,
.conf_fn = msg_io_conf,
};
err = rtrs_clt_request(rq_data_dir(rq), &req_ops, rtrs, permit,
- &vec, 1, size, iu->sglist, sg_cnt);
+ &vec, 1, size, iu->sgt.sgl, sg_cnt);
if (unlikely(err)) {
rnbd_clt_err_rl(dev, "RTRS failed to transfer IO, err: %d\n",
err);
@@ -1120,6 +1134,7 @@ static blk_status_t rnbd_queue_rq(struct blk_mq_hw_ctx *hctx,
struct rnbd_clt_dev *dev = rq->rq_disk->private_data;
struct rnbd_iu *iu = blk_mq_rq_to_pdu(rq);
int err;
+ blk_status_t ret = BLK_STS_IOERR;
if (unlikely(dev->dev_state != DEV_STATE_MAPPED))
return BLK_STS_IOERR;
@@ -1131,32 +1146,35 @@ static blk_status_t rnbd_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_RESOURCE;
}
+ iu->sgt.sgl = iu->first_sgl;
+ err = sg_alloc_table_chained(&iu->sgt,
+ /* Even if the request has no segment,
+ * the sglist must have at least one entry */
+ blk_rq_nr_phys_segments(rq) ? : 1,
+ iu->sgt.sgl,
+ RNBD_INLINE_SG_CNT);
+ if (err) {
+ rnbd_clt_err_rl(dev, "sg_alloc_table_chained ret=%d\n", err);
+ rnbd_clt_dev_kick_mq_queue(dev, hctx, 10/*ms*/);
+ rnbd_put_permit(dev->sess, iu->permit);
+ return BLK_STS_RESOURCE;
+ }
+
blk_mq_start_request(rq);
err = rnbd_client_xfer_request(dev, rq, iu);
if (likely(err == 0))
return BLK_STS_OK;
if (unlikely(err == -EAGAIN || err == -ENOMEM)) {
rnbd_clt_dev_kick_mq_queue(dev, hctx, 10/*ms*/);
- rnbd_put_permit(dev->sess, iu->permit);
- return BLK_STS_RESOURCE;
+ ret = BLK_STS_RESOURCE;
}
-
+ sg_free_table_chained(&iu->sgt, RNBD_INLINE_SG_CNT);
rnbd_put_permit(dev->sess, iu->permit);
- return BLK_STS_IOERR;
-}
-
-static int rnbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
- unsigned int hctx_idx, unsigned int numa_node)
-{
- struct rnbd_iu *iu = blk_mq_rq_to_pdu(rq);
-
- sg_init_table(iu->sglist, BMAX_SEGMENTS);
- return 0;
+ return ret;
}
static struct blk_mq_ops rnbd_mq_ops = {
.queue_rq = rnbd_queue_rq,
- .init_request = rnbd_init_request,
.complete = rnbd_softirq_done_fn,
};
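
Dropping .init_request works because the scatterlist no longer needs pre-initialization: tag_set->cmd_size now reserves RNBD_RDMA_SGL_SIZE bytes behind each rnbd_iu, and queue_rq builds a chained table on top of that inline space per request. The shape of the pattern:

    /* cmd_size = sizeof(struct rnbd_iu) + RNBD_RDMA_SGL_SIZE makes blk-mq
     * place first_sgl[] directly behind each request's rnbd_iu */
    static blk_status_t map_request_sg(struct request *rq)
    {
            struct rnbd_iu *iu = blk_mq_rq_to_pdu(rq);

            iu->sgt.sgl = iu->first_sgl;
            return sg_alloc_table_chained(&iu->sgt,
                                          blk_rq_nr_phys_segments(rq) ?: 1,
                                          iu->sgt.sgl, RNBD_INLINE_SG_CNT) ?
                    BLK_STS_RESOURCE : BLK_STS_OK;
    }

Requests with at most RNBD_INLINE_SG_CNT segments never touch the SG pool (hence the new "select SG_POOL" in the Kconfig hunk); larger ones chain extra entries from it, and sg_free_table_chained() undoes both cases.
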
@@ -1170,7 +1188,7 @@ static int setup_mq_tags(struct rnbd_clt_session *sess)
tag_set->numa_node = NUMA_NO_NODE;
tag_set->flags = BLK_MQ_F_SHOULD_MERGE |
BLK_MQ_F_TAG_QUEUE_SHARED;
- tag_set->cmd_size = sizeof(struct rnbd_iu);
+ tag_set->cmd_size = sizeof(struct rnbd_iu) + RNBD_RDMA_SGL_SIZE;
tag_set->nr_hw_queues = num_online_cpus();
return blk_mq_alloc_tag_set(tag_set);
@@ -1193,6 +1211,12 @@ find_and_get_or_create_sess(const char *sessname,
else if (!first)
return sess;
+ if (!path_cnt) {
+ pr_err("Session %s not found, and path parameter not given", sessname);
+ err = -ENXIO;
+ goto put_sess;
+ }
+
rtrs_ops = (struct rtrs_clt_ops) {
.priv = sess,
.link_ev = rnbd_clt_link_ev,
@@ -1202,7 +1226,7 @@ find_and_get_or_create_sess(const char *sessname,
*/
sess->rtrs = rtrs_clt_open(&rtrs_ops, sessname,
paths, path_cnt, port_nr,
- sizeof(struct rnbd_iu),
+ 0, /* Do not use pdu of rtrs */
RECONNECT_DELAY, BMAX_SEGMENTS,
BLK_MAX_SEGMENT_SIZE,
MAX_RECONNECTS);
@@ -1299,7 +1323,7 @@ static void setup_request_queue(struct rnbd_clt_dev *dev)
blk_queue_max_segments(dev->queue, dev->max_segments);
blk_queue_io_opt(dev->queue, dev->sess->max_io_size);
blk_queue_virt_boundary(dev->queue, SZ_4K - 1);
- blk_queue_write_cache(dev->queue, true, true);
+ blk_queue_write_cache(dev->queue, dev->wc, dev->fua);
dev->queue->queuedata = dev;
}
@@ -1381,10 +1405,16 @@ static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess,
pathname, sess->sessname, ret);
goto out_queues;
}
+
+ dev->pathname = kstrdup(pathname, GFP_KERNEL);
+ if (!dev->pathname) {
+ ret = -ENOMEM;
+ goto out_queues;
+ }
+
dev->clt_device_id = ret;
dev->sess = sess;
dev->access_mode = access_mode;
- strlcpy(dev->pathname, pathname, sizeof(dev->pathname));
mutex_init(&dev->lock);
refcount_set(&dev->refcount, 1);
dev->dev_state = DEV_STATE_INIT;
@@ -1404,17 +1434,20 @@ out_alloc:
return ERR_PTR(ret);
}
-static bool __exists_dev(const char *pathname)
+static bool __exists_dev(const char *pathname, const char *sessname)
{
struct rnbd_clt_session *sess;
struct rnbd_clt_dev *dev;
bool found = false;
list_for_each_entry(sess, &sess_list, list) {
+ if (sessname && strncmp(sess->sessname, sessname,
+ sizeof(sess->sessname)))
+ continue;
mutex_lock(&sess->lock);
list_for_each_entry(dev, &sess->devs_list, list) {
- if (!strncmp(dev->pathname, pathname,
- sizeof(dev->pathname))) {
+ if (strlen(dev->pathname) == strlen(pathname) &&
+ !strcmp(dev->pathname, pathname)) {
found = true;
break;
}
@@ -1427,12 +1460,12 @@ static bool __exists_dev(const char *pathname)
return found;
}
-static bool exists_devpath(const char *pathname)
+static bool exists_devpath(const char *pathname, const char *sessname)
{
bool found;
mutex_lock(&sess_lock);
- found = __exists_dev(pathname);
+ found = __exists_dev(pathname, sessname);
mutex_unlock(&sess_lock);
return found;
@@ -1445,7 +1478,7 @@ static bool insert_dev_if_not_exists_devpath(const char *pathname,
bool found;
mutex_lock(&sess_lock);
- found = __exists_dev(pathname);
+ found = __exists_dev(pathname, sess->sessname);
if (!found) {
mutex_lock(&sess->lock);
list_add_tail(&dev->list, &sess->devs_list);
@@ -1475,7 +1508,7 @@ struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname,
struct rnbd_clt_dev *dev;
int ret;
- if (exists_devpath(pathname))
+ if (unlikely(exists_devpath(pathname, sessname)))
return ERR_PTR(-EEXIST);
sess = find_and_get_or_create_sess(sessname, paths, path_cnt, port_nr);
@@ -1513,13 +1546,13 @@ struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname,
}
rnbd_clt_info(dev,
- "map_device: Device mapped as %s (nsectors: %zu, logical_block_size: %d, physical_block_size: %d, max_write_same_sectors: %d, max_discard_sectors: %d, discard_granularity: %d, discard_alignment: %d, secure_discard: %d, max_segments: %d, max_hw_sectors: %d, rotational: %d)\n",
+ "map_device: Device mapped as %s (nsectors: %zu, logical_block_size: %d, physical_block_size: %d, max_write_same_sectors: %d, max_discard_sectors: %d, discard_granularity: %d, discard_alignment: %d, secure_discard: %d, max_segments: %d, max_hw_sectors: %d, rotational: %d, wc: %d, fua: %d)\n",
dev->gd->disk_name, dev->nsectors,
dev->logical_block_size, dev->physical_block_size,
dev->max_write_same_sectors, dev->max_discard_sectors,
dev->discard_granularity, dev->discard_alignment,
dev->secure_discard, dev->max_segments,
- dev->max_hw_sectors, dev->rotational);
+ dev->max_hw_sectors, dev->rotational, dev->wc, dev->fua);
mutex_unlock(&dev->lock);
@@ -1651,7 +1684,7 @@ static void rnbd_destroy_sessions(void)
/*
* At this point there is no concurrent access to the sessions
* and devices lists:
- * 1. New session or device can'be be created - session sysfs files
+ * 1. New session or device can't be created - session sysfs files
* are removed.
* 2. Device or session can't be removed - module reference is taken
* into account in unmap device sysfs callback.
@@ -1664,7 +1697,8 @@ static void rnbd_destroy_sessions(void)
*/
list_for_each_entry_safe(sess, sn, &sess_list, list) {
- WARN_ON(!rnbd_clt_get_sess(sess));
+ if (!rnbd_clt_get_sess(sess))
+ continue;
close_rtrs(sess);
list_for_each_entry_safe(dev, tn, &sess->devs_list, list) {
/*
diff --git a/drivers/block/rnbd/rnbd-clt.h b/drivers/block/rnbd/rnbd-clt.h
index ed33654aa486..537d499dad3b 100644
--- a/drivers/block/rnbd/rnbd-clt.h
+++ b/drivers/block/rnbd/rnbd-clt.h
@@ -44,6 +44,13 @@ struct rnbd_iu_comp {
int errno;
};
+#ifdef CONFIG_ARCH_NO_SG_CHAIN
+#define RNBD_INLINE_SG_CNT 0
+#else
+#define RNBD_INLINE_SG_CNT 2
+#endif
+#define RNBD_RDMA_SGL_SIZE (sizeof(struct scatterlist) * RNBD_INLINE_SG_CNT)
+
struct rnbd_iu {
union {
struct request *rq; /* for block io */
@@ -56,11 +63,12 @@ struct rnbd_iu {
/* use to send msg associated with a sess */
struct rnbd_clt_session *sess;
};
- struct scatterlist sglist[BMAX_SEGMENTS];
+ struct sg_table sgt;
struct work_struct work;
int errno;
struct rnbd_iu_comp comp;
atomic_t refcount;
+ struct scatterlist first_sgl[]; /* must be the last one */
};
struct rnbd_cpu_qlist {
@@ -108,10 +116,12 @@ struct rnbd_clt_dev {
u32 clt_device_id;
struct mutex lock;
enum rnbd_clt_dev_state dev_state;
- char pathname[NAME_MAX];
+ char *pathname;
enum rnbd_access_mode access_mode;
bool read_only;
bool rotational;
+ bool wc;
+ bool fua;
u32 max_hw_sectors;
u32 max_write_same_sectors;
u32 max_discard_sectors;
@@ -126,7 +136,7 @@ struct rnbd_clt_dev {
struct list_head list;
struct gendisk *gd;
struct kobject kobj;
- char blk_symlink_name[NAME_MAX];
+ char *blk_symlink_name;
refcount_t refcount;
struct work_struct unmap_on_rmmod_work;
};
diff --git a/drivers/block/rnbd/rnbd-proto.h b/drivers/block/rnbd/rnbd-proto.h
index ca166241452c..c1bc5c0fef71 100644
--- a/drivers/block/rnbd/rnbd-proto.h
+++ b/drivers/block/rnbd/rnbd-proto.h
@@ -108,6 +108,11 @@ struct rnbd_msg_close {
__le32 device_id;
};
+enum rnbd_cache_policy {
+ RNBD_FUA = 1 << 0,
+ RNBD_WRITEBACK = 1 << 1,
+};
+
/**
* struct rnbd_msg_open_rsp - response message to RNBD_MSG_OPEN
* @hdr: message header
@@ -124,6 +129,7 @@ struct rnbd_msg_close {
* @max_segments: max segments hardware support in one transfer
* @secure_discard: supports secure discard
* @rotation: is a rotational disc?
+ * @cache_policy: support write-back caching or FUA?
*/
struct rnbd_msg_open_rsp {
struct rnbd_msg_hdr hdr;
@@ -139,7 +145,8 @@ struct rnbd_msg_open_rsp {
__le16 max_segments;
__le16 secure_discard;
u8 rotational;
- u8 reserved[11];
+ u8 cache_policy;
+ u8 reserved[10];
};
/**
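
The new cache_policy byte carries the server's write-cache capabilities to the client, which until now unconditionally advertised writeback and FUA. Both ends of the exchange, condensed from the hunks elsewhere in this diff:

    /* server: encode the backing queue's behaviour into the open reply */
    rsp->cache_policy = 0;
    if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
            rsp->cache_policy |= RNBD_WRITEBACK;
    if (blk_queue_fua(q))
            rsp->cache_policy |= RNBD_FUA;

    /* client: replay the flags on the local request queue */
    blk_queue_write_cache(dev->queue,
                          rsp->cache_policy & RNBD_WRITEBACK,
                          rsp->cache_policy & RNBD_FUA);
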
diff --git a/drivers/block/rnbd/rnbd-srv-sysfs.c b/drivers/block/rnbd/rnbd-srv-sysfs.c
index 106775c074d1..05ffe488ddc6 100644
--- a/drivers/block/rnbd/rnbd-srv-sysfs.c
+++ b/drivers/block/rnbd/rnbd-srv-sysfs.c
@@ -47,13 +47,17 @@ int rnbd_srv_create_dev_sysfs(struct rnbd_srv_dev *dev,
ret = kobject_init_and_add(&dev->dev_kobj, &dev_ktype,
rnbd_devs_kobj, dev_name);
- if (ret)
+ if (ret) {
+ kobject_put(&dev->dev_kobj);
return ret;
+ }
dev->dev_sessions_kobj = kobject_create_and_add("sessions",
&dev->dev_kobj);
- if (!dev->dev_sessions_kobj)
- goto put_dev_kobj;
+ if (!dev->dev_sessions_kobj) {
+ ret = -ENOMEM;
+ goto free_dev_kobj;
+ }
bdev_kobj = &disk_to_dev(bdev->bd_disk)->kobj;
ret = sysfs_create_link(&dev->dev_kobj, bdev_kobj, "block_dev");
@@ -64,7 +68,8 @@ int rnbd_srv_create_dev_sysfs(struct rnbd_srv_dev *dev,
put_sess_kobj:
kobject_put(dev->dev_sessions_kobj);
-put_dev_kobj:
+free_dev_kobj:
+ kobject_del(&dev->dev_kobj);
kobject_put(&dev->dev_kobj);
return ret;
}
@@ -120,10 +125,46 @@ static ssize_t mapping_path_show(struct kobject *kobj,
static struct kobj_attribute rnbd_srv_dev_session_mapping_path_attr =
__ATTR_RO(mapping_path);
+static ssize_t rnbd_srv_dev_session_force_close_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
+{
+ return scnprintf(page, PAGE_SIZE, "Usage: echo 1 > %s\n",
+ attr->attr.name);
+}
+
+static ssize_t rnbd_srv_dev_session_force_close_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rnbd_srv_sess_dev *sess_dev;
+
+ sess_dev = container_of(kobj, struct rnbd_srv_sess_dev, kobj);
+
+ if (!sysfs_streq(buf, "1")) {
+ rnbd_srv_err(sess_dev, "%s: invalid value: '%s'\n",
+ attr->attr.name, buf);
+ return -EINVAL;
+ }
+
+ rnbd_srv_info(sess_dev, "force close requested\n");
+
+ /* first remove sysfs itself to avoid deadlock */
+ sysfs_remove_file_self(&sess_dev->kobj, &attr->attr);
+ rnbd_srv_sess_dev_force_close(sess_dev);
+
+ return count;
+}
+
+static struct kobj_attribute rnbd_srv_dev_session_force_close_attr =
+ __ATTR(force_close, 0644,
+ rnbd_srv_dev_session_force_close_show,
+ rnbd_srv_dev_session_force_close_store);
+
static struct attribute *rnbd_srv_default_dev_sessions_attrs[] = {
&rnbd_srv_dev_session_access_mode_attr.attr,
&rnbd_srv_dev_session_ro_attr.attr,
&rnbd_srv_dev_session_mapping_path_attr.attr,
+ &rnbd_srv_dev_session_force_close_attr.attr,
NULL,
};
@@ -145,7 +186,7 @@ static void rnbd_srv_sess_dev_release(struct kobject *kobj)
struct rnbd_srv_sess_dev *sess_dev;
sess_dev = container_of(kobj, struct rnbd_srv_sess_dev, kobj);
- rnbd_destroy_sess_dev(sess_dev);
+ rnbd_destroy_sess_dev(sess_dev, sess_dev->keep_id);
}
static struct kobj_type rnbd_srv_sess_dev_ktype = {
@@ -160,18 +201,17 @@ int rnbd_srv_create_dev_session_sysfs(struct rnbd_srv_sess_dev *sess_dev)
ret = kobject_init_and_add(&sess_dev->kobj, &rnbd_srv_sess_dev_ktype,
sess_dev->dev->dev_sessions_kobj, "%s",
sess_dev->sess->sessname);
- if (ret)
+ if (ret) {
+ kobject_put(&sess_dev->kobj);
return ret;
+ }
ret = sysfs_create_group(&sess_dev->kobj,
&rnbd_srv_default_dev_session_attr_group);
- if (ret)
- goto err;
-
- return 0;
-
-err:
- kobject_put(&sess_dev->kobj);
+ if (ret) {
+ kobject_del(&sess_dev->kobj);
+ kobject_put(&sess_dev->kobj);
+ }
return ret;
}
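
All three sysfs fixes above follow the documented kobject rule: kobject_init_and_add() leaves the kobject initialized even when it fails, so the error path must use kobject_put(), which invokes the ktype's release callback, rather than freeing the memory directly; a kobject that was successfully added additionally needs kobject_del() before the final put. In miniature:

    ret = kobject_init_and_add(&obj->kobj, &obj_ktype, parent, "%s", name);
    if (ret) {
            /* init took a reference even though add failed */
            kobject_put(&obj->kobj);        /* runs obj_ktype->release */
            return ret;
    }
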
diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
index e1bc8b4cd592..a6a68d44f517 100644
--- a/drivers/block/rnbd/rnbd-srv.c
+++ b/drivers/block/rnbd/rnbd-srv.c
@@ -212,12 +212,20 @@ static void rnbd_put_srv_dev(struct rnbd_srv_dev *dev)
kref_put(&dev->kref, destroy_device_cb);
}
-void rnbd_destroy_sess_dev(struct rnbd_srv_sess_dev *sess_dev)
+void rnbd_destroy_sess_dev(struct rnbd_srv_sess_dev *sess_dev, bool keep_id)
{
DECLARE_COMPLETION_ONSTACK(dc);
- xa_erase(&sess_dev->sess->index_idr, sess_dev->device_id);
+ if (keep_id)
+ /*
+ * Free the resources for the id but don't allow reuse of
+ * the id itself because it is still used by the client.
+ */
+ xa_cmpxchg(&sess_dev->sess->index_idr, sess_dev->device_id,
+ sess_dev, NULL, 0);
+ else
+ xa_erase(&sess_dev->sess->index_idr, sess_dev->device_id);
synchronize_rcu();
+
sess_dev->destroy_comp = &dc;
rnbd_put_sess_dev(sess_dev);
wait_for_completion(&dc); /* wait for inflights to drop to zero */
@@ -328,6 +336,16 @@ static int rnbd_srv_link_ev(struct rtrs_srv *rtrs,
}
}
+void rnbd_srv_sess_dev_force_close(struct rnbd_srv_sess_dev *sess_dev)
+{
+ struct rnbd_srv_session *sess = sess_dev->sess;
+
+ sess_dev->keep_id = true;
+ mutex_lock(&sess->lock);
+ rnbd_srv_destroy_dev_session_sysfs(sess_dev);
+ mutex_unlock(&sess->lock);
+}
+
static int process_msg_close(struct rtrs_srv *rtrs,
struct rnbd_srv_session *srv_sess,
void *data, size_t datalen, const void *usr,
@@ -534,6 +552,7 @@ static void rnbd_srv_fill_msg_open_rsp(struct rnbd_msg_open_rsp *rsp,
struct rnbd_srv_sess_dev *sess_dev)
{
struct rnbd_dev *rnbd_dev = sess_dev->rnbd_dev;
+ struct request_queue *q = bdev_get_queue(rnbd_dev->bdev);
rsp->hdr.type = cpu_to_le16(RNBD_MSG_OPEN_RSP);
rsp->device_id =
@@ -558,8 +577,12 @@ static void rnbd_srv_fill_msg_open_rsp(struct rnbd_msg_open_rsp *rsp,
cpu_to_le32(rnbd_dev_get_discard_alignment(rnbd_dev));
rsp->secure_discard =
cpu_to_le16(rnbd_dev_get_secure_discard(rnbd_dev));
- rsp->rotational =
- !blk_queue_nonrot(bdev_get_queue(rnbd_dev->bdev));
+ rsp->rotational = !blk_queue_nonrot(q);
+ rsp->cache_policy = 0;
+ if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
+ rsp->cache_policy |= RNBD_WRITEBACK;
+ if (blk_queue_fua(q))
+ rsp->cache_policy |= RNBD_FUA;
}
static struct rnbd_srv_sess_dev *
diff --git a/drivers/block/rnbd/rnbd-srv.h b/drivers/block/rnbd/rnbd-srv.h
index 5a8544b5e74f..b157371c25ed 100644
--- a/drivers/block/rnbd/rnbd-srv.h
+++ b/drivers/block/rnbd/rnbd-srv.h
@@ -56,6 +56,7 @@ struct rnbd_srv_sess_dev {
struct rnbd_srv_dev *dev;
struct kobject kobj;
u32 device_id;
+ bool keep_id;
fmode_t open_flags;
struct kref kref;
struct completion *destroy_comp;
@@ -63,6 +64,7 @@ struct rnbd_srv_sess_dev {
enum rnbd_access_mode access_mode;
};
+void rnbd_srv_sess_dev_force_close(struct rnbd_srv_sess_dev *sess_dev);
/* rnbd-srv-sysfs.c */
int rnbd_srv_create_dev_sysfs(struct rnbd_srv_dev *dev,
@@ -73,6 +75,6 @@ int rnbd_srv_create_dev_session_sysfs(struct rnbd_srv_sess_dev *sess_dev);
void rnbd_srv_destroy_dev_session_sysfs(struct rnbd_srv_sess_dev *sess_dev);
int rnbd_srv_create_sysfs_files(void);
void rnbd_srv_destroy_sysfs_files(void);
-void rnbd_destroy_sess_dev(struct rnbd_srv_sess_dev *sess_dev);
+void rnbd_destroy_sess_dev(struct rnbd_srv_sess_dev *sess_dev, bool keep_id);
#endif /* RNBD_SRV_H */
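
The force_close attribute added in this diff has to destroy the very object its sysfs file hangs off, which would deadlock against the file's own active reference. sysfs_remove_file_self() exists for exactly this case; the store handler removes its own attribute first and only then starts the teardown:

    static ssize_t force_close_store(struct kobject *kobj,
                                     struct kobj_attribute *attr,
                                     const char *buf, size_t count)
    {
            sysfs_remove_file_self(kobj, &attr->attr);  /* waits out readers */
            teardown_sess_dev(kobj);    /* hypothetical: the actual cleanup */
            return count;
    }
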
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index 52dd1efa00f9..cc6a0bc6c005 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -745,18 +745,6 @@ static const struct block_device_operations floppy_fops = {
.check_events = floppy_check_events,
};
-static struct kobject *floppy_find(dev_t dev, int *part, void *data)
-{
- struct swim_priv *swd = data;
- int drive = (*part & 3);
-
- if (drive >= swd->floppy_count)
- return NULL;
-
- *part = 0;
- return get_disk_and_module(swd->unit[drive].disk);
-}
-
static int swim_add_floppy(struct swim_priv *swd, enum drive_location location)
{
struct floppy_state *fs = &swd->unit[swd->floppy_count];
@@ -846,9 +834,6 @@ static int swim_floppy_init(struct swim_priv *swd)
add_disk(swd->unit[drive].disk);
}
- blk_register_region(MKDEV(FLOPPY_MAJOR, 0), 256, THIS_MODULE,
- floppy_find, NULL, swd);
-
return 0;
exit_put_disks:
@@ -932,8 +917,6 @@ static int swim_remove(struct platform_device *dev)
int drive;
struct resource *res;
- blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
-
for (drive = 0; drive < swd->floppy_count; drive++) {
del_gendisk(swd->unit[drive].disk);
blk_cleanup_queue(swd->unit[drive].disk->queue);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index a314b9382442..145606dc52db 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -470,7 +470,7 @@ static void virtblk_update_capacity(struct virtio_blk *vblk, bool resize)
cap_str_10,
cap_str_2);
- set_capacity_revalidate_and_notify(vblk->disk, capacity, true);
+ set_capacity_and_notify(vblk->disk, capacity);
}
static void virtblk_config_changed_work(struct work_struct *work)
@@ -598,7 +598,6 @@ static void virtblk_update_cache_mode(struct virtio_device *vdev)
struct virtio_blk *vblk = vdev->priv;
blk_queue_write_cache(vblk->disk->queue, writeback, false);
- revalidate_disk_size(vblk->disk, true);
}
static const char *const virtblk_cache_types[] = {
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index a1b9df2c4ef1..b0c71d3a81a0 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -356,9 +356,7 @@ struct pending_req {
};
-#define vbd_sz(_v) ((_v)->bdev->bd_part ? \
- (_v)->bdev->bd_part->nr_sects : \
- get_capacity((_v)->bdev->bd_disk))
+#define vbd_sz(_v) bdev_nr_sectors((_v)->bdev)
#define xen_blkif_get(_b) (atomic_inc(&(_b)->refcnt))
#define xen_blkif_put(_b) \
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 76912c584a76..9860d4842f36 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -274,6 +274,7 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
if (ring->xenblkd) {
kthread_stop(ring->xenblkd);
+ ring->xenblkd = NULL;
wake_up(&ring->shutdown_wq);
}
@@ -675,7 +676,8 @@ static int xen_blkbk_probe(struct xenbus_device *dev,
/* setup back pointer */
be->blkif->be = be;
- err = xenbus_watch_pathfmt(dev, &be->backend_watch, backend_changed,
+ err = xenbus_watch_pathfmt(dev, &be->backend_watch, NULL,
+ backend_changed,
"%s/%s", dev->nodename, "physical-device");
if (err)
goto fail;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 48629d3433b4..e1c6798889f4 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -945,7 +945,8 @@ static void blkif_set_queue_limits(struct blkfront_info *info)
if (info->feature_discard) {
blk_queue_flag_set(QUEUE_FLAG_DISCARD, rq);
blk_queue_max_discard_sectors(rq, get_capacity(gd));
- rq->limits.discard_granularity = info->discard_granularity;
+ rq->limits.discard_granularity = info->discard_granularity ?:
+ info->physical_sector_size;
rq->limits.discard_alignment = info->discard_alignment;
if (info->feature_secdiscard)
blk_queue_flag_set(QUEUE_FLAG_SECERASE, rq);
@@ -2153,7 +2154,7 @@ static void blkfront_closing(struct blkfront_info *info)
}
if (info->gd)
- bdev = bdget_disk(info->gd, 0);
+ bdev = bdgrab(info->gd->part0);
mutex_unlock(&info->mutex);
@@ -2179,19 +2180,12 @@ static void blkfront_closing(struct blkfront_info *info)
static void blkfront_setup_discard(struct blkfront_info *info)
{
- int err;
- unsigned int discard_granularity;
- unsigned int discard_alignment;
-
info->feature_discard = 1;
- err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
- "discard-granularity", "%u", &discard_granularity,
- "discard-alignment", "%u", &discard_alignment,
- NULL);
- if (!err) {
- info->discard_granularity = discard_granularity;
- info->discard_alignment = discard_alignment;
- }
+ info->discard_granularity = xenbus_read_unsigned(info->xbdev->otherend,
+ "discard-granularity",
+ 0);
+ info->discard_alignment = xenbus_read_unsigned(info->xbdev->otherend,
+ "discard-alignment", 0);
info->feature_secdiscard =
!!xenbus_read_unsigned(info->xbdev->otherend, "discard-secure",
0);
@@ -2370,7 +2364,7 @@ static void blkfront_connect(struct blkfront_info *info)
return;
printk(KERN_INFO "Setting capacity to %Lu\n",
sectors);
- set_capacity_revalidate_and_notify(info->gd, sectors, true);
+ set_capacity_and_notify(info->gd, sectors);
return;
case BLKIF_STATE_SUSPENDED:
@@ -2462,6 +2456,7 @@ static void blkback_changed(struct xenbus_device *dev,
break;
if (talk_to_blkback(dev, info))
break;
+ break;
case XenbusStateInitialising:
case XenbusStateInitialised:
case XenbusStateReconfiguring:
@@ -2518,7 +2513,7 @@ static int blkfront_remove(struct xenbus_device *xbdev)
disk = info->gd;
if (disk)
- bdev = bdget_disk(disk, 0);
+ bdev = bdgrab(disk->part0);
info->xbdev = NULL;
mutex_unlock(&info->mutex);
@@ -2595,19 +2590,11 @@ out:
static void blkif_release(struct gendisk *disk, fmode_t mode)
{
struct blkfront_info *info = disk->private_data;
- struct block_device *bdev;
struct xenbus_device *xbdev;
mutex_lock(&blkfront_mutex);
-
- bdev = bdget_disk(disk, 0);
-
- if (!bdev) {
- WARN(1, "Block device %s yanked out from us!\n", disk->disk_name);
+ if (disk->part0->bd_openers)
goto out_mutex;
- }
- if (bdev->bd_openers)
- goto out;
/*
* Check if we have been instructed to close. We will have
@@ -2619,7 +2606,7 @@ static void blkif_release(struct gendisk *disk, fmode_t mode)
if (xbdev && xbdev->state == XenbusStateClosing) {
/* pending switch to state closed */
- dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
+ dev_info(disk_to_dev(disk), "releasing disk\n");
xlvbd_release_gendisk(info);
xenbus_frontend_closed(info->xbdev);
}
@@ -2628,14 +2615,12 @@ static void blkif_release(struct gendisk *disk, fmode_t mode)
if (!xbdev) {
/* sudden device removal */
- dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
+ dev_info(disk_to_dev(disk), "releasing disk\n");
xlvbd_release_gendisk(info);
disk->private_data = NULL;
free_info(info);
}
-out:
- bdput(bdev);
out_mutex:
mutex_unlock(&blkfront_mutex);
}
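
bdget_disk(disk, 0) looked the whole-disk block_device up by device number and could return NULL; with that structure now embedded in the gendisk as disk->part0, callers that need to hold it across a lock drop simply take a reference:

    /* assumption: info->gd is valid while info->mutex is held */
    bdev = bdgrab(info->gd->part0);     /* no lookup, cannot miss */
    mutex_unlock(&info->mutex);
    /* ... use bdev, then drop the reference ... */
    bdput(bdev);
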
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index 0e734802ee7c..c1d20818e649 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -42,7 +42,6 @@
#include <linux/zorro.h>
-
#define Z2MINOR_COMBINED (0)
#define Z2MINOR_Z2ONLY (1)
#define Z2MINOR_CHIPONLY (2)
@@ -50,28 +49,28 @@
#define Z2MINOR_MEMLIST2 (5)
#define Z2MINOR_MEMLIST3 (6)
#define Z2MINOR_MEMLIST4 (7)
-#define Z2MINOR_COUNT (8) /* Move this down when adding a new minor */
+#define Z2MINOR_COUNT (8) /* Move this down when adding a new minor */
#define Z2RAM_CHUNK1024 ( Z2RAM_CHUNKSIZE >> 10 )
static DEFINE_MUTEX(z2ram_mutex);
-static u_long *z2ram_map = NULL;
-static u_long z2ram_size = 0;
-static int z2_count = 0;
-static int chip_count = 0;
-static int list_count = 0;
-static int current_device = -1;
+static u_long *z2ram_map = NULL;
+static u_long z2ram_size = 0;
+static int z2_count = 0;
+static int chip_count = 0;
+static int list_count = 0;
+static int current_device = -1;
static DEFINE_SPINLOCK(z2ram_lock);
-static struct gendisk *z2ram_gendisk;
+static struct gendisk *z2ram_gendisk[Z2MINOR_COUNT];
static blk_status_t z2_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct request *req = bd->rq;
unsigned long start = blk_rq_pos(req) << 9;
- unsigned long len = blk_rq_cur_bytes(req);
+ unsigned long len = blk_rq_cur_bytes(req);
blk_mq_start_request(req);
@@ -92,7 +91,7 @@ static blk_status_t z2_queue_rq(struct blk_mq_hw_ctx *hctx,
if (len < size)
size = len;
- addr += z2ram_map[ start >> Z2RAM_CHUNKSHIFT ];
+ addr += z2ram_map[start >> Z2RAM_CHUNKSHIFT];
if (rq_data_dir(req) == READ)
memcpy(buffer, (char *)addr, size);
else
@@ -106,323 +105,319 @@ static blk_status_t z2_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_OK;
}
-static void
-get_z2ram( void )
+static void get_z2ram(void)
{
- int i;
-
- for ( i = 0; i < Z2RAM_SIZE / Z2RAM_CHUNKSIZE; i++ )
- {
- if ( test_bit( i, zorro_unused_z2ram ) )
- {
- z2_count++;
- z2ram_map[z2ram_size++] = (unsigned long)ZTWO_VADDR(Z2RAM_START) +
- (i << Z2RAM_CHUNKSHIFT);
- clear_bit( i, zorro_unused_z2ram );
+ int i;
+
+ for (i = 0; i < Z2RAM_SIZE / Z2RAM_CHUNKSIZE; i++) {
+ if (test_bit(i, zorro_unused_z2ram)) {
+ z2_count++;
+ z2ram_map[z2ram_size++] =
+ (unsigned long)ZTWO_VADDR(Z2RAM_START) +
+ (i << Z2RAM_CHUNKSHIFT);
+ clear_bit(i, zorro_unused_z2ram);
+ }
}
- }
- return;
+ return;
}
-static void
-get_chipram( void )
+static void get_chipram(void)
{
- while ( amiga_chip_avail() > ( Z2RAM_CHUNKSIZE * 4 ) )
- {
- chip_count++;
- z2ram_map[ z2ram_size ] =
- (u_long)amiga_chip_alloc( Z2RAM_CHUNKSIZE, "z2ram" );
+ while (amiga_chip_avail() > (Z2RAM_CHUNKSIZE * 4)) {
+ chip_count++;
+ z2ram_map[z2ram_size] =
+ (u_long) amiga_chip_alloc(Z2RAM_CHUNKSIZE, "z2ram");
+
+ if (z2ram_map[z2ram_size] == 0) {
+ break;
+ }
- if ( z2ram_map[ z2ram_size ] == 0 )
- {
- break;
+ z2ram_size++;
}
- z2ram_size++;
- }
-
- return;
+ return;
}
static int z2_open(struct block_device *bdev, fmode_t mode)
{
- int device;
- int max_z2_map = ( Z2RAM_SIZE / Z2RAM_CHUNKSIZE ) *
- sizeof( z2ram_map[0] );
- int max_chip_map = ( amiga_chip_size / Z2RAM_CHUNKSIZE ) *
- sizeof( z2ram_map[0] );
- int rc = -ENOMEM;
-
- device = MINOR(bdev->bd_dev);
-
- mutex_lock(&z2ram_mutex);
- if ( current_device != -1 && current_device != device )
- {
- rc = -EBUSY;
- goto err_out;
- }
-
- if ( current_device == -1 )
- {
- z2_count = 0;
- chip_count = 0;
- list_count = 0;
- z2ram_size = 0;
-
- /* Use a specific list entry. */
- if (device >= Z2MINOR_MEMLIST1 && device <= Z2MINOR_MEMLIST4) {
- int index = device - Z2MINOR_MEMLIST1 + 1;
- unsigned long size, paddr, vaddr;
-
- if (index >= m68k_realnum_memory) {
- printk( KERN_ERR DEVICE_NAME
- ": no such entry in z2ram_map\n" );
- goto err_out;
- }
+ int device;
+ int max_z2_map = (Z2RAM_SIZE / Z2RAM_CHUNKSIZE) * sizeof(z2ram_map[0]);
+ int max_chip_map = (amiga_chip_size / Z2RAM_CHUNKSIZE) *
+ sizeof(z2ram_map[0]);
+ int rc = -ENOMEM;
- paddr = m68k_memory[index].addr;
- size = m68k_memory[index].size & ~(Z2RAM_CHUNKSIZE-1);
+ device = MINOR(bdev->bd_dev);
-#ifdef __powerpc__
- /* FIXME: ioremap doesn't build correct memory tables. */
- {
- vfree(vmalloc (size));
- }
-
- vaddr = (unsigned long)ioremap_wt(paddr, size);
-
-#else
- vaddr = (unsigned long)z_remap_nocache_nonser(paddr, size);
-#endif
- z2ram_map =
- kmalloc_array(size / Z2RAM_CHUNKSIZE,
- sizeof(z2ram_map[0]),
- GFP_KERNEL);
- if ( z2ram_map == NULL )
- {
- printk( KERN_ERR DEVICE_NAME
- ": cannot get mem for z2ram_map\n" );
- goto err_out;
- }
+ mutex_lock(&z2ram_mutex);
+ if (current_device != -1 && current_device != device) {
+ rc = -EBUSY;
+ goto err_out;
+ }
- while (size) {
- z2ram_map[ z2ram_size++ ] = vaddr;
- size -= Z2RAM_CHUNKSIZE;
- vaddr += Z2RAM_CHUNKSIZE;
- list_count++;
- }
+ if (current_device == -1) {
+ z2_count = 0;
+ chip_count = 0;
+ list_count = 0;
+ z2ram_size = 0;
- if ( z2ram_size != 0 )
- printk( KERN_INFO DEVICE_NAME
- ": using %iK List Entry %d Memory\n",
- list_count * Z2RAM_CHUNK1024, index );
- } else
-
- switch ( device )
- {
- case Z2MINOR_COMBINED:
-
- z2ram_map = kmalloc( max_z2_map + max_chip_map, GFP_KERNEL );
- if ( z2ram_map == NULL )
- {
- printk( KERN_ERR DEVICE_NAME
- ": cannot get mem for z2ram_map\n" );
- goto err_out;
- }
+ /* Use a specific list entry. */
+ if (device >= Z2MINOR_MEMLIST1 && device <= Z2MINOR_MEMLIST4) {
+ int index = device - Z2MINOR_MEMLIST1 + 1;
+ unsigned long size, paddr, vaddr;
- get_z2ram();
- get_chipram();
-
- if ( z2ram_size != 0 )
- printk( KERN_INFO DEVICE_NAME
- ": using %iK Zorro II RAM and %iK Chip RAM (Total %dK)\n",
- z2_count * Z2RAM_CHUNK1024,
- chip_count * Z2RAM_CHUNK1024,
- ( z2_count + chip_count ) * Z2RAM_CHUNK1024 );
-
- break;
-
- case Z2MINOR_Z2ONLY:
- z2ram_map = kmalloc( max_z2_map, GFP_KERNEL );
- if ( z2ram_map == NULL )
- {
- printk( KERN_ERR DEVICE_NAME
- ": cannot get mem for z2ram_map\n" );
- goto err_out;
- }
+ if (index >= m68k_realnum_memory) {
+ printk(KERN_ERR DEVICE_NAME
+ ": no such entry in z2ram_map\n");
+ goto err_out;
+ }
- get_z2ram();
+ paddr = m68k_memory[index].addr;
+ size = m68k_memory[index].size & ~(Z2RAM_CHUNKSIZE - 1);
- if ( z2ram_size != 0 )
- printk( KERN_INFO DEVICE_NAME
- ": using %iK of Zorro II RAM\n",
- z2_count * Z2RAM_CHUNK1024 );
+#ifdef __powerpc__
+ /* FIXME: ioremap doesn't build correct memory tables. */
+ {
+ vfree(vmalloc(size));
+ }
- break;
+ vaddr = (unsigned long)ioremap_wt(paddr, size);
- case Z2MINOR_CHIPONLY:
- z2ram_map = kmalloc( max_chip_map, GFP_KERNEL );
- if ( z2ram_map == NULL )
- {
- printk( KERN_ERR DEVICE_NAME
- ": cannot get mem for z2ram_map\n" );
- goto err_out;
+#else
+ vaddr =
+ (unsigned long)z_remap_nocache_nonser(paddr, size);
+#endif
+ z2ram_map =
+ kmalloc_array(size / Z2RAM_CHUNKSIZE,
+ sizeof(z2ram_map[0]), GFP_KERNEL);
+ if (z2ram_map == NULL) {
+ printk(KERN_ERR DEVICE_NAME
+ ": cannot get mem for z2ram_map\n");
+ goto err_out;
+ }
+
+ while (size) {
+ z2ram_map[z2ram_size++] = vaddr;
+ size -= Z2RAM_CHUNKSIZE;
+ vaddr += Z2RAM_CHUNKSIZE;
+ list_count++;
+ }
+
+ if (z2ram_size != 0)
+ printk(KERN_INFO DEVICE_NAME
+ ": using %iK List Entry %d Memory\n",
+ list_count * Z2RAM_CHUNK1024, index);
+ } else
+ switch (device) {
+ case Z2MINOR_COMBINED:
+
+ z2ram_map =
+ kmalloc(max_z2_map + max_chip_map,
+ GFP_KERNEL);
+ if (z2ram_map == NULL) {
+ printk(KERN_ERR DEVICE_NAME
+ ": cannot get mem for z2ram_map\n");
+ goto err_out;
+ }
+
+ get_z2ram();
+ get_chipram();
+
+ if (z2ram_size != 0)
+ printk(KERN_INFO DEVICE_NAME
+ ": using %iK Zorro II RAM and %iK Chip RAM (Total %dK)\n",
+ z2_count * Z2RAM_CHUNK1024,
+ chip_count * Z2RAM_CHUNK1024,
+ (z2_count +
+ chip_count) * Z2RAM_CHUNK1024);
+
+ break;
+
+ case Z2MINOR_Z2ONLY:
+ z2ram_map = kmalloc(max_z2_map, GFP_KERNEL);
+ if (z2ram_map == NULL) {
+ printk(KERN_ERR DEVICE_NAME
+ ": cannot get mem for z2ram_map\n");
+ goto err_out;
+ }
+
+ get_z2ram();
+
+ if (z2ram_size != 0)
+ printk(KERN_INFO DEVICE_NAME
+ ": using %iK of Zorro II RAM\n",
+ z2_count * Z2RAM_CHUNK1024);
+
+ break;
+
+ case Z2MINOR_CHIPONLY:
+ z2ram_map = kmalloc(max_chip_map, GFP_KERNEL);
+ if (z2ram_map == NULL) {
+ printk(KERN_ERR DEVICE_NAME
+ ": cannot get mem for z2ram_map\n");
+ goto err_out;
+ }
+
+ get_chipram();
+
+ if (z2ram_size != 0)
+ printk(KERN_INFO DEVICE_NAME
+ ": using %iK Chip RAM\n",
+ chip_count * Z2RAM_CHUNK1024);
+
+ break;
+
+ default:
+ rc = -ENODEV;
+ goto err_out;
+
+ break;
+ }
+
+ if (z2ram_size == 0) {
+ printk(KERN_NOTICE DEVICE_NAME
+ ": no unused ZII/Chip RAM found\n");
+ goto err_out_kfree;
}
- get_chipram();
-
- if ( z2ram_size != 0 )
- printk( KERN_INFO DEVICE_NAME
- ": using %iK Chip RAM\n",
- chip_count * Z2RAM_CHUNK1024 );
-
- break;
-
- default:
- rc = -ENODEV;
- goto err_out;
-
- break;
- }
-
- if ( z2ram_size == 0 )
- {
- printk( KERN_NOTICE DEVICE_NAME
- ": no unused ZII/Chip RAM found\n" );
- goto err_out_kfree;
+ current_device = device;
+ z2ram_size <<= Z2RAM_CHUNKSHIFT;
+ set_capacity(z2ram_gendisk[device], z2ram_size >> 9);
}
- current_device = device;
- z2ram_size <<= Z2RAM_CHUNKSHIFT;
- set_capacity(z2ram_gendisk, z2ram_size >> 9);
- }
-
- mutex_unlock(&z2ram_mutex);
- return 0;
+ mutex_unlock(&z2ram_mutex);
+ return 0;
err_out_kfree:
- kfree(z2ram_map);
+ kfree(z2ram_map);
err_out:
- mutex_unlock(&z2ram_mutex);
- return rc;
+ mutex_unlock(&z2ram_mutex);
+ return rc;
}
-static void
-z2_release(struct gendisk *disk, fmode_t mode)
+static void z2_release(struct gendisk *disk, fmode_t mode)
{
- mutex_lock(&z2ram_mutex);
- if ( current_device == -1 ) {
- mutex_unlock(&z2ram_mutex);
- return;
- }
- mutex_unlock(&z2ram_mutex);
- /*
- * FIXME: unmap memory
- */
+ mutex_lock(&z2ram_mutex);
+ if (current_device == -1) {
+ mutex_unlock(&z2ram_mutex);
+ return;
+ }
+ mutex_unlock(&z2ram_mutex);
+ /*
+ * FIXME: unmap memory
+ */
}
-static const struct block_device_operations z2_fops =
-{
- .owner = THIS_MODULE,
- .open = z2_open,
- .release = z2_release,
+static const struct block_device_operations z2_fops = {
+ .owner = THIS_MODULE,
+ .open = z2_open,
+ .release = z2_release,
};
-static struct kobject *z2_find(dev_t dev, int *part, void *data)
-{
- *part = 0;
- return get_disk_and_module(z2ram_gendisk);
-}
-
-static struct request_queue *z2_queue;
static struct blk_mq_tag_set tag_set;
static const struct blk_mq_ops z2_mq_ops = {
- .queue_rq = z2_queue_rq,
+ .queue_rq = z2_queue_rq,
};
-static int __init
-z2_init(void)
+static int z2ram_register_disk(int minor)
{
- int ret;
-
- if (!MACH_IS_AMIGA)
- return -ENODEV;
-
- ret = -EBUSY;
- if (register_blkdev(Z2RAM_MAJOR, DEVICE_NAME))
- goto err;
-
- ret = -ENOMEM;
- z2ram_gendisk = alloc_disk(1);
- if (!z2ram_gendisk)
- goto out_disk;
-
- z2_queue = blk_mq_init_sq_queue(&tag_set, &z2_mq_ops, 16,
- BLK_MQ_F_SHOULD_MERGE);
- if (IS_ERR(z2_queue)) {
- ret = PTR_ERR(z2_queue);
- z2_queue = NULL;
- goto out_queue;
- }
-
- z2ram_gendisk->major = Z2RAM_MAJOR;
- z2ram_gendisk->first_minor = 0;
- z2ram_gendisk->fops = &z2_fops;
- sprintf(z2ram_gendisk->disk_name, "z2ram");
-
- z2ram_gendisk->queue = z2_queue;
- add_disk(z2ram_gendisk);
- blk_register_region(MKDEV(Z2RAM_MAJOR, 0), Z2MINOR_COUNT, THIS_MODULE,
- z2_find, NULL, NULL);
-
- return 0;
-
-out_queue:
- put_disk(z2ram_gendisk);
-out_disk:
- unregister_blkdev(Z2RAM_MAJOR, DEVICE_NAME);
-err:
- return ret;
+ struct request_queue *q;
+ struct gendisk *disk;
+
+ disk = alloc_disk(1);
+ if (!disk)
+ return -ENOMEM;
+
+ q = blk_mq_init_queue(&tag_set);
+ if (IS_ERR(q)) {
+ put_disk(disk);
+ return PTR_ERR(q);
+ }
+
+ disk->major = Z2RAM_MAJOR;
+ disk->first_minor = minor;
+ disk->fops = &z2_fops;
+ if (minor)
+ sprintf(disk->disk_name, "z2ram%d", minor);
+ else
+ sprintf(disk->disk_name, "z2ram");
+ disk->queue = q;
+
+ z2ram_gendisk[minor] = disk;
+ add_disk(disk);
+ return 0;
}
-static void __exit z2_exit(void)
+static int __init z2_init(void)
{
- int i, j;
- blk_unregister_region(MKDEV(Z2RAM_MAJOR, 0), Z2MINOR_COUNT);
- unregister_blkdev(Z2RAM_MAJOR, DEVICE_NAME);
- del_gendisk(z2ram_gendisk);
- put_disk(z2ram_gendisk);
- blk_cleanup_queue(z2_queue);
- blk_mq_free_tag_set(&tag_set);
-
- if ( current_device != -1 )
- {
- i = 0;
-
- for ( j = 0 ; j < z2_count; j++ )
- {
- set_bit( i++, zorro_unused_z2ram );
+ int ret, i;
+
+ if (!MACH_IS_AMIGA)
+ return -ENODEV;
+
+ if (register_blkdev(Z2RAM_MAJOR, DEVICE_NAME))
+ return -EBUSY;
+
+ tag_set.ops = &z2_mq_ops;
+ tag_set.nr_hw_queues = 1;
+ tag_set.nr_maps = 1;
+ tag_set.queue_depth = 16;
+ tag_set.numa_node = NUMA_NO_NODE;
+ tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+ ret = blk_mq_alloc_tag_set(&tag_set);
+ if (ret)
+ goto out_unregister_blkdev;
+
+ for (i = 0; i < Z2MINOR_COUNT; i++) {
+ ret = z2ram_register_disk(i);
+ if (ret && i == 0)
+ goto out_free_tagset;
}
- for ( j = 0 ; j < chip_count; j++ )
- {
- if ( z2ram_map[ i ] )
- {
- amiga_chip_free( (void *) z2ram_map[ i++ ] );
- }
+ return 0;
+
+out_free_tagset:
+ blk_mq_free_tag_set(&tag_set);
+out_unregister_blkdev:
+ unregister_blkdev(Z2RAM_MAJOR, DEVICE_NAME);
+ return ret;
+}
+
+static void __exit z2_exit(void)
+{
+ int i, j;
+
+ unregister_blkdev(Z2RAM_MAJOR, DEVICE_NAME);
+
+ for (i = 0; i < Z2MINOR_COUNT; i++) {
+ del_gendisk(z2ram_gendisk[i]);
+ blk_cleanup_queue(z2ram_gendisk[i]->queue);
+ put_disk(z2ram_gendisk[i]);
}
+ blk_mq_free_tag_set(&tag_set);
+
+ if (current_device != -1) {
+ i = 0;
- if ( z2ram_map != NULL )
- {
- kfree( z2ram_map );
+ for (j = 0; j < z2_count; j++) {
+ set_bit(i++, zorro_unused_z2ram);
+ }
+
+ for (j = 0; j < chip_count; j++) {
+ if (z2ram_map[i]) {
+ amiga_chip_free((void *)z2ram_map[i++]);
+ }
+ }
+
+ if (z2ram_map != NULL) {
+ kfree(z2ram_map);
+ }
}
- }
- return;
-}
+ return;
+}
module_init(z2_init);
module_exit(z2_exit);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 66a33e418940..e2933cb7a82a 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -403,13 +403,10 @@ static void reset_bdev(struct zram *zram)
return;
bdev = zram->bdev;
- if (zram->old_block_size)
- set_blocksize(bdev, zram->old_block_size);
blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
/* hope filp_close flush all of IO */
filp_close(zram->backing_dev, NULL);
zram->backing_dev = NULL;
- zram->old_block_size = 0;
zram->bdev = NULL;
zram->disk->fops = &zram_devops;
kvfree(zram->bitmap);
@@ -454,7 +451,7 @@ static ssize_t backing_dev_store(struct device *dev,
struct file *backing_dev = NULL;
struct inode *inode;
struct address_space *mapping;
- unsigned int bitmap_sz, old_block_size = 0;
+ unsigned int bitmap_sz;
unsigned long nr_pages, *bitmap = NULL;
struct block_device *bdev = NULL;
int err;
@@ -509,14 +506,8 @@ static ssize_t backing_dev_store(struct device *dev,
goto out;
}
- old_block_size = block_size(bdev);
- err = set_blocksize(bdev, PAGE_SIZE);
- if (err)
- goto out;
-
reset_bdev(zram);
- zram->old_block_size = old_block_size;
zram->bdev = bdev;
zram->backing_dev = backing_dev;
zram->bitmap = bitmap;
@@ -1710,8 +1701,8 @@ static void zram_reset_device(struct zram *zram)
disksize = zram->disksize;
zram->disksize = 0;
- set_capacity(zram->disk, 0);
- part_stat_set_all(&zram->disk->part0, 0);
+ set_capacity_and_notify(zram->disk, 0);
+ part_stat_set_all(zram->disk->part0, 0);
up_write(&zram->init_lock);
/* I/O operation under all of CPU are done so let's free */
@@ -1756,9 +1747,7 @@ static ssize_t disksize_store(struct device *dev,
zram->comp = comp;
zram->disksize = disksize;
- set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
-
- revalidate_disk_size(zram->disk, true);
+ set_capacity_and_notify(zram->disk, zram->disksize >> SECTOR_SHIFT);
up_write(&zram->init_lock);
return len;
@@ -1786,15 +1775,12 @@ static ssize_t reset_store(struct device *dev,
return -EINVAL;
zram = dev_to_zram(dev);
- bdev = bdget_disk(zram->disk, 0);
- if (!bdev)
- return -ENOMEM;
+ bdev = zram->disk->part0;
mutex_lock(&bdev->bd_mutex);
/* Do not reset an active device or claimed device */
if (bdev->bd_openers || zram->claim) {
mutex_unlock(&bdev->bd_mutex);
- bdput(bdev);
return -EBUSY;
}
@@ -1805,8 +1791,6 @@ static ssize_t reset_store(struct device *dev,
/* Make sure all the pending I/O are finished */
fsync_bdev(bdev);
zram_reset_device(zram);
- revalidate_disk_size(zram->disk, true);
- bdput(bdev);
mutex_lock(&bdev->bd_mutex);
zram->claim = false;
@@ -1992,16 +1976,11 @@ out_free_dev:
static int zram_remove(struct zram *zram)
{
- struct block_device *bdev;
-
- bdev = bdget_disk(zram->disk, 0);
- if (!bdev)
- return -ENOMEM;
+ struct block_device *bdev = zram->disk->part0;
mutex_lock(&bdev->bd_mutex);
if (bdev->bd_openers || zram->claim) {
mutex_unlock(&bdev->bd_mutex);
- bdput(bdev);
return -EBUSY;
}
@@ -2013,7 +1992,6 @@ static int zram_remove(struct zram *zram)
/* Make sure all the pending I/O are finished */
fsync_bdev(bdev);
zram_reset_device(zram);
- bdput(bdev);
pr_info("Removed device: %s\n", zram->disk->disk_name);
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 9cabcbb13fd9..419a7e8281ee 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -119,7 +119,6 @@ struct zram {
bool wb_limit_enable;
u64 bd_wb_limit;
struct block_device *bdev;
- unsigned int old_block_size;
unsigned long *bitmap;
unsigned long nr_pages;
#endif
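
zram goes one step further than blkfront above: since the gendisk, and therefore its embedded part0, is guaranteed to outlive these functions, no reference is taken or dropped at all, and the old -ENOMEM failure mode disappears:

    struct block_device *bdev = zram->disk->part0;      /* no ref needed */

    mutex_lock(&bdev->bd_mutex);
    if (bdev->bd_openers || zram->claim) {
            mutex_unlock(&bdev->bd_mutex);
            return -EBUSY;
    }
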
diff --git a/drivers/bus/arm-integrator-lm.c b/drivers/bus/arm-integrator-lm.c
index 845b6c43fef8..2344d560b144 100644
--- a/drivers/bus/arm-integrator-lm.c
+++ b/drivers/bus/arm-integrator-lm.c
@@ -54,6 +54,7 @@ static int integrator_lm_populate(int num, struct device *dev)
ret = of_platform_default_populate(child, NULL, dev);
if (ret) {
dev_err(dev, "failed to populate module\n");
+ of_node_put(child);
return ret;
}
}
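
The of_node_put() added above closes a reference leak common to the for_each_*_of_node() iterators: each iteration drops the previous child's reference and takes the next one, so any early exit leaves the current child's count raised unless the caller drops it by hand. The general shape (assuming the surrounding loop is one of these iterators, as the hunk context suggests):

    for_each_available_child_of_node(np, child) {
            ret = do_populate(child);   /* hypothetical callee */
            if (ret) {
                    of_node_put(child); /* the iterator won't run again */
                    return ret;
            }
    }
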
diff --git a/drivers/bus/mips_cdmm.c b/drivers/bus/mips_cdmm.c
index 9f7ed1fcd428..626dedd110cb 100644
--- a/drivers/bus/mips_cdmm.c
+++ b/drivers/bus/mips_cdmm.c
@@ -559,10 +559,8 @@ static void mips_cdmm_bus_discover(struct mips_cdmm_bus *bus)
dev_set_name(&dev->dev, "cdmm%u-%u", cpu, id);
++id;
ret = device_register(&dev->dev);
- if (ret) {
+ if (ret)
put_device(&dev->dev);
- kfree(dev);
- }
}
}
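
The mips_cdmm fix applies the rule from device_register()'s kerneldoc: the call takes a reference regardless of outcome, so a failed registration must be undone with put_device(), which ends up in the release() callback that frees the structure. The removed kfree() was therefore a double free:

    ret = device_register(&dev->dev);
    if (ret)
            put_device(&dev->dev);  /* release() does the kfree */
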
diff --git a/drivers/bus/simple-pm-bus.c b/drivers/bus/simple-pm-bus.c
index c5eb46cbf388..01a3d0cd08ed 100644
--- a/drivers/bus/simple-pm-bus.c
+++ b/drivers/bus/simple-pm-bus.c
@@ -16,6 +16,7 @@
static int simple_pm_bus_probe(struct platform_device *pdev)
{
+ const struct of_dev_auxdata *lookup = dev_get_platdata(&pdev->dev);
struct device_node *np = pdev->dev.of_node;
dev_dbg(&pdev->dev, "%s\n", __func__);
@@ -23,7 +24,7 @@ static int simple_pm_bus_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
if (np)
- of_platform_populate(np, NULL, NULL, &pdev->dev);
+ of_platform_populate(np, NULL, lookup, &pdev->dev);
return 0;
}
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index 92ecf1a78ec7..a27d751cf219 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -853,8 +853,12 @@ static int sysc_ioremap(struct sysc *ddata)
*/
static int sysc_map_and_check_registers(struct sysc *ddata)
{
+ struct device_node *np = ddata->dev->of_node;
int error;
+ if (!of_get_property(np, "reg", NULL))
+ return 0;
+
error = sysc_parse_and_check_child_range(ddata);
if (error)
return error;
@@ -1222,10 +1226,10 @@ static int __maybe_unused sysc_runtime_suspend(struct device *dev)
ddata->enabled = false;
err_allow_idle:
- reset_control_assert(ddata->rsts);
-
sysc_clkdm_allow_idle(ddata);
+ reset_control_assert(ddata->rsts);
+
return error;
}
@@ -1379,6 +1383,8 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK_CLKDM_NOAUTO),
SYSC_QUIRK("dwc3", 0x488c0000, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff,
SYSC_QUIRK_CLKDM_NOAUTO),
+ SYSC_QUIRK("gpmc", 0, 0, 0x10, 0x14, 0x00000060, 0xffffffff,
+ SYSC_QUIRK_GPMC_DEBUG),
SYSC_QUIRK("hdmi", 0, 0, 0x10, -ENODEV, 0x50030200, 0xffffffff,
SYSC_QUIRK_OPT_CLKS_NEEDED),
SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x00000006, 0xffffffff,
@@ -1814,6 +1820,14 @@ static void sysc_init_module_quirks(struct sysc *ddata)
return;
}
+#ifdef CONFIG_OMAP_GPMC_DEBUG
+ if (ddata->cfg.quirks & SYSC_QUIRK_GPMC_DEBUG) {
+ ddata->cfg.quirks |= SYSC_QUIRK_NO_RESET_ON_INIT;
+
+ return;
+ }
+#endif
+
if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_I2C) {
ddata->pre_reset_quirk = sysc_pre_reset_quirk_i2c;
ddata->post_reset_quirk = sysc_post_reset_quirk_i2c;
@@ -1945,6 +1959,7 @@ static int sysc_reset(struct sysc *ddata)
*/
static int sysc_init_module(struct sysc *ddata)
{
+ bool rstctrl_deasserted = false;
int error = 0;
error = sysc_clockdomain_init(ddata);
@@ -1969,6 +1984,7 @@ static int sysc_init_module(struct sysc *ddata)
error = reset_control_deassert(ddata->rsts);
if (error)
goto err_main_clocks;
+ rstctrl_deasserted = true;
}
ddata->revision = sysc_read_revision(ddata);
@@ -1978,13 +1994,13 @@ static int sysc_init_module(struct sysc *ddata)
if (ddata->legacy_mode) {
error = sysc_legacy_init(ddata);
if (error)
- goto err_reset;
+ goto err_main_clocks;
}
if (!ddata->legacy_mode) {
error = sysc_enable_module(ddata->dev);
if (error)
- goto err_reset;
+ goto err_main_clocks;
}
error = sysc_reset(ddata);
@@ -1994,10 +2010,6 @@ static int sysc_init_module(struct sysc *ddata)
if (error && !ddata->legacy_mode)
sysc_disable_module(ddata->dev);
-err_reset:
- if (error && !(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT))
- reset_control_assert(ddata->rsts);
-
err_main_clocks:
if (error)
sysc_disable_main_clocks(ddata);
@@ -2008,6 +2020,10 @@ err_opt_clocks:
sysc_clkdm_allow_idle(ddata);
}
+ if (error && rstctrl_deasserted &&
+ !(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT))
+ reset_control_assert(ddata->rsts);
+
return error;
}
@@ -2909,6 +2925,9 @@ static int sysc_probe(struct platform_device *pdev)
if (!ddata)
return -ENOMEM;
+ ddata->offsets[SYSC_REVISION] = -ENODEV;
+ ddata->offsets[SYSC_SYSCONFIG] = -ENODEV;
+ ddata->offsets[SYSC_SYSSTATUS] = -ENODEV;
ddata->dev = &pdev->dev;
platform_set_drvdata(pdev, ddata);
@@ -2975,9 +2994,6 @@ static int sysc_probe(struct platform_device *pdev)
}
/* Balance use counts as PM runtime should have enabled these all */
- if (!(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT))
- reset_control_assert(ddata->rsts);
-
if (!(ddata->cfg.quirks &
(SYSC_QUIRK_NO_IDLE | SYSC_QUIRK_NO_IDLE_ON_INIT))) {
sysc_disable_main_clocks(ddata);
@@ -2985,6 +3001,9 @@ static int sysc_probe(struct platform_device *pdev)
sysc_clkdm_allow_idle(ddata);
}
+ if (!(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT))
+ reset_control_assert(ddata->rsts);
+
sysc_show_registers(ddata);
ddata->dev->type = &sysc_device_type;
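
Several of the ti-sysc hunks share one idea: instead of asserting the reset on every error path, including paths where it was never deasserted, sysc_init_module() now records whether reset_control_deassert() actually succeeded and re-asserts only in that case, after the clocks and clockdomain have been put back. A condensed sketch of the flag-tracked error flow, with placeholder helpers standing in for the driver's clock and configuration steps:

#include <linux/reset.h>

#define QUIRK_NO_RESET_ON_INIT  (1UL << 0)      /* illustrative quirk bit */

struct sysc_like {
        struct reset_control *rsts;
        unsigned long quirks;
};

static int enable_clocks(struct sysc_like *d) { return 0; }     /* placeholder */
static void disable_clocks(struct sysc_like *d) { }             /* placeholder */
static int configure_module(struct sysc_like *d) { return 0; }  /* placeholder */

/* Sketch of the "only undo what was actually done" pattern used above. */
static int init_module_example(struct sysc_like *d)
{
        bool deasserted = false;
        int error;

        error = enable_clocks(d);
        if (error)
                return error;

        if (d->rsts) {
                error = reset_control_deassert(d->rsts);
                if (error)
                        goto err_clocks;
                deasserted = true;
        }

        error = configure_module(d);

err_clocks:
        if (error)
                disable_clocks(d);

        /* Re-assert only if we are the ones who deasserted. */
        if (error && deasserted && !(d->quirks & QUIRK_NO_RESET_ON_INIT))
                reset_control_assert(d->rsts);

        return error;
}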
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 0c271b9e3c5b..8f0e52a71493 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2996,13 +2996,15 @@ static noinline int mmc_ioctl_cdrom_read_data(struct cdrom_device_info *cdi,
* SCSI-II devices are not required to support
* READ_CD, so let's try switching block size
*/
- /* FIXME: switch back again... */
- ret = cdrom_switch_blocksize(cdi, blocksize);
- if (ret)
- goto out;
+ if (blocksize != CD_FRAMESIZE) {
+ ret = cdrom_switch_blocksize(cdi, blocksize);
+ if (ret)
+ goto out;
+ }
cgc->sshdr = NULL;
ret = cdrom_read_cd(cdi, cgc, lba, blocksize, 1);
- ret |= cdrom_switch_blocksize(cdi, blocksize);
+ if (blocksize != CD_FRAMESIZE)
+ ret |= cdrom_switch_blocksize(cdi, CD_FRAMESIZE);
}
if (!ret && copy_to_user(arg, cgc->buffer, blocksize))
ret = -EFAULT;
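
The old code had the FIXME exactly backwards: it "restored" the drive to the caller's blocksize rather than the CD_FRAMESIZE default, and it issued the switch even when none was needed. The fixed flow changes the block size only when the request differs from CD_FRAMESIZE, then switches back afterwards, OR-ing the restore status into ret so a read error is not masked. As a sketch, using the driver's own local helpers named above:

/* Condensed sketch of the switch/read/restore sequence above;
 * cdrom_switch_blocksize() and cdrom_read_cd() are cdrom.c locals. */
static int read_with_blocksize(struct cdrom_device_info *cdi,
                               struct packet_command *cgc,
                               int lba, int blocksize)
{
        int ret = 0;

        if (blocksize != CD_FRAMESIZE) {
                ret = cdrom_switch_blocksize(cdi, blocksize);
                if (ret)
                        return ret;
        }

        ret = cdrom_read_cd(cdi, cgc, lba, blocksize, 1);

        /* Always restore the default, but keep the read's error code. */
        if (blocksize != CD_FRAMESIZE)
                ret |= cdrom_switch_blocksize(cdi, CD_FRAMESIZE);

        return ret;
}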
diff --git a/drivers/char/agp/Makefile b/drivers/char/agp/Makefile
index cb2497d157f6..90ed8c789e48 100644
--- a/drivers/char/agp/Makefile
+++ b/drivers/char/agp/Makefile
@@ -1,7 +1,11 @@
# SPDX-License-Identifier: GPL-2.0
-agpgart-y := backend.o frontend.o generic.o isoch.o
+agpgart-y := backend.o generic.o isoch.o
+ifeq ($(CONFIG_DRM_LEGACY),y)
agpgart-$(CONFIG_COMPAT) += compat_ioctl.o
+agpgart-y += frontend.o
+endif
+
obj-$(CONFIG_AGP) += agpgart.o
obj-$(CONFIG_AGP_ALI) += ali-agp.o
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
index 4eb1c772ded7..bb09d64cd51e 100644
--- a/drivers/char/agp/agp.h
+++ b/drivers/char/agp/agp.h
@@ -186,8 +186,13 @@ int agp_add_bridge(struct agp_bridge_data *bridge);
void agp_remove_bridge(struct agp_bridge_data *bridge);
/* Frontend routines. */
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
int agp_frontend_initialize(void);
void agp_frontend_cleanup(void);
+#else
+static inline int agp_frontend_initialize(void) { return 0; }
+static inline void agp_frontend_cleanup(void) {}
+#endif
/* Generic routines. */
void agp_generic_enable(struct agp_bridge_data *bridge, u32 mode);
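
Making the AGP character-device frontend conditional on CONFIG_DRM_LEGACY uses the standard header idiom: declare the real functions when the option is enabled and provide static inline no-op stubs otherwise, so callers can invoke agp_frontend_initialize()/agp_frontend_cleanup() unconditionally with no #ifdef at the call sites. The same shape, reduced to a template (CONFIG_SOME_FEATURE is a placeholder name):

#include <linux/kconfig.h>

/* Generic form of the conditional-stub idiom used in agp.h above. */
#if IS_ENABLED(CONFIG_SOME_FEATURE)
int feature_init(void);
void feature_exit(void);
#else
static inline int feature_init(void) { return 0; }      /* compiles away */
static inline void feature_exit(void) { }
#endif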
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 17c1df8c909a..1fe006f3f12f 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -528,15 +528,15 @@ endif # HW_RANDOM
config UML_RANDOM
depends on UML
- tristate "Hardware random number generator"
+ select HW_RANDOM
+ tristate "UML Random Number Generator support"
help
This option enables UML's "hardware" random number generator. It
attaches itself to the host's /dev/random, supplying as much entropy
as the host has, rather than the small amount the UML gets from its
- own drivers. It registers itself as a standard hardware random number
- generator, major 10, minor 183, and the canonical device name is
- /dev/hwrng.
- The way to make use of this is to install the rng-tools package
- (check your distro, or download from
- http://sourceforge.net/projects/gkernel/). rngd periodically reads
- /dev/hwrng and injects the entropy into /dev/random.
+ own drivers. It registers itself as a rng-core driver thus providing
+ a device which is usually called /dev/hwrng. This hardware random
+ number generator does feed into the kernel's random number generator
+ entropy pool.
+
+ If unsure, say Y.
diff --git a/drivers/char/ipmi/bt-bmc.c b/drivers/char/ipmi/bt-bmc.c
index a395e2e70dc5..6e3d247b55d1 100644
--- a/drivers/char/ipmi/bt-bmc.c
+++ b/drivers/char/ipmi/bt-bmc.c
@@ -462,9 +462,9 @@ static int bt_bmc_probe(struct platform_device *pdev)
mutex_init(&bt_bmc->mutex);
init_waitqueue_head(&bt_bmc->queue);
- bt_bmc->miscdev.minor = MISC_DYNAMIC_MINOR,
- bt_bmc->miscdev.name = DEVICE_NAME,
- bt_bmc->miscdev.fops = &bt_bmc_fops,
+ bt_bmc->miscdev.minor = MISC_DYNAMIC_MINOR;
+ bt_bmc->miscdev.name = DEVICE_NAME;
+ bt_bmc->miscdev.fops = &bt_bmc_fops;
bt_bmc->miscdev.parent = dev;
rc = misc_register(&bt_bmc->miscdev);
if (rc) {
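
The bt-bmc hunk is a pure correctness cleanup: the original lines ended in commas, so the three assignments were chained by the C comma operator into one expression statement. That happens to behave identically here, but it is fragile (a statement inserted later silently joins the chain) and it confuses tooling. A two-line illustration of why the comma version even parses:

int a, b;

void demo(void)
{
        /* One expression statement: both assignments run, result discarded. */
        a = 1, b = 2;

        /* Equivalent, and what the fix above switches to. */
        a = 1;
        b = 2;
}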
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index f7b1c004a12b..3dd1d5abb298 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -490,7 +490,6 @@ static long ipmi_ioctl(struct file *file,
}
return ipmi_set_my_address(priv->user, val.channel, val.value);
- break;
}
case IPMICTL_GET_MY_CHANNEL_ADDRESS_CMD:
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 8774a3b8ff95..c44ad18464f1 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -747,7 +747,8 @@ int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
list_add(&watcher->link, &smi_watchers);
index = srcu_read_lock(&ipmi_interfaces_srcu);
- list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+ list_for_each_entry_rcu(intf, &ipmi_interfaces, link,
+ lockdep_is_held(&smi_watchers_mutex)) {
int intf_num = READ_ONCE(intf->intf_num);
if (intf_num == -1)
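
list_for_each_entry_rcu() accepts an optional lockdep condition argument: when the walker is protected by a lock rather than rcu_read_lock(), passing lockdep_is_held() documents that and silences the RCU-lockdep splat the bare form would raise under CONFIG_PROVE_RCU. A sketch of both legitimate callers of one RCU-protected list, with my_mutex as the assumed update-side lock:

#include <linux/rculist.h>
#include <linux/mutex.h>

struct item { int id; struct list_head link; };

static LIST_HEAD(items);
static DEFINE_MUTEX(my_mutex);

/* Reader side: protected by rcu_read_lock(). */
static int find_reader(int id)
{
        struct item *it;
        int found = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(it, &items, link)
                if (it->id == id)
                        found = 1;
        rcu_read_unlock();
        return found;
}

/* Update side: protected by my_mutex instead; tell lockdep so. */
static int find_locked(int id)
{
        struct item *it;

        lockdep_assert_held(&my_mutex);
        list_for_each_entry_rcu(it, &items, link,
                                lockdep_is_held(&my_mutex))
                if (it->id == id)
                        return 1;
        return 0;
}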
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index f78156d93c3f..32c334e34d55 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -495,7 +495,7 @@ static void panic_halt_ipmi_heartbeat(void)
msg.cmd = IPMI_WDOG_RESET_TIMER;
msg.data = NULL;
msg.data_len = 0;
- atomic_add(1, &panic_done_count);
+ atomic_inc(&panic_done_count);
rv = ipmi_request_supply_msgs(watchdog_user,
(struct ipmi_addr *) &addr,
0,
@@ -505,7 +505,7 @@ static void panic_halt_ipmi_heartbeat(void)
&panic_halt_heartbeat_recv_msg,
1);
if (rv)
- atomic_sub(1, &panic_done_count);
+ atomic_dec(&panic_done_count);
}
static struct ipmi_smi_msg panic_halt_smi_msg = {
@@ -529,12 +529,12 @@ static void panic_halt_ipmi_set_timeout(void)
/* Wait for the messages to be free. */
while (atomic_read(&panic_done_count) != 0)
ipmi_poll_interface(watchdog_user);
- atomic_add(1, &panic_done_count);
+ atomic_inc(&panic_done_count);
rv = __ipmi_set_timeout(&panic_halt_smi_msg,
&panic_halt_recv_msg,
&send_heartbeat_now);
if (rv) {
- atomic_sub(1, &panic_done_count);
+ atomic_dec(&panic_done_count);
pr_warn("Unable to extend the watchdog timeout\n");
} else {
if (send_heartbeat_now)
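
atomic_add(1, v)/atomic_sub(1, v) and atomic_inc(v)/atomic_dec(v) are semantically identical; the dedicated helpers are simply the idiomatic spelling (and can map to a shorter instruction on some architectures). The counter here tracks messages still in flight during a panic and is drained by polling; a hedged sketch of that shape, with placeholder send and poll helpers:

#include <linux/atomic.h>

static int issue_request(void) { return 0; }    /* placeholder */
static void poll_interface(void) { }            /* placeholder */

static atomic_t inflight = ATOMIC_INIT(0);

static void send_one(void)
{
        atomic_inc(&inflight);          /* count before issuing */
        if (issue_request() < 0)
                atomic_dec(&inflight);  /* undo on submission failure */
}

static void wait_all_done(void)
{
        while (atomic_read(&inflight) != 0)
                poll_interface();       /* completion path does atomic_dec() */
}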
diff --git a/drivers/char/ps3flash.c b/drivers/char/ps3flash.c
index 1a07fee33f66..23871cde41fb 100644
--- a/drivers/char/ps3flash.c
+++ b/drivers/char/ps3flash.c
@@ -403,7 +403,7 @@ fail:
return error;
}
-static int ps3flash_remove(struct ps3_system_bus_device *_dev)
+static void ps3flash_remove(struct ps3_system_bus_device *_dev)
{
struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core);
@@ -413,7 +413,6 @@ static int ps3flash_remove(struct ps3_system_bus_device *_dev)
kfree(ps3_system_bus_get_drvdata(&dev->sbd));
ps3_system_bus_set_drvdata(&dev->sbd, NULL);
ps3flash_dev = NULL;
- return 0;
}
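
This is part of the tree-wide effort to make bus remove() callbacks return void: the return value was ignored by the core anyway, so returning an error from remove only gave a false impression that removal could fail. The conversion is mechanical, as the sketch shows (struct ps3_system_bus_device comes from <asm/ps3.h> on powerpc):

/* Before: the int return was discarded by the bus core. */
static int old_remove(struct ps3_system_bus_device *dev)
{
        /* teardown ... */
        return 0;
}

/* After: the signature admits that remove cannot fail. */
static void new_remove(struct ps3_system_bus_device *dev)
{
        /* teardown ... */
}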
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index c715d4681a0b..85856cff506c 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -188,6 +188,14 @@ config COMMON_CLK_CS2000_CP
help
If you say yes here you get support for the CS2000 clock multiplier.
+config COMMON_CLK_FSL_FLEXSPI
+ tristate "Clock driver for FlexSPI on Layerscape SoCs"
+ depends on ARCH_LAYERSCAPE || COMPILE_TEST
+ default ARCH_LAYERSCAPE && SPI_NXP_FLEXSPI
+ help
+ On Layerscape SoCs there is a special clock for the FlexSPI
+ interface.
+
config COMMON_CLK_FSL_SAI
bool "Clock driver for BCLK of Freescale SAI cores"
depends on ARCH_LAYERSCAPE || COMPILE_TEST
@@ -246,7 +254,8 @@ config COMMON_CLK_AXI_CLKGEN
config CLK_QORIQ
bool "Clock driver for Freescale QorIQ platforms"
- depends on (PPC_E500MC || ARM || ARM64 || COMPILE_TEST) && OF
+ depends on OF
+ depends on PPC_E500MC || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST
help
This adds the clock driver support for Freescale QorIQ platforms
using common clock framework.
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index da8fcf147eb1..dbdc590e7de3 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_COMMON_CLK_CS2000_CP) += clk-cs2000-cp.o
obj-$(CONFIG_ARCH_EFM32) += clk-efm32gg.o
obj-$(CONFIG_ARCH_SPARX5) += clk-sparx5.o
obj-$(CONFIG_COMMON_CLK_FIXED_MMIO) += clk-fixed-mmio.o
+obj-$(CONFIG_COMMON_CLK_FSL_FLEXSPI) += clk-fsl-flexspi.o
obj-$(CONFIG_COMMON_CLK_FSL_SAI) += clk-fsl-sai.o
obj-$(CONFIG_COMMON_CLK_GEMINI) += clk-gemini.o
obj-$(CONFIG_COMMON_CLK_ASPEED) += clk-aspeed.o
diff --git a/drivers/clk/at91/at91rm9200.c b/drivers/clk/at91/at91rm9200.c
index 2c3d8e6ca63c..0fad1009f315 100644
--- a/drivers/clk/at91/at91rm9200.c
+++ b/drivers/clk/at91/at91rm9200.c
@@ -7,6 +7,8 @@
#include "pmc.h"
+static DEFINE_SPINLOCK(rm9200_mck_lock);
+
struct sck {
char *n;
char *p;
@@ -137,9 +139,20 @@ static void __init at91rm9200_pmc_setup(struct device_node *np)
parent_names[1] = "mainck";
parent_names[2] = "pllack";
parent_names[3] = "pllbck";
- hw = at91_clk_register_master(regmap, "masterck", 4, parent_names,
- &at91rm9200_master_layout,
- &rm9200_mck_characteristics);
+ hw = at91_clk_register_master_pres(regmap, "masterck_pres", 4,
+ parent_names,
+ &at91rm9200_master_layout,
+ &rm9200_mck_characteristics,
+ &rm9200_mck_lock, CLK_SET_RATE_GATE,
+ INT_MIN);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_master_div(regmap, "masterck_div",
+ "masterck_pres",
+ &at91rm9200_master_layout,
+ &rm9200_mck_characteristics,
+ &rm9200_mck_lock, CLK_SET_RATE_GATE);
if (IS_ERR(hw))
goto err_free;
@@ -181,7 +194,7 @@ static void __init at91rm9200_pmc_setup(struct device_node *np)
for (i = 0; i < ARRAY_SIZE(at91rm9200_periphck); i++) {
hw = at91_clk_register_peripheral(regmap,
at91rm9200_periphck[i].n,
- "masterck",
+ "masterck_div",
at91rm9200_periphck[i].id);
if (IS_ERR(hw))
goto err_free;
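
All of the at91 SoC setup files below follow the same conversion: the old single "masterck" (mux, prescaler, and divider in one clk) is split into "masterck_pres" (parent mux plus prescaler) feeding "masterck_div" (the MCK divider), with both stages sharing one spinlock so their MCKR accesses serialize; consumers are then rewired from "masterck" to "masterck_div". The registration chain, reduced to its shape (the layout and characteristics arguments follow the calls above):

#include <linux/clk-provider.h>
#include <linux/err.h>
#include "pmc.h"

static DEFINE_SPINLOCK(mck_lock);

/* Sketch of the two-stage master clock registration used above. */
static struct clk_hw *register_master_chain(struct regmap *regmap,
                                            const char **parents, int num)
{
        struct clk_hw *hw;

        hw = at91_clk_register_master_pres(regmap, "masterck_pres", num,
                                           parents,
                                           &at91rm9200_master_layout,
                                           &rm9200_mck_characteristics,
                                           &mck_lock, CLK_SET_RATE_GATE,
                                           INT_MIN);
        if (IS_ERR(hw))
                return hw;

        /* The divider stage is what peripherals now name as parent. */
        return at91_clk_register_master_div(regmap, "masterck_div",
                                            "masterck_pres",
                                            &at91rm9200_master_layout,
                                            &rm9200_mck_characteristics,
                                            &mck_lock, CLK_SET_RATE_GATE);
}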
diff --git a/drivers/clk/at91/at91sam9260.c b/drivers/clk/at91/at91sam9260.c
index bb81ff731ad8..ceb5495f723a 100644
--- a/drivers/clk/at91/at91sam9260.c
+++ b/drivers/clk/at91/at91sam9260.c
@@ -32,6 +32,8 @@ struct at91sam926x_data {
bool has_slck;
};
+static DEFINE_SPINLOCK(at91sam9260_mck_lock);
+
static const struct clk_master_characteristics sam9260_mck_characteristics = {
.output = { .min = 0, .max = 105000000 },
.divisors = { 1, 2, 4, 0 },
@@ -218,8 +220,8 @@ static const struct sck at91sam9261_systemck[] = {
{ .n = "pck1", .p = "prog1", .id = 9 },
{ .n = "pck2", .p = "prog2", .id = 10 },
{ .n = "pck3", .p = "prog3", .id = 11 },
- { .n = "hclk0", .p = "masterck", .id = 16 },
- { .n = "hclk1", .p = "masterck", .id = 17 },
+ { .n = "hclk0", .p = "masterck_div", .id = 16 },
+ { .n = "hclk1", .p = "masterck_div", .id = 17 },
};
static const struct pck at91sam9261_periphck[] = {
@@ -413,9 +415,21 @@ static void __init at91sam926x_pmc_setup(struct device_node *np,
parent_names[1] = "mainck";
parent_names[2] = "pllack";
parent_names[3] = "pllbck";
- hw = at91_clk_register_master(regmap, "masterck", 4, parent_names,
- &at91rm9200_master_layout,
- data->mck_characteristics);
+ hw = at91_clk_register_master_pres(regmap, "masterck_pres", 4,
+ parent_names,
+ &at91rm9200_master_layout,
+ data->mck_characteristics,
+ &at91sam9260_mck_lock,
+ CLK_SET_RATE_GATE, INT_MIN);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_master_div(regmap, "masterck_div",
+ "masterck_pres",
+ &at91rm9200_master_layout,
+ data->mck_characteristics,
+ &at91sam9260_mck_lock,
+ CLK_SET_RATE_GATE);
if (IS_ERR(hw))
goto err_free;
@@ -457,7 +471,7 @@ static void __init at91sam926x_pmc_setup(struct device_node *np,
for (i = 0; i < data->num_pck; i++) {
hw = at91_clk_register_peripheral(regmap,
data->pck[i].n,
- "masterck",
+ "masterck_div",
data->pck[i].id);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/at91/at91sam9g45.c b/drivers/clk/at91/at91sam9g45.c
index cb4a406ed15d..0214333dedd3 100644
--- a/drivers/clk/at91/at91sam9g45.c
+++ b/drivers/clk/at91/at91sam9g45.c
@@ -7,6 +7,8 @@
#include "pmc.h"
+static DEFINE_SPINLOCK(at91sam9g45_mck_lock);
+
static const struct clk_master_characteristics mck_characteristics = {
.output = { .min = 0, .max = 133333333 },
.divisors = { 1, 2, 4, 3 },
@@ -40,10 +42,10 @@ static const struct {
char *p;
u8 id;
} at91sam9g45_systemck[] = {
- { .n = "ddrck", .p = "masterck", .id = 2 },
- { .n = "uhpck", .p = "usbck", .id = 6 },
- { .n = "pck0", .p = "prog0", .id = 8 },
- { .n = "pck1", .p = "prog1", .id = 9 },
+ { .n = "ddrck", .p = "masterck_div", .id = 2 },
+ { .n = "uhpck", .p = "usbck", .id = 6 },
+ { .n = "pck0", .p = "prog0", .id = 8 },
+ { .n = "pck1", .p = "prog1", .id = 9 },
};
struct pck {
@@ -148,9 +150,21 @@ static void __init at91sam9g45_pmc_setup(struct device_node *np)
parent_names[1] = "mainck";
parent_names[2] = "plladivck";
parent_names[3] = "utmick";
- hw = at91_clk_register_master(regmap, "masterck", 4, parent_names,
- &at91rm9200_master_layout,
- &mck_characteristics);
+ hw = at91_clk_register_master_pres(regmap, "masterck_pres", 4,
+ parent_names,
+ &at91rm9200_master_layout,
+ &mck_characteristics,
+ &at91sam9g45_mck_lock,
+ CLK_SET_RATE_GATE, INT_MIN);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_master_div(regmap, "masterck_div",
+ "masterck_pres",
+ &at91rm9200_master_layout,
+ &mck_characteristics,
+ &at91sam9g45_mck_lock,
+ CLK_SET_RATE_GATE);
if (IS_ERR(hw))
goto err_free;
@@ -166,7 +180,7 @@ static void __init at91sam9g45_pmc_setup(struct device_node *np)
parent_names[1] = "mainck";
parent_names[2] = "plladivck";
parent_names[3] = "utmick";
- parent_names[4] = "masterck";
+ parent_names[4] = "masterck_div";
for (i = 0; i < 2; i++) {
char name[6];
@@ -195,7 +209,7 @@ static void __init at91sam9g45_pmc_setup(struct device_node *np)
for (i = 0; i < ARRAY_SIZE(at91sam9g45_periphck); i++) {
hw = at91_clk_register_peripheral(regmap,
at91sam9g45_periphck[i].n,
- "masterck",
+ "masterck_div",
at91sam9g45_periphck[i].id);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/at91/at91sam9n12.c b/drivers/clk/at91/at91sam9n12.c
index 93f7eb216122..f9db5316a7f1 100644
--- a/drivers/clk/at91/at91sam9n12.c
+++ b/drivers/clk/at91/at91sam9n12.c
@@ -7,6 +7,8 @@
#include "pmc.h"
+static DEFINE_SPINLOCK(at91sam9n12_mck_lock);
+
static const struct clk_master_characteristics mck_characteristics = {
.output = { .min = 0, .max = 133333333 },
.divisors = { 1, 2, 4, 3 },
@@ -54,12 +56,12 @@ static const struct {
char *p;
u8 id;
} at91sam9n12_systemck[] = {
- { .n = "ddrck", .p = "masterck", .id = 2 },
- { .n = "lcdck", .p = "masterck", .id = 3 },
- { .n = "uhpck", .p = "usbck", .id = 6 },
- { .n = "udpck", .p = "usbck", .id = 7 },
- { .n = "pck0", .p = "prog0", .id = 8 },
- { .n = "pck1", .p = "prog1", .id = 9 },
+ { .n = "ddrck", .p = "masterck_div", .id = 2 },
+ { .n = "lcdck", .p = "masterck_div", .id = 3 },
+ { .n = "uhpck", .p = "usbck", .id = 6 },
+ { .n = "udpck", .p = "usbck", .id = 7 },
+ { .n = "pck0", .p = "prog0", .id = 8 },
+ { .n = "pck1", .p = "prog1", .id = 9 },
};
static const struct clk_pcr_layout at91sam9n12_pcr_layout = {
@@ -175,9 +177,21 @@ static void __init at91sam9n12_pmc_setup(struct device_node *np)
parent_names[1] = "mainck";
parent_names[2] = "plladivck";
parent_names[3] = "pllbck";
- hw = at91_clk_register_master(regmap, "masterck", 4, parent_names,
- &at91sam9x5_master_layout,
- &mck_characteristics);
+ hw = at91_clk_register_master_pres(regmap, "masterck_pres", 4,
+ parent_names,
+ &at91sam9x5_master_layout,
+ &mck_characteristics,
+ &at91sam9n12_mck_lock,
+ CLK_SET_RATE_GATE, INT_MIN);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_master_div(regmap, "masterck_div",
+ "masterck_pres",
+ &at91sam9x5_master_layout,
+ &mck_characteristics,
+ &at91sam9n12_mck_lock,
+ CLK_SET_RATE_GATE);
if (IS_ERR(hw))
goto err_free;
@@ -191,7 +205,7 @@ static void __init at91sam9n12_pmc_setup(struct device_node *np)
parent_names[1] = "mainck";
parent_names[2] = "plladivck";
parent_names[3] = "pllbck";
- parent_names[4] = "masterck";
+ parent_names[4] = "masterck_div";
for (i = 0; i < 2; i++) {
char name[6];
@@ -221,7 +235,7 @@ static void __init at91sam9n12_pmc_setup(struct device_node *np)
hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
&at91sam9n12_pcr_layout,
at91sam9n12_periphck[i].n,
- "masterck",
+ "masterck_div",
at91sam9n12_periphck[i].id,
&range, INT_MIN);
if (IS_ERR(hw))
diff --git a/drivers/clk/at91/at91sam9rl.c b/drivers/clk/at91/at91sam9rl.c
index a343eb69bb35..66736e03cfef 100644
--- a/drivers/clk/at91/at91sam9rl.c
+++ b/drivers/clk/at91/at91sam9rl.c
@@ -7,6 +7,8 @@
#include "pmc.h"
+static DEFINE_SPINLOCK(sam9rl_mck_lock);
+
static const struct clk_master_characteristics sam9rl_mck_characteristics = {
.output = { .min = 0, .max = 94000000 },
.divisors = { 1, 2, 4, 0 },
@@ -117,9 +119,20 @@ static void __init at91sam9rl_pmc_setup(struct device_node *np)
parent_names[1] = "mainck";
parent_names[2] = "pllack";
parent_names[3] = "utmick";
- hw = at91_clk_register_master(regmap, "masterck", 4, parent_names,
- &at91rm9200_master_layout,
- &sam9rl_mck_characteristics);
+ hw = at91_clk_register_master_pres(regmap, "masterck_pres", 4,
+ parent_names,
+ &at91rm9200_master_layout,
+ &sam9rl_mck_characteristics,
+ &sam9rl_mck_lock, CLK_SET_RATE_GATE,
+ INT_MIN);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_master_div(regmap, "masterck_div",
+ "masterck_pres",
+ &at91rm9200_master_layout,
+ &sam9rl_mck_characteristics,
+ &sam9rl_mck_lock, CLK_SET_RATE_GATE);
if (IS_ERR(hw))
goto err_free;
@@ -129,7 +142,7 @@ static void __init at91sam9rl_pmc_setup(struct device_node *np)
parent_names[1] = "mainck";
parent_names[2] = "pllack";
parent_names[3] = "utmick";
- parent_names[4] = "masterck";
+ parent_names[4] = "masterck_div";
for (i = 0; i < 2; i++) {
char name[6];
@@ -158,7 +171,7 @@ static void __init at91sam9rl_pmc_setup(struct device_node *np)
for (i = 0; i < ARRAY_SIZE(at91sam9rl_periphck); i++) {
hw = at91_clk_register_peripheral(regmap,
at91sam9rl_periphck[i].n,
- "masterck",
+ "masterck_div",
at91sam9rl_periphck[i].id);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/at91/at91sam9x5.c b/drivers/clk/at91/at91sam9x5.c
index 22b9aad9efb8..79b9d3667228 100644
--- a/drivers/clk/at91/at91sam9x5.c
+++ b/drivers/clk/at91/at91sam9x5.c
@@ -7,6 +7,8 @@
#include "pmc.h"
+static DEFINE_SPINLOCK(mck_lock);
+
static const struct clk_master_characteristics mck_characteristics = {
.output = { .min = 0, .max = 133333333 },
.divisors = { 1, 2, 4, 3 },
@@ -41,7 +43,7 @@ static const struct {
char *p;
u8 id;
} at91sam9x5_systemck[] = {
- { .n = "ddrck", .p = "masterck", .id = 2 },
+ { .n = "ddrck", .p = "masterck_div", .id = 2 },
{ .n = "smdck", .p = "smdclk", .id = 4 },
{ .n = "uhpck", .p = "usbck", .id = 6 },
{ .n = "udpck", .p = "usbck", .id = 7 },
@@ -196,9 +198,19 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
parent_names[1] = "mainck";
parent_names[2] = "plladivck";
parent_names[3] = "utmick";
- hw = at91_clk_register_master(regmap, "masterck", 4, parent_names,
- &at91sam9x5_master_layout,
- &mck_characteristics);
+ hw = at91_clk_register_master_pres(regmap, "masterck_pres", 4,
+ parent_names,
+ &at91sam9x5_master_layout,
+ &mck_characteristics, &mck_lock,
+ CLK_SET_RATE_GATE, INT_MIN);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_master_div(regmap, "masterck_div",
+ "masterck_pres",
+ &at91sam9x5_master_layout,
+ &mck_characteristics, &mck_lock,
+ CLK_SET_RATE_GATE);
if (IS_ERR(hw))
goto err_free;
@@ -218,7 +230,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
parent_names[1] = "mainck";
parent_names[2] = "plladivck";
parent_names[3] = "utmick";
- parent_names[4] = "masterck";
+ parent_names[4] = "masterck_div";
for (i = 0; i < 2; i++) {
char name[6];
@@ -245,7 +257,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
}
if (has_lcdck) {
- hw = at91_clk_register_system(regmap, "lcdck", "masterck", 3);
+ hw = at91_clk_register_system(regmap, "lcdck", "masterck_div", 3);
if (IS_ERR(hw))
goto err_free;
@@ -256,7 +268,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
&at91sam9x5_pcr_layout,
at91sam9x5_periphck[i].n,
- "masterck",
+ "masterck_div",
at91sam9x5_periphck[i].id,
&range, INT_MIN);
if (IS_ERR(hw))
@@ -269,7 +281,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
&at91sam9x5_pcr_layout,
extra_pcks[i].n,
- "masterck",
+ "masterck_div",
extra_pcks[i].id,
&range, INT_MIN);
if (IS_ERR(hw))
diff --git a/drivers/clk/at91/clk-master.c b/drivers/clk/at91/clk-master.c
index bd0d8a69a2cf..a80427980bf7 100644
--- a/drivers/clk/at91/clk-master.c
+++ b/drivers/clk/at91/clk-master.c
@@ -15,7 +15,7 @@
#define MASTER_PRES_MASK 0x7
#define MASTER_PRES_MAX MASTER_PRES_MASK
#define MASTER_DIV_SHIFT 8
-#define MASTER_DIV_MASK 0x3
+#define MASTER_DIV_MASK 0x7
#define PMC_MCR 0x30
#define PMC_MCR_ID_MSK GENMASK(3, 0)
@@ -58,83 +58,309 @@ static inline bool clk_master_ready(struct clk_master *master)
static int clk_master_prepare(struct clk_hw *hw)
{
struct clk_master *master = to_clk_master(hw);
+ unsigned long flags;
+
+ spin_lock_irqsave(master->lock, flags);
while (!clk_master_ready(master))
cpu_relax();
+ spin_unlock_irqrestore(master->lock, flags);
+
return 0;
}
static int clk_master_is_prepared(struct clk_hw *hw)
{
struct clk_master *master = to_clk_master(hw);
+ unsigned long flags;
+ bool status;
- return clk_master_ready(master);
+ spin_lock_irqsave(master->lock, flags);
+ status = clk_master_ready(master);
+ spin_unlock_irqrestore(master->lock, flags);
+
+ return status;
}
-static unsigned long clk_master_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
+static unsigned long clk_master_div_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
{
- u8 pres;
u8 div;
- unsigned long rate = parent_rate;
+ unsigned long flags, rate = parent_rate;
struct clk_master *master = to_clk_master(hw);
const struct clk_master_layout *layout = master->layout;
const struct clk_master_characteristics *characteristics =
master->characteristics;
unsigned int mckr;
+ spin_lock_irqsave(master->lock, flags);
regmap_read(master->regmap, master->layout->offset, &mckr);
+ spin_unlock_irqrestore(master->lock, flags);
+
mckr &= layout->mask;
- pres = (mckr >> layout->pres_shift) & MASTER_PRES_MASK;
div = (mckr >> MASTER_DIV_SHIFT) & MASTER_DIV_MASK;
- if (characteristics->have_div3_pres && pres == MASTER_PRES_MAX)
- rate /= 3;
- else
- rate >>= pres;
-
rate /= characteristics->divisors[div];
if (rate < characteristics->output.min)
- pr_warn("master clk is underclocked");
+ pr_warn("master clk div is underclocked");
else if (rate > characteristics->output.max)
- pr_warn("master clk is overclocked");
+ pr_warn("master clk div is overclocked");
return rate;
}
-static u8 clk_master_get_parent(struct clk_hw *hw)
+static const struct clk_ops master_div_ops = {
+ .prepare = clk_master_prepare,
+ .is_prepared = clk_master_is_prepared,
+ .recalc_rate = clk_master_div_recalc_rate,
+};
+
+static int clk_master_div_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_master *master = to_clk_master(hw);
+ const struct clk_master_characteristics *characteristics =
+ master->characteristics;
+ unsigned long flags;
+ int div, i;
+
+ div = DIV_ROUND_CLOSEST(parent_rate, rate);
+ if (div > ARRAY_SIZE(characteristics->divisors))
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(characteristics->divisors); i++) {
+ if (!characteristics->divisors[i])
+ break;
+
+ if (div == characteristics->divisors[i]) {
+ div = i;
+ break;
+ }
+ }
+
+ if (i == ARRAY_SIZE(characteristics->divisors))
+ return -EINVAL;
+
+ spin_lock_irqsave(master->lock, flags);
+ regmap_update_bits(master->regmap, master->layout->offset,
+ (MASTER_DIV_MASK << MASTER_DIV_SHIFT),
+ (div << MASTER_DIV_SHIFT));
+ while (!clk_master_ready(master))
+ cpu_relax();
+ spin_unlock_irqrestore(master->lock, flags);
+
+ return 0;
+}
+
+static int clk_master_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_master *master = to_clk_master(hw);
+ const struct clk_master_characteristics *characteristics =
+ master->characteristics;
+ struct clk_hw *parent;
+ unsigned long parent_rate, tmp_rate, best_rate = 0;
+ int i, best_diff = INT_MIN, tmp_diff;
+
+ parent = clk_hw_get_parent(hw);
+ if (!parent)
+ return -EINVAL;
+
+ parent_rate = clk_hw_get_rate(parent);
+ if (!parent_rate)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(characteristics->divisors); i++) {
+ if (!characteristics->divisors[i])
+ break;
+
+ tmp_rate = DIV_ROUND_CLOSEST_ULL(parent_rate,
+ characteristics->divisors[i]);
+ tmp_diff = abs(tmp_rate - req->rate);
+
+ if (!best_rate || best_diff > tmp_diff) {
+ best_diff = tmp_diff;
+ best_rate = tmp_rate;
+ }
+
+ if (!best_diff)
+ break;
+ }
+
+ req->best_parent_rate = best_rate;
+ req->best_parent_hw = parent;
+ req->rate = best_rate;
+
+ return 0;
+}
+
+static const struct clk_ops master_div_ops_chg = {
+ .prepare = clk_master_prepare,
+ .is_prepared = clk_master_is_prepared,
+ .recalc_rate = clk_master_div_recalc_rate,
+ .determine_rate = clk_master_div_determine_rate,
+ .set_rate = clk_master_div_set_rate,
+};
+
+static void clk_sama7g5_master_best_diff(struct clk_rate_request *req,
+ struct clk_hw *parent,
+ unsigned long parent_rate,
+ long *best_rate,
+ long *best_diff,
+ u32 div)
+{
+ unsigned long tmp_rate, tmp_diff;
+
+ if (div == MASTER_PRES_MAX)
+ tmp_rate = parent_rate / 3;
+ else
+ tmp_rate = parent_rate >> div;
+
+ tmp_diff = abs(req->rate - tmp_rate);
+
+ if (*best_diff < 0 || *best_diff >= tmp_diff) {
+ *best_rate = tmp_rate;
+ *best_diff = tmp_diff;
+ req->best_parent_rate = parent_rate;
+ req->best_parent_hw = parent;
+ }
+}
+
+static int clk_master_pres_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_master *master = to_clk_master(hw);
+ struct clk_rate_request req_parent = *req;
+ const struct clk_master_characteristics *characteristics =
+ master->characteristics;
+ struct clk_hw *parent;
+ long best_rate = LONG_MIN, best_diff = LONG_MIN;
+ u32 pres;
+ int i;
+
+ if (master->chg_pid < 0)
+ return -EOPNOTSUPP;
+
+ parent = clk_hw_get_parent_by_index(hw, master->chg_pid);
+ if (!parent)
+ return -EOPNOTSUPP;
+
+ for (i = 0; i <= MASTER_PRES_MAX; i++) {
+ if (characteristics->have_div3_pres && i == MASTER_PRES_MAX)
+ pres = 3;
+ else
+ pres = 1 << i;
+
+ req_parent.rate = req->rate * pres;
+ if (__clk_determine_rate(parent, &req_parent))
+ continue;
+
+ clk_sama7g5_master_best_diff(req, parent, req_parent.rate,
+ &best_diff, &best_rate, pres);
+ if (!best_diff)
+ break;
+ }
+
+ return 0;
+}
+
+static int clk_master_pres_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_master *master = to_clk_master(hw);
+ unsigned long flags;
+ unsigned int pres;
+
+ pres = DIV_ROUND_CLOSEST(parent_rate, rate);
+ if (pres > MASTER_PRES_MAX)
+ return -EINVAL;
+
+ else if (pres == 3)
+ pres = MASTER_PRES_MAX;
+ else
+ pres = ffs(pres) - 1;
+
+ spin_lock_irqsave(master->lock, flags);
+ regmap_update_bits(master->regmap, master->layout->offset,
+ (MASTER_PRES_MASK << master->layout->pres_shift),
+ (pres << master->layout->pres_shift));
+
+ while (!clk_master_ready(master))
+ cpu_relax();
+ spin_unlock_irqrestore(master->lock, flags);
+
+ return 0;
+}
+
+static unsigned long clk_master_pres_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_master *master = to_clk_master(hw);
+ const struct clk_master_characteristics *characteristics =
+ master->characteristics;
+ unsigned long flags;
+ unsigned int val, pres;
+
+ spin_lock_irqsave(master->lock, flags);
+ regmap_read(master->regmap, master->layout->offset, &val);
+ spin_unlock_irqrestore(master->lock, flags);
+
+ pres = (val >> master->layout->pres_shift) & MASTER_PRES_MASK;
+ if (pres == 3 && characteristics->have_div3_pres)
+ pres = 3;
+ else
+ pres = (1 << pres);
+
+ return DIV_ROUND_CLOSEST_ULL(parent_rate, pres);
+}
+
+static u8 clk_master_pres_get_parent(struct clk_hw *hw)
+{
+ struct clk_master *master = to_clk_master(hw);
+ unsigned long flags;
unsigned int mckr;
+ spin_lock_irqsave(master->lock, flags);
regmap_read(master->regmap, master->layout->offset, &mckr);
+ spin_unlock_irqrestore(master->lock, flags);
return mckr & AT91_PMC_CSS;
}
-static const struct clk_ops master_ops = {
+static const struct clk_ops master_pres_ops = {
.prepare = clk_master_prepare,
.is_prepared = clk_master_is_prepared,
- .recalc_rate = clk_master_recalc_rate,
- .get_parent = clk_master_get_parent,
+ .recalc_rate = clk_master_pres_recalc_rate,
+ .get_parent = clk_master_pres_get_parent,
};
-struct clk_hw * __init
-at91_clk_register_master(struct regmap *regmap,
+static const struct clk_ops master_pres_ops_chg = {
+ .prepare = clk_master_prepare,
+ .is_prepared = clk_master_is_prepared,
+ .determine_rate = clk_master_pres_determine_rate,
+ .recalc_rate = clk_master_pres_recalc_rate,
+ .get_parent = clk_master_pres_get_parent,
+ .set_rate = clk_master_pres_set_rate,
+};
+
+static struct clk_hw * __init
+at91_clk_register_master_internal(struct regmap *regmap,
const char *name, int num_parents,
const char **parent_names,
const struct clk_master_layout *layout,
- const struct clk_master_characteristics *characteristics)
+ const struct clk_master_characteristics *characteristics,
+ const struct clk_ops *ops, spinlock_t *lock, u32 flags,
+ int chg_pid)
{
struct clk_master *master;
struct clk_init_data init;
struct clk_hw *hw;
int ret;
- if (!name || !num_parents || !parent_names)
+ if (!name || !num_parents || !parent_names || !lock)
return ERR_PTR(-EINVAL);
master = kzalloc(sizeof(*master), GFP_KERNEL);
@@ -142,15 +368,17 @@ at91_clk_register_master(struct regmap *regmap,
return ERR_PTR(-ENOMEM);
init.name = name;
- init.ops = &master_ops;
+ init.ops = ops;
init.parent_names = parent_names;
init.num_parents = num_parents;
- init.flags = 0;
+ init.flags = flags;
master->hw.init = &init;
master->layout = layout;
master->characteristics = characteristics;
master->regmap = regmap;
+ master->chg_pid = chg_pid;
+ master->lock = lock;
hw = &master->hw;
ret = clk_hw_register(NULL, &master->hw);
@@ -162,37 +390,54 @@ at91_clk_register_master(struct regmap *regmap,
return hw;
}
-static unsigned long
-clk_sama7g5_master_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
+struct clk_hw * __init
+at91_clk_register_master_pres(struct regmap *regmap,
+ const char *name, int num_parents,
+ const char **parent_names,
+ const struct clk_master_layout *layout,
+ const struct clk_master_characteristics *characteristics,
+ spinlock_t *lock, u32 flags, int chg_pid)
{
- struct clk_master *master = to_clk_master(hw);
+ const struct clk_ops *ops;
- return DIV_ROUND_CLOSEST_ULL(parent_rate, (1 << master->div));
+ if (flags & CLK_SET_RATE_GATE)
+ ops = &master_pres_ops;
+ else
+ ops = &master_pres_ops_chg;
+
+ return at91_clk_register_master_internal(regmap, name, num_parents,
+ parent_names, layout,
+ characteristics, ops,
+ lock, flags, chg_pid);
}
-static void clk_sama7g5_master_best_diff(struct clk_rate_request *req,
- struct clk_hw *parent,
- unsigned long parent_rate,
- long *best_rate,
- long *best_diff,
- u32 div)
+struct clk_hw * __init
+at91_clk_register_master_div(struct regmap *regmap,
+ const char *name, const char *parent_name,
+ const struct clk_master_layout *layout,
+ const struct clk_master_characteristics *characteristics,
+ spinlock_t *lock, u32 flags)
{
- unsigned long tmp_rate, tmp_diff;
+ const struct clk_ops *ops;
- if (div == MASTER_PRES_MAX)
- tmp_rate = parent_rate / 3;
+ if (flags & CLK_SET_RATE_GATE)
+ ops = &master_div_ops;
else
- tmp_rate = parent_rate >> div;
+ ops = &master_div_ops_chg;
- tmp_diff = abs(req->rate - tmp_rate);
+ return at91_clk_register_master_internal(regmap, name, 1,
+ &parent_name, layout,
+ characteristics, ops,
+ lock, flags, -EINVAL);
+}
- if (*best_diff < 0 || *best_diff >= tmp_diff) {
- *best_rate = tmp_rate;
- *best_diff = tmp_diff;
- req->best_parent_rate = parent_rate;
- req->best_parent_hw = parent;
- }
+static unsigned long
+clk_sama7g5_master_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_master *master = to_clk_master(hw);
+
+ return DIV_ROUND_CLOSEST_ULL(parent_rate, (1 << master->div));
}
static int clk_sama7g5_master_determine_rate(struct clk_hw *hw,
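
clk_master_div_set_rate() above is the standard divisor-table pattern: round the parent/target ratio to the nearest integer, look it up in the characteristics table to recover the register encoding (the table index), then write the field under the spinlock and spin until MCKR reports ready. In isolation, the lookup step is just:

#include <linux/kernel.h>

/* Standalone sketch of the divisor-table lookup used above. */
static int encode_div(const unsigned int *divisors, int n,
                      unsigned long parent_rate, unsigned long rate)
{
        unsigned int div = DIV_ROUND_CLOSEST(parent_rate, rate);
        int i;

        for (i = 0; i < n; i++) {
                if (!divisors[i])               /* 0 terminates the table */
                        break;
                if (div == divisors[i])
                        return i;               /* register encoding */
        }
        return -EINVAL;                         /* unsupported ratio */
}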
diff --git a/drivers/clk/at91/clk-sam9x60-pll.c b/drivers/clk/at91/clk-sam9x60-pll.c
index 78f458a7b2ef..34e3ab13741a 100644
--- a/drivers/clk/at91/clk-sam9x60-pll.c
+++ b/drivers/clk/at91/clk-sam9x60-pll.c
@@ -229,6 +229,57 @@ static int sam9x60_frac_pll_set_rate(struct clk_hw *hw, unsigned long rate,
return sam9x60_frac_pll_compute_mul_frac(core, rate, parent_rate, true);
}
+static int sam9x60_frac_pll_set_rate_chg(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
+ struct sam9x60_frac *frac = to_sam9x60_frac(core);
+ struct regmap *regmap = core->regmap;
+ unsigned long irqflags;
+ unsigned int val, cfrac, cmul;
+ long ret;
+
+ ret = sam9x60_frac_pll_compute_mul_frac(core, rate, parent_rate, true);
+ if (ret <= 0)
+ return ret;
+
+ spin_lock_irqsave(core->lock, irqflags);
+
+ regmap_update_bits(regmap, AT91_PMC_PLL_UPDT, AT91_PMC_PLL_UPDT_ID_MSK,
+ core->id);
+ regmap_read(regmap, AT91_PMC_PLL_CTRL1, &val);
+ cmul = (val & core->layout->mul_mask) >> core->layout->mul_shift;
+ cfrac = (val & core->layout->frac_mask) >> core->layout->frac_shift;
+
+ if (cmul == frac->mul && cfrac == frac->frac)
+ goto unlock;
+
+ regmap_write(regmap, AT91_PMC_PLL_CTRL1,
+ (frac->mul << core->layout->mul_shift) |
+ (frac->frac << core->layout->frac_shift));
+
+ regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
+ AT91_PMC_PLL_UPDT_UPDATE | core->id);
+
+ regmap_update_bits(regmap, AT91_PMC_PLL_CTRL0,
+ AT91_PMC_PLL_CTRL0_ENLOCK | AT91_PMC_PLL_CTRL0_ENPLL,
+ AT91_PMC_PLL_CTRL0_ENLOCK |
+ AT91_PMC_PLL_CTRL0_ENPLL);
+
+ regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
+ AT91_PMC_PLL_UPDT_UPDATE | core->id);
+
+ while (!sam9x60_pll_ready(regmap, core->id))
+ cpu_relax();
+
+unlock:
+ spin_unlock_irqrestore(core->lock, irqflags);
+
+ return ret;
+}
+
static const struct clk_ops sam9x60_frac_pll_ops = {
.prepare = sam9x60_frac_pll_prepare,
.unprepare = sam9x60_frac_pll_unprepare,
@@ -238,6 +289,15 @@ static const struct clk_ops sam9x60_frac_pll_ops = {
.set_rate = sam9x60_frac_pll_set_rate,
};
+static const struct clk_ops sam9x60_frac_pll_ops_chg = {
+ .prepare = sam9x60_frac_pll_prepare,
+ .unprepare = sam9x60_frac_pll_unprepare,
+ .is_prepared = sam9x60_frac_pll_is_prepared,
+ .recalc_rate = sam9x60_frac_pll_recalc_rate,
+ .round_rate = sam9x60_frac_pll_round_rate,
+ .set_rate = sam9x60_frac_pll_set_rate_chg,
+};
+
static int sam9x60_div_pll_prepare(struct clk_hw *hw)
{
struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
@@ -384,6 +444,44 @@ static int sam9x60_div_pll_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
+static int sam9x60_div_pll_set_rate_chg(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
+ struct sam9x60_div *div = to_sam9x60_div(core);
+ struct regmap *regmap = core->regmap;
+ unsigned long irqflags;
+ unsigned int val, cdiv;
+
+ div->div = DIV_ROUND_CLOSEST(parent_rate, rate) - 1;
+
+ spin_lock_irqsave(core->lock, irqflags);
+ regmap_update_bits(regmap, AT91_PMC_PLL_UPDT, AT91_PMC_PLL_UPDT_ID_MSK,
+ core->id);
+ regmap_read(regmap, AT91_PMC_PLL_CTRL0, &val);
+ cdiv = (val & core->layout->div_mask) >> core->layout->div_shift;
+
+ /* Stop if nothing changed. */
+ if (cdiv == div->div)
+ goto unlock;
+
+ regmap_update_bits(regmap, AT91_PMC_PLL_CTRL0,
+ core->layout->div_mask,
+ (div->div << core->layout->div_shift));
+
+ regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
+ AT91_PMC_PLL_UPDT_UPDATE | core->id);
+
+ while (!sam9x60_pll_ready(regmap, core->id))
+ cpu_relax();
+
+unlock:
+ spin_unlock_irqrestore(core->lock, irqflags);
+
+ return 0;
+}
+
static const struct clk_ops sam9x60_div_pll_ops = {
.prepare = sam9x60_div_pll_prepare,
.unprepare = sam9x60_div_pll_unprepare,
@@ -393,17 +491,26 @@ static const struct clk_ops sam9x60_div_pll_ops = {
.set_rate = sam9x60_div_pll_set_rate,
};
+static const struct clk_ops sam9x60_div_pll_ops_chg = {
+ .prepare = sam9x60_div_pll_prepare,
+ .unprepare = sam9x60_div_pll_unprepare,
+ .is_prepared = sam9x60_div_pll_is_prepared,
+ .recalc_rate = sam9x60_div_pll_recalc_rate,
+ .round_rate = sam9x60_div_pll_round_rate,
+ .set_rate = sam9x60_div_pll_set_rate_chg,
+};
+
struct clk_hw * __init
sam9x60_clk_register_frac_pll(struct regmap *regmap, spinlock_t *lock,
const char *name, const char *parent_name,
struct clk_hw *parent_hw, u8 id,
const struct clk_pll_characteristics *characteristics,
- const struct clk_pll_layout *layout, bool critical)
+ const struct clk_pll_layout *layout, u32 flags)
{
struct sam9x60_frac *frac;
struct clk_hw *hw;
struct clk_init_data init;
- unsigned long parent_rate, flags;
+ unsigned long parent_rate, irqflags;
unsigned int val;
int ret;
@@ -417,10 +524,12 @@ sam9x60_clk_register_frac_pll(struct regmap *regmap, spinlock_t *lock,
init.name = name;
init.parent_names = &parent_name;
init.num_parents = 1;
- init.ops = &sam9x60_frac_pll_ops;
- init.flags = CLK_SET_RATE_GATE;
- if (critical)
- init.flags |= CLK_IS_CRITICAL;
+ if (flags & CLK_SET_RATE_GATE)
+ init.ops = &sam9x60_frac_pll_ops;
+ else
+ init.ops = &sam9x60_frac_pll_ops_chg;
+
+ init.flags = flags;
frac->core.id = id;
frac->core.hw.init = &init;
@@ -429,7 +538,7 @@ sam9x60_clk_register_frac_pll(struct regmap *regmap, spinlock_t *lock,
frac->core.regmap = regmap;
frac->core.lock = lock;
- spin_lock_irqsave(frac->core.lock, flags);
+ spin_lock_irqsave(frac->core.lock, irqflags);
if (sam9x60_pll_ready(regmap, id)) {
regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
AT91_PMC_PLL_UPDT_ID_MSK, id);
@@ -457,7 +566,7 @@ sam9x60_clk_register_frac_pll(struct regmap *regmap, spinlock_t *lock,
goto free;
}
}
- spin_unlock_irqrestore(frac->core.lock, flags);
+ spin_unlock_irqrestore(frac->core.lock, irqflags);
hw = &frac->core.hw;
ret = clk_hw_register(NULL, hw);
@@ -469,7 +578,7 @@ sam9x60_clk_register_frac_pll(struct regmap *regmap, spinlock_t *lock,
return hw;
free:
- spin_unlock_irqrestore(frac->core.lock, flags);
+ spin_unlock_irqrestore(frac->core.lock, irqflags);
kfree(frac);
return hw;
}
@@ -478,12 +587,12 @@ struct clk_hw * __init
sam9x60_clk_register_div_pll(struct regmap *regmap, spinlock_t *lock,
const char *name, const char *parent_name, u8 id,
const struct clk_pll_characteristics *characteristics,
- const struct clk_pll_layout *layout, bool critical)
+ const struct clk_pll_layout *layout, u32 flags)
{
struct sam9x60_div *div;
struct clk_hw *hw;
struct clk_init_data init;
- unsigned long flags;
+ unsigned long irqflags;
unsigned int val;
int ret;
@@ -497,11 +606,11 @@ sam9x60_clk_register_div_pll(struct regmap *regmap, spinlock_t *lock,
init.name = name;
init.parent_names = &parent_name;
init.num_parents = 1;
- init.ops = &sam9x60_div_pll_ops;
- init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
- CLK_SET_RATE_PARENT;
- if (critical)
- init.flags |= CLK_IS_CRITICAL;
+ if (flags & CLK_SET_RATE_GATE)
+ init.ops = &sam9x60_div_pll_ops;
+ else
+ init.ops = &sam9x60_div_pll_ops_chg;
+ init.flags = flags;
div->core.id = id;
div->core.hw.init = &init;
@@ -510,14 +619,14 @@ sam9x60_clk_register_div_pll(struct regmap *regmap, spinlock_t *lock,
div->core.regmap = regmap;
div->core.lock = lock;
- spin_lock_irqsave(div->core.lock, flags);
+ spin_lock_irqsave(div->core.lock, irqflags);
regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
AT91_PMC_PLL_UPDT_ID_MSK, id);
regmap_read(regmap, AT91_PMC_PLL_CTRL0, &val);
div->div = FIELD_GET(PMC_PLL_CTRL0_DIV_MSK, val);
- spin_unlock_irqrestore(div->core.lock, flags);
+ spin_unlock_irqrestore(div->core.lock, irqflags);
hw = &div->core.hw;
ret = clk_hw_register(NULL, hw);
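
Both new _chg set_rate callbacks follow the same locked read-compare-write sequence: select the PLL id in PMC_PLL_UPDT, read back the currently programmed mul/frac (or div), and return early if nothing would change, so the PLL is not needlessly taken through an update and re-lock cycle; otherwise write the new field, trigger the UPDATE bit, and poll for lock while still holding the spinlock. A sketch of that control flow, with placeholder helpers standing in for the driver's regmap sequences:

#include <linux/spinlock.h>
#include <linux/processor.h>

static void select_pll_id(int id) { }                   /* placeholder */
static unsigned int read_field(void) { return 0; }      /* placeholder */
static void write_field(unsigned int v) { }             /* placeholder */
static void trigger_update(int id) { }                  /* placeholder */
static bool pll_ready(int id) { return true; }          /* placeholder */

static DEFINE_SPINLOCK(pll_lock);

static void set_field_if_changed(int id, unsigned int new_val)
{
        unsigned long irqflags;

        spin_lock_irqsave(&pll_lock, irqflags);

        select_pll_id(id);
        if (read_field() != new_val) {          /* skip a no-op update */
                write_field(new_val);
                trigger_update(id);
                while (!pll_ready(id))
                        cpu_relax();            /* poll for PLL lock */
        }

        spin_unlock_irqrestore(&pll_lock, irqflags);
}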
diff --git a/drivers/clk/at91/dt-compat.c b/drivers/clk/at91/dt-compat.c
index a50084de97d4..a97b99c2dc12 100644
--- a/drivers/clk/at91/dt-compat.c
+++ b/drivers/clk/at91/dt-compat.c
@@ -24,6 +24,8 @@
#define GCK_INDEX_DT_AUDIO_PLL 5
+static DEFINE_SPINLOCK(mck_lock);
+
#ifdef CONFIG_HAVE_AT91_AUDIO_PLL
static void __init of_sama5d2_clk_audio_pll_frac_setup(struct device_node *np)
{
@@ -388,9 +390,16 @@ of_at91_clk_master_setup(struct device_node *np,
if (IS_ERR(regmap))
return;
- hw = at91_clk_register_master(regmap, name, num_parents,
- parent_names, layout,
- characteristics);
+ hw = at91_clk_register_master_pres(regmap, "masterck_pres", num_parents,
+ parent_names, layout,
+ characteristics, &mck_lock,
+ CLK_SET_RATE_GATE, INT_MIN);
+ if (IS_ERR(hw))
+ goto out_free_characteristics;
+
+ hw = at91_clk_register_master_div(regmap, name, "masterck_pres",
+ layout, characteristics,
+ &mck_lock, CLK_SET_RATE_GATE);
if (IS_ERR(hw))
goto out_free_characteristics;
diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h
index 7b86affc6d7c..a49076c804a9 100644
--- a/drivers/clk/at91/pmc.h
+++ b/drivers/clk/at91/pmc.h
@@ -48,7 +48,7 @@ extern const struct clk_master_layout at91sam9x5_master_layout;
struct clk_master_characteristics {
struct clk_range output;
- u32 divisors[4];
+ u32 divisors[5];
u8 have_div3_pres;
};
@@ -155,10 +155,18 @@ at91_clk_register_sam9x5_main(struct regmap *regmap, const char *name,
const char **parent_names, int num_parents);
struct clk_hw * __init
-at91_clk_register_master(struct regmap *regmap, const char *name,
- int num_parents, const char **parent_names,
- const struct clk_master_layout *layout,
- const struct clk_master_characteristics *characteristics);
+at91_clk_register_master_pres(struct regmap *regmap, const char *name,
+ int num_parents, const char **parent_names,
+ const struct clk_master_layout *layout,
+ const struct clk_master_characteristics *characteristics,
+ spinlock_t *lock, u32 flags, int chg_pid);
+
+struct clk_hw * __init
+at91_clk_register_master_div(struct regmap *regmap, const char *name,
+ const char *parent_names,
+ const struct clk_master_layout *layout,
+ const struct clk_master_characteristics *characteristics,
+ spinlock_t *lock, u32 flags);
struct clk_hw * __init
at91_clk_sama7g5_register_master(struct regmap *regmap,
@@ -190,14 +198,14 @@ struct clk_hw * __init
sam9x60_clk_register_div_pll(struct regmap *regmap, spinlock_t *lock,
const char *name, const char *parent_name, u8 id,
const struct clk_pll_characteristics *characteristics,
- const struct clk_pll_layout *layout, bool critical);
+ const struct clk_pll_layout *layout, u32 flags);
struct clk_hw * __init
sam9x60_clk_register_frac_pll(struct regmap *regmap, spinlock_t *lock,
const char *name, const char *parent_name,
struct clk_hw *parent_hw, u8 id,
const struct clk_pll_characteristics *characteristics,
- const struct clk_pll_layout *layout, bool critical);
+ const struct clk_pll_layout *layout, u32 flags);
struct clk_hw * __init
at91_clk_register_programmable(struct regmap *regmap, const char *name,
diff --git a/drivers/clk/at91/sam9x60.c b/drivers/clk/at91/sam9x60.c
index 3c4c95603595..5f6fa89571b7 100644
--- a/drivers/clk/at91/sam9x60.c
+++ b/drivers/clk/at91/sam9x60.c
@@ -8,6 +8,7 @@
#include "pmc.h"
static DEFINE_SPINLOCK(pmc_pll_lock);
+static DEFINE_SPINLOCK(mck_lock);
static const struct clk_master_characteristics mck_characteristics = {
.output = { .min = 140000000, .max = 200000000 },
@@ -76,11 +77,11 @@ static const struct {
char *p;
u8 id;
} sam9x60_systemck[] = {
- { .n = "ddrck", .p = "masterck", .id = 2 },
+ { .n = "ddrck", .p = "masterck_div", .id = 2 },
{ .n = "uhpck", .p = "usbck", .id = 6 },
{ .n = "pck0", .p = "prog0", .id = 8 },
{ .n = "pck1", .p = "prog1", .id = 9 },
- { .n = "qspick", .p = "masterck", .id = 19 },
+ { .n = "qspick", .p = "masterck_div", .id = 19 },
};
static const struct {
@@ -174,7 +175,6 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
struct regmap *regmap;
struct clk_hw *hw;
int i;
- bool bypass;
i = of_property_match_string(np, "clock-names", "td_slck");
if (i < 0)
@@ -209,10 +209,7 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
if (IS_ERR(hw))
goto err_free;
- bypass = of_property_read_bool(np, "atmel,osc-bypass");
-
- hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name,
- bypass);
+ hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name, 0);
if (IS_ERR(hw))
goto err_free;
main_osc_hw = hw;
@@ -228,13 +225,24 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
hw = sam9x60_clk_register_frac_pll(regmap, &pmc_pll_lock, "pllack_fracck",
"mainck", sam9x60_pmc->chws[PMC_MAIN],
0, &plla_characteristics,
- &pll_frac_layout, true);
+ &pll_frac_layout,
+ /*
+ * This feeds pllack_divck which
+ * feeds CPU. It should not be
+ * disabled.
+ */
+ CLK_IS_CRITICAL | CLK_SET_RATE_GATE);
if (IS_ERR(hw))
goto err_free;
hw = sam9x60_clk_register_div_pll(regmap, &pmc_pll_lock, "pllack_divck",
"pllack_fracck", 0, &plla_characteristics,
- &pll_div_layout, true);
+ &pll_div_layout,
+ /*
+ * This feeds CPU. It should not
+ * be disabled.
+ */
+ CLK_IS_CRITICAL | CLK_SET_RATE_GATE);
if (IS_ERR(hw))
goto err_free;
@@ -243,13 +251,16 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
hw = sam9x60_clk_register_frac_pll(regmap, &pmc_pll_lock, "upllck_fracck",
"main_osc", main_osc_hw, 1,
&upll_characteristics,
- &pll_frac_layout, false);
+ &pll_frac_layout, CLK_SET_RATE_GATE);
if (IS_ERR(hw))
goto err_free;
hw = sam9x60_clk_register_div_pll(regmap, &pmc_pll_lock, "upllck_divck",
"upllck_fracck", 1, &upll_characteristics,
- &pll_div_layout, false);
+ &pll_div_layout,
+ CLK_SET_RATE_GATE |
+ CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT);
if (IS_ERR(hw))
goto err_free;
@@ -258,9 +269,17 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
parent_names[0] = md_slck_name;
parent_names[1] = "mainck";
parent_names[2] = "pllack_divck";
- hw = at91_clk_register_master(regmap, "masterck", 3, parent_names,
- &sam9x60_master_layout,
- &mck_characteristics);
+ hw = at91_clk_register_master_pres(regmap, "masterck_pres", 3,
+ parent_names, &sam9x60_master_layout,
+ &mck_characteristics, &mck_lock,
+ CLK_SET_RATE_GATE, INT_MIN);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_master_div(regmap, "masterck_div",
+ "masterck_pres", &sam9x60_master_layout,
+ &mck_characteristics, &mck_lock,
+ CLK_SET_RATE_GATE);
if (IS_ERR(hw))
goto err_free;
@@ -276,7 +295,7 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
parent_names[0] = md_slck_name;
parent_names[1] = td_slck_name;
parent_names[2] = "mainck";
- parent_names[3] = "masterck";
+ parent_names[3] = "masterck_div";
parent_names[4] = "pllack_divck";
parent_names[5] = "upllck_divck";
for (i = 0; i < 2; i++) {
@@ -308,7 +327,7 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
&sam9x60_pcr_layout,
sam9x60_periphck[i].n,
- "masterck",
+ "masterck_div",
sam9x60_periphck[i].id,
&range, INT_MIN);
if (IS_ERR(hw))
diff --git a/drivers/clk/at91/sama5d2.c b/drivers/clk/at91/sama5d2.c
index 8b220762941a..9a5cbc7cd55a 100644
--- a/drivers/clk/at91/sama5d2.c
+++ b/drivers/clk/at91/sama5d2.c
@@ -7,6 +7,8 @@
#include "pmc.h"
+static DEFINE_SPINLOCK(mck_lock);
+
static const struct clk_master_characteristics mck_characteristics = {
.output = { .min = 124000000, .max = 166000000 },
.divisors = { 1, 2, 4, 3 },
@@ -40,14 +42,14 @@ static const struct {
char *p;
u8 id;
} sama5d2_systemck[] = {
- { .n = "ddrck", .p = "masterck", .id = 2 },
- { .n = "lcdck", .p = "masterck", .id = 3 },
- { .n = "uhpck", .p = "usbck", .id = 6 },
- { .n = "udpck", .p = "usbck", .id = 7 },
- { .n = "pck0", .p = "prog0", .id = 8 },
- { .n = "pck1", .p = "prog1", .id = 9 },
- { .n = "pck2", .p = "prog2", .id = 10 },
- { .n = "iscck", .p = "masterck", .id = 18 },
+ { .n = "ddrck", .p = "masterck_div", .id = 2 },
+ { .n = "lcdck", .p = "masterck_div", .id = 3 },
+ { .n = "uhpck", .p = "usbck", .id = 6 },
+ { .n = "udpck", .p = "usbck", .id = 7 },
+ { .n = "pck0", .p = "prog0", .id = 8 },
+ { .n = "pck1", .p = "prog1", .id = 9 },
+ { .n = "pck2", .p = "prog2", .id = 10 },
+ { .n = "iscck", .p = "masterck_div", .id = 18 },
};
static const struct {
@@ -235,15 +237,25 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
parent_names[1] = "mainck";
parent_names[2] = "plladivck";
parent_names[3] = "utmick";
- hw = at91_clk_register_master(regmap, "masterck", 4, parent_names,
- &at91sam9x5_master_layout,
- &mck_characteristics);
+ hw = at91_clk_register_master_pres(regmap, "masterck_pres", 4,
+ parent_names,
+ &at91sam9x5_master_layout,
+ &mck_characteristics, &mck_lock,
+ CLK_SET_RATE_GATE, INT_MIN);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_master_div(regmap, "masterck_div",
+ "masterck_pres",
+ &at91sam9x5_master_layout,
+ &mck_characteristics, &mck_lock,
+ CLK_SET_RATE_GATE);
if (IS_ERR(hw))
goto err_free;
sama5d2_pmc->chws[PMC_MCK] = hw;
- hw = at91_clk_register_h32mx(regmap, "h32mxck", "masterck");
+ hw = at91_clk_register_h32mx(regmap, "h32mxck", "masterck_div");
if (IS_ERR(hw))
goto err_free;
@@ -259,7 +271,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
parent_names[1] = "mainck";
parent_names[2] = "plladivck";
parent_names[3] = "utmick";
- parent_names[4] = "masterck";
+ parent_names[4] = "masterck_div";
parent_names[5] = "audiopll_pmcck";
for (i = 0; i < 3; i++) {
char name[6];
@@ -290,7 +302,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
&sama5d2_pcr_layout,
sama5d2_periphck[i].n,
- "masterck",
+ "masterck_div",
sama5d2_periphck[i].id,
&range, INT_MIN);
if (IS_ERR(hw))
@@ -317,7 +329,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
parent_names[1] = "mainck";
parent_names[2] = "plladivck";
parent_names[3] = "utmick";
- parent_names[4] = "masterck";
+ parent_names[4] = "masterck_div";
parent_names[5] = "audiopll_pmcck";
for (i = 0; i < ARRAY_SIZE(sama5d2_gck); i++) {
hw = at91_clk_register_generated(regmap, &pmc_pcr_lock,
diff --git a/drivers/clk/at91/sama5d3.c b/drivers/clk/at91/sama5d3.c
index 7c6e0a5b9dc8..87009ee8effc 100644
--- a/drivers/clk/at91/sama5d3.c
+++ b/drivers/clk/at91/sama5d3.c
@@ -7,6 +7,8 @@
#include "pmc.h"
+static DEFINE_SPINLOCK(mck_lock);
+
static const struct clk_master_characteristics mck_characteristics = {
.output = { .min = 0, .max = 166000000 },
.divisors = { 1, 2, 4, 3 },
@@ -40,14 +42,14 @@ static const struct {
char *p;
u8 id;
} sama5d3_systemck[] = {
- { .n = "ddrck", .p = "masterck", .id = 2 },
- { .n = "lcdck", .p = "masterck", .id = 3 },
- { .n = "smdck", .p = "smdclk", .id = 4 },
- { .n = "uhpck", .p = "usbck", .id = 6 },
- { .n = "udpck", .p = "usbck", .id = 7 },
- { .n = "pck0", .p = "prog0", .id = 8 },
- { .n = "pck1", .p = "prog1", .id = 9 },
- { .n = "pck2", .p = "prog2", .id = 10 },
+ { .n = "ddrck", .p = "masterck_div", .id = 2 },
+ { .n = "lcdck", .p = "masterck_div", .id = 3 },
+ { .n = "smdck", .p = "smdclk", .id = 4 },
+ { .n = "uhpck", .p = "usbck", .id = 6 },
+ { .n = "udpck", .p = "usbck", .id = 7 },
+ { .n = "pck0", .p = "prog0", .id = 8 },
+ { .n = "pck1", .p = "prog1", .id = 9 },
+ { .n = "pck2", .p = "prog2", .id = 10 },
};
static const struct {
@@ -170,9 +172,19 @@ static void __init sama5d3_pmc_setup(struct device_node *np)
parent_names[1] = "mainck";
parent_names[2] = "plladivck";
parent_names[3] = "utmick";
- hw = at91_clk_register_master(regmap, "masterck", 4, parent_names,
- &at91sam9x5_master_layout,
- &mck_characteristics);
+ hw = at91_clk_register_master_pres(regmap, "masterck_pres", 4,
+ parent_names,
+ &at91sam9x5_master_layout,
+ &mck_characteristics, &mck_lock,
+ CLK_SET_RATE_GATE, INT_MIN);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_master_div(regmap, "masterck_div",
+ "masterck_pres",
+ &at91sam9x5_master_layout,
+ &mck_characteristics, &mck_lock,
+ CLK_SET_RATE_GATE);
if (IS_ERR(hw))
goto err_free;
@@ -192,7 +204,7 @@ static void __init sama5d3_pmc_setup(struct device_node *np)
parent_names[1] = "mainck";
parent_names[2] = "plladivck";
parent_names[3] = "utmick";
- parent_names[4] = "masterck";
+ parent_names[4] = "masterck_div";
for (i = 0; i < 3; i++) {
char name[6];
@@ -222,7 +234,7 @@ static void __init sama5d3_pmc_setup(struct device_node *np)
hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
&sama5d3_pcr_layout,
sama5d3_periphck[i].n,
- "masterck",
+ "masterck_div",
sama5d3_periphck[i].id,
&sama5d3_periphck[i].r,
INT_MIN);
diff --git a/drivers/clk/at91/sama5d4.c b/drivers/clk/at91/sama5d4.c
index 92d8d4141b43..57fff790188b 100644
--- a/drivers/clk/at91/sama5d4.c
+++ b/drivers/clk/at91/sama5d4.c
@@ -7,6 +7,8 @@
#include "pmc.h"
+static DEFINE_SPINLOCK(mck_lock);
+
static const struct clk_master_characteristics mck_characteristics = {
.output = { .min = 125000000, .max = 200000000 },
.divisors = { 1, 2, 4, 3 },
@@ -39,14 +41,14 @@ static const struct {
char *p;
u8 id;
} sama5d4_systemck[] = {
- { .n = "ddrck", .p = "masterck", .id = 2 },
- { .n = "lcdck", .p = "masterck", .id = 3 },
- { .n = "smdck", .p = "smdclk", .id = 4 },
- { .n = "uhpck", .p = "usbck", .id = 6 },
- { .n = "udpck", .p = "usbck", .id = 7 },
- { .n = "pck0", .p = "prog0", .id = 8 },
- { .n = "pck1", .p = "prog1", .id = 9 },
- { .n = "pck2", .p = "prog2", .id = 10 },
+ { .n = "ddrck", .p = "masterck_div", .id = 2 },
+ { .n = "lcdck", .p = "masterck_div", .id = 3 },
+ { .n = "smdck", .p = "smdclk", .id = 4 },
+ { .n = "uhpck", .p = "usbck", .id = 6 },
+ { .n = "udpck", .p = "usbck", .id = 7 },
+ { .n = "pck0", .p = "prog0", .id = 8 },
+ { .n = "pck1", .p = "prog1", .id = 9 },
+ { .n = "pck2", .p = "prog2", .id = 10 },
};
static const struct {
@@ -185,15 +187,25 @@ static void __init sama5d4_pmc_setup(struct device_node *np)
parent_names[1] = "mainck";
parent_names[2] = "plladivck";
parent_names[3] = "utmick";
- hw = at91_clk_register_master(regmap, "masterck", 4, parent_names,
- &at91sam9x5_master_layout,
- &mck_characteristics);
+ hw = at91_clk_register_master_pres(regmap, "masterck_pres", 4,
+ parent_names,
+ &at91sam9x5_master_layout,
+ &mck_characteristics, &mck_lock,
+ CLK_SET_RATE_GATE, INT_MIN);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_master_div(regmap, "masterck_div",
+ "masterck_pres",
+ &at91sam9x5_master_layout,
+ &mck_characteristics, &mck_lock,
+ CLK_SET_RATE_GATE);
if (IS_ERR(hw))
goto err_free;
sama5d4_pmc->chws[PMC_MCK] = hw;
- hw = at91_clk_register_h32mx(regmap, "h32mxck", "masterck");
+ hw = at91_clk_register_h32mx(regmap, "h32mxck", "masterck_div");
if (IS_ERR(hw))
goto err_free;
@@ -215,7 +227,7 @@ static void __init sama5d4_pmc_setup(struct device_node *np)
parent_names[1] = "mainck";
parent_names[2] = "plladivck";
parent_names[3] = "utmick";
- parent_names[4] = "masterck";
+ parent_names[4] = "masterck_div";
for (i = 0; i < 3; i++) {
char name[6];
@@ -245,7 +257,7 @@ static void __init sama5d4_pmc_setup(struct device_node *np)
hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
&sama5d4_pcr_layout,
sama5d4_periphck[i].n,
- "masterck",
+ "masterck_div",
sama5d4_periphck[i].id,
&range, INT_MIN);
if (IS_ERR(hw))
diff --git a/drivers/clk/at91/sama7g5.c b/drivers/clk/at91/sama7g5.c
index 0db2ab3eca14..a6e20b35960e 100644
--- a/drivers/clk/at91/sama7g5.c
+++ b/drivers/clk/at91/sama7g5.c
@@ -32,6 +32,7 @@
} while (0)
static DEFINE_SPINLOCK(pmc_pll_lock);
+static DEFINE_SPINLOCK(pmc_mck0_lock);
static DEFINE_SPINLOCK(pmc_mckX_lock);
/**
@@ -89,118 +90,198 @@ static const struct clk_pll_layout pll_layout_divio = {
.endiv_shift = 30,
};
+/*
+ * CPU PLL output range.
+ * Note: the upper limit has been set to 1000000002 because the hardware
+ * block cannot output exactly 1 GHz.
+ */
+static const struct clk_range cpu_pll_outputs[] = {
+ { .min = 2343750, .max = 1000000002 },
+};
+
+/* PLL output range. */
+static const struct clk_range pll_outputs[] = {
+ { .min = 2343750, .max = 1200000000 },
+};
+
+/* CPU PLL characteristics. */
+static const struct clk_pll_characteristics cpu_pll_characteristics = {
+ .input = { .min = 12000000, .max = 50000000 },
+ .num_output = ARRAY_SIZE(cpu_pll_outputs),
+ .output = cpu_pll_outputs,
+};
+
+/* PLL characteristics. */
+static const struct clk_pll_characteristics pll_characteristics = {
+ .input = { .min = 12000000, .max = 50000000 },
+ .num_output = ARRAY_SIZE(pll_outputs),
+ .output = pll_outputs,
+};
+
/**
* PLL clocks description
* @n: clock name
* @p: clock parent
* @l: clock layout
+ * @c: clock characteristics
* @t: clock type
- * @f: true if clock is critical and cannot be disabled
+ * @f: clock flags
* @eid: export index in sama7g5->chws[] array
*/
static const struct {
const char *n;
const char *p;
const struct clk_pll_layout *l;
+ const struct clk_pll_characteristics *c;
+ unsigned long f;
u8 t;
- u8 c;
u8 eid;
} sama7g5_plls[][PLL_ID_MAX] = {
[PLL_ID_CPU] = {
{ .n = "cpupll_fracck",
.p = "mainck",
.l = &pll_layout_frac,
+ .c = &cpu_pll_characteristics,
.t = PLL_TYPE_FRAC,
- .c = 1, },
+ /*
+ * This feeds cpupll_divpmcck, which feeds the CPU. It
+ * should not be disabled.
+ */
+ .f = CLK_IS_CRITICAL, },
{ .n = "cpupll_divpmcck",
.p = "cpupll_fracck",
.l = &pll_layout_divpmc,
+ .c = &cpu_pll_characteristics,
.t = PLL_TYPE_DIV,
- .c = 1, },
+ /* This feeds the CPU. It should not be disabled. */
+ .f = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ .eid = PMC_CPUPLL, },
},
[PLL_ID_SYS] = {
{ .n = "syspll_fracck",
.p = "mainck",
.l = &pll_layout_frac,
+ .c = &pll_characteristics,
.t = PLL_TYPE_FRAC,
- .c = 1, },
+ /*
+ * This feeds syspll_divpmcck, which may feed critical parts
+ * of the system such as timers. Therefore it should not be
+ * disabled.
+ */
+ .f = CLK_IS_CRITICAL | CLK_SET_RATE_GATE, },
{ .n = "syspll_divpmcck",
.p = "syspll_fracck",
.l = &pll_layout_divpmc,
+ .c = &pll_characteristics,
.t = PLL_TYPE_DIV,
- .c = 1, },
+ /*
+ * This may feed critical parts of the system such as timers.
+ * Therefore it should not be disabled.
+ */
+ .f = CLK_IS_CRITICAL | CLK_SET_RATE_GATE,
+ .eid = PMC_SYSPLL, },
},
[PLL_ID_DDR] = {
{ .n = "ddrpll_fracck",
.p = "mainck",
.l = &pll_layout_frac,
+ .c = &pll_characteristics,
.t = PLL_TYPE_FRAC,
- .c = 1, },
+ /*
+ * This feeds ddrpll_divpmcck, which feeds the DDR. It should
+ * not be disabled.
+ */
+ .f = CLK_IS_CRITICAL | CLK_SET_RATE_GATE, },
{ .n = "ddrpll_divpmcck",
.p = "ddrpll_fracck",
.l = &pll_layout_divpmc,
+ .c = &pll_characteristics,
.t = PLL_TYPE_DIV,
- .c = 1, },
+ /* This feeds the DDR. It should not be disabled. */
+ .f = CLK_IS_CRITICAL | CLK_SET_RATE_GATE, },
},
[PLL_ID_IMG] = {
{ .n = "imgpll_fracck",
.p = "mainck",
.l = &pll_layout_frac,
- .t = PLL_TYPE_FRAC, },
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ .f = CLK_SET_RATE_GATE, },
{ .n = "imgpll_divpmcck",
.p = "imgpll_fracck",
.l = &pll_layout_divpmc,
- .t = PLL_TYPE_DIV, },
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_DIV,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT, },
},
[PLL_ID_BAUD] = {
{ .n = "baudpll_fracck",
.p = "mainck",
.l = &pll_layout_frac,
- .t = PLL_TYPE_FRAC, },
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ .f = CLK_SET_RATE_GATE, },
{ .n = "baudpll_divpmcck",
.p = "baudpll_fracck",
.l = &pll_layout_divpmc,
- .t = PLL_TYPE_DIV, },
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_DIV,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT, },
},
[PLL_ID_AUDIO] = {
{ .n = "audiopll_fracck",
.p = "main_xtal",
.l = &pll_layout_frac,
- .t = PLL_TYPE_FRAC, },
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ .f = CLK_SET_RATE_GATE, },
{ .n = "audiopll_divpmcck",
.p = "audiopll_fracck",
.l = &pll_layout_divpmc,
+ .c = &pll_characteristics,
.t = PLL_TYPE_DIV,
- .eid = PMC_I2S0_MUX, },
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ .eid = PMC_AUDIOPMCPLL, },
{ .n = "audiopll_diviock",
.p = "audiopll_fracck",
.l = &pll_layout_divio,
+ .c = &pll_characteristics,
.t = PLL_TYPE_DIV,
- .eid = PMC_I2S1_MUX, },
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ .eid = PMC_AUDIOIOPLL, },
},
[PLL_ID_ETH] = {
{ .n = "ethpll_fracck",
.p = "main_xtal",
.l = &pll_layout_frac,
- .t = PLL_TYPE_FRAC, },
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ .f = CLK_SET_RATE_GATE, },
{ .n = "ethpll_divpmcck",
.p = "ethpll_fracck",
.l = &pll_layout_divpmc,
- .t = PLL_TYPE_DIV, },
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_DIV,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT, },
},
};
@@ -245,7 +326,7 @@ static const struct {
.ep = { "syspll_divpmcck", "ddrpll_divpmcck", "imgpll_divpmcck", },
.ep_mux_table = { 5, 6, 7, },
.ep_count = 3,
- .ep_chg_id = 6, },
+ .ep_chg_id = 5, },
{ .n = "mck4",
.id = 4,
@@ -278,7 +359,7 @@ static const struct {
};
/* Mux table for programmable clocks. */
-static u32 sama7g5_prog_mux_table[] = { 0, 1, 2, 3, 5, 6, 7, 8, 9, 10, };
+static u32 sama7g5_prog_mux_table[] = { 0, 1, 2, 5, 6, 7, 8, 9, 10, };
/**
* Peripheral clock description
@@ -401,7 +482,7 @@ static const struct {
.pp = { "audiopll_divpmcck", },
.pp_mux_table = { 9, },
.pp_count = 1,
- .pp_chg_id = 4, },
+ .pp_chg_id = 3, },
{ .n = "csi_gclk",
.id = 33,
@@ -513,7 +594,7 @@ static const struct {
.pp = { "ethpll_divpmcck", },
.pp_mux_table = { 10, },
.pp_count = 1,
- .pp_chg_id = 4, },
+ .pp_chg_id = 3, },
{ .n = "gmac1_gclk",
.id = 52,
@@ -545,7 +626,7 @@ static const struct {
.pp = { "syspll_divpmcck", "audiopll_divpmcck", },
.pp_mux_table = { 5, 9, },
.pp_count = 2,
- .pp_chg_id = 5, },
+ .pp_chg_id = 4, },
{ .n = "i2smcc1_gclk",
.id = 58,
@@ -553,7 +634,7 @@ static const struct {
.pp = { "syspll_divpmcck", "audiopll_divpmcck", },
.pp_mux_table = { 5, 9, },
.pp_count = 2,
- .pp_chg_id = 5, },
+ .pp_chg_id = 4, },
{ .n = "mcan0_gclk",
.id = 61,
@@ -695,7 +776,7 @@ static const struct {
.pp = { "syspll_divpmcck", "baudpll_divpmcck", },
.pp_mux_table = { 5, 8, },
.pp_count = 2,
- .pp_chg_id = 5, },
+ .pp_chg_id = 4, },
{ .n = "sdmmc1_gclk",
.id = 81,
@@ -703,7 +784,7 @@ static const struct {
.pp = { "syspll_divpmcck", "baudpll_divpmcck", },
.pp_mux_table = { 5, 8, },
.pp_count = 2,
- .pp_chg_id = 5, },
+ .pp_chg_id = 4, },
{ .n = "sdmmc2_gclk",
.id = 82,
@@ -711,7 +792,7 @@ static const struct {
.pp = { "syspll_divpmcck", "baudpll_divpmcck", },
.pp_mux_table = { 5, 8, },
.pp_count = 2,
- .pp_chg_id = 5, },
+ .pp_chg_id = 4, },
{ .n = "spdifrx_gclk",
.id = 84,
@@ -719,7 +800,7 @@ static const struct {
.pp = { "syspll_divpmcck", "audiopll_divpmcck", },
.pp_mux_table = { 5, 9, },
.pp_count = 2,
- .pp_chg_id = 5, },
+ .pp_chg_id = 4, },
{ .n = "spdiftx_gclk",
.id = 85,
@@ -727,7 +808,7 @@ static const struct {
.pp = { "syspll_divpmcck", "audiopll_divpmcck", },
.pp_mux_table = { 5, 9, },
.pp_count = 2,
- .pp_chg_id = 5, },
+ .pp_chg_id = 4, },
{ .n = "tcb0_ch0_gclk",
.id = 88,
@@ -758,28 +839,16 @@ static const struct {
.pp_chg_id = INT_MIN, },
};
-/* PLL output range. */
-static const struct clk_range pll_outputs[] = {
- { .min = 2343750, .max = 1200000000 },
-};
-
-/* PLL characteristics. */
-static const struct clk_pll_characteristics pll_characteristics = {
- .input = { .min = 12000000, .max = 50000000 },
- .num_output = ARRAY_SIZE(pll_outputs),
- .output = pll_outputs,
-};
-
/* MCK0 characteristics. */
static const struct clk_master_characteristics mck0_characteristics = {
- .output = { .min = 140000000, .max = 200000000 },
- .divisors = { 1, 2, 4, 3 },
+ .output = { .min = 50000000, .max = 200000000 },
+ .divisors = { 1, 2, 4, 3, 5 },
.have_div3_pres = 1,
};
/* MCK0 layout. */
static const struct clk_master_layout mck0_layout = {
- .mask = 0x373,
+ .mask = 0x773,
.pres_shift = 4,
.offset = 0x28,
};
@@ -835,10 +904,10 @@ static void __init sama7g5_pmc_setup(struct device_node *np)
if (IS_ERR(regmap))
return;
- sama7g5_pmc = pmc_data_allocate(PMC_I2S1_MUX + 1,
+ sama7g5_pmc = pmc_data_allocate(PMC_CPU + 1,
nck(sama7g5_systemck),
nck(sama7g5_periphck),
- nck(sama7g5_gck));
+ nck(sama7g5_gck), 8);
if (!sama7g5_pmc)
return;
@@ -886,18 +955,18 @@ static void __init sama7g5_pmc_setup(struct device_node *np)
hw = sam9x60_clk_register_frac_pll(regmap,
&pmc_pll_lock, sama7g5_plls[i][j].n,
sama7g5_plls[i][j].p, parent_hw, i,
- &pll_characteristics,
+ sama7g5_plls[i][j].c,
sama7g5_plls[i][j].l,
- sama7g5_plls[i][j].c);
+ sama7g5_plls[i][j].f);
break;
case PLL_TYPE_DIV:
hw = sam9x60_clk_register_div_pll(regmap,
&pmc_pll_lock, sama7g5_plls[i][j].n,
sama7g5_plls[i][j].p, i,
- &pll_characteristics,
+ sama7g5_plls[i][j].c,
sama7g5_plls[i][j].l,
- sama7g5_plls[i][j].c);
+ sama7g5_plls[i][j].f);
break;
default:
@@ -912,12 +981,19 @@ static void __init sama7g5_pmc_setup(struct device_node *np)
}
}
- parent_names[0] = md_slck_name;
- parent_names[1] = "mainck";
- parent_names[2] = "cpupll_divpmcck";
- parent_names[3] = "syspll_divpmcck";
- hw = at91_clk_register_master(regmap, "mck0", 4, parent_names,
- &mck0_layout, &mck0_characteristics);
+ parent_names[0] = "cpupll_divpmcck";
+ hw = at91_clk_register_master_pres(regmap, "cpuck", 1, parent_names,
+ &mck0_layout, &mck0_characteristics,
+ &pmc_mck0_lock,
+ CLK_SET_RATE_PARENT, 0);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama7g5_pmc->chws[PMC_CPU] = hw;
+
+ hw = at91_clk_register_master_div(regmap, "mck0", "cpuck",
+ &mck0_layout, &mck0_characteristics,
+ &pmc_mck0_lock, 0);
if (IS_ERR(hw))
goto err_free;
@@ -926,9 +1002,8 @@ static void __init sama7g5_pmc_setup(struct device_node *np)
parent_names[0] = md_slck_name;
parent_names[1] = td_slck_name;
parent_names[2] = "mainck";
- parent_names[3] = "mck0";
for (i = 0; i < ARRAY_SIZE(sama7g5_mckx); i++) {
- u8 num_parents = 4 + sama7g5_mckx[i].ep_count;
+ u8 num_parents = 3 + sama7g5_mckx[i].ep_count;
u32 *mux_table;
mux_table = kmalloc_array(num_parents, sizeof(*mux_table),
@@ -936,10 +1011,10 @@ static void __init sama7g5_pmc_setup(struct device_node *np)
if (!mux_table)
goto err_free;
- SAMA7G5_INIT_TABLE(mux_table, 4);
- SAMA7G5_FILL_TABLE(&mux_table[4], sama7g5_mckx[i].ep_mux_table,
+ SAMA7G5_INIT_TABLE(mux_table, 3);
+ SAMA7G5_FILL_TABLE(&mux_table[3], sama7g5_mckx[i].ep_mux_table,
sama7g5_mckx[i].ep_count);
- SAMA7G5_FILL_TABLE(&parent_names[4], sama7g5_mckx[i].ep,
+ SAMA7G5_FILL_TABLE(&parent_names[3], sama7g5_mckx[i].ep,
sama7g5_mckx[i].ep_count);
hw = at91_clk_sama7g5_register_master(regmap, sama7g5_mckx[i].n,
@@ -962,24 +1037,25 @@ static void __init sama7g5_pmc_setup(struct device_node *np)
parent_names[0] = md_slck_name;
parent_names[1] = td_slck_name;
parent_names[2] = "mainck";
- parent_names[3] = "mck0";
- parent_names[4] = "syspll_divpmcck";
- parent_names[5] = "ddrpll_divpmcck";
- parent_names[6] = "imgpll_divpmcck";
- parent_names[7] = "baudpll_divpmcck";
- parent_names[8] = "audiopll_divpmcck";
- parent_names[9] = "ethpll_divpmcck";
+ parent_names[3] = "syspll_divpmcck";
+ parent_names[4] = "ddrpll_divpmcck";
+ parent_names[5] = "imgpll_divpmcck";
+ parent_names[6] = "baudpll_divpmcck";
+ parent_names[7] = "audiopll_divpmcck";
+ parent_names[8] = "ethpll_divpmcck";
for (i = 0; i < 8; i++) {
char name[6];
snprintf(name, sizeof(name), "prog%d", i);
hw = at91_clk_register_programmable(regmap, name, parent_names,
- 10, i,
+ 9, i,
&programmable_layout,
sama7g5_prog_mux_table);
if (IS_ERR(hw))
goto err_free;
+
+ sama7g5_pmc->pchws[i] = hw;
}
for (i = 0; i < ARRAY_SIZE(sama7g5_systemck); i++) {
@@ -1010,9 +1086,8 @@ static void __init sama7g5_pmc_setup(struct device_node *np)
parent_names[0] = md_slck_name;
parent_names[1] = td_slck_name;
parent_names[2] = "mainck";
- parent_names[3] = "mck0";
for (i = 0; i < ARRAY_SIZE(sama7g5_gck); i++) {
- u8 num_parents = 4 + sama7g5_gck[i].pp_count;
+ u8 num_parents = 3 + sama7g5_gck[i].pp_count;
u32 *mux_table;
mux_table = kmalloc_array(num_parents, sizeof(*mux_table),
@@ -1020,10 +1095,10 @@ static void __init sama7g5_pmc_setup(struct device_node *np)
if (!mux_table)
goto err_free;
- SAMA7G5_INIT_TABLE(mux_table, 4);
- SAMA7G5_FILL_TABLE(&mux_table[4], sama7g5_gck[i].pp_mux_table,
+ SAMA7G5_INIT_TABLE(mux_table, 3);
+ SAMA7G5_FILL_TABLE(&mux_table[3], sama7g5_gck[i].pp_mux_table,
sama7g5_gck[i].pp_count);
- SAMA7G5_FILL_TABLE(&parent_names[4], sama7g5_gck[i].pp,
+ SAMA7G5_FILL_TABLE(&parent_names[3], sama7g5_gck[i].pp,
sama7g5_gck[i].pp_count);
hw = at91_clk_register_generated(regmap, &pmc_pcr_lock,
@@ -1052,7 +1127,7 @@ err_free:
kfree(alloc_mem);
}
- pmc_data_free(sama7g5_pmc);
+ kfree(sama7g5_pmc);
}
/* Some clks are used for a clocksource */
diff --git a/drivers/clk/bcm/clk-bcm2711-dvp.c b/drivers/clk/bcm/clk-bcm2711-dvp.c
index 8333e20dc9d2..e63a42618ac2 100644
--- a/drivers/clk/bcm/clk-bcm2711-dvp.c
+++ b/drivers/clk/bcm/clk-bcm2711-dvp.c
@@ -25,7 +25,6 @@ static const struct clk_parent_data clk_dvp_parent = {
static int clk_dvp_probe(struct platform_device *pdev)
{
struct clk_hw_onecell_data *data;
- struct resource *res;
struct clk_dvp *dvp;
void __iomem *base;
int ret;
@@ -42,7 +41,7 @@ static int clk_dvp_probe(struct platform_device *pdev)
return -ENOMEM;
data = dvp->data;
- base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
@@ -108,6 +107,7 @@ static const struct of_device_id clk_dvp_dt_ids[] = {
{ .compatible = "brcm,brcm2711-dvp", },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, clk_dvp_dt_ids);
static struct platform_driver clk_dvp_driver = {
.probe = clk_dvp_probe,
diff --git a/drivers/clk/clk-axi-clkgen.c b/drivers/clk/clk-axi-clkgen.c
index 14d803e6af62..ad86e031ba3e 100644
--- a/drivers/clk/clk-axi-clkgen.c
+++ b/drivers/clk/clk-axi-clkgen.c
@@ -46,9 +46,17 @@
#define MMCM_CLK_DIV_DIVIDE BIT(11)
#define MMCM_CLK_DIV_NOCOUNT BIT(12)
+struct axi_clkgen_limits {
+ unsigned int fpfd_min;
+ unsigned int fpfd_max;
+ unsigned int fvco_min;
+ unsigned int fvco_max;
+};
+
struct axi_clkgen {
void __iomem *base;
struct clk_hw clk_hw;
+ struct axi_clkgen_limits limits;
};
static uint32_t axi_clkgen_lookup_filter(unsigned int m)
@@ -100,12 +108,15 @@ static uint32_t axi_clkgen_lookup_lock(unsigned int m)
return 0x1f1f00fa;
}
-static const unsigned int fpfd_min = 10000;
-static const unsigned int fpfd_max = 300000;
-static const unsigned int fvco_min = 600000;
-static const unsigned int fvco_max = 1200000;
+static const struct axi_clkgen_limits axi_clkgen_zynq_default_limits = {
+ .fpfd_min = 10000,
+ .fpfd_max = 300000,
+ .fvco_min = 600000,
+ .fvco_max = 1200000,
+};
-static void axi_clkgen_calc_params(unsigned long fin, unsigned long fout,
+static void axi_clkgen_calc_params(const struct axi_clkgen_limits *limits,
+ unsigned long fin, unsigned long fout,
unsigned int *best_d, unsigned int *best_m, unsigned int *best_dout)
{
unsigned long d, d_min, d_max, _d_min, _d_max;
@@ -122,12 +133,12 @@ static void axi_clkgen_calc_params(unsigned long fin, unsigned long fout,
*best_m = 0;
*best_dout = 0;
- d_min = max_t(unsigned long, DIV_ROUND_UP(fin, fpfd_max), 1);
- d_max = min_t(unsigned long, fin / fpfd_min, 80);
+ d_min = max_t(unsigned long, DIV_ROUND_UP(fin, limits->fpfd_max), 1);
+ d_max = min_t(unsigned long, fin / limits->fpfd_min, 80);
again:
- fvco_min_fract = fvco_min << fract_shift;
- fvco_max_fract = fvco_max << fract_shift;
+ fvco_min_fract = limits->fvco_min << fract_shift;
+ fvco_max_fract = limits->fvco_max << fract_shift;
m_min = max_t(unsigned long, DIV_ROUND_UP(fvco_min_fract, fin) * d_min, 1);
m_max = min_t(unsigned long, fvco_max_fract * d_max / fin, 64 << fract_shift);
@@ -319,6 +330,7 @@ static int axi_clkgen_set_rate(struct clk_hw *clk_hw,
unsigned long rate, unsigned long parent_rate)
{
struct axi_clkgen *axi_clkgen = clk_hw_to_axi_clkgen(clk_hw);
+ const struct axi_clkgen_limits *limits = &axi_clkgen->limits;
unsigned int d, m, dout;
struct axi_clkgen_div_params params;
uint32_t power = 0;
@@ -328,7 +340,7 @@ static int axi_clkgen_set_rate(struct clk_hw *clk_hw,
if (parent_rate == 0 || rate == 0)
return -EINVAL;
- axi_clkgen_calc_params(parent_rate, rate, &d, &m, &dout);
+ axi_clkgen_calc_params(limits, parent_rate, rate, &d, &m, &dout);
if (d == 0 || dout == 0 || m == 0)
return -EINVAL;
@@ -368,10 +380,12 @@ static int axi_clkgen_set_rate(struct clk_hw *clk_hw,
static long axi_clkgen_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
+ struct axi_clkgen *axi_clkgen = clk_hw_to_axi_clkgen(hw);
+ const struct axi_clkgen_limits *limits = &axi_clkgen->limits;
unsigned int d, m, dout;
unsigned long long tmp;
- axi_clkgen_calc_params(*parent_rate, rate, &d, &m, &dout);
+ axi_clkgen_calc_params(limits, *parent_rate, rate, &d, &m, &dout);
if (d == 0 || dout == 0 || m == 0)
return -EINVAL;
@@ -482,17 +496,9 @@ static const struct clk_ops axi_clkgen_ops = {
.get_parent = axi_clkgen_get_parent,
};
-static const struct of_device_id axi_clkgen_ids[] = {
- {
- .compatible = "adi,axi-clkgen-2.00.a",
- },
- { },
-};
-MODULE_DEVICE_TABLE(of, axi_clkgen_ids);
-
static int axi_clkgen_probe(struct platform_device *pdev)
{
- const struct of_device_id *id;
+ const struct axi_clkgen_limits *dflt_limits;
struct axi_clkgen *axi_clkgen;
struct clk_init_data init;
const char *parent_names[2];
@@ -501,11 +507,8 @@ static int axi_clkgen_probe(struct platform_device *pdev)
unsigned int i;
int ret;
- if (!pdev->dev.of_node)
- return -ENODEV;
-
- id = of_match_node(axi_clkgen_ids, pdev->dev.of_node);
- if (!id)
+ dflt_limits = device_get_match_data(&pdev->dev);
+ if (!dflt_limits)
return -ENODEV;
axi_clkgen = devm_kzalloc(&pdev->dev, sizeof(*axi_clkgen), GFP_KERNEL);
@@ -527,6 +530,8 @@ static int axi_clkgen_probe(struct platform_device *pdev)
return -EINVAL;
}
+ memcpy(&axi_clkgen->limits, dflt_limits, sizeof(axi_clkgen->limits));
+
clk_name = pdev->dev.of_node->name;
of_property_read_string(pdev->dev.of_node, "clock-output-names",
&clk_name);
@@ -554,6 +559,15 @@ static int axi_clkgen_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id axi_clkgen_ids[] = {
+ {
+ .compatible = "adi,axi-clkgen-2.00.a",
+ .data = &axi_clkgen_zynq_default_limits,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, axi_clkgen_ids);
+
static struct platform_driver axi_clkgen_driver = {
.driver = {
.name = "adi-axi-clkgen",
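With the limits carried as of_device_id match data, supporting a new controller variant becomes one table entry plus one limits struct, with no per-variant branching in probe. A hedged sketch; the second compatible string and its values are hypothetical:

	static const struct axi_clkgen_limits axi_clkgen_foo_limits = {
		.fpfd_min = 10000,
		.fpfd_max = 450000,
		.fvco_min = 800000,
		.fvco_max = 1600000,
	};

	static const struct of_device_id axi_clkgen_ids[] = {
		{
			.compatible = "adi,axi-clkgen-2.00.a",
			.data = &axi_clkgen_zynq_default_limits,
		},
		{
			.compatible = "adi,axi-clkgen-foo",	/* hypothetical */
			.data = &axi_clkgen_foo_limits,
		},
		{ }
	};

device_get_match_data() in probe then hands back the limits for whichever compatible matched.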
diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
index 2ddb54f7d3ab..0506046a5f4b 100644
--- a/drivers/clk/clk-composite.c
+++ b/drivers/clk/clk-composite.c
@@ -4,6 +4,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/device.h>
#include <linux/err.h>
#include <linux/slab.h>
@@ -405,3 +406,52 @@ void clk_hw_unregister_composite(struct clk_hw *hw)
kfree(composite);
}
EXPORT_SYMBOL_GPL(clk_hw_unregister_composite);
+
+static void devm_clk_hw_release_composite(struct device *dev, void *res)
+{
+ clk_hw_unregister_composite(*(struct clk_hw **)res);
+}
+
+static struct clk_hw *__devm_clk_hw_register_composite(struct device *dev,
+ const char *name, const char * const *parent_names,
+ const struct clk_parent_data *pdata, int num_parents,
+ struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
+ struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
+ struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
+ unsigned long flags)
+{
+ struct clk_hw **ptr, *hw;
+
+ ptr = devres_alloc(devm_clk_hw_release_composite, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ hw = __clk_hw_register_composite(dev, name, parent_names, pdata,
+ num_parents, mux_hw, mux_ops, rate_hw,
+ rate_ops, gate_hw, gate_ops, flags);
+
+ if (!IS_ERR(hw)) {
+ *ptr = hw;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return hw;
+}
+
+struct clk_hw *devm_clk_hw_register_composite_pdata(struct device *dev,
+ const char *name,
+ const struct clk_parent_data *parent_data,
+ int num_parents,
+ struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
+ struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
+ struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
+ unsigned long flags)
+{
+ return __devm_clk_hw_register_composite(dev, name, NULL, parent_data,
+ num_parents, mux_hw, mux_ops,
+ rate_hw, rate_ops, gate_hw,
+ gate_ops, flags);
+}
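A minimal usage sketch of the new devres helper, assuming a hypothetical foo driver with a divider and a gate (the clk-fsl-sai conversion further down is the in-tree user); no unregister call is needed in the remove path:

	struct foo_clk {
		struct clk_divider div;
		struct clk_gate gate;
	};

	static int foo_clk_probe(struct platform_device *pdev)
	{
		struct clk_parent_data pdata = { .index = 0 };
		struct foo_clk *foo;
		struct clk_hw *hw;

		foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
		if (!foo)
			return -ENOMEM;

		/* ... fill in foo->div and foo->gate register fields here ... */

		hw = devm_clk_hw_register_composite_pdata(&pdev->dev, "foo_clk",
							  &pdata, 1,
							  NULL, NULL, /* no mux */
							  &foo->div.hw,
							  &clk_divider_ops,
							  &foo->gate.hw,
							  &clk_gate_ops,
							  CLK_SET_RATE_GATE);
		if (IS_ERR(hw))
			return PTR_ERR(hw);

		/* devres unregisters the composite on detach */
		return devm_of_clk_add_hw_provider(&pdev->dev,
						   of_clk_hw_simple_get, hw);
	}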
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index 8de12cb0c43d..c499799693cc 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -8,6 +8,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io.h>
@@ -578,3 +579,36 @@ void clk_hw_unregister_divider(struct clk_hw *hw)
kfree(div);
}
EXPORT_SYMBOL_GPL(clk_hw_unregister_divider);
+
+static void devm_clk_hw_release_divider(struct device *dev, void *res)
+{
+ clk_hw_unregister_divider(*(struct clk_hw **)res);
+}
+
+struct clk_hw *__devm_clk_hw_register_divider(struct device *dev,
+ struct device_node *np, const char *name,
+ const char *parent_name, const struct clk_hw *parent_hw,
+ const struct clk_parent_data *parent_data, unsigned long flags,
+ void __iomem *reg, u8 shift, u8 width, u8 clk_divider_flags,
+ const struct clk_div_table *table, spinlock_t *lock)
+{
+ struct clk_hw **ptr, *hw;
+
+ ptr = devres_alloc(devm_clk_hw_release_divider, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ hw = __clk_hw_register_divider(dev, np, name, parent_name, parent_hw,
+ parent_data, flags, reg, shift, width,
+ clk_divider_flags, table, lock);
+
+ if (!IS_ERR(hw)) {
+ *ptr = hw;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return hw;
+}
+EXPORT_SYMBOL_GPL(__devm_clk_hw_register_divider);
diff --git a/drivers/clk/clk-fsl-flexspi.c b/drivers/clk/clk-fsl-flexspi.c
new file mode 100644
index 000000000000..8432d681e2d9
--- /dev/null
+++ b/drivers/clk/clk-fsl-flexspi.c
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Layerscape FlexSPI clock driver
+ *
+ * Copyright 2020 Michael Walle <michael@walle.cc>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+static const struct clk_div_table ls1028a_flexspi_divs[] = {
+ { .val = 0, .div = 1, },
+ { .val = 1, .div = 2, },
+ { .val = 2, .div = 3, },
+ { .val = 3, .div = 4, },
+ { .val = 4, .div = 5, },
+ { .val = 5, .div = 6, },
+ { .val = 6, .div = 7, },
+ { .val = 7, .div = 8, },
+ { .val = 11, .div = 12, },
+ { .val = 15, .div = 16, },
+ { .val = 16, .div = 20, },
+ { .val = 17, .div = 24, },
+ { .val = 18, .div = 28, },
+ { .val = 19, .div = 32, },
+ { .val = 20, .div = 80, },
+ {}
+};
+
+static const struct clk_div_table lx2160a_flexspi_divs[] = {
+ { .val = 1, .div = 2, },
+ { .val = 3, .div = 4, },
+ { .val = 5, .div = 6, },
+ { .val = 7, .div = 8, },
+ { .val = 11, .div = 12, },
+ { .val = 15, .div = 16, },
+ { .val = 16, .div = 20, },
+ { .val = 17, .div = 24, },
+ { .val = 18, .div = 28, },
+ { .val = 19, .div = 32, },
+ { .val = 20, .div = 80, },
+ {}
+};
+
+static int fsl_flexspi_clk_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ const char *clk_name = np->name;
+ const char *clk_parent;
+ struct resource *res;
+ void __iomem *reg;
+ struct clk_hw *hw;
+ const struct clk_div_table *divs;
+
+ divs = device_get_match_data(dev);
+ if (!divs)
+ return -ENOENT;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENOENT;
+
+ /*
+ * Can't use devm_ioremap_resource() or devm_of_iomap() because the
+ * resource might already be taken by the parent device.
+ */
+ reg = devm_ioremap(dev, res->start, resource_size(res));
+ if (!reg)
+ return -ENOMEM;
+
+ clk_parent = of_clk_get_parent_name(np, 0);
+ if (!clk_parent)
+ return -EINVAL;
+
+ of_property_read_string(np, "clock-output-names", &clk_name);
+
+ hw = devm_clk_hw_register_divider_table(dev, clk_name, clk_parent, 0,
+ reg, 0, 5, 0, divs, NULL);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, hw);
+}
+
+static const struct of_device_id fsl_flexspi_clk_dt_ids[] = {
+ { .compatible = "fsl,ls1028a-flexspi-clk", .data = &ls1028a_flexspi_divs },
+ { .compatible = "fsl,lx2160a-flexspi-clk", .data = &lx2160a_flexspi_divs },
+ {}
+};
+MODULE_DEVICE_TABLE(of, fsl_flexspi_clk_dt_ids);
+
+static struct platform_driver fsl_flexspi_clk_driver = {
+ .driver = {
+ .name = "fsl-flexspi-clk",
+ .of_match_table = fsl_flexspi_clk_dt_ids,
+ },
+ .probe = fsl_flexspi_clk_probe,
+};
+module_platform_driver(fsl_flexspi_clk_driver);
+
+MODULE_DESCRIPTION("FlexSPI clock driver for Layerscape SoCs");
+MODULE_AUTHOR("Michael Walle <michael@walle.cc>");
+MODULE_LICENSE("GPL");
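The divider tables above map sparse register values onto divisors; any value missing from a table is rejected by the divider core. A standalone illustration of the mapping, using a subset of ls1028a_flexspi_divs and a hypothetical 640 MHz parent rate:

	#include <stdio.h>

	struct div_entry { unsigned int val, div; };

	/* subset of ls1028a_flexspi_divs from the driver above */
	static const struct div_entry divs[] = {
		{ 0, 1 }, { 7, 8 }, { 16, 20 }, { 20, 80 },
	};

	int main(void)
	{
		unsigned long parent = 640000000; /* hypothetical parent rate */
		unsigned int i;

		for (i = 0; i < sizeof(divs) / sizeof(divs[0]); i++)
			printf("val %2u -> %lu Hz\n",
			       divs[i].val, parent / divs[i].div);
		return 0;
	}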
diff --git a/drivers/clk/clk-fsl-sai.c b/drivers/clk/clk-fsl-sai.c
index 0221180a4dd7..6238fcea0467 100644
--- a/drivers/clk/clk-fsl-sai.c
+++ b/drivers/clk/clk-fsl-sai.c
@@ -58,13 +58,13 @@ static int fsl_sai_clk_probe(struct platform_device *pdev)
/* set clock direction, we are the BCLK master */
writel(CR2_BCD, base + I2S_CR2);
- hw = clk_hw_register_composite_pdata(dev, dev->of_node->name,
- &pdata, 1, NULL, NULL,
- &sai_clk->div.hw,
- &clk_divider_ops,
- &sai_clk->gate.hw,
- &clk_gate_ops,
- CLK_SET_RATE_GATE);
+ hw = devm_clk_hw_register_composite_pdata(dev, dev->of_node->name,
+ &pdata, 1, NULL, NULL,
+ &sai_clk->div.hw,
+ &clk_divider_ops,
+ &sai_clk->gate.hw,
+ &clk_gate_ops,
+ CLK_SET_RATE_GATE);
if (IS_ERR(hw))
return PTR_ERR(hw);
diff --git a/drivers/clk/clk-pwm.c b/drivers/clk/clk-pwm.c
index 86f2e2d3fc02..da2c8eddfd9f 100644
--- a/drivers/clk/clk-pwm.c
+++ b/drivers/clk/clk-pwm.c
@@ -147,7 +147,7 @@ static struct platform_driver clk_pwm_driver = {
.remove = clk_pwm_remove,
.driver = {
.name = "pwm-clock",
- .of_match_table = of_match_ptr(clk_pwm_dt_ids),
+ .of_match_table = clk_pwm_dt_ids,
},
};
diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
index 46101c6a20f2..70aa521e7e7f 100644
--- a/drivers/clk/clk-qoriq.c
+++ b/drivers/clk/clk-qoriq.c
@@ -7,6 +7,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <dt-bindings/clock/fsl,qoriq-clockgen.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
@@ -1368,33 +1369,33 @@ static struct clk *clockgen_clk_get(struct of_phandle_args *clkspec, void *data)
idx = clkspec->args[1];
switch (type) {
- case 0:
+ case QORIQ_CLK_SYSCLK:
if (idx != 0)
goto bad_args;
clk = cg->sysclk;
break;
- case 1:
+ case QORIQ_CLK_CMUX:
if (idx >= ARRAY_SIZE(cg->cmux))
goto bad_args;
clk = cg->cmux[idx];
break;
- case 2:
+ case QORIQ_CLK_HWACCEL:
if (idx >= ARRAY_SIZE(cg->hwaccel))
goto bad_args;
clk = cg->hwaccel[idx];
break;
- case 3:
+ case QORIQ_CLK_FMAN:
if (idx >= ARRAY_SIZE(cg->fman))
goto bad_args;
clk = cg->fman[idx];
break;
- case 4:
+ case QORIQ_CLK_PLATFORM_PLL:
pll = &cg->pll[PLATFORM_PLL];
if (idx >= ARRAY_SIZE(pll->div))
goto bad_args;
clk = pll->div[idx].clk;
break;
- case 5:
+ case QORIQ_CLK_CORECLK:
if (idx != 0)
goto bad_args;
clk = cg->coreclk;
diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
index aa21371f9104..a3e883a9f406 100644
--- a/drivers/clk/clk-s2mps11.c
+++ b/drivers/clk/clk-s2mps11.c
@@ -195,6 +195,7 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
return ret;
err_reg:
+ of_node_put(s2mps11_clks[0].clk_np);
while (--i >= 0)
clkdev_drop(s2mps11_clks[i].lookup);
diff --git a/drivers/clk/clk-scpi.c b/drivers/clk/clk-scpi.c
index 5a9b140dd8c8..a39af7616b13 100644
--- a/drivers/clk/clk-scpi.c
+++ b/drivers/clk/clk-scpi.c
@@ -129,7 +129,7 @@ static const struct clk_ops scpi_dvfs_ops = {
.set_rate = scpi_dvfs_set_rate,
};
-static const struct of_device_id scpi_clk_match[] = {
+static const struct of_device_id scpi_clk_match[] __maybe_unused = {
{ .compatible = "arm,scpi-dvfs-clocks", .data = &scpi_dvfs_ops, },
{ .compatible = "arm,scpi-variable-clocks", .data = &scpi_clk_ops, },
{}
diff --git a/drivers/clk/clk-si5351.c b/drivers/clk/clk-si5351.c
index 1e1702e609cb..57e4597cdf4c 100644
--- a/drivers/clk/clk-si5351.c
+++ b/drivers/clk/clk-si5351.c
@@ -902,6 +902,10 @@ static int _si5351_clkout_set_disable_state(
static void _si5351_clkout_reset_pll(struct si5351_driver_data *drvdata, int num)
{
u8 val = si5351_reg_read(drvdata, SI5351_CLK0_CTRL + num);
+ u8 mask = val & SI5351_CLK_PLL_SELECT ? SI5351_PLL_RESET_B :
+ SI5351_PLL_RESET_A;
+ unsigned int v;
+ int err;
switch (val & SI5351_CLK_INPUT_MASK) {
case SI5351_CLK_INPUT_XTAL:
@@ -909,9 +913,12 @@ static void _si5351_clkout_reset_pll(struct si5351_driver_data *drvdata, int num
return; /* pll not used, no need to reset */
}
- si5351_reg_write(drvdata, SI5351_PLL_RESET,
- val & SI5351_CLK_PLL_SELECT ? SI5351_PLL_RESET_B :
- SI5351_PLL_RESET_A);
+ si5351_reg_write(drvdata, SI5351_PLL_RESET, mask);
+
+ err = regmap_read_poll_timeout(drvdata->regmap, SI5351_PLL_RESET, v,
+ !(v & mask), 0, 20000);
+ if (err < 0)
+ dev_err(&drvdata->client->dev, "Reset bit didn't clear\n");
dev_dbg(&drvdata->client->dev, "%s - %s: pll = %d\n",
__func__, clk_hw_get_name(&drvdata->clkout[num].hw),
diff --git a/drivers/clk/clk-versaclock5.c b/drivers/clk/clk-versaclock5.c
index c90460e7ef21..43db67337bc0 100644
--- a/drivers/clk/clk-versaclock5.c
+++ b/drivers/clk/clk-versaclock5.c
@@ -739,8 +739,8 @@ static int vc5_update_power(struct device_node *np_output,
{
u32 value;
- if (!of_property_read_u32(np_output,
- "idt,voltage-microvolts", &value)) {
+ if (!of_property_read_u32(np_output, "idt,voltage-microvolt",
+ &value)) {
clk_out->clk_output_cfg0_mask |= VC5_CLK_OUTPUT_CFG0_PWR_MASK;
switch (value) {
case 1800000:
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index f83dac54ed85..3d751ae5bc70 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -420,7 +420,7 @@ static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
static void clk_core_fill_parent_index(struct clk_core *core, u8 index)
{
struct clk_parent_map *entry = &core->parents[index];
- struct clk_core *parent = ERR_PTR(-ENOENT);
+ struct clk_core *parent;
if (entry->hw) {
parent = entry->hw->core;
@@ -1164,6 +1164,27 @@ int clk_enable(struct clk *clk)
}
EXPORT_SYMBOL_GPL(clk_enable);
+/**
+ * clk_is_enabled_when_prepared - indicate if preparing a clock also enables it.
+ * @clk: clock source
+ *
+ * Returns true if clk_prepare() implicitly enables the clock, effectively
+ * making clk_enable()/clk_disable() no-ops, false otherwise.
+ *
+ * This is of interest mainly to power management code where actually
+ * disabling the clock also requires unpreparing it to have any material
+ * effect.
+ *
+ * Regardless of the value returned here, the caller must always invoke
+ * clk_enable() or clk_prepare_enable() and counterparts for usage counts
+ * to be right.
+ */
+bool clk_is_enabled_when_prepared(struct clk *clk)
+{
+ return clk && !(clk->core->ops->enable && clk->core->ops->disable);
+}
+EXPORT_SYMBOL_GPL(clk_is_enabled_when_prepared);
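The expected consumer of this query is power management code running in contexts that cannot sleep. A hedged sketch, assuming a hypothetical foo_priv with a must_unprepare flag; a later, sleepable path is assumed to perform the deferred clk_unprepare() so prepare counts stay balanced:

	static int foo_suspend_noirq(struct device *dev)
	{
		struct foo_priv *priv = dev_get_drvdata(dev);

		/* atomic context: only enable-level gating is allowed here */
		clk_disable(priv->clk);

		/*
		 * If the gate lives in prepare, the disable above only
		 * dropped the usage count; a sleepable clk_unprepare() is
		 * still needed to actually stop the clock.
		 */
		priv->must_unprepare = clk_is_enabled_when_prepared(priv->clk);

		return 0;
	}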
+
static int clk_core_prepare_enable(struct clk_core *core)
{
int ret;
@@ -2314,6 +2335,8 @@ int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
if (!clk)
return 0;
+ trace_clk_set_rate_range(clk->core, min, max);
+
if (min > max) {
pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n",
__func__, clk->core->name, clk->dev_id, clk->con_id,
@@ -2381,6 +2404,8 @@ int clk_set_min_rate(struct clk *clk, unsigned long rate)
if (!clk)
return 0;
+ trace_clk_set_min_rate(clk->core, rate);
+
return clk_set_rate_range(clk, rate, clk->max_rate);
}
EXPORT_SYMBOL_GPL(clk_set_min_rate);
@@ -2397,6 +2422,8 @@ int clk_set_max_rate(struct clk *clk, unsigned long rate)
if (!clk)
return 0;
+ trace_clk_set_max_rate(clk->core, rate);
+
return clk_set_rate_range(clk, clk->min_rate, rate);
}
EXPORT_SYMBOL_GPL(clk_set_max_rate);
@@ -2931,7 +2958,14 @@ static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
else
seq_puts(s, "-----");
- seq_printf(s, " %6d\n", clk_core_get_scaled_duty_cycle(c, 100000));
+ seq_printf(s, " %6d", clk_core_get_scaled_duty_cycle(c, 100000));
+
+ if (c->ops->is_enabled)
+ seq_printf(s, " %9c\n", clk_core_is_enabled(c) ? 'Y' : 'N');
+ else if (!c->ops->enable)
+ seq_printf(s, " %9c\n", 'Y');
+ else
+ seq_printf(s, " %9c\n", '?');
}
static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
@@ -2950,9 +2984,9 @@ static int clk_summary_show(struct seq_file *s, void *data)
struct clk_core *c;
struct hlist_head **lists = (struct hlist_head **)s->private;
- seq_puts(s, " enable prepare protect duty\n");
- seq_puts(s, " clock count count count rate accuracy phase cycle\n");
- seq_puts(s, "---------------------------------------------------------------------------------------------\n");
+ seq_puts(s, " enable prepare protect duty hardware\n");
+ seq_puts(s, " clock count count count rate accuracy phase cycle enable\n");
+ seq_puts(s, "-------------------------------------------------------------------------------------------------------\n");
clk_prepare_lock();
@@ -3667,6 +3701,24 @@ struct clk *clk_hw_create_clk(struct device *dev, struct clk_hw *hw,
return clk;
}
+/**
+ * clk_hw_get_clk - get clk consumer given a clk_hw
+ * @hw: clk_hw associated with the clk being consumed
+ * @con_id: connection ID string on device
+ *
+ * Returns: new clk consumer.
+ * This is the function to be used by providers which need
+ * to get a consumer clk and act on the clock element.
+ * Calls to this function must be balanced with calls to clk_put().
+ */
+struct clk *clk_hw_get_clk(struct clk_hw *hw, const char *con_id)
+{
+ struct device *dev = hw->core->dev;
+
+ return clk_hw_create_clk(dev, hw, dev_name(dev), con_id);
+}
+EXPORT_SYMBOL(clk_hw_get_clk);
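A short usage sketch: a provider taking a temporary consumer handle on a clk_hw it owns, for instance to apply a default rate at registration time. The "init" connection ID and the 24 MHz value are illustrative:

	struct clk *clk;

	clk = clk_hw_get_clk(hw, "init");
	if (!IS_ERR(clk)) {
		clk_set_rate(clk, 24000000);
		clk_put(clk);	/* balances clk_hw_get_clk() */
	}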
+
static int clk_cpy_name(const char **dst_p, const char *src, bool must_exist)
{
const char *dst;
@@ -4068,12 +4120,12 @@ void clk_hw_unregister(struct clk_hw *hw)
}
EXPORT_SYMBOL_GPL(clk_hw_unregister);
-static void devm_clk_release(struct device *dev, void *res)
+static void devm_clk_unregister_cb(struct device *dev, void *res)
{
clk_unregister(*(struct clk **)res);
}
-static void devm_clk_hw_release(struct device *dev, void *res)
+static void devm_clk_hw_unregister_cb(struct device *dev, void *res)
{
clk_hw_unregister(*(struct clk_hw **)res);
}
@@ -4093,7 +4145,7 @@ struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
struct clk *clk;
struct clk **clkp;
- clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
+ clkp = devres_alloc(devm_clk_unregister_cb, sizeof(*clkp), GFP_KERNEL);
if (!clkp)
return ERR_PTR(-ENOMEM);
@@ -4123,7 +4175,7 @@ int devm_clk_hw_register(struct device *dev, struct clk_hw *hw)
struct clk_hw **hwp;
int ret;
- hwp = devres_alloc(devm_clk_hw_release, sizeof(*hwp), GFP_KERNEL);
+ hwp = devres_alloc(devm_clk_hw_unregister_cb, sizeof(*hwp), GFP_KERNEL);
if (!hwp)
return -ENOMEM;
@@ -4167,7 +4219,7 @@ static int devm_clk_hw_match(struct device *dev, void *res, void *data)
*/
void devm_clk_unregister(struct device *dev, struct clk *clk)
{
- WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk));
+ WARN_ON(devres_release(dev, devm_clk_unregister_cb, devm_clk_match, clk));
}
EXPORT_SYMBOL_GPL(devm_clk_unregister);
@@ -4182,11 +4234,54 @@ EXPORT_SYMBOL_GPL(devm_clk_unregister);
*/
void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw)
{
- WARN_ON(devres_release(dev, devm_clk_hw_release, devm_clk_hw_match,
+ WARN_ON(devres_release(dev, devm_clk_hw_unregister_cb, devm_clk_hw_match,
hw));
}
EXPORT_SYMBOL_GPL(devm_clk_hw_unregister);
+static void devm_clk_release(struct device *dev, void *res)
+{
+ clk_put(*(struct clk **)res);
+}
+
+/**
+ * devm_clk_hw_get_clk - resource managed clk_hw_get_clk()
+ * @dev: device that is registering this clock
+ * @hw: clk_hw associated with the clk being consumed
+ * @con_id: connection ID string on device
+ *
+ * Managed clk_hw_get_clk(). Clocks obtained with this function are
+ * automatically clk_put() on driver detach. See clk_put()
+ * for more information.
+ */
+struct clk *devm_clk_hw_get_clk(struct device *dev, struct clk_hw *hw,
+ const char *con_id)
+{
+ struct clk *clk;
+ struct clk **clkp;
+
+ /*
+ * This should not happen because it would mean we have drivers
+ * passing around clk_hw pointers instead of having the caller use
+ * proper clk_get() style APIs.
+ */
+ WARN_ON_ONCE(dev != hw->core->dev);
+
+ clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
+ if (!clkp)
+ return ERR_PTR(-ENOMEM);
+
+ clk = clk_hw_get_clk(hw, con_id);
+ if (!IS_ERR(clk)) {
+ *clkp = clk;
+ devres_add(dev, clkp);
+ } else {
+ devres_free(clkp);
+ }
+
+ return clk;
+}
+EXPORT_SYMBOL_GPL(devm_clk_hw_get_clk);
+
/*
* clkdev helpers
*/
@@ -4334,6 +4429,42 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
}
EXPORT_SYMBOL_GPL(clk_notifier_unregister);
+struct clk_notifier_devres {
+ struct clk *clk;
+ struct notifier_block *nb;
+};
+
+static void devm_clk_notifier_release(struct device *dev, void *res)
+{
+ struct clk_notifier_devres *devres = res;
+
+ clk_notifier_unregister(devres->clk, devres->nb);
+}
+
+int devm_clk_notifier_register(struct device *dev, struct clk *clk,
+ struct notifier_block *nb)
+{
+ struct clk_notifier_devres *devres;
+ int ret;
+
+ devres = devres_alloc(devm_clk_notifier_release,
+ sizeof(*devres), GFP_KERNEL);
+
+ if (!devres)
+ return -ENOMEM;
+
+ ret = clk_notifier_register(clk, nb);
+ if (!ret) {
+ devres->clk = clk;
+ devres->nb = nb;
+ } else {
+ devres_free(devres);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_clk_notifier_register);
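A usage sketch for the managed notifier; the foo_ names and priv layout are hypothetical. The notifier is dropped automatically on driver detach, so no remove-path clk_notifier_unregister() is needed:

	static int foo_clk_notify(struct notifier_block *nb,
				  unsigned long event, void *data)
	{
		struct clk_notifier_data *cnd = data;

		if (event == POST_RATE_CHANGE)
			pr_info("clk rate changed to %lu\n", cnd->new_rate);

		return NOTIFY_OK;
	}

	/* in probe, with priv->nb embedded in the driver data: */
	priv->nb.notifier_call = foo_clk_notify;
	ret = devm_clk_notifier_register(&pdev->dev, priv->clk, &priv->nb);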
+
#ifdef CONFIG_OF
static void clk_core_reparent_orphans(void)
{
diff --git a/drivers/clk/imx/Kconfig b/drivers/clk/imx/Kconfig
index 3061896503f3..47d9ec3abd2f 100644
--- a/drivers/clk/imx/Kconfig
+++ b/drivers/clk/imx/Kconfig
@@ -6,8 +6,6 @@ config MXC_CLK
config MXC_CLK_SCU
tristate
- depends on ARCH_MXC
- depends on IMX_SCU && HAVE_ARM_SMCCC
config CLK_IMX1
def_bool SOC_IMX1
diff --git a/drivers/clk/imx/clk-gate2.c b/drivers/clk/imx/clk-gate2.c
index 7eed7083f46e..f16c4019f402 100644
--- a/drivers/clk/imx/clk-gate2.c
+++ b/drivers/clk/imx/clk-gate2.c
@@ -30,6 +30,7 @@ struct clk_gate2 {
void __iomem *reg;
u8 bit_idx;
u8 cgr_val;
+ u8 cgr_mask;
u8 flags;
spinlock_t *lock;
unsigned int *share_count;
@@ -37,37 +38,38 @@ struct clk_gate2 {
#define to_clk_gate2(_hw) container_of(_hw, struct clk_gate2, hw)
-static int clk_gate2_enable(struct clk_hw *hw)
+static void clk_gate2_do_shared_clks(struct clk_hw *hw, bool enable)
{
struct clk_gate2 *gate = to_clk_gate2(hw);
u32 reg;
+
+ reg = readl(gate->reg);
+ reg &= ~(gate->cgr_mask << gate->bit_idx);
+ if (enable)
+ reg |= (gate->cgr_val & gate->cgr_mask) << gate->bit_idx;
+ writel(reg, gate->reg);
+}
+
+static int clk_gate2_enable(struct clk_hw *hw)
+{
+ struct clk_gate2 *gate = to_clk_gate2(hw);
unsigned long flags;
- int ret = 0;
spin_lock_irqsave(gate->lock, flags);
if (gate->share_count && (*gate->share_count)++ > 0)
goto out;
- if (gate->flags & IMX_CLK_GATE2_SINGLE_BIT) {
- ret = clk_gate_ops.enable(hw);
- } else {
- reg = readl(gate->reg);
- reg &= ~(3 << gate->bit_idx);
- reg |= gate->cgr_val << gate->bit_idx;
- writel(reg, gate->reg);
- }
-
+ clk_gate2_do_shared_clks(hw, true);
out:
spin_unlock_irqrestore(gate->lock, flags);
- return ret;
+ return 0;
}
static void clk_gate2_disable(struct clk_hw *hw)
{
struct clk_gate2 *gate = to_clk_gate2(hw);
- u32 reg;
unsigned long flags;
spin_lock_irqsave(gate->lock, flags);
@@ -79,23 +81,17 @@ static void clk_gate2_disable(struct clk_hw *hw)
goto out;
}
- if (gate->flags & IMX_CLK_GATE2_SINGLE_BIT) {
- clk_gate_ops.disable(hw);
- } else {
- reg = readl(gate->reg);
- reg &= ~(3 << gate->bit_idx);
- writel(reg, gate->reg);
- }
-
+ clk_gate2_do_shared_clks(hw, false);
out:
spin_unlock_irqrestore(gate->lock, flags);
}
-static int clk_gate2_reg_is_enabled(void __iomem *reg, u8 bit_idx)
+static int clk_gate2_reg_is_enabled(void __iomem *reg, u8 bit_idx,
+ u8 cgr_val, u8 cgr_mask)
{
u32 val = readl(reg);
- if (((val >> bit_idx) & 1) == 1)
+ if (((val >> bit_idx) & cgr_mask) == cgr_val)
return 1;
return 0;
@@ -104,29 +100,28 @@ static int clk_gate2_reg_is_enabled(void __iomem *reg, u8 bit_idx)
static int clk_gate2_is_enabled(struct clk_hw *hw)
{
struct clk_gate2 *gate = to_clk_gate2(hw);
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(gate->lock, flags);
- if (gate->flags & IMX_CLK_GATE2_SINGLE_BIT)
- return clk_gate_ops.is_enabled(hw);
+ ret = clk_gate2_reg_is_enabled(gate->reg, gate->bit_idx,
+ gate->cgr_val, gate->cgr_mask);
- return clk_gate2_reg_is_enabled(gate->reg, gate->bit_idx);
+ spin_unlock_irqrestore(gate->lock, flags);
+
+ return ret;
}
static void clk_gate2_disable_unused(struct clk_hw *hw)
{
struct clk_gate2 *gate = to_clk_gate2(hw);
unsigned long flags;
- u32 reg;
-
- if (gate->flags & IMX_CLK_GATE2_SINGLE_BIT)
- return;
spin_lock_irqsave(gate->lock, flags);
- if (!gate->share_count || *gate->share_count == 0) {
- reg = readl(gate->reg);
- reg &= ~(3 << gate->bit_idx);
- writel(reg, gate->reg);
- }
+ if (!gate->share_count || *gate->share_count == 0)
+ clk_gate2_do_shared_clks(hw, false);
spin_unlock_irqrestore(gate->lock, flags);
}
@@ -140,7 +135,7 @@ static const struct clk_ops clk_gate2_ops = {
struct clk_hw *clk_hw_register_gate2(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
- void __iomem *reg, u8 bit_idx, u8 cgr_val,
+ void __iomem *reg, u8 bit_idx, u8 cgr_val, u8 cgr_mask,
u8 clk_gate2_flags, spinlock_t *lock,
unsigned int *share_count)
{
@@ -157,6 +152,7 @@ struct clk_hw *clk_hw_register_gate2(struct device *dev, const char *name,
gate->reg = reg;
gate->bit_idx = bit_idx;
gate->cgr_val = cgr_val;
+ gate->cgr_mask = cgr_mask;
gate->flags = clk_gate2_flags;
gate->lock = lock;
gate->share_count = share_count;
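With cgr_val masked by cgr_mask, one helper now covers both gate flavours that previously needed the IMX_CLK_GATE2_SINGLE_BIT fallback path. A sketch of the two registration styles against the signature above; the clock names, register offsets, and bit positions are illustrative:

	/* classic i.MX CCM gate: 2-bit field (0x3 = enabled) at bits [9:8] */
	hw = clk_hw_register_gate2(NULL, "uart_ipg", "ipg", 0, base + 0x68,
				   8, 0x3, 0x3, 0, &imx_ccm_lock, NULL);

	/* single-bit gate: 1-bit field at bit 9, no special flag required */
	hw = clk_hw_register_gate2(NULL, "foo_gate", "bar", 0, base + 0x70,
				   9, 0x1, 0x1, 0, &imx_ccm_lock, NULL);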
diff --git a/drivers/clk/imx/clk-imx8mm.c b/drivers/clk/imx/clk-imx8mm.c
index f358ad907299..7c905861af5d 100644
--- a/drivers/clk/imx/clk-imx8mm.c
+++ b/drivers/clk/imx/clk-imx8mm.c
@@ -653,7 +653,7 @@ static struct platform_driver imx8mm_clk_driver = {
* reloading the driver will crash or break devices.
*/
.suppress_bind_attrs = true,
- .of_match_table = of_match_ptr(imx8mm_clk_of_match),
+ .of_match_table = imx8mm_clk_of_match,
},
};
module_platform_driver(imx8mm_clk_driver);
diff --git a/drivers/clk/imx/clk-imx8mn.c b/drivers/clk/imx/clk-imx8mn.c
index f3c5e6cf55dd..3c21db942d5b 100644
--- a/drivers/clk/imx/clk-imx8mn.c
+++ b/drivers/clk/imx/clk-imx8mn.c
@@ -604,7 +604,7 @@ static struct platform_driver imx8mn_clk_driver = {
* reloading the driver will crash or break devices.
*/
.suppress_bind_attrs = true,
- .of_match_table = of_match_ptr(imx8mn_clk_of_match),
+ .of_match_table = imx8mn_clk_of_match,
},
};
module_platform_driver(imx8mn_clk_driver);
diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c
index 48e212477f52..2f4e1d674e1c 100644
--- a/drivers/clk/imx/clk-imx8mp.c
+++ b/drivers/clk/imx/clk-imx8mp.c
@@ -425,7 +425,7 @@ static struct clk **uart_clks[ARRAY_SIZE(uart_clk_ids) + 1];
static int imx8mp_clocks_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
+ struct device_node *np;
void __iomem *anatop_base, *ccm_base;
int i;
@@ -763,7 +763,7 @@ static struct platform_driver imx8mp_clk_driver = {
* reloading the driver will crash or break devices.
*/
.suppress_bind_attrs = true,
- .of_match_table = of_match_ptr(imx8mp_clk_of_match),
+ .of_match_table = imx8mp_clk_of_match,
},
};
module_platform_driver(imx8mp_clk_driver);
diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c
index 06292d4a98ff..779ea69e639c 100644
--- a/drivers/clk/imx/clk-imx8mq.c
+++ b/drivers/clk/imx/clk-imx8mq.c
@@ -639,7 +639,7 @@ static struct platform_driver imx8mq_clk_driver = {
* reloading the driver will crash or break devices.
*/
.suppress_bind_attrs = true,
- .of_match_table = of_match_ptr(imx8mq_clk_of_match),
+ .of_match_table = imx8mq_clk_of_match,
},
};
module_platform_driver(imx8mq_clk_driver);
diff --git a/drivers/clk/imx/clk-imx8qxp-lpcg.c b/drivers/clk/imx/clk-imx8qxp-lpcg.c
index e947a70054ac..d3e905cf867d 100644
--- a/drivers/clk/imx/clk-imx8qxp-lpcg.c
+++ b/drivers/clk/imx/clk-imx8qxp-lpcg.c
@@ -9,8 +9,10 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include "clk-scu.h"
@@ -157,6 +159,135 @@ static const struct imx8qxp_ss_lpcg imx8qxp_ss_lsio = {
.num_max = IMX_LSIO_LPCG_CLK_END,
};
+#define IMX_LPCG_MAX_CLKS 8
+
+static struct clk_hw *imx_lpcg_of_clk_src_get(struct of_phandle_args *clkspec,
+ void *data)
+{
+ struct clk_hw_onecell_data *hw_data = data;
+ unsigned int idx = clkspec->args[0] / 4;
+
+ if (idx >= hw_data->num) {
+ pr_err("%s: invalid index %u\n", __func__, idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return hw_data->hws[idx];
+}
+
+static int imx_lpcg_parse_clks_from_dt(struct platform_device *pdev,
+ struct device_node *np)
+{
+ const char *output_names[IMX_LPCG_MAX_CLKS];
+ const char *parent_names[IMX_LPCG_MAX_CLKS];
+ unsigned int bit_offset[IMX_LPCG_MAX_CLKS];
+ struct clk_hw_onecell_data *clk_data;
+ struct clk_hw **clk_hws;
+ struct resource *res;
+ void __iomem *base;
+ int count;
+ int idx;
+ int ret;
+ int i;
+
+ if (!of_device_is_compatible(np, "fsl,imx8qxp-lpcg"))
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ count = of_property_count_u32_elems(np, "clock-indices");
+ if (count < 0) {
+ dev_err(&pdev->dev, "failed to count clocks\n");
+ return -EINVAL;
+ }
+
+ /*
+ * A trick here: we set the number of clks to the maximum instead
+ * of the count from clock-indices, because one LPCG supports up to
+ * 8 clock outputs, each occupying a fixed 4-bit field. A clock can
+ * then be looked up directly as clock-indices (bit offset) / 4,
+ * at the cost of only a few extra pointers.
+ */
+
+ clk_data = devm_kzalloc(&pdev->dev, struct_size(clk_data, hws,
+ IMX_LPCG_MAX_CLKS), GFP_KERNEL);
+ if (!clk_data)
+ return -ENOMEM;
+
+ clk_data->num = IMX_LPCG_MAX_CLKS;
+ clk_hws = clk_data->hws;
+
+ ret = of_property_read_u32_array(np, "clock-indices", bit_offset,
+ count);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to read clock-indices\n");
+ return -EINVAL;
+ }
+
+ ret = of_clk_parent_fill(np, parent_names, count);
+ if (ret != count) {
+ dev_err(&pdev->dev, "failed to get clock parent names\n");
+ return count;
+ }
+
+ ret = of_property_read_string_array(np, "clock-output-names",
+ output_names, count);
+ if (ret != count) {
+ dev_err(&pdev->dev, "failed to read clock-output-names\n");
+ return -EINVAL;
+ }
+
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ for (i = 0; i < count; i++) {
+ idx = bit_offset[i] / 4;
+ if (idx >= IMX_LPCG_MAX_CLKS) {
+ dev_warn(&pdev->dev, "invalid bit offset of clock %d\n",
+ i);
+ ret = -EINVAL;
+ goto unreg;
+ }
+
+ clk_hws[idx] = imx_clk_lpcg_scu_dev(&pdev->dev, output_names[i],
+ parent_names[i], 0, base,
+ bit_offset[i], false);
+ if (IS_ERR(clk_hws[idx])) {
+ dev_warn(&pdev->dev, "failed to register clock %d\n",
+ idx);
+ ret = PTR_ERR(clk_hws[idx]);
+ goto unreg;
+ }
+ }
+
+ ret = devm_of_clk_add_hw_provider(&pdev->dev, imx_lpcg_of_clk_src_get,
+ clk_data);
+ if (ret)
+ goto unreg;
+
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
+
+ return 0;
+
+unreg:
+ while (--i >= 0) {
+ idx = bit_offset[i] / 4;
+ if (clk_hws[idx])
+ imx_clk_lpcg_scu_unregister(clk_hws[idx]);
+ }
+
+ pm_runtime_disable(&pdev->dev);
+
+ return ret;
+}
+
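A worked illustration of the bit-offset addressing used above; the clock-indices values are hypothetical. Each LPCG output occupies a 4-bit register field, so dividing the offset by 4 yields a stable slot in clk_hws[], and slots never named by clock-indices simply stay NULL:

	#include <stdio.h>

	int main(void)
	{
		unsigned int clock_indices[] = { 0, 16, 20 }; /* from DT */
		unsigned int i;

		for (i = 0; i < 3; i++)
			printf("bit offset %2u -> clk_hws[%u]\n",
			       clock_indices[i], clock_indices[i] / 4);
		return 0;
	}

A consumer clkspec argument of 16 resolves through imx_lpcg_of_clk_src_get() in the same way, as 16 / 4 -> clk_hws[4].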
static int imx8qxp_lpcg_clk_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -167,8 +298,14 @@ static int imx8qxp_lpcg_clk_probe(struct platform_device *pdev)
struct resource *res;
struct clk_hw **clks;
void __iomem *base;
+ int ret;
int i;
+ /* try new binding to parse clocks from device tree first */
+ ret = imx_lpcg_parse_clks_from_dt(pdev, np);
+ if (!ret)
+ return 0;
+
ss_lpcg = of_device_get_match_data(dev);
if (!ss_lpcg)
return -ENODEV;
@@ -219,6 +356,7 @@ static const struct of_device_id imx8qxp_lpcg_match[] = {
{ .compatible = "fsl,imx8qxp-lpcg-adma", &imx8qxp_ss_adma, },
{ .compatible = "fsl,imx8qxp-lpcg-conn", &imx8qxp_ss_conn, },
{ .compatible = "fsl,imx8qxp-lpcg-lsio", &imx8qxp_ss_lsio, },
+ { .compatible = "fsl,imx8qxp-lpcg", NULL },
{ /* sentinel */ }
};
@@ -226,6 +364,7 @@ static struct platform_driver imx8qxp_lpcg_clk_driver = {
.driver = {
.name = "imx8qxp-lpcg-clk",
.of_match_table = imx8qxp_lpcg_match,
+ .pm = &imx_clk_lpcg_scu_pm_ops,
.suppress_bind_attrs = true,
},
.probe = imx8qxp_lpcg_clk_probe,
diff --git a/drivers/clk/imx/clk-imx8qxp.c b/drivers/clk/imx/clk-imx8qxp.c
index d650ca33cdc8..5b3d4ede7c7c 100644
--- a/drivers/clk/imx/clk-imx8qxp.c
+++ b/drivers/clk/imx/clk-imx8qxp.c
@@ -22,9 +22,10 @@ static int imx8qxp_clk_probe(struct platform_device *pdev)
struct device_node *ccm_node = pdev->dev.of_node;
struct clk_hw_onecell_data *clk_data;
struct clk_hw **clks;
+ u32 clk_cells;
int ret, i;
- ret = imx_clk_scu_init();
+ ret = imx_clk_scu_init(ccm_node);
if (ret)
return ret;
@@ -33,6 +34,9 @@ static int imx8qxp_clk_probe(struct platform_device *pdev)
if (!clk_data)
return -ENOMEM;
+ if (of_property_read_u32(ccm_node, "#clock-cells", &clk_cells))
+ return -EINVAL;
+
clk_data->num = IMX_SCU_CLK_END;
clks = clk_data->hws;
@@ -55,78 +59,78 @@ static int imx8qxp_clk_probe(struct platform_device *pdev)
clks[IMX_LSIO_BUS_CLK] = clk_hw_register_fixed_rate(NULL, "lsio_bus_clk_root", NULL, 0, 100000000);
/* ARM core */
- clks[IMX_A35_CLK] = imx_clk_scu("a35_clk", IMX_SC_R_A35, IMX_SC_PM_CLK_CPU);
+ clks[IMX_A35_CLK] = imx_clk_scu("a35_clk", IMX_SC_R_A35, IMX_SC_PM_CLK_CPU, clk_cells);
/* LSIO SS */
- clks[IMX_LSIO_PWM0_CLK] = imx_clk_scu("pwm0_clk", IMX_SC_R_PWM_0, IMX_SC_PM_CLK_PER);
- clks[IMX_LSIO_PWM1_CLK] = imx_clk_scu("pwm1_clk", IMX_SC_R_PWM_1, IMX_SC_PM_CLK_PER);
- clks[IMX_LSIO_PWM2_CLK] = imx_clk_scu("pwm2_clk", IMX_SC_R_PWM_2, IMX_SC_PM_CLK_PER);
- clks[IMX_LSIO_PWM3_CLK] = imx_clk_scu("pwm3_clk", IMX_SC_R_PWM_3, IMX_SC_PM_CLK_PER);
- clks[IMX_LSIO_PWM4_CLK] = imx_clk_scu("pwm4_clk", IMX_SC_R_PWM_4, IMX_SC_PM_CLK_PER);
- clks[IMX_LSIO_PWM5_CLK] = imx_clk_scu("pwm5_clk", IMX_SC_R_PWM_5, IMX_SC_PM_CLK_PER);
- clks[IMX_LSIO_PWM6_CLK] = imx_clk_scu("pwm6_clk", IMX_SC_R_PWM_6, IMX_SC_PM_CLK_PER);
- clks[IMX_LSIO_PWM7_CLK] = imx_clk_scu("pwm7_clk", IMX_SC_R_PWM_7, IMX_SC_PM_CLK_PER);
- clks[IMX_LSIO_GPT0_CLK] = imx_clk_scu("gpt0_clk", IMX_SC_R_GPT_0, IMX_SC_PM_CLK_PER);
- clks[IMX_LSIO_GPT1_CLK] = imx_clk_scu("gpt1_clk", IMX_SC_R_GPT_1, IMX_SC_PM_CLK_PER);
- clks[IMX_LSIO_GPT2_CLK] = imx_clk_scu("gpt2_clk", IMX_SC_R_GPT_2, IMX_SC_PM_CLK_PER);
- clks[IMX_LSIO_GPT3_CLK] = imx_clk_scu("gpt3_clk", IMX_SC_R_GPT_3, IMX_SC_PM_CLK_PER);
- clks[IMX_LSIO_GPT4_CLK] = imx_clk_scu("gpt4_clk", IMX_SC_R_GPT_4, IMX_SC_PM_CLK_PER);
- clks[IMX_LSIO_FSPI0_CLK] = imx_clk_scu("fspi0_clk", IMX_SC_R_FSPI_0, IMX_SC_PM_CLK_PER);
- clks[IMX_LSIO_FSPI1_CLK] = imx_clk_scu("fspi1_clk", IMX_SC_R_FSPI_1, IMX_SC_PM_CLK_PER);
+ clks[IMX_LSIO_PWM0_CLK] = imx_clk_scu("pwm0_clk", IMX_SC_R_PWM_0, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_LSIO_PWM1_CLK] = imx_clk_scu("pwm1_clk", IMX_SC_R_PWM_1, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_LSIO_PWM2_CLK] = imx_clk_scu("pwm2_clk", IMX_SC_R_PWM_2, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_LSIO_PWM3_CLK] = imx_clk_scu("pwm3_clk", IMX_SC_R_PWM_3, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_LSIO_PWM4_CLK] = imx_clk_scu("pwm4_clk", IMX_SC_R_PWM_4, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_LSIO_PWM5_CLK] = imx_clk_scu("pwm5_clk", IMX_SC_R_PWM_5, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_LSIO_PWM6_CLK] = imx_clk_scu("pwm6_clk", IMX_SC_R_PWM_6, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_LSIO_PWM7_CLK] = imx_clk_scu("pwm7_clk", IMX_SC_R_PWM_7, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_LSIO_GPT0_CLK] = imx_clk_scu("gpt0_clk", IMX_SC_R_GPT_0, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_LSIO_GPT1_CLK] = imx_clk_scu("gpt1_clk", IMX_SC_R_GPT_1, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_LSIO_GPT2_CLK] = imx_clk_scu("gpt2_clk", IMX_SC_R_GPT_2, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_LSIO_GPT3_CLK] = imx_clk_scu("gpt3_clk", IMX_SC_R_GPT_3, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_LSIO_GPT4_CLK] = imx_clk_scu("gpt4_clk", IMX_SC_R_GPT_4, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_LSIO_FSPI0_CLK] = imx_clk_scu("fspi0_clk", IMX_SC_R_FSPI_0, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_LSIO_FSPI1_CLK] = imx_clk_scu("fspi1_clk", IMX_SC_R_FSPI_1, IMX_SC_PM_CLK_PER, clk_cells);
/* ADMA SS */
- clks[IMX_ADMA_UART0_CLK] = imx_clk_scu("uart0_clk", IMX_SC_R_UART_0, IMX_SC_PM_CLK_PER);
- clks[IMX_ADMA_UART1_CLK] = imx_clk_scu("uart1_clk", IMX_SC_R_UART_1, IMX_SC_PM_CLK_PER);
- clks[IMX_ADMA_UART2_CLK] = imx_clk_scu("uart2_clk", IMX_SC_R_UART_2, IMX_SC_PM_CLK_PER);
- clks[IMX_ADMA_UART3_CLK] = imx_clk_scu("uart3_clk", IMX_SC_R_UART_3, IMX_SC_PM_CLK_PER);
- clks[IMX_ADMA_SPI0_CLK] = imx_clk_scu("spi0_clk", IMX_SC_R_SPI_0, IMX_SC_PM_CLK_PER);
- clks[IMX_ADMA_SPI1_CLK] = imx_clk_scu("spi1_clk", IMX_SC_R_SPI_1, IMX_SC_PM_CLK_PER);
- clks[IMX_ADMA_SPI2_CLK] = imx_clk_scu("spi2_clk", IMX_SC_R_SPI_2, IMX_SC_PM_CLK_PER);
- clks[IMX_ADMA_SPI3_CLK] = imx_clk_scu("spi3_clk", IMX_SC_R_SPI_3, IMX_SC_PM_CLK_PER);
- clks[IMX_ADMA_CAN0_CLK] = imx_clk_scu("can0_clk", IMX_SC_R_CAN_0, IMX_SC_PM_CLK_PER);
- clks[IMX_ADMA_I2C0_CLK] = imx_clk_scu("i2c0_clk", IMX_SC_R_I2C_0, IMX_SC_PM_CLK_PER);
- clks[IMX_ADMA_I2C1_CLK] = imx_clk_scu("i2c1_clk", IMX_SC_R_I2C_1, IMX_SC_PM_CLK_PER);
- clks[IMX_ADMA_I2C2_CLK] = imx_clk_scu("i2c2_clk", IMX_SC_R_I2C_2, IMX_SC_PM_CLK_PER);
- clks[IMX_ADMA_I2C3_CLK] = imx_clk_scu("i2c3_clk", IMX_SC_R_I2C_3, IMX_SC_PM_CLK_PER);
- clks[IMX_ADMA_FTM0_CLK] = imx_clk_scu("ftm0_clk", IMX_SC_R_FTM_0, IMX_SC_PM_CLK_PER);
- clks[IMX_ADMA_FTM1_CLK] = imx_clk_scu("ftm1_clk", IMX_SC_R_FTM_1, IMX_SC_PM_CLK_PER);
- clks[IMX_ADMA_ADC0_CLK] = imx_clk_scu("adc0_clk", IMX_SC_R_ADC_0, IMX_SC_PM_CLK_PER);
- clks[IMX_ADMA_PWM_CLK] = imx_clk_scu("pwm_clk", IMX_SC_R_LCD_0_PWM_0, IMX_SC_PM_CLK_PER);
- clks[IMX_ADMA_LCD_CLK] = imx_clk_scu("lcd_clk", IMX_SC_R_LCD_0, IMX_SC_PM_CLK_PER);
+ clks[IMX_ADMA_UART0_CLK] = imx_clk_scu("uart0_clk", IMX_SC_R_UART_0, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_ADMA_UART1_CLK] = imx_clk_scu("uart1_clk", IMX_SC_R_UART_1, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_ADMA_UART2_CLK] = imx_clk_scu("uart2_clk", IMX_SC_R_UART_2, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_ADMA_UART3_CLK] = imx_clk_scu("uart3_clk", IMX_SC_R_UART_3, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_ADMA_SPI0_CLK] = imx_clk_scu("spi0_clk", IMX_SC_R_SPI_0, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_ADMA_SPI1_CLK] = imx_clk_scu("spi1_clk", IMX_SC_R_SPI_1, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_ADMA_SPI2_CLK] = imx_clk_scu("spi2_clk", IMX_SC_R_SPI_2, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_ADMA_SPI3_CLK] = imx_clk_scu("spi3_clk", IMX_SC_R_SPI_3, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_ADMA_CAN0_CLK] = imx_clk_scu("can0_clk", IMX_SC_R_CAN_0, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_ADMA_I2C0_CLK] = imx_clk_scu("i2c0_clk", IMX_SC_R_I2C_0, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_ADMA_I2C1_CLK] = imx_clk_scu("i2c1_clk", IMX_SC_R_I2C_1, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_ADMA_I2C2_CLK] = imx_clk_scu("i2c2_clk", IMX_SC_R_I2C_2, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_ADMA_I2C3_CLK] = imx_clk_scu("i2c3_clk", IMX_SC_R_I2C_3, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_ADMA_FTM0_CLK] = imx_clk_scu("ftm0_clk", IMX_SC_R_FTM_0, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_ADMA_FTM1_CLK] = imx_clk_scu("ftm1_clk", IMX_SC_R_FTM_1, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_ADMA_ADC0_CLK] = imx_clk_scu("adc0_clk", IMX_SC_R_ADC_0, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_ADMA_PWM_CLK] = imx_clk_scu("pwm_clk", IMX_SC_R_LCD_0_PWM_0, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_ADMA_LCD_CLK] = imx_clk_scu("lcd_clk", IMX_SC_R_LCD_0, IMX_SC_PM_CLK_PER, clk_cells);
/* Connectivity */
- clks[IMX_CONN_SDHC0_CLK] = imx_clk_scu("sdhc0_clk", IMX_SC_R_SDHC_0, IMX_SC_PM_CLK_PER);
- clks[IMX_CONN_SDHC1_CLK] = imx_clk_scu("sdhc1_clk", IMX_SC_R_SDHC_1, IMX_SC_PM_CLK_PER);
- clks[IMX_CONN_SDHC2_CLK] = imx_clk_scu("sdhc2_clk", IMX_SC_R_SDHC_2, IMX_SC_PM_CLK_PER);
- clks[IMX_CONN_ENET0_ROOT_CLK] = imx_clk_scu("enet0_clk", IMX_SC_R_ENET_0, IMX_SC_PM_CLK_PER);
- clks[IMX_CONN_ENET0_BYPASS_CLK] = imx_clk_scu("enet0_bypass_clk", IMX_SC_R_ENET_0, IMX_SC_PM_CLK_BYPASS);
- clks[IMX_CONN_ENET0_RGMII_CLK] = imx_clk_scu("enet0_rgmii_clk", IMX_SC_R_ENET_0, IMX_SC_PM_CLK_MISC0);
- clks[IMX_CONN_ENET1_ROOT_CLK] = imx_clk_scu("enet1_clk", IMX_SC_R_ENET_1, IMX_SC_PM_CLK_PER);
- clks[IMX_CONN_ENET1_BYPASS_CLK] = imx_clk_scu("enet1_bypass_clk", IMX_SC_R_ENET_1, IMX_SC_PM_CLK_BYPASS);
- clks[IMX_CONN_ENET1_RGMII_CLK] = imx_clk_scu("enet1_rgmii_clk", IMX_SC_R_ENET_1, IMX_SC_PM_CLK_MISC0);
- clks[IMX_CONN_GPMI_BCH_IO_CLK] = imx_clk_scu("gpmi_io_clk", IMX_SC_R_NAND, IMX_SC_PM_CLK_MST_BUS);
- clks[IMX_CONN_GPMI_BCH_CLK] = imx_clk_scu("gpmi_bch_clk", IMX_SC_R_NAND, IMX_SC_PM_CLK_PER);
- clks[IMX_CONN_USB2_ACLK] = imx_clk_scu("usb3_aclk_div", IMX_SC_R_USB_2, IMX_SC_PM_CLK_PER);
- clks[IMX_CONN_USB2_BUS_CLK] = imx_clk_scu("usb3_bus_div", IMX_SC_R_USB_2, IMX_SC_PM_CLK_MST_BUS);
- clks[IMX_CONN_USB2_LPM_CLK] = imx_clk_scu("usb3_lpm_div", IMX_SC_R_USB_2, IMX_SC_PM_CLK_MISC);
+ clks[IMX_CONN_SDHC0_CLK] = imx_clk_scu("sdhc0_clk", IMX_SC_R_SDHC_0, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_CONN_SDHC1_CLK] = imx_clk_scu("sdhc1_clk", IMX_SC_R_SDHC_1, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_CONN_SDHC2_CLK] = imx_clk_scu("sdhc2_clk", IMX_SC_R_SDHC_2, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_CONN_ENET0_ROOT_CLK] = imx_clk_scu("enet0_clk", IMX_SC_R_ENET_0, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_CONN_ENET0_BYPASS_CLK] = imx_clk_scu("enet0_bypass_clk", IMX_SC_R_ENET_0, IMX_SC_PM_CLK_BYPASS, clk_cells);
+ clks[IMX_CONN_ENET0_RGMII_CLK] = imx_clk_scu("enet0_rgmii_clk", IMX_SC_R_ENET_0, IMX_SC_PM_CLK_MISC0, clk_cells);
+ clks[IMX_CONN_ENET1_ROOT_CLK] = imx_clk_scu("enet1_clk", IMX_SC_R_ENET_1, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_CONN_ENET1_BYPASS_CLK] = imx_clk_scu("enet1_bypass_clk", IMX_SC_R_ENET_1, IMX_SC_PM_CLK_BYPASS, clk_cells);
+ clks[IMX_CONN_ENET1_RGMII_CLK] = imx_clk_scu("enet1_rgmii_clk", IMX_SC_R_ENET_1, IMX_SC_PM_CLK_MISC0, clk_cells);
+ clks[IMX_CONN_GPMI_BCH_IO_CLK] = imx_clk_scu("gpmi_io_clk", IMX_SC_R_NAND, IMX_SC_PM_CLK_MST_BUS, clk_cells);
+ clks[IMX_CONN_GPMI_BCH_CLK] = imx_clk_scu("gpmi_bch_clk", IMX_SC_R_NAND, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_CONN_USB2_ACLK] = imx_clk_scu("usb3_aclk_div", IMX_SC_R_USB_2, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_CONN_USB2_BUS_CLK] = imx_clk_scu("usb3_bus_div", IMX_SC_R_USB_2, IMX_SC_PM_CLK_MST_BUS, clk_cells);
+ clks[IMX_CONN_USB2_LPM_CLK] = imx_clk_scu("usb3_lpm_div", IMX_SC_R_USB_2, IMX_SC_PM_CLK_MISC, clk_cells);
/* Display controller SS */
- clks[IMX_DC0_DISP0_CLK] = imx_clk_scu("dc0_disp0_clk", IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC0);
- clks[IMX_DC0_DISP1_CLK] = imx_clk_scu("dc0_disp1_clk", IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC1);
+ clks[IMX_DC0_DISP0_CLK] = imx_clk_scu("dc0_disp0_clk", IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC0, clk_cells);
+ clks[IMX_DC0_DISP1_CLK] = imx_clk_scu("dc0_disp1_clk", IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC1, clk_cells);
/* MIPI-LVDS SS */
- clks[IMX_MIPI0_I2C0_CLK] = imx_clk_scu("mipi0_i2c0_clk", IMX_SC_R_MIPI_0_I2C_0, IMX_SC_PM_CLK_MISC2);
- clks[IMX_MIPI0_I2C1_CLK] = imx_clk_scu("mipi0_i2c1_clk", IMX_SC_R_MIPI_0_I2C_1, IMX_SC_PM_CLK_MISC2);
+ clks[IMX_MIPI0_I2C0_CLK] = imx_clk_scu("mipi0_i2c0_clk", IMX_SC_R_MIPI_0_I2C_0, IMX_SC_PM_CLK_MISC2, clk_cells);
+ clks[IMX_MIPI0_I2C1_CLK] = imx_clk_scu("mipi0_i2c1_clk", IMX_SC_R_MIPI_0_I2C_1, IMX_SC_PM_CLK_MISC2, clk_cells);
/* MIPI CSI SS */
- clks[IMX_CSI0_CORE_CLK] = imx_clk_scu("mipi_csi0_core_clk", IMX_SC_R_CSI_0, IMX_SC_PM_CLK_PER);
- clks[IMX_CSI0_ESC_CLK] = imx_clk_scu("mipi_csi0_esc_clk", IMX_SC_R_CSI_0, IMX_SC_PM_CLK_MISC);
- clks[IMX_CSI0_I2C0_CLK] = imx_clk_scu("mipi_csi0_i2c0_clk", IMX_SC_R_CSI_0_I2C_0, IMX_SC_PM_CLK_PER);
- clks[IMX_CSI0_PWM0_CLK] = imx_clk_scu("mipi_csi0_pwm0_clk", IMX_SC_R_CSI_0_PWM_0, IMX_SC_PM_CLK_PER);
+ clks[IMX_CSI0_CORE_CLK] = imx_clk_scu("mipi_csi0_core_clk", IMX_SC_R_CSI_0, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_CSI0_ESC_CLK] = imx_clk_scu("mipi_csi0_esc_clk", IMX_SC_R_CSI_0, IMX_SC_PM_CLK_MISC, clk_cells);
+ clks[IMX_CSI0_I2C0_CLK] = imx_clk_scu("mipi_csi0_i2c0_clk", IMX_SC_R_CSI_0_I2C_0, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_CSI0_PWM0_CLK] = imx_clk_scu("mipi_csi0_pwm0_clk", IMX_SC_R_CSI_0_PWM_0, IMX_SC_PM_CLK_PER, clk_cells);
/* GPU SS */
- clks[IMX_GPU0_CORE_CLK] = imx_clk_scu("gpu_core0_clk", IMX_SC_R_GPU_0_PID0, IMX_SC_PM_CLK_PER);
- clks[IMX_GPU0_SHADER_CLK] = imx_clk_scu("gpu_shader0_clk", IMX_SC_R_GPU_0_PID0, IMX_SC_PM_CLK_MISC);
+ clks[IMX_GPU0_CORE_CLK] = imx_clk_scu("gpu_core0_clk", IMX_SC_R_GPU_0_PID0, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_GPU0_SHADER_CLK] = imx_clk_scu("gpu_shader0_clk", IMX_SC_R_GPU_0_PID0, IMX_SC_PM_CLK_MISC, clk_cells);
for (i = 0; i < clk_data->num; i++) {
if (IS_ERR(clks[i]))
@@ -134,7 +138,19 @@ static int imx8qxp_clk_probe(struct platform_device *pdev)
i, PTR_ERR(clks[i]));
}
- return of_clk_add_hw_provider(ccm_node, of_clk_hw_onecell_get, clk_data);
+ if (clk_cells == 2) {
+ ret = of_clk_add_hw_provider(ccm_node, imx_scu_of_clk_src_get, imx_scu_clks);
+ if (ret)
+ imx_clk_scu_unregister();
+ } else {
+ /*
+ * the legacy binding code path doesn't unregister on failure
+ * because it will be removed altogether later.
+ */
+ ret = of_clk_add_hw_provider(ccm_node, of_clk_hw_onecell_get, clk_data);
+ }
+
+ return ret;
}
static const struct of_device_id imx8qxp_match[] = {
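Note: with the two-cell binding selected above, a consumer names an SCU clock by a (resource, clock type) pair, e.g. clocks = <&clk IMX_SC_R_UART_0 IMX_SC_PM_CLK_PER>, instead of a flat index. Both cells are resolved by the provider callback (imx_scu_of_clk_src_get() in clk-scu.c below), so consumer drivers need no change. A minimal sketch, assuming a hypothetical consumer with a "per" clock:

#include <linux/clk.h>
#include <linux/platform_device.h>

static int example_consumer_probe(struct platform_device *pdev)
{
	/* the two specifier cells never reach the consumer; the plain
	 * clk API keeps working unchanged */
	struct clk *per = devm_clk_get(&pdev->dev, "per");

	if (IS_ERR(per))
		return PTR_ERR(per);

	return clk_prepare_enable(per);
}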
diff --git a/drivers/clk/imx/clk-lpcg-scu.c b/drivers/clk/imx/clk-lpcg-scu.c
index 1f0e44f921ae..77be7632866d 100644
--- a/drivers/clk/imx/clk-lpcg-scu.c
+++ b/drivers/clk/imx/clk-lpcg-scu.c
@@ -34,6 +34,9 @@ struct clk_lpcg_scu {
void __iomem *reg;
u8 bit_idx;
bool hw_gate;
+
+ /* gate register state, saved/restored across system suspend */
+ u32 state;
};
#define to_clk_lpcg_scu(_hw) container_of(_hw, struct clk_lpcg_scu, hw)
@@ -81,9 +84,9 @@ static const struct clk_ops clk_lpcg_scu_ops = {
.disable = clk_lpcg_scu_disable,
};
-struct clk_hw *imx_clk_lpcg_scu(const char *name, const char *parent_name,
- unsigned long flags, void __iomem *reg,
- u8 bit_idx, bool hw_gate)
+struct clk_hw *__imx_clk_lpcg_scu(struct device *dev, const char *name,
+ const char *parent_name, unsigned long flags,
+ void __iomem *reg, u8 bit_idx, bool hw_gate)
{
struct clk_lpcg_scu *clk;
struct clk_init_data init;
@@ -107,11 +110,53 @@ struct clk_hw *imx_clk_lpcg_scu(const char *name, const char *parent_name,
clk->hw.init = &init;
hw = &clk->hw;
- ret = clk_hw_register(NULL, hw);
+ ret = clk_hw_register(dev, hw);
if (ret) {
kfree(clk);
hw = ERR_PTR(ret);
}
+ if (dev)
+ dev_set_drvdata(dev, clk);
+
return hw;
}
+
+void imx_clk_lpcg_scu_unregister(struct clk_hw *hw)
+{
+ struct clk_lpcg_scu *clk = to_clk_lpcg_scu(hw);
+
+ clk_hw_unregister(&clk->hw);
+ kfree(clk);
+}
+
+static int __maybe_unused imx_clk_lpcg_scu_suspend(struct device *dev)
+{
+ struct clk_lpcg_scu *clk = dev_get_drvdata(dev);
+
+ clk->state = readl_relaxed(clk->reg);
+ dev_dbg(dev, "save lpcg state 0x%x\n", clk->state);
+
+ return 0;
+}
+
+static int __maybe_unused imx_clk_lpcg_scu_resume(struct device *dev)
+{
+ struct clk_lpcg_scu *clk = dev_get_drvdata(dev);
+
+ /*
+ * FIXME: Sometimes writes don't work unless the CPU issues
+ * them twice
+ */
+
+ writel(clk->state, clk->reg);
+ writel(clk->state, clk->reg);
+ dev_dbg(dev, "restore lpcg state 0x%x\n", clk->state);
+
+ return 0;
+}
+
+const struct dev_pm_ops imx_clk_lpcg_scu_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_clk_lpcg_scu_suspend,
+ imx_clk_lpcg_scu_resume)
+};
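Note: the suspend/resume pair above registers at the noirq phase, so the LPCG gate word is saved after consumers have quiesced and written back (twice, per the FIXME carried over from the gate path) before they resume. Since dev_set_drvdata() holds a single struct clk_lpcg_scu, each gate is expected to live on its own device. A minimal sketch of a driver wiring up the shared PM ops; the driver name and empty probe body are hypothetical:

#include <linux/module.h>
#include <linux/platform_device.h>

#include "clk-scu.h"	/* declares imx_clk_lpcg_scu_pm_ops (see below) */

static int example_lpcg_probe(struct platform_device *pdev)
{
	/* register gates with __imx_clk_lpcg_scu(&pdev->dev, ...) so the
	 * PM callbacks can retrieve them via dev_get_drvdata() */
	return 0;
}

static struct platform_driver example_lpcg_driver = {
	.driver = {
		.name = "example-lpcg-clk",
		.pm = &imx_clk_lpcg_scu_pm_ops,
		.suppress_bind_attrs = true,
	},
	.probe = example_lpcg_probe,
};
module_platform_driver(example_lpcg_driver);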
diff --git a/drivers/clk/imx/clk-pll14xx.c b/drivers/clk/imx/clk-pll14xx.c
index aba36e4217d2..2b5ed86b9dbb 100644
--- a/drivers/clk/imx/clk-pll14xx.c
+++ b/drivers/clk/imx/clk-pll14xx.c
@@ -416,7 +416,7 @@ struct clk_hw *imx_dev_clk_hw_pll14xx(struct device *dev, const char *name,
__func__, name);
kfree(pll);
return ERR_PTR(-EINVAL);
- };
+ }
pll->base = base;
pll->hw.init = &init;
diff --git a/drivers/clk/imx/clk-scu.c b/drivers/clk/imx/clk-scu.c
index b8b2072742a5..1f5518b7ab39 100644
--- a/drivers/clk/imx/clk-scu.c
+++ b/drivers/clk/imx/clk-scu.c
@@ -8,6 +8,10 @@
#include <linux/arm-smccc.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include "clk-scu.h"
@@ -16,6 +20,21 @@
#define IMX_SIP_SET_CPUFREQ 0x00
static struct imx_sc_ipc *ccm_ipc_handle;
+static struct device_node *pd_np;
+static struct platform_driver imx_clk_scu_driver;
+
+struct imx_scu_clk_node {
+ const char *name;
+ u32 rsrc;
+ u8 clk_type;
+ const char * const *parents;
+ int num_parents;
+
+ struct clk_hw *hw;
+ struct list_head node;
+};
+
+struct list_head imx_scu_clks[IMX_SC_R_LAST];
/*
* struct clk_scu - Description of one SCU clock
@@ -27,6 +46,10 @@ struct clk_scu {
struct clk_hw hw;
u16 rsrc_id;
u8 clk_type;
+
+ /* clock state, saved/restored across system suspend */
+ bool is_enabled;
+ u32 rate;
};
/*
@@ -128,9 +151,28 @@ static inline struct clk_scu *to_clk_scu(struct clk_hw *hw)
return container_of(hw, struct clk_scu, hw);
}
-int imx_clk_scu_init(void)
+int imx_clk_scu_init(struct device_node *np)
{
- return imx_scu_get_handle(&ccm_ipc_handle);
+ u32 clk_cells = 0; /* stays 0 (legacy path) if #clock-cells is absent */
+ int ret, i;
+
+ ret = imx_scu_get_handle(&ccm_ipc_handle);
+ if (ret)
+ return ret;
+
+ of_property_read_u32(np, "#clock-cells", &clk_cells);
+
+ if (clk_cells == 2) {
+ for (i = 0; i < IMX_SC_R_LAST; i++)
+ INIT_LIST_HEAD(&imx_scu_clks[i]);
+
+ /* pd_np will be used to attach power domains later */
+ pd_np = of_find_compatible_node(NULL, NULL, "fsl,scu-pd");
+ if (!pd_np)
+ return -EINVAL;
+ }
+
+ return platform_driver_register(&imx_clk_scu_driver);
}
/*
@@ -344,8 +386,9 @@ static const struct clk_ops clk_scu_cpu_ops = {
.unprepare = clk_scu_unprepare,
};
-struct clk_hw *__imx_clk_scu(const char *name, const char * const *parents,
- int num_parents, u32 rsrc_id, u8 clk_type)
+struct clk_hw *__imx_clk_scu(struct device *dev, const char *name,
+ const char * const *parents, int num_parents,
+ u32 rsrc_id, u8 clk_type)
{
struct clk_init_data init;
struct clk_scu *clk;
@@ -379,11 +422,185 @@ struct clk_hw *__imx_clk_scu(const char *name, const char * const *parents,
clk->hw.init = &init;
hw = &clk->hw;
- ret = clk_hw_register(NULL, hw);
+ ret = clk_hw_register(dev, hw);
if (ret) {
kfree(clk);
hw = ERR_PTR(ret);
}
+ if (dev)
+ dev_set_drvdata(dev, clk);
+
return hw;
}
+
+struct clk_hw *imx_scu_of_clk_src_get(struct of_phandle_args *clkspec,
+ void *data)
+{
+ unsigned int rsrc = clkspec->args[0];
+ unsigned int idx = clkspec->args[1];
+ struct list_head *scu_clks = data;
+ struct imx_scu_clk_node *clk;
+
+ list_for_each_entry(clk, &scu_clks[rsrc], node) {
+ if (clk->clk_type == idx)
+ return clk->hw;
+ }
+
+ return ERR_PTR(-ENODEV);
+}
+
+static int imx_clk_scu_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct imx_scu_clk_node *clk = dev_get_platdata(dev);
+ struct clk_hw *hw;
+ int ret;
+
+ pm_runtime_set_suspended(dev);
+ pm_runtime_set_autosuspend_delay(dev, 50);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_enable(dev);
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) { /* pm_runtime_get_sync() returns 1 if already active */
+ pm_runtime_put_noidle(dev);
+ pm_runtime_disable(dev);
+ return ret;
+ }
+
+ hw = __imx_clk_scu(dev, clk->name, clk->parents, clk->num_parents,
+ clk->rsrc, clk->clk_type);
+ if (IS_ERR(hw)) {
+ pm_runtime_disable(dev);
+ return PTR_ERR(hw);
+ }
+
+ clk->hw = hw;
+ list_add_tail(&clk->node, &imx_scu_clks[clk->rsrc]);
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ dev_dbg(dev, "register SCU clock rsrc:%d type:%d\n", clk->rsrc,
+ clk->clk_type);
+
+ return 0;
+}
+
+static int __maybe_unused imx_clk_scu_suspend(struct device *dev)
+{
+ struct clk_scu *clk = dev_get_drvdata(dev);
+
+ clk->rate = clk_hw_get_rate(&clk->hw);
+ clk->is_enabled = clk_hw_is_enabled(&clk->hw);
+
+ if (clk->rate)
+ dev_dbg(dev, "save rate %d\n", clk->rate);
+
+ if (clk->is_enabled)
+ dev_dbg(dev, "save enabled state\n");
+
+ return 0;
+}
+
+static int __maybe_unused imx_clk_scu_resume(struct device *dev)
+{
+ struct clk_scu *clk = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (clk->rate) {
+ ret = clk_scu_set_rate(&clk->hw, clk->rate, 0);
+ dev_dbg(dev, "restore rate %d %s\n", clk->rate,
+ !ret ? "success" : "failed");
+ }
+
+ if (clk->is_enabled) {
+ ret = clk_scu_prepare(&clk->hw);
+ dev_dbg(dev, "restore enabled state %s\n",
+ !ret ? "success" : "failed");
+ }
+
+ return ret;
+}
+
+static const struct dev_pm_ops imx_clk_scu_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_clk_scu_suspend,
+ imx_clk_scu_resume)
+};
+
+static struct platform_driver imx_clk_scu_driver = {
+ .driver = {
+ .name = "imx-scu-clk",
+ .suppress_bind_attrs = true,
+ .pm = &imx_clk_scu_pm_ops,
+ },
+ .probe = imx_clk_scu_probe,
+};
+
+static int imx_clk_scu_attach_pd(struct device *dev, u32 rsrc_id)
+{
+ struct of_phandle_args genpdspec = {
+ .np = pd_np,
+ .args_count = 1,
+ .args[0] = rsrc_id,
+ };
+
+ if (rsrc_id == IMX_SC_R_A35 || rsrc_id == IMX_SC_R_A53 ||
+ rsrc_id == IMX_SC_R_A72)
+ return 0;
+
+ return of_genpd_add_device(&genpdspec, dev);
+}
+
+struct clk_hw *imx_clk_scu_alloc_dev(const char *name,
+ const char * const *parents,
+ int num_parents, u32 rsrc_id, u8 clk_type)
+{
+ struct imx_scu_clk_node clk = {
+ .name = name,
+ .rsrc = rsrc_id,
+ .clk_type = clk_type,
+ .parents = parents,
+ .num_parents = num_parents,
+ };
+ struct platform_device *pdev;
+ int ret;
+
+ pdev = platform_device_alloc(name, PLATFORM_DEVID_NONE);
+ if (!pdev) {
+ pr_err("%s: failed to allocate scu clk dev rsrc %d type %d\n",
+ name, rsrc_id, clk_type);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ret = platform_device_add_data(pdev, &clk, sizeof(clk));
+ if (ret) {
+ platform_device_put(pdev);
+ return ERR_PTR(ret);
+ }
+
+ pdev->driver_override = "imx-scu-clk";
+
+ ret = imx_clk_scu_attach_pd(&pdev->dev, rsrc_id);
+ if (ret)
+ pr_warn("%s: failed to attached the power domain %d\n",
+ name, ret);
+
+ ret = platform_device_add(pdev);
+ if (ret) {
+ platform_device_put(pdev);
+ return ERR_PTR(ret);
+ }
+
+ /* For API backwards compatibility, simply return NULL for success */
+ return NULL;
+}
+
+void imx_clk_scu_unregister(void)
+{
+ struct imx_scu_clk_node *clk;
+ int i;
+
+ for (i = 0; i < IMX_SC_R_LAST; i++) {
+ list_for_each_entry(clk, &imx_scu_clks[i], node) {
+ clk_hw_unregister(clk->hw);
+ kfree(clk);
+ }
+ }
+}
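To recap the two-cell flow implemented above: imx_clk_scu_alloc_dev() spawns one child platform device per clock, attaches it to the matching SCU power domain (CPU resources excepted), and binds it to the imx-scu-clk driver via driver_override; the clk_hw itself only comes to life in imx_clk_scu_probe() and is published through imx_scu_clks[], which is why the helper returns NULL on success. A caller-side sketch, with an illustrative resource/type pair:

#include <linux/err.h>
#include <linux/printk.h>

#include "clk-scu.h"

static void example_register_scu_clk(void)
{
	/* the real clk_hw_register() is deferred to imx_clk_scu_probe();
	 * consumers later find the clock via imx_scu_of_clk_src_get(),
	 * not through this return value */
	struct clk_hw *hw = imx_clk_scu_alloc_dev("uart0_clk", NULL, 0,
						  IMX_SC_R_UART_0,
						  IMX_SC_PM_CLK_PER);

	if (IS_ERR(hw))
		pr_err("uart0_clk: deferred registration failed: %ld\n",
		       PTR_ERR(hw));
}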
diff --git a/drivers/clk/imx/clk-scu.h b/drivers/clk/imx/clk-scu.h
index 2bcfaf06a458..e8352164923e 100644
--- a/drivers/clk/imx/clk-scu.h
+++ b/drivers/clk/imx/clk-scu.h
@@ -8,25 +8,61 @@
#define __IMX_CLK_SCU_H
#include <linux/firmware/imx/sci.h>
+#include <linux/of.h>
-int imx_clk_scu_init(void);
+extern struct list_head imx_scu_clks[];
+extern const struct dev_pm_ops imx_clk_lpcg_scu_pm_ops;
-struct clk_hw *__imx_clk_scu(const char *name, const char * const *parents,
- int num_parents, u32 rsrc_id, u8 clk_type);
+int imx_clk_scu_init(struct device_node *np);
+struct clk_hw *imx_scu_of_clk_src_get(struct of_phandle_args *clkspec,
+ void *data);
+struct clk_hw *imx_clk_scu_alloc_dev(const char *name,
+ const char * const *parents,
+ int num_parents, u32 rsrc_id, u8 clk_type);
+
+struct clk_hw *__imx_clk_scu(struct device *dev, const char *name,
+ const char * const *parents, int num_parents,
+ u32 rsrc_id, u8 clk_type);
+
+void imx_clk_scu_unregister(void);
+
+struct clk_hw *__imx_clk_lpcg_scu(struct device *dev, const char *name,
+ const char *parent_name, unsigned long flags,
+ void __iomem *reg, u8 bit_idx, bool hw_gate);
+void imx_clk_lpcg_scu_unregister(struct clk_hw *hw);
static inline struct clk_hw *imx_clk_scu(const char *name, u32 rsrc_id,
- u8 clk_type)
+ u8 clk_type, u8 clk_cells)
{
- return __imx_clk_scu(name, NULL, 0, rsrc_id, clk_type);
+ if (clk_cells == 2)
+ return imx_clk_scu_alloc_dev(name, NULL, 0, rsrc_id, clk_type);
+ else
+ return __imx_clk_scu(NULL, name, NULL, 0, rsrc_id, clk_type);
}
static inline struct clk_hw *imx_clk_scu2(const char *name, const char * const *parents,
- int num_parents, u32 rsrc_id, u8 clk_type)
+ int num_parents, u32 rsrc_id, u8 clk_type,
+ u8 clk_cells)
+{
+ if (clk_cells == 2)
+ return imx_clk_scu_alloc_dev(name, parents, num_parents, rsrc_id, clk_type);
+ else
+ return __imx_clk_scu(NULL, name, parents, num_parents, rsrc_id, clk_type);
+}
+
+static inline struct clk_hw *imx_clk_lpcg_scu_dev(struct device *dev, const char *name,
+ const char *parent_name, unsigned long flags,
+ void __iomem *reg, u8 bit_idx, bool hw_gate)
{
- return __imx_clk_scu(name, parents, num_parents, rsrc_id, clk_type);
+ return __imx_clk_lpcg_scu(dev, name, parent_name, flags, reg,
+ bit_idx, hw_gate);
}
-struct clk_hw *imx_clk_lpcg_scu(const char *name, const char *parent_name,
- unsigned long flags, void __iomem *reg,
- u8 bit_idx, bool hw_gate);
+static inline struct clk_hw *imx_clk_lpcg_scu(const char *name, const char *parent_name,
+ unsigned long flags, void __iomem *reg,
+ u8 bit_idx, bool hw_gate)
+{
+ return __imx_clk_lpcg_scu(NULL, name, parent_name, flags, reg,
+ bit_idx, hw_gate);
+}
#endif
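The inline wrappers above make the binding switch invisible at the SoC call sites: clk_cells == 2 routes to the deferred per-device path, anything else to the legacy immediate registration. A sketch of how a platform driver derives and threads the value, mirroring what imx_clk_scu_init() does with its node:

#include <linux/of.h>
#include <linux/platform_device.h>

#include "clk-scu.h"

static int example_soc_clk_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	u32 clk_cells = 0;	/* absent property falls back to legacy */
	struct clk_hw *hw;

	of_property_read_u32(np, "#clock-cells", &clk_cells);

	/* one call site serves both bindings */
	hw = imx_clk_scu("uart0_clk", IMX_SC_R_UART_0,
			 IMX_SC_PM_CLK_PER, clk_cells);

	/* NULL (two-cell success) and a live hw both pass this check */
	return PTR_ERR_OR_ZERO(hw);
}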
diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
index 1d7be0c86538..4f04c8287286 100644
--- a/drivers/clk/imx/clk.h
+++ b/drivers/clk/imx/clk.h
@@ -6,8 +6,6 @@
#include <linux/spinlock.h>
#include <linux/clk-provider.h>
-#define IMX_CLK_GATE2_SINGLE_BIT 1
-
extern spinlock_t imx_ccm_lock;
void imx_check_clocks(struct clk *clks[], unsigned int count);
@@ -68,9 +66,9 @@ extern struct imx_pll14xx_clk imx_1443x_dram_pll;
to_clk(imx_clk_hw_cpu(name, parent_name, div, mux, pll, step))
#define clk_register_gate2(dev, name, parent_name, flags, reg, bit_idx, \
- cgr_val, clk_gate_flags, lock, share_count) \
+ cgr_val, cgr_mask, clk_gate_flags, lock, share_count) \
to_clk(clk_hw_register_gate2(dev, name, parent_name, flags, reg, bit_idx, \
- cgr_val, clk_gate_flags, lock, share_count))
+ cgr_val, cgr_mask, clk_gate_flags, lock, share_count))
#define imx_clk_pllv3(type, name, parent_name, base, div_mask) \
to_clk(imx_clk_hw_pllv3(type, name, parent_name, base, div_mask))
@@ -198,7 +196,7 @@ struct clk_hw *imx_clk_hw_pllv4(const char *name, const char *parent_name,
struct clk_hw *clk_hw_register_gate2(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
- void __iomem *reg, u8 bit_idx, u8 cgr_val,
+ void __iomem *reg, u8 bit_idx, u8 cgr_val, u8 cgr_mask,
u8 clk_gate_flags, spinlock_t *lock,
unsigned int *share_count);
@@ -351,14 +349,14 @@ static inline struct clk_hw *imx_clk_hw_gate2(const char *name, const char *pare
void __iomem *reg, u8 shift)
{
return clk_hw_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT, reg,
- shift, 0x3, 0, &imx_ccm_lock, NULL);
+ shift, 0x3, 0x3, 0, &imx_ccm_lock, NULL);
}
static inline struct clk_hw *imx_clk_hw_gate2_flags(const char *name, const char *parent,
void __iomem *reg, u8 shift, unsigned long flags)
{
return clk_hw_register_gate2(NULL, name, parent, flags | CLK_SET_RATE_PARENT, reg,
- shift, 0x3, 0, &imx_ccm_lock, NULL);
+ shift, 0x3, 0x3, 0, &imx_ccm_lock, NULL);
}
static inline struct clk_hw *imx_clk_hw_gate2_shared(const char *name,
@@ -366,7 +364,7 @@ static inline struct clk_hw *imx_clk_hw_gate2_shared(const char *name,
unsigned int *share_count)
{
return clk_hw_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT, reg,
- shift, 0x3, 0, &imx_ccm_lock, share_count);
+ shift, 0x3, 0x3, 0, &imx_ccm_lock, share_count);
}
static inline struct clk_hw *imx_clk_hw_gate2_shared2(const char *name,
@@ -374,7 +372,7 @@ static inline struct clk_hw *imx_clk_hw_gate2_shared2(const char *name,
unsigned int *share_count)
{
return clk_hw_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT |
- CLK_OPS_PARENT_ENABLE, reg, shift, 0x3, 0,
+ CLK_OPS_PARENT_ENABLE, reg, shift, 0x3, 0x3, 0,
&imx_ccm_lock, share_count);
}
@@ -384,16 +382,15 @@ static inline struct clk_hw *imx_dev_clk_hw_gate_shared(struct device *dev,
unsigned int *share_count)
{
return clk_hw_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT |
- CLK_OPS_PARENT_ENABLE, reg, shift, 0x3,
- IMX_CLK_GATE2_SINGLE_BIT,
- &imx_ccm_lock, share_count);
+ CLK_OPS_PARENT_ENABLE, reg, shift, 0x1,
+ 0x1, 0, &imx_ccm_lock, share_count);
}
static inline struct clk *imx_clk_gate2_cgr(const char *name,
const char *parent, void __iomem *reg, u8 shift, u8 cgr_val)
{
return clk_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT, reg,
- shift, cgr_val, 0, &imx_ccm_lock, NULL);
+ shift, cgr_val, 0x3, 0, &imx_ccm_lock, NULL);
}
static inline struct clk_hw *imx_clk_hw_gate3(const char *name, const char *parent,
@@ -421,7 +418,7 @@ static inline struct clk_hw *imx_clk_hw_gate4(const char *name, const char *pare
{
return clk_hw_register_gate2(NULL, name, parent,
CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
- reg, shift, 0x3, 0, &imx_ccm_lock, NULL);
+ reg, shift, 0x3, 0x3, 0, &imx_ccm_lock, NULL);
}
static inline struct clk_hw *imx_clk_hw_gate4_flags(const char *name,
@@ -430,7 +427,7 @@ static inline struct clk_hw *imx_clk_hw_gate4_flags(const char *name,
{
return clk_hw_register_gate2(NULL, name, parent,
flags | CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
- reg, shift, 0x3, 0, &imx_ccm_lock, NULL);
+ reg, shift, 0x3, 0x3, 0, &imx_ccm_lock, NULL);
}
#define imx_clk_gate4_flags(name, parent, reg, shift, flags) \
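clk_hw_register_gate2() now takes an explicit (cgr_val, cgr_mask) pair in place of the removed IMX_CLK_GATE2_SINGLE_BIT flag: two-bit CGR fields pass 0x3/0x3, single-bit gates 0x1/0x1. A sketch of the read-modify-write this implies on the enable path, assuming the usual gate2 implementation:

#include <linux/io.h>

static void example_gate2_enable(void __iomem *reg, u8 shift,
				 u8 cgr_val, u8 cgr_mask)
{
	u32 val = readl(reg);

	val &= ~((u32)cgr_mask << shift);	/* clear the whole CGR field */
	val |= (u32)cgr_val << shift;		/* then apply the enable value */
	writel(val, reg);
}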
diff --git a/drivers/clk/ingenic/cgu.c b/drivers/clk/ingenic/cgu.c
index dac6edc670cc..c8e9cb6c8e39 100644
--- a/drivers/clk/ingenic/cgu.c
+++ b/drivers/clk/ingenic/cgu.c
@@ -392,15 +392,21 @@ static unsigned int
ingenic_clk_calc_hw_div(const struct ingenic_cgu_clk_info *clk_info,
unsigned int div)
{
- unsigned int i;
+ unsigned int i, best_i = 0, best = (unsigned int)-1;
for (i = 0; i < (1 << clk_info->div.bits)
&& clk_info->div.div_table[i]; i++) {
- if (clk_info->div.div_table[i] >= div)
- return i;
+ if (clk_info->div.div_table[i] >= div &&
+ clk_info->div.div_table[i] < best) {
+ best = clk_info->div.div_table[i];
+ best_i = i;
+
+ if (div == best)
+ break;
+ }
}
- return i - 1;
+ return best_i;
}
static unsigned
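The rewritten ingenic_clk_calc_hw_div() drops the assumption that the divider table is sorted: it keeps the smallest entry that still satisfies the request and returns that index, where the old code returned the first match (and a meaningless i - 1 when nothing matched at all). A standalone illustration of the selection rule:

static unsigned int pick_hw_div(const unsigned int *table, unsigned int n,
				unsigned int div)
{
	unsigned int i, best_i = 0, best = (unsigned int)-1;

	for (i = 0; i < n && table[i]; i++) {
		if (table[i] >= div && table[i] < best) {
			best = table[i];
			best_i = i;
			if (div == best)
				break;	/* exact match, cannot improve */
		}
	}
	/* e.g. table {8, 6, 4, 3, 2, 1}, div = 5: first-match picked 8,
	 * this picks 6 (index 1) */
	return best_i;
}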
diff --git a/drivers/clk/mediatek/clk-mux.c b/drivers/clk/mediatek/clk-mux.c
index 14e127e9a740..dcc1352bf13c 100644
--- a/drivers/clk/mediatek/clk-mux.c
+++ b/drivers/clk/mediatek/clk-mux.c
@@ -155,7 +155,7 @@ const struct clk_ops mtk_mux_gate_clr_set_upd_ops = {
.set_parent = mtk_clk_mux_set_parent_setclr_lock,
};
-struct clk *mtk_clk_register_mux(const struct mtk_mux *mux,
+static struct clk *mtk_clk_register_mux(const struct mtk_mux *mux,
struct regmap *regmap,
spinlock_t *lock)
{
diff --git a/drivers/clk/mediatek/clk-mux.h b/drivers/clk/mediatek/clk-mux.h
index f5625f4d9e6c..8e2f927dd2ff 100644
--- a/drivers/clk/mediatek/clk-mux.h
+++ b/drivers/clk/mediatek/clk-mux.h
@@ -77,10 +77,6 @@ extern const struct clk_ops mtk_mux_gate_clr_set_upd_ops;
_width, _gate, _upd_ofs, _upd, \
CLK_SET_RATE_PARENT)
-struct clk *mtk_clk_register_mux(const struct mtk_mux *mux,
- struct regmap *regmap,
- spinlock_t *lock);
-
int mtk_clk_register_muxes(const struct mtk_mux *muxes,
int num, struct device_node *node,
spinlock_t *lock,
diff --git a/drivers/clk/meson/Kconfig b/drivers/clk/meson/Kconfig
index 034da203e8e0..fc002c155bc3 100644
--- a/drivers/clk/meson/Kconfig
+++ b/drivers/clk/meson/Kconfig
@@ -58,7 +58,7 @@ config COMMON_CLK_MESON8B
want peripherals and CPU frequency scaling to work.
config COMMON_CLK_GXBB
- bool "GXBB and GXL SoC clock controllers support"
+ tristate "GXBB and GXL SoC clock controllers support"
depends on ARM64
default y
select COMMON_CLK_MESON_REGMAP
@@ -74,7 +74,7 @@ config COMMON_CLK_GXBB
Say Y if you want peripherals and CPU frequency scaling to work.
config COMMON_CLK_AXG
- bool "AXG SoC clock controllers support"
+ tristate "AXG SoC clock controllers support"
depends on ARM64
default y
select COMMON_CLK_MESON_REGMAP
@@ -100,7 +100,7 @@ config COMMON_CLK_AXG_AUDIO
aka axg, Say Y if you want audio subsystem to work.
config COMMON_CLK_G12A
- bool "G12 and SM1 SoC clock controllers support"
+ tristate "G12 and SM1 SoC clock controllers support"
depends on ARM64
default y
select COMMON_CLK_MESON_REGMAP
@@ -110,6 +110,7 @@ config COMMON_CLK_G12A
select COMMON_CLK_MESON_AO_CLKC
select COMMON_CLK_MESON_EE_CLKC
select COMMON_CLK_MESON_CPU_DYNDIV
+ select COMMON_CLK_MESON_VID_PLL_DIV
select MFD_SYSCON
help
Support for the clock controller on Amlogic S905D2, S905X2 and S905Y2
diff --git a/drivers/clk/meson/axg-aoclk.c b/drivers/clk/meson/axg-aoclk.c
index b488b40c9d0e..af6db437bcd8 100644
--- a/drivers/clk/meson/axg-aoclk.c
+++ b/drivers/clk/meson/axg-aoclk.c
@@ -12,6 +12,7 @@
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/mfd/syscon.h>
+#include <linux/module.h>
#include "meson-aoclk.h"
#include "axg-aoclk.h"
@@ -326,6 +327,7 @@ static const struct of_device_id axg_aoclkc_match_table[] = {
},
{ }
};
+MODULE_DEVICE_TABLE(of, axg_aoclkc_match_table);
static struct platform_driver axg_aoclkc_driver = {
.probe = meson_aoclkc_probe,
@@ -335,4 +337,5 @@ static struct platform_driver axg_aoclkc_driver = {
},
};
-builtin_platform_driver(axg_aoclkc_driver);
+module_platform_driver(axg_aoclkc_driver);
+MODULE_LICENSE("GPL v2");
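The tristate switch in Kconfig only pays off with the matching module plumbing seen here: module_platform_driver() replaces builtin_platform_driver(), a license is declared, and MODULE_DEVICE_TABLE() exports the OF match table so modprobe can autoload on the compatible string. The same three-part pattern, reduced to a hypothetical driver:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int example_clkc_probe(struct platform_device *pdev)
{
	return 0;
}

static const struct of_device_id example_clkc_match[] = {
	{ .compatible = "vendor,example-clkc" },	/* illustrative */
	{ }
};
MODULE_DEVICE_TABLE(of, example_clkc_match);	/* emits the modalias entries */

static struct platform_driver example_clkc_driver = {
	.probe = example_clkc_probe,
	.driver = {
		.name = "example-clkc",
		.of_match_table = example_clkc_match,
	},
};
module_platform_driver(example_clkc_driver);
MODULE_LICENSE("GPL v2");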
diff --git a/drivers/clk/meson/axg.c b/drivers/clk/meson/axg.c
index 13fc0006f63d..0e44695b8772 100644
--- a/drivers/clk/meson/axg.c
+++ b/drivers/clk/meson/axg.c
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/module.h>
#include "clk-regmap.h"
#include "clk-pll.h"
@@ -1026,6 +1027,743 @@ static struct clk_regmap axg_sd_emmc_c_clk0 = {
},
};
+/* VPU Clock */
+
+static const struct clk_hw *axg_vpu_parent_hws[] = {
+ &axg_fclk_div4.hw,
+ &axg_fclk_div3.hw,
+ &axg_fclk_div5.hw,
+ &axg_fclk_div7.hw,
+};
+
+static struct clk_regmap axg_vpu_0_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .mask = 0x3,
+ .shift = 9,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vpu_0_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_hws = axg_vpu_parent_hws,
+ .num_parents = ARRAY_SIZE(axg_vpu_parent_hws),
+ /* The VPU clock source needs a specific parent; let DT assign it */
+ .flags = CLK_SET_RATE_NO_REPARENT,
+ },
+};
+
+static struct clk_regmap axg_vpu_0_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vpu_0_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_hws = (const struct clk_hw *[]) { &axg_vpu_0_sel.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap axg_vpu_0 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .bit_idx = 8,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vpu_0",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) { &axg_vpu_0_div.hw },
+ .num_parents = 1,
+ /*
+ * Prevent the CCF from disabling the VPU clock if the
+ * display has been set up by the bootloader
+ */
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap axg_vpu_1_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .mask = 0x3,
+ .shift = 25,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vpu_1_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_hws = axg_vpu_parent_hws,
+ .num_parents = ARRAY_SIZE(axg_vpu_parent_hws),
+ /* The VPU clock source needs a specific parent; let DT assign it */
+ .flags = CLK_SET_RATE_NO_REPARENT,
+ },
+};
+
+static struct clk_regmap axg_vpu_1_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .shift = 16,
+ .width = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vpu_1_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_hws = (const struct clk_hw *[]) { &axg_vpu_1_sel.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap axg_vpu_1 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .bit_idx = 24,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vpu_1",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) { &axg_vpu_1_div.hw },
+ .num_parents = 1,
+ /*
+ * Prevent the CCF from disabling the VPU clock if the
+ * display has been set up by the bootloader
+ */
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap axg_vpu = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .mask = 1,
+ .shift = 31,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vpu",
+ .ops = &clk_regmap_mux_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_vpu_0.hw,
+ &axg_vpu_1.hw
+ },
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_NO_REPARENT,
+ },
+};
+
+/* VAPB Clock */
+
+static struct clk_regmap axg_vapb_0_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VAPBCLK_CNTL,
+ .mask = 0x3,
+ .shift = 9,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vapb_0_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_hws = axg_vpu_parent_hws,
+ .num_parents = ARRAY_SIZE(axg_vpu_parent_hws),
+ .flags = CLK_SET_RATE_NO_REPARENT,
+ },
+};
+
+static struct clk_regmap axg_vapb_0_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VAPBCLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vapb_0_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_vapb_0_sel.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap axg_vapb_0 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VAPBCLK_CNTL,
+ .bit_idx = 8,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vapb_0",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_vapb_0_div.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap axg_vapb_1_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VAPBCLK_CNTL,
+ .mask = 0x3,
+ .shift = 25,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vapb_1_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_hws = axg_vpu_parent_hws,
+ .num_parents = ARRAY_SIZE(axg_vpu_parent_hws),
+ .flags = CLK_SET_RATE_NO_REPARENT,
+ },
+};
+
+static struct clk_regmap axg_vapb_1_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VAPBCLK_CNTL,
+ .shift = 16,
+ .width = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vapb_1_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_vapb_1_sel.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap axg_vapb_1 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VAPBCLK_CNTL,
+ .bit_idx = 24,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vapb_1",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_vapb_1_div.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap axg_vapb_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VAPBCLK_CNTL,
+ .mask = 1,
+ .shift = 31,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vapb_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_vapb_0.hw,
+ &axg_vapb_1.hw
+ },
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_NO_REPARENT,
+ },
+};
+
+static struct clk_regmap axg_vapb = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VAPBCLK_CNTL,
+ .bit_idx = 30,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vapb",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) { &axg_vapb_sel.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+/* Video Clocks */
+
+static const struct clk_hw *axg_vclk_parent_hws[] = {
+ &axg_gp0_pll.hw,
+ &axg_fclk_div4.hw,
+ &axg_fclk_div3.hw,
+ &axg_fclk_div5.hw,
+ &axg_fclk_div2.hw,
+ &axg_fclk_div7.hw,
+ &axg_mpll1.hw,
+};
+
+static struct clk_regmap axg_vclk_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VID_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 16,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_hws = axg_vclk_parent_hws,
+ .num_parents = ARRAY_SIZE(axg_vclk_parent_hws),
+ .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_regmap axg_vclk2_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VIID_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 16,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk2_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_hws = axg_vclk_parent_hws,
+ .num_parents = ARRAY_SIZE(axg_vclk_parent_hws),
+ .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_regmap axg_vclk_input = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_DIV,
+ .bit_idx = 16,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk_input",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) { &axg_vclk_sel.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap axg_vclk2_input = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VIID_CLK_DIV,
+ .bit_idx = 16,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk2_input",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) { &axg_vclk2_sel.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap axg_vclk_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VID_CLK_DIV,
+ .shift = 0,
+ .width = 8,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_vclk_input.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_regmap axg_vclk2_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VIID_CLK_DIV,
+ .shift = 0,
+ .width = 8,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk2_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_vclk2_input.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_regmap axg_vclk = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_CNTL,
+ .bit_idx = 19,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) { &axg_vclk_div.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap axg_vclk2 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VIID_CLK_CNTL,
+ .bit_idx = 19,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk2",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) { &axg_vclk2_div.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap axg_vclk_div1 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_CNTL,
+ .bit_idx = 0,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk_div1",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) { &axg_vclk.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap axg_vclk_div2_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_CNTL,
+ .bit_idx = 1,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk_div2_en",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) { &axg_vclk.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap axg_vclk_div4_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_CNTL,
+ .bit_idx = 2,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk_div4_en",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) { &axg_vclk.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap axg_vclk_div6_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_CNTL,
+ .bit_idx = 3,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk_div6_en",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) { &axg_vclk.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap axg_vclk_div12_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_CNTL,
+ .bit_idx = 4,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk_div12_en",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) { &axg_vclk.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap axg_vclk2_div1 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VIID_CLK_CNTL,
+ .bit_idx = 0,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk2_div1",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) { &axg_vclk2.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap axg_vclk2_div2_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VIID_CLK_CNTL,
+ .bit_idx = 1,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk2_div2_en",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) { &axg_vclk2.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap axg_vclk2_div4_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VIID_CLK_CNTL,
+ .bit_idx = 2,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk2_div4_en",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) { &axg_vclk2.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap axg_vclk2_div6_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VIID_CLK_CNTL,
+ .bit_idx = 3,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk2_div6_en",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) { &axg_vclk2.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap axg_vclk2_div12_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VIID_CLK_CNTL,
+ .bit_idx = 4,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk2_div12_en",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) { &axg_vclk2.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_fixed_factor axg_vclk_div2 = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk_div2",
+ .ops = &clk_fixed_factor_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_vclk_div2_en.hw
+ },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor axg_vclk_div4 = {
+ .mult = 1,
+ .div = 4,
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk_div4",
+ .ops = &clk_fixed_factor_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_vclk_div4_en.hw
+ },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor axg_vclk_div6 = {
+ .mult = 1,
+ .div = 6,
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk_div6",
+ .ops = &clk_fixed_factor_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_vclk_div6_en.hw
+ },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor axg_vclk_div12 = {
+ .mult = 1,
+ .div = 12,
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk_div12",
+ .ops = &clk_fixed_factor_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_vclk_div12_en.hw
+ },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor axg_vclk2_div2 = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk2_div2",
+ .ops = &clk_fixed_factor_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_vclk2_div2_en.hw
+ },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor axg_vclk2_div4 = {
+ .mult = 1,
+ .div = 4,
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk2_div4",
+ .ops = &clk_fixed_factor_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_vclk2_div4_en.hw
+ },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor axg_vclk2_div6 = {
+ .mult = 1,
+ .div = 6,
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk2_div6",
+ .ops = &clk_fixed_factor_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_vclk2_div6_en.hw
+ },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor axg_vclk2_div12 = {
+ .mult = 1,
+ .div = 12,
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk2_div12",
+ .ops = &clk_fixed_factor_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_vclk2_div12_en.hw
+ },
+ .num_parents = 1,
+ },
+};
+
+static u32 mux_table_cts_sel[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
+static const struct clk_hw *axg_cts_parent_hws[] = {
+ &axg_vclk_div1.hw,
+ &axg_vclk_div2.hw,
+ &axg_vclk_div4.hw,
+ &axg_vclk_div6.hw,
+ &axg_vclk_div12.hw,
+ &axg_vclk2_div1.hw,
+ &axg_vclk2_div2.hw,
+ &axg_vclk2_div4.hw,
+ &axg_vclk2_div6.hw,
+ &axg_vclk2_div12.hw,
+};
+
+static struct clk_regmap axg_cts_encl_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VIID_CLK_DIV,
+ .mask = 0xf,
+ .shift = 12,
+ .table = mux_table_cts_sel,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cts_encl_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_hws = axg_cts_parent_hws,
+ .num_parents = ARRAY_SIZE(axg_cts_parent_hws),
+ .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_regmap axg_cts_encl = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_CNTL2,
+ .bit_idx = 3,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "cts_encl",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_cts_encl_sel.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+/* MIPI DSI Host Clock */
+
+static u32 mux_table_axg_vdin_meas[] = { 0, 1, 2, 3, 6, 7 };
+static const struct clk_parent_data axg_vdin_meas_parent_data[] = {
+ { .fw_name = "xtal", },
+ { .hw = &axg_fclk_div4.hw },
+ { .hw = &axg_fclk_div3.hw },
+ { .hw = &axg_fclk_div5.hw },
+ { .hw = &axg_fclk_div2.hw },
+ { .hw = &axg_fclk_div7.hw },
+};
+
+static struct clk_regmap axg_vdin_meas_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VDIN_MEAS_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 21,
+ .flags = CLK_MUX_ROUND_CLOSEST,
+ .table = mux_table_axg_vdin_meas,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vdin_meas_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_data = axg_vdin_meas_parent_data,
+ .num_parents = ARRAY_SIZE(axg_vdin_meas_parent_data),
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap axg_vdin_meas_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VDIN_MEAS_CLK_CNTL,
+ .shift = 12,
+ .width = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vdin_meas_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_vdin_meas_sel.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap axg_vdin_meas = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VDIN_MEAS_CLK_CNTL,
+ .bit_idx = 20,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vdin_meas",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_vdin_meas_div.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
static u32 mux_table_gen_clk[] = { 0, 4, 5, 6, 7, 8,
9, 10, 11, 13, 14, };
static const struct clk_parent_data gen_clk_parent_data[] = {
@@ -1246,6 +1984,52 @@ static struct clk_hw_onecell_data axg_hw_onecell_data = {
[CLKID_HIFI_PLL_DCO] = &axg_hifi_pll_dco.hw,
[CLKID_PCIE_PLL_DCO] = &axg_pcie_pll_dco.hw,
[CLKID_PCIE_PLL_OD] = &axg_pcie_pll_od.hw,
+ [CLKID_VPU_0_DIV] = &axg_vpu_0_div.hw,
+ [CLKID_VPU_0_SEL] = &axg_vpu_0_sel.hw,
+ [CLKID_VPU_0] = &axg_vpu_0.hw,
+ [CLKID_VPU_1_DIV] = &axg_vpu_1_div.hw,
+ [CLKID_VPU_1_SEL] = &axg_vpu_1_sel.hw,
+ [CLKID_VPU_1] = &axg_vpu_1.hw,
+ [CLKID_VPU] = &axg_vpu.hw,
+ [CLKID_VAPB_0_DIV] = &axg_vapb_0_div.hw,
+ [CLKID_VAPB_0_SEL] = &axg_vapb_0_sel.hw,
+ [CLKID_VAPB_0] = &axg_vapb_0.hw,
+ [CLKID_VAPB_1_DIV] = &axg_vapb_1_div.hw,
+ [CLKID_VAPB_1_SEL] = &axg_vapb_1_sel.hw,
+ [CLKID_VAPB_1] = &axg_vapb_1.hw,
+ [CLKID_VAPB_SEL] = &axg_vapb_sel.hw,
+ [CLKID_VAPB] = &axg_vapb.hw,
+ [CLKID_VCLK] = &axg_vclk.hw,
+ [CLKID_VCLK2] = &axg_vclk2.hw,
+ [CLKID_VCLK_SEL] = &axg_vclk_sel.hw,
+ [CLKID_VCLK2_SEL] = &axg_vclk2_sel.hw,
+ [CLKID_VCLK_INPUT] = &axg_vclk_input.hw,
+ [CLKID_VCLK2_INPUT] = &axg_vclk2_input.hw,
+ [CLKID_VCLK_DIV] = &axg_vclk_div.hw,
+ [CLKID_VCLK2_DIV] = &axg_vclk2_div.hw,
+ [CLKID_VCLK_DIV2_EN] = &axg_vclk_div2_en.hw,
+ [CLKID_VCLK_DIV4_EN] = &axg_vclk_div4_en.hw,
+ [CLKID_VCLK_DIV6_EN] = &axg_vclk_div6_en.hw,
+ [CLKID_VCLK_DIV12_EN] = &axg_vclk_div12_en.hw,
+ [CLKID_VCLK2_DIV2_EN] = &axg_vclk2_div2_en.hw,
+ [CLKID_VCLK2_DIV4_EN] = &axg_vclk2_div4_en.hw,
+ [CLKID_VCLK2_DIV6_EN] = &axg_vclk2_div6_en.hw,
+ [CLKID_VCLK2_DIV12_EN] = &axg_vclk2_div12_en.hw,
+ [CLKID_VCLK_DIV1] = &axg_vclk_div1.hw,
+ [CLKID_VCLK_DIV2] = &axg_vclk_div2.hw,
+ [CLKID_VCLK_DIV4] = &axg_vclk_div4.hw,
+ [CLKID_VCLK_DIV6] = &axg_vclk_div6.hw,
+ [CLKID_VCLK_DIV12] = &axg_vclk_div12.hw,
+ [CLKID_VCLK2_DIV1] = &axg_vclk2_div1.hw,
+ [CLKID_VCLK2_DIV2] = &axg_vclk2_div2.hw,
+ [CLKID_VCLK2_DIV4] = &axg_vclk2_div4.hw,
+ [CLKID_VCLK2_DIV6] = &axg_vclk2_div6.hw,
+ [CLKID_VCLK2_DIV12] = &axg_vclk2_div12.hw,
+ [CLKID_CTS_ENCL_SEL] = &axg_cts_encl_sel.hw,
+ [CLKID_CTS_ENCL] = &axg_cts_encl.hw,
+ [CLKID_VDIN_MEAS_SEL] = &axg_vdin_meas_sel.hw,
+ [CLKID_VDIN_MEAS_DIV] = &axg_vdin_meas_div.hw,
+ [CLKID_VDIN_MEAS] = &axg_vdin_meas.hw,
[NR_CLKS] = NULL,
},
.num = NR_CLKS,
@@ -1341,6 +2125,42 @@ static struct clk_regmap *const axg_clk_regmaps[] = {
&axg_hifi_pll_dco,
&axg_pcie_pll_dco,
&axg_pcie_pll_od,
+ &axg_vpu_0_div,
+ &axg_vpu_0_sel,
+ &axg_vpu_0,
+ &axg_vpu_1_div,
+ &axg_vpu_1_sel,
+ &axg_vpu_1,
+ &axg_vpu,
+ &axg_vapb_0_div,
+ &axg_vapb_0_sel,
+ &axg_vapb_0,
+ &axg_vapb_1_div,
+ &axg_vapb_1_sel,
+ &axg_vapb_1,
+ &axg_vapb_sel,
+ &axg_vapb,
+ &axg_vclk,
+ &axg_vclk2,
+ &axg_vclk_sel,
+ &axg_vclk2_sel,
+ &axg_vclk_input,
+ &axg_vclk2_input,
+ &axg_vclk_div,
+ &axg_vclk2_div,
+ &axg_vclk_div2_en,
+ &axg_vclk_div4_en,
+ &axg_vclk_div6_en,
+ &axg_vclk_div12_en,
+ &axg_vclk2_div2_en,
+ &axg_vclk2_div4_en,
+ &axg_vclk2_div6_en,
+ &axg_vclk2_div12_en,
+ &axg_cts_encl_sel,
+ &axg_cts_encl,
+ &axg_vdin_meas_sel,
+ &axg_vdin_meas_div,
+ &axg_vdin_meas,
};
static const struct meson_eeclkc_data axg_clkc_data = {
@@ -1354,6 +2174,7 @@ static const struct of_device_id clkc_match_table[] = {
{ .compatible = "amlogic,axg-clkc", .data = &axg_clkc_data },
{}
};
+MODULE_DEVICE_TABLE(of, clkc_match_table);
static struct platform_driver axg_driver = {
.probe = meson_eeclkc_probe,
@@ -1363,4 +2184,5 @@ static struct platform_driver axg_driver = {
},
};
-builtin_platform_driver(axg_driver);
+module_platform_driver(axg_driver);
+MODULE_LICENSE("GPL v2");
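The new AXG video tree fans each of vclk/vclk2 out through five per-tap enable gates (div1 and div2/4/6/12_en) into fixed-factor dividers, and cts_encl_sel picks one of the ten taps via mux_table_cts_sel (values 0-4 for the vclk taps, 8-12 for vclk2). Tap rates follow the usual fixed-factor relation; a sketch with an assumed parent rate:

static unsigned long fixed_factor_rate(unsigned long parent_rate,
				       unsigned int mult, unsigned int div)
{
	/* divide first to keep headroom on 32-bit unsigned long */
	return parent_rate / div * mult;
}

/*
 * With vclk at an assumed (illustrative) 1485 MHz:
 * fixed_factor_rate(1485000000, 1, 6) == 247500000, the rate cts_encl
 * sees when cts_encl_sel selects the vclk_div6 tap.
 */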
diff --git a/drivers/clk/meson/axg.h b/drivers/clk/meson/axg.h
index 0431dabac629..481b307ea3cb 100644
--- a/drivers/clk/meson/axg.h
+++ b/drivers/clk/meson/axg.h
@@ -139,8 +139,29 @@
#define CLKID_HIFI_PLL_DCO 88
#define CLKID_PCIE_PLL_DCO 89
#define CLKID_PCIE_PLL_OD 90
+#define CLKID_VPU_0_DIV 91
+#define CLKID_VPU_1_DIV 94
+#define CLKID_VAPB_0_DIV 98
+#define CLKID_VAPB_1_DIV 101
+#define CLKID_VCLK_SEL 108
+#define CLKID_VCLK2_SEL 109
+#define CLKID_VCLK_INPUT 110
+#define CLKID_VCLK2_INPUT 111
+#define CLKID_VCLK_DIV 112
+#define CLKID_VCLK2_DIV 113
+#define CLKID_VCLK_DIV2_EN 114
+#define CLKID_VCLK_DIV4_EN 115
+#define CLKID_VCLK_DIV6_EN 116
+#define CLKID_VCLK_DIV12_EN 117
+#define CLKID_VCLK2_DIV2_EN 118
+#define CLKID_VCLK2_DIV4_EN 119
+#define CLKID_VCLK2_DIV6_EN 120
+#define CLKID_VCLK2_DIV12_EN 121
+#define CLKID_CTS_ENCL_SEL 132
+#define CLKID_VDIN_MEAS_SEL 134
+#define CLKID_VDIN_MEAS_DIV 135
-#define NR_CLKS 91
+#define NR_CLKS 137
/* include the CLKIDs that have been made part of the DT binding */
#include <dt-bindings/clock/axg-clkc.h>
diff --git a/drivers/clk/meson/g12a-aoclk.c b/drivers/clk/meson/g12a-aoclk.c
index 62499563e4f5..b52990e574d2 100644
--- a/drivers/clk/meson/g12a-aoclk.c
+++ b/drivers/clk/meson/g12a-aoclk.c
@@ -12,6 +12,7 @@
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/mfd/syscon.h>
+#include <linux/module.h>
#include "meson-aoclk.h"
#include "g12a-aoclk.h"
@@ -461,6 +462,7 @@ static const struct of_device_id g12a_aoclkc_match_table[] = {
},
{ }
};
+MODULE_DEVICE_TABLE(of, g12a_aoclkc_match_table);
static struct platform_driver g12a_aoclkc_driver = {
.probe = meson_aoclkc_probe,
@@ -470,4 +472,5 @@ static struct platform_driver g12a_aoclkc_driver = {
},
};
-builtin_platform_driver(g12a_aoclkc_driver);
+module_platform_driver(g12a_aoclkc_driver);
+MODULE_LICENSE("GPL v2");
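The g12a.c hunks below retire the __clk_lookup()-by-name pattern: devm_clk_hw_get_clk() returns a struct clk for a clk_hw the driver already owns, and devm_clk_notifier_register() ties the notifier's lifetime to the device, which matters once the driver can be unbound or built as a module. The recurring pattern, factored into one sketch (the patch open-codes it per clock and skips the IS_ERR check):

#include <linux/clk.h>
#include <linux/clk-provider.h>

static int example_setup_notifier(struct device *dev, struct clk_hw *hw,
				  struct notifier_block *nb)
{
	/* replaces: notifier_clk = __clk_lookup(clk_hw_get_name(hw)); */
	struct clk *clk = devm_clk_hw_get_clk(dev, hw, "dvfs");

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* devres unregisters the notifier and puts the clk on unbind */
	return devm_clk_notifier_register(dev, clk, nb);
}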
diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c
index b814d44917a5..b080359b4645 100644
--- a/drivers/clk/meson/g12a.c
+++ b/drivers/clk/meson/g12a.c
@@ -15,6 +15,7 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
+#include <linux/module.h>
#include "clk-mpll.h"
#include "clk-pll.h"
@@ -3657,6 +3658,68 @@ static struct clk_regmap g12a_hdmi_tx = {
},
};
+/* MIPI DSI Host Clocks */
+
+static const struct clk_hw *g12a_mipi_dsi_pxclk_parent_hws[] = {
+ &g12a_vid_pll.hw,
+ &g12a_gp0_pll.hw,
+ &g12a_hifi_pll.hw,
+ &g12a_mpll1.hw,
+ &g12a_fclk_div2.hw,
+ &g12a_fclk_div2p5.hw,
+ &g12a_fclk_div3.hw,
+ &g12a_fclk_div7.hw,
+};
+
+static struct clk_regmap g12a_mipi_dsi_pxclk_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_MIPIDSI_PHY_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 12,
+ .flags = CLK_MUX_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mipi_dsi_pxclk_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_hws = g12a_mipi_dsi_pxclk_parent_hws,
+ .num_parents = ARRAY_SIZE(g12a_mipi_dsi_pxclk_parent_hws),
+ .flags = CLK_SET_RATE_NO_REPARENT,
+ },
+};
+
+static struct clk_regmap g12a_mipi_dsi_pxclk_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_MIPIDSI_PHY_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mipi_dsi_pxclk_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_mipi_dsi_pxclk_sel.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_mipi_dsi_pxclk = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MIPIDSI_PHY_CLK_CNTL,
+ .bit_idx = 8,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "mipi_dsi_pxclk",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_mipi_dsi_pxclk_div.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
/* HDMI Clocks */
static const struct clk_parent_data g12a_hdmi_parent_data[] = {
@@ -4402,6 +4465,9 @@ static struct clk_hw_onecell_data g12a_hw_onecell_data = {
[CLKID_SPICC1_SCLK_SEL] = &g12a_spicc1_sclk_sel.hw,
[CLKID_SPICC1_SCLK_DIV] = &g12a_spicc1_sclk_div.hw,
[CLKID_SPICC1_SCLK] = &g12a_spicc1_sclk.hw,
+ [CLKID_MIPI_DSI_PXCLK_SEL] = &g12a_mipi_dsi_pxclk_sel.hw,
+ [CLKID_MIPI_DSI_PXCLK_DIV] = &g12a_mipi_dsi_pxclk_div.hw,
+ [CLKID_MIPI_DSI_PXCLK] = &g12a_mipi_dsi_pxclk.hw,
[NR_CLKS] = NULL,
},
.num = NR_CLKS,
@@ -4657,6 +4723,9 @@ static struct clk_hw_onecell_data g12b_hw_onecell_data = {
[CLKID_SPICC1_SCLK_SEL] = &g12a_spicc1_sclk_sel.hw,
[CLKID_SPICC1_SCLK_DIV] = &g12a_spicc1_sclk_div.hw,
[CLKID_SPICC1_SCLK] = &g12a_spicc1_sclk.hw,
+ [CLKID_MIPI_DSI_PXCLK_SEL] = &g12a_mipi_dsi_pxclk_sel.hw,
+ [CLKID_MIPI_DSI_PXCLK_DIV] = &g12a_mipi_dsi_pxclk_div.hw,
+ [CLKID_MIPI_DSI_PXCLK] = &g12a_mipi_dsi_pxclk.hw,
[NR_CLKS] = NULL,
},
.num = NR_CLKS,
@@ -4903,6 +4972,9 @@ static struct clk_hw_onecell_data sm1_hw_onecell_data = {
[CLKID_NNA_CORE_CLK_SEL] = &sm1_nna_core_clk_sel.hw,
[CLKID_NNA_CORE_CLK_DIV] = &sm1_nna_core_clk_div.hw,
[CLKID_NNA_CORE_CLK] = &sm1_nna_core_clk.hw,
+ [CLKID_MIPI_DSI_PXCLK_SEL] = &g12a_mipi_dsi_pxclk_sel.hw,
+ [CLKID_MIPI_DSI_PXCLK_DIV] = &g12a_mipi_dsi_pxclk_div.hw,
+ [CLKID_MIPI_DSI_PXCLK] = &g12a_mipi_dsi_pxclk.hw,
[NR_CLKS] = NULL,
},
.num = NR_CLKS,
@@ -5150,16 +5222,20 @@ static struct clk_regmap *const g12a_clk_regmaps[] = {
&sm1_nna_core_clk_sel,
&sm1_nna_core_clk_div,
&sm1_nna_core_clk,
+ &g12a_mipi_dsi_pxclk_sel,
+ &g12a_mipi_dsi_pxclk_div,
+ &g12a_mipi_dsi_pxclk,
};
static const struct reg_sequence g12a_init_regs[] = {
{ .reg = HHI_MPLL_CNTL0, .def = 0x00000543 },
};
-static int meson_g12a_dvfs_setup_common(struct platform_device *pdev,
+#define DVFS_CON_ID "dvfs"
+
+static int meson_g12a_dvfs_setup_common(struct device *dev,
struct clk_hw **hws)
{
- const char *notifier_clk_name;
struct clk *notifier_clk;
struct clk_hw *xtal;
int ret;
@@ -5168,21 +5244,22 @@ static int meson_g12a_dvfs_setup_common(struct platform_device *pdev,
/* Setup clock notifier for cpu_clk_postmux0 */
g12a_cpu_clk_postmux0_nb_data.xtal = xtal;
- notifier_clk_name = clk_hw_get_name(&g12a_cpu_clk_postmux0.hw);
- notifier_clk = __clk_lookup(notifier_clk_name);
- ret = clk_notifier_register(notifier_clk,
- &g12a_cpu_clk_postmux0_nb_data.nb);
+ notifier_clk = devm_clk_hw_get_clk(dev, &g12a_cpu_clk_postmux0.hw,
+ DVFS_CON_ID);
+ ret = devm_clk_notifier_register(dev, notifier_clk,
+ &g12a_cpu_clk_postmux0_nb_data.nb);
if (ret) {
- dev_err(&pdev->dev, "failed to register the cpu_clk_postmux0 notifier\n");
+ dev_err(dev, "failed to register the cpu_clk_postmux0 notifier\n");
return ret;
}
/* Setup clock notifier for cpu_clk_dyn mux */
- notifier_clk_name = clk_hw_get_name(&g12a_cpu_clk_dyn.hw);
- notifier_clk = __clk_lookup(notifier_clk_name);
- ret = clk_notifier_register(notifier_clk, &g12a_cpu_clk_mux_nb);
+ notifier_clk = devm_clk_hw_get_clk(dev, &g12a_cpu_clk_dyn.hw,
+ DVFS_CON_ID);
+ ret = devm_clk_notifier_register(dev, notifier_clk,
+ &g12a_cpu_clk_mux_nb);
if (ret) {
- dev_err(&pdev->dev, "failed to register the cpu_clk_dyn notifier\n");
+ dev_err(dev, "failed to register the cpu_clk_dyn notifier\n");
return ret;
}
@@ -5192,33 +5269,34 @@ static int meson_g12a_dvfs_setup_common(struct platform_device *pdev,
static int meson_g12b_dvfs_setup(struct platform_device *pdev)
{
struct clk_hw **hws = g12b_hw_onecell_data.hws;
- const char *notifier_clk_name;
+ struct device *dev = &pdev->dev;
struct clk *notifier_clk;
struct clk_hw *xtal;
int ret;
- ret = meson_g12a_dvfs_setup_common(pdev, hws);
+ ret = meson_g12a_dvfs_setup_common(dev, hws);
if (ret)
return ret;
xtal = clk_hw_get_parent_by_index(hws[CLKID_CPU_CLK_DYN1_SEL], 0);
/* Setup clock notifier for cpu_clk mux */
- notifier_clk_name = clk_hw_get_name(&g12b_cpu_clk.hw);
- notifier_clk = __clk_lookup(notifier_clk_name);
- ret = clk_notifier_register(notifier_clk, &g12a_cpu_clk_mux_nb);
+ notifier_clk = devm_clk_hw_get_clk(dev, &g12b_cpu_clk.hw,
+ DVFS_CON_ID);
+ ret = devm_clk_notifier_register(dev, notifier_clk,
+ &g12a_cpu_clk_mux_nb);
if (ret) {
- dev_err(&pdev->dev, "failed to register the cpu_clk notifier\n");
+ dev_err(dev, "failed to register the cpu_clk notifier\n");
return ret;
}
/* Setup clock notifier for sys1_pll */
- notifier_clk_name = clk_hw_get_name(&g12b_sys1_pll.hw);
- notifier_clk = __clk_lookup(notifier_clk_name);
- ret = clk_notifier_register(notifier_clk,
- &g12b_cpu_clk_sys1_pll_nb_data.nb);
+ notifier_clk = devm_clk_hw_get_clk(dev, &g12b_sys1_pll.hw,
+ DVFS_CON_ID);
+ ret = devm_clk_notifier_register(dev, notifier_clk,
+ &g12b_cpu_clk_sys1_pll_nb_data.nb);
if (ret) {
- dev_err(&pdev->dev, "failed to register the sys1_pll notifier\n");
+ dev_err(dev, "failed to register the sys1_pll notifier\n");
return ret;
}
@@ -5226,40 +5304,39 @@ static int meson_g12b_dvfs_setup(struct platform_device *pdev)
/* Setup clock notifier for cpub_clk_postmux0 */
g12b_cpub_clk_postmux0_nb_data.xtal = xtal;
- notifier_clk_name = clk_hw_get_name(&g12b_cpub_clk_postmux0.hw);
- notifier_clk = __clk_lookup(notifier_clk_name);
- ret = clk_notifier_register(notifier_clk,
- &g12b_cpub_clk_postmux0_nb_data.nb);
+ notifier_clk = devm_clk_hw_get_clk(dev, &g12b_cpub_clk_postmux0.hw,
+ DVFS_CON_ID);
+ ret = devm_clk_notifier_register(dev, notifier_clk,
+ &g12b_cpub_clk_postmux0_nb_data.nb);
if (ret) {
- dev_err(&pdev->dev, "failed to register the cpub_clk_postmux0 notifier\n");
+ dev_err(dev, "failed to register the cpub_clk_postmux0 notifier\n");
return ret;
}
/* Setup clock notifier for cpub_clk_dyn mux */
- notifier_clk_name = clk_hw_get_name(&g12b_cpub_clk_dyn.hw);
- notifier_clk = __clk_lookup(notifier_clk_name);
- ret = clk_notifier_register(notifier_clk, &g12a_cpu_clk_mux_nb);
+ notifier_clk = devm_clk_hw_get_clk(dev, &g12b_cpub_clk_dyn.hw,
+ DVFS_CON_ID);
+ ret = devm_clk_notifier_register(dev, notifier_clk,
+ &g12a_cpu_clk_mux_nb);
if (ret) {
- dev_err(&pdev->dev, "failed to register the cpub_clk_dyn notifier\n");
+ dev_err(dev, "failed to register the cpub_clk_dyn notifier\n");
return ret;
}
/* Setup clock notifier for cpub_clk mux */
- notifier_clk_name = clk_hw_get_name(&g12b_cpub_clk.hw);
- notifier_clk = __clk_lookup(notifier_clk_name);
- ret = clk_notifier_register(notifier_clk, &g12a_cpu_clk_mux_nb);
+ notifier_clk = devm_clk_hw_get_clk(dev, &g12b_cpub_clk.hw, DVFS_CON_ID);
+ ret = devm_clk_notifier_register(dev, notifier_clk,
+ &g12a_cpu_clk_mux_nb);
if (ret) {
- dev_err(&pdev->dev, "failed to register the cpub_clk notifier\n");
+ dev_err(dev, "failed to register the cpub_clk notifier\n");
return ret;
}
/* Setup clock notifier for sys_pll */
- notifier_clk_name = clk_hw_get_name(&g12a_sys_pll.hw);
- notifier_clk = __clk_lookup(notifier_clk_name);
- ret = clk_notifier_register(notifier_clk,
- &g12b_cpub_clk_sys_pll_nb_data.nb);
+ notifier_clk = devm_clk_hw_get_clk(dev, &g12a_sys_pll.hw, DVFS_CON_ID);
+ ret = devm_clk_notifier_register(dev, notifier_clk,
+ &g12b_cpub_clk_sys_pll_nb_data.nb);
if (ret) {
- dev_err(&pdev->dev, "failed to register the sys_pll notifier\n");
+ dev_err(dev, "failed to register the sys_pll notifier\n");
return ret;
}
@@ -5269,29 +5346,29 @@ static int meson_g12b_dvfs_setup(struct platform_device *pdev)
static int meson_g12a_dvfs_setup(struct platform_device *pdev)
{
struct clk_hw **hws = g12a_hw_onecell_data.hws;
- const char *notifier_clk_name;
+ struct device *dev = &pdev->dev;
struct clk *notifier_clk;
int ret;
- ret = meson_g12a_dvfs_setup_common(pdev, hws);
+ ret = meson_g12a_dvfs_setup_common(dev, hws);
if (ret)
return ret;
/* Setup clock notifier for cpu_clk mux */
- notifier_clk_name = clk_hw_get_name(&g12a_cpu_clk.hw);
- notifier_clk = __clk_lookup(notifier_clk_name);
- ret = clk_notifier_register(notifier_clk, &g12a_cpu_clk_mux_nb);
+ notifier_clk = devm_clk_hw_get_clk(dev, &g12a_cpu_clk.hw, DVFS_CON_ID);
+ ret = devm_clk_notifier_register(dev, notifier_clk,
+ &g12a_cpu_clk_mux_nb);
if (ret) {
- dev_err(&pdev->dev, "failed to register the cpu_clk notifier\n");
+ dev_err(dev, "failed to register the cpu_clk notifier\n");
return ret;
}
/* Setup clock notifier for sys_pll */
- notifier_clk_name = clk_hw_get_name(&g12a_sys_pll.hw);
- notifier_clk = __clk_lookup(notifier_clk_name);
- ret = clk_notifier_register(notifier_clk, &g12a_sys_pll_nb_data.nb);
+ notifier_clk = devm_clk_hw_get_clk(dev, &g12a_sys_pll.hw, DVFS_CON_ID);
+ ret = devm_clk_notifier_register(dev, notifier_clk,
+ &g12a_sys_pll_nb_data.nb);
if (ret) {
- dev_err(&pdev->dev, "failed to register the sys_pll notifier\n");
+ dev_err(dev, "failed to register the sys_pll notifier\n");
return ret;
}
@@ -5370,6 +5447,7 @@ static const struct of_device_id clkc_match_table[] = {
},
{}
};
+MODULE_DEVICE_TABLE(of, clkc_match_table);
static struct platform_driver g12a_driver = {
.probe = meson_g12a_probe,
@@ -5379,4 +5457,5 @@ static struct platform_driver g12a_driver = {
},
};
-builtin_platform_driver(g12a_driver);
+module_platform_driver(g12a_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/meson/g12a.h b/drivers/clk/meson/g12a.h
index 69b6a69549c7..a97613df38b3 100644
--- a/drivers/clk/meson/g12a.h
+++ b/drivers/clk/meson/g12a.h
@@ -264,8 +264,9 @@
#define CLKID_NNA_AXI_CLK_DIV 263
#define CLKID_NNA_CORE_CLK_SEL 265
#define CLKID_NNA_CORE_CLK_DIV 266
+#define CLKID_MIPI_DSI_PXCLK_DIV 268
-#define NR_CLKS 268
+#define NR_CLKS 271
/* include the CLKIDs that have been made part of the DT binding */
#include <dt-bindings/clock/g12a-clkc.h>
diff --git a/drivers/clk/meson/gxbb-aoclk.c b/drivers/clk/meson/gxbb-aoclk.c
index e940861a396b..fce95cf89836 100644
--- a/drivers/clk/meson/gxbb-aoclk.c
+++ b/drivers/clk/meson/gxbb-aoclk.c
@@ -5,6 +5,7 @@
*/
#include <linux/platform_device.h>
#include <linux/mfd/syscon.h>
+#include <linux/module.h>
#include "meson-aoclk.h"
#include "gxbb-aoclk.h"
@@ -287,6 +288,7 @@ static const struct of_device_id gxbb_aoclkc_match_table[] = {
},
{ }
};
+MODULE_DEVICE_TABLE(of, gxbb_aoclkc_match_table);
static struct platform_driver gxbb_aoclkc_driver = {
.probe = meson_aoclkc_probe,
@@ -295,4 +297,5 @@ static struct platform_driver gxbb_aoclkc_driver = {
.of_match_table = gxbb_aoclkc_match_table,
},
};
-builtin_platform_driver(gxbb_aoclkc_driver);
+module_platform_driver(gxbb_aoclkc_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
index 0a68af6eec3d..d6eed760327d 100644
--- a/drivers/clk/meson/gxbb.c
+++ b/drivers/clk/meson/gxbb.c
@@ -8,6 +8,7 @@
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/module.h>
#include "gxbb.h"
#include "clk-regmap.h"
@@ -3519,6 +3520,7 @@ static const struct of_device_id clkc_match_table[] = {
{ .compatible = "amlogic,gxl-clkc", .data = &gxl_clkc_data },
{},
};
+MODULE_DEVICE_TABLE(of, clkc_match_table);
static struct platform_driver gxbb_driver = {
.probe = meson_eeclkc_probe,
@@ -3528,4 +3530,5 @@ static struct platform_driver gxbb_driver = {
},
};
-builtin_platform_driver(gxbb_driver);
+module_platform_driver(gxbb_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/meson/meson-aoclk.c b/drivers/clk/meson/meson-aoclk.c
index 3a6d84cd6601..27cd2c1f3f61 100644
--- a/drivers/clk/meson/meson-aoclk.c
+++ b/drivers/clk/meson/meson-aoclk.c
@@ -14,6 +14,8 @@
#include <linux/reset-controller.h>
#include <linux/mfd/syscon.h>
#include <linux/of_device.h>
+#include <linux/module.h>
+
#include <linux/slab.h>
#include "meson-aoclk.h"
@@ -84,3 +86,5 @@ int meson_aoclkc_probe(struct platform_device *pdev)
return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
(void *) data->hw_data);
}
+EXPORT_SYMBOL_GPL(meson_aoclkc_probe);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/meson/meson-eeclk.c b/drivers/clk/meson/meson-eeclk.c
index a7cb1e7aedc4..8d5a5dab955a 100644
--- a/drivers/clk/meson/meson-eeclk.c
+++ b/drivers/clk/meson/meson-eeclk.c
@@ -9,6 +9,7 @@
#include <linux/platform_device.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
+#include <linux/module.h>
#include "clk-regmap.h"
#include "meson-eeclk.h"
@@ -54,3 +55,5 @@ int meson_eeclkc_probe(struct platform_device *pdev)
return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
data->hw_onecell_data);
}
+EXPORT_SYMBOL_GPL(meson_eeclkc_probe);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/mmp/clk-audio.c b/drivers/clk/mmp/clk-audio.c
index eea69d498bd2..7aa7f4a9564f 100644
--- a/drivers/clk/mmp/clk-audio.c
+++ b/drivers/clk/mmp/clk-audio.c
@@ -392,7 +392,8 @@ static int mmp2_audio_clk_remove(struct platform_device *pdev)
return 0;
}
-static int __maybe_unused mmp2_audio_clk_suspend(struct device *dev)
+#ifdef CONFIG_PM
+static int mmp2_audio_clk_suspend(struct device *dev)
{
struct mmp2_audio_clk *priv = dev_get_drvdata(dev);
@@ -404,7 +405,7 @@ static int __maybe_unused mmp2_audio_clk_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused mmp2_audio_clk_resume(struct device *dev)
+static int mmp2_audio_clk_resume(struct device *dev)
{
struct mmp2_audio_clk *priv = dev_get_drvdata(dev);
@@ -415,6 +416,7 @@ static int __maybe_unused mmp2_audio_clk_resume(struct device *dev)
return 0;
}
+#endif
static const struct dev_pm_ops mmp2_audio_clk_pm_ops = {
SET_RUNTIME_PM_OPS(mmp2_audio_clk_suspend, mmp2_audio_clk_resume, NULL)
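On the mmp2 change just above: SET_RUNTIME_PM_OPS() expands to nothing unless CONFIG_PM is set, so the suspend/resume callbacks it names end up unreferenced in !CONFIG_PM builds; wrapping them in #ifdef CONFIG_PM compiles them out, where __maybe_unused merely silenced the resulting warning. A sketch of the shape this leaves a driver in (the example_* names are illustrative):

    #ifdef CONFIG_PM
    static int example_clk_suspend(struct device *dev)
    {
            /* gate clocks, save controller state ... */
            return 0;
    }

    static int example_clk_resume(struct device *dev)
    {
            /* restore state, ungate clocks ... */
            return 0;
    }
    #endif

    /* With CONFIG_PM unset this initializer is empty, so the two callbacks
     * above must not exist either -- hence the matching #ifdef. */
    static const struct dev_pm_ops example_clk_pm_ops = {
            SET_RUNTIME_PM_OPS(example_clk_suspend, example_clk_resume, NULL)
    };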
diff --git a/drivers/clk/mvebu/armada-37xx-xtal.c b/drivers/clk/mvebu/armada-37xx-xtal.c
index e9e306d4e9af..41271351cf1f 100644
--- a/drivers/clk/mvebu/armada-37xx-xtal.c
+++ b/drivers/clk/mvebu/armada-37xx-xtal.c
@@ -13,8 +13,8 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
-#define NB_GPIO1_LATCH 0xC
-#define XTAL_MODE BIT(31)
+#define NB_GPIO1_LATCH 0x8
+#define XTAL_MODE BIT(9)
static int armada_3700_xtal_clock_probe(struct platform_device *pdev)
{
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 3a965bd326d5..d32bb12cd8d0 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -44,7 +44,7 @@ config QCOM_CLK_APCC_MSM8996
help
Support for the CPU clock controller on msm8996 devices.
Say Y if you want to support CPU clock scaling using CPUfreq
- drivers for dyanmic power management.
+ drivers for dynamic power management.
config QCOM_CLK_RPM
tristate "RPM based Clock Controller"
@@ -290,6 +290,15 @@ config QCS_GCC_404
Say Y if you want to use multimedia devices or peripheral
devices such as UART, SPI, I2C, USB, SD/eMMC, PCIe etc.
+config SC_CAMCC_7180
+ tristate "SC7180 Camera Clock Controller"
+ select SC_GCC_7180
+ help
+ Support for the camera clock controller on Qualcomm Technologies, Inc.
+ SC7180 devices.
+ Say Y if you want to support camera devices and functionality such as
+ capturing pictures.
+
config SC_DISPCC_7180
tristate "SC7180 Display Clock Controller"
select SC_GCC_7180
@@ -413,6 +422,14 @@ config SDM_LPASSCC_845
Say Y if you want to use the LPASS branch clocks of the LPASS clock
controller to reset the LPASS subsystem.
+config SDX_GCC_55
+ tristate "SDX55 Global Clock Controller"
+ select QCOM_GDSC
+ help
+ Support for the global clock controller on SDX55 devices.
+ Say Y if you want to use peripheral devices such as UART,
+ SPI, I2C, USB, SD/UFS, PCIe etc.
+
config SM_DISPCC_8250
tristate "SM8150 and SM8250 Display Clock Controller"
depends on SM_GCC_8150 || SM_GCC_8250
@@ -502,4 +519,10 @@ config KRAITCC
Support for the Krait CPU clocks on Qualcomm devices.
Say Y if you want to support CPU frequency scaling.
+config CLK_GFM_LPASS_SM8250
+ tristate "SM8250 GFM LPASS Clocks"
+ help
+ Support for the Glitch Free Mux (GFM) clocks of the low power
+ audio subsystem (LPASS) found on SM8250 SoCs.
+
endif
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 11ae86febe87..9e5e0e3cb7b4 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -19,6 +19,7 @@ clk-qcom-$(CONFIG_QCOM_GDSC) += gdsc.o
# Keep alphabetically sorted by config
obj-$(CONFIG_APQ_GCC_8084) += gcc-apq8084.o
obj-$(CONFIG_APQ_MMCC_8084) += mmcc-apq8084.o
+obj-$(CONFIG_CLK_GFM_LPASS_SM8250) += lpass-gfm-sm8250.o
obj-$(CONFIG_IPQ_APSS_PLL) += apss-ipq-pll.o
obj-$(CONFIG_IPQ_APSS_6018) += apss-ipq6018.o
obj-$(CONFIG_IPQ_GCC_4019) += gcc-ipq4019.o
@@ -51,6 +52,7 @@ obj-$(CONFIG_QCOM_CLK_SMD_RPM) += clk-smd-rpm.o
obj-$(CONFIG_QCS_GCC_404) += gcc-qcs404.o
obj-$(CONFIG_QCS_Q6SSTOP_404) += q6sstop-qcs404.o
obj-$(CONFIG_QCS_TURING_404) += turingcc-qcs404.o
+obj-$(CONFIG_SC_CAMCC_7180) += camcc-sc7180.o
obj-$(CONFIG_SC_DISPCC_7180) += dispcc-sc7180.o
obj-$(CONFIG_SC_GCC_7180) += gcc-sc7180.o
obj-$(CONFIG_SC_GPUCC_7180) += gpucc-sc7180.o
@@ -64,6 +66,7 @@ obj-$(CONFIG_SDM_GCC_845) += gcc-sdm845.o
obj-$(CONFIG_SDM_GPUCC_845) += gpucc-sdm845.o
obj-$(CONFIG_SDM_LPASSCC_845) += lpasscc-sdm845.o
obj-$(CONFIG_SDM_VIDEOCC_845) += videocc-sdm845.o
+obj-$(CONFIG_SDX_GCC_55) += gcc-sdx55.o
obj-$(CONFIG_SM_DISPCC_8250) += dispcc-sm8250.o
obj-$(CONFIG_SM_GCC_8150) += gcc-sm8150.o
obj-$(CONFIG_SM_GCC_8250) += gcc-sm8250.o
diff --git a/drivers/clk/qcom/camcc-sc7180.c b/drivers/clk/qcom/camcc-sc7180.c
new file mode 100644
index 000000000000..dbac5651ab85
--- /dev/null
+++ b/drivers/clk/qcom/camcc-sc7180.c
@@ -0,0 +1,1732 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,camcc-sc7180.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ P_BI_TCXO,
+ P_CAM_CC_PLL0_OUT_EVEN,
+ P_CAM_CC_PLL1_OUT_EVEN,
+ P_CAM_CC_PLL2_OUT_AUX,
+ P_CAM_CC_PLL2_OUT_EARLY,
+ P_CAM_CC_PLL3_OUT_MAIN,
+ P_CORE_BI_PLL_TEST_SE,
+};
+
+static const struct pll_vco agera_vco[] = {
+ { 600000000, 3300000000UL, 0 },
+};
+
+static const struct pll_vco fabia_vco[] = {
+ { 249600000, 2000000000UL, 0 },
+};
+
+/* 600MHz configuration */
+static const struct alpha_pll_config cam_cc_pll0_config = {
+ .l = 0x1f,
+ .alpha = 0x4000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002067,
+ .test_ctl_val = 0x40000000,
+ .user_ctl_hi_val = 0x00004805,
+ .user_ctl_val = 0x00000001,
+};
+
+static struct clk_alpha_pll cam_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = fabia_vco,
+ .num_vco = ARRAY_SIZE(fabia_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_pll0",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fabia_ops,
+ },
+ },
+};
+
+/* 860MHz configuration */
+static const struct alpha_pll_config cam_cc_pll1_config = {
+ .l = 0x2a,
+ .alpha = 0x1555,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002067,
+ .test_ctl_val = 0x40000000,
+ .user_ctl_hi_val = 0x00004805,
+};
+
+static struct clk_alpha_pll cam_cc_pll1 = {
+ .offset = 0x1000,
+ .vco_table = fabia_vco,
+ .num_vco = ARRAY_SIZE(fabia_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_pll1",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fabia_ops,
+ },
+ },
+};
+
+/* 1920MHz configuration */
+static const struct alpha_pll_config cam_cc_pll2_config = {
+ .l = 0x64,
+ .config_ctl_val = 0x20000800,
+ .config_ctl_hi_val = 0x400003D2,
+ .test_ctl_val = 0x04000400,
+ .test_ctl_hi_val = 0x00004000,
+ .user_ctl_val = 0x0000030F,
+};
+
+static struct clk_alpha_pll cam_cc_pll2 = {
+ .offset = 0x2000,
+ .vco_table = agera_vco,
+ .num_vco = ARRAY_SIZE(agera_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_AGERA],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_pll2",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_agera_ops,
+ },
+ },
+};
+
+static struct clk_fixed_factor cam_cc_pll2_out_early = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_pll2_out_early",
+ .parent_names = (const char *[]){ "cam_cc_pll2" },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll2_out_aux[] = {
+ { 0x3, 4 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll2_out_aux = {
+ .offset = 0x2000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_cam_cc_pll2_out_aux,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll2_out_aux),
+ .width = 2,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_AGERA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_pll2_out_aux",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_pll2.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+/* 1080MHz configuration */
+static const struct alpha_pll_config cam_cc_pll3_config = {
+ .l = 0x38,
+ .alpha = 0x4000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002067,
+ .test_ctl_val = 0x40000000,
+ .user_ctl_hi_val = 0x00004805,
+};
+
+static struct clk_alpha_pll cam_cc_pll3 = {
+ .offset = 0x3000,
+ .vco_table = fabia_vco,
+ .num_vco = ARRAY_SIZE(fabia_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_pll3",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fabia_ops,
+ },
+ },
+};
+
+static const struct parent_map cam_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL1_OUT_EVEN, 2 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_0[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &cam_cc_pll1.clkr.hw },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map cam_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL2_OUT_AUX, 1 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_1[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &cam_cc_pll2_out_aux.clkr.hw },
+ { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map cam_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL2_OUT_EARLY, 4 },
+ { P_CAM_CC_PLL3_OUT_MAIN, 5 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_2[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &cam_cc_pll2_out_early.hw },
+ { .hw = &cam_cc_pll3.clkr.hw },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map cam_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL1_OUT_EVEN, 2 },
+ { P_CAM_CC_PLL2_OUT_EARLY, 4 },
+ { P_CAM_CC_PLL3_OUT_MAIN, 5 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_3[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &cam_cc_pll1.clkr.hw },
+ { .hw = &cam_cc_pll2_out_early.hw },
+ { .hw = &cam_cc_pll3.clkr.hw },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map cam_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL3_OUT_MAIN, 5 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_4[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &cam_cc_pll3.clkr.hw },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map cam_cc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_5[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map cam_cc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL1_OUT_EVEN, 2 },
+ { P_CAM_CC_PLL3_OUT_MAIN, 5 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_6[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &cam_cc_pll1.clkr.hw },
+ { .hw = &cam_cc_pll3.clkr.hw },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct freq_tbl ftbl_cam_cc_bps_clk_src[] = {
+ F(200000000, P_CAM_CC_PLL0_OUT_EVEN, 3, 0, 0),
+ F(360000000, P_CAM_CC_PLL3_OUT_MAIN, 3, 0, 0),
+ F(432000000, P_CAM_CC_PLL3_OUT_MAIN, 2.5, 0, 0),
+ F(480000000, P_CAM_CC_PLL2_OUT_EARLY, 2, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_bps_clk_src = {
+ .cmd_rcgr = 0x6010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_2,
+ .freq_tbl = ftbl_cam_cc_bps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_bps_clk_src",
+ .parent_data = cam_cc_parent_data_2,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_cci_0_clk_src[] = {
+ F(37500000, P_CAM_CC_PLL0_OUT_EVEN, 16, 0, 0),
+ F(50000000, P_CAM_CC_PLL0_OUT_EVEN, 12, 0, 0),
+ F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_cci_0_clk_src = {
+ .cmd_rcgr = 0xb0d8,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_5,
+ .freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_cci_0_clk_src",
+ .parent_data = cam_cc_parent_data_5,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_cci_1_clk_src = {
+ .cmd_rcgr = 0xb14c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_5,
+ .freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_cci_1_clk_src",
+ .parent_data = cam_cc_parent_data_5,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_cphy_rx_clk_src[] = {
+ F(150000000, P_CAM_CC_PLL0_OUT_EVEN, 4, 0, 0),
+ F(270000000, P_CAM_CC_PLL3_OUT_MAIN, 4, 0, 0),
+ F(360000000, P_CAM_CC_PLL3_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_cphy_rx_clk_src = {
+ .cmd_rcgr = 0x9064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_3,
+ .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_cphy_rx_clk_src",
+ .parent_data = cam_cc_parent_data_3,
+ .num_parents = 6,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_csi0phytimer_clk_src[] = {
+ F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = {
+ .cmd_rcgr = 0x5004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csi0phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = {
+ .cmd_rcgr = 0x5028,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csi1phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi2phytimer_clk_src = {
+ .cmd_rcgr = 0x504c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csi2phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi3phytimer_clk_src = {
+ .cmd_rcgr = 0x5070,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csi3phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_fast_ahb_clk_src[] = {
+ F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+ F(200000000, P_CAM_CC_PLL0_OUT_EVEN, 3, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ F(404000000, P_CAM_CC_PLL1_OUT_EVEN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_fast_ahb_clk_src = {
+ .cmd_rcgr = 0x603c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_fast_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_fast_ahb_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_icp_clk_src[] = {
+ F(240000000, P_CAM_CC_PLL0_OUT_EVEN, 2.5, 0, 0),
+ F(360000000, P_CAM_CC_PLL3_OUT_MAIN, 3, 0, 0),
+ F(432000000, P_CAM_CC_PLL3_OUT_MAIN, 2.5, 0, 0),
+ F(480000000, P_CAM_CC_PLL2_OUT_EARLY, 2, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_icp_clk_src = {
+ .cmd_rcgr = 0xb088,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_2,
+ .freq_tbl = ftbl_cam_cc_icp_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_icp_clk_src",
+ .parent_data = cam_cc_parent_data_2,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_0_clk_src[] = {
+ F(240000000, P_CAM_CC_PLL0_OUT_EVEN, 2.5, 0, 0),
+ F(360000000, P_CAM_CC_PLL3_OUT_MAIN, 3, 0, 0),
+ F(432000000, P_CAM_CC_PLL3_OUT_MAIN, 2.5, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_0_clk_src = {
+ .cmd_rcgr = 0x9010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_4,
+ .freq_tbl = ftbl_cam_cc_ife_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_0_clk_src",
+ .parent_data = cam_cc_parent_data_4,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_0_csid_clk_src[] = {
+ F(150000000, P_CAM_CC_PLL0_OUT_EVEN, 4, 0, 0),
+ F(270000000, P_CAM_CC_PLL3_OUT_MAIN, 4, 0, 0),
+ F(360000000, P_CAM_CC_PLL3_OUT_MAIN, 3, 0, 0),
+ F(480000000, P_CAM_CC_PLL2_OUT_EARLY, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_0_csid_clk_src = {
+ .cmd_rcgr = 0x903c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_3,
+ .freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_0_csid_clk_src",
+ .parent_data = cam_cc_parent_data_3,
+ .num_parents = 6,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_1_clk_src = {
+ .cmd_rcgr = 0xa010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_4,
+ .freq_tbl = ftbl_cam_cc_ife_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_1_clk_src",
+ .parent_data = cam_cc_parent_data_4,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_1_csid_clk_src = {
+ .cmd_rcgr = 0xa034,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_3,
+ .freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_1_csid_clk_src",
+ .parent_data = cam_cc_parent_data_3,
+ .num_parents = 6,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_clk_src = {
+ .cmd_rcgr = 0xb004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_4,
+ .freq_tbl = ftbl_cam_cc_ife_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_lite_clk_src",
+ .parent_data = cam_cc_parent_data_4,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_csid_clk_src = {
+ .cmd_rcgr = 0xb024,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_3,
+ .freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_lite_csid_clk_src",
+ .parent_data = cam_cc_parent_data_3,
+ .num_parents = 6,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ipe_0_clk_src[] = {
+ F(240000000, P_CAM_CC_PLL0_OUT_EVEN, 2.5, 0, 0),
+ F(360000000, P_CAM_CC_PLL3_OUT_MAIN, 3, 0, 0),
+ F(432000000, P_CAM_CC_PLL3_OUT_MAIN, 2.5, 0, 0),
+ F(540000000, P_CAM_CC_PLL3_OUT_MAIN, 2, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ipe_0_clk_src = {
+ .cmd_rcgr = 0x7010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_2,
+ .freq_tbl = ftbl_cam_cc_ipe_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ipe_0_clk_src",
+ .parent_data = cam_cc_parent_data_2,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_jpeg_clk_src[] = {
+ F(66666667, P_CAM_CC_PLL0_OUT_EVEN, 9, 0, 0),
+ F(133333333, P_CAM_CC_PLL0_OUT_EVEN, 4.5, 0, 0),
+ F(216000000, P_CAM_CC_PLL3_OUT_MAIN, 5, 0, 0),
+ F(320000000, P_CAM_CC_PLL2_OUT_EARLY, 3, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_jpeg_clk_src = {
+ .cmd_rcgr = 0xb04c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_2,
+ .freq_tbl = ftbl_cam_cc_jpeg_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_jpeg_clk_src",
+ .parent_data = cam_cc_parent_data_2,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_lrme_clk_src[] = {
+ F(200000000, P_CAM_CC_PLL0_OUT_EVEN, 3, 0, 0),
+ F(216000000, P_CAM_CC_PLL3_OUT_MAIN, 5, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ F(404000000, P_CAM_CC_PLL1_OUT_EVEN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_lrme_clk_src = {
+ .cmd_rcgr = 0xb0f8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_6,
+ .freq_tbl = ftbl_cam_cc_lrme_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_lrme_clk_src",
+ .parent_data = cam_cc_parent_data_6,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_mclk0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(24000000, P_CAM_CC_PLL2_OUT_AUX, 10, 1, 2),
+ F(64000000, P_CAM_CC_PLL2_OUT_AUX, 7.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_mclk0_clk_src = {
+ .cmd_rcgr = 0x4004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_mclk0_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk1_clk_src = {
+ .cmd_rcgr = 0x4024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_mclk1_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk2_clk_src = {
+ .cmd_rcgr = 0x4044,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_mclk2_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk3_clk_src = {
+ .cmd_rcgr = 0x4064,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_mclk3_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk4_clk_src = {
+ .cmd_rcgr = 0x4084,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_mclk4_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_slow_ahb_clk_src[] = {
+ F(80000000, P_CAM_CC_PLL0_OUT_EVEN, 7.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_slow_ahb_clk_src = {
+ .cmd_rcgr = 0x6058,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_slow_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_slow_ahb_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch cam_cc_bps_ahb_clk = {
+ .halt_reg = 0x6070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6070,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_bps_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_areg_clk = {
+ .halt_reg = 0x6054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_bps_areg_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_axi_clk = {
+ .halt_reg = 0x6038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_bps_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_clk = {
+ .halt_reg = 0x6028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_bps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_bps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_axi_clk = {
+ .halt_reg = 0xb124,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb124,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_camnoc_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_0_clk = {
+ .halt_reg = 0xb0f0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb0f0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_cci_0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_cci_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_1_clk = {
+ .halt_reg = 0xb164,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb164,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_cci_1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_cci_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_core_ahb_clk = {
+ .halt_reg = 0xb144,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0xb144,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_core_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ahb_clk = {
+ .halt_reg = 0xb11c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb11c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_cpas_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi0phytimer_clk = {
+ .halt_reg = 0x501c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x501c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csi0phytimer_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_csi0phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi1phytimer_clk = {
+ .halt_reg = 0x5040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csi1phytimer_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_csi1phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi2phytimer_clk = {
+ .halt_reg = 0x5064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5064,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csi2phytimer_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_csi2phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi3phytimer_clk = {
+ .halt_reg = 0x5088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5088,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csi3phytimer_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_csi3phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy0_clk = {
+ .halt_reg = 0x5020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csiphy0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy1_clk = {
+ .halt_reg = 0x5044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5044,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csiphy1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy2_clk = {
+ .halt_reg = 0x5068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5068,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csiphy2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy3_clk = {
+ .halt_reg = 0x508c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x508c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csiphy3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_clk = {
+ .halt_reg = 0xb0a0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb0a0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_icp_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_icp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_axi_clk = {
+ .halt_reg = 0x9080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_0_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_clk = {
+ .halt_reg = 0x9028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_cphy_rx_clk = {
+ .halt_reg = 0x907c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x907c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_0_cphy_rx_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_csid_clk = {
+ .halt_reg = 0x9054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_0_csid_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_ife_0_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_dsp_clk = {
+ .halt_reg = 0x9038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_0_dsp_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_axi_clk = {
+ .halt_reg = 0xa058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_1_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_clk = {
+ .halt_reg = 0xa028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_ife_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_cphy_rx_clk = {
+ .halt_reg = 0xa054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_1_cphy_rx_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_csid_clk = {
+ .halt_reg = 0xa04c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa04c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_1_csid_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_ife_1_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_dsp_clk = {
+ .halt_reg = 0xa030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_1_dsp_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_ife_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_clk = {
+ .halt_reg = 0xb01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_lite_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_ife_lite_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_cphy_rx_clk = {
+ .halt_reg = 0xb044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb044,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_lite_cphy_rx_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_csid_clk = {
+ .halt_reg = 0xb03c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb03c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_lite_csid_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_ife_lite_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_0_ahb_clk = {
+ .halt_reg = 0x7040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ipe_0_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_0_areg_clk = {
+ .halt_reg = 0x703c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x703c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ipe_0_areg_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_0_axi_clk = {
+ .halt_reg = 0x7038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ipe_0_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_0_clk = {
+ .halt_reg = 0x7028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ipe_0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_ipe_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_jpeg_clk = {
+ .halt_reg = 0xb064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb064,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_jpeg_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_jpeg_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_lrme_clk = {
+ .halt_reg = 0xb110,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb110,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_lrme_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_lrme_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk0_clk = {
+ .halt_reg = 0x401c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x401c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_mclk0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_mclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk1_clk = {
+ .halt_reg = 0x403c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x403c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_mclk1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_mclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk2_clk = {
+ .halt_reg = 0x405c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x405c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_mclk2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_mclk2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk3_clk = {
+ .halt_reg = 0x407c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x407c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_mclk3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_mclk3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk4_clk = {
+ .halt_reg = 0x409c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x409c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_mclk4_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_mclk4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_soc_ahb_clk = {
+ .halt_reg = 0xb140,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb140,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_soc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sys_tmr_clk = {
+ .halt_reg = 0xb0a8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb0a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_sys_tmr_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc bps_gdsc = {
+ .gdscr = 0x6004,
+ .pd = {
+ .name = "bps_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL,
+};
+
+static struct gdsc ife_0_gdsc = {
+ .gdscr = 0x9004,
+ .pd = {
+ .name = "ife_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc ife_1_gdsc = {
+ .gdscr = 0xa004,
+ .pd = {
+ .name = "ife_1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc ipe_0_gdsc = {
+ .gdscr = 0x7004,
+ .pd = {
+ .name = "ipe_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL,
+};
+
+static struct gdsc titan_top_gdsc = {
+ .gdscr = 0xb134,
+ .pd = {
+ .name = "titan_top_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct clk_hw *cam_cc_sc7180_hws[] = {
+ [CAM_CC_PLL2_OUT_EARLY] = &cam_cc_pll2_out_early.hw,
+};
+
+static struct clk_regmap *cam_cc_sc7180_clocks[] = {
+ [CAM_CC_BPS_AHB_CLK] = &cam_cc_bps_ahb_clk.clkr,
+ [CAM_CC_BPS_AREG_CLK] = &cam_cc_bps_areg_clk.clkr,
+ [CAM_CC_BPS_AXI_CLK] = &cam_cc_bps_axi_clk.clkr,
+ [CAM_CC_BPS_CLK] = &cam_cc_bps_clk.clkr,
+ [CAM_CC_BPS_CLK_SRC] = &cam_cc_bps_clk_src.clkr,
+ [CAM_CC_CAMNOC_AXI_CLK] = &cam_cc_camnoc_axi_clk.clkr,
+ [CAM_CC_CCI_0_CLK] = &cam_cc_cci_0_clk.clkr,
+ [CAM_CC_CCI_0_CLK_SRC] = &cam_cc_cci_0_clk_src.clkr,
+ [CAM_CC_CCI_1_CLK] = &cam_cc_cci_1_clk.clkr,
+ [CAM_CC_CCI_1_CLK_SRC] = &cam_cc_cci_1_clk_src.clkr,
+ [CAM_CC_CORE_AHB_CLK] = &cam_cc_core_ahb_clk.clkr,
+ [CAM_CC_CPAS_AHB_CLK] = &cam_cc_cpas_ahb_clk.clkr,
+ [CAM_CC_CPHY_RX_CLK_SRC] = &cam_cc_cphy_rx_clk_src.clkr,
+ [CAM_CC_CSI0PHYTIMER_CLK] = &cam_cc_csi0phytimer_clk.clkr,
+ [CAM_CC_CSI0PHYTIMER_CLK_SRC] = &cam_cc_csi0phytimer_clk_src.clkr,
+ [CAM_CC_CSI1PHYTIMER_CLK] = &cam_cc_csi1phytimer_clk.clkr,
+ [CAM_CC_CSI1PHYTIMER_CLK_SRC] = &cam_cc_csi1phytimer_clk_src.clkr,
+ [CAM_CC_CSI2PHYTIMER_CLK] = &cam_cc_csi2phytimer_clk.clkr,
+ [CAM_CC_CSI2PHYTIMER_CLK_SRC] = &cam_cc_csi2phytimer_clk_src.clkr,
+ [CAM_CC_CSI3PHYTIMER_CLK] = &cam_cc_csi3phytimer_clk.clkr,
+ [CAM_CC_CSI3PHYTIMER_CLK_SRC] = &cam_cc_csi3phytimer_clk_src.clkr,
+ [CAM_CC_CSIPHY0_CLK] = &cam_cc_csiphy0_clk.clkr,
+ [CAM_CC_CSIPHY1_CLK] = &cam_cc_csiphy1_clk.clkr,
+ [CAM_CC_CSIPHY2_CLK] = &cam_cc_csiphy2_clk.clkr,
+ [CAM_CC_CSIPHY3_CLK] = &cam_cc_csiphy3_clk.clkr,
+ [CAM_CC_FAST_AHB_CLK_SRC] = &cam_cc_fast_ahb_clk_src.clkr,
+ [CAM_CC_ICP_CLK] = &cam_cc_icp_clk.clkr,
+ [CAM_CC_ICP_CLK_SRC] = &cam_cc_icp_clk_src.clkr,
+ [CAM_CC_IFE_0_AXI_CLK] = &cam_cc_ife_0_axi_clk.clkr,
+ [CAM_CC_IFE_0_CLK] = &cam_cc_ife_0_clk.clkr,
+ [CAM_CC_IFE_0_CLK_SRC] = &cam_cc_ife_0_clk_src.clkr,
+ [CAM_CC_IFE_0_CPHY_RX_CLK] = &cam_cc_ife_0_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_0_CSID_CLK] = &cam_cc_ife_0_csid_clk.clkr,
+ [CAM_CC_IFE_0_CSID_CLK_SRC] = &cam_cc_ife_0_csid_clk_src.clkr,
+ [CAM_CC_IFE_0_DSP_CLK] = &cam_cc_ife_0_dsp_clk.clkr,
+ [CAM_CC_IFE_1_AXI_CLK] = &cam_cc_ife_1_axi_clk.clkr,
+ [CAM_CC_IFE_1_CLK] = &cam_cc_ife_1_clk.clkr,
+ [CAM_CC_IFE_1_CLK_SRC] = &cam_cc_ife_1_clk_src.clkr,
+ [CAM_CC_IFE_1_CPHY_RX_CLK] = &cam_cc_ife_1_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_1_CSID_CLK] = &cam_cc_ife_1_csid_clk.clkr,
+ [CAM_CC_IFE_1_CSID_CLK_SRC] = &cam_cc_ife_1_csid_clk_src.clkr,
+ [CAM_CC_IFE_1_DSP_CLK] = &cam_cc_ife_1_dsp_clk.clkr,
+ [CAM_CC_IFE_LITE_CLK] = &cam_cc_ife_lite_clk.clkr,
+ [CAM_CC_IFE_LITE_CLK_SRC] = &cam_cc_ife_lite_clk_src.clkr,
+ [CAM_CC_IFE_LITE_CPHY_RX_CLK] = &cam_cc_ife_lite_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_LITE_CSID_CLK] = &cam_cc_ife_lite_csid_clk.clkr,
+ [CAM_CC_IFE_LITE_CSID_CLK_SRC] = &cam_cc_ife_lite_csid_clk_src.clkr,
+ [CAM_CC_IPE_0_AHB_CLK] = &cam_cc_ipe_0_ahb_clk.clkr,
+ [CAM_CC_IPE_0_AREG_CLK] = &cam_cc_ipe_0_areg_clk.clkr,
+ [CAM_CC_IPE_0_AXI_CLK] = &cam_cc_ipe_0_axi_clk.clkr,
+ [CAM_CC_IPE_0_CLK] = &cam_cc_ipe_0_clk.clkr,
+ [CAM_CC_IPE_0_CLK_SRC] = &cam_cc_ipe_0_clk_src.clkr,
+ [CAM_CC_JPEG_CLK] = &cam_cc_jpeg_clk.clkr,
+ [CAM_CC_JPEG_CLK_SRC] = &cam_cc_jpeg_clk_src.clkr,
+ [CAM_CC_LRME_CLK] = &cam_cc_lrme_clk.clkr,
+ [CAM_CC_LRME_CLK_SRC] = &cam_cc_lrme_clk_src.clkr,
+ [CAM_CC_MCLK0_CLK] = &cam_cc_mclk0_clk.clkr,
+ [CAM_CC_MCLK0_CLK_SRC] = &cam_cc_mclk0_clk_src.clkr,
+ [CAM_CC_MCLK1_CLK] = &cam_cc_mclk1_clk.clkr,
+ [CAM_CC_MCLK1_CLK_SRC] = &cam_cc_mclk1_clk_src.clkr,
+ [CAM_CC_MCLK2_CLK] = &cam_cc_mclk2_clk.clkr,
+ [CAM_CC_MCLK2_CLK_SRC] = &cam_cc_mclk2_clk_src.clkr,
+ [CAM_CC_MCLK3_CLK] = &cam_cc_mclk3_clk.clkr,
+ [CAM_CC_MCLK3_CLK_SRC] = &cam_cc_mclk3_clk_src.clkr,
+ [CAM_CC_MCLK4_CLK] = &cam_cc_mclk4_clk.clkr,
+ [CAM_CC_MCLK4_CLK_SRC] = &cam_cc_mclk4_clk_src.clkr,
+ [CAM_CC_PLL0] = &cam_cc_pll0.clkr,
+ [CAM_CC_PLL1] = &cam_cc_pll1.clkr,
+ [CAM_CC_PLL2] = &cam_cc_pll2.clkr,
+ [CAM_CC_PLL2_OUT_AUX] = &cam_cc_pll2_out_aux.clkr,
+ [CAM_CC_PLL3] = &cam_cc_pll3.clkr,
+ [CAM_CC_SLOW_AHB_CLK_SRC] = &cam_cc_slow_ahb_clk_src.clkr,
+ [CAM_CC_SOC_AHB_CLK] = &cam_cc_soc_ahb_clk.clkr,
+ [CAM_CC_SYS_TMR_CLK] = &cam_cc_sys_tmr_clk.clkr,
+};
+
+static struct gdsc *cam_cc_sc7180_gdscs[] = {
+ [BPS_GDSC] = &bps_gdsc,
+ [IFE_0_GDSC] = &ife_0_gdsc,
+ [IFE_1_GDSC] = &ife_1_gdsc,
+ [IPE_0_GDSC] = &ipe_0_gdsc,
+ [TITAN_TOP_GDSC] = &titan_top_gdsc,
+};
+
+static const struct regmap_config cam_cc_sc7180_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xd028,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc cam_cc_sc7180_desc = {
+ .config = &cam_cc_sc7180_regmap_config,
+ .clk_hws = cam_cc_sc7180_hws,
+ .num_clk_hws = ARRAY_SIZE(cam_cc_sc7180_hws),
+ .clks = cam_cc_sc7180_clocks,
+ .num_clks = ARRAY_SIZE(cam_cc_sc7180_clocks),
+ .gdscs = cam_cc_sc7180_gdscs,
+ .num_gdscs = ARRAY_SIZE(cam_cc_sc7180_gdscs),
+};
+
+static const struct of_device_id cam_cc_sc7180_match_table[] = {
+ { .compatible = "qcom,sc7180-camcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cam_cc_sc7180_match_table);
+
+static int cam_cc_sc7180_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_clk_create(&pdev->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = pm_clk_add(&pdev->dev, "xo");
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to acquire XO clock\n");
+ goto disable_pm_runtime;
+ }
+
+ ret = pm_clk_add(&pdev->dev, "iface");
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to acquire iface clock\n");
+ goto disable_pm_runtime;
+ }
+
+ ret = pm_runtime_get(&pdev->dev);
+ if (ret)
+ goto destroy_pm_clk;
+
+ regmap = qcom_cc_map(pdev, &cam_cc_sc7180_desc);
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
+ pm_runtime_put(&pdev->dev);
+ goto destroy_pm_clk;
+ }
+
+ clk_fabia_pll_configure(&cam_cc_pll0, regmap, &cam_cc_pll0_config);
+ clk_fabia_pll_configure(&cam_cc_pll1, regmap, &cam_cc_pll1_config);
+ clk_agera_pll_configure(&cam_cc_pll2, regmap, &cam_cc_pll2_config);
+ clk_fabia_pll_configure(&cam_cc_pll3, regmap, &cam_cc_pll3_config);
+
+ ret = qcom_cc_really_probe(pdev, &cam_cc_sc7180_desc, regmap);
+ pm_runtime_put(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register CAM CC clocks\n");
+ goto destroy_pm_clk;
+ }
+
+ return 0;
+
+destroy_pm_clk:
+ pm_clk_destroy(&pdev->dev);
+
+disable_pm_runtime:
+ pm_runtime_disable(&pdev->dev);
+
+ return ret;
+}
+
+static const struct dev_pm_ops cam_cc_pm_ops = {
+ SET_RUNTIME_PM_OPS(pm_clk_suspend, pm_clk_resume, NULL)
+};
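With CONFIG_PM set, the SET_RUNTIME_PM_OPS() wrapper above fills in the runtime-PM callbacks so that every clock on the device's pm_clk list ("xo" and "iface" here) is gated on runtime suspend and ungated on resume. Roughly (a sketch, not the macro's literal expansion):

    static const struct dev_pm_ops cam_cc_pm_ops = {
            .runtime_suspend = pm_clk_suspend,  /* gates all clocks on the pm_clk list */
            .runtime_resume = pm_clk_resume,    /* ungates them again */
    };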
+
+static struct platform_driver cam_cc_sc7180_driver = {
+ .probe = cam_cc_sc7180_probe,
+ .driver = {
+ .name = "cam_cc-sc7180",
+ .of_match_table = cam_cc_sc7180_match_table,
+ .pm = &cam_cc_pm_ops,
+ },
+};
+
+static int __init cam_cc_sc7180_init(void)
+{
+ return platform_driver_register(&cam_cc_sc7180_driver);
+}
+subsys_initcall(cam_cc_sc7180_init);
+
+static void __exit cam_cc_sc7180_exit(void)
+{
+ platform_driver_unregister(&cam_cc_sc7180_driver);
+}
+module_exit(cam_cc_sc7180_exit);
+
+MODULE_DESCRIPTION("QTI CAM_CC SC7180 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index 564431130a76..21c357c26ec4 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -116,6 +116,16 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_OPMODE] = 0x38,
[PLL_OFF_ALPHA_VAL] = 0x40,
},
+ [CLK_ALPHA_PLL_TYPE_AGERA] = {
+ [PLL_OFF_L_VAL] = 0x04,
+ [PLL_OFF_ALPHA_VAL] = 0x08,
+ [PLL_OFF_USER_CTL] = 0x0c,
+ [PLL_OFF_CONFIG_CTL] = 0x10,
+ [PLL_OFF_CONFIG_CTL_U] = 0x14,
+ [PLL_OFF_TEST_CTL] = 0x18,
+ [PLL_OFF_TEST_CTL_U] = 0x1c,
+ [PLL_OFF_STATUS] = 0x2c,
+ },
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_regs);
@@ -207,6 +217,13 @@ static int wait_for_pll(struct clk_alpha_pll *pll, u32 mask, bool inverse,
#define wait_for_pll_update_ack_clear(pll) \
wait_for_pll(pll, ALPHA_PLL_ACK_LATCH, 1, "update_ack_clear")
+static void clk_alpha_pll_write_config(struct regmap *regmap, unsigned int reg,
+ unsigned int val)
+{
+ if (val)
+ regmap_write(regmap, reg, val);
+}
+
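The helper factors out the "write only when provided" guard repeated throughout the configure functions below: a zero field in struct alpha_pll_config means the register should keep its hardware reset default. A standalone sketch of the pattern, with regmap_write() stubbed out and the values made up:

    #include <stdio.h>

    /* stub standing in for the kernel's regmap_write() */
    static void regmap_write(void *regmap, unsigned int reg, unsigned int val)
    {
            printf("write reg 0x%02x = 0x%08x\n", reg, val);
    }

    static void write_config(void *regmap, unsigned int reg, unsigned int val)
    {
            if (val)        /* 0 == not provided: keep the reset default */
                    regmap_write(regmap, reg, val);
    }

    int main(void)
    {
            write_config(NULL, 0x10, 0x20485699);   /* written */
            write_config(NULL, 0x14, 0);            /* skipped */
            return 0;
    }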
void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config)
{
@@ -1004,33 +1021,19 @@ void clk_fabia_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
{
u32 val, mask;
- if (config->l)
- regmap_write(regmap, PLL_L_VAL(pll), config->l);
-
- if (config->alpha)
- regmap_write(regmap, PLL_FRAC(pll), config->alpha);
-
- if (config->config_ctl_val)
- regmap_write(regmap, PLL_CONFIG_CTL(pll),
+ clk_alpha_pll_write_config(regmap, PLL_L_VAL(pll), config->l);
+ clk_alpha_pll_write_config(regmap, PLL_FRAC(pll), config->alpha);
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL(pll),
config->config_ctl_val);
-
- if (config->config_ctl_hi_val)
- regmap_write(regmap, PLL_CONFIG_CTL_U(pll),
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U(pll),
config->config_ctl_hi_val);
-
- if (config->user_ctl_val)
- regmap_write(regmap, PLL_USER_CTL(pll), config->user_ctl_val);
-
- if (config->user_ctl_hi_val)
- regmap_write(regmap, PLL_USER_CTL_U(pll),
+ clk_alpha_pll_write_config(regmap, PLL_USER_CTL(pll),
+ config->user_ctl_val);
+ clk_alpha_pll_write_config(regmap, PLL_USER_CTL_U(pll),
config->user_ctl_hi_val);
-
- if (config->test_ctl_val)
- regmap_write(regmap, PLL_TEST_CTL(pll),
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL(pll),
config->test_ctl_val);
-
- if (config->test_ctl_hi_val)
- regmap_write(regmap, PLL_TEST_CTL_U(pll),
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U(pll),
config->test_ctl_hi_val);
if (config->post_div_mask) {
@@ -1145,25 +1148,38 @@ static unsigned long alpha_pll_fabia_recalc_rate(struct clk_hw *hw,
return alpha_pll_calc_rate(parent_rate, l, frac, alpha_width);
}
+/*
+ * Due to limited number of bits for fractional rate programming, the
+ * rounded up rate could be marginally higher than the requested rate.
+ */
+static int alpha_pll_check_rate_margin(struct clk_hw *hw,
+ unsigned long rrate, unsigned long rate)
+{
+ unsigned long rate_margin = rate + PLL_RATE_MARGIN;
+
+ if (rrate > rate_margin || rrate < rate) {
+ pr_err("%s: Rounded rate %lu not within range [%lu, %lu)\n",
+ clk_hw_get_name(hw), rrate, rate, rate_margin);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
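A worked example of the window this enforces, assuming the PLL_RATE_MARGIN of 500 Hz defined earlier in this file: for a requested 768 MHz, a rounded-up 768000250 Hz is accepted, 768000600 Hz is over the margin, and any rounded-down rate is rejected outright. A compilable sketch:

    #include <assert.h>

    #define PLL_RATE_MARGIN 500     /* assumed to match clk-alpha-pll.c */

    static int check_rate_margin(unsigned long rrate, unsigned long rate)
    {
            return (rrate > rate + PLL_RATE_MARGIN || rrate < rate) ? -22 /* -EINVAL */ : 0;
    }

    int main(void)
    {
            assert(check_rate_margin(768000250, 768000000) == 0);   /* within margin */
            assert(check_rate_margin(768000600, 768000000) != 0);   /* over margin */
            assert(check_rate_margin(767999999, 768000000) != 0);   /* rounded down */
            return 0;
    }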
static int alpha_pll_fabia_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long prate)
{
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
u32 l, alpha_width = pll_alpha_width(pll);
+ unsigned long rrate;
+ int ret;
u64 a;
- unsigned long rrate, max = rate + PLL_RATE_MARGIN;
rrate = alpha_pll_round_rate(rate, prate, &l, &a, alpha_width);
- /*
- * Due to limited number of bits for fractional rate programming, the
- * rounded up rate could be marginally higher than the requested rate.
- */
- if (rrate > (rate + PLL_RATE_MARGIN) || rrate < rate) {
- pr_err("%s: Rounded rate %lu not within range [%lu, %lu)\n",
- clk_hw_get_name(hw), rrate, rate, max);
- return -EINVAL;
- }
+ ret = alpha_pll_check_rate_margin(hw, rrate, rate);
+ if (ret < 0)
+ return ret;
regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l);
regmap_write(pll->clkr.regmap, PLL_FRAC(pll), a);
@@ -1206,12 +1222,10 @@ static int alpha_pll_fabia_prepare(struct clk_hw *hw)
rrate = alpha_pll_round_rate(cal_freq, clk_hw_get_rate(parent_hw),
&cal_l, &a, alpha_width);
- /*
- * Due to a limited number of bits for fractional rate programming, the
- * rounded up rate could be marginally higher than the requested rate.
- */
- if (rrate > (cal_freq + PLL_RATE_MARGIN) || rrate < cal_freq)
- return -EINVAL;
+
+ ret = alpha_pll_check_rate_margin(hw, rrate, cal_freq);
+ if (ret < 0)
+ return ret;
/* Setup PLL for calibration frequency */
regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), cal_l);
@@ -1388,49 +1402,27 @@ EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_fabia_ops);
void clk_trion_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config)
{
- if (config->l)
- regmap_write(regmap, PLL_L_VAL(pll), config->l);
-
+ clk_alpha_pll_write_config(regmap, PLL_L_VAL(pll), config->l);
regmap_write(regmap, PLL_CAL_L_VAL(pll), TRION_PLL_CAL_VAL);
-
- if (config->alpha)
- regmap_write(regmap, PLL_ALPHA_VAL(pll), config->alpha);
-
- if (config->config_ctl_val)
- regmap_write(regmap, PLL_CONFIG_CTL(pll),
- config->config_ctl_val);
-
- if (config->config_ctl_hi_val)
- regmap_write(regmap, PLL_CONFIG_CTL_U(pll),
- config->config_ctl_hi_val);
-
- if (config->config_ctl_hi1_val)
- regmap_write(regmap, PLL_CONFIG_CTL_U1(pll),
- config->config_ctl_hi1_val);
-
- if (config->user_ctl_val)
- regmap_write(regmap, PLL_USER_CTL(pll),
- config->user_ctl_val);
-
- if (config->user_ctl_hi_val)
- regmap_write(regmap, PLL_USER_CTL_U(pll),
- config->user_ctl_hi_val);
-
- if (config->user_ctl_hi1_val)
- regmap_write(regmap, PLL_USER_CTL_U1(pll),
- config->user_ctl_hi1_val);
-
- if (config->test_ctl_val)
- regmap_write(regmap, PLL_TEST_CTL(pll),
- config->test_ctl_val);
-
- if (config->test_ctl_hi_val)
- regmap_write(regmap, PLL_TEST_CTL_U(pll),
- config->test_ctl_hi_val);
-
- if (config->test_ctl_hi1_val)
- regmap_write(regmap, PLL_TEST_CTL_U1(pll),
- config->test_ctl_hi1_val);
+ clk_alpha_pll_write_config(regmap, PLL_ALPHA_VAL(pll), config->alpha);
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL(pll),
+ config->config_ctl_val);
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U(pll),
+ config->config_ctl_hi_val);
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U1(pll),
+ config->config_ctl_hi1_val);
+ clk_alpha_pll_write_config(regmap, PLL_USER_CTL(pll),
+ config->user_ctl_val);
+ clk_alpha_pll_write_config(regmap, PLL_USER_CTL_U(pll),
+ config->user_ctl_hi_val);
+ clk_alpha_pll_write_config(regmap, PLL_USER_CTL_U1(pll),
+ config->user_ctl_hi1_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL(pll),
+ config->test_ctl_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U(pll),
+ config->test_ctl_hi_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U1(pll),
+ config->test_ctl_hi1_val);
regmap_update_bits(regmap, PLL_MODE(pll), PLL_UPDATE_BYPASS,
PLL_UPDATE_BYPASS);
@@ -1490,14 +1482,9 @@ static int alpha_pll_trion_set_rate(struct clk_hw *hw, unsigned long rate,
rrate = alpha_pll_round_rate(rate, prate, &l, &a, alpha_width);
- /*
- * Due to a limited number of bits for fractional rate programming, the
- * rounded up rate could be marginally higher than the requested rate.
- */
- if (rrate > (rate + PLL_RATE_MARGIN) || rrate < rate) {
- pr_err("Call set rate on the PLL with rounded rates!\n");
- return -EINVAL;
- }
+ ret = alpha_pll_check_rate_margin(hw, rrate, rate);
+ if (ret < 0)
+ return ret;
regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l);
regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), a);
@@ -1561,3 +1548,55 @@ const struct clk_ops clk_alpha_pll_postdiv_lucid_ops = {
.set_rate = clk_alpha_pll_postdiv_fabia_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_lucid_ops);
+
+void clk_agera_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct alpha_pll_config *config)
+{
+ clk_alpha_pll_write_config(regmap, PLL_L_VAL(pll), config->l);
+ clk_alpha_pll_write_config(regmap, PLL_ALPHA_VAL(pll), config->alpha);
+ clk_alpha_pll_write_config(regmap, PLL_USER_CTL(pll),
+ config->user_ctl_val);
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL(pll),
+ config->config_ctl_val);
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U(pll),
+ config->config_ctl_hi_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL(pll),
+ config->test_ctl_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U(pll),
+ config->test_ctl_hi_val);
+}
+EXPORT_SYMBOL_GPL(clk_agera_pll_configure);
+
+static int clk_alpha_pll_agera_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 l, alpha_width = pll_alpha_width(pll);
+ int ret;
+ unsigned long rrate;
+ u64 a;
+
+ rrate = alpha_pll_round_rate(rate, prate, &l, &a, alpha_width);
+ ret = alpha_pll_check_rate_margin(hw, rrate, rate);
+ if (ret < 0)
+ return ret;
+
+ /* change L_VAL without having to go through the power on sequence */
+ regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l);
+ regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), a);
+
+ if (clk_hw_is_enabled(hw))
+ return wait_for_pll_enable_lock(pll);
+
+ return 0;
+}
+
+const struct clk_ops clk_alpha_pll_agera_ops = {
+ .enable = clk_alpha_pll_enable,
+ .disable = clk_alpha_pll_disable,
+ .is_enabled = clk_alpha_pll_is_enabled,
+ .recalc_rate = alpha_pll_fabia_recalc_rate,
+ .round_rate = clk_alpha_pll_round_rate,
+ .set_rate = clk_alpha_pll_agera_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_agera_ops);
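A clock-controller driver picks the new PLL type up by pointing .regs at the CLK_ALPHA_PLL_TYPE_AGERA offset table and .ops at clk_alpha_pll_agera_ops, as camcc-sc7180 above does for cam_cc_pll2. An illustrative sketch (the offset and parent name are placeholders, not taken from this patch):

    static struct clk_alpha_pll example_agera_pll = {
            .offset = 0x2000,       /* hypothetical register base */
            .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_AGERA],
            .clkr = {
                    .hw.init = &(struct clk_init_data){
                            .name = "example_agera_pll",
                            .parent_data = &(const struct clk_parent_data){
                                    .fw_name = "bi_tcxo",
                            },
                            .num_parents = 1,
                            .ops = &clk_alpha_pll_agera_ops,
                    },
            },
    };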
diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h
index d3201b87c0cd..0ea30d2f3da1 100644
--- a/drivers/clk/qcom/clk-alpha-pll.h
+++ b/drivers/clk/qcom/clk-alpha-pll.h
@@ -15,6 +15,7 @@ enum {
CLK_ALPHA_PLL_TYPE_FABIA,
CLK_ALPHA_PLL_TYPE_TRION,
CLK_ALPHA_PLL_TYPE_LUCID = CLK_ALPHA_PLL_TYPE_TRION,
+ CLK_ALPHA_PLL_TYPE_AGERA,
CLK_ALPHA_PLL_TYPE_MAX,
};
@@ -141,6 +142,7 @@ extern const struct clk_ops clk_alpha_pll_postdiv_trion_ops;
extern const struct clk_ops clk_alpha_pll_lucid_ops;
#define clk_alpha_pll_fixed_lucid_ops clk_alpha_pll_fixed_trion_ops
extern const struct clk_ops clk_alpha_pll_postdiv_lucid_ops;
+extern const struct clk_ops clk_alpha_pll_agera_ops;
void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);
@@ -148,6 +150,8 @@ void clk_fabia_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);
void clk_trion_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);
+void clk_agera_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct alpha_pll_config *config);
#define clk_lucid_pll_configure(pll, regmap, config) \
clk_trion_pll_configure(pll, regmap, config)
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
index e2c669b08aff..6a2a13c5058e 100644
--- a/drivers/clk/qcom/clk-rpmh.c
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -349,6 +349,7 @@ DEFINE_CLK_RPMH_VRM(sdm845, rf_clk2, rf_clk2_ao, "rfclka2", 1);
DEFINE_CLK_RPMH_VRM(sdm845, rf_clk3, rf_clk3_ao, "rfclka3", 1);
DEFINE_CLK_RPMH_VRM(sm8150, rf_clk3, rf_clk3_ao, "rfclka3", 1);
DEFINE_CLK_RPMH_BCM(sdm845, ipa, "IP0");
+DEFINE_CLK_RPMH_BCM(sdm845, ce, "CE0");
static struct clk_hw *sdm845_rpmh_clocks[] = {
[RPMH_CXO_CLK] = &sdm845_bi_tcxo.hw,
@@ -364,6 +365,7 @@ static struct clk_hw *sdm845_rpmh_clocks[] = {
[RPMH_RF_CLK3] = &sdm845_rf_clk3.hw,
[RPMH_RF_CLK3_A] = &sdm845_rf_clk3_ao.hw,
[RPMH_IPA_CLK] = &sdm845_ipa.hw,
+ [RPMH_CE_CLK] = &sdm845_ce.hw,
};
static const struct clk_rpmh_desc clk_rpmh_sdm845 = {
@@ -371,6 +373,25 @@ static const struct clk_rpmh_desc clk_rpmh_sdm845 = {
.num_clks = ARRAY_SIZE(sdm845_rpmh_clocks),
};
+DEFINE_CLK_RPMH_VRM(sdx55, rf_clk1, rf_clk1_ao, "rfclkd1", 1);
+DEFINE_CLK_RPMH_VRM(sdx55, rf_clk2, rf_clk2_ao, "rfclkd2", 1);
+DEFINE_CLK_RPMH_BCM(sdx55, qpic_clk, "QP0");
+
+static struct clk_hw *sdx55_rpmh_clocks[] = {
+ [RPMH_CXO_CLK] = &sdm845_bi_tcxo.hw,
+ [RPMH_CXO_CLK_A] = &sdm845_bi_tcxo_ao.hw,
+ [RPMH_RF_CLK1] = &sdx55_rf_clk1.hw,
+ [RPMH_RF_CLK1_A] = &sdx55_rf_clk1_ao.hw,
+ [RPMH_RF_CLK2] = &sdx55_rf_clk2.hw,
+ [RPMH_RF_CLK2_A] = &sdx55_rf_clk2_ao.hw,
+ [RPMH_QPIC_CLK] = &sdx55_qpic_clk.hw,
+};
+
+static const struct clk_rpmh_desc clk_rpmh_sdx55 = {
+ .clks = sdx55_rpmh_clocks,
+ .num_clks = ARRAY_SIZE(sdx55_rpmh_clocks),
+};
+
static struct clk_hw *sm8150_rpmh_clocks[] = {
[RPMH_CXO_CLK] = &sdm845_bi_tcxo.hw,
[RPMH_CXO_CLK_A] = &sdm845_bi_tcxo_ao.hw,
@@ -432,6 +453,39 @@ static const struct clk_rpmh_desc clk_rpmh_sm8250 = {
.num_clks = ARRAY_SIZE(sm8250_rpmh_clocks),
};
+DEFINE_CLK_RPMH_VRM(sm8350, div_clk1, div_clk1_ao, "divclka1", 2);
+DEFINE_CLK_RPMH_VRM(sm8350, rf_clk4, rf_clk4_ao, "rfclka4", 1);
+DEFINE_CLK_RPMH_VRM(sm8350, rf_clk5, rf_clk5_ao, "rfclka5", 1);
+DEFINE_CLK_RPMH_BCM(sm8350, pka, "PKA0");
+DEFINE_CLK_RPMH_BCM(sm8350, hwkm, "HK0");
+
+static struct clk_hw *sm8350_rpmh_clocks[] = {
+ [RPMH_CXO_CLK] = &sdm845_bi_tcxo.hw,
+ [RPMH_CXO_CLK_A] = &sdm845_bi_tcxo_ao.hw,
+ [RPMH_DIV_CLK1] = &sm8350_div_clk1.hw,
+ [RPMH_DIV_CLK1_A] = &sm8350_div_clk1_ao.hw,
+ [RPMH_LN_BB_CLK1] = &sm8250_ln_bb_clk1.hw,
+ [RPMH_LN_BB_CLK1_A] = &sm8250_ln_bb_clk1_ao.hw,
+ [RPMH_LN_BB_CLK2] = &sdm845_ln_bb_clk2.hw,
+ [RPMH_LN_BB_CLK2_A] = &sdm845_ln_bb_clk2_ao.hw,
+ [RPMH_RF_CLK1] = &sdm845_rf_clk1.hw,
+ [RPMH_RF_CLK1_A] = &sdm845_rf_clk1_ao.hw,
+ [RPMH_RF_CLK3] = &sdm845_rf_clk3.hw,
+ [RPMH_RF_CLK3_A] = &sdm845_rf_clk3_ao.hw,
+ [RPMH_RF_CLK4] = &sm8350_rf_clk4.hw,
+ [RPMH_RF_CLK4_A] = &sm8350_rf_clk4_ao.hw,
+ [RPMH_RF_CLK5] = &sm8350_rf_clk5.hw,
+ [RPMH_RF_CLK5_A] = &sm8350_rf_clk5_ao.hw,
+ [RPMH_IPA_CLK] = &sdm845_ipa.hw,
+ [RPMH_PKA_CLK] = &sm8350_pka.hw,
+ [RPMH_HWKM_CLK] = &sm8350_hwkm.hw,
+};
+
+static const struct clk_rpmh_desc clk_rpmh_sm8350 = {
+ .clks = sm8350_rpmh_clocks,
+ .num_clks = ARRAY_SIZE(sm8350_rpmh_clocks),
+};
+
static struct clk_hw *of_clk_rpmh_hw_get(struct of_phandle_args *clkspec,
void *data)
{
@@ -517,8 +571,10 @@ static int clk_rpmh_probe(struct platform_device *pdev)
static const struct of_device_id clk_rpmh_match_table[] = {
{ .compatible = "qcom,sc7180-rpmh-clk", .data = &clk_rpmh_sc7180},
{ .compatible = "qcom,sdm845-rpmh-clk", .data = &clk_rpmh_sdm845},
+ { .compatible = "qcom,sdx55-rpmh-clk", .data = &clk_rpmh_sdx55},
{ .compatible = "qcom,sm8150-rpmh-clk", .data = &clk_rpmh_sm8150},
{ .compatible = "qcom,sm8250-rpmh-clk", .data = &clk_rpmh_sm8250},
+ { .compatible = "qcom,sm8350-rpmh-clk", .data = &clk_rpmh_sm8350},
{ }
};
MODULE_DEVICE_TABLE(of, clk_rpmh_match_table);
diff --git a/drivers/clk/qcom/dispcc-sm8250.c b/drivers/clk/qcom/dispcc-sm8250.c
index 07a98d3f882d..588575e1169d 100644
--- a/drivers/clk/qcom/dispcc-sm8250.c
+++ b/drivers/clk/qcom/dispcc-sm8250.c
@@ -963,6 +963,7 @@ static struct gdsc mdss_gdsc = {
},
.pwrsts = PWRSTS_OFF_ON,
.flags = HW_CTRL,
+ .supply = "mmcx",
};
static struct clk_regmap *disp_cc_sm8250_clocks[] = {
diff --git a/drivers/clk/qcom/gcc-sc7180.c b/drivers/clk/qcom/gcc-sc7180.c
index 68d8f7aaf64e..b05901b24917 100644
--- a/drivers/clk/qcom/gcc-sc7180.c
+++ b/drivers/clk/qcom/gcc-sc7180.c
@@ -642,7 +642,7 @@ static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = {
.name = "gcc_sdcc1_ice_core_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = 4,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
},
};
@@ -651,6 +651,7 @@ static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
F(9600000, P_BI_TCXO, 2, 0, 0),
F(19200000, P_BI_TCXO, 1, 0, 0),
F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0),
F(202000000, P_GPLL7_OUT_MAIN, 4, 0, 0),
{ }
@@ -666,7 +667,7 @@ static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
.name = "gcc_sdcc2_apps_clk_src",
.parent_data = gcc_parent_data_5,
.num_parents = 5,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
},
};
@@ -890,21 +891,6 @@ static struct clk_branch gcc_boot_rom_ahb_clk = {
},
};
-static struct clk_branch gcc_camera_ahb_clk = {
- .halt_reg = 0xb008,
- .halt_check = BRANCH_HALT,
- .hwcg_reg = 0xb008,
- .hwcg_bit = 1,
- .clkr = {
- .enable_reg = 0xb008,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_camera_ahb_clk",
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_camera_hf_axi_clk = {
.halt_reg = 0xb020,
.halt_check = BRANCH_HALT,
@@ -2316,7 +2302,6 @@ static struct clk_regmap *gcc_sc7180_clocks[] = {
[GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
[GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
[GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
- [GCC_CAMERA_AHB_CLK] = &gcc_camera_ahb_clk.clkr,
[GCC_CAMERA_HF_AXI_CLK] = &gcc_camera_hf_axi_clk.clkr,
[GCC_CAMERA_THROTTLE_HF_AXI_CLK] = &gcc_camera_throttle_hf_axi_clk.clkr,
[GCC_CAMERA_XO_CLK] = &gcc_camera_xo_clk.clkr,
@@ -2518,11 +2503,12 @@ static int gcc_sc7180_probe(struct platform_device *pdev)
/*
* Keep the clocks always-ON
- * GCC_CPUSS_GNOC_CLK, GCC_VIDEO_AHB_CLK, GCC_DISP_AHB_CLK
- * GCC_GPU_CFG_AHB_CLK
+ * GCC_CPUSS_GNOC_CLK, GCC_VIDEO_AHB_CLK, GCC_CAMERA_AHB_CLK,
+ * GCC_DISP_AHB_CLK, GCC_GPU_CFG_AHB_CLK
*/
regmap_update_bits(regmap, 0x48004, BIT(0), BIT(0));
regmap_update_bits(regmap, 0x0b004, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x0b008, BIT(0), BIT(0));
regmap_update_bits(regmap, 0x0b00c, BIT(0), BIT(0));
regmap_update_bits(regmap, 0x71004, BIT(0), BIT(0));
diff --git a/drivers/clk/qcom/gcc-sdx55.c b/drivers/clk/qcom/gcc-sdx55.c
new file mode 100644
index 000000000000..e3b9030b2bae
--- /dev/null
+++ b/drivers/clk/qcom/gcc-sdx55.c
@@ -0,0 +1,1659 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2020, Linaro Ltd.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,gcc-sdx55.h>
+
+#include "common.h"
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ P_BI_TCXO,
+ P_CORE_BI_PLL_TEST_SE,
+ P_GPLL0_OUT_EVEN,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL4_OUT_EVEN,
+ P_GPLL5_OUT_MAIN,
+ P_SLEEP_CLK,
+};
+
+static const struct pll_vco lucid_vco[] = {
+ { 249600000, 2000000000, 0 },
+};
+
+static struct clk_alpha_pll gpll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .vco_table = lucid_vco,
+ .num_vco = ARRAY_SIZE(lucid_vco),
+ .clkr = {
+ .enable_reg = 0x6d000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_lucid_even[] = {
+ { 0x0, 1 },
+ { 0x1, 2 },
+ { 0x3, 4 },
+ { 0x7, 8 },
+ { }
+};
+
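Each clk_div_table entry pairs the 4-bit register encoding with the divider it selects (0x0 -> /1 up to 0x7 -> /8 for the Lucid even output). A standalone sketch of the divider-to-encoding lookup the postdiv ops perform over such a table:

    struct div_entry { unsigned int val; unsigned int div; };

    static const struct div_entry lucid_even[] = {
            { 0x0, 1 }, { 0x1, 2 }, { 0x3, 4 }, { 0x7, 8 }, { 0, 0 }
    };

    /* divider -> register encoding, or -1 if the ratio is unsupported */
    static int div_to_val(const struct div_entry *t, unsigned int div)
    {
            for (; t->div; t++)
                    if (t->div == div)
                            return (int)t->val;
            return -1;
    }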
+static struct clk_alpha_pll_postdiv gpll0_out_even = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_lucid_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_lucid_even),
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_even",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_lucid_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll4 = {
+ .offset = 0x76000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .vco_table = lucid_vco,
+ .num_vco = ARRAY_SIZE(lucid_vco),
+ .clkr = {
+ .enable_reg = 0x6d000,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll4",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll4_out_even = {
+ .offset = 0x76000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_lucid_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_lucid_even),
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll4_out_even",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gpll4.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_lucid_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll5 = {
+ .offset = 0x74000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .vco_table = lucid_vco,
+ .num_vco = ARRAY_SIZE(lucid_vco),
+ .clkr = {
+ .enable_reg = 0x6d000,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll5",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ops,
+ },
+ },
+};
+
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data gcc_parents_0[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+ { .fw_name = "core_bi_pll_test_se" },
+};
+
+static const struct clk_parent_data gcc_parents_0_ao[] = {
+ { .fw_name = "bi_tcxo_ao" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+ { .fw_name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL4_OUT_EVEN, 2 },
+ { P_GPLL5_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data gcc_parents_2[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll4_out_even.clkr.hw },
+ { .hw = &gpll5.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+ { .fw_name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data gcc_parents_3[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .fw_name = "sleep_clk", .name = "sleep_clk" },
+ { .hw = &gpll0_out_even.clkr.hw },
+ { .fw_name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_SLEEP_CLK, 5 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data gcc_parents_4[] = {
+ { .fw_name = "bi_tcxo" },
+ { .fw_name = "sleep_clk", .name = "sleep_clk" },
+ { .fw_name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL4_OUT_EVEN, 2 },
+ { P_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data gcc_parents_5[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll4_out_even.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+ { .fw_name = "core_bi_pll_test_se" },
+};
+
+static const struct freq_tbl ftbl_gcc_blsp1_qup1_i2c_apps_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_blsp1_qup1_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x11024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_i2c_apps_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_blsp1_qup1_spi_apps_clk_src[] = {
+ F(960000, P_BI_TCXO, 10, 1, 2),
+ F(4800000, P_BI_TCXO, 4, 0, 0),
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(15000000, P_GPLL0_OUT_EVEN, 5, 1, 4),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(24000000, P_GPLL0_OUT_MAIN, 12.5, 1, 2),
+ F(25000000, P_GPLL0_OUT_MAIN, 12, 1, 2),
+ F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_blsp1_qup1_spi_apps_clk_src = {
+ .cmd_rcgr = 0x1100c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_spi_apps_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_qup2_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x13024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup2_i2c_apps_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_qup2_spi_apps_clk_src = {
+ .cmd_rcgr = 0x1300c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup2_spi_apps_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_qup3_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x15024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup3_i2c_apps_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_qup3_spi_apps_clk_src = {
+ .cmd_rcgr = 0x1500c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup3_spi_apps_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_qup4_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x17024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup4_i2c_apps_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_qup4_spi_apps_clk_src = {
+ .cmd_rcgr = 0x1700c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup4_spi_apps_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_blsp1_uart1_apps_clk_src[] = {
+ F(3686400, P_GPLL0_OUT_EVEN, 1, 192, 15625),
+ F(7372800, P_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(14745600, P_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(16000000, P_GPLL0_OUT_EVEN, 1, 4, 75),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(19354839, P_GPLL0_OUT_MAIN, 15.5, 1, 2),
+ F(20000000, P_GPLL0_OUT_MAIN, 15, 1, 2),
+ F(20689655, P_GPLL0_OUT_MAIN, 14.5, 1, 2),
+ F(21428571, P_GPLL0_OUT_MAIN, 14, 1, 2),
+ F(22222222, P_GPLL0_OUT_MAIN, 13.5, 1, 2),
+ F(23076923, P_GPLL0_OUT_MAIN, 13, 1, 2),
+ F(24000000, P_GPLL0_OUT_MAIN, 5, 1, 5),
+ F(25000000, P_GPLL0_OUT_MAIN, 12, 1, 2),
+ F(26086957, P_GPLL0_OUT_MAIN, 11.5, 1, 2),
+ F(27272727, P_GPLL0_OUT_MAIN, 11, 1, 2),
+ F(28571429, P_GPLL0_OUT_MAIN, 10.5, 1, 2),
+ F(32000000, P_GPLL0_OUT_MAIN, 1, 4, 75),
+ F(40000000, P_GPLL0_OUT_MAIN, 15, 0, 0),
+ F(46400000, P_GPLL0_OUT_MAIN, 1, 29, 375),
+ F(48000000, P_GPLL0_OUT_MAIN, 12.5, 0, 0),
+ F(51200000, P_GPLL0_OUT_MAIN, 1, 32, 375),
+ F(56000000, P_GPLL0_OUT_MAIN, 1, 7, 75),
+ F(58982400, P_GPLL0_OUT_MAIN, 1, 1536, 15625),
+ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ F(63157895, P_GPLL0_OUT_MAIN, 9.5, 0, 0),
+ { }
+};
+
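Each F(rate, src, pre_div, m, n) row programs rate = src_rate / pre_div * m / n (m and n of 0 disable the M/N counter; fractional pre-dividers such as 12.5 are handled by the RCG hardware). Assuming the usual GPLL0 configuration of 600 MHz with a 300 MHz even output, which is not stated in this hunk, a few rows check out exactly:

    #include <assert.h>

    int main(void)
    {
            unsigned long long even = 300000000ULL;         /* assumed GPLL0_OUT_EVEN */
            unsigned long long main_pll = 600000000ULL;     /* assumed GPLL0_OUT_MAIN */

            assert(even * 192 / 15625 == 3686400);  /* F(3686400, EVEN, 1, 192, 15625) */
            assert(even * 768 / 15625 == 14745600); /* F(14745600, EVEN, 1, 768, 15625) */
            assert(main_pll * 2 / 25 == 48000000);  /* F(48000000, MAIN, 12.5, 0, 0) */
            assert(main_pll / 10 == 60000000);      /* F(60000000, MAIN, 10, 0, 0) */
            return 0;
    }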
+static struct clk_rcg2 gcc_blsp1_uart1_apps_clk_src = {
+ .cmd_rcgr = 0x1200c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart1_apps_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_uart2_apps_clk_src = {
+ .cmd_rcgr = 0x1400c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart2_apps_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_uart3_apps_clk_src = {
+ .cmd_rcgr = 0x1600c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart3_apps_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_uart4_apps_clk_src = {
+ .cmd_rcgr = 0x1800c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart4_apps_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_cpuss_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(133333333, P_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_cpuss_ahb_clk_src = {
+ .cmd_rcgr = 0x24010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_ahb_clk_src",
+ .parent_data = gcc_parents_0_ao,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_cpuss_rbcpr_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_cpuss_rbcpr_clk_src = {
+ .cmd_rcgr = 0x2402c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_rbcpr_clk_src",
+ .parent_data = gcc_parents_0_ao,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_emac_clk_src[] = {
+ F(2500000, P_BI_TCXO, 1, 25, 192),
+ F(5000000, P_BI_TCXO, 1, 25, 96),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(250000000, P_GPLL4_OUT_EVEN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_emac_clk_src = {
+ .cmd_rcgr = 0x47020,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_emac_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_emac_clk_src",
+ .parent_data = gcc_parents_5,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_emac_ptp_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(230400000, P_GPLL5_OUT_MAIN, 3.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_emac_ptp_clk_src = {
+ .cmd_rcgr = 0x47038,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_emac_ptp_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_emac_ptp_clk_src",
+ .parent_data = gcc_parents_2,
+ .num_parents = 6,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x2b004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk_src",
+ .parent_data = gcc_parents_3,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x2c004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk_src",
+ .parent_data = gcc_parents_3,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x2d004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk_src",
+ .parent_data = gcc_parents_3,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_aux_phy_clk_src = {
+ .cmd_rcgr = 0x37034,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_aux_phy_clk_src",
+ .parent_data = gcc_parents_4,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_rchng_phy_clk_src[] = {
+ F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_rchng_phy_clk_src = {
+ .cmd_rcgr = 0x37050,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_pcie_rchng_phy_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_rchng_phy_clk_src",
+ .parent_data = gcc_parents_3,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .cmd_rcgr = 0x19010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0xf00c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_apps_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_master_clk_src[] = {
+ F(200000000, P_GPLL0_OUT_EVEN, 1.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_master_clk_src = {
+ .cmd_rcgr = 0xb024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_master_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_master_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_mock_utmi_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_mock_utmi_clk_src = {
+ .cmd_rcgr = 0xb03c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_mock_utmi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_mock_utmi_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb3_phy_aux_clk_src[] = {
+ F(1000000, P_BI_TCXO, 1, 5, 96),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb3_phy_aux_clk_src = {
+ .cmd_rcgr = 0xb064,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_usb3_phy_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_phy_aux_clk_src",
+ .parent_data = gcc_parents_4,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_ahb_pcie_link_clk = {
+ .halt_reg = 0x22004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x22004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ahb_pcie_link_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_ahb_clk = {
+ .halt_reg = 0x10004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x6d008,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
+ .halt_reg = 0x11008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x11008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_blsp1_qup1_i2c_apps_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
+ .halt_reg = 0x11004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x11004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_spi_apps_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_blsp1_qup1_spi_apps_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
+ .halt_reg = 0x13008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup2_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_blsp1_qup2_i2c_apps_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
+ .halt_reg = 0x13004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup2_spi_apps_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_blsp1_qup2_spi_apps_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = {
+ .halt_reg = 0x15008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup3_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_blsp1_qup3_i2c_apps_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = {
+ .halt_reg = 0x15004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup3_spi_apps_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_blsp1_qup3_spi_apps_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = {
+ .halt_reg = 0x17008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup4_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_blsp1_qup4_i2c_apps_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = {
+ .halt_reg = 0x17004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup4_spi_apps_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_blsp1_qup4_spi_apps_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart1_apps_clk = {
+ .halt_reg = 0x12004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x12004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart1_apps_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_blsp1_uart1_apps_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart2_apps_clk = {
+ .halt_reg = 0x14004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart2_apps_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_blsp1_uart2_apps_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart3_apps_clk = {
+ .halt_reg = 0x16004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart3_apps_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_blsp1_uart3_apps_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart4_apps_clk = {
+ .halt_reg = 0x18004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x18004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart4_apps_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_blsp1_uart4_apps_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x1c004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1c004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x6d008,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce1_ahb_clk = {
+ .halt_reg = 0x2100c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2100c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x6d008,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ce1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce1_axi_clk = {
+ .halt_reg = 0x21008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x6d008,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ce1_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce1_clk = {
+ .halt_reg = 0x21004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x6d008,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ce1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cpuss_rbcpr_clk = {
+ .halt_reg = 0x24008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x24008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_rbcpr_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_cpuss_rbcpr_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_eth_axi_clk = {
+ .halt_reg = 0x4701c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4701c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_eth_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_eth_ptp_clk = {
+ .halt_reg = 0x47018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x47018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_eth_ptp_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_emac_ptp_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_eth_rgmii_clk = {
+ .halt_reg = 0x47010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x47010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_eth_rgmii_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_emac_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_eth_slave_ahb_clk = {
+ .halt_reg = 0x47014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x47014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_eth_slave_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x2b000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_gp1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x2c000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2c000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_gp2_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x2d000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2d000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_gp3_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_clkref_clk = {
+ .halt_reg = 0x88004,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x88004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_aux_clk = {
+ .halt_reg = 0x37024,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x6d010,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_aux_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_cfg_ahb_clk = {
+ .halt_reg = 0x3701c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x6d010,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_mstr_axi_clk = {
+ .halt_reg = 0x37018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x6d010,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_pipe_clk = {
+ .halt_reg = 0x3702c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x6d010,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_rchng_phy_clk = {
+ .halt_reg = 0x37020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x6d010,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_rchng_phy_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_pcie_rchng_phy_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_sleep_clk = {
+ .halt_reg = 0x37028,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x6d010,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_sleep_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_pcie_aux_phy_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_slv_axi_clk = {
+ .halt_reg = 0x37014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x37014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x6d010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_slv_q2a_axi_clk = {
+ .halt_reg = 0x37010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x6d010,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x1900c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1900c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_pdm2_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x19004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x19004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x19004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x19008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x19008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .halt_reg = 0xf008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0xf004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_sdcc1_apps_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_master_clk = {
+ .halt_reg = 0xb010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_master_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_usb30_master_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_mock_utmi_clk = {
+ .halt_reg = 0xb020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_usb30_mock_utmi_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_mstr_axi_clk = {
+ .halt_reg = 0xb014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sleep_clk = {
+ .halt_reg = 0xb01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_slv_ahb_clk = {
+ .halt_reg = 0xb018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_slv_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_phy_aux_clk = {
+ .halt_reg = 0xb058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_phy_aux_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_usb3_phy_aux_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_phy_pipe_clk = {
+ .halt_reg = 0xb05c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0xb05c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_phy_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_clkref_clk = {
+ .halt_reg = 0x88000,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x88000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_phy_cfg_ahb2phy_clk = {
+ .halt_reg = 0xe004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xe004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xe004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb_phy_cfg_ahb2phy_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_xo_pcie_link_clk = {
+ .halt_reg = 0x22008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x22008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_xo_pcie_link_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc usb30_gdsc = {
+ .gdscr = 0x0b004,
+ .pd = {
+ .name = "usb30_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc pcie_gdsc = {
+ .gdscr = 0x37004,
+ .pd = {
+ .name = "pcie_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc emac_gdsc = {
+ .gdscr = 0x47004,
+ .pd = {
+ .name = "emac_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct clk_regmap *gcc_sdx55_clocks[] = {
+ [GCC_AHB_PCIE_LINK_CLK] = &gcc_ahb_pcie_link_clk.clkr,
+ [GCC_BLSP1_AHB_CLK] = &gcc_blsp1_ahb_clk.clkr,
+ [GCC_BLSP1_QUP1_I2C_APPS_CLK] = &gcc_blsp1_qup1_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP1_I2C_APPS_CLK_SRC] =
+ &gcc_blsp1_qup1_i2c_apps_clk_src.clkr,
+ [GCC_BLSP1_QUP1_SPI_APPS_CLK] = &gcc_blsp1_qup1_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP1_SPI_APPS_CLK_SRC] =
+ &gcc_blsp1_qup1_spi_apps_clk_src.clkr,
+ [GCC_BLSP1_QUP2_I2C_APPS_CLK] = &gcc_blsp1_qup2_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_I2C_APPS_CLK_SRC] =
+ &gcc_blsp1_qup2_i2c_apps_clk_src.clkr,
+ [GCC_BLSP1_QUP2_SPI_APPS_CLK] = &gcc_blsp1_qup2_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_SPI_APPS_CLK_SRC] =
+ &gcc_blsp1_qup2_spi_apps_clk_src.clkr,
+ [GCC_BLSP1_QUP3_I2C_APPS_CLK] = &gcc_blsp1_qup3_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_I2C_APPS_CLK_SRC] =
+ &gcc_blsp1_qup3_i2c_apps_clk_src.clkr,
+ [GCC_BLSP1_QUP3_SPI_APPS_CLK] = &gcc_blsp1_qup3_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_SPI_APPS_CLK_SRC] =
+ &gcc_blsp1_qup3_spi_apps_clk_src.clkr,
+ [GCC_BLSP1_QUP4_I2C_APPS_CLK] = &gcc_blsp1_qup4_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_I2C_APPS_CLK_SRC] =
+ &gcc_blsp1_qup4_i2c_apps_clk_src.clkr,
+ [GCC_BLSP1_QUP4_SPI_APPS_CLK] = &gcc_blsp1_qup4_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_SPI_APPS_CLK_SRC] =
+ &gcc_blsp1_qup4_spi_apps_clk_src.clkr,
+ [GCC_BLSP1_UART1_APPS_CLK] = &gcc_blsp1_uart1_apps_clk.clkr,
+ [GCC_BLSP1_UART1_APPS_CLK_SRC] = &gcc_blsp1_uart1_apps_clk_src.clkr,
+ [GCC_BLSP1_UART2_APPS_CLK] = &gcc_blsp1_uart2_apps_clk.clkr,
+ [GCC_BLSP1_UART2_APPS_CLK_SRC] = &gcc_blsp1_uart2_apps_clk_src.clkr,
+ [GCC_BLSP1_UART3_APPS_CLK] = &gcc_blsp1_uart3_apps_clk.clkr,
+ [GCC_BLSP1_UART3_APPS_CLK_SRC] = &gcc_blsp1_uart3_apps_clk_src.clkr,
+ [GCC_BLSP1_UART4_APPS_CLK] = &gcc_blsp1_uart4_apps_clk.clkr,
+ [GCC_BLSP1_UART4_APPS_CLK_SRC] = &gcc_blsp1_uart4_apps_clk_src.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CE1_AHB_CLK] = &gcc_ce1_ahb_clk.clkr,
+ [GCC_CE1_AXI_CLK] = &gcc_ce1_axi_clk.clkr,
+ [GCC_CE1_CLK] = &gcc_ce1_clk.clkr,
+ [GCC_CPUSS_AHB_CLK_SRC] = &gcc_cpuss_ahb_clk_src.clkr,
+ [GCC_CPUSS_RBCPR_CLK] = &gcc_cpuss_rbcpr_clk.clkr,
+ [GCC_CPUSS_RBCPR_CLK_SRC] = &gcc_cpuss_rbcpr_clk_src.clkr,
+ [GCC_EMAC_CLK_SRC] = &gcc_emac_clk_src.clkr,
+ [GCC_EMAC_PTP_CLK_SRC] = &gcc_emac_ptp_clk_src.clkr,
+ [GCC_ETH_AXI_CLK] = &gcc_eth_axi_clk.clkr,
+ [GCC_ETH_PTP_CLK] = &gcc_eth_ptp_clk.clkr,
+ [GCC_ETH_RGMII_CLK] = &gcc_eth_rgmii_clk.clkr,
+ [GCC_ETH_SLAVE_AHB_CLK] = &gcc_eth_slave_ahb_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_PCIE_0_CLKREF_CLK] = &gcc_pcie_0_clkref_clk.clkr,
+ [GCC_PCIE_AUX_CLK] = &gcc_pcie_aux_clk.clkr,
+ [GCC_PCIE_AUX_PHY_CLK_SRC] = &gcc_pcie_aux_phy_clk_src.clkr,
+ [GCC_PCIE_CFG_AHB_CLK] = &gcc_pcie_cfg_ahb_clk.clkr,
+ [GCC_PCIE_MSTR_AXI_CLK] = &gcc_pcie_mstr_axi_clk.clkr,
+ [GCC_PCIE_PIPE_CLK] = &gcc_pcie_pipe_clk.clkr,
+ [GCC_PCIE_RCHNG_PHY_CLK] = &gcc_pcie_rchng_phy_clk.clkr,
+ [GCC_PCIE_RCHNG_PHY_CLK_SRC] = &gcc_pcie_rchng_phy_clk_src.clkr,
+ [GCC_PCIE_SLEEP_CLK] = &gcc_pcie_sleep_clk.clkr,
+ [GCC_PCIE_SLV_AXI_CLK] = &gcc_pcie_slv_axi_clk.clkr,
+ [GCC_PCIE_SLV_Q2A_AXI_CLK] = &gcc_pcie_slv_q2a_axi_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC1_APPS_CLK_SRC] = &gcc_sdcc1_apps_clk_src.clkr,
+ [GCC_USB30_MASTER_CLK] = &gcc_usb30_master_clk.clkr,
+ [GCC_USB30_MASTER_CLK_SRC] = &gcc_usb30_master_clk_src.clkr,
+ [GCC_USB30_MOCK_UTMI_CLK] = &gcc_usb30_mock_utmi_clk.clkr,
+ [GCC_USB30_MOCK_UTMI_CLK_SRC] = &gcc_usb30_mock_utmi_clk_src.clkr,
+ [GCC_USB30_MSTR_AXI_CLK] = &gcc_usb30_mstr_axi_clk.clkr,
+ [GCC_USB30_SLEEP_CLK] = &gcc_usb30_sleep_clk.clkr,
+ [GCC_USB30_SLV_AHB_CLK] = &gcc_usb30_slv_ahb_clk.clkr,
+ [GCC_USB3_PHY_AUX_CLK] = &gcc_usb3_phy_aux_clk.clkr,
+ [GCC_USB3_PHY_AUX_CLK_SRC] = &gcc_usb3_phy_aux_clk_src.clkr,
+ [GCC_USB3_PHY_PIPE_CLK] = &gcc_usb3_phy_pipe_clk.clkr,
+ [GCC_USB3_PRIM_CLKREF_CLK] = &gcc_usb3_prim_clkref_clk.clkr,
+ [GCC_USB_PHY_CFG_AHB2PHY_CLK] = &gcc_usb_phy_cfg_ahb2phy_clk.clkr,
+ [GCC_XO_PCIE_LINK_CLK] = &gcc_xo_pcie_link_clk.clkr,
+ [GPLL0] = &gpll0.clkr,
+ [GPLL0_OUT_EVEN] = &gpll0_out_even.clkr,
+ [GPLL4] = &gpll4.clkr,
+ [GPLL4_OUT_EVEN] = &gpll4_out_even.clkr,
+ [GPLL5] = &gpll5.clkr,
+};
+
+static const struct qcom_reset_map gcc_sdx55_resets[] = {
+ [GCC_EMAC_BCR] = { 0x47000 },
+ [GCC_PCIE_BCR] = { 0x37000 },
+ [GCC_PCIE_LINK_DOWN_BCR] = { 0x77000 },
+ [GCC_PCIE_PHY_BCR] = { 0x39000 },
+ [GCC_PCIE_PHY_COM_BCR] = { 0x78004 },
+ [GCC_QUSB2PHY_BCR] = { 0xd000 },
+ [GCC_USB30_BCR] = { 0xb000 },
+ [GCC_USB3_PHY_BCR] = { 0xc000 },
+ [GCC_USB3PHY_PHY_BCR] = { 0xc004 },
+ [GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0xe000 },
+};
+
+static struct gdsc *gcc_sdx55_gdscs[] = {
+ [USB30_GDSC] = &usb30_gdsc,
+ [PCIE_GDSC] = &pcie_gdsc,
+ [EMAC_GDSC] = &emac_gdsc,
+};
+
+static const struct regmap_config gcc_sdx55_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x9b040,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_sdx55_desc = {
+ .config = &gcc_sdx55_regmap_config,
+ .clks = gcc_sdx55_clocks,
+ .num_clks = ARRAY_SIZE(gcc_sdx55_clocks),
+ .resets = gcc_sdx55_resets,
+ .num_resets = ARRAY_SIZE(gcc_sdx55_resets),
+ .gdscs = gcc_sdx55_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_sdx55_gdscs),
+};
+
+static const struct of_device_id gcc_sdx55_match_table[] = {
+ { .compatible = "qcom,gcc-sdx55" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_sdx55_match_table);
+
+static int gcc_sdx55_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &gcc_sdx55_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ /*
+ * Keep the clocks always-ON as they are critical to the functioning
+ * of the system:
+ * GCC_SYS_NOC_CPUSS_AHB_CLK, GCC_CPUSS_AHB_CLK, GCC_CPUSS_GNOC_CLK
+ */
+ regmap_update_bits(regmap, 0x6d008, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x6d008, BIT(21), BIT(21));
+ regmap_update_bits(regmap, 0x6d008, BIT(22), BIT(22));
+
+ return qcom_cc_really_probe(pdev, &gcc_sdx55_desc, regmap);
+}
+
+static struct platform_driver gcc_sdx55_driver = {
+ .probe = gcc_sdx55_probe,
+ .driver = {
+ .name = "gcc-sdx55",
+ .of_match_table = gcc_sdx55_match_table,
+ },
+};
+
+static int __init gcc_sdx55_init(void)
+{
+ return platform_driver_register(&gcc_sdx55_driver);
+}
+subsys_initcall(gcc_sdx55_init);
+
+static void __exit gcc_sdx55_exit(void)
+{
+ platform_driver_unregister(&gcc_sdx55_driver);
+}
+module_exit(gcc_sdx55_exit);
+
+MODULE_DESCRIPTION("QTI GCC SDX55 Driver");
+MODULE_LICENSE("GPL v2");
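For context, peripheral drivers consume the GCC clocks registered above through the common clk API rather than by poking GCC registers directly. A minimal consumer sketch, assuming a hypothetical SDX55 peripheral whose devicetree node lists one of these branch clocks under the clock-name "core":

#include <linux/clk.h>
#include <linux/platform_device.h>

static int example_consumer_probe(struct platform_device *pdev)
{
	struct clk *core;
	int ret;

	/* Resolved via the node's "clocks"/"clock-names" properties. */
	core = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(core))
		return PTR_ERR(core);

	/* Prepare and ungate the branch clock before touching hardware. */
	ret = clk_prepare_enable(core);
	if (ret)
		return ret;

	/* ... device-specific setup ... */
	return 0;
}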
diff --git a/drivers/clk/qcom/gcc-sm8250.c b/drivers/clk/qcom/gcc-sm8250.c
index 6cb6617b8d88..ab594a0f0c40 100644
--- a/drivers/clk/qcom/gcc-sm8250.c
+++ b/drivers/clk/qcom/gcc-sm8250.c
@@ -722,7 +722,7 @@ static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
.name = "gcc_sdcc2_apps_clk_src",
.parent_data = gcc_parent_data_4,
.num_parents = 5,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
},
};
@@ -745,7 +745,7 @@ static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
.name = "gcc_sdcc4_apps_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = 3,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
},
};
diff --git a/drivers/clk/qcom/lpass-gfm-sm8250.c b/drivers/clk/qcom/lpass-gfm-sm8250.c
new file mode 100644
index 000000000000..d366c7c2abc7
--- /dev/null
+++ b/drivers/clk/qcom/lpass-gfm-sm8250.c
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * LPASS Audio CC and Always ON CC Glitch Free Mux clock driver
+ *
+ * Copyright (c) 2020 Linaro Ltd.
+ * Author: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_runtime.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <dt-bindings/clock/qcom,sm8250-lpass-audiocc.h>
+#include <dt-bindings/clock/qcom,sm8250-lpass-aoncc.h>
+
+struct lpass_gfm {
+ struct device *dev;
+ void __iomem *base;
+};
+
+struct clk_gfm {
+ unsigned int mux_reg;
+ unsigned int mux_mask;
+ struct clk_hw hw;
+ struct lpass_gfm *priv;
+ void __iomem *gfm_mux;
+};
+
+#define GFM_MASK BIT(1)
+#define to_clk_gfm(_hw) container_of(_hw, struct clk_gfm, hw)
+
+static u8 clk_gfm_get_parent(struct clk_hw *hw)
+{
+ struct clk_gfm *clk = to_clk_gfm(hw);
+
+ return readl(clk->gfm_mux) & GFM_MASK;
+}
+
+static int clk_gfm_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_gfm *clk = to_clk_gfm(hw);
+ unsigned int val;
+
+ val = readl(clk->gfm_mux);
+
+ if (index)
+ val |= GFM_MASK;
+ else
+ val &= ~GFM_MASK;
+
+ writel(val, clk->gfm_mux);
+
+ return 0;
+}
+
+static const struct clk_ops clk_gfm_ops = {
+ .get_parent = clk_gfm_get_parent,
+ .set_parent = clk_gfm_set_parent,
+ .determine_rate = __clk_mux_determine_rate,
+};
+
+static struct clk_gfm lpass_gfm_va_mclk = {
+ .mux_reg = 0x20000,
+ .mux_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "VA_MCLK",
+ .ops = &clk_gfm_ops,
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ .num_parents = 2,
+ .parent_data = (const struct clk_parent_data[]){
+ {
+ .index = 0,
+ .fw_name = "LPASS_CLK_ID_TX_CORE_MCLK",
+ }, {
+ .index = 1,
+ .fw_name = "LPASS_CLK_ID_VA_CORE_MCLK",
+ },
+ },
+ },
+};
+
+static struct clk_gfm lpass_gfm_tx_npl = {
+ .mux_reg = 0x20000,
+ .mux_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "TX_NPL",
+ .ops = &clk_gfm_ops,
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ .parent_data = (const struct clk_parent_data[]){
+ {
+ .index = 0,
+ .fw_name = "LPASS_CLK_ID_TX_CORE_NPL_MCLK",
+ }, {
+ .index = 1,
+ .fw_name = "LPASS_CLK_ID_VA_CORE_2X_MCLK",
+ },
+ },
+ .num_parents = 2,
+ },
+};
+
+static struct clk_gfm lpass_gfm_wsa_mclk = {
+ .mux_reg = 0x220d8,
+ .mux_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "WSA_MCLK",
+ .ops = &clk_gfm_ops,
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ .parent_data = (const struct clk_parent_data[]){
+ {
+ .index = 0,
+ .fw_name = "LPASS_CLK_ID_TX_CORE_MCLK",
+ }, {
+ .index = 1,
+ .fw_name = "LPASS_CLK_ID_WSA_CORE_MCLK",
+ },
+ },
+ .num_parents = 2,
+ },
+};
+
+static struct clk_gfm lpass_gfm_wsa_npl = {
+ .mux_reg = 0x220d8,
+ .mux_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "WSA_NPL",
+ .ops = &clk_gfm_ops,
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ .parent_data = (const struct clk_parent_data[]){
+ {
+ .index = 0,
+ .fw_name = "LPASS_CLK_ID_TX_CORE_NPL_MCLK",
+ }, {
+ .index = 1,
+ .fw_name = "LPASS_CLK_ID_WSA_CORE_NPL_MCLK",
+ },
+ },
+ .num_parents = 2,
+ },
+};
+
+static struct clk_gfm lpass_gfm_rx_mclk_mclk2 = {
+ .mux_reg = 0x240d8,
+ .mux_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "RX_MCLK_MCLK2",
+ .ops = &clk_gfm_ops,
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ .parent_data = (const struct clk_parent_data[]){
+ {
+ .index = 0,
+ .fw_name = "LPASS_CLK_ID_TX_CORE_MCLK",
+ }, {
+ .index = 1,
+ .fw_name = "LPASS_CLK_ID_RX_CORE_MCLK",
+ },
+ },
+ .num_parents = 2,
+ },
+};
+
+static struct clk_gfm lpass_gfm_rx_npl = {
+ .mux_reg = 0x240d8,
+ .mux_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "RX_NPL",
+ .ops = &clk_gfm_ops,
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ .parent_data = (const struct clk_parent_data[]){
+ {
+ .index = 0,
+ .fw_name = "LPASS_CLK_ID_TX_CORE_NPL_MCLK",
+ }, {
+ .index = 1,
+ .fw_name = "LPASS_CLK_ID_RX_CORE_NPL_MCLK",
+ },
+ },
+ .num_parents = 2,
+ },
+};
+
+static struct clk_gfm *aoncc_gfm_clks[] = {
+ [LPASS_CDC_VA_MCLK] = &lpass_gfm_va_mclk,
+ [LPASS_CDC_TX_NPL] = &lpass_gfm_tx_npl,
+};
+
+static struct clk_hw_onecell_data aoncc_hw_onecell_data = {
+ .hws = {
+ [LPASS_CDC_VA_MCLK] = &lpass_gfm_va_mclk.hw,
+ [LPASS_CDC_TX_NPL] = &lpass_gfm_tx_npl.hw,
+ },
+ .num = ARRAY_SIZE(aoncc_gfm_clks),
+};
+
+static struct clk_gfm *audiocc_gfm_clks[] = {
+ [LPASS_CDC_WSA_NPL] = &lpass_gfm_wsa_npl,
+ [LPASS_CDC_WSA_MCLK] = &lpass_gfm_wsa_mclk,
+ [LPASS_CDC_RX_NPL] = &lpass_gfm_rx_npl,
+ [LPASS_CDC_RX_MCLK_MCLK2] = &lpass_gfm_rx_mclk_mclk2,
+};
+
+static struct clk_hw_onecell_data audiocc_hw_onecell_data = {
+ .hws = {
+ [LPASS_CDC_WSA_NPL] = &lpass_gfm_wsa_npl.hw,
+ [LPASS_CDC_WSA_MCLK] = &lpass_gfm_wsa_mclk.hw,
+ [LPASS_CDC_RX_NPL] = &lpass_gfm_rx_npl.hw,
+ [LPASS_CDC_RX_MCLK_MCLK2] = &lpass_gfm_rx_mclk_mclk2.hw,
+ },
+ .num = ARRAY_SIZE(audiocc_gfm_clks),
+};
+
+struct lpass_gfm_data {
+ struct clk_hw_onecell_data *onecell_data;
+ struct clk_gfm **gfm_clks;
+};
+
+static struct lpass_gfm_data audiocc_data = {
+ .onecell_data = &audiocc_hw_onecell_data,
+ .gfm_clks = audiocc_gfm_clks,
+};
+
+static struct lpass_gfm_data aoncc_data = {
+ .onecell_data = &aoncc_hw_onecell_data,
+ .gfm_clks = aoncc_gfm_clks,
+};
+
+static int lpass_gfm_clk_driver_probe(struct platform_device *pdev)
+{
+ const struct lpass_gfm_data *data;
+ struct device *dev = &pdev->dev;
+ struct clk_gfm *gfm;
+ struct lpass_gfm *cc;
+ int err, i;
+
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -EINVAL;
+
+ cc = devm_kzalloc(dev, sizeof(*cc), GFP_KERNEL);
+ if (!cc)
+ return -ENOMEM;
+
+ cc->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(cc->base))
+ return PTR_ERR(cc->base);
+
+ pm_runtime_enable(dev);
+ err = pm_clk_create(dev);
+ if (err)
+ goto pm_clk_err;
+
+ err = of_pm_clk_add_clks(dev);
+ if (err < 0) {
+ dev_dbg(dev, "Failed to get lpass core voting clocks\n");
+ goto clk_reg_err;
+ }
+
+ for (i = 0; i < data->onecell_data->num; i++) {
+ if (!data->gfm_clks[i])
+ continue;
+
+ gfm = data->gfm_clks[i];
+ gfm->priv = cc;
+ gfm->gfm_mux = cc->base + data->gfm_clks[i]->mux_reg;
+
+ err = devm_clk_hw_register(dev, &data->gfm_clks[i]->hw);
+ if (err)
+ goto clk_reg_err;
+
+ }
+
+ err = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
+ data->onecell_data);
+ if (err)
+ goto clk_reg_err;
+
+ return 0;
+
+clk_reg_err:
+ pm_clk_destroy(dev);
+pm_clk_err:
+ pm_runtime_disable(dev);
+ return err;
+}
+
+static const struct of_device_id lpass_gfm_clk_match_table[] = {
+ {
+ .compatible = "qcom,sm8250-lpass-aoncc",
+ .data = &aoncc_data,
+ },
+ {
+ .compatible = "qcom,sm8250-lpass-audiocc",
+ .data = &audiocc_data,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, lpass_gfm_clk_match_table);
+
+static const struct dev_pm_ops lpass_gfm_pm_ops = {
+ SET_RUNTIME_PM_OPS(pm_clk_suspend, pm_clk_resume, NULL)
+};
+
+static struct platform_driver lpass_gfm_clk_driver = {
+ .probe = lpass_gfm_clk_driver_probe,
+ .driver = {
+ .name = "lpass-gfm-clk",
+ .of_match_table = lpass_gfm_clk_match_table,
+ .pm = &lpass_gfm_pm_ops,
+ },
+};
+module_platform_driver(lpass_gfm_clk_driver);
+MODULE_LICENSE("GPL v2");
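The glitch-free muxes above select between two firmware-provided parents in clk_gfm_set_parent(); consumers trigger that path through the generic clk API. A sketch with purely illustrative clock handles:

#include <linux/clk.h>

/*
 * Hypothetical consumer switching WSA_MCLK over to the WSA core clock.
 * Both handles are assumed to come from earlier clk_get() calls.
 */
static int example_select_wsa_parent(struct clk *wsa_mclk,
				     struct clk *wsa_core_mclk)
{
	int ret;

	/* Ends up in clk_gfm_set_parent(); parent index 1 sets GFM_MASK. */
	ret = clk_set_parent(wsa_mclk, wsa_core_mclk);
	if (ret)
		return ret;

	/* CLK_OPS_PARENT_ENABLE keeps both parents running while switching. */
	return clk_prepare_enable(wsa_mclk);
}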
diff --git a/drivers/clk/qcom/lpasscorecc-sc7180.c b/drivers/clk/qcom/lpasscorecc-sc7180.c
index 228d08f5d26f..2e0ecc38efdd 100644
--- a/drivers/clk/qcom/lpasscorecc-sc7180.c
+++ b/drivers/clk/qcom/lpasscorecc-sc7180.c
@@ -356,12 +356,52 @@ static const struct qcom_cc_desc lpass_audio_hm_sc7180_desc = {
.num_gdscs = ARRAY_SIZE(lpass_audio_hm_sc7180_gdscs),
};
+static void lpass_pm_runtime_disable(void *data)
+{
+ pm_runtime_disable(data);
+}
+
+static void lpass_pm_clk_destroy(void *data)
+{
+ pm_clk_destroy(data);
+}
+
+static int lpass_create_pm_clks(struct platform_device *pdev)
+{
+ int ret;
+
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
+ pm_runtime_enable(&pdev->dev);
+
+ ret = devm_add_action_or_reset(&pdev->dev, lpass_pm_runtime_disable, &pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = pm_clk_create(&pdev->dev);
+ if (ret)
+ return ret;
+ ret = devm_add_action_or_reset(&pdev->dev, lpass_pm_clk_destroy, &pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = pm_clk_add(&pdev->dev, "iface");
+ if (ret < 0)
+ dev_err(&pdev->dev, "failed to acquire iface clock\n");
+
+ return ret;
+}
+
static int lpass_core_cc_sc7180_probe(struct platform_device *pdev)
{
const struct qcom_cc_desc *desc;
struct regmap *regmap;
int ret;
+ ret = lpass_create_pm_clks(pdev);
+ if (ret)
+ return ret;
+
lpass_core_cc_sc7180_regmap_config.name = "lpass_audio_cc";
desc = &lpass_audio_hm_sc7180_desc;
ret = qcom_cc_probe_by_index(pdev, 1, desc);
@@ -386,12 +426,22 @@ static int lpass_core_cc_sc7180_probe(struct platform_device *pdev)
clk_fabia_pll_configure(&lpass_lpaaudio_dig_pll, regmap,
&lpass_lpaaudio_dig_pll_config);
- return qcom_cc_really_probe(pdev, &lpass_core_cc_sc7180_desc, regmap);
+ ret = qcom_cc_really_probe(pdev, &lpass_core_cc_sc7180_desc, regmap);
+
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
+
+ return ret;
}
static int lpass_hm_core_probe(struct platform_device *pdev)
{
const struct qcom_cc_desc *desc;
+ int ret;
+
+ ret = lpass_create_pm_clks(pdev);
+ if (ret)
+ return ret;
lpass_core_cc_sc7180_regmap_config.name = "lpass_hm_core";
desc = &lpass_core_hm_sc7180_desc;
@@ -399,61 +449,28 @@ static int lpass_hm_core_probe(struct platform_device *pdev)
return qcom_cc_probe_by_index(pdev, 0, desc);
}
-static const struct of_device_id lpass_core_cc_sc7180_match_table[] = {
+static const struct of_device_id lpass_hm_sc7180_match_table[] = {
{
.compatible = "qcom,sc7180-lpasshm",
- .data = lpass_hm_core_probe,
},
+ { }
+};
+MODULE_DEVICE_TABLE(of, lpass_hm_sc7180_match_table);
+
+static const struct of_device_id lpass_core_cc_sc7180_match_table[] = {
{
.compatible = "qcom,sc7180-lpasscorecc",
- .data = lpass_core_cc_sc7180_probe,
},
{ }
};
MODULE_DEVICE_TABLE(of, lpass_core_cc_sc7180_match_table);
-static int lpass_core_sc7180_probe(struct platform_device *pdev)
-{
- int (*clk_probe)(struct platform_device *p);
- int ret;
-
- pm_runtime_enable(&pdev->dev);
- ret = pm_clk_create(&pdev->dev);
- if (ret)
- goto disable_pm_runtime;
-
- ret = pm_clk_add(&pdev->dev, "iface");
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to acquire iface clock\n");
- goto destroy_pm_clk;
- }
-
- ret = -EINVAL;
- clk_probe = of_device_get_match_data(&pdev->dev);
- if (!clk_probe)
- goto destroy_pm_clk;
-
- ret = clk_probe(pdev);
- if (ret)
- goto destroy_pm_clk;
-
- return 0;
-
-destroy_pm_clk:
- pm_clk_destroy(&pdev->dev);
-
-disable_pm_runtime:
- pm_runtime_disable(&pdev->dev);
-
- return ret;
-}
-
static const struct dev_pm_ops lpass_core_cc_pm_ops = {
SET_RUNTIME_PM_OPS(pm_clk_suspend, pm_clk_resume, NULL)
};
static struct platform_driver lpass_core_cc_sc7180_driver = {
- .probe = lpass_core_sc7180_probe,
+ .probe = lpass_core_cc_sc7180_probe,
.driver = {
.name = "lpass_core_cc-sc7180",
.of_match_table = lpass_core_cc_sc7180_match_table,
@@ -461,17 +478,43 @@ static struct platform_driver lpass_core_cc_sc7180_driver = {
},
};
-static int __init lpass_core_cc_sc7180_init(void)
+static const struct dev_pm_ops lpass_hm_pm_ops = {
+ SET_RUNTIME_PM_OPS(pm_clk_suspend, pm_clk_resume, NULL)
+};
+
+static struct platform_driver lpass_hm_sc7180_driver = {
+ .probe = lpass_hm_core_probe,
+ .driver = {
+ .name = "lpass_hm-sc7180",
+ .of_match_table = lpass_hm_sc7180_match_table,
+ .pm = &lpass_hm_pm_ops,
+ },
+};
+
+static int __init lpass_sc7180_init(void)
{
- return platform_driver_register(&lpass_core_cc_sc7180_driver);
+ int ret;
+
+ ret = platform_driver_register(&lpass_core_cc_sc7180_driver);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&lpass_hm_sc7180_driver);
+ if (ret) {
+ platform_driver_unregister(&lpass_core_cc_sc7180_driver);
+ return ret;
+ }
+
+ return 0;
}
-subsys_initcall(lpass_core_cc_sc7180_init);
+subsys_initcall(lpass_sc7180_init);
-static void __exit lpass_core_cc_sc7180_exit(void)
+static void __exit lpass_sc7180_exit(void)
{
+ platform_driver_unregister(&lpass_hm_sc7180_driver);
platform_driver_unregister(&lpass_core_cc_sc7180_driver);
}
-module_exit(lpass_core_cc_sc7180_exit);
+module_exit(lpass_sc7180_exit);
MODULE_DESCRIPTION("QTI LPASS_CORE_CC SC7180 Driver");
MODULE_LICENSE("GPL v2");
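The rework above replaces hand-rolled error unwinding with devm_add_action_or_reset(), which registers a cleanup action on success and runs it immediately on failure. A minimal standalone sketch of the pattern, using a hypothetical setup helper:

#include <linux/device.h>
#include <linux/pm_runtime.h>

static void example_disable_pm(void *data)
{
	pm_runtime_disable(data);
}

static int example_setup_pm(struct device *dev)
{
	pm_runtime_enable(dev);

	/*
	 * On success the action fires automatically when the driver
	 * detaches; on failure it fires right away, so the caller never
	 * needs a manual error path for it.
	 */
	return devm_add_action_or_reset(dev, example_disable_pm, dev);
}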
diff --git a/drivers/clk/renesas/clk-sh73a0.c b/drivers/clk/renesas/clk-sh73a0.c
index 5f25a70bc61c..4146c1d717b9 100644
--- a/drivers/clk/renesas/clk-sh73a0.c
+++ b/drivers/clk/renesas/clk-sh73a0.c
@@ -121,7 +121,7 @@ sh73a0_cpg_register_clock(struct device_node *np, struct sh73a0_cpg *cpg,
(phy_no ? CPG_DSI1PHYCR : CPG_DSI0PHYCR);
parent_name = phy_no ? "dsi1pck" : "dsi0pck";
- mult = __raw_readl(dsi_reg);
+ mult = readl(dsi_reg);
if (!(mult & 0x8000))
mult = 1;
else
diff --git a/drivers/clk/renesas/r8a774a1-cpg-mssr.c b/drivers/clk/renesas/r8a774a1-cpg-mssr.c
index fd54b9f625da..4a43ebec7d5e 100644
--- a/drivers/clk/renesas/r8a774a1-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a774a1-cpg-mssr.c
@@ -41,6 +41,7 @@ enum clk_ids {
CLK_S2,
CLK_S3,
CLK_SDSRC,
+ CLK_RPCSRC,
CLK_RINT,
/* Module Clocks */
@@ -67,6 +68,12 @@ static const struct cpg_core_clk r8a774a1_core_clks[] __initconst = {
DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1),
DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1),
DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1),
+ DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN3_RPCSRC, CLK_PLL1),
+
+ DEF_BASE("rpc", R8A774A1_CLK_RPC, CLK_TYPE_GEN3_RPC,
+ CLK_RPCSRC),
+ DEF_BASE("rpcd2", R8A774A1_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2,
+ R8A774A1_CLK_RPC),
DEF_GEN3_OSC(".r", CLK_RINT, CLK_EXTAL, 32),
@@ -200,6 +207,7 @@ static const struct mssr_mod_clk r8a774a1_mod_clks[] __initconst = {
DEF_MOD("can-fd", 914, R8A774A1_CLK_S3D2),
DEF_MOD("can-if1", 915, R8A774A1_CLK_S3D4),
DEF_MOD("can-if0", 916, R8A774A1_CLK_S3D4),
+ DEF_MOD("rpc-if", 917, R8A774A1_CLK_RPCD2),
DEF_MOD("i2c6", 918, R8A774A1_CLK_S0D6),
DEF_MOD("i2c5", 919, R8A774A1_CLK_S0D6),
DEF_MOD("i2c-dvfs", 926, R8A774A1_CLK_CP),
diff --git a/drivers/clk/renesas/r8a774b1-cpg-mssr.c b/drivers/clk/renesas/r8a774b1-cpg-mssr.c
index f436691271ec..6f04c40fe237 100644
--- a/drivers/clk/renesas/r8a774b1-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a774b1-cpg-mssr.c
@@ -40,6 +40,7 @@ enum clk_ids {
CLK_S2,
CLK_S3,
CLK_SDSRC,
+ CLK_RPCSRC,
CLK_RINT,
/* Module Clocks */
@@ -65,6 +66,12 @@ static const struct cpg_core_clk r8a774b1_core_clks[] __initconst = {
DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1),
DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1),
DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1),
+ DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN3_RPCSRC, CLK_PLL1),
+
+ DEF_BASE("rpc", R8A774B1_CLK_RPC, CLK_TYPE_GEN3_RPC,
+ CLK_RPCSRC),
+ DEF_BASE("rpcd2", R8A774B1_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2,
+ R8A774B1_CLK_RPC),
DEF_GEN3_OSC(".r", CLK_RINT, CLK_EXTAL, 32),
@@ -196,6 +203,7 @@ static const struct mssr_mod_clk r8a774b1_mod_clks[] __initconst = {
DEF_MOD("can-fd", 914, R8A774B1_CLK_S3D2),
DEF_MOD("can-if1", 915, R8A774B1_CLK_S3D4),
DEF_MOD("can-if0", 916, R8A774B1_CLK_S3D4),
+ DEF_MOD("rpc-if", 917, R8A774B1_CLK_RPCD2),
DEF_MOD("i2c6", 918, R8A774B1_CLK_S0D6),
DEF_MOD("i2c5", 919, R8A774B1_CLK_S0D6),
DEF_MOD("i2c-dvfs", 926, R8A774B1_CLK_CP),
diff --git a/drivers/clk/renesas/r8a774c0-cpg-mssr.c b/drivers/clk/renesas/r8a774c0-cpg-mssr.c
index 9fc9fa9e531a..ed3a2cf0e0bb 100644
--- a/drivers/clk/renesas/r8a774c0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a774c0-cpg-mssr.c
@@ -44,6 +44,7 @@ enum clk_ids {
CLK_S2,
CLK_S3,
CLK_SDSRC,
+ CLK_RPCSRC,
CLK_RINT,
CLK_OCO,
@@ -74,6 +75,13 @@ static const struct cpg_core_clk r8a774c0_core_clks[] __initconst = {
DEF_FIXED(".s3", CLK_S3, CLK_PLL1, 6, 1),
DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1, 2, 1),
+ DEF_FIXED_RPCSRC_E3(".rpcsrc", CLK_RPCSRC, CLK_PLL0, CLK_PLL1),
+
+ DEF_BASE("rpc", R8A774C0_CLK_RPC, CLK_TYPE_GEN3_RPC,
+ CLK_RPCSRC),
+ DEF_BASE("rpcd2", R8A774C0_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2,
+ R8A774C0_CLK_RPC),
+
DEF_DIV6_RO(".r", CLK_RINT, CLK_EXTAL, CPG_RCKCR, 32),
DEF_RATE(".oco", CLK_OCO, 8 * 1000 * 1000),
@@ -199,6 +207,7 @@ static const struct mssr_mod_clk r8a774c0_mod_clks[] __initconst = {
DEF_MOD("can-fd", 914, R8A774C0_CLK_S3D2),
DEF_MOD("can-if1", 915, R8A774C0_CLK_S3D4),
DEF_MOD("can-if0", 916, R8A774C0_CLK_S3D4),
+ DEF_MOD("rpc-if", 917, R8A774C0_CLK_RPCD2),
DEF_MOD("i2c6", 918, R8A774C0_CLK_S3D2),
DEF_MOD("i2c5", 919, R8A774C0_CLK_S3D2),
DEF_MOD("i2c-dvfs", 926, R8A774C0_CLK_CP),
diff --git a/drivers/clk/renesas/r8a779a0-cpg-mssr.c b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
index 17ebbac7ddfb..aa5389b04d74 100644
--- a/drivers/clk/renesas/r8a779a0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
@@ -26,7 +26,6 @@
#include <dt-bindings/clock/r8a779a0-cpg-mssr.h>
#include "renesas-cpg-mssr.h"
-#include "rcar-gen3-cpg.h"
enum rcar_r8a779a0_clk_types {
CLK_TYPE_R8A779A0_MAIN = CLK_TYPE_CUSTOM,
@@ -84,6 +83,14 @@ enum clk_ids {
DEF_BASE(_name, _id, CLK_TYPE_R8A779A0_PLL2X_3X, CLK_MAIN, \
.offset = _offset)
+#define DEF_MDSEL(_name, _id, _md, _parent0, _div0, _parent1, _div1) \
+ DEF_BASE(_name, _id, CLK_TYPE_R8A779A0_MDSEL, \
+ (_parent0) << 16 | (_parent1), \
+ .div = (_div0) << 16 | (_div1), .offset = _md)
+
+#define DEF_OSC(_name, _id, _parent, _div) \
+ DEF_BASE(_name, _id, CLK_TYPE_R8A779A0_OSC, _parent, .div = _div)
+
static const struct cpg_core_clk r8a779a0_core_clks[] __initconst = {
/* External Clock Inputs */
DEF_INPUT("extal", CLK_EXTAL),
@@ -136,15 +143,51 @@ static const struct cpg_core_clk r8a779a0_core_clks[] __initconst = {
DEF_DIV6P1("canfd", R8A779A0_CLK_CANFD, CLK_PLL5_DIV4, 0x878),
DEF_DIV6P1("csi0", R8A779A0_CLK_CSI0, CLK_PLL5_DIV4, 0x880),
- DEF_GEN3_OSC("osc", R8A779A0_CLK_OSC, CLK_EXTAL, 8),
- DEF_GEN3_MDSEL("r", R8A779A0_CLK_R, 29, CLK_EXTALR, 1, CLK_OCO, 1),
+ DEF_OSC("osc", R8A779A0_CLK_OSC, CLK_EXTAL, 8),
+ DEF_MDSEL("r", R8A779A0_CLK_R, 29, CLK_EXTALR, 1, CLK_OCO, 1),
};
static const struct mssr_mod_clk r8a779a0_mod_clks[] __initconst = {
+ DEF_MOD("csi40", 331, R8A779A0_CLK_CSI0),
+ DEF_MOD("csi41", 400, R8A779A0_CLK_CSI0),
+ DEF_MOD("csi42", 401, R8A779A0_CLK_CSI0),
+ DEF_MOD("csi43", 402, R8A779A0_CLK_CSI0),
DEF_MOD("scif0", 702, R8A779A0_CLK_S1D8),
DEF_MOD("scif1", 703, R8A779A0_CLK_S1D8),
DEF_MOD("scif3", 704, R8A779A0_CLK_S1D8),
DEF_MOD("scif4", 705, R8A779A0_CLK_S1D8),
+ DEF_MOD("vin00", 730, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin01", 731, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin02", 800, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin03", 801, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin04", 802, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin05", 803, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin06", 804, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin07", 805, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin10", 806, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin11", 807, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin12", 808, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin13", 809, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin14", 810, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin15", 811, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin16", 812, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin17", 813, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin20", 814, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin21", 815, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin22", 816, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin23", 817, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin24", 818, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin25", 819, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin26", 820, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin27", 821, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin30", 822, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin31", 823, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin32", 824, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin33", 825, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin34", 826, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin35", 827, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin36", 828, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin37", 829, R8A779A0_CLK_S1D1),
};
static spinlock_t cpg_lock;
@@ -153,7 +196,7 @@ static const struct rcar_r8a779a0_cpg_pll_config *cpg_pll_config __initdata;
static unsigned int cpg_clk_extalr __initdata;
static u32 cpg_mode __initdata;
-struct clk * __init rcar_r8a779a0_cpg_clk_register(struct device *dev,
+static struct clk * __init rcar_r8a779a0_cpg_clk_register(struct device *dev,
const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
struct clk **clks, void __iomem *base,
struct raw_notifier_head *notifiers)
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.c b/drivers/clk/renesas/rcar-gen3-cpg.c
index 488f8b3980c5..063b61151488 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.c
+++ b/drivers/clk/renesas/rcar-gen3-cpg.c
@@ -224,10 +224,9 @@ static struct clk * __init cpg_z_clk_register(const char *name,
#define CPG_SD_STP_MASK (CPG_SD_STP_HCK | CPG_SD_STP_CK)
#define CPG_SD_FC_MASK (0x7 << 2 | 0x3 << 0)
-#define CPG_SD_DIV_TABLE_DATA(stp_hck, stp_ck, sd_srcfc, sd_fc, sd_div) \
+#define CPG_SD_DIV_TABLE_DATA(stp_hck, sd_srcfc, sd_fc, sd_div) \
{ \
.val = ((stp_hck) ? CPG_SD_STP_HCK : 0) | \
- ((stp_ck) ? CPG_SD_STP_CK : 0) | \
((sd_srcfc) << 2) | \
((sd_fc) << 0), \
.div = (sd_div), \
@@ -247,36 +246,36 @@ struct sd_clock {
};
/* SDn divider
- * sd_srcfc sd_fc div
- * stp_hck stp_ck (div) (div) = sd_srcfc x sd_fc
- *-------------------------------------------------------------------
- * 0 0 0 (1) 1 (4) 4 : SDR104 / HS200 / HS400 (8 TAP)
- * 0 0 1 (2) 1 (4) 8 : SDR50
- * 1 0 2 (4) 1 (4) 16 : HS / SDR25
- * 1 0 3 (8) 1 (4) 32 : NS / SDR12
- * 1 0 4 (16) 1 (4) 64
- * 0 0 0 (1) 0 (2) 2
- * 0 0 1 (2) 0 (2) 4 : SDR104 / HS200 / HS400 (4 TAP)
- * 1 0 2 (4) 0 (2) 8
- * 1 0 3 (8) 0 (2) 16
- * 1 0 4 (16) 0 (2) 32
+ * sd_srcfc sd_fc div
+ * stp_hck (div) (div) = sd_srcfc x sd_fc
+ *---------------------------------------------------------
+ * 0 0 (1) 1 (4) 4 : SDR104 / HS200 / HS400 (8 TAP)
+ * 0 1 (2) 1 (4) 8 : SDR50
+ * 1 2 (4) 1 (4) 16 : HS / SDR25
+ * 1 3 (8) 1 (4) 32 : NS / SDR12
+ * 1 4 (16) 1 (4) 64
+ * 0 0 (1) 0 (2) 2
+ * 0 1 (2) 0 (2) 4 : SDR104 / HS200 / HS400 (4 TAP)
+ * 1 2 (4) 0 (2) 8
+ * 1 3 (8) 0 (2) 16
+ * 1 4 (16) 0 (2) 32
*
* NOTE: There is a quirk option to ignore the first row of the dividers
* table when searching for suitable settings. This is because HS400 on
* early ES versions of H3 and M3-W requires a specific setting to work.
*/
static const struct sd_div_table cpg_sd_div_table[] = {
-/* CPG_SD_DIV_TABLE_DATA(stp_hck, stp_ck, sd_srcfc, sd_fc, sd_div) */
- CPG_SD_DIV_TABLE_DATA(0, 0, 0, 1, 4),
- CPG_SD_DIV_TABLE_DATA(0, 0, 1, 1, 8),
- CPG_SD_DIV_TABLE_DATA(1, 0, 2, 1, 16),
- CPG_SD_DIV_TABLE_DATA(1, 0, 3, 1, 32),
- CPG_SD_DIV_TABLE_DATA(1, 0, 4, 1, 64),
- CPG_SD_DIV_TABLE_DATA(0, 0, 0, 0, 2),
- CPG_SD_DIV_TABLE_DATA(0, 0, 1, 0, 4),
- CPG_SD_DIV_TABLE_DATA(1, 0, 2, 0, 8),
- CPG_SD_DIV_TABLE_DATA(1, 0, 3, 0, 16),
- CPG_SD_DIV_TABLE_DATA(1, 0, 4, 0, 32),
+/* CPG_SD_DIV_TABLE_DATA(stp_hck, sd_srcfc, sd_fc, sd_div) */
+ CPG_SD_DIV_TABLE_DATA(0, 0, 1, 4),
+ CPG_SD_DIV_TABLE_DATA(0, 1, 1, 8),
+ CPG_SD_DIV_TABLE_DATA(1, 2, 1, 16),
+ CPG_SD_DIV_TABLE_DATA(1, 3, 1, 32),
+ CPG_SD_DIV_TABLE_DATA(1, 4, 1, 64),
+ CPG_SD_DIV_TABLE_DATA(0, 0, 0, 2),
+ CPG_SD_DIV_TABLE_DATA(0, 1, 0, 4),
+ CPG_SD_DIV_TABLE_DATA(1, 2, 0, 8),
+ CPG_SD_DIV_TABLE_DATA(1, 3, 0, 16),
+ CPG_SD_DIV_TABLE_DATA(1, 4, 0, 32),
};
#define to_sd_clock(_hw) container_of(_hw, struct sd_clock, hw)
@@ -696,6 +695,34 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
cpg_rpcsrc_div_table,
&cpg_lock);
+ case CLK_TYPE_GEN3_E3_RPCSRC:
+ /*
+ * Register RPCSRC as a fixed-factor clock based on the
+ * MD[4:1] pins and the CPG_RPCCKCR[4:3] register value,
+ * which has been set prior to booting the kernel.
+ */
+ value = (readl(base + CPG_RPCCKCR) & GENMASK(4, 3)) >> 3;
+
+ switch (value) {
+ case 0:
+ div = 5;
+ break;
+ case 1:
+ div = 3;
+ break;
+ case 2:
+ parent = clks[core->parent >> 16];
+ if (IS_ERR(parent))
+ return ERR_CAST(parent);
+ div = core->div;
+ break;
+ case 3:
+ default:
+ div = 2;
+ break;
+ }
+ break;
+
case CLK_TYPE_GEN3_RPC:
return cpg_rpc_clk_register(core->name, base,
__clk_get_name(parent), notifiers);
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.h b/drivers/clk/renesas/rcar-gen3-cpg.h
index c4ac80cac6a0..3d949c4a3244 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.h
+++ b/drivers/clk/renesas/rcar-gen3-cpg.h
@@ -24,6 +24,7 @@ enum rcar_gen3_clk_types {
CLK_TYPE_GEN3_OSC, /* OSC EXTAL predivider and fixed divider */
CLK_TYPE_GEN3_RCKSEL, /* Select parent/divider using RCKCR.CKSEL */
CLK_TYPE_GEN3_RPCSRC,
+ CLK_TYPE_GEN3_E3_RPCSRC,
CLK_TYPE_GEN3_RPC,
CLK_TYPE_GEN3_RPCD2,
@@ -54,6 +55,10 @@ enum rcar_gen3_clk_types {
#define DEF_GEN3_Z(_name, _id, _type, _parent, _div, _offset) \
DEF_BASE(_name, _id, _type, _parent, .div = _div, .offset = _offset)
+#define DEF_FIXED_RPCSRC_E3(_name, _id, _parent0, _parent1) \
+ DEF_BASE(_name, _id, CLK_TYPE_GEN3_E3_RPCSRC, \
+ (_parent0) << 16 | (_parent1), .div = 8)
+
struct rcar_gen3_cpg_pll_config {
u8 extal_div;
u8 pll1_mult;
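The E3 RPCSRC handling added above derives a fixed divider from CPG_RPCCKCR[4:3]; selector 2 additionally switches to the alternate parent (PLL0 on R8A774C0) with the macro's div of 8. An equivalent table-driven decode, as a standalone sketch with an illustrative register offset:

#include <linux/bitfield.h>
#include <linux/io.h>

#define EXAMPLE_RPCCKCR		0x238	/* illustrative offset */
#define EXAMPLE_RPCCKCR_DIVSEL	GENMASK(4, 3)

/* Mirrors the switch statement: 0 -> /5, 1 -> /3, 2 -> /8, 3 -> /2. */
static unsigned int example_e3_rpcsrc_div(void __iomem *base)
{
	static const unsigned int divs[4] = { 5, 3, 8, 2 };
	u32 sel = FIELD_GET(EXAMPLE_RPCCKCR_DIVSEL,
			    readl(base + EXAMPLE_RPCCKCR));

	return divs[sel];
}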
diff --git a/drivers/clk/renesas/rcar-usb2-clock-sel.c b/drivers/clk/renesas/rcar-usb2-clock-sel.c
index d4c02986c34e..3abafd78f7c8 100644
--- a/drivers/clk/renesas/rcar-usb2-clock-sel.c
+++ b/drivers/clk/renesas/rcar-usb2-clock-sel.c
@@ -160,7 +160,7 @@ static int rcar_usb2_clock_sel_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- priv->rsts = devm_reset_control_array_get(dev, true, false);
+ priv->rsts = devm_reset_control_array_get_shared(dev);
if (IS_ERR(priv->rsts))
return PTR_ERR(priv->rsts);
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index 94db88370337..1c3215dc4877 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -119,7 +119,8 @@ static const u16 srstclr_for_v3u[] = {
};
/**
- * Clock Pulse Generator / Module Standby and Software Reset Private Data
+ * struct cpg_mssr_priv - Clock Pulse Generator / Module Standby
+ * and Software Reset Private Data
*
* @rcdev: Optional reset controller entity
* @dev: CPG/MSSR device
diff --git a/drivers/clk/rockchip/Kconfig b/drivers/clk/rockchip/Kconfig
index 47cd6c5de837..effd05032e85 100644
--- a/drivers/clk/rockchip/Kconfig
+++ b/drivers/clk/rockchip/Kconfig
@@ -11,67 +11,77 @@ config COMMON_CLK_ROCKCHIP
if COMMON_CLK_ROCKCHIP
config CLK_PX30
bool "Rockchip PX30 clock controller support"
+ depends on (ARM64 || COMPILE_TEST)
default y
help
Build the driver for PX30 Clock Driver.
config CLK_RV110X
bool "Rockchip RV110x clock controller support"
+ depends on (ARM || COMPILE_TEST)
default y
help
Build the driver for RV110x Clock Driver.
config CLK_RK3036
bool "Rockchip RK3036 clock controller support"
+ depends on (ARM || COMPILE_TEST)
default y
help
Build the driver for RK3036 Clock Driver.
config CLK_RK312X
bool "Rockchip RK312x clock controller support"
+ depends on (ARM || COMPILE_TEST)
default y
help
Build the driver for RK312x Clock Driver.
config CLK_RK3188
bool "Rockchip RK3188 clock controller support"
+ depends on (ARM || COMPILE_TEST)
default y
help
Build the driver for RK3188 Clock Driver.
config CLK_RK322X
bool "Rockchip RK322x clock controller support"
+ depends on (ARM || COMPILE_TEST)
default y
help
Build the driver for RK322x Clock Driver.
config CLK_RK3288
bool "Rockchip RK3288 clock controller support"
- depends on ARM
+ depends on (ARM || COMPILE_TEST)
default y
help
Build the driver for RK3288 Clock Driver.
config CLK_RK3308
bool "Rockchip RK3308 clock controller support"
+ depends on (ARM64 || COMPILE_TEST)
default y
help
Build the driver for RK3308 Clock Driver.
config CLK_RK3328
bool "Rockchip RK3328 clock controller support"
+ depends on (ARM64 || COMPILE_TEST)
default y
help
Build the driver for RK3328 Clock Driver.
config CLK_RK3368
bool "Rockchip RK3368 clock controller support"
+ depends on (ARM64 || COMPILE_TEST)
default y
help
Build the driver for RK3368 Clock Driver.
config CLK_RK3399
tristate "Rockchip RK3399 clock controller support"
+ depends on (ARM64 || COMPILE_TEST)
default y
help
Build the driver for RK3399 Clock Driver.
diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
index 730020fcc7fe..0b76ad34de00 100644
--- a/drivers/clk/rockchip/clk-rk3188.c
+++ b/drivers/clk/rockchip/clk-rk3188.c
@@ -255,19 +255,19 @@ static struct rockchip_clk_branch common_spdif_fracmux __initdata =
RK2928_CLKSEL_CON(5), 8, 2, MFLAGS);
static struct rockchip_clk_branch common_uart0_fracmux __initdata =
- MUX(SCLK_UART0, "sclk_uart0", mux_sclk_uart0_p, 0,
+ MUX(SCLK_UART0, "sclk_uart0", mux_sclk_uart0_p, CLK_SET_RATE_PARENT,
RK2928_CLKSEL_CON(13), 8, 2, MFLAGS);
static struct rockchip_clk_branch common_uart1_fracmux __initdata =
- MUX(SCLK_UART1, "sclk_uart1", mux_sclk_uart1_p, 0,
+ MUX(SCLK_UART1, "sclk_uart1", mux_sclk_uart1_p, CLK_SET_RATE_PARENT,
RK2928_CLKSEL_CON(14), 8, 2, MFLAGS);
static struct rockchip_clk_branch common_uart2_fracmux __initdata =
- MUX(SCLK_UART2, "sclk_uart2", mux_sclk_uart2_p, 0,
+ MUX(SCLK_UART2, "sclk_uart2", mux_sclk_uart2_p, CLK_SET_RATE_PARENT,
RK2928_CLKSEL_CON(15), 8, 2, MFLAGS);
static struct rockchip_clk_branch common_uart3_fracmux __initdata =
- MUX(SCLK_UART3, "sclk_uart3", mux_sclk_uart3_p, 0,
+ MUX(SCLK_UART3, "sclk_uart3", mux_sclk_uart3_p, CLK_SET_RATE_PARENT,
RK2928_CLKSEL_CON(16), 8, 2, MFLAGS);
static struct rockchip_clk_branch common_clk_branches[] __initdata = {
@@ -408,28 +408,28 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
COMPOSITE_NOMUX(0, "uart0_pre", "uart_src", 0,
RK2928_CLKSEL_CON(13), 0, 7, DFLAGS,
RK2928_CLKGATE_CON(1), 8, GFLAGS),
- COMPOSITE_FRACMUX(0, "uart0_frac", "uart0_pre", 0,
+ COMPOSITE_FRACMUX(0, "uart0_frac", "uart0_pre", CLK_SET_RATE_PARENT,
RK2928_CLKSEL_CON(17), 0,
RK2928_CLKGATE_CON(1), 9, GFLAGS,
&common_uart0_fracmux),
COMPOSITE_NOMUX(0, "uart1_pre", "uart_src", 0,
RK2928_CLKSEL_CON(14), 0, 7, DFLAGS,
RK2928_CLKGATE_CON(1), 10, GFLAGS),
- COMPOSITE_FRACMUX(0, "uart1_frac", "uart1_pre", 0,
+ COMPOSITE_FRACMUX(0, "uart1_frac", "uart1_pre", CLK_SET_RATE_PARENT,
RK2928_CLKSEL_CON(18), 0,
RK2928_CLKGATE_CON(1), 11, GFLAGS,
&common_uart1_fracmux),
COMPOSITE_NOMUX(0, "uart2_pre", "uart_src", 0,
RK2928_CLKSEL_CON(15), 0, 7, DFLAGS,
RK2928_CLKGATE_CON(1), 12, GFLAGS),
- COMPOSITE_FRACMUX(0, "uart2_frac", "uart2_pre", 0,
+ COMPOSITE_FRACMUX(0, "uart2_frac", "uart2_pre", CLK_SET_RATE_PARENT,
RK2928_CLKSEL_CON(19), 0,
RK2928_CLKGATE_CON(1), 13, GFLAGS,
&common_uart2_fracmux),
COMPOSITE_NOMUX(0, "uart3_pre", "uart_src", 0,
RK2928_CLKSEL_CON(16), 0, 7, DFLAGS,
RK2928_CLKGATE_CON(1), 14, GFLAGS),
- COMPOSITE_FRACMUX(0, "uart3_frac", "uart3_pre", 0,
+ COMPOSITE_FRACMUX(0, "uart3_frac", "uart3_pre", CLK_SET_RATE_PARENT,
RK2928_CLKSEL_CON(20), 0,
RK2928_CLKGATE_CON(1), 15, GFLAGS,
&common_uart3_fracmux),
@@ -449,7 +449,6 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
/* hclk_cpu gates */
GATE(HCLK_ROM, "hclk_rom", "hclk_cpu", 0, RK2928_CLKGATE_CON(5), 6, GFLAGS),
- GATE(HCLK_I2S0, "hclk_i2s0", "hclk_cpu", 0, RK2928_CLKGATE_CON(7), 2, GFLAGS),
GATE(HCLK_SPDIF, "hclk_spdif", "hclk_cpu", 0, RK2928_CLKGATE_CON(7), 1, GFLAGS),
GATE(0, "hclk_cpubus", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 8, GFLAGS),
/* hclk_ahb2apb is part of a clk branch */
@@ -543,15 +542,15 @@ static struct clk_div_table div_aclk_cpu_t[] = {
};
static struct rockchip_clk_branch rk3066a_i2s0_fracmux __initdata =
- MUX(SCLK_I2S0, "sclk_i2s0", mux_sclk_i2s0_p, 0,
+ MUX(SCLK_I2S0, "sclk_i2s0", mux_sclk_i2s0_p, CLK_SET_RATE_PARENT,
RK2928_CLKSEL_CON(2), 8, 2, MFLAGS);
static struct rockchip_clk_branch rk3066a_i2s1_fracmux __initdata =
- MUX(SCLK_I2S1, "sclk_i2s1", mux_sclk_i2s1_p, 0,
+ MUX(SCLK_I2S1, "sclk_i2s1", mux_sclk_i2s1_p, CLK_SET_RATE_PARENT,
RK2928_CLKSEL_CON(3), 8, 2, MFLAGS);
static struct rockchip_clk_branch rk3066a_i2s2_fracmux __initdata =
- MUX(SCLK_I2S2, "sclk_i2s2", mux_sclk_i2s2_p, 0,
+ MUX(SCLK_I2S2, "sclk_i2s2", mux_sclk_i2s2_p, CLK_SET_RATE_PARENT,
RK2928_CLKSEL_CON(4), 8, 2, MFLAGS);
static struct rockchip_clk_branch rk3066a_clk_branches[] __initdata = {
@@ -615,27 +614,28 @@ static struct rockchip_clk_branch rk3066a_clk_branches[] __initdata = {
COMPOSITE_NOMUX(0, "i2s0_pre", "i2s_src", 0,
RK2928_CLKSEL_CON(2), 0, 7, DFLAGS,
RK2928_CLKGATE_CON(0), 7, GFLAGS),
- COMPOSITE_FRACMUX(0, "i2s0_frac", "i2s0_pre", 0,
+ COMPOSITE_FRACMUX(0, "i2s0_frac", "i2s0_pre", CLK_SET_RATE_PARENT,
RK2928_CLKSEL_CON(6), 0,
RK2928_CLKGATE_CON(0), 8, GFLAGS,
&rk3066a_i2s0_fracmux),
COMPOSITE_NOMUX(0, "i2s1_pre", "i2s_src", 0,
RK2928_CLKSEL_CON(3), 0, 7, DFLAGS,
RK2928_CLKGATE_CON(0), 9, GFLAGS),
- COMPOSITE_FRACMUX(0, "i2s1_frac", "i2s1_pre", 0,
+ COMPOSITE_FRACMUX(0, "i2s1_frac", "i2s1_pre", CLK_SET_RATE_PARENT,
RK2928_CLKSEL_CON(7), 0,
RK2928_CLKGATE_CON(0), 10, GFLAGS,
&rk3066a_i2s1_fracmux),
COMPOSITE_NOMUX(0, "i2s2_pre", "i2s_src", 0,
RK2928_CLKSEL_CON(4), 0, 7, DFLAGS,
RK2928_CLKGATE_CON(0), 11, GFLAGS),
- COMPOSITE_FRACMUX(0, "i2s2_frac", "i2s2_pre", 0,
+ COMPOSITE_FRACMUX(0, "i2s2_frac", "i2s2_pre", CLK_SET_RATE_PARENT,
RK2928_CLKSEL_CON(8), 0,
RK2928_CLKGATE_CON(0), 12, GFLAGS,
&rk3066a_i2s2_fracmux),
- GATE(HCLK_I2S1, "hclk_i2s1", "hclk_cpu", 0, RK2928_CLKGATE_CON(7), 3, GFLAGS),
- GATE(HCLK_I2S2, "hclk_i2s2", "hclk_cpu", 0, RK2928_CLKGATE_CON(7), 4, GFLAGS),
+ GATE(HCLK_I2S0, "hclk_i2s0", "hclk_cpu", 0, RK2928_CLKGATE_CON(7), 4, GFLAGS),
+ GATE(HCLK_I2S1, "hclk_i2s1", "hclk_cpu", 0, RK2928_CLKGATE_CON(7), 2, GFLAGS),
+ GATE(HCLK_I2S2, "hclk_i2s2", "hclk_cpu", 0, RK2928_CLKGATE_CON(7), 3, GFLAGS),
GATE(HCLK_CIF1, "hclk_cif1", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 6, GFLAGS),
GATE(HCLK_HDMI, "hclk_hdmi", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 14, GFLAGS),
@@ -728,6 +728,7 @@ static struct rockchip_clk_branch rk3188_clk_branches[] __initdata = {
RK2928_CLKGATE_CON(0), 10, GFLAGS,
&rk3188_i2s0_fracmux),
+ GATE(HCLK_I2S0, "hclk_i2s0", "hclk_cpu", 0, RK2928_CLKGATE_CON(7), 2, GFLAGS),
GATE(0, "hclk_imem0", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 14, GFLAGS),
GATE(0, "hclk_imem1", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 15, GFLAGS),
diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
index b443169dd408..336481bc6cc7 100644
--- a/drivers/clk/rockchip/clk.c
+++ b/drivers/clk/rockchip/clk.c
@@ -603,8 +603,7 @@ void rockchip_clk_protect_critical(const char *const clocks[],
for (i = 0; i < nclocks; i++) {
struct clk *clk = __clk_lookup(clocks[i]);
- if (clk)
- clk_prepare_enable(clk);
+ clk_prepare_enable(clk);
}
}
EXPORT_SYMBOL_GPL(rockchip_clk_protect_critical);
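The dropped NULL check above is safe because the common clk API treats a NULL clk as a dummy clock: clk_prepare_enable(NULL) is a no-op that returns 0. A minimal sketch of the same NULL-tolerant pattern:

#include <linux/clk.h>

/* Enable an array of critical clocks, silently skipping missing ones. */
static void example_enable_critical(struct clk *clks[], int nclks)
{
	int i;

	for (i = 0; i < nclks; i++)
		clk_prepare_enable(clks[i]);	/* NULL clk returns 0 */
}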
diff --git a/drivers/clk/samsung/Kconfig b/drivers/clk/samsung/Kconfig
index 57d4b3f20417..0441c4f73ac9 100644
--- a/drivers/clk/samsung/Kconfig
+++ b/drivers/clk/samsung/Kconfig
@@ -2,10 +2,73 @@
# Recent Exynos platforms should just select COMMON_CLK_SAMSUNG:
config COMMON_CLK_SAMSUNG
bool "Samsung Exynos clock controller support" if COMPILE_TEST
- # Clocks on ARM64 SoCs (e.g. Exynos5433, Exynos7) are chosen by
- # EXYNOS_ARM64_COMMON_CLK to avoid building them on ARMv7:
+ select S3C64XX_COMMON_CLK if ARM && ARCH_S3C64XX
+ select S5PV210_COMMON_CLK if ARM && ARCH_S5PV210
+ select EXYNOS_3250_COMMON_CLK if ARM && SOC_EXYNOS3250
+ select EXYNOS_4_COMMON_CLK if ARM && ARCH_EXYNOS4
+ select EXYNOS_5250_COMMON_CLK if ARM && SOC_EXYNOS5250
+ select EXYNOS_5260_COMMON_CLK if ARM && SOC_EXYNOS5260
+ select EXYNOS_5410_COMMON_CLK if ARM && SOC_EXYNOS5410
+ select EXYNOS_5420_COMMON_CLK if ARM && SOC_EXYNOS5420
select EXYNOS_ARM64_COMMON_CLK if ARM64 && ARCH_EXYNOS
+config S3C64XX_COMMON_CLK
+ bool "Samsung S3C64xx clock controller support" if COMPILE_TEST
+ depends on COMMON_CLK_SAMSUNG
+ help
+ Support for the clock controller present on the Samsung S3C64xx SoCs.
+ Choose Y here only if you build for this SoC.
+
+config S5PV210_COMMON_CLK
+ bool "Samsung S5Pv210 clock controller support" if COMPILE_TEST
+ depends on COMMON_CLK_SAMSUNG
+ help
+ Support for the clock controller present on the Samsung S5Pv210 SoCs.
+ Choose Y here only if you build for this SoC.
+
+config EXYNOS_3250_COMMON_CLK
+ bool "Samsung Exynos3250 clock controller support" if COMPILE_TEST
+ depends on COMMON_CLK_SAMSUNG
+ help
+ Support for the clock controller present on the Samsung
+ Exynos3250 SoCs. Choose Y here only if you build for this SoC.
+
+config EXYNOS_4_COMMON_CLK
+ bool "Samsung Exynos4 clock controller support" if COMPILE_TEST
+ depends on COMMON_CLK_SAMSUNG
+ help
+ Support for the clock controller present on the Samsung
+ Exynos4212 and Exynos4412 SoCs. Choose Y here only if you build for
+ these SoCs.
+
+config EXYNOS_5250_COMMON_CLK
+ bool "Samsung Exynos5250 clock controller support" if COMPILE_TEST
+ depends on COMMON_CLK_SAMSUNG
+ help
+ Support for the clock controller present on the Samsung
+ Exynos5250 SoCs. Choose Y here only if you build for this SoC.
+
+config EXYNOS_5260_COMMON_CLK
+ bool "Samsung Exynos5260 clock controller support" if COMPILE_TEST
+ depends on COMMON_CLK_SAMSUNG
+ help
+ Support for the clock controller present on the Samsung
+ Exynos5260 SoCs. Choose Y here only if you build for this SoC.
+
+config EXYNOS_5410_COMMON_CLK
+ bool "Samsung Exynos5410 clock controller support" if COMPILE_TEST
+ depends on COMMON_CLK_SAMSUNG
+ help
+ Support for the clock controller present on the Samsung
+ Exynos5410 SoCs. Choose Y here only if you build for this SoC.
+
+config EXYNOS_5420_COMMON_CLK
+ bool "Samsung Exynos5420 clock controller support" if COMPILE_TEST
+ depends on COMMON_CLK_SAMSUNG
+ help
+ Support for the clock controller present on the Samsung
+ Exynos5420 SoCs. Choose Y here only if you build for this SoC.
+
config EXYNOS_ARM64_COMMON_CLK
bool "Samsung Exynos ARMv8-family clock controller support" if COMPILE_TEST
depends on COMMON_CLK_SAMSUNG
@@ -19,6 +82,16 @@ config EXYNOS_AUDSS_CLK_CON
on some Exynos SoC variants. Choose M or Y here if you want to
use audio devices such as I2S, PCM, etc.
+config EXYNOS_CLKOUT
+ tristate "Samsung Exynos clock output driver"
+ depends on COMMON_CLK_SAMSUNG
+ default y if ARCH_EXYNOS
+ help
+ Support for the clock output (XCLKOUT) present on some Exynos SoC
+ variants. Usually the XCLKOUT is used to monitor the status of
+ certain clocks of the SoC, but it can also be tied to other devices
+ as an input clock.
+
# For S3C24XX platforms, select following symbols:
config S3C2410_COMMON_CLK
bool "Samsung S3C2410 clock controller support" if COMPILE_TEST
diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile
index 1a4e6b787978..028b2e27a37e 100644
--- a/drivers/clk/samsung/Makefile
+++ b/drivers/clk/samsung/Makefile
@@ -4,22 +4,22 @@
#
obj-$(CONFIG_COMMON_CLK) += clk.o clk-pll.o clk-cpu.o
-obj-$(CONFIG_SOC_EXYNOS3250) += clk-exynos3250.o
-obj-$(CONFIG_ARCH_EXYNOS4) += clk-exynos4.o
-obj-$(CONFIG_ARCH_EXYNOS4) += clk-exynos4412-isp.o
-obj-$(CONFIG_SOC_EXYNOS5250) += clk-exynos5250.o
-obj-$(CONFIG_SOC_EXYNOS5250) += clk-exynos5-subcmu.o
-obj-$(CONFIG_SOC_EXYNOS5260) += clk-exynos5260.o
-obj-$(CONFIG_SOC_EXYNOS5410) += clk-exynos5410.o
-obj-$(CONFIG_SOC_EXYNOS5420) += clk-exynos5420.o
-obj-$(CONFIG_SOC_EXYNOS5420) += clk-exynos5-subcmu.o
+obj-$(CONFIG_EXYNOS_3250_COMMON_CLK) += clk-exynos3250.o
+obj-$(CONFIG_EXYNOS_4_COMMON_CLK) += clk-exynos4.o
+obj-$(CONFIG_EXYNOS_4_COMMON_CLK) += clk-exynos4412-isp.o
+obj-$(CONFIG_EXYNOS_5250_COMMON_CLK) += clk-exynos5250.o
+obj-$(CONFIG_EXYNOS_5250_COMMON_CLK) += clk-exynos5-subcmu.o
+obj-$(CONFIG_EXYNOS_5260_COMMON_CLK) += clk-exynos5260.o
+obj-$(CONFIG_EXYNOS_5410_COMMON_CLK) += clk-exynos5410.o
+obj-$(CONFIG_EXYNOS_5420_COMMON_CLK) += clk-exynos5420.o
+obj-$(CONFIG_EXYNOS_5420_COMMON_CLK) += clk-exynos5-subcmu.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos5433.o
obj-$(CONFIG_EXYNOS_AUDSS_CLK_CON) += clk-exynos-audss.o
-obj-$(CONFIG_ARCH_EXYNOS) += clk-exynos-clkout.o
+obj-$(CONFIG_EXYNOS_CLKOUT) += clk-exynos-clkout.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos7.o
obj-$(CONFIG_S3C2410_COMMON_CLK)+= clk-s3c2410.o
obj-$(CONFIG_S3C2410_COMMON_DCLK)+= clk-s3c2410-dclk.o
obj-$(CONFIG_S3C2412_COMMON_CLK)+= clk-s3c2412.o
obj-$(CONFIG_S3C2443_COMMON_CLK)+= clk-s3c2443.o
-obj-$(CONFIG_ARCH_S3C64XX) += clk-s3c64xx.o
-obj-$(CONFIG_ARCH_S5PV210) += clk-s5pv210.o clk-s5pv210-audss.o
+obj-$(CONFIG_S3C64XX_COMMON_CLK) += clk-s3c64xx.o
+obj-$(CONFIG_S5PV210_COMMON_CLK) += clk-s5pv210.o clk-s5pv210-audss.o
diff --git a/drivers/clk/samsung/clk-exynos-clkout.c b/drivers/clk/samsung/clk-exynos-clkout.c
index 34ccb1d23bc3..e6d6cbf8c4e6 100644
--- a/drivers/clk/samsung/clk-exynos-clkout.c
+++ b/drivers/clk/samsung/clk-exynos-clkout.c
@@ -9,10 +9,13 @@
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/module.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/syscore_ops.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
#define EXYNOS_CLKOUT_NR_CLKS 1
#define EXYNOS_CLKOUT_PARENTS 32
@@ -28,41 +31,103 @@ struct exynos_clkout {
struct clk_mux mux;
spinlock_t slock;
void __iomem *reg;
+ struct device_node *np;
u32 pmu_debug_save;
struct clk_hw_onecell_data data;
};
-static struct exynos_clkout *clkout;
+struct exynos_clkout_variant {
+ u32 mux_mask;
+};
-static int exynos_clkout_suspend(void)
-{
- clkout->pmu_debug_save = readl(clkout->reg + EXYNOS_PMU_DEBUG_REG);
+static const struct exynos_clkout_variant exynos_clkout_exynos4 = {
+ .mux_mask = EXYNOS4_CLKOUT_MUX_MASK,
+};
- return 0;
-}
+static const struct exynos_clkout_variant exynos_clkout_exynos5 = {
+ .mux_mask = EXYNOS5_CLKOUT_MUX_MASK,
+};
-static void exynos_clkout_resume(void)
+static const struct of_device_id exynos_clkout_ids[] = {
+ {
+ .compatible = "samsung,exynos3250-pmu",
+ .data = &exynos_clkout_exynos4,
+ }, {
+ .compatible = "samsung,exynos4210-pmu",
+ .data = &exynos_clkout_exynos4,
+ }, {
+ .compatible = "samsung,exynos4412-pmu",
+ .data = &exynos_clkout_exynos4,
+ }, {
+ .compatible = "samsung,exynos5250-pmu",
+ .data = &exynos_clkout_exynos5,
+ }, {
+ .compatible = "samsung,exynos5410-pmu",
+ .data = &exynos_clkout_exynos5,
+ }, {
+ .compatible = "samsung,exynos5420-pmu",
+ .data = &exynos_clkout_exynos5,
+ }, {
+ .compatible = "samsung,exynos5433-pmu",
+ .data = &exynos_clkout_exynos5,
+ }, { }
+};
+MODULE_DEVICE_TABLE(of, exynos_clkout_ids);
+
+/*
+ * The device will be instantiated as a child of the PMU device, without
+ * its own device node. Therefore match compatibles against the parent.
+ */
+static int exynos_clkout_match_parent_dev(struct device *dev, u32 *mux_mask)
{
- writel(clkout->pmu_debug_save, clkout->reg + EXYNOS_PMU_DEBUG_REG);
-}
+ const struct exynos_clkout_variant *variant;
+ const struct of_device_id *match;
-static struct syscore_ops exynos_clkout_syscore_ops = {
- .suspend = exynos_clkout_suspend,
- .resume = exynos_clkout_resume,
-};
+ if (!dev->parent) {
+ dev_err(dev, "not instantiated from MFD\n");
+ return -EINVAL;
+ }
+
+ match = of_match_device(exynos_clkout_ids, dev->parent);
+ if (!match) {
+ dev_err(dev, "cannot match parent device\n");
+ return -EINVAL;
+ }
+ variant = match->data;
-static void __init exynos_clkout_init(struct device_node *node, u32 mux_mask)
+ *mux_mask = variant->mux_mask;
+
+ return 0;
+}
+
+static int exynos_clkout_probe(struct platform_device *pdev)
{
const char *parent_names[EXYNOS_CLKOUT_PARENTS];
struct clk *parents[EXYNOS_CLKOUT_PARENTS];
- int parent_count;
- int ret;
- int i;
+ struct exynos_clkout *clkout;
+ int parent_count, ret, i;
+ u32 mux_mask;
- clkout = kzalloc(struct_size(clkout, data.hws, EXYNOS_CLKOUT_NR_CLKS),
- GFP_KERNEL);
+ clkout = devm_kzalloc(&pdev->dev,
+ struct_size(clkout, data.hws, EXYNOS_CLKOUT_NR_CLKS),
+ GFP_KERNEL);
if (!clkout)
- return;
+ return -ENOMEM;
+
+ ret = exynos_clkout_match_parent_dev(&pdev->dev, &mux_mask);
+ if (ret)
+ return ret;
+
+ clkout->np = pdev->dev.of_node;
+ if (!clkout->np) {
+ /*
+ * pdev->dev.parent was checked by exynos_clkout_match_parent_dev()
+ * so it is not NULL.
+ */
+ clkout->np = pdev->dev.parent->of_node;
+ }
+
+ platform_set_drvdata(pdev, clkout);
spin_lock_init(&clkout->slock);
@@ -71,7 +136,7 @@ static void __init exynos_clkout_init(struct device_node *node, u32 mux_mask)
char name[] = "clkoutXX";
snprintf(name, sizeof(name), "clkout%d", i);
- parents[i] = of_clk_get_by_name(node, name);
+ parents[i] = of_clk_get_by_name(clkout->np, name);
if (IS_ERR(parents[i])) {
parent_names[i] = "none";
continue;
@@ -82,11 +147,13 @@ static void __init exynos_clkout_init(struct device_node *node, u32 mux_mask)
}
if (!parent_count)
- goto free_clkout;
+ return -EINVAL;
- clkout->reg = of_iomap(node, 0);
- if (!clkout->reg)
+ clkout->reg = of_iomap(clkout->np, 0);
+ if (!clkout->reg) {
+ ret = -ENODEV;
goto clks_put;
+ }
clkout->gate.reg = clkout->reg + EXYNOS_PMU_DEBUG_REG;
clkout->gate.bit_idx = EXYNOS_CLKOUT_DISABLE_SHIFT;
@@ -103,17 +170,17 @@ static void __init exynos_clkout_init(struct device_node *node, u32 mux_mask)
&clk_mux_ops, NULL, NULL, &clkout->gate.hw,
&clk_gate_ops, CLK_SET_RATE_PARENT
| CLK_SET_RATE_NO_REPARENT);
- if (IS_ERR(clkout->data.hws[0]))
+ if (IS_ERR(clkout->data.hws[0])) {
+ ret = PTR_ERR(clkout->data.hws[0]);
goto err_unmap;
+ }
clkout->data.num = EXYNOS_CLKOUT_NR_CLKS;
- ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, &clkout->data);
+ ret = of_clk_add_hw_provider(clkout->np, of_clk_hw_onecell_get, &clkout->data);
if (ret)
goto err_clk_unreg;
- register_syscore_ops(&exynos_clkout_syscore_ops);
-
- return;
+ return 0;
err_clk_unreg:
clk_hw_unregister(clkout->data.hws[0]);
@@ -123,38 +190,56 @@ clks_put:
for (i = 0; i < EXYNOS_CLKOUT_PARENTS; ++i)
if (!IS_ERR(parents[i]))
clk_put(parents[i]);
-free_clkout:
- kfree(clkout);
- pr_err("%s: failed to register clkout clock\n", __func__);
+ dev_err(&pdev->dev, "failed to register clkout clock\n");
+
+ return ret;
}
-/*
- * We use CLK_OF_DECLARE_DRIVER initialization method to avoid setting
- * the OF_POPULATED flag on the pmu device tree node, so later the
- * Exynos PMU platform device can be properly probed with PMU driver.
- */
+static int exynos_clkout_remove(struct platform_device *pdev)
+{
+ struct exynos_clkout *clkout = platform_get_drvdata(pdev);
+
+ of_clk_del_provider(clkout->np);
+ clk_hw_unregister(clkout->data.hws[0]);
+ iounmap(clkout->reg);
+
+ return 0;
+}
-static void __init exynos4_clkout_init(struct device_node *node)
+static int __maybe_unused exynos_clkout_suspend(struct device *dev)
{
- exynos_clkout_init(node, EXYNOS4_CLKOUT_MUX_MASK);
+ struct exynos_clkout *clkout = dev_get_drvdata(dev);
+
+ clkout->pmu_debug_save = readl(clkout->reg + EXYNOS_PMU_DEBUG_REG);
+
+ return 0;
}
-CLK_OF_DECLARE_DRIVER(exynos4210_clkout, "samsung,exynos4210-pmu",
- exynos4_clkout_init);
-CLK_OF_DECLARE_DRIVER(exynos4412_clkout, "samsung,exynos4412-pmu",
- exynos4_clkout_init);
-CLK_OF_DECLARE_DRIVER(exynos3250_clkout, "samsung,exynos3250-pmu",
- exynos4_clkout_init);
-
-static void __init exynos5_clkout_init(struct device_node *node)
+
+static int __maybe_unused exynos_clkout_resume(struct device *dev)
{
- exynos_clkout_init(node, EXYNOS5_CLKOUT_MUX_MASK);
+ struct exynos_clkout *clkout = dev_get_drvdata(dev);
+
+ writel(clkout->pmu_debug_save, clkout->reg + EXYNOS_PMU_DEBUG_REG);
+
+ return 0;
}
-CLK_OF_DECLARE_DRIVER(exynos5250_clkout, "samsung,exynos5250-pmu",
- exynos5_clkout_init);
-CLK_OF_DECLARE_DRIVER(exynos5410_clkout, "samsung,exynos5410-pmu",
- exynos5_clkout_init);
-CLK_OF_DECLARE_DRIVER(exynos5420_clkout, "samsung,exynos5420-pmu",
- exynos5_clkout_init);
-CLK_OF_DECLARE_DRIVER(exynos5433_clkout, "samsung,exynos5433-pmu",
- exynos5_clkout_init);
+
+static SIMPLE_DEV_PM_OPS(exynos_clkout_pm_ops, exynos_clkout_suspend,
+ exynos_clkout_resume);
+
+static struct platform_driver exynos_clkout_driver = {
+ .driver = {
+ .name = "exynos-clkout",
+ .of_match_table = exynos_clkout_ids,
+ .pm = &exynos_clkout_pm_ops,
+ },
+ .probe = exynos_clkout_probe,
+ .remove = exynos_clkout_remove,
+};
+module_platform_driver(exynos_clkout_driver);
+
+MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>");
+MODULE_AUTHOR("Tomasz Figa <tomasz.figa@gmail.com>");
+MODULE_DESCRIPTION("Samsung Exynos clock output driver");
+MODULE_LICENSE("GPL");
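The conversion above moves the PMU_DEBUG register save/restore from syscore ops to dev_pm_ops wired up via SIMPLE_DEV_PM_OPS. A standalone sketch of that save/restore pattern, with an illustrative driver state structure:

#include <linux/device.h>
#include <linux/io.h>
#include <linux/pm.h>

struct example_state {
	void __iomem *reg;	/* register that loses state in suspend */
	u32 saved;
};

static int __maybe_unused example_suspend(struct device *dev)
{
	struct example_state *st = dev_get_drvdata(dev);

	st->saved = readl(st->reg);	/* snapshot before power-down */
	return 0;
}

static int __maybe_unused example_resume(struct device *dev)
{
	struct example_state *st = dev_get_drvdata(dev);

	writel(st->saved, st->reg);	/* restore after wake-up */
	return 0;
}

static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);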
diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c
index ac70ad785d8e..5873a9354b50 100644
--- a/drivers/clk/samsung/clk-pll.c
+++ b/drivers/clk/samsung/clk-pll.c
@@ -8,14 +8,17 @@
#include <linux/errno.h>
#include <linux/hrtimer.h>
+#include <linux/iopoll.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/timekeeping.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include "clk.h"
#include "clk-pll.h"
-#define PLL_TIMEOUT_MS 10
+#define PLL_TIMEOUT_US 20000U
+#define PLL_TIMEOUT_LOOPS 1000000U
struct samsung_clk_pll {
struct clk_hw hw;
@@ -63,6 +66,53 @@ static long samsung_pll_round_rate(struct clk_hw *hw,
return rate_table[i - 1].rate;
}
+static bool pll_early_timeout = true;
+
+static int __init samsung_pll_disable_early_timeout(void)
+{
+ pll_early_timeout = false;
+ return 0;
+}
+arch_initcall(samsung_pll_disable_early_timeout);
+
+/* Wait until the PLL is locked */
+static int samsung_pll_lock_wait(struct samsung_clk_pll *pll,
+ unsigned int reg_mask)
+{
+ int i, ret;
+ u32 val;
+
+ /*
+ * This function might be called when the timekeeping API can't be used
+ * to detect timeouts. One situation is when the clocksource is not yet
+ * initialized; another is when timekeeping is suspended. udelay()
+ * also cannot be used when the clocksource is not running on arm64,
+ * since the current timer is used as the cycle counter. So a simple
+ * busy loop is used here in those special cases. The iteration limit
+ * has been derived from experimental measurements of various PLLs on
+ * multiple Exynos SoC variants. A single register read usually took
+ * 0.4-1.5 us, and never less than 0.4 us.
+ */
+ if (pll_early_timeout || timekeeping_suspended) {
+ i = PLL_TIMEOUT_LOOPS;
+ while (i-- > 0) {
+ if (readl_relaxed(pll->con_reg) & reg_mask)
+ return 0;
+
+ cpu_relax();
+ }
+ ret = -ETIMEDOUT;
+ } else {
+ ret = readl_relaxed_poll_timeout_atomic(pll->con_reg, val,
+ val & reg_mask, 0, PLL_TIMEOUT_US);
+ }
+
+ if (ret < 0)
+ pr_err("Could not lock PLL %s\n", clk_hw_get_name(&pll->hw));
+
+ return ret;
+}
+
static int samsung_pll3xxx_enable(struct clk_hw *hw)
{
struct samsung_clk_pll *pll = to_clk_pll(hw);
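When timekeeping is usable, the new samsung_pll_lock_wait() above delegates to the iopoll helper rather than the open-coded busy loops it replaces in the hunks below. A standalone sketch of that helper, with an illustrative register and timeout:

#include <linux/io.h>
#include <linux/iopoll.h>

/*
 * Spin (no sleeping, hence the _atomic variant) until "*reg & mask"
 * becomes non-zero or 20 ms elapse; returns 0 or -ETIMEDOUT.
 */
static int example_wait_locked(void __iomem *reg, u32 mask)
{
	u32 val;

	return readl_relaxed_poll_timeout_atomic(reg, val, val & mask,
						 0, 20000);
}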
@@ -72,13 +122,7 @@ static int samsung_pll3xxx_enable(struct clk_hw *hw)
tmp |= BIT(pll->enable_offs);
writel_relaxed(tmp, pll->con_reg);
- /* wait lock time */
- do {
- cpu_relax();
- tmp = readl_relaxed(pll->con_reg);
- } while (!(tmp & BIT(pll->lock_offs)));
-
- return 0;
+ return samsung_pll_lock_wait(pll, BIT(pll->lock_offs));
}
static void samsung_pll3xxx_disable(struct clk_hw *hw)
@@ -240,13 +284,10 @@ static int samsung_pll35xx_set_rate(struct clk_hw *hw, unsigned long drate,
(rate->sdiv << PLL35XX_SDIV_SHIFT);
writel_relaxed(tmp, pll->con_reg);
- /* Wait until the PLL is locked if it is enabled. */
- if (tmp & BIT(pll->enable_offs)) {
- do {
- cpu_relax();
- tmp = readl_relaxed(pll->con_reg);
- } while (!(tmp & BIT(pll->lock_offs)));
- }
+ /* Wait for PLL lock if the PLL is enabled */
+ if (tmp & BIT(pll->enable_offs))
+ return samsung_pll_lock_wait(pll, BIT(pll->lock_offs));
+
return 0;
}
@@ -318,7 +359,7 @@ static int samsung_pll36xx_set_rate(struct clk_hw *hw, unsigned long drate,
unsigned long parent_rate)
{
struct samsung_clk_pll *pll = to_clk_pll(hw);
- u32 tmp, pll_con0, pll_con1;
+ u32 pll_con0, pll_con1;
const struct samsung_pll_rate_table *rate;
rate = samsung_get_pll_settings(pll, drate);
@@ -356,13 +397,8 @@ static int samsung_pll36xx_set_rate(struct clk_hw *hw, unsigned long drate,
pll_con1 |= rate->kdiv << PLL36XX_KDIV_SHIFT;
writel_relaxed(pll_con1, pll->con_reg + 4);
- /* wait_lock_time */
- if (pll_con0 & BIT(pll->enable_offs)) {
- do {
- cpu_relax();
- tmp = readl_relaxed(pll->con_reg);
- } while (!(tmp & BIT(pll->lock_offs)));
- }
+ if (pll_con0 & BIT(pll->enable_offs))
+ return samsung_pll_lock_wait(pll, BIT(pll->lock_offs));
return 0;
}
@@ -437,7 +473,6 @@ static int samsung_pll45xx_set_rate(struct clk_hw *hw, unsigned long drate,
struct samsung_clk_pll *pll = to_clk_pll(hw);
const struct samsung_pll_rate_table *rate;
u32 con0, con1;
- ktime_t start;
/* Get required rate settings from table */
rate = samsung_get_pll_settings(pll, drate);
@@ -488,21 +523,8 @@ static int samsung_pll45xx_set_rate(struct clk_hw *hw, unsigned long drate,
writel_relaxed(con1, pll->con_reg + 0x4);
writel_relaxed(con0, pll->con_reg);
- /* Wait for locking. */
- start = ktime_get();
- while (!(readl_relaxed(pll->con_reg) & PLL45XX_LOCKED)) {
- ktime_t delta = ktime_sub(ktime_get(), start);
-
- if (ktime_to_ms(delta) > PLL_TIMEOUT_MS) {
- pr_err("%s: could not lock PLL %s\n",
- __func__, clk_hw_get_name(hw));
- return -EFAULT;
- }
-
- cpu_relax();
- }
-
- return 0;
+ /* Wait for PLL lock */
+ return samsung_pll_lock_wait(pll, PLL45XX_LOCKED);
}
static const struct clk_ops samsung_pll45xx_clk_ops = {
@@ -588,7 +610,6 @@ static int samsung_pll46xx_set_rate(struct clk_hw *hw, unsigned long drate,
struct samsung_clk_pll *pll = to_clk_pll(hw);
const struct samsung_pll_rate_table *rate;
u32 con0, con1, lock;
- ktime_t start;
/* Get required rate settings from table */
rate = samsung_get_pll_settings(pll, drate);
@@ -647,21 +668,8 @@ static int samsung_pll46xx_set_rate(struct clk_hw *hw, unsigned long drate,
writel_relaxed(con0, pll->con_reg);
writel_relaxed(con1, pll->con_reg + 0x4);
- /* Wait for locking. */
- start = ktime_get();
- while (!(readl_relaxed(pll->con_reg) & PLL46XX_LOCKED)) {
- ktime_t delta = ktime_sub(ktime_get(), start);
-
- if (ktime_to_ms(delta) > PLL_TIMEOUT_MS) {
- pr_err("%s: could not lock PLL %s\n",
- __func__, clk_hw_get_name(hw));
- return -EFAULT;
- }
-
- cpu_relax();
- }
-
- return 0;
+ /* Wait for PLL lock */
+ return samsung_pll_lock_wait(pll, PLL46XX_LOCKED);
}
static const struct clk_ops samsung_pll46xx_clk_ops = {
@@ -1035,14 +1043,9 @@ static int samsung_pll2550xx_set_rate(struct clk_hw *hw, unsigned long drate,
(rate->sdiv << PLL2550XX_S_SHIFT);
writel_relaxed(tmp, pll->con_reg);
- /* wait_lock_time */
- do {
- cpu_relax();
- tmp = readl_relaxed(pll->con_reg);
- } while (!(tmp & (PLL2550XX_LOCK_STAT_MASK
- << PLL2550XX_LOCK_STAT_SHIFT)));
-
- return 0;
+ /* Wait for PLL lock */
+ return samsung_pll_lock_wait(pll,
+ PLL2550XX_LOCK_STAT_MASK << PLL2550XX_LOCK_STAT_SHIFT);
}
static const struct clk_ops samsung_pll2550xx_clk_ops = {
@@ -1132,13 +1135,9 @@ static int samsung_pll2650x_set_rate(struct clk_hw *hw, unsigned long drate,
con1 |= ((rate->kdiv & PLL2650X_K_MASK) << PLL2650X_K_SHIFT);
writel_relaxed(con1, pll->con_reg + 4);
- do {
- cpu_relax();
- con0 = readl_relaxed(pll->con_reg);
- } while (!(con0 & (PLL2650X_LOCK_STAT_MASK
- << PLL2650X_LOCK_STAT_SHIFT)));
-
- return 0;
+ /* Wait for PLL lock */
+ return samsung_pll_lock_wait(pll,
+ PLL2650X_LOCK_STAT_MASK << PLL2650X_LOCK_STAT_SHIFT);
}
static const struct clk_ops samsung_pll2650x_clk_ops = {
@@ -1196,7 +1195,7 @@ static int samsung_pll2650xx_set_rate(struct clk_hw *hw, unsigned long drate,
unsigned long parent_rate)
{
struct samsung_clk_pll *pll = to_clk_pll(hw);
- u32 tmp, pll_con0, pll_con2;
+ u32 pll_con0, pll_con2;
const struct samsung_pll_rate_table *rate;
rate = samsung_get_pll_settings(pll, drate);
@@ -1229,11 +1228,7 @@ static int samsung_pll2650xx_set_rate(struct clk_hw *hw, unsigned long drate,
writel_relaxed(pll_con0, pll->con_reg);
writel_relaxed(pll_con2, pll->con_reg + 8);
- do {
- tmp = readl_relaxed(pll->con_reg);
- } while (!(tmp & (0x1 << PLL2650XX_PLL_LOCKTIME_SHIFT)));
-
- return 0;
+ return samsung_pll_lock_wait(pll, 0x1 << PLL2650XX_PLL_LOCKTIME_SHIFT);
}
static const struct clk_ops samsung_pll2650xx_clk_ops = {
diff --git a/drivers/clk/sifive/Kconfig b/drivers/clk/sifive/Kconfig
index f3b4eb9cb0f5..1c14eb20c066 100644
--- a/drivers/clk/sifive/Kconfig
+++ b/drivers/clk/sifive/Kconfig
@@ -8,12 +8,12 @@ menuconfig CLK_SIFIVE
if CLK_SIFIVE
-config CLK_SIFIVE_FU540_PRCI
- bool "PRCI driver for SiFive FU540 SoCs"
+config CLK_SIFIVE_PRCI
+ bool "PRCI driver for SiFive SoCs"
select CLK_ANALOGBITS_WRPLL_CLN28HPC
help
Supports the Power Reset Clock interface (PRCI) IP block found in
- FU540 SoCs. If this kernel is meant to run on a SiFive FU540 SoC,
- enable this driver.
+ FU540/FU740 SoCs. If this kernel is meant to run on a SiFive FU540
+ or FU740 SoC, enable this driver.
endif
diff --git a/drivers/clk/sifive/Makefile b/drivers/clk/sifive/Makefile
index 0797f14fef6b..7b06fc04e6b3 100644
--- a/drivers/clk/sifive/Makefile
+++ b/drivers/clk/sifive/Makefile
@@ -1,2 +1,2 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_CLK_SIFIVE_FU540_PRCI) += fu540-prci.o
+obj-$(CONFIG_CLK_SIFIVE_PRCI) += sifive-prci.o fu540-prci.o fu740-prci.o
diff --git a/drivers/clk/sifive/fu540-prci.c b/drivers/clk/sifive/fu540-prci.c
index a8901f90a61a..29bab915003c 100644
--- a/drivers/clk/sifive/fu540-prci.c
+++ b/drivers/clk/sifive/fu540-prci.c
@@ -1,17 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018-2019 SiFive, Inc.
- * Wesley Terpstra
- * Paul Walmsley
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2018-2019 Wesley Terpstra
+ * Copyright (C) 2018-2019 Paul Walmsley
+ * Copyright (C) 2020 Zong Li
*
* The FU540 PRCI implements clock and reset control for the SiFive
* FU540-C000 chip. This driver assumes that it has sole control
@@ -24,464 +16,53 @@
* - SiFive FU540-C000 manual v1p0, Chapter 7 "Clocking and Reset"
*/
-#include <dt-bindings/clock/sifive-fu540-prci.h>
-#include <linux/clkdev.h>
-#include <linux/clk-provider.h>
-#include <linux/clk/analogbits-wrpll-cln28hpc.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_clk.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-
-/*
- * EXPECTED_CLK_PARENT_COUNT: how many parent clocks this driver expects:
- * hfclk and rtcclk
- */
-#define EXPECTED_CLK_PARENT_COUNT 2
-
-/*
- * Register offsets and bitmasks
- */
-
-/* COREPLLCFG0 */
-#define PRCI_COREPLLCFG0_OFFSET 0x4
-# define PRCI_COREPLLCFG0_DIVR_SHIFT 0
-# define PRCI_COREPLLCFG0_DIVR_MASK (0x3f << PRCI_COREPLLCFG0_DIVR_SHIFT)
-# define PRCI_COREPLLCFG0_DIVF_SHIFT 6
-# define PRCI_COREPLLCFG0_DIVF_MASK (0x1ff << PRCI_COREPLLCFG0_DIVF_SHIFT)
-# define PRCI_COREPLLCFG0_DIVQ_SHIFT 15
-# define PRCI_COREPLLCFG0_DIVQ_MASK (0x7 << PRCI_COREPLLCFG0_DIVQ_SHIFT)
-# define PRCI_COREPLLCFG0_RANGE_SHIFT 18
-# define PRCI_COREPLLCFG0_RANGE_MASK (0x7 << PRCI_COREPLLCFG0_RANGE_SHIFT)
-# define PRCI_COREPLLCFG0_BYPASS_SHIFT 24
-# define PRCI_COREPLLCFG0_BYPASS_MASK (0x1 << PRCI_COREPLLCFG0_BYPASS_SHIFT)
-# define PRCI_COREPLLCFG0_FSE_SHIFT 25
-# define PRCI_COREPLLCFG0_FSE_MASK (0x1 << PRCI_COREPLLCFG0_FSE_SHIFT)
-# define PRCI_COREPLLCFG0_LOCK_SHIFT 31
-# define PRCI_COREPLLCFG0_LOCK_MASK (0x1 << PRCI_COREPLLCFG0_LOCK_SHIFT)
-/* DDRPLLCFG0 */
-#define PRCI_DDRPLLCFG0_OFFSET 0xc
-# define PRCI_DDRPLLCFG0_DIVR_SHIFT 0
-# define PRCI_DDRPLLCFG0_DIVR_MASK (0x3f << PRCI_DDRPLLCFG0_DIVR_SHIFT)
-# define PRCI_DDRPLLCFG0_DIVF_SHIFT 6
-# define PRCI_DDRPLLCFG0_DIVF_MASK (0x1ff << PRCI_DDRPLLCFG0_DIVF_SHIFT)
-# define PRCI_DDRPLLCFG0_DIVQ_SHIFT 15
-# define PRCI_DDRPLLCFG0_DIVQ_MASK (0x7 << PRCI_DDRPLLCFG0_DIVQ_SHIFT)
-# define PRCI_DDRPLLCFG0_RANGE_SHIFT 18
-# define PRCI_DDRPLLCFG0_RANGE_MASK (0x7 << PRCI_DDRPLLCFG0_RANGE_SHIFT)
-# define PRCI_DDRPLLCFG0_BYPASS_SHIFT 24
-# define PRCI_DDRPLLCFG0_BYPASS_MASK (0x1 << PRCI_DDRPLLCFG0_BYPASS_SHIFT)
-# define PRCI_DDRPLLCFG0_FSE_SHIFT 25
-# define PRCI_DDRPLLCFG0_FSE_MASK (0x1 << PRCI_DDRPLLCFG0_FSE_SHIFT)
-# define PRCI_DDRPLLCFG0_LOCK_SHIFT 31
-# define PRCI_DDRPLLCFG0_LOCK_MASK (0x1 << PRCI_DDRPLLCFG0_LOCK_SHIFT)
-
-/* DDRPLLCFG1 */
-#define PRCI_DDRPLLCFG1_OFFSET 0x10
-# define PRCI_DDRPLLCFG1_CKE_SHIFT 24
-# define PRCI_DDRPLLCFG1_CKE_MASK (0x1 << PRCI_DDRPLLCFG1_CKE_SHIFT)
-
-/* GEMGXLPLLCFG0 */
-#define PRCI_GEMGXLPLLCFG0_OFFSET 0x1c
-# define PRCI_GEMGXLPLLCFG0_DIVR_SHIFT 0
-# define PRCI_GEMGXLPLLCFG0_DIVR_MASK (0x3f << PRCI_GEMGXLPLLCFG0_DIVR_SHIFT)
-# define PRCI_GEMGXLPLLCFG0_DIVF_SHIFT 6
-# define PRCI_GEMGXLPLLCFG0_DIVF_MASK (0x1ff << PRCI_GEMGXLPLLCFG0_DIVF_SHIFT)
-# define PRCI_GEMGXLPLLCFG0_DIVQ_SHIFT 15
-# define PRCI_GEMGXLPLLCFG0_DIVQ_MASK (0x7 << PRCI_GEMGXLPLLCFG0_DIVQ_SHIFT)
-# define PRCI_GEMGXLPLLCFG0_RANGE_SHIFT 18
-# define PRCI_GEMGXLPLLCFG0_RANGE_MASK (0x7 << PRCI_GEMGXLPLLCFG0_RANGE_SHIFT)
-# define PRCI_GEMGXLPLLCFG0_BYPASS_SHIFT 24
-# define PRCI_GEMGXLPLLCFG0_BYPASS_MASK (0x1 << PRCI_GEMGXLPLLCFG0_BYPASS_SHIFT)
-# define PRCI_GEMGXLPLLCFG0_FSE_SHIFT 25
-# define PRCI_GEMGXLPLLCFG0_FSE_MASK (0x1 << PRCI_GEMGXLPLLCFG0_FSE_SHIFT)
-# define PRCI_GEMGXLPLLCFG0_LOCK_SHIFT 31
-# define PRCI_GEMGXLPLLCFG0_LOCK_MASK (0x1 << PRCI_GEMGXLPLLCFG0_LOCK_SHIFT)
-
-/* GEMGXLPLLCFG1 */
-#define PRCI_GEMGXLPLLCFG1_OFFSET 0x20
-# define PRCI_GEMGXLPLLCFG1_CKE_SHIFT 24
-# define PRCI_GEMGXLPLLCFG1_CKE_MASK (0x1 << PRCI_GEMGXLPLLCFG1_CKE_SHIFT)
-
-/* CORECLKSEL */
-#define PRCI_CORECLKSEL_OFFSET 0x24
-# define PRCI_CORECLKSEL_CORECLKSEL_SHIFT 0
-# define PRCI_CORECLKSEL_CORECLKSEL_MASK (0x1 << PRCI_CORECLKSEL_CORECLKSEL_SHIFT)
-
-/* DEVICESRESETREG */
-#define PRCI_DEVICESRESETREG_OFFSET 0x28
-# define PRCI_DEVICESRESETREG_DDR_CTRL_RST_N_SHIFT 0
-# define PRCI_DEVICESRESETREG_DDR_CTRL_RST_N_MASK (0x1 << PRCI_DEVICESRESETREG_DDR_CTRL_RST_N_SHIFT)
-# define PRCI_DEVICESRESETREG_DDR_AXI_RST_N_SHIFT 1
-# define PRCI_DEVICESRESETREG_DDR_AXI_RST_N_MASK (0x1 << PRCI_DEVICESRESETREG_DDR_AXI_RST_N_SHIFT)
-# define PRCI_DEVICESRESETREG_DDR_AHB_RST_N_SHIFT 2
-# define PRCI_DEVICESRESETREG_DDR_AHB_RST_N_MASK (0x1 << PRCI_DEVICESRESETREG_DDR_AHB_RST_N_SHIFT)
-# define PRCI_DEVICESRESETREG_DDR_PHY_RST_N_SHIFT 3
-# define PRCI_DEVICESRESETREG_DDR_PHY_RST_N_MASK (0x1 << PRCI_DEVICESRESETREG_DDR_PHY_RST_N_SHIFT)
-# define PRCI_DEVICESRESETREG_GEMGXL_RST_N_SHIFT 5
-# define PRCI_DEVICESRESETREG_GEMGXL_RST_N_MASK (0x1 << PRCI_DEVICESRESETREG_GEMGXL_RST_N_SHIFT)
+#include <dt-bindings/clock/sifive-fu540-prci.h>
-/* CLKMUXSTATUSREG */
-#define PRCI_CLKMUXSTATUSREG_OFFSET 0x2c
-# define PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_SHIFT 1
-# define PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_MASK (0x1 << PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_SHIFT)
+#include "fu540-prci.h"
+#include "sifive-prci.h"
-/*
- * Private structures
- */
+/* PRCI integration data for each WRPLL instance */
-/**
- * struct __prci_data - per-device-instance data
- * @va: base virtual address of the PRCI IP block
- * @hw_clks: encapsulates struct clk_hw records
- *
- * PRCI per-device instance data
- */
-struct __prci_data {
- void __iomem *va;
- struct clk_hw_onecell_data hw_clks;
+static struct __prci_wrpll_data __prci_corepll_data = {
+ .cfg0_offs = PRCI_COREPLLCFG0_OFFSET,
+ .cfg1_offs = PRCI_COREPLLCFG1_OFFSET,
+ .enable_bypass = sifive_prci_coreclksel_use_hfclk,
+ .disable_bypass = sifive_prci_coreclksel_use_corepll,
};
-/**
- * struct __prci_wrpll_data - WRPLL configuration and integration data
- * @c: WRPLL current configuration record
- * @enable_bypass: fn ptr to code to bypass the WRPLL (if applicable; else NULL)
- * @disable_bypass: fn ptr to code to not bypass the WRPLL (or NULL)
- * @cfg0_offs: WRPLL CFG0 register offset (in bytes) from the PRCI base address
- *
- * @enable_bypass and @disable_bypass are used for WRPLL instances
- * that contain a separate external glitchless clock mux downstream
- * from the PLL. The WRPLL internal bypass mux is not glitchless.
- */
-struct __prci_wrpll_data {
- struct wrpll_cfg c;
- void (*enable_bypass)(struct __prci_data *pd);
- void (*disable_bypass)(struct __prci_data *pd);
- u8 cfg0_offs;
+static struct __prci_wrpll_data __prci_ddrpll_data = {
+ .cfg0_offs = PRCI_DDRPLLCFG0_OFFSET,
+ .cfg1_offs = PRCI_DDRPLLCFG1_OFFSET,
};
-/**
- * struct __prci_clock - describes a clock device managed by PRCI
- * @name: user-readable clock name string - should match the manual
- * @parent_name: parent name for this clock
- * @ops: struct clk_ops for the Linux clock framework to use for control
- * @hw: Linux-private clock data
- * @pwd: WRPLL-specific data, associated with this clock (if not NULL)
- * @pd: PRCI-specific data associated with this clock (if not NULL)
- *
- * PRCI clock data. Used by the PRCI driver to register PRCI-provided
- * clocks to the Linux clock infrastructure.
- */
-struct __prci_clock {
- const char *name;
- const char *parent_name;
- const struct clk_ops *ops;
- struct clk_hw hw;
- struct __prci_wrpll_data *pwd;
- struct __prci_data *pd;
+static struct __prci_wrpll_data __prci_gemgxlpll_data = {
+ .cfg0_offs = PRCI_GEMGXLPLLCFG0_OFFSET,
+ .cfg1_offs = PRCI_GEMGXLPLLCFG1_OFFSET,
};
-#define clk_hw_to_prci_clock(pwd) container_of(pwd, struct __prci_clock, hw)
-
-/*
- * Private functions
- */
-
-/**
- * __prci_readl() - read from a PRCI register
- * @pd: PRCI context
- * @offs: register offset to read from (in bytes, from PRCI base address)
- *
- * Read the register located at offset @offs from the base virtual
- * address of the PRCI register target described by @pd, and return
- * the value to the caller.
- *
- * Context: Any context.
- *
- * Return: the contents of the register described by @pd and @offs.
- */
-static u32 __prci_readl(struct __prci_data *pd, u32 offs)
-{
- return readl_relaxed(pd->va + offs);
-}
-
-static void __prci_writel(u32 v, u32 offs, struct __prci_data *pd)
-{
- writel_relaxed(v, pd->va + offs);
-}
-
-/* WRPLL-related private functions */
-
-/**
- * __prci_wrpll_unpack() - unpack WRPLL configuration registers into parameters
- * @c: ptr to a struct wrpll_cfg record to write config into
- * @r: value read from the PRCI PLL configuration register
- *
- * Given a value @r read from an FU540 PRCI PLL configuration register,
- * split it into fields and populate it into the WRPLL configuration record
- * pointed to by @c.
- *
- * The COREPLLCFG0 macros are used below, but the other *PLLCFG0 macros
- * have the same register layout.
- *
- * Context: Any context.
- */
-static void __prci_wrpll_unpack(struct wrpll_cfg *c, u32 r)
-{
- u32 v;
-
- v = r & PRCI_COREPLLCFG0_DIVR_MASK;
- v >>= PRCI_COREPLLCFG0_DIVR_SHIFT;
- c->divr = v;
-
- v = r & PRCI_COREPLLCFG0_DIVF_MASK;
- v >>= PRCI_COREPLLCFG0_DIVF_SHIFT;
- c->divf = v;
-
- v = r & PRCI_COREPLLCFG0_DIVQ_MASK;
- v >>= PRCI_COREPLLCFG0_DIVQ_SHIFT;
- c->divq = v;
-
- v = r & PRCI_COREPLLCFG0_RANGE_MASK;
- v >>= PRCI_COREPLLCFG0_RANGE_SHIFT;
- c->range = v;
-
- c->flags &= (WRPLL_FLAGS_INT_FEEDBACK_MASK |
- WRPLL_FLAGS_EXT_FEEDBACK_MASK);
-
- /* external feedback mode not supported */
- c->flags |= WRPLL_FLAGS_INT_FEEDBACK_MASK;
-}
-
-/**
- * __prci_wrpll_pack() - pack PLL configuration parameters into a register value
- * @c: pointer to a struct wrpll_cfg record containing the PLL's cfg
- *
- * Using a set of WRPLL configuration values pointed to by @c,
- * assemble a PRCI PLL configuration register value, and return it to
- * the caller.
- *
- * Context: Any context. Caller must ensure that the contents of the
- * record pointed to by @c do not change during the execution
- * of this function.
- *
- * Returns: a value suitable for writing into a PRCI PLL configuration
- * register
- */
-static u32 __prci_wrpll_pack(const struct wrpll_cfg *c)
-{
- u32 r = 0;
-
- r |= c->divr << PRCI_COREPLLCFG0_DIVR_SHIFT;
- r |= c->divf << PRCI_COREPLLCFG0_DIVF_SHIFT;
- r |= c->divq << PRCI_COREPLLCFG0_DIVQ_SHIFT;
- r |= c->range << PRCI_COREPLLCFG0_RANGE_SHIFT;
-
- /* external feedback mode not supported */
- r |= PRCI_COREPLLCFG0_FSE_MASK;
-
- return r;
-}
-
-/**
- * __prci_wrpll_read_cfg() - read the WRPLL configuration from the PRCI
- * @pd: PRCI context
- * @pwd: PRCI WRPLL metadata
- *
- * Read the current configuration of the PLL identified by @pwd from
- * the PRCI identified by @pd, and store it into the local configuration
- * cache in @pwd.
- *
- * Context: Any context. Caller must prevent the records pointed to by
- * @pd and @pwd from changing during execution.
- */
-static void __prci_wrpll_read_cfg(struct __prci_data *pd,
- struct __prci_wrpll_data *pwd)
-{
- __prci_wrpll_unpack(&pwd->c, __prci_readl(pd, pwd->cfg0_offs));
-}
-
-/**
- * __prci_wrpll_write_cfg() - write WRPLL configuration into the PRCI
- * @pd: PRCI context
- * @pwd: PRCI WRPLL metadata
- * @c: WRPLL configuration record to write
- *
- * Write the WRPLL configuration described by @c into the WRPLL
- * configuration register identified by @pwd in the PRCI instance
- * described by @c. Make a cached copy of the WRPLL's current
- * configuration so it can be used by other code.
- *
- * Context: Any context. Caller must prevent the records pointed to by
- * @pd and @pwd from changing during execution.
- */
-static void __prci_wrpll_write_cfg(struct __prci_data *pd,
- struct __prci_wrpll_data *pwd,
- struct wrpll_cfg *c)
-{
- __prci_writel(__prci_wrpll_pack(c), pwd->cfg0_offs, pd);
-
- memcpy(&pwd->c, c, sizeof(*c));
-}
-
-/* Core clock mux control */
-
-/**
- * __prci_coreclksel_use_hfclk() - switch the CORECLK mux to output HFCLK
- * @pd: struct __prci_data * for the PRCI containing the CORECLK mux reg
- *
- * Switch the CORECLK mux to the HFCLK input source; return once complete.
- *
- * Context: Any context. Caller must prevent concurrent changes to the
- * PRCI_CORECLKSEL_OFFSET register.
- */
-static void __prci_coreclksel_use_hfclk(struct __prci_data *pd)
-{
- u32 r;
-
- r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
- r |= PRCI_CORECLKSEL_CORECLKSEL_MASK;
- __prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);
-
- r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET); /* barrier */
-}
-
-/**
- * __prci_coreclksel_use_corepll() - switch the CORECLK mux to output COREPLL
- * @pd: struct __prci_data * for the PRCI containing the CORECLK mux reg
- *
- * Switch the CORECLK mux to the PLL output clock; return once complete.
- *
- * Context: Any context. Caller must prevent concurrent changes to the
- * PRCI_CORECLKSEL_OFFSET register.
- */
-static void __prci_coreclksel_use_corepll(struct __prci_data *pd)
-{
- u32 r;
-
- r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
- r &= ~PRCI_CORECLKSEL_CORECLKSEL_MASK;
- __prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);
-
- r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET); /* barrier */
-}
-
-/*
- * Linux clock framework integration
- *
- * See the Linux clock framework documentation for more information on
- * these functions.
- */
-
-static unsigned long sifive_fu540_prci_wrpll_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
- struct __prci_wrpll_data *pwd = pc->pwd;
-
- return wrpll_calc_output_rate(&pwd->c, parent_rate);
-}
-
-static long sifive_fu540_prci_wrpll_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *parent_rate)
-{
- struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
- struct __prci_wrpll_data *pwd = pc->pwd;
- struct wrpll_cfg c;
-
- memcpy(&c, &pwd->c, sizeof(c));
-
- wrpll_configure_for_rate(&c, rate, *parent_rate);
-
- return wrpll_calc_output_rate(&c, *parent_rate);
-}
-
-static int sifive_fu540_prci_wrpll_set_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long parent_rate)
-{
- struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
- struct __prci_wrpll_data *pwd = pc->pwd;
- struct __prci_data *pd = pc->pd;
- int r;
-
- r = wrpll_configure_for_rate(&pwd->c, rate, parent_rate);
- if (r)
- return r;
-
- if (pwd->enable_bypass)
- pwd->enable_bypass(pd);
-
- __prci_wrpll_write_cfg(pd, pwd, &pwd->c);
-
- udelay(wrpll_calc_max_lock_us(&pwd->c));
-
- if (pwd->disable_bypass)
- pwd->disable_bypass(pd);
-
- return 0;
-}
+/* Linux clock framework integration */
static const struct clk_ops sifive_fu540_prci_wrpll_clk_ops = {
- .set_rate = sifive_fu540_prci_wrpll_set_rate,
- .round_rate = sifive_fu540_prci_wrpll_round_rate,
- .recalc_rate = sifive_fu540_prci_wrpll_recalc_rate,
+ .set_rate = sifive_prci_wrpll_set_rate,
+ .round_rate = sifive_prci_wrpll_round_rate,
+ .recalc_rate = sifive_prci_wrpll_recalc_rate,
+ .enable = sifive_prci_clock_enable,
+ .disable = sifive_prci_clock_disable,
+ .is_enabled = sifive_clk_is_enabled,
};
static const struct clk_ops sifive_fu540_prci_wrpll_ro_clk_ops = {
- .recalc_rate = sifive_fu540_prci_wrpll_recalc_rate,
+ .recalc_rate = sifive_prci_wrpll_recalc_rate,
};
-/* TLCLKSEL clock integration */
-
-static unsigned long sifive_fu540_prci_tlclksel_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
- struct __prci_data *pd = pc->pd;
- u32 v;
- u8 div;
-
- v = __prci_readl(pd, PRCI_CLKMUXSTATUSREG_OFFSET);
- v &= PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_MASK;
- div = v ? 1 : 2;
-
- return div_u64(parent_rate, div);
-}
-
static const struct clk_ops sifive_fu540_prci_tlclksel_clk_ops = {
- .recalc_rate = sifive_fu540_prci_tlclksel_recalc_rate,
-};
-
-/*
- * PRCI integration data for each WRPLL instance
- */
-
-static struct __prci_wrpll_data __prci_corepll_data = {
- .cfg0_offs = PRCI_COREPLLCFG0_OFFSET,
- .enable_bypass = __prci_coreclksel_use_hfclk,
- .disable_bypass = __prci_coreclksel_use_corepll,
-};
-
-static struct __prci_wrpll_data __prci_ddrpll_data = {
- .cfg0_offs = PRCI_DDRPLLCFG0_OFFSET,
+ .recalc_rate = sifive_prci_tlclksel_recalc_rate,
};
-static struct __prci_wrpll_data __prci_gemgxlpll_data = {
- .cfg0_offs = PRCI_GEMGXLPLLCFG0_OFFSET,
-};
-
-/*
- * List of clock controls provided by the PRCI
- */
-
-static struct __prci_clock __prci_init_clocks[] = {
+/* List of clock controls provided by the PRCI */
+struct __prci_clock __prci_init_clocks_fu540[] = {
[PRCI_CLK_COREPLL] = {
.name = "corepll",
.parent_name = "hfclk",
@@ -506,125 +87,3 @@ static struct __prci_clock __prci_init_clocks[] = {
.ops = &sifive_fu540_prci_tlclksel_clk_ops,
},
};
-
-/**
- * __prci_register_clocks() - register clock controls in the PRCI with Linux
- * @dev: Linux struct device *
- *
- * Register the list of clock controls described in __prci_init_plls[] with
- * the Linux clock framework.
- *
- * Return: 0 upon success or a negative error code upon failure.
- */
-static int __prci_register_clocks(struct device *dev, struct __prci_data *pd)
-{
- struct clk_init_data init = { };
- struct __prci_clock *pic;
- int parent_count, i, r;
-
- parent_count = of_clk_get_parent_count(dev->of_node);
- if (parent_count != EXPECTED_CLK_PARENT_COUNT) {
- dev_err(dev, "expected only two parent clocks, found %d\n",
- parent_count);
- return -EINVAL;
- }
-
- /* Register PLLs */
- for (i = 0; i < ARRAY_SIZE(__prci_init_clocks); ++i) {
- pic = &__prci_init_clocks[i];
-
- init.name = pic->name;
- init.parent_names = &pic->parent_name;
- init.num_parents = 1;
- init.ops = pic->ops;
- pic->hw.init = &init;
-
- pic->pd = pd;
-
- if (pic->pwd)
- __prci_wrpll_read_cfg(pd, pic->pwd);
-
- r = devm_clk_hw_register(dev, &pic->hw);
- if (r) {
- dev_warn(dev, "Failed to register clock %s: %d\n",
- init.name, r);
- return r;
- }
-
- r = clk_hw_register_clkdev(&pic->hw, pic->name, dev_name(dev));
- if (r) {
- dev_warn(dev, "Failed to register clkdev for %s: %d\n",
- init.name, r);
- return r;
- }
-
- pd->hw_clks.hws[i] = &pic->hw;
- }
-
- pd->hw_clks.num = i;
-
- r = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
- &pd->hw_clks);
- if (r) {
- dev_err(dev, "could not add hw_provider: %d\n", r);
- return r;
- }
-
- return 0;
-}
-
-/*
- * Linux device model integration
- *
- * See the Linux device model documentation for more information about
- * these functions.
- */
-static int sifive_fu540_prci_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct resource *res;
- struct __prci_data *pd;
- int r;
-
- pd = devm_kzalloc(dev,
- struct_size(pd, hw_clks.hws,
- ARRAY_SIZE(__prci_init_clocks)),
- GFP_KERNEL);
- if (!pd)
- return -ENOMEM;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pd->va = devm_ioremap_resource(dev, res);
- if (IS_ERR(pd->va))
- return PTR_ERR(pd->va);
-
- r = __prci_register_clocks(dev, pd);
- if (r) {
- dev_err(dev, "could not register clocks: %d\n", r);
- return r;
- }
-
- dev_dbg(dev, "SiFive FU540 PRCI probed\n");
-
- return 0;
-}
-
-static const struct of_device_id sifive_fu540_prci_of_match[] = {
- { .compatible = "sifive,fu540-c000-prci", },
- {}
-};
-MODULE_DEVICE_TABLE(of, sifive_fu540_prci_of_match);
-
-static struct platform_driver sifive_fu540_prci_driver = {
- .driver = {
- .name = "sifive-fu540-prci",
- .of_match_table = sifive_fu540_prci_of_match,
- },
- .probe = sifive_fu540_prci_probe,
-};
-
-static int __init sifive_fu540_prci_init(void)
-{
- return platform_driver_register(&sifive_fu540_prci_driver);
-}
-core_initcall(sifive_fu540_prci_init);
diff --git a/drivers/clk/sifive/fu540-prci.h b/drivers/clk/sifive/fu540-prci.h
new file mode 100644
index 000000000000..c8271efa7bdc
--- /dev/null
+++ b/drivers/clk/sifive/fu540-prci.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 SiFive, Inc.
+ * Zong Li
+ */
+
+#ifndef __SIFIVE_CLK_FU540_PRCI_H
+#define __SIFIVE_CLK_FU540_PRCI_H
+
+#include "sifive-prci.h"
+
+#define NUM_CLOCK_FU540 4
+
+extern struct __prci_clock __prci_init_clocks_fu540[NUM_CLOCK_FU540];
+
+static const struct prci_clk_desc prci_clk_fu540 = {
+ .clks = __prci_init_clocks_fu540,
+ .num_clks = ARRAY_SIZE(__prci_init_clocks_fu540),
+};
+
+#endif /* __SIFIVE_CLK_FU540_PRCI_H */
diff --git a/drivers/clk/sifive/fu740-prci.c b/drivers/clk/sifive/fu740-prci.c
new file mode 100644
index 000000000000..764d1097aa51
--- /dev/null
+++ b/drivers/clk/sifive/fu740-prci.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 SiFive, Inc.
+ * Copyright (C) 2020 Zong Li
+ */
+
+#include <linux/module.h>
+
+#include <dt-bindings/clock/sifive-fu740-prci.h>
+
+#include "fu540-prci.h"
+#include "sifive-prci.h"
+
+/* PRCI integration data for each WRPLL instance */
+
+static struct __prci_wrpll_data __prci_corepll_data = {
+ .cfg0_offs = PRCI_COREPLLCFG0_OFFSET,
+ .cfg1_offs = PRCI_COREPLLCFG1_OFFSET,
+ .enable_bypass = sifive_prci_coreclksel_use_hfclk,
+ .disable_bypass = sifive_prci_coreclksel_use_final_corepll,
+};
+
+static struct __prci_wrpll_data __prci_ddrpll_data = {
+ .cfg0_offs = PRCI_DDRPLLCFG0_OFFSET,
+ .cfg1_offs = PRCI_DDRPLLCFG1_OFFSET,
+};
+
+static struct __prci_wrpll_data __prci_gemgxlpll_data = {
+ .cfg0_offs = PRCI_GEMGXLPLLCFG0_OFFSET,
+ .cfg1_offs = PRCI_GEMGXLPLLCFG1_OFFSET,
+};
+
+static struct __prci_wrpll_data __prci_dvfscorepll_data = {
+ .cfg0_offs = PRCI_DVFSCOREPLLCFG0_OFFSET,
+ .cfg1_offs = PRCI_DVFSCOREPLLCFG1_OFFSET,
+ .enable_bypass = sifive_prci_corepllsel_use_corepll,
+ .disable_bypass = sifive_prci_corepllsel_use_dvfscorepll,
+};
+
+static struct __prci_wrpll_data __prci_hfpclkpll_data = {
+ .cfg0_offs = PRCI_HFPCLKPLLCFG0_OFFSET,
+ .cfg1_offs = PRCI_HFPCLKPLLCFG1_OFFSET,
+ .enable_bypass = sifive_prci_hfpclkpllsel_use_hfclk,
+ .disable_bypass = sifive_prci_hfpclkpllsel_use_hfpclkpll,
+};
+
+static struct __prci_wrpll_data __prci_cltxpll_data = {
+ .cfg0_offs = PRCI_CLTXPLLCFG0_OFFSET,
+ .cfg1_offs = PRCI_CLTXPLLCFG1_OFFSET,
+};
+
+/* Linux clock framework integration */
+
+static const struct clk_ops sifive_fu740_prci_wrpll_clk_ops = {
+ .set_rate = sifive_prci_wrpll_set_rate,
+ .round_rate = sifive_prci_wrpll_round_rate,
+ .recalc_rate = sifive_prci_wrpll_recalc_rate,
+ .enable = sifive_prci_clock_enable,
+ .disable = sifive_prci_clock_disable,
+ .is_enabled = sifive_clk_is_enabled,
+};
+
+static const struct clk_ops sifive_fu740_prci_wrpll_ro_clk_ops = {
+ .recalc_rate = sifive_prci_wrpll_recalc_rate,
+};
+
+static const struct clk_ops sifive_fu740_prci_tlclksel_clk_ops = {
+ .recalc_rate = sifive_prci_tlclksel_recalc_rate,
+};
+
+static const struct clk_ops sifive_fu740_prci_hfpclkplldiv_clk_ops = {
+ .recalc_rate = sifive_prci_hfpclkplldiv_recalc_rate,
+};
+
+/* List of clock controls provided by the PRCI */
+struct __prci_clock __prci_init_clocks_fu740[] = {
+ [PRCI_CLK_COREPLL] = {
+ .name = "corepll",
+ .parent_name = "hfclk",
+ .ops = &sifive_fu740_prci_wrpll_clk_ops,
+ .pwd = &__prci_corepll_data,
+ },
+ [PRCI_CLK_DDRPLL] = {
+ .name = "ddrpll",
+ .parent_name = "hfclk",
+ .ops = &sifive_fu740_prci_wrpll_ro_clk_ops,
+ .pwd = &__prci_ddrpll_data,
+ },
+ [PRCI_CLK_GEMGXLPLL] = {
+ .name = "gemgxlpll",
+ .parent_name = "hfclk",
+ .ops = &sifive_fu740_prci_wrpll_clk_ops,
+ .pwd = &__prci_gemgxlpll_data,
+ },
+ [PRCI_CLK_DVFSCOREPLL] = {
+ .name = "dvfscorepll",
+ .parent_name = "hfclk",
+ .ops = &sifive_fu740_prci_wrpll_clk_ops,
+ .pwd = &__prci_dvfscorepll_data,
+ },
+ [PRCI_CLK_HFPCLKPLL] = {
+ .name = "hfpclkpll",
+ .parent_name = "hfclk",
+ .ops = &sifive_fu740_prci_wrpll_clk_ops,
+ .pwd = &__prci_hfpclkpll_data,
+ },
+ [PRCI_CLK_CLTXPLL] = {
+ .name = "cltxpll",
+ .parent_name = "hfclk",
+ .ops = &sifive_fu740_prci_wrpll_clk_ops,
+ .pwd = &__prci_cltxpll_data,
+ },
+ [PRCI_CLK_TLCLK] = {
+ .name = "tlclk",
+ .parent_name = "corepll",
+ .ops = &sifive_fu740_prci_tlclksel_clk_ops,
+ },
+ [PRCI_CLK_PCLK] = {
+ .name = "pclk",
+ .parent_name = "hfpclkpll",
+ .ops = &sifive_fu740_prci_hfpclkplldiv_clk_ops,
+ },
+};
diff --git a/drivers/clk/sifive/fu740-prci.h b/drivers/clk/sifive/fu740-prci.h
new file mode 100644
index 000000000000..13ef971f7764
--- /dev/null
+++ b/drivers/clk/sifive/fu740-prci.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 SiFive, Inc.
+ * Zong Li
+ */
+
+#ifndef __SIFIVE_CLK_FU740_PRCI_H
+#define __SIFIVE_CLK_FU740_PRCI_H
+
+#include "sifive-prci.h"
+
+#define NUM_CLOCK_FU740 8
+
+extern struct __prci_clock __prci_init_clocks_fu740[NUM_CLOCK_FU740];
+
+static const struct prci_clk_desc prci_clk_fu740 = {
+ .clks = __prci_init_clocks_fu740,
+ .num_clks = ARRAY_SIZE(__prci_init_clocks_fu740),
+};
+
+#endif /* __SIFIVE_CLK_FU740_PRCI_H */
diff --git a/drivers/clk/sifive/sifive-prci.c b/drivers/clk/sifive/sifive-prci.c
new file mode 100644
index 000000000000..c78b042750e2
--- /dev/null
+++ b/drivers/clk/sifive/sifive-prci.c
@@ -0,0 +1,574 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 SiFive, Inc.
+ * Copyright (C) 2020 Zong Li
+ */
+
+#include <linux/clkdev.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of_device.h>
+#include "sifive-prci.h"
+#include "fu540-prci.h"
+#include "fu740-prci.h"
+
+/*
+ * Private functions
+ */
+
+/**
+ * __prci_readl() - read from a PRCI register
+ * @pd: PRCI context
+ * @offs: register offset to read from (in bytes, from PRCI base address)
+ *
+ * Read the register located at offset @offs from the base virtual
+ * address of the PRCI register target described by @pd, and return
+ * the value to the caller.
+ *
+ * Context: Any context.
+ *
+ * Return: the contents of the register described by @pd and @offs.
+ */
+static u32 __prci_readl(struct __prci_data *pd, u32 offs)
+{
+ return readl_relaxed(pd->va + offs);
+}
+
+static void __prci_writel(u32 v, u32 offs, struct __prci_data *pd)
+{
+ writel_relaxed(v, pd->va + offs);
+}
+
+/* WRPLL-related private functions */
+
+/**
+ * __prci_wrpll_unpack() - unpack WRPLL configuration registers into parameters
+ * @c: ptr to a struct wrpll_cfg record to write config into
+ * @r: value read from the PRCI PLL configuration register
+ *
+ * Given a value @r read from an FU540/FU740 PRCI PLL configuration register,
+ * split it into fields and populate it into the WRPLL configuration record
+ * pointed to by @c.
+ *
+ * The COREPLLCFG0 macros are used below, but the other *PLLCFG0 macros
+ * have the same register layout.
+ *
+ * Context: Any context.
+ */
+static void __prci_wrpll_unpack(struct wrpll_cfg *c, u32 r)
+{
+ u32 v;
+
+ v = r & PRCI_COREPLLCFG0_DIVR_MASK;
+ v >>= PRCI_COREPLLCFG0_DIVR_SHIFT;
+ c->divr = v;
+
+ v = r & PRCI_COREPLLCFG0_DIVF_MASK;
+ v >>= PRCI_COREPLLCFG0_DIVF_SHIFT;
+ c->divf = v;
+
+ v = r & PRCI_COREPLLCFG0_DIVQ_MASK;
+ v >>= PRCI_COREPLLCFG0_DIVQ_SHIFT;
+ c->divq = v;
+
+ v = r & PRCI_COREPLLCFG0_RANGE_MASK;
+ v >>= PRCI_COREPLLCFG0_RANGE_SHIFT;
+ c->range = v;
+
+ c->flags &=
+ (WRPLL_FLAGS_INT_FEEDBACK_MASK | WRPLL_FLAGS_EXT_FEEDBACK_MASK);
+
+ /* external feedback mode not supported */
+ c->flags |= WRPLL_FLAGS_INT_FEEDBACK_MASK;
+}
+
+/**
+ * __prci_wrpll_pack() - pack PLL configuration parameters into a register value
+ * @c: pointer to a struct wrpll_cfg record containing the PLL's cfg
+ *
+ * Using a set of WRPLL configuration values pointed to by @c,
+ * assemble a PRCI PLL configuration register value, and return it to
+ * the caller.
+ *
+ * Context: Any context. Caller must ensure that the contents of the
+ * record pointed to by @c do not change during the execution
+ * of this function.
+ *
+ * Returns: a value suitable for writing into a PRCI PLL configuration
+ * register
+ */
+static u32 __prci_wrpll_pack(const struct wrpll_cfg *c)
+{
+ u32 r = 0;
+
+ r |= c->divr << PRCI_COREPLLCFG0_DIVR_SHIFT;
+ r |= c->divf << PRCI_COREPLLCFG0_DIVF_SHIFT;
+ r |= c->divq << PRCI_COREPLLCFG0_DIVQ_SHIFT;
+ r |= c->range << PRCI_COREPLLCFG0_RANGE_SHIFT;
+
+ /* external feedback mode not supported */
+ r |= PRCI_COREPLLCFG0_FSE_MASK;
+
+ return r;
+}
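A brief aside on the pack/unpack pair above: __prci_wrpll_pack() always forces the FSE bit and never emits BYPASS or LOCK, while __prci_wrpll_unpack() only recovers the divider and range fields, so the pair round-trips exactly the fields the driver cares about. A minimal self-check sketch under those assumptions (the helper name and the WARN_ON wrapper are illustrative, not part of this patch):

/*
 * Illustrative only: verify that the divider/range fields survive a
 * pack/unpack round trip. Not part of the driver.
 */
static void __maybe_unused __prci_wrpll_roundtrip_check(void)
{
	struct wrpll_cfg c = { .divr = 3, .divf = 80, .divq = 5, .range = 4 };
	struct wrpll_cfg d = { };

	__prci_wrpll_unpack(&d, __prci_wrpll_pack(&c));

	/* BYPASS/LOCK are not modeled; FSE is forced by the pack helper */
	WARN_ON(d.divr != c.divr || d.divf != c.divf ||
		d.divq != c.divq || d.range != c.range);
}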
+
+/**
+ * __prci_wrpll_read_cfg0() - read the WRPLL configuration from the PRCI
+ * @pd: PRCI context
+ * @pwd: PRCI WRPLL metadata
+ *
+ * Read the current configuration of the PLL identified by @pwd from
+ * the PRCI identified by @pd, and store it into the local configuration
+ * cache in @pwd.
+ *
+ * Context: Any context. Caller must prevent the records pointed to by
+ * @pd and @pwd from changing during execution.
+ */
+static void __prci_wrpll_read_cfg0(struct __prci_data *pd,
+ struct __prci_wrpll_data *pwd)
+{
+ __prci_wrpll_unpack(&pwd->c, __prci_readl(pd, pwd->cfg0_offs));
+}
+
+/**
+ * __prci_wrpll_write_cfg0() - write WRPLL configuration into the PRCI
+ * @pd: PRCI context
+ * @pwd: PRCI WRPLL metadata
+ * @c: WRPLL configuration record to write
+ *
+ * Write the WRPLL configuration described by @c into the WRPLL
+ * configuration register identified by @pwd in the PRCI instance
+ * described by @c. Make a cached copy of the WRPLL's current
+ * configuration so it can be used by other code.
+ *
+ * Context: Any context. Caller must prevent the records pointed to by
+ * @pd and @pwd from changing during execution.
+ */
+static void __prci_wrpll_write_cfg0(struct __prci_data *pd,
+ struct __prci_wrpll_data *pwd,
+ struct wrpll_cfg *c)
+{
+ __prci_writel(__prci_wrpll_pack(c), pwd->cfg0_offs, pd);
+
+ memcpy(&pwd->c, c, sizeof(*c));
+}
+
+/**
+ * __prci_wrpll_write_cfg1() - write Clock enable/disable configuration
+ * into the PRCI
+ * @pd: PRCI context
+ * @pwd: PRCI WRPLL metadata
+ * @enable: Clock enable or disable value
+ */
+static void __prci_wrpll_write_cfg1(struct __prci_data *pd,
+ struct __prci_wrpll_data *pwd,
+ u32 enable)
+{
+ __prci_writel(enable, pwd->cfg1_offs, pd);
+}
+
+/*
+ * Linux clock framework integration
+ *
+ * See the Linux clock framework documentation for more information on
+ * these functions.
+ */
+
+unsigned long sifive_prci_wrpll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
+ struct __prci_wrpll_data *pwd = pc->pwd;
+
+ return wrpll_calc_output_rate(&pwd->c, parent_rate);
+}
+
+long sifive_prci_wrpll_round_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
+ struct __prci_wrpll_data *pwd = pc->pwd;
+ struct wrpll_cfg c;
+
+ memcpy(&c, &pwd->c, sizeof(c));
+
+ wrpll_configure_for_rate(&c, rate, *parent_rate);
+
+ return wrpll_calc_output_rate(&c, *parent_rate);
+}
+
+int sifive_prci_wrpll_set_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
+ struct __prci_wrpll_data *pwd = pc->pwd;
+ struct __prci_data *pd = pc->pd;
+ int r;
+
+ r = wrpll_configure_for_rate(&pwd->c, rate, parent_rate);
+ if (r)
+ return r;
+
+ if (pwd->enable_bypass)
+ pwd->enable_bypass(pd);
+
+ __prci_wrpll_write_cfg0(pd, pwd, &pwd->c);
+
+ udelay(wrpll_calc_max_lock_us(&pwd->c));
+
+ return 0;
+}
+
+int sifive_clk_is_enabled(struct clk_hw *hw)
+{
+ struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
+ struct __prci_wrpll_data *pwd = pc->pwd;
+ struct __prci_data *pd = pc->pd;
+ u32 r;
+
+ r = __prci_readl(pd, pwd->cfg1_offs);
+
+ if (r & PRCI_COREPLLCFG1_CKE_MASK)
+ return 1;
+ else
+ return 0;
+}
+
+int sifive_prci_clock_enable(struct clk_hw *hw)
+{
+ struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
+ struct __prci_wrpll_data *pwd = pc->pwd;
+ struct __prci_data *pd = pc->pd;
+
+ if (sifive_clk_is_enabled(hw))
+ return 0;
+
+ __prci_wrpll_write_cfg1(pd, pwd, PRCI_COREPLLCFG1_CKE_MASK);
+
+ if (pwd->disable_bypass)
+ pwd->disable_bypass(pd);
+
+ return 0;
+}
+
+void sifive_prci_clock_disable(struct clk_hw *hw)
+{
+ struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
+ struct __prci_wrpll_data *pwd = pc->pwd;
+ struct __prci_data *pd = pc->pd;
+ u32 r;
+
+ if (pwd->enable_bypass)
+ pwd->enable_bypass(pd);
+
+ r = __prci_readl(pd, pwd->cfg1_offs);
+ r &= ~PRCI_COREPLLCFG1_CKE_MASK;
+
+ __prci_wrpll_write_cfg1(pd, pwd, r);
+}
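Note the ordering in the two callbacks above: enable gates the clock on via CKE first and only then moves the downstream glitchless mux off the bypass source, while disable parks the mux back on the bypass source before clearing CKE, so consumers never observe the PLL output while it is gated. From the consumer side this is all driven through the common clock API; a hedged sketch (the device, clock lookup, and error handling are illustrative, not taken from this patch):

#include <linux/clk.h>
#include <linux/device.h>

static int example_consumer_probe(struct device *dev)
{
	struct clk *clk;
	int ret;

	/* first clock from this device's DT "clocks" property */
	clk = devm_clk_get(dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* for a PRCI PLL this lands in sifive_prci_clock_enable() */
	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	/* ... use the hardware ... */

	clk_disable_unprepare(clk);	/* sifive_prci_clock_disable() */
	return 0;
}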
+
+/* TLCLKSEL clock integration */
+
+unsigned long sifive_prci_tlclksel_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
+ struct __prci_data *pd = pc->pd;
+ u32 v;
+ u8 div;
+
+ v = __prci_readl(pd, PRCI_CLKMUXSTATUSREG_OFFSET);
+ v &= PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_MASK;
+ div = v ? 1 : 2;
+
+ return div_u64(parent_rate, div);
+}
+
+/* HFPCLK clock integration */
+
+unsigned long sifive_prci_hfpclkplldiv_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
+ struct __prci_data *pd = pc->pd;
+ u32 div = __prci_readl(pd, PRCI_HFPCLKPLLDIV_OFFSET);
+
+ return div_u64(parent_rate, div + 2);
+}
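Two worked numbers for the read-only rate callbacks above: tlclk divides its parent by 1 when the TLCLKSEL status bit is set and by 2 otherwise, so a hypothetical 1 GHz corepll yields either 1 GHz or 500 MHz; the HFPCLK post-divider output is parent / (div + 2), where div is the raw PRCI_HFPCLKPLLDIV register value, so a register value of 0 halves the parent rate. (The 1 GHz figure is purely illustrative.)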
+
+/*
+ * Core clock mux control
+ */
+
+/**
+ * sifive_prci_coreclksel_use_hfclk() - switch the CORECLK mux to output HFCLK
+ * @pd: struct __prci_data * for the PRCI containing the CORECLK mux reg
+ *
+ * Switch the CORECLK mux to the HFCLK input source; return once complete.
+ *
+ * Context: Any context. Caller must prevent concurrent changes to the
+ * PRCI_CORECLKSEL_OFFSET register.
+ */
+void sifive_prci_coreclksel_use_hfclk(struct __prci_data *pd)
+{
+ u32 r;
+
+ r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
+ r |= PRCI_CORECLKSEL_CORECLKSEL_MASK;
+ __prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);
+
+ r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET); /* barrier */
+}
+
+/**
+ * sifive_prci_coreclksel_use_corepll() - switch the CORECLK mux to output
+ * COREPLL
+ * @pd: struct __prci_data * for the PRCI containing the CORECLK mux reg
+ *
+ * Switch the CORECLK mux to the COREPLL output clock; return once complete.
+ *
+ * Context: Any context. Caller must prevent concurrent changes to the
+ * PRCI_CORECLKSEL_OFFSET register.
+ */
+void sifive_prci_coreclksel_use_corepll(struct __prci_data *pd)
+{
+ u32 r;
+
+ r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
+ r &= ~PRCI_CORECLKSEL_CORECLKSEL_MASK;
+ __prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);
+
+ r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET); /* barrier */
+}
+
+/**
+ * sifive_prci_coreclksel_use_final_corepll() - switch the CORECLK mux to output
+ * FINAL_COREPLL
+ * @pd: struct __prci_data * for the PRCI containing the CORECLK mux reg
+ *
+ * Switch the CORECLK mux to the final COREPLL output clock; return once
+ * complete.
+ *
+ * Context: Any context. Caller must prevent concurrent changes to the
+ * PRCI_CORECLKSEL_OFFSET register.
+ */
+void sifive_prci_coreclksel_use_final_corepll(struct __prci_data *pd)
+{
+ u32 r;
+
+ r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
+ r &= ~PRCI_CORECLKSEL_CORECLKSEL_MASK;
+ __prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);
+
+ r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET); /* barrier */
+}
+
+/**
+ * sifive_prci_corepllsel_use_dvfscorepll() - switch the COREPLL mux to
+ * output DVFS_COREPLL
+ * @pd: struct __prci_data * for the PRCI containing the COREPLL mux reg
+ *
+ * Switch the COREPLL mux to the DVFSCOREPLL output clock; return once complete.
+ *
+ * Context: Any context. Caller must prevent concurrent changes to the
+ * PRCI_COREPLLSEL_OFFSET register.
+ */
+void sifive_prci_corepllsel_use_dvfscorepll(struct __prci_data *pd)
+{
+ u32 r;
+
+ r = __prci_readl(pd, PRCI_COREPLLSEL_OFFSET);
+ r |= PRCI_COREPLLSEL_COREPLLSEL_MASK;
+ __prci_writel(r, PRCI_COREPLLSEL_OFFSET, pd);
+
+ r = __prci_readl(pd, PRCI_COREPLLSEL_OFFSET); /* barrier */
+}
+
+/**
+ * sifive_prci_corepllsel_use_corepll() - switch the COREPLL mux to
+ * output COREPLL
+ * @pd: struct __prci_data * for the PRCI containing the COREPLL mux reg
+ *
+ * Switch the COREPLL mux to the COREPLL output clock; return once complete.
+ *
+ * Context: Any context. Caller must prevent concurrent changes to the
+ * PRCI_COREPLLSEL_OFFSET register.
+ */
+void sifive_prci_corepllsel_use_corepll(struct __prci_data *pd)
+{
+ u32 r;
+
+ r = __prci_readl(pd, PRCI_COREPLLSEL_OFFSET);
+ r &= ~PRCI_COREPLLSEL_COREPLLSEL_MASK;
+ __prci_writel(r, PRCI_COREPLLSEL_OFFSET, pd);
+
+ r = __prci_readl(pd, PRCI_COREPLLSEL_OFFSET); /* barrier */
+}
+
+/**
+ * sifive_prci_hfpclkpllsel_use_hfclk() - switch the HFPCLKPLL mux to
+ * output HFCLK
+ * @pd: struct __prci_data * for the PRCI containing the HFPCLKPLL mux reg
+ *
+ * Switch the HFPCLKPLL mux to the HFCLK input source; return once complete.
+ *
+ * Context: Any context. Caller must prevent concurrent changes to the
+ * PRCI_HFPCLKPLLSEL_OFFSET register.
+ */
+void sifive_prci_hfpclkpllsel_use_hfclk(struct __prci_data *pd)
+{
+ u32 r;
+
+ r = __prci_readl(pd, PRCI_HFPCLKPLLSEL_OFFSET);
+ r |= PRCI_HFPCLKPLLSEL_HFPCLKPLLSEL_MASK;
+ __prci_writel(r, PRCI_HFPCLKPLLSEL_OFFSET, pd);
+
+ r = __prci_readl(pd, PRCI_HFPCLKPLLSEL_OFFSET); /* barrier */
+}
+
+/**
+ * sifive_prci_hfpclkpllsel_use_hfpclkpll() - switch the HFPCLKPLL mux to
+ * output HFPCLKPLL
+ * @pd: struct __prci_data * for the PRCI containing the HFPCLKPLL mux reg
+ *
+ * Switch the HFPCLKPLL mux to the HFPCLKPLL output clock; return once complete.
+ *
+ * Context: Any context. Caller must prevent concurrent changes to the
+ * PRCI_HFPCLKPLLSEL_OFFSET register.
+ */
+void sifive_prci_hfpclkpllsel_use_hfpclkpll(struct __prci_data *pd)
+{
+ u32 r;
+
+ r = __prci_readl(pd, PRCI_HFPCLKPLLSEL_OFFSET);
+ r &= ~PRCI_HFPCLKPLLSEL_HFPCLKPLLSEL_MASK;
+ __prci_writel(r, PRCI_HFPCLKPLLSEL_OFFSET, pd);
+
+ r = __prci_readl(pd, PRCI_HFPCLKPLLSEL_OFFSET); /* barrier */
+}
+
+/**
+ * __prci_register_clocks() - register clock controls in the PRCI
+ * @dev: Linux struct device
+ * @pd: pointer to the PRCI per-device instance data
+ * @desc: pointer to the per-SoC clock description
+ *
+ * Register the list of clock controls described by @desc with
+ * the Linux clock framework.
+ *
+ * Return: 0 upon success or a negative error code upon failure.
+ */
+static int __prci_register_clocks(struct device *dev, struct __prci_data *pd,
+ const struct prci_clk_desc *desc)
+{
+ struct clk_init_data init = { };
+ struct __prci_clock *pic;
+ int parent_count, i, r;
+
+ parent_count = of_clk_get_parent_count(dev->of_node);
+ if (parent_count != EXPECTED_CLK_PARENT_COUNT) {
+ dev_err(dev, "expected only two parent clocks, found %d\n",
+ parent_count);
+ return -EINVAL;
+ }
+
+ /* Register PLLs */
+ for (i = 0; i < desc->num_clks; ++i) {
+ pic = &(desc->clks[i]);
+
+ init.name = pic->name;
+ init.parent_names = &pic->parent_name;
+ init.num_parents = 1;
+ init.ops = pic->ops;
+ pic->hw.init = &init;
+
+ pic->pd = pd;
+
+ if (pic->pwd)
+ __prci_wrpll_read_cfg0(pd, pic->pwd);
+
+ r = devm_clk_hw_register(dev, &pic->hw);
+ if (r) {
+ dev_warn(dev, "Failed to register clock %s: %d\n",
+ init.name, r);
+ return r;
+ }
+
+ r = clk_hw_register_clkdev(&pic->hw, pic->name, dev_name(dev));
+ if (r) {
+ dev_warn(dev, "Failed to register clkdev for %s: %d\n",
+ init.name, r);
+ return r;
+ }
+
+ pd->hw_clks.hws[i] = &pic->hw;
+ }
+
+ pd->hw_clks.num = i;
+
+ r = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
+ &pd->hw_clks);
+ if (r) {
+ dev_err(dev, "could not add hw_provider: %d\n", r);
+ return r;
+ }
+
+ return 0;
+}
+
+/**
+ * sifive_prci_probe() - initialize PRCI data and check the parent count
+ * @pdev: platform device pointer for the PRCI
+ *
+ * Return: 0 upon success or a negative error code upon failure.
+ */
+static int sifive_prci_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct __prci_data *pd;
+ const struct prci_clk_desc *desc;
+ int r;
+
+ desc = of_device_get_match_data(&pdev->dev);
+
+ pd = devm_kzalloc(dev, struct_size(pd, hw_clks.hws, desc->num_clks), GFP_KERNEL);
+ if (!pd)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pd->va = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pd->va))
+ return PTR_ERR(pd->va);
+
+ r = __prci_register_clocks(dev, pd, desc);
+ if (r) {
+ dev_err(dev, "could not register clocks: %d\n", r);
+ return r;
+ }
+
+ dev_dbg(dev, "SiFive PRCI probed\n");
+
+ return 0;
+}
+
+static const struct of_device_id sifive_prci_of_match[] = {
+ {.compatible = "sifive,fu540-c000-prci", .data = &prci_clk_fu540},
+ {.compatible = "sifive,fu740-c000-prci", .data = &prci_clk_fu740},
+ {}
+};
+
+static struct platform_driver sifive_prci_driver = {
+ .driver = {
+ .name = "sifive-clk-prci",
+ .of_match_table = sifive_prci_of_match,
+ },
+ .probe = sifive_prci_probe,
+};
+
+static int __init sifive_prci_init(void)
+{
+ return platform_driver_register(&sifive_prci_driver);
+}
+core_initcall(sifive_prci_init);
diff --git a/drivers/clk/sifive/sifive-prci.h b/drivers/clk/sifive/sifive-prci.h
new file mode 100644
index 000000000000..dbdbd1722688
--- /dev/null
+++ b/drivers/clk/sifive/sifive-prci.h
@@ -0,0 +1,299 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018-2019 SiFive, Inc.
+ * Wesley Terpstra
+ * Paul Walmsley
+ * Zong Li
+ */
+
+#ifndef __SIFIVE_CLK_SIFIVE_PRCI_H
+#define __SIFIVE_CLK_SIFIVE_PRCI_H
+
+#include <linux/clk/analogbits-wrpll-cln28hpc.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+/*
+ * EXPECTED_CLK_PARENT_COUNT: how many parent clocks this driver expects:
+ * hfclk and rtcclk
+ */
+#define EXPECTED_CLK_PARENT_COUNT 2
+
+/*
+ * Register offsets and bitmasks
+ */
+
+/* COREPLLCFG0 */
+#define PRCI_COREPLLCFG0_OFFSET 0x4
+#define PRCI_COREPLLCFG0_DIVR_SHIFT 0
+#define PRCI_COREPLLCFG0_DIVR_MASK (0x3f << PRCI_COREPLLCFG0_DIVR_SHIFT)
+#define PRCI_COREPLLCFG0_DIVF_SHIFT 6
+#define PRCI_COREPLLCFG0_DIVF_MASK (0x1ff << PRCI_COREPLLCFG0_DIVF_SHIFT)
+#define PRCI_COREPLLCFG0_DIVQ_SHIFT 15
+#define PRCI_COREPLLCFG0_DIVQ_MASK (0x7 << PRCI_COREPLLCFG0_DIVQ_SHIFT)
+#define PRCI_COREPLLCFG0_RANGE_SHIFT 18
+#define PRCI_COREPLLCFG0_RANGE_MASK (0x7 << PRCI_COREPLLCFG0_RANGE_SHIFT)
+#define PRCI_COREPLLCFG0_BYPASS_SHIFT 24
+#define PRCI_COREPLLCFG0_BYPASS_MASK (0x1 << PRCI_COREPLLCFG0_BYPASS_SHIFT)
+#define PRCI_COREPLLCFG0_FSE_SHIFT 25
+#define PRCI_COREPLLCFG0_FSE_MASK (0x1 << PRCI_COREPLLCFG0_FSE_SHIFT)
+#define PRCI_COREPLLCFG0_LOCK_SHIFT 31
+#define PRCI_COREPLLCFG0_LOCK_MASK (0x1 << PRCI_COREPLLCFG0_LOCK_SHIFT)
+
+/* COREPLLCFG1 */
+#define PRCI_COREPLLCFG1_OFFSET 0x8
+#define PRCI_COREPLLCFG1_CKE_SHIFT 31
+#define PRCI_COREPLLCFG1_CKE_MASK (0x1 << PRCI_COREPLLCFG1_CKE_SHIFT)
+
+/* DDRPLLCFG0 */
+#define PRCI_DDRPLLCFG0_OFFSET 0xc
+#define PRCI_DDRPLLCFG0_DIVR_SHIFT 0
+#define PRCI_DDRPLLCFG0_DIVR_MASK (0x3f << PRCI_DDRPLLCFG0_DIVR_SHIFT)
+#define PRCI_DDRPLLCFG0_DIVF_SHIFT 6
+#define PRCI_DDRPLLCFG0_DIVF_MASK (0x1ff << PRCI_DDRPLLCFG0_DIVF_SHIFT)
+#define PRCI_DDRPLLCFG0_DIVQ_SHIFT 15
+#define PRCI_DDRPLLCFG0_DIVQ_MASK (0x7 << PRCI_DDRPLLCFG0_DIVQ_SHIFT)
+#define PRCI_DDRPLLCFG0_RANGE_SHIFT 18
+#define PRCI_DDRPLLCFG0_RANGE_MASK (0x7 << PRCI_DDRPLLCFG0_RANGE_SHIFT)
+#define PRCI_DDRPLLCFG0_BYPASS_SHIFT 24
+#define PRCI_DDRPLLCFG0_BYPASS_MASK (0x1 << PRCI_DDRPLLCFG0_BYPASS_SHIFT)
+#define PRCI_DDRPLLCFG0_FSE_SHIFT 25
+#define PRCI_DDRPLLCFG0_FSE_MASK (0x1 << PRCI_DDRPLLCFG0_FSE_SHIFT)
+#define PRCI_DDRPLLCFG0_LOCK_SHIFT 31
+#define PRCI_DDRPLLCFG0_LOCK_MASK (0x1 << PRCI_DDRPLLCFG0_LOCK_SHIFT)
+
+/* DDRPLLCFG1 */
+#define PRCI_DDRPLLCFG1_OFFSET 0x10
+#define PRCI_DDRPLLCFG1_CKE_SHIFT 31
+#define PRCI_DDRPLLCFG1_CKE_MASK (0x1 << PRCI_DDRPLLCFG1_CKE_SHIFT)
+
+/* GEMGXLPLLCFG0 */
+#define PRCI_GEMGXLPLLCFG0_OFFSET 0x1c
+#define PRCI_GEMGXLPLLCFG0_DIVR_SHIFT 0
+#define PRCI_GEMGXLPLLCFG0_DIVR_MASK (0x3f << PRCI_GEMGXLPLLCFG0_DIVR_SHIFT)
+#define PRCI_GEMGXLPLLCFG0_DIVF_SHIFT 6
+#define PRCI_GEMGXLPLLCFG0_DIVF_MASK (0x1ff << PRCI_GEMGXLPLLCFG0_DIVF_SHIFT)
+#define PRCI_GEMGXLPLLCFG0_DIVQ_SHIFT 15
+#define PRCI_GEMGXLPLLCFG0_DIVQ_MASK (0x7 << PRCI_GEMGXLPLLCFG0_DIVQ_SHIFT)
+#define PRCI_GEMGXLPLLCFG0_RANGE_SHIFT 18
+#define PRCI_GEMGXLPLLCFG0_RANGE_MASK (0x7 << PRCI_GEMGXLPLLCFG0_RANGE_SHIFT)
+#define PRCI_GEMGXLPLLCFG0_BYPASS_SHIFT 24
+#define PRCI_GEMGXLPLLCFG0_BYPASS_MASK (0x1 << PRCI_GEMGXLPLLCFG0_BYPASS_SHIFT)
+#define PRCI_GEMGXLPLLCFG0_FSE_SHIFT 25
+#define PRCI_GEMGXLPLLCFG0_FSE_MASK (0x1 << PRCI_GEMGXLPLLCFG0_FSE_SHIFT)
+#define PRCI_GEMGXLPLLCFG0_LOCK_SHIFT 31
+#define PRCI_GEMGXLPLLCFG0_LOCK_MASK (0x1 << PRCI_GEMGXLPLLCFG0_LOCK_SHIFT)
+
+/* GEMGXLPLLCFG1 */
+#define PRCI_GEMGXLPLLCFG1_OFFSET 0x20
+#define PRCI_GEMGXLPLLCFG1_CKE_SHIFT 31
+#define PRCI_GEMGXLPLLCFG1_CKE_MASK (0x1 << PRCI_GEMGXLPLLCFG1_CKE_SHIFT)
+
+/* CORECLKSEL */
+#define PRCI_CORECLKSEL_OFFSET 0x24
+#define PRCI_CORECLKSEL_CORECLKSEL_SHIFT 0
+#define PRCI_CORECLKSEL_CORECLKSEL_MASK \
+ (0x1 << PRCI_CORECLKSEL_CORECLKSEL_SHIFT)
+
+/* DEVICESRESETREG */
+#define PRCI_DEVICESRESETREG_OFFSET 0x28
+#define PRCI_DEVICESRESETREG_DDR_CTRL_RST_N_SHIFT 0
+#define PRCI_DEVICESRESETREG_DDR_CTRL_RST_N_MASK \
+ (0x1 << PRCI_DEVICESRESETREG_DDR_CTRL_RST_N_SHIFT)
+#define PRCI_DEVICESRESETREG_DDR_AXI_RST_N_SHIFT 1
+#define PRCI_DEVICESRESETREG_DDR_AXI_RST_N_MASK \
+ (0x1 << PRCI_DEVICESRESETREG_DDR_AXI_RST_N_SHIFT)
+#define PRCI_DEVICESRESETREG_DDR_AHB_RST_N_SHIFT 2
+#define PRCI_DEVICESRESETREG_DDR_AHB_RST_N_MASK \
+ (0x1 << PRCI_DEVICESRESETREG_DDR_AHB_RST_N_SHIFT)
+#define PRCI_DEVICESRESETREG_DDR_PHY_RST_N_SHIFT 3
+#define PRCI_DEVICESRESETREG_DDR_PHY_RST_N_MASK \
+ (0x1 << PRCI_DEVICESRESETREG_DDR_PHY_RST_N_SHIFT)
+#define PRCI_DEVICESRESETREG_GEMGXL_RST_N_SHIFT 5
+#define PRCI_DEVICESRESETREG_GEMGXL_RST_N_MASK \
+ (0x1 << PRCI_DEVICESRESETREG_GEMGXL_RST_N_SHIFT)
+#define PRCI_DEVICESRESETREG_CHIPLINK_RST_N_SHIFT 6
+#define PRCI_DEVICESRESETREG_CHIPLINK_RST_N_MASK \
+ (0x1 << PRCI_DEVICESRESETREG_CHIPLINK_RST_N_SHIFT)
+
+/* CLKMUXSTATUSREG */
+#define PRCI_CLKMUXSTATUSREG_OFFSET 0x2c
+#define PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_SHIFT 1
+#define PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_MASK \
+ (0x1 << PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_SHIFT)
+
+/* CLTXPLLCFG0 */
+#define PRCI_CLTXPLLCFG0_OFFSET 0x30
+#define PRCI_CLTXPLLCFG0_DIVR_SHIFT 0
+#define PRCI_CLTXPLLCFG0_DIVR_MASK (0x3f << PRCI_CLTXPLLCFG0_DIVR_SHIFT)
+#define PRCI_CLTXPLLCFG0_DIVF_SHIFT 6
+#define PRCI_CLTXPLLCFG0_DIVF_MASK (0x1ff << PRCI_CLTXPLLCFG0_DIVF_SHIFT)
+#define PRCI_CLTXPLLCFG0_DIVQ_SHIFT 15
+#define PRCI_CLTXPLLCFG0_DIVQ_MASK (0x7 << PRCI_CLTXPLLCFG0_DIVQ_SHIFT)
+#define PRCI_CLTXPLLCFG0_RANGE_SHIFT 18
+#define PRCI_CLTXPLLCFG0_RANGE_MASK (0x7 << PRCI_CLTXPLLCFG0_RANGE_SHIFT)
+#define PRCI_CLTXPLLCFG0_BYPASS_SHIFT 24
+#define PRCI_CLTXPLLCFG0_BYPASS_MASK (0x1 << PRCI_CLTXPLLCFG0_BYPASS_SHIFT)
+#define PRCI_CLTXPLLCFG0_FSE_SHIFT 25
+#define PRCI_CLTXPLLCFG0_FSE_MASK (0x1 << PRCI_CLTXPLLCFG0_FSE_SHIFT)
+#define PRCI_CLTXPLLCFG0_LOCK_SHIFT 31
+#define PRCI_CLTXPLLCFG0_LOCK_MASK (0x1 << PRCI_CLTXPLLCFG0_LOCK_SHIFT)
+
+/* CLTXPLLCFG1 */
+#define PRCI_CLTXPLLCFG1_OFFSET 0x34
+#define PRCI_CLTXPLLCFG1_CKE_SHIFT 31
+#define PRCI_CLTXPLLCFG1_CKE_MASK (0x1 << PRCI_CLTXPLLCFG1_CKE_SHIFT)
+
+/* DVFSCOREPLLCFG0 */
+#define PRCI_DVFSCOREPLLCFG0_OFFSET 0x38
+
+/* DVFSCOREPLLCFG1 */
+#define PRCI_DVFSCOREPLLCFG1_OFFSET 0x3c
+#define PRCI_DVFSCOREPLLCFG1_CKE_SHIFT 31
+#define PRCI_DVFSCOREPLLCFG1_CKE_MASK (0x1 << PRCI_DVFSCOREPLLCFG1_CKE_SHIFT)
+
+/* COREPLLSEL */
+#define PRCI_COREPLLSEL_OFFSET 0x40
+#define PRCI_COREPLLSEL_COREPLLSEL_SHIFT 0
+#define PRCI_COREPLLSEL_COREPLLSEL_MASK \
+ (0x1 << PRCI_COREPLLSEL_COREPLLSEL_SHIFT)
+
+/* HFPCLKPLLCFG0 */
+#define PRCI_HFPCLKPLLCFG0_OFFSET 0x50
+#define PRCI_HFPCLKPLL_CFG0_DIVR_SHIFT 0
+#define PRCI_HFPCLKPLL_CFG0_DIVR_MASK \
+ (0x3f << PRCI_HFPCLKPLL_CFG0_DIVR_SHIFT)
+#define PRCI_HFPCLKPLL_CFG0_DIVF_SHIFT 6
+#define PRCI_HFPCLKPLL_CFG0_DIVF_MASK \
+ (0x1ff << PRCI_HFPCLKPLL_CFG0_DIVF_SHIFT)
+#define PRCI_HFPCLKPLL_CFG0_DIVQ_SHIFT 15
+#define PRCI_HFPCLKPLL_CFG0_DIVQ_MASK \
+ (0x7 << PRCI_HFPCLKPLL_CFG0_DIVQ_SHIFT)
+#define PRCI_HFPCLKPLL_CFG0_RANGE_SHIFT 18
+#define PRCI_HFPCLKPLL_CFG0_RANGE_MASK \
+ (0x7 << PRCI_HFPCLKPLL_CFG0_RANGE_SHIFT)
+#define PRCI_HFPCLKPLL_CFG0_BYPASS_SHIFT 24
+#define PRCI_HFPCLKPLL_CFG0_BYPASS_MASK \
+ (0x1 << PRCI_HFPCLKPLL_CFG0_BYPASS_SHIFT)
+#define PRCI_HFPCLKPLL_CFG0_FSE_SHIFT 25
+#define PRCI_HFPCLKPLL_CFG0_FSE_MASK \
+ (0x1 << PRCI_HFPCLKPLL_CFG0_FSE_SHIFT)
+#define PRCI_HFPCLKPLL_CFG0_LOCK_SHIFT 31
+#define PRCI_HFPCLKPLL_CFG0_LOCK_MASK \
+ (0x1 << PRCI_HFPCLKPLL_CFG0_LOCK_SHIFT)
+
+/* HFPCLKPLLCFG1 */
+#define PRCI_HFPCLKPLLCFG1_OFFSET 0x54
+#define PRCI_HFPCLKPLLCFG1_CKE_SHIFT 31
+#define PRCI_HFPCLKPLLCFG1_CKE_MASK \
+ (0x1 << PRCI_HFPCLKPLLCFG1_CKE_SHIFT)
+
+/* HFPCLKPLLSEL */
+#define PRCI_HFPCLKPLLSEL_OFFSET 0x58
+#define PRCI_HFPCLKPLLSEL_HFPCLKPLLSEL_SHIFT 0
+#define PRCI_HFPCLKPLLSEL_HFPCLKPLLSEL_MASK \
+ (0x1 << PRCI_HFPCLKPLLSEL_HFPCLKPLLSEL_SHIFT)
+
+/* HFPCLKPLLDIV */
+#define PRCI_HFPCLKPLLDIV_OFFSET 0x5c
+
+/* PRCIPLL */
+#define PRCI_PRCIPLL_OFFSET 0xe0
+
+/* PROCMONCFG */
+#define PRCI_PROCMONCFG_OFFSET 0xf0
+
+/*
+ * Private structures
+ */
+
+/**
+ * struct __prci_data - per-device-instance data
+ * @va: base virtual address of the PRCI IP block
+ * @hw_clks: encapsulates struct clk_hw records
+ *
+ * PRCI per-device instance data
+ */
+struct __prci_data {
+ void __iomem *va;
+ struct clk_hw_onecell_data hw_clks;
+};
+
+/**
+ * struct __prci_wrpll_data - WRPLL configuration and integration data
+ * @c: WRPLL current configuration record
+ * @enable_bypass: fn ptr to code to bypass the WRPLL (if applicable; else NULL)
+ * @disable_bypass: fn ptr to code to not bypass the WRPLL (or NULL)
+ * @cfg0_offs: WRPLL CFG0 register offset (in bytes) from the PRCI base address
+ * @cfg1_offs: WRPLL CFG1 register offset (in bytes) from the PRCI base address
+ *
+ * @enable_bypass and @disable_bypass are used for WRPLL instances
+ * that contain a separate external glitchless clock mux downstream
+ * from the PLL. The WRPLL internal bypass mux is not glitchless.
+ */
+struct __prci_wrpll_data {
+ struct wrpll_cfg c;
+ void (*enable_bypass)(struct __prci_data *pd);
+ void (*disable_bypass)(struct __prci_data *pd);
+ u8 cfg0_offs;
+ u8 cfg1_offs;
+};
+
+/**
+ * struct __prci_clock - describes a clock device managed by PRCI
+ * @name: user-readable clock name string - should match the manual
+ * @parent_name: parent name for this clock
+ * @ops: struct clk_ops for the Linux clock framework to use for control
+ * @hw: Linux-private clock data
+ * @pwd: WRPLL-specific data, associated with this clock (if not NULL)
+ * @pd: PRCI-specific data associated with this clock (if not NULL)
+ *
+ * PRCI clock data. Used by the PRCI driver to register PRCI-provided
+ * clocks with the Linux clock infrastructure.
+ */
+struct __prci_clock {
+ const char *name;
+ const char *parent_name;
+ const struct clk_ops *ops;
+ struct clk_hw hw;
+ struct __prci_wrpll_data *pwd;
+ struct __prci_data *pd;
+};
+
+#define clk_hw_to_prci_clock(pwd) container_of(pwd, struct __prci_clock, hw)
+
+/*
+ * struct prci_clk_desc - describes the PRCI clocks of each SoC
+ * @clks: pointer to an array of struct __prci_clock
+ * @num_clks: the number of elements in @clks
+ */
+struct prci_clk_desc {
+ struct __prci_clock *clks;
+ size_t num_clks;
+};
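
For illustration, a per-SoC descriptor would then be instantiated roughly
like this (table and symbol names here are hypothetical):

	static const struct prci_clk_desc prci_clk_example_soc = {
		.clks = __prci_init_clocks_example_soc,
		.num_clks = ARRAY_SIZE(__prci_init_clocks_example_soc),
	};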
+
+/* Core clock mux control */
+void sifive_prci_coreclksel_use_hfclk(struct __prci_data *pd);
+void sifive_prci_coreclksel_use_corepll(struct __prci_data *pd);
+void sifive_prci_coreclksel_use_final_corepll(struct __prci_data *pd);
+void sifive_prci_corepllsel_use_dvfscorepll(struct __prci_data *pd);
+void sifive_prci_corepllsel_use_corepll(struct __prci_data *pd);
+void sifive_prci_hfpclkpllsel_use_hfclk(struct __prci_data *pd);
+void sifive_prci_hfpclkpllsel_use_hfpclkpll(struct __prci_data *pd);
+
+/* Linux clock framework integration */
+long sifive_prci_wrpll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate);
+int sifive_prci_wrpll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate);
+int sifive_clk_is_enabled(struct clk_hw *hw);
+int sifive_prci_clock_enable(struct clk_hw *hw);
+void sifive_prci_clock_disable(struct clk_hw *hw);
+unsigned long sifive_prci_wrpll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate);
+unsigned long sifive_prci_tlclksel_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate);
+unsigned long sifive_prci_hfpclkplldiv_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate);
+
+#endif /* __SIFIVE_CLK_SIFIVE_PRCI_H */
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
index 5f66bf879772..149cfde817cb 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
@@ -389,6 +389,7 @@ static struct clk_div_table ths_div_table[] = {
{ .val = 1, .div = 2 },
{ .val = 2, .div = 4 },
{ .val = 3, .div = 6 },
+ { /* Sentinel */ },
};
static const char * const ths_parents[] = { "osc24M" };
static struct ccu_div ths_clk = {
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
index 6b636362379e..7e629a4493af 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
@@ -322,6 +322,7 @@ static struct clk_div_table ths_div_table[] = {
{ .val = 1, .div = 2 },
{ .val = 2, .div = 4 },
{ .val = 3, .div = 6 },
+ { /* Sentinel */ },
};
static SUNXI_CCU_DIV_TABLE_WITH_GATE(ths_clk, "ths", "osc24M",
0x074, 0, 2, ths_div_table, BIT(31), 0);
diff --git a/drivers/clk/tegra/clk-bpmp.c b/drivers/clk/tegra/clk-bpmp.c
index a66263b6490d..6ecf18f71c32 100644
--- a/drivers/clk/tegra/clk-bpmp.c
+++ b/drivers/clk/tegra/clk-bpmp.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2016 NVIDIA Corporation
+ * Copyright (C) 2016-2020 NVIDIA Corporation
*/
#include <linux/clk-provider.h>
@@ -174,7 +174,7 @@ static long tegra_bpmp_clk_round_rate(struct clk_hw *hw, unsigned long rate,
int err;
memset(&request, 0, sizeof(request));
- request.rate = rate;
+ request.rate = min_t(u64, rate, S64_MAX);
memset(&msg, 0, sizeof(msg));
msg.cmd = CMD_CLK_ROUND_RATE;
@@ -256,7 +256,7 @@ static int tegra_bpmp_clk_set_rate(struct clk_hw *hw, unsigned long rate,
struct tegra_bpmp_clk_message msg;
memset(&request, 0, sizeof(request));
- request.rate = rate;
+ request.rate = min_t(u64, rate, S64_MAX);
memset(&msg, 0, sizeof(msg));
msg.cmd = CMD_CLK_SET_RATE;
diff --git a/drivers/clk/tegra/clk-dfll.c b/drivers/clk/tegra/clk-dfll.c
index cfbaa90c7adb..a5f526bb0483 100644
--- a/drivers/clk/tegra/clk-dfll.c
+++ b/drivers/clk/tegra/clk-dfll.c
@@ -1856,13 +1856,13 @@ static int dfll_fetch_pwm_params(struct tegra_dfll *td)
&td->reg_init_uV);
if (!ret) {
dev_err(td->dev, "couldn't get initialized voltage\n");
- return ret;
+ return -EINVAL;
}
ret = read_dt_param(td, "nvidia,pwm-period-nanoseconds", &pwm_period);
if (!ret) {
dev_err(td->dev, "couldn't get PWM period\n");
- return ret;
+ return -EINVAL;
}
td->pwm_rate = (NSEC_PER_SEC / pwm_period) * (MAX_DFLL_VOLTAGES - 1);
diff --git a/drivers/clk/tegra/clk-id.h b/drivers/clk/tegra/clk-id.h
index ff7da2d3e94d..24413812ec5b 100644
--- a/drivers/clk/tegra/clk-id.h
+++ b/drivers/clk/tegra/clk-id.h
@@ -227,6 +227,7 @@ enum clk_id {
tegra_clk_sdmmc4,
tegra_clk_sdmmc4_8,
tegra_clk_se,
+ tegra_clk_se_10,
tegra_clk_soc_therm,
tegra_clk_soc_therm_8,
tegra_clk_sor0,
diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c
index 2b2a3b81c16b..60cc34f90cb9 100644
--- a/drivers/clk/tegra/clk-tegra-periph.c
+++ b/drivers/clk/tegra/clk-tegra-periph.c
@@ -630,7 +630,7 @@ static struct tegra_periph_init_data periph_clks[] = {
INT8("host1x", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_HOST1X, 28, 0, tegra_clk_host1x_8),
INT8("host1x", mux_pllc4_out1_pllc_pllc4_out2_pllp_clkm_plla_pllc4_out0, CLK_SOURCE_HOST1X, 28, 0, tegra_clk_host1x_9),
INT8("se", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SE, 127, TEGRA_PERIPH_ON_APB, tegra_clk_se),
- INT8("se", mux_pllp_pllc2_c_c3_clkm, CLK_SOURCE_SE, 127, TEGRA_PERIPH_ON_APB, tegra_clk_se),
+ INT8("se", mux_pllp_pllc2_c_c3_clkm, CLK_SOURCE_SE, 127, TEGRA_PERIPH_ON_APB, tegra_clk_se_10),
INT8("2d", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_2D, 21, 0, tegra_clk_gr2d_8),
INT8("3d", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_3D, 24, 0, tegra_clk_gr3d_8),
INT8("vic03", mux_pllm_pllc_pllp_plla_pllc2_c3_clkm, CLK_SOURCE_VIC03, 178, 0, tegra_clk_vic03),
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
index 37244a7e68c2..9cf249c344d9 100644
--- a/drivers/clk/tegra/clk-tegra30.c
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -1256,6 +1256,8 @@ static struct tegra_clk_init_table init_table[] __initdata = {
{ TEGRA30_CLK_I2S3_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
{ TEGRA30_CLK_I2S4_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
{ TEGRA30_CLK_VIMCLK_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
+ { TEGRA30_CLK_HDA, TEGRA30_CLK_PLL_P, 102000000, 0 },
+ { TEGRA30_CLK_HDA2CODEC_2X, TEGRA30_CLK_PLL_P, 48000000, 0 },
/* must be the last entry */
{ TEGRA30_CLK_CLK_MAX, TEGRA30_CLK_CLK_MAX, 0, 0 },
};
diff --git a/drivers/clk/ti/clk-33xx.c b/drivers/clk/ti/clk-33xx.c
index 7dc30dd6c8d5..f2c22120aaa7 100644
--- a/drivers/clk/ti/clk-33xx.c
+++ b/drivers/clk/ti/clk-33xx.c
@@ -266,6 +266,8 @@ static const char *enable_init_clks[] = {
"dpll_ddr_m2_ck",
"dpll_mpu_m2_ck",
"l3_gclk",
+ /* AM3_L3_L3_MAIN_CLKCTRL, needed during suspend */
+ "l3-clkctrl:00bc:0",
"l4hs_gclk",
"l4fw_gclk",
"l4ls_gclk",
diff --git a/drivers/clk/ti/clk-43xx.c b/drivers/clk/ti/clk-43xx.c
index e5538d577ce5..46c0add99570 100644
--- a/drivers/clk/ti/clk-43xx.c
+++ b/drivers/clk/ti/clk-43xx.c
@@ -272,6 +272,11 @@ static struct ti_dt_clk am43xx_clks[] = {
{ .node_name = NULL },
};
+static const char *enable_init_clks[] = {
+ /* AM4_L3_L3_MAIN_CLKCTRL, needed during suspend */
+ "l3-clkctrl:0000:0",
+};
+
int __init am43xx_dt_clk_init(void)
{
struct clk *clk1, *clk2;
@@ -283,6 +288,9 @@ int __init am43xx_dt_clk_init(void)
omap2_clk_disable_autoidle_all();
+ omap2_clk_enable_init_clocks(enable_init_clks,
+ ARRAY_SIZE(enable_init_clks));
+
ti_clk_add_aliases();
/*
diff --git a/drivers/clk/ti/clk-44xx.c b/drivers/clk/ti/clk-44xx.c
index a38c92153979..d078e5d73ed9 100644
--- a/drivers/clk/ti/clk-44xx.c
+++ b/drivers/clk/ti/clk-44xx.c
@@ -255,7 +255,7 @@ static const struct omap_clkctrl_reg_data omap4_l3_instr_clkctrl_regs[] __initco
};
static const struct omap_clkctrl_reg_data omap4_ivahd_clkctrl_regs[] __initconst = {
- { OMAP4_IVA_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_iva_m5x2_ck" },
+ { OMAP4_IVA_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_NO_IDLEST, "dpll_iva_m5x2_ck" },
{ OMAP4_SL2IF_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_iva_m5x2_ck" },
{ 0 },
};
diff --git a/drivers/clk/ti/clk-54xx.c b/drivers/clk/ti/clk-54xx.c
index 8694bc9f5fc7..f0542391ca4b 100644
--- a/drivers/clk/ti/clk-54xx.c
+++ b/drivers/clk/ti/clk-54xx.c
@@ -605,7 +605,7 @@ static struct ti_dt_clk omap54xx_clks[] = {
int __init omap5xxx_dt_clk_init(void)
{
int rc;
- struct clk *abe_dpll_ref, *abe_dpll, *sys_32k_ck, *usb_dpll;
+ struct clk *abe_dpll_ref, *abe_dpll, *abe_dpll_byp, *sys_32k_ck, *usb_dpll;
ti_dt_clocks_register(omap54xx_clks);
@@ -616,6 +616,16 @@ int __init omap5xxx_dt_clk_init(void)
abe_dpll_ref = clk_get_sys(NULL, "abe_dpll_clk_mux");
sys_32k_ck = clk_get_sys(NULL, "sys_32k_ck");
rc = clk_set_parent(abe_dpll_ref, sys_32k_ck);
+
+ /*
+ * This must also be set to sys_32k_ck to match or
+ * the ABE DPLL will not lock on a warm reboot when
+ * ABE timers are used.
+ */
+ abe_dpll_byp = clk_get_sys(NULL, "abe_dpll_bypass_clk_mux");
+ if (!rc)
+ rc = clk_set_parent(abe_dpll_byp, sys_32k_ck);
+
abe_dpll = clk_get_sys(NULL, "dpll_abe_ck");
if (!rc)
rc = clk_set_rate(abe_dpll, OMAP5_DPLL_ABE_DEFFREQ);
diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c
index 4e27f88062e7..8b9118ccd4cd 100644
--- a/drivers/clk/ti/clk-7xx.c
+++ b/drivers/clk/ti/clk-7xx.c
@@ -252,6 +252,12 @@ static const struct omap_clkctrl_reg_data dra7_l3instr_clkctrl_regs[] __initcons
{ 0 },
};
+static const struct omap_clkctrl_reg_data dra7_iva_clkctrl_regs[] __initconst = {
+ { DRA7_IVA_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_NO_IDLEST, "dpll_iva_h12x2_ck" },
+ { DRA7_SL2IF_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_iva_h12x2_ck" },
+ { 0 },
+};
+
static const char * const dra7_dss_dss_clk_parents[] __initconst = {
"dpll_per_h12x2_ck",
NULL,
@@ -827,6 +833,7 @@ const struct omap_clkctrl_data dra7_clkctrl_data[] __initconst = {
{ 0x4a008c00, dra7_atl_clkctrl_regs },
{ 0x4a008d20, dra7_l4cfg_clkctrl_regs },
{ 0x4a008e20, dra7_l3instr_clkctrl_regs },
+ { 0x4a008f20, dra7_iva_clkctrl_regs },
{ 0x4a009020, dra7_cam_clkctrl_regs },
{ 0x4a009120, dra7_dss_clkctrl_regs },
{ 0x4a009220, dra7_gpu_clkctrl_regs },
diff --git a/drivers/clk/ti/fapll.c b/drivers/clk/ti/fapll.c
index 95e36ba64acc..8024c6d2b9e9 100644
--- a/drivers/clk/ti/fapll.c
+++ b/drivers/clk/ti/fapll.c
@@ -498,6 +498,7 @@ static struct clk * __init ti_fapll_synth_setup(struct fapll_data *fd,
{
struct clk_init_data *init;
struct fapll_synth *synth;
+ struct clk *clk = ERR_PTR(-ENOMEM);
init = kzalloc(sizeof(*init), GFP_KERNEL);
if (!init)
@@ -520,13 +521,19 @@ static struct clk * __init ti_fapll_synth_setup(struct fapll_data *fd,
synth->hw.init = init;
synth->clk_pll = pll_clk;
- return clk_register(NULL, &synth->hw);
+ clk = clk_register(NULL, &synth->hw);
+ if (IS_ERR(clk)) {
+ pr_err("failed to register clock\n");
+ goto free;
+ }
+
+ return clk;
free:
kfree(synth);
kfree(init);
- return ERR_PTR(-ENOMEM);
+ return clk;
}
static void __init ti_fapll_setup(struct device_node *node)
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 9f00b8385fd4..14c7c4712478 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -553,7 +553,7 @@ config CLKSRC_QCOM
config CLKSRC_VERSATILE
bool "ARM Versatile (Express) reference platforms clock source" if COMPILE_TEST
- depends on GENERIC_SCHED_CLOCK && !ARCH_USES_GETTIMEOFFSET
+ depends on GENERIC_SCHED_CLOCK
select TIMER_OF
default y if (ARCH_VEXPRESS || ARCH_VERSATILE) && ARM
help
diff --git a/drivers/connector/cn_queue.c b/drivers/connector/cn_queue.c
index 49295052ba8b..996f025eb63c 100644
--- a/drivers/connector/cn_queue.c
+++ b/drivers/connector/cn_queue.c
@@ -19,7 +19,7 @@
static struct cn_callback_entry *
cn_queue_alloc_callback_entry(struct cn_queue_dev *dev, const char *name,
- struct cb_id *id,
+ const struct cb_id *id,
void (*callback)(struct cn_msg *,
struct netlink_skb_parms *))
{
@@ -51,13 +51,13 @@ void cn_queue_release_callback(struct cn_callback_entry *cbq)
kfree(cbq);
}
-int cn_cb_equal(struct cb_id *i1, struct cb_id *i2)
+int cn_cb_equal(const struct cb_id *i1, const struct cb_id *i2)
{
return ((i1->idx == i2->idx) && (i1->val == i2->val));
}
int cn_queue_add_callback(struct cn_queue_dev *dev, const char *name,
- struct cb_id *id,
+ const struct cb_id *id,
void (*callback)(struct cn_msg *,
struct netlink_skb_parms *))
{
@@ -90,7 +90,7 @@ int cn_queue_add_callback(struct cn_queue_dev *dev, const char *name,
return 0;
}
-void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id)
+void cn_queue_del_callback(struct cn_queue_dev *dev, const struct cb_id *id)
{
struct cn_callback_entry *cbq, *n;
int found = 0;
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index 7d59d18c6f26..48ec7ce6ecac 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -193,7 +193,7 @@ static void cn_rx_skb(struct sk_buff *skb)
*
* May sleep.
*/
-int cn_add_callback(struct cb_id *id, const char *name,
+int cn_add_callback(const struct cb_id *id, const char *name,
void (*callback)(struct cn_msg *,
struct netlink_skb_parms *))
{
@@ -214,7 +214,7 @@ EXPORT_SYMBOL_GPL(cn_add_callback);
*
* May sleep while waiting for reference counter to become zero.
*/
-void cn_del_callback(struct cb_id *id)
+void cn_del_callback(const struct cb_id *id)
{
struct cn_dev *dev = &cdev;
diff --git a/drivers/counter/ti-eqep.c b/drivers/counter/ti-eqep.c
index a60aee1a1a29..65df9ef5b5bc 100644
--- a/drivers/counter/ti-eqep.c
+++ b/drivers/counter/ti-eqep.c
@@ -235,36 +235,6 @@ static ssize_t ti_eqep_position_ceiling_write(struct counter_device *counter,
return len;
}
-static ssize_t ti_eqep_position_floor_read(struct counter_device *counter,
- struct counter_count *count,
- void *ext_priv, char *buf)
-{
- struct ti_eqep_cnt *priv = counter->priv;
- u32 qposinit;
-
- regmap_read(priv->regmap32, QPOSINIT, &qposinit);
-
- return sprintf(buf, "%u\n", qposinit);
-}
-
-static ssize_t ti_eqep_position_floor_write(struct counter_device *counter,
- struct counter_count *count,
- void *ext_priv, const char *buf,
- size_t len)
-{
- struct ti_eqep_cnt *priv = counter->priv;
- int err;
- u32 res;
-
- err = kstrtouint(buf, 0, &res);
- if (err < 0)
- return err;
-
- regmap_write(priv->regmap32, QPOSINIT, res);
-
- return len;
-}
-
static ssize_t ti_eqep_position_enable_read(struct counter_device *counter,
struct counter_count *count,
void *ext_priv, char *buf)
@@ -302,11 +272,6 @@ static struct counter_count_ext ti_eqep_position_ext[] = {
.write = ti_eqep_position_ceiling_write,
},
{
- .name = "floor",
- .read = ti_eqep_position_floor_read,
- .write = ti_eqep_position_floor_write,
- },
- {
.name = "enable",
.read = ti_eqep_position_enable_read,
.write = ti_eqep_position_enable_write,
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 1f73fa75b1a0..e65e0a43be64 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -289,11 +289,6 @@ config ARM_STI_CPUFREQ
this config option if you wish to add CPUFreq support for STi based
SoCs.
-config ARM_TANGO_CPUFREQ
- bool
- depends on CPUFREQ_DT && ARCH_TANGO
- default y
-
config ARM_TEGRA20_CPUFREQ
tristate "Tegra20/30 CPUFreq support"
depends on ARCH_TEGRA && CPUFREQ_DT
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index f1b7e3dd6e5d..1ab9b1536304 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -79,7 +79,6 @@ obj-$(CONFIG_ARM_SCPI_CPUFREQ) += scpi-cpufreq.o
obj-$(CONFIG_ARM_SPEAR_CPUFREQ) += spear-cpufreq.o
obj-$(CONFIG_ARM_STI_CPUFREQ) += sti-cpufreq.o
obj-$(CONFIG_ARM_ALLWINNER_SUN50I_CPUFREQ_NVMEM) += sun50i-cpufreq-nvmem.o
-obj-$(CONFIG_ARM_TANGO_CPUFREQ) += tango-cpufreq.o
obj-$(CONFIG_ARM_TEGRA20_CPUFREQ) += tegra20-cpufreq.o
obj-$(CONFIG_ARM_TEGRA124_CPUFREQ) += tegra124-cpufreq.o
obj-$(CONFIG_ARM_TEGRA186_CPUFREQ) += tegra186-cpufreq.o
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 1e4fbb002a31..d3e5a6fceb61 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -26,6 +26,7 @@
#include <linux/uaccess.h>
#include <acpi/processor.h>
+#include <acpi/cppc_acpi.h>
#include <asm/msr.h>
#include <asm/processor.h>
@@ -53,6 +54,7 @@ struct acpi_cpufreq_data {
unsigned int resume;
unsigned int cpu_feature;
unsigned int acpi_perf_cpu;
+ unsigned int first_perf_state;
cpumask_var_t freqdomain_cpus;
void (*cpu_freq_write)(struct acpi_pct_register *reg, u32 val);
u32 (*cpu_freq_read)(struct acpi_pct_register *reg);
@@ -221,10 +223,10 @@ static unsigned extract_msr(struct cpufreq_policy *policy, u32 msr)
perf = to_perf_data(data);
- cpufreq_for_each_entry(pos, policy->freq_table)
+ cpufreq_for_each_entry(pos, policy->freq_table + data->first_perf_state)
if (msr == perf->states[pos->driver_data].status)
return pos->frequency;
- return policy->freq_table[0].frequency;
+ return policy->freq_table[data->first_perf_state].frequency;
}
static unsigned extract_freq(struct cpufreq_policy *policy, u32 val)
@@ -363,6 +365,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
struct cpufreq_policy *policy;
unsigned int freq;
unsigned int cached_freq;
+ unsigned int state;
pr_debug("%s (%d)\n", __func__, cpu);
@@ -374,7 +377,11 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
if (unlikely(!data || !policy->freq_table))
return 0;
- cached_freq = policy->freq_table[to_perf_data(data)->state].frequency;
+ state = to_perf_data(data)->state;
+ if (state < data->first_perf_state)
+ state = data->first_perf_state;
+
+ cached_freq = policy->freq_table[state].frequency;
freq = extract_freq(policy, get_cur_val(cpumask_of(cpu), data));
if (freq != cached_freq) {
/*
@@ -628,16 +635,54 @@ static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
}
#endif
+#ifdef CONFIG_ACPI_CPPC_LIB
+static u64 get_max_boost_ratio(unsigned int cpu)
+{
+ struct cppc_perf_caps perf_caps;
+ u64 highest_perf, nominal_perf;
+ int ret;
+
+ if (acpi_pstate_strict)
+ return 0;
+
+ ret = cppc_get_perf_caps(cpu, &perf_caps);
+ if (ret) {
+ pr_debug("CPU%d: Unable to get performance capabilities (%d)\n",
+ cpu, ret);
+ return 0;
+ }
+
+ highest_perf = perf_caps.highest_perf;
+ nominal_perf = perf_caps.nominal_perf;
+
+ if (!highest_perf || !nominal_perf) {
+ pr_debug("CPU%d: highest or nominal performance missing\n", cpu);
+ return 0;
+ }
+
+ if (highest_perf < nominal_perf) {
+ pr_debug("CPU%d: nominal performance above highest\n", cpu);
+ return 0;
+ }
+
+ return div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);
+}
+#else
+static inline u64 get_max_boost_ratio(unsigned int cpu) { return 0; }
+#endif
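
To make the fixed-point arithmetic concrete, a worked example with assumed
CPPC values (not taken from any particular CPU):

	/*
	 * With SCHED_CAPACITY_SHIFT == 10, highest_perf == 280 and
	 * nominal_perf == 200:
	 *
	 *   max_boost_ratio = (280 << 10) / 200 = 1433
	 *
	 * A nominal frequency of 2000000 kHz then yields
	 *
	 *   2000000 * 1433 >> 10 = 2798828 kHz
	 *
	 * for the extra "boost" entry populated later in
	 * acpi_cpufreq_cpu_init().
	 */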
+
static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
- unsigned int i;
- unsigned int valid_states = 0;
- unsigned int cpu = policy->cpu;
+ struct cpufreq_frequency_table *freq_table;
+ struct acpi_processor_performance *perf;
struct acpi_cpufreq_data *data;
+ unsigned int cpu = policy->cpu;
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
+ unsigned int valid_states = 0;
unsigned int result = 0;
- struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
- struct acpi_processor_performance *perf;
- struct cpufreq_frequency_table *freq_table;
+ unsigned int state_count;
+ u64 max_boost_ratio;
+ unsigned int i;
#ifdef CONFIG_SMP
static int blacklisted;
#endif
@@ -750,8 +795,28 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
goto err_unreg;
}
- freq_table = kcalloc(perf->state_count + 1, sizeof(*freq_table),
- GFP_KERNEL);
+ state_count = perf->state_count + 1;
+
+ max_boost_ratio = get_max_boost_ratio(cpu);
+ if (max_boost_ratio) {
+ /*
+ * Make room for one more entry to represent the highest
+ * available "boost" frequency.
+ */
+ state_count++;
+ valid_states++;
+ data->first_perf_state = valid_states;
+ } else {
+ /*
+ * If the maximum "boost" frequency is unknown, ask the arch
+ * scale-invariance code to use the "nominal" performance for
+ * CPU utilization scaling so as to prevent the schedutil
+ * governor from selecting inadequate CPU frequencies.
+ */
+ arch_set_max_freq_ratio(true);
+ }
+
+ freq_table = kcalloc(state_count, sizeof(*freq_table), GFP_KERNEL);
if (!freq_table) {
result = -ENOMEM;
goto err_unreg;
@@ -785,6 +850,30 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
valid_states++;
}
freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
+
+ if (max_boost_ratio) {
+ unsigned int state = data->first_perf_state;
+ unsigned int freq = freq_table[state].frequency;
+
+ /*
+ * Because the loop above sorts the freq_table entries in the
+ * descending order, freq is the maximum frequency in the table.
+ * Assume that it corresponds to the CPPC nominal frequency and
+ * use it to populate the frequency field of the extra "boost"
+ * frequency entry.
+ */
+ freq_table[0].frequency = freq * max_boost_ratio >> SCHED_CAPACITY_SHIFT;
+ /*
+ * The purpose of the extra "boost" frequency entry is to make
+ * the rest of cpufreq aware of the real maximum frequency. It is
+ * requested in the same way as the first_perf_state entry, which
+ * is expected to cover the entire range of "boost"
+ * frequencies of the CPU, so copy the driver_data value from
+ * that entry.
+ */
+ freq_table[0].driver_data = freq_table[state].driver_data;
+ }
+
policy->freq_table = freq_table;
perf->state = 0;
@@ -858,8 +947,10 @@ static void acpi_cpufreq_cpu_ready(struct cpufreq_policy *policy)
{
struct acpi_processor_performance *perf = per_cpu_ptr(acpi_perf_data,
policy->cpu);
+ struct acpi_cpufreq_data *data = policy->driver_data;
+ unsigned int freq = policy->freq_table[data->first_perf_state].frequency;
- if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq)
+ if (perf->states[0].core_frequency * 1000 != freq)
pr_warn(FW_WARN "P-state 0 is not max freq\n");
}
diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
index 3e31e5d28b79..4153150e20db 100644
--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
+++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
@@ -597,6 +597,16 @@ unmap_base:
return ret;
}
+static void brcm_avs_prepare_uninit(struct platform_device *pdev)
+{
+ struct private_data *priv;
+
+ priv = platform_get_drvdata(pdev);
+
+ iounmap(priv->avs_intr_base);
+ iounmap(priv->base);
+}
+
static int brcm_avs_cpufreq_init(struct cpufreq_policy *policy)
{
struct cpufreq_frequency_table *freq_table;
@@ -732,21 +742,21 @@ static int brcm_avs_cpufreq_probe(struct platform_device *pdev)
brcm_avs_driver.driver_data = pdev;
- return cpufreq_register_driver(&brcm_avs_driver);
+ ret = cpufreq_register_driver(&brcm_avs_driver);
+ if (ret)
+ brcm_avs_prepare_uninit(pdev);
+
+ return ret;
}
static int brcm_avs_cpufreq_remove(struct platform_device *pdev)
{
- struct private_data *priv;
int ret;
ret = cpufreq_unregister_driver(&brcm_avs_driver);
- if (ret)
- return ret;
+ WARN_ON(ret);
- priv = platform_get_drvdata(pdev);
- iounmap(priv->base);
- iounmap(priv->avs_intr_base);
+ brcm_avs_prepare_uninit(pdev);
return 0;
}
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 7cc9bd8568de..8a482c434ea6 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -30,13 +30,13 @@
#define DMI_PROCESSOR_MAX_SPEED 0x14
/*
- * These structs contain information parsed from per CPU
- * ACPI _CPC structures.
- * e.g. For each CPU the highest, lowest supported
- * performance capabilities, desired performance level
- * requested etc.
+ * This list contains information parsed from per CPU ACPI _CPC and _PSD
+ * structures: e.g. the highest and lowest supported performance capabilities,
+ * the desired performance level requested, etc. Depending on the share_type, not
+ * all CPUs will have an entry in the list.
*/
-static struct cppc_cpudata **all_cpu_data;
+static LIST_HEAD(cpu_data_list);
+
static bool boost_supported;
struct cppc_workaround_oem_info {
@@ -148,8 +148,10 @@ static unsigned int cppc_cpufreq_khz_to_perf(struct cppc_cpudata *cpu_data,
static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
- struct cppc_cpudata *cpu_data = all_cpu_data[policy->cpu];
+ struct cppc_cpudata *cpu_data = policy->driver_data;
+ unsigned int cpu = policy->cpu;
struct cpufreq_freqs freqs;
u32 desired_perf;
int ret = 0;
@@ -164,12 +166,12 @@ static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
freqs.new = target_freq;
cpufreq_freq_transition_begin(policy, &freqs);
- ret = cppc_set_perf(cpu_data->cpu, &cpu_data->perf_ctrls);
+ ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
cpufreq_freq_transition_end(policy, &freqs, ret != 0);
if (ret)
pr_debug("Failed to set target on CPU:%d. ret:%d\n",
- cpu_data->cpu, ret);
+ cpu, ret);
return ret;
}
@@ -182,7 +184,7 @@ static int cppc_verify_policy(struct cpufreq_policy_data *policy)
static void cppc_cpufreq_stop_cpu(struct cpufreq_policy *policy)
{
- struct cppc_cpudata *cpu_data = all_cpu_data[policy->cpu];
+ struct cppc_cpudata *cpu_data = policy->driver_data;
struct cppc_perf_caps *caps = &cpu_data->perf_caps;
unsigned int cpu = policy->cpu;
int ret;
@@ -193,6 +195,12 @@ static void cppc_cpufreq_stop_cpu(struct cpufreq_policy *policy)
if (ret)
pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
caps->lowest_perf, cpu, ret);
+
+ /* Remove CPU node from list and free driver data for policy */
+ free_cpumask_var(cpu_data->shared_cpu_map);
+ list_del(&cpu_data->node);
+ kfree(policy->driver_data);
+ policy->driver_data = NULL;
}
/*
@@ -238,25 +246,61 @@ static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
}
#endif
-static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
+
+static struct cppc_cpudata *cppc_cpufreq_get_cpu_data(unsigned int cpu)
{
- struct cppc_cpudata *cpu_data = all_cpu_data[policy->cpu];
- struct cppc_perf_caps *caps = &cpu_data->perf_caps;
- unsigned int cpu = policy->cpu;
- int ret = 0;
+ struct cppc_cpudata *cpu_data;
+ int ret;
- cpu_data->cpu = cpu;
- ret = cppc_get_perf_caps(cpu, caps);
+ cpu_data = kzalloc(sizeof(struct cppc_cpudata), GFP_KERNEL);
+ if (!cpu_data)
+ goto out;
+ if (!zalloc_cpumask_var(&cpu_data->shared_cpu_map, GFP_KERNEL))
+ goto free_cpu;
+
+ ret = acpi_get_psd_map(cpu, cpu_data);
if (ret) {
- pr_debug("Err reading CPU%d perf capabilities. ret:%d\n",
- cpu, ret);
- return ret;
+ pr_debug("Err parsing CPU%d PSD data: ret:%d\n", cpu, ret);
+ goto free_mask;
+ }
+
+ ret = cppc_get_perf_caps(cpu, &cpu_data->perf_caps);
+ if (ret) {
+ pr_debug("Err reading CPU%d perf caps: ret:%d\n", cpu, ret);
+ goto free_mask;
}
/* Convert the lowest and nominal freq from MHz to KHz */
- caps->lowest_freq *= 1000;
- caps->nominal_freq *= 1000;
+ cpu_data->perf_caps.lowest_freq *= 1000;
+ cpu_data->perf_caps.nominal_freq *= 1000;
+
+ list_add(&cpu_data->node, &cpu_data_list);
+
+ return cpu_data;
+
+free_mask:
+ free_cpumask_var(cpu_data->shared_cpu_map);
+free_cpu:
+ kfree(cpu_data);
+out:
+ return NULL;
+}
+
+static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ unsigned int cpu = policy->cpu;
+ struct cppc_cpudata *cpu_data;
+ struct cppc_perf_caps *caps;
+ int ret;
+
+ cpu_data = cppc_cpufreq_get_cpu_data(cpu);
+ if (!cpu_data) {
+ pr_err("Error in acquiring _CPC/_PSD data for CPU%d.\n", cpu);
+ return -ENODEV;
+ }
+ caps = &cpu_data->perf_caps;
+ policy->driver_data = cpu_data;
/*
* Set min to lowest nonlinear perf to avoid any efficiency penalty (see
@@ -280,26 +324,25 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu);
policy->shared_type = cpu_data->shared_type;
- if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
- int i;
-
+ switch (policy->shared_type) {
+ case CPUFREQ_SHARED_TYPE_HW:
+ case CPUFREQ_SHARED_TYPE_NONE:
+ /* Nothing to be done - we'll have a policy for each CPU */
+ break;
+ case CPUFREQ_SHARED_TYPE_ANY:
+ /*
+ * All CPUs in the domain will share a policy and all cpufreq
+ * operations will use a single cppc_cpudata structure stored
+ * in policy->driver_data.
+ */
cpumask_copy(policy->cpus, cpu_data->shared_cpu_map);
-
- for_each_cpu(i, policy->cpus) {
- if (unlikely(i == cpu))
- continue;
-
- memcpy(&all_cpu_data[i]->perf_caps, caps,
- sizeof(cpu_data->perf_caps));
- }
- } else if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL) {
- /* Support only SW_ANY for now. */
- pr_debug("Unsupported CPU co-ord type\n");
+ break;
+ default:
+ pr_debug("Unsupported CPU co-ord type: %d\n",
+ policy->shared_type);
return -EFAULT;
}
- cpu_data->cur_policy = policy;
-
/*
* If 'highest_perf' is greater than 'nominal_perf', we assume CPU Boost
* is supported.
@@ -354,9 +397,12 @@ static int cppc_get_rate_from_fbctrs(struct cppc_cpudata *cpu_data,
static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
{
struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0};
- struct cppc_cpudata *cpu_data = all_cpu_data[cpu];
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ struct cppc_cpudata *cpu_data = policy->driver_data;
int ret;
+ cpufreq_cpu_put(policy);
+
ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t0);
if (ret)
return ret;
@@ -372,7 +418,7 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state)
{
- struct cppc_cpudata *cpu_data = all_cpu_data[policy->cpu];
+ struct cppc_cpudata *cpu_data = policy->driver_data;
struct cppc_perf_caps *caps = &cpu_data->perf_caps;
int ret;
@@ -396,6 +442,19 @@ static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state)
return 0;
}
+static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
+{
+ struct cppc_cpudata *cpu_data = policy->driver_data;
+
+ return cpufreq_show_cpus(cpu_data->shared_cpu_map, buf);
+}
+cpufreq_freq_attr_ro(freqdomain_cpus);
+
+static struct freq_attr *cppc_cpufreq_attr[] = {
+ &freqdomain_cpus,
+ NULL,
+};
+
static struct cpufreq_driver cppc_cpufreq_driver = {
.flags = CPUFREQ_CONST_LOOPS,
.verify = cppc_verify_policy,
@@ -404,6 +463,7 @@ static struct cpufreq_driver cppc_cpufreq_driver = {
.init = cppc_cpufreq_cpu_init,
.stop_cpu = cppc_cpufreq_stop_cpu,
.set_boost = cppc_cpufreq_set_boost,
+ .attr = cppc_cpufreq_attr,
.name = "cppc_cpufreq",
};
@@ -415,10 +475,13 @@ static struct cpufreq_driver cppc_cpufreq_driver = {
*/
static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu)
{
- struct cppc_cpudata *cpu_data = all_cpu_data[cpu];
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ struct cppc_cpudata *cpu_data = policy->driver_data;
u64 desired_perf;
int ret;
+ cpufreq_cpu_put(policy);
+
ret = cppc_get_desired_perf(cpu, &desired_perf);
if (ret < 0)
return -EIO;
@@ -451,68 +514,33 @@ static void cppc_check_hisi_workaround(void)
static int __init cppc_cpufreq_init(void)
{
- struct cppc_cpudata *cpu_data;
- int i, ret = 0;
-
- if (acpi_disabled)
+ if (acpi_disabled || !acpi_cpc_valid())
return -ENODEV;
- all_cpu_data = kcalloc(num_possible_cpus(), sizeof(void *),
- GFP_KERNEL);
- if (!all_cpu_data)
- return -ENOMEM;
-
- for_each_possible_cpu(i) {
- all_cpu_data[i] = kzalloc(sizeof(struct cppc_cpudata), GFP_KERNEL);
- if (!all_cpu_data[i])
- goto out;
-
- cpu_data = all_cpu_data[i];
- if (!zalloc_cpumask_var(&cpu_data->shared_cpu_map, GFP_KERNEL))
- goto out;
- }
-
- ret = acpi_get_psd_map(all_cpu_data);
- if (ret) {
- pr_debug("Error parsing PSD data. Aborting cpufreq registration.\n");
- goto out;
- }
+ INIT_LIST_HEAD(&cpu_data_list);
cppc_check_hisi_workaround();
- ret = cpufreq_register_driver(&cppc_cpufreq_driver);
- if (ret)
- goto out;
+ return cpufreq_register_driver(&cppc_cpufreq_driver);
+}
- return ret;
+static inline void free_cpu_data(void)
+{
+ struct cppc_cpudata *iter, *tmp;
-out:
- for_each_possible_cpu(i) {
- cpu_data = all_cpu_data[i];
- if (!cpu_data)
- break;
- free_cpumask_var(cpu_data->shared_cpu_map);
- kfree(cpu_data);
+ list_for_each_entry_safe(iter, tmp, &cpu_data_list, node) {
+ free_cpumask_var(iter->shared_cpu_map);
+ list_del(&iter->node);
+ kfree(iter);
}
- kfree(all_cpu_data);
- return -ENODEV;
}
static void __exit cppc_cpufreq_exit(void)
{
- struct cppc_cpudata *cpu_data;
- int i;
-
cpufreq_unregister_driver(&cppc_cpufreq_driver);
- for_each_possible_cpu(i) {
- cpu_data = all_cpu_data[i];
- free_cpumask_var(cpu_data->shared_cpu_map);
- kfree(cpu_data);
- }
-
- kfree(all_cpu_data);
+ free_cpu_data();
}
module_exit(cppc_cpufreq_exit);
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index bd2db0188cbb..3ba2f716fe97 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -141,8 +141,6 @@ static const struct of_device_id blacklist[] __initconst = {
{ .compatible = "st,stih410", },
{ .compatible = "st,stih418", },
- { .compatible = "sigma,tango4", },
-
{ .compatible = "ti,am33xx", },
{ .compatible = "ti,am43", },
{ .compatible = "ti,dra7", },
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index ad4234518ef6..b1e1bdc63b01 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -175,7 +175,7 @@ static int cpufreq_exit(struct cpufreq_policy *policy)
}
static struct cpufreq_driver dt_cpufreq_driver = {
- .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK |
CPUFREQ_IS_COOLING_DEV,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = set_target,
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index c17aa2973c44..7d0ae968def7 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -2097,6 +2097,46 @@ unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
}
EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
+/**
+ * cpufreq_driver_adjust_perf - Adjust CPU performance level in one go.
+ * @cpu: Target CPU.
+ * @min_perf: Minimum (required) performance level (units of @capacity).
+ * @target_perf: Target (desired) performance level (units of @capacity).
+ * @capacity: Capacity of the target CPU.
+ *
+ * Carry out a fast performance level switch of @cpu without sleeping.
+ *
+ * The driver's ->adjust_perf() callback invoked by this function must be
+ * suitable for being called from within RCU-sched read-side critical sections
+ * and it is expected to select a suitable performance level equal to or above
+ * @min_perf and preferably equal to or below @target_perf.
+ *
+ * This function must not be called if policy->fast_switch_enabled is unset.
+ *
+ * Governors calling this function must guarantee that it will never be invoked
+ * twice in parallel for the same CPU and that it will never be called in
+ * parallel with either ->target() or ->target_index() or ->fast_switch() for
+ * the same CPU.
+ */
+void cpufreq_driver_adjust_perf(unsigned int cpu,
+ unsigned long min_perf,
+ unsigned long target_perf,
+ unsigned long capacity)
+{
+ cpufreq_driver->adjust_perf(cpu, min_perf, target_perf, capacity);
+}
+
+/**
+ * cpufreq_driver_has_adjust_perf - Check "direct fast switch" callback.
+ *
+ * Return 'true' if the ->adjust_perf callback is present for the
+ * current driver or 'false' otherwise.
+ */
+bool cpufreq_driver_has_adjust_perf(void)
+{
+ return !!cpufreq_driver->adjust_perf;
+}
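
For context, a governor on the fast-switch path might wire these two
helpers up roughly as follows (a sketch with a hypothetical caller, not
schedutil's actual code):

	static void example_governor_update(unsigned int cpu,
					    unsigned long min_perf,
					    unsigned long target_perf,
					    unsigned long capacity)
	{
		/* Prefer the "direct fast switch" path when the driver has one. */
		if (cpufreq_driver_has_adjust_perf())
			cpufreq_driver_adjust_perf(cpu, min_perf,
						   target_perf, capacity);
		/* else fall back to ->fast_switch() with a target frequency */
	}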
+
/* Must set freqs->new to intermediate frequency */
static int __target_intermediate(struct cpufreq_policy *policy,
struct cpufreq_freqs *freqs, int index)
@@ -2770,8 +2810,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
if (ret)
goto err_boost_unreg;
- if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
- list_empty(&cpufreq_policy_list)) {
+ if (unlikely(list_empty(&cpufreq_policy_list))) {
/* if all ->init() calls failed, unregister */
ret = -ENODEV;
pr_debug("%s: No CPU initialized for driver %s\n", __func__,
diff --git a/drivers/cpufreq/davinci-cpufreq.c b/drivers/cpufreq/davinci-cpufreq.c
index 91f477a6cbc4..9e97f60f8199 100644
--- a/drivers/cpufreq/davinci-cpufreq.c
+++ b/drivers/cpufreq/davinci-cpufreq.c
@@ -95,7 +95,7 @@ static int davinci_cpu_init(struct cpufreq_policy *policy)
}
static struct cpufreq_driver davinci_driver = {
- .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = davinci_target,
.get = cpufreq_generic_get,
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 2a4db856222f..5175ae3cac44 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -76,11 +76,6 @@ static inline int ceiling_fp(int32_t x)
return ret;
}
-static inline int32_t percent_fp(int percent)
-{
- return div_fp(percent, 100);
-}
-
static inline u64 mul_ext_fp(u64 x, u64 y)
{
return (x * y) >> EXT_FRAC_BITS;
@@ -91,11 +86,6 @@ static inline u64 div_ext_fp(u64 x, u64 y)
return div64_u64(x << EXT_FRAC_BITS, y);
}
-static inline int32_t percent_ext_fp(int percent)
-{
- return div_ext_fp(percent, 100);
-}
-
/**
* struct sample - Store performance sample
* @core_avg_perf: Ratio of APERF/MPERF which is the actual average
@@ -829,13 +819,13 @@ static struct freq_attr *hwp_cpufreq_attrs[] = {
NULL,
};
-static void intel_pstate_get_hwp_max(unsigned int cpu, int *phy_max,
+static void intel_pstate_get_hwp_max(struct cpudata *cpu, int *phy_max,
int *current_max)
{
u64 cap;
- rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
- WRITE_ONCE(all_cpu_data[cpu]->hwp_cap_cached, cap);
+ rdmsrl_on_cpu(cpu->cpu, MSR_HWP_CAPABILITIES, &cap);
+ WRITE_ONCE(cpu->hwp_cap_cached, cap);
if (global.no_turbo || global.turbo_disabled)
*current_max = HWP_GUARANTEED_PERF(cap);
else
@@ -924,7 +914,7 @@ static void intel_pstate_hwp_offline(struct cpudata *cpu)
}
value &= ~GENMASK_ULL(31, 0);
- min_perf = HWP_LOWEST_PERF(cpu->hwp_cap_cached);
+ min_perf = HWP_LOWEST_PERF(READ_ONCE(cpu->hwp_cap_cached));
/* Set hwp_max = hwp_min */
value |= HWP_MAX_PERF(min_perf);
@@ -1223,7 +1213,7 @@ static void update_qos_request(enum freq_qos_req_type type)
continue;
if (hwp_active)
- intel_pstate_get_hwp_max(i, &turbo_max, &max_state);
+ intel_pstate_get_hwp_max(cpu, &turbo_max, &max_state);
else
turbo_max = cpu->pstate.turbo_pstate;
@@ -1724,21 +1714,22 @@ static void intel_pstate_max_within_limits(struct cpudata *cpu)
static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
cpu->pstate.min_pstate = pstate_funcs.get_min();
- cpu->pstate.max_pstate = pstate_funcs.get_max();
cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical();
cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
cpu->pstate.scaling = pstate_funcs.get_scaling();
- cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
if (hwp_active && !hwp_mode_bdw) {
unsigned int phy_max, current_max;
- intel_pstate_get_hwp_max(cpu->cpu, &phy_max, &current_max);
+ intel_pstate_get_hwp_max(cpu, &phy_max, &current_max);
cpu->pstate.turbo_freq = phy_max * cpu->pstate.scaling;
cpu->pstate.turbo_pstate = phy_max;
+ cpu->pstate.max_pstate = HWP_GUARANTEED_PERF(READ_ONCE(cpu->hwp_cap_cached));
} else {
cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+ cpu->pstate.max_pstate = pstate_funcs.get_max();
}
+ cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
if (pstate_funcs.get_aperf_mperf_shift)
cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift();
@@ -1760,6 +1751,7 @@ static int hwp_boost_hold_time_ns = 3 * NSEC_PER_MSEC;
static inline void intel_pstate_hwp_boost_up(struct cpudata *cpu)
{
u64 hwp_req = READ_ONCE(cpu->hwp_req_cached);
+ u64 hwp_cap = READ_ONCE(cpu->hwp_cap_cached);
u32 max_limit = (hwp_req & 0xff00) >> 8;
u32 min_limit = (hwp_req & 0xff);
u32 boost_level1;
@@ -1786,14 +1778,14 @@ static inline void intel_pstate_hwp_boost_up(struct cpudata *cpu)
cpu->hwp_boost_min = min_limit;
/* level at the halfway mark between min and guaranteed */
- boost_level1 = (HWP_GUARANTEED_PERF(cpu->hwp_cap_cached) + min_limit) >> 1;
+ boost_level1 = (HWP_GUARANTEED_PERF(hwp_cap) + min_limit) >> 1;
if (cpu->hwp_boost_min < boost_level1)
cpu->hwp_boost_min = boost_level1;
- else if (cpu->hwp_boost_min < HWP_GUARANTEED_PERF(cpu->hwp_cap_cached))
- cpu->hwp_boost_min = HWP_GUARANTEED_PERF(cpu->hwp_cap_cached);
- else if (cpu->hwp_boost_min == HWP_GUARANTEED_PERF(cpu->hwp_cap_cached) &&
- max_limit != HWP_GUARANTEED_PERF(cpu->hwp_cap_cached))
+ else if (cpu->hwp_boost_min < HWP_GUARANTEED_PERF(hwp_cap))
+ cpu->hwp_boost_min = HWP_GUARANTEED_PERF(hwp_cap);
+ else if (cpu->hwp_boost_min == HWP_GUARANTEED_PERF(hwp_cap) &&
+ max_limit != HWP_GUARANTEED_PERF(hwp_cap))
cpu->hwp_boost_min = max_limit;
else
return;
@@ -2207,9 +2199,9 @@ static void intel_pstate_update_perf_limits(struct cpudata *cpu,
unsigned int policy_min,
unsigned int policy_max)
{
- int max_freq = intel_pstate_get_max_freq(cpu);
int32_t max_policy_perf, min_policy_perf;
int max_state, turbo_max;
+ int max_freq;
/*
* HWP needs some special consideration, because on BDX the
@@ -2217,12 +2209,13 @@ static void intel_pstate_update_perf_limits(struct cpudata *cpu,
* rather than pure ratios.
*/
if (hwp_active) {
- intel_pstate_get_hwp_max(cpu->cpu, &turbo_max, &max_state);
+ intel_pstate_get_hwp_max(cpu, &turbo_max, &max_state);
} else {
max_state = global.no_turbo || global.turbo_disabled ?
cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
turbo_max = cpu->pstate.turbo_pstate;
}
+ max_freq = max_state * cpu->pstate.scaling;
max_policy_perf = max_state * policy_max / max_freq;
if (policy_max == policy_min) {
@@ -2325,9 +2318,18 @@ static void intel_pstate_adjust_policy_max(struct cpudata *cpu,
static void intel_pstate_verify_cpu_policy(struct cpudata *cpu,
struct cpufreq_policy_data *policy)
{
+ int max_freq;
+
update_turbo_state();
- cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
- intel_pstate_get_max_freq(cpu));
+ if (hwp_active) {
+ int max_state, turbo_max;
+
+ intel_pstate_get_hwp_max(cpu, &turbo_max, &max_state);
+ max_freq = max_state * cpu->pstate.scaling;
+ } else {
+ max_freq = intel_pstate_get_max_freq(cpu);
+ }
+ cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, max_freq);
intel_pstate_adjust_policy_max(cpu, policy);
}
@@ -2496,7 +2498,7 @@ static int intel_cpufreq_verify_policy(struct cpufreq_policy_data *policy)
* driver call was via the normal or fast switch path. Various graphs
* output from the intel_pstate_tracer.py utility that include core_busy
* (or performance or core_avg_perf) have a fixed y-axis from 0 to 100%,
- * so we use 10 to indicate the the normal path through the driver, and
+ * so we use 10 to indicate the normal path through the driver, and
* 90 to indicate the fast switch path through the driver.
* The scaled_busy field is not used, and is set to 0.
*/
@@ -2526,20 +2528,19 @@ static void intel_cpufreq_trace(struct cpudata *cpu, unsigned int trace_type, in
fp_toint(cpu->iowait_boost * 100));
}
-static void intel_cpufreq_adjust_hwp(struct cpudata *cpu, u32 target_pstate,
- bool strict, bool fast_switch)
+static void intel_cpufreq_hwp_update(struct cpudata *cpu, u32 min, u32 max,
+ u32 desired, bool fast_switch)
{
u64 prev = READ_ONCE(cpu->hwp_req_cached), value = prev;
value &= ~HWP_MIN_PERF(~0L);
- value |= HWP_MIN_PERF(target_pstate);
+ value |= HWP_MIN_PERF(min);
- /*
- * The entire MSR needs to be updated in order to update the HWP min
- * field in it, so opportunistically update the max too if needed.
- */
value &= ~HWP_MAX_PERF(~0L);
- value |= HWP_MAX_PERF(strict ? target_pstate : cpu->max_perf_ratio);
+ value |= HWP_MAX_PERF(max);
+
+ value &= ~HWP_DESIRED_PERF(~0L);
+ value |= HWP_DESIRED_PERF(desired);
if (value == prev)
return;
@@ -2551,7 +2552,7 @@ static void intel_cpufreq_adjust_hwp(struct cpudata *cpu, u32 target_pstate,
wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
}
-static void intel_cpufreq_adjust_perf_ctl(struct cpudata *cpu,
+static void intel_cpufreq_perf_ctl_update(struct cpudata *cpu,
u32 target_pstate, bool fast_switch)
{
if (fast_switch)
@@ -2569,11 +2570,15 @@ static int intel_cpufreq_update_pstate(struct cpufreq_policy *policy,
int old_pstate = cpu->pstate.current_pstate;
target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
- if (hwp_active)
- intel_cpufreq_adjust_hwp(cpu, target_pstate,
- policy->strict_target, fast_switch);
- else if (target_pstate != old_pstate)
- intel_cpufreq_adjust_perf_ctl(cpu, target_pstate, fast_switch);
+ if (hwp_active) {
+ int max_pstate = policy->strict_target ?
+ target_pstate : cpu->max_perf_ratio;
+
+ intel_cpufreq_hwp_update(cpu, target_pstate, max_pstate, 0,
+ fast_switch);
+ } else if (target_pstate != old_pstate) {
+ intel_cpufreq_perf_ctl_update(cpu, target_pstate, fast_switch);
+ }
cpu->pstate.current_pstate = target_pstate;
@@ -2634,6 +2639,48 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
return target_pstate * cpu->pstate.scaling;
}
+static void intel_cpufreq_adjust_perf(unsigned int cpunum,
+ unsigned long min_perf,
+ unsigned long target_perf,
+ unsigned long capacity)
+{
+ struct cpudata *cpu = all_cpu_data[cpunum];
+ u64 hwp_cap = READ_ONCE(cpu->hwp_cap_cached);
+ int old_pstate = cpu->pstate.current_pstate;
+ int cap_pstate, min_pstate, max_pstate, target_pstate;
+
+ update_turbo_state();
+ cap_pstate = global.turbo_disabled ? HWP_GUARANTEED_PERF(hwp_cap) :
+ HWP_HIGHEST_PERF(hwp_cap);
+
+ /* Optimization: Avoid unnecessary divisions. */
+
+ target_pstate = cap_pstate;
+ if (target_perf < capacity)
+ target_pstate = DIV_ROUND_UP(cap_pstate * target_perf, capacity);
+
+ min_pstate = cap_pstate;
+ if (min_perf < capacity)
+ min_pstate = DIV_ROUND_UP(cap_pstate * min_perf, capacity);
+
+ if (min_pstate < cpu->pstate.min_pstate)
+ min_pstate = cpu->pstate.min_pstate;
+
+ if (min_pstate < cpu->min_perf_ratio)
+ min_pstate = cpu->min_perf_ratio;
+
+ max_pstate = min(cap_pstate, cpu->max_perf_ratio);
+ if (max_pstate < min_pstate)
+ max_pstate = min_pstate;
+
+ target_pstate = clamp_t(int, target_pstate, min_pstate, max_pstate);
+
+ intel_cpufreq_hwp_update(cpu, min_pstate, max_pstate, target_pstate, true);
+
+ cpu->pstate.current_pstate = target_pstate;
+ intel_cpufreq_trace(cpu, INTEL_PSTATE_TRACE_FAST_SWITCH, old_pstate);
+}
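
A short worked example of the scaling above, with assumed values:

	/*
	 * With cap_pstate == 40, capacity == 1024, target_perf == 512 and
	 * min_perf == 256:
	 *
	 *   target_pstate = DIV_ROUND_UP(40 * 512, 1024) = 20
	 *   min_pstate    = DIV_ROUND_UP(40 * 256, 1024) = 10
	 *
	 * Both are then clamped against pstate.min_pstate and the
	 * min/max perf ratios before being written out via HWP_REQUEST.
	 */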
+
static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
int max_state, turbo_max, min_freq, max_freq, ret;
@@ -2664,7 +2711,7 @@ static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
if (hwp_active) {
u64 value;
- intel_pstate_get_hwp_max(policy->cpu, &turbo_max, &max_state);
+ intel_pstate_get_hwp_max(cpu, &turbo_max, &max_state);
policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY_HWP;
rdmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, &value);
WRITE_ONCE(cpu->hwp_req_cached, value);
@@ -3032,6 +3079,7 @@ static int __init intel_pstate_init(void)
intel_pstate.attr = hwp_cpufreq_attrs;
intel_cpufreq.attr = hwp_cpufreq_attrs;
intel_cpufreq.flags |= CPUFREQ_NEED_UPDATE_LIMITS;
+ intel_cpufreq.adjust_perf = intel_cpufreq_adjust_perf;
if (!default_driver)
default_driver = &intel_pstate;
diff --git a/drivers/cpufreq/loongson1-cpufreq.c b/drivers/cpufreq/loongson1-cpufreq.c
index 86f612593e49..fb72d709db56 100644
--- a/drivers/cpufreq/loongson1-cpufreq.c
+++ b/drivers/cpufreq/loongson1-cpufreq.c
@@ -116,7 +116,7 @@ static int ls1x_cpufreq_exit(struct cpufreq_policy *policy)
static struct cpufreq_driver ls1x_cpufreq_driver = {
.name = "cpufreq-ls1x",
- .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = ls1x_cpufreq_target,
.get = cpufreq_generic_get,
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index 022e3e966e71..f2e491b25b07 100644
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -463,7 +463,7 @@ static int mtk_cpufreq_exit(struct cpufreq_policy *policy)
}
static struct cpufreq_driver mtk_cpufreq_driver = {
- .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK |
CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
CPUFREQ_IS_COOLING_DEV,
.verify = cpufreq_generic_frequency_table_verify,
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index 3694bb030df3..e035ee216b0f 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -144,7 +144,7 @@ static int omap_cpu_exit(struct cpufreq_policy *policy)
}
static struct cpufreq_driver omap_driver = {
- .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = omap_target,
.get = cpufreq_generic_get,
diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c
index 73621bc11976..4f20c6a9108d 100644
--- a/drivers/cpufreq/pmac32-cpufreq.c
+++ b/drivers/cpufreq/pmac32-cpufreq.c
@@ -439,8 +439,7 @@ static struct cpufreq_driver pmac_cpufreq_driver = {
.init = pmac_cpufreq_cpu_init,
.suspend = pmac_cpufreq_suspend,
.resume = pmac_cpufreq_resume,
- .flags = CPUFREQ_PM_NO_WARN |
- CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
+ .flags = CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
.attr = cpufreq_generic_attr,
.name = "powermac",
};
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index 0acc9e241cd7..b9ccb6a3dad9 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -878,9 +878,9 @@ static int get_transition_latency(struct powernow_k8_data *data)
/* Take a frequency, and issue the fid/vid transition command */
static int transition_frequency_fidvid(struct powernow_k8_data *data,
- unsigned int index)
+ unsigned int index,
+ struct cpufreq_policy *policy)
{
- struct cpufreq_policy *policy;
u32 fid = 0;
u32 vid = 0;
int res;
@@ -912,9 +912,6 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
freqs.old = find_khz_freq_from_fid(data->currfid);
freqs.new = find_khz_freq_from_fid(fid);
- policy = cpufreq_cpu_get(smp_processor_id());
- cpufreq_cpu_put(policy);
-
cpufreq_freq_transition_begin(policy, &freqs);
res = transition_fid_vid(data, fid, vid);
cpufreq_freq_transition_end(policy, &freqs, res);
@@ -969,7 +966,7 @@ static long powernowk8_target_fn(void *arg)
powernow_k8_acpi_pst_values(data, newstate);
- ret = transition_frequency_fidvid(data, newstate);
+ ret = transition_frequency_fidvid(data, newstate, pol);
if (ret) {
pr_err("transition frequency failed\n");
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index 9ed5341dc515..356244510b18 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -54,7 +54,7 @@ static int qcom_cpufreq_set_bw(struct cpufreq_policy *policy,
if (IS_ERR(opp))
return PTR_ERR(opp);
- ret = dev_pm_opp_set_bw(dev, opp);
+ ret = dev_pm_opp_set_opp(dev, opp);
dev_pm_opp_put(opp);
return ret;
}
@@ -347,6 +347,12 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
dev_pm_opp_of_register_em(cpu_dev, policy->cpus);
+ if (policy_has_boost_freq(policy)) {
+ ret = cpufreq_enable_boost_support();
+ if (ret)
+ dev_warn(cpu_dev, "failed to enable boost: %d\n", ret);
+ }
+
return 0;
error:
devm_iounmap(dev, base);
@@ -374,7 +380,7 @@ static struct freq_attr *qcom_cpufreq_hw_attr[] = {
};
static struct cpufreq_driver cpufreq_qcom_hw_driver = {
- .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK |
CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
CPUFREQ_IS_COOLING_DEV,
.verify = cpufreq_generic_frequency_table_verify,
diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
index 37efc0dc3f91..7380c32b238e 100644
--- a/drivers/cpufreq/s3c24xx-cpufreq.c
+++ b/drivers/cpufreq/s3c24xx-cpufreq.c
@@ -420,7 +420,7 @@ static int s3c_cpufreq_resume(struct cpufreq_policy *policy)
#endif
static struct cpufreq_driver s3c24xx_driver = {
- .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.target = s3c_cpufreq_target,
.get = cpufreq_generic_get,
.init = s3c_cpufreq_init,
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index bed496cf8d24..69786e5bbf05 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -574,7 +574,7 @@ static int s5pv210_cpufreq_reboot_notifier_event(struct notifier_block *this,
}
static struct cpufreq_driver s5pv210_driver = {
- .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = s5pv210_target,
.get = cpufreq_generic_get,
diff --git a/drivers/cpufreq/sa1100-cpufreq.c b/drivers/cpufreq/sa1100-cpufreq.c
index 5c075ef6adc0..252b9fc26124 100644
--- a/drivers/cpufreq/sa1100-cpufreq.c
+++ b/drivers/cpufreq/sa1100-cpufreq.c
@@ -186,7 +186,7 @@ static int __init sa1100_cpu_init(struct cpufreq_policy *policy)
}
static struct cpufreq_driver sa1100_driver __refdata = {
- .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK |
CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = sa1100_target,
diff --git a/drivers/cpufreq/sa1110-cpufreq.c b/drivers/cpufreq/sa1110-cpufreq.c
index d9d04d935b3a..1a83c8678a63 100644
--- a/drivers/cpufreq/sa1110-cpufreq.c
+++ b/drivers/cpufreq/sa1110-cpufreq.c
@@ -310,7 +310,7 @@ static int __init sa1110_cpu_init(struct cpufreq_policy *policy)
/* sa1110_driver needs __refdata because it must remain after init registers
* it with cpufreq_register_driver() */
static struct cpufreq_driver sa1110_driver __refdata = {
- .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK |
CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = sa1110_target,
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index 491a0a24fb1e..5bd03b59887f 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -217,7 +217,7 @@ static int scmi_cpufreq_exit(struct cpufreq_policy *policy)
static struct cpufreq_driver scmi_cpufreq_driver = {
.name = "scmi",
- .flags = CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
+ .flags = CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
CPUFREQ_NEED_INITIAL_FREQ_CHECK |
CPUFREQ_IS_COOLING_DEV,
.verify = cpufreq_generic_frequency_table_verify,
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index e5140ad63db8..d6a698a1b5d1 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -191,7 +191,7 @@ static int scpi_cpufreq_exit(struct cpufreq_policy *policy)
static struct cpufreq_driver scpi_cpufreq_driver = {
.name = "scpi-cpufreq",
- .flags = CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
+ .flags = CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
CPUFREQ_NEED_INITIAL_FREQ_CHECK |
CPUFREQ_IS_COOLING_DEV,
.verify = cpufreq_generic_frequency_table_verify,
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c
index 73bd8dc47074..7d0d62a06bf3 100644
--- a/drivers/cpufreq/spear-cpufreq.c
+++ b/drivers/cpufreq/spear-cpufreq.c
@@ -160,7 +160,7 @@ static int spear_cpufreq_init(struct cpufreq_policy *policy)
static struct cpufreq_driver spear_cpufreq_driver = {
.name = "cpufreq-spear",
- .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = spear_cpufreq_target,
.get = cpufreq_generic_get,
diff --git a/drivers/cpufreq/tango-cpufreq.c b/drivers/cpufreq/tango-cpufreq.c
deleted file mode 100644
index 89a7f860bfe8..000000000000
--- a/drivers/cpufreq/tango-cpufreq.c
+++ /dev/null
@@ -1,38 +0,0 @@
-#include <linux/of.h>
-#include <linux/cpu.h>
-#include <linux/clk.h>
-#include <linux/pm_opp.h>
-#include <linux/platform_device.h>
-
-static const struct of_device_id machines[] __initconst = {
- { .compatible = "sigma,tango4" },
- { /* sentinel */ }
-};
-
-static int __init tango_cpufreq_init(void)
-{
- struct device *cpu_dev = get_cpu_device(0);
- unsigned long max_freq;
- struct clk *cpu_clk;
- void *res;
-
- if (!of_match_node(machines, of_root))
- return -ENODEV;
-
- cpu_clk = clk_get(cpu_dev, NULL);
- if (IS_ERR(cpu_clk))
- return -ENODEV;
-
- max_freq = clk_get_rate(cpu_clk);
-
- dev_pm_opp_add(cpu_dev, max_freq / 1, 0);
- dev_pm_opp_add(cpu_dev, max_freq / 2, 0);
- dev_pm_opp_add(cpu_dev, max_freq / 3, 0);
- dev_pm_opp_add(cpu_dev, max_freq / 5, 0);
- dev_pm_opp_add(cpu_dev, max_freq / 9, 0);
-
- res = platform_device_register_data(NULL, "cpufreq-dt", -1, NULL, 0);
-
- return PTR_ERR_OR_ZERO(res);
-}
-device_initcall(tango_cpufreq_init);
diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c
index e566ea298b59..5d1943e787b0 100644
--- a/drivers/cpufreq/tegra186-cpufreq.c
+++ b/drivers/cpufreq/tegra186-cpufreq.c
@@ -117,7 +117,7 @@ static unsigned int tegra186_cpufreq_get(unsigned int cpu)
static struct cpufreq_driver tegra186_cpufreq_driver = {
.name = "tegra186",
- .flags = CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
+ .flags = CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.get = tegra186_cpufreq_get,
.verify = cpufreq_generic_frequency_table_verify,
diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c
index 6a67f36f3b80..a9620e4489ae 100644
--- a/drivers/cpufreq/tegra194-cpufreq.c
+++ b/drivers/cpufreq/tegra194-cpufreq.c
@@ -272,8 +272,7 @@ static int tegra194_cpufreq_set_target(struct cpufreq_policy *policy,
static struct cpufreq_driver tegra194_cpufreq_driver = {
.name = "tegra194",
- .flags = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS |
- CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .flags = CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = tegra194_cpufreq_set_target,
.get = tegra194_get_speed,
diff --git a/drivers/cpufreq/tegra20-cpufreq.c b/drivers/cpufreq/tegra20-cpufreq.c
index 8c893043953e..e8db3d75be25 100644
--- a/drivers/cpufreq/tegra20-cpufreq.c
+++ b/drivers/cpufreq/tegra20-cpufreq.c
@@ -32,6 +32,16 @@ static bool cpu0_node_has_opp_v2_prop(void)
return ret;
}
+static void tegra20_cpufreq_put_supported_hw(void *opp_table)
+{
+ dev_pm_opp_put_supported_hw(opp_table);
+}
+
+static void tegra20_cpufreq_dt_unregister(void *cpufreq_dt)
+{
+ platform_device_unregister(cpufreq_dt);
+}
+
static int tegra20_cpufreq_probe(struct platform_device *pdev)
{
struct platform_device *cpufreq_dt;
@@ -68,42 +78,31 @@ static int tegra20_cpufreq_probe(struct platform_device *pdev)
return err;
}
+ err = devm_add_action_or_reset(&pdev->dev,
+ tegra20_cpufreq_put_supported_hw,
+ opp_table);
+ if (err)
+ return err;
+
cpufreq_dt = platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
err = PTR_ERR_OR_ZERO(cpufreq_dt);
if (err) {
dev_err(&pdev->dev,
"failed to create cpufreq-dt device: %d\n", err);
- goto err_put_supported_hw;
+ return err;
}
- platform_set_drvdata(pdev, cpufreq_dt);
-
- return 0;
-
-err_put_supported_hw:
- dev_pm_opp_put_supported_hw(opp_table);
-
- return err;
-}
-
-static int tegra20_cpufreq_remove(struct platform_device *pdev)
-{
- struct platform_device *cpufreq_dt;
- struct opp_table *opp_table;
-
- cpufreq_dt = platform_get_drvdata(pdev);
- platform_device_unregister(cpufreq_dt);
-
- opp_table = dev_pm_opp_get_opp_table(get_cpu_device(0));
- dev_pm_opp_put_supported_hw(opp_table);
- dev_pm_opp_put_opp_table(opp_table);
+ err = devm_add_action_or_reset(&pdev->dev,
+ tegra20_cpufreq_dt_unregister,
+ cpufreq_dt);
+ if (err)
+ return err;
return 0;
}
static struct platform_driver tegra20_cpufreq_driver = {
.probe = tegra20_cpufreq_probe,
- .remove = tegra20_cpufreq_remove,
.driver = {
.name = "tegra20-cpufreq",
},
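
The tegra20 conversion above is a standard devm_add_action_or_reset() refactor: each resource acquired in probe is paired with a release action that the driver core runs automatically, in reverse order, on probe failure or driver unbind, which is what lets the .remove() callback disappear. A minimal sketch of the pattern follows; struct example_res, example_get() and example_put() are hypothetical stand-ins, not functions from this driver.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>

struct example_res;
struct example_res *example_get(struct device *dev);	/* hypothetical */
void example_put(struct example_res *res);		/* hypothetical */

static void example_release(void *data)
{
	/* Undo the matching acquisition; runs in reverse registration order. */
	example_put(data);
}

static int example_probe(struct platform_device *pdev)
{
	struct example_res *res = example_get(&pdev->dev);
	int err;

	if (IS_ERR(res))
		return PTR_ERR(res);

	/*
	 * On success the action fires at unbind; if registering the action
	 * itself fails, it fires immediately, so later error paths can just
	 * return without hand-written unwind labels.
	 */
	err = devm_add_action_or_reset(&pdev->dev, example_release, res);
	if (err)
		return err;

	return 0;
}
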
diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c
index f711d8eaea6a..51dfa9ae6cf5 100644
--- a/drivers/cpufreq/vexpress-spc-cpufreq.c
+++ b/drivers/cpufreq/vexpress-spc-cpufreq.c
@@ -486,8 +486,7 @@ static void ve_spc_cpufreq_ready(struct cpufreq_policy *policy)
static struct cpufreq_driver ve_spc_cpufreq_driver = {
.name = "vexpress-spc",
- .flags = CPUFREQ_STICKY |
- CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
+ .flags = CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = ve_spc_cpufreq_set_target,
diff --git a/drivers/cpuidle/cpuidle-big_little.c b/drivers/cpuidle/cpuidle-big_little.c
index 7f8ddc04342d..abe51185f243 100644
--- a/drivers/cpuidle/cpuidle-big_little.c
+++ b/drivers/cpuidle/cpuidle-big_little.c
@@ -155,8 +155,7 @@ static int __init bl_idle_driver_init(struct cpuidle_driver *drv, int part_id)
static const struct of_device_id compatible_machine_match[] = {
{ .compatible = "arm,vexpress,v2p-ca15_a7" },
- { .compatible = "samsung,exynos5420" },
- { .compatible = "samsung,exynos5800" },
+ { .compatible = "google,peach" },
{},
};
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index bbd51703e738..e535f28a8028 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -366,6 +366,7 @@ if CRYPTO_DEV_OMAP
config CRYPTO_DEV_OMAP_SHAM
tristate "Support for OMAP MD5/SHA1/SHA2 hw accelerator"
depends on ARCH_OMAP2PLUS
+ select CRYPTO_ENGINE
select CRYPTO_SHA1
select CRYPTO_MD5
select CRYPTO_SHA256
diff --git a/drivers/crypto/keembay/Kconfig b/drivers/crypto/keembay/Kconfig
index 3c16797b25b9..f2e17b0c4fa0 100644
--- a/drivers/crypto/keembay/Kconfig
+++ b/drivers/crypto/keembay/Kconfig
@@ -1,12 +1,13 @@
config CRYPTO_DEV_KEEMBAY_OCS_AES_SM4
tristate "Support for Intel Keem Bay OCS AES/SM4 HW acceleration"
- depends on OF || COMPILE_TEST
+ depends on HAS_IOMEM
+ depends on ARCH_KEEMBAY || COMPILE_TEST
select CRYPTO_SKCIPHER
select CRYPTO_AEAD
select CRYPTO_ENGINE
help
Support for Intel Keem Bay Offload and Crypto Subsystem (OCS) AES and
- SM4 cihper hardware acceleration for use with Crypto API.
+ SM4 cipher hardware acceleration for use with Crypto API.
Provides HW acceleration for the following transformations:
cbc(aes), ctr(aes), ccm(aes), gcm(aes), cbc(sm4), ctr(sm4), ccm(sm4)
diff --git a/drivers/crypto/marvell/cesa/cesa.h b/drivers/crypto/marvell/cesa/cesa.h
index fabfaaccca87..fa56b45620c7 100644
--- a/drivers/crypto/marvell/cesa/cesa.h
+++ b/drivers/crypto/marvell/cesa/cesa.h
@@ -300,11 +300,11 @@ struct mv_cesa_tdma_desc {
__le32 byte_cnt;
union {
__le32 src;
- dma_addr_t src_dma;
+ u32 src_dma;
};
union {
__le32 dst;
- dma_addr_t dst_dma;
+ u32 dst_dma;
};
__le32 next_dma;
diff --git a/drivers/crypto/qat/Kconfig b/drivers/crypto/qat/Kconfig
index beb379b23dc3..846a3d90b41a 100644
--- a/drivers/crypto/qat/Kconfig
+++ b/drivers/crypto/qat/Kconfig
@@ -11,6 +11,7 @@ config CRYPTO_DEV_QAT
select CRYPTO_SHA1
select CRYPTO_SHA256
select CRYPTO_SHA512
+ select CRYPTO_AES
select FW_LOADER
config CRYPTO_DEV_QAT_DH895xCC
diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c
index 27513d311242..737b207c9e30 100644
--- a/drivers/dax/bus.c
+++ b/drivers/dax/bus.c
@@ -367,19 +367,28 @@ void kill_dev_dax(struct dev_dax *dev_dax)
}
EXPORT_SYMBOL_GPL(kill_dev_dax);
-static void free_dev_dax_ranges(struct dev_dax *dev_dax)
+static void trim_dev_dax_range(struct dev_dax *dev_dax)
{
+ int i = dev_dax->nr_range - 1;
+ struct range *range = &dev_dax->ranges[i].range;
struct dax_region *dax_region = dev_dax->region;
- int i;
device_lock_assert(dax_region->dev);
- for (i = 0; i < dev_dax->nr_range; i++) {
- struct range *range = &dev_dax->ranges[i].range;
-
- __release_region(&dax_region->res, range->start,
- range_len(range));
+ dev_dbg(&dev_dax->dev, "delete range[%d]: %#llx:%#llx\n", i,
+ (unsigned long long)range->start,
+ (unsigned long long)range->end);
+
+ __release_region(&dax_region->res, range->start, range_len(range));
+ if (--dev_dax->nr_range == 0) {
+ kfree(dev_dax->ranges);
+ dev_dax->ranges = NULL;
}
- dev_dax->nr_range = 0;
+}
+
+static void free_dev_dax_ranges(struct dev_dax *dev_dax)
+{
+ while (dev_dax->nr_range)
+ trim_dev_dax_range(dev_dax);
}
static void unregister_dev_dax(void *dev)
@@ -763,22 +772,14 @@ static int alloc_dev_dax_range(struct dev_dax *dev_dax, u64 start,
return 0;
}
- ranges = krealloc(dev_dax->ranges, sizeof(*ranges)
- * (dev_dax->nr_range + 1), GFP_KERNEL);
- if (!ranges)
+ alloc = __request_region(res, start, size, dev_name(dev), 0);
+ if (!alloc)
return -ENOMEM;
- alloc = __request_region(res, start, size, dev_name(dev), 0);
- if (!alloc) {
- /*
- * If this was an empty set of ranges nothing else
- * will release @ranges, so do it now.
- */
- if (!dev_dax->nr_range) {
- kfree(ranges);
- ranges = NULL;
- }
- dev_dax->ranges = ranges;
+ ranges = krealloc(dev_dax->ranges, sizeof(*ranges)
+ * (dev_dax->nr_range + 1), GFP_KERNEL);
+ if (!ranges) {
+ __release_region(res, alloc->start, resource_size(alloc));
return -ENOMEM;
}
@@ -804,15 +805,10 @@ static int alloc_dev_dax_range(struct dev_dax *dev_dax, u64 start,
return 0;
rc = devm_register_dax_mapping(dev_dax, dev_dax->nr_range - 1);
- if (rc) {
- dev_dbg(dev, "delete range[%d]: %pa:%pa\n", dev_dax->nr_range - 1,
- &alloc->start, &alloc->end);
- dev_dax->nr_range--;
- __release_region(res, alloc->start, resource_size(alloc));
- return rc;
- }
+ if (rc)
+ trim_dev_dax_range(dev_dax);
- return 0;
+ return rc;
}
static int adjust_dev_dax_range(struct dev_dax *dev_dax, struct resource *res, resource_size_t size)
@@ -885,12 +881,7 @@ static int dev_dax_shrink(struct dev_dax *dev_dax, resource_size_t size)
if (shrink >= range_len(range)) {
devm_release_action(dax_region->dev,
unregister_dax_mapping, &mapping->dev);
- __release_region(&dax_region->res, range->start,
- range_len(range));
- dev_dax->nr_range--;
- dev_dbg(dev, "delete range[%d]: %#llx:%#llx\n", i,
- (unsigned long long) range->start,
- (unsigned long long) range->end);
+ trim_dev_dax_range(dev_dax);
to_shrink -= shrink;
if (!to_shrink)
break;
@@ -1114,16 +1105,9 @@ static ssize_t align_show(struct device *dev,
static ssize_t dev_dax_validate_align(struct dev_dax *dev_dax)
{
- resource_size_t dev_size = dev_dax_size(dev_dax);
struct device *dev = &dev_dax->dev;
int i;
- if (dev_size > 0 && !alloc_is_aligned(dev_dax, dev_size)) {
- dev_dbg(dev, "%s: align %u invalid for size %pa\n",
- __func__, dev_dax->align, &dev_size);
- return -EINVAL;
- }
-
for (i = 0; i < dev_dax->nr_range; i++) {
size_t len = range_len(&dev_dax->ranges[i].range);
@@ -1274,7 +1258,6 @@ static void dev_dax_release(struct device *dev)
put_dax(dax_dev);
free_dev_dax_id(dev_dax);
dax_region_put(dax_region);
- kfree(dev_dax->ranges);
kfree(dev_dax->pgmap);
kfree(dev_dax);
}
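
The alloc_dev_dax_range() reordering above follows an acquire-then-grow discipline: claim the region first, and only then krealloc() the bookkeeping array, so the only thing to undo on allocation failure is the region itself. All other teardown funnels through the new trim_dev_dax_range(), which always drops the highest-numbered range and frees the array once nr_range reaches zero, which is also why dev_dax_release() no longer kfrees dev_dax->ranges. A condensed sketch of the same ordering, with struct tracker and struct entry as hypothetical types:

static int add_entry(struct tracker *t, struct resource *parent,
		     u64 start, u64 size, const char *name)
{
	struct resource *res;
	struct entry *grown;

	/* Claim the hardware resource first... */
	res = __request_region(parent, start, size, name, 0);
	if (!res)
		return -ENOMEM;

	/* ...then grow the array; the region is the only thing to roll back. */
	grown = krealloc(t->entries, sizeof(*grown) * (t->nr + 1), GFP_KERNEL);
	if (!grown) {
		__release_region(parent, res->start, resource_size(res));
		return -ENOMEM;
	}
	t->entries = grown;
	t->entries[t->nr++].res = res;

	return 0;
}
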
diff --git a/drivers/dax/pmem/core.c b/drivers/dax/pmem/core.c
index 62b26bfceab1..062e8bc14223 100644
--- a/drivers/dax/pmem/core.c
+++ b/drivers/dax/pmem/core.c
@@ -52,7 +52,7 @@ struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys)
/* adjust the dax_region range to the start of data */
range = pgmap.range;
- range.start += offset,
+ range.start += offset;
dax_region = alloc_dax_region(dev, region_id, &range,
nd_region->target_node, le32_to_cpu(pfn_sb->align),
IORESOURCE_DAX_STATIC);
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index edc279be3e59..cadbd0a1a1ef 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -752,6 +752,7 @@ err_chrdev:
static void __exit dax_core_exit(void)
{
+ dax_bus_exit();
unregister_chrdev_region(dax_devt, MINORMASK+1);
ida_destroy(&dax_minor_ida);
dax_fs_exit();
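
The dax_core_exit() change adds the previously missing dax_bus_exit() call and places it first, so teardown runs in the reverse of the init order; keeping exit paths as mirror images of init is the usual convention, since later-initialized facilities may depend on earlier ones. Schematically, with subsys_a/subsys_b as placeholder names:

static int __init example_init(void)
{
	int rc;

	rc = subsys_a_init();
	if (rc)
		return rc;

	rc = subsys_b_init();	/* may depend on A */
	if (rc)
		subsys_a_exit();

	return rc;
}

static void __exit example_exit(void)
{
	subsys_b_exit();	/* reverse order: B goes down before A */
	subsys_a_exit();
}
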
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 6aa10de792b3..bf3047896e41 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -757,6 +757,9 @@ static void devfreq_dev_release(struct device *dev)
if (devfreq->profile->exit)
devfreq->profile->exit(devfreq->dev.parent);
+ if (devfreq->opp_table)
+ dev_pm_opp_put_opp_table(devfreq->opp_table);
+
mutex_destroy(&devfreq->lock);
kfree(devfreq);
}
@@ -844,6 +847,10 @@ struct devfreq *devfreq_add_device(struct device *dev,
}
devfreq->suspend_freq = dev_pm_opp_get_suspend_opp_freq(dev);
+ devfreq->opp_table = dev_pm_opp_get_opp_table(dev);
+ if (IS_ERR(devfreq->opp_table))
+ devfreq->opp_table = NULL;
+
atomic_set(&devfreq->suspend_count, 0);
dev_set_name(&devfreq->dev, "%s", dev_name(dev));
@@ -893,13 +900,13 @@ struct devfreq *devfreq_add_device(struct device *dev,
goto err_devfreq;
devfreq->nb_min.notifier_call = qos_min_notifier_call;
- err = dev_pm_qos_add_notifier(devfreq->dev.parent, &devfreq->nb_min,
+ err = dev_pm_qos_add_notifier(dev, &devfreq->nb_min,
DEV_PM_QOS_MIN_FREQUENCY);
if (err)
goto err_devfreq;
devfreq->nb_max.notifier_call = qos_max_notifier_call;
- err = dev_pm_qos_add_notifier(devfreq->dev.parent, &devfreq->nb_max,
+ err = dev_pm_qos_add_notifier(dev, &devfreq->nb_max,
DEV_PM_QOS_MAX_FREQUENCY);
if (err)
goto err_devfreq;
diff --git a/drivers/devfreq/governor.h b/drivers/devfreq/governor.h
index 2a52f97b542d..70f44b3ca42e 100644
--- a/drivers/devfreq/governor.h
+++ b/drivers/devfreq/governor.h
@@ -40,7 +40,7 @@
/*
* Definition of governor attribute flags except for common sysfs attributes
* - DEVFREQ_GOV_ATTR_POLLING_INTERVAL
- * : Indicate polling_interal sysfs attribute
+ * : Indicate polling_interval sysfs attribute
* - DEVFREQ_GOV_ATTR_TIMER
* : Indicate timer sysfs attribute
*/
diff --git a/drivers/devfreq/governor_passive.c b/drivers/devfreq/governor_passive.c
index 63332e4a65ae..b094132bd20b 100644
--- a/drivers/devfreq/governor_passive.c
+++ b/drivers/devfreq/governor_passive.c
@@ -19,18 +19,16 @@ static int devfreq_passive_get_target_freq(struct devfreq *devfreq,
= (struct devfreq_passive_data *)devfreq->data;
struct devfreq *parent_devfreq = (struct devfreq *)p_data->parent;
unsigned long child_freq = ULONG_MAX;
- struct dev_pm_opp *opp;
- int i, count, ret = 0;
+ struct dev_pm_opp *opp, *p_opp;
+ int i, count;
/*
* If the devfreq device with passive governor has the specific method
* to determine the next frequency, should use the get_target_freq()
* of struct devfreq_passive_data.
*/
- if (p_data->get_target_freq) {
- ret = p_data->get_target_freq(devfreq, freq);
- goto out;
- }
+ if (p_data->get_target_freq)
+ return p_data->get_target_freq(devfreq, freq);
/*
* If the parent and passive devfreq device uses the OPP table,
@@ -56,26 +54,35 @@ static int devfreq_passive_get_target_freq(struct devfreq *devfreq,
* list of parent device. Because in this case, *freq is temporary
* value which is decided by ondemand governor.
*/
- opp = devfreq_recommended_opp(parent_devfreq->dev.parent, freq, 0);
- if (IS_ERR(opp)) {
- ret = PTR_ERR(opp);
- goto out;
- }
+ if (devfreq->opp_table && parent_devfreq->opp_table) {
+ p_opp = devfreq_recommended_opp(parent_devfreq->dev.parent,
+ freq, 0);
+ if (IS_ERR(p_opp))
+ return PTR_ERR(p_opp);
+
+ opp = dev_pm_opp_xlate_required_opp(parent_devfreq->opp_table,
+ devfreq->opp_table, p_opp);
+ dev_pm_opp_put(p_opp);
- dev_pm_opp_put(opp);
+ if (IS_ERR(opp))
+ return PTR_ERR(opp);
+
+ *freq = dev_pm_opp_get_freq(opp);
+ dev_pm_opp_put(opp);
+
+ return 0;
+ }
/*
- * Get the OPP table's index of decided freqeuncy by governor
+ * Get the OPP table's index of decided frequency by governor
* of parent device.
*/
for (i = 0; i < parent_devfreq->profile->max_state; i++)
if (parent_devfreq->profile->freq_table[i] == *freq)
break;
- if (i == parent_devfreq->profile->max_state) {
- ret = -EINVAL;
- goto out;
- }
+ if (i == parent_devfreq->profile->max_state)
+ return -EINVAL;
/* Get the suitable frequency by using index of parent device. */
if (i < devfreq->profile->max_state) {
@@ -88,8 +95,7 @@ static int devfreq_passive_get_target_freq(struct devfreq *devfreq,
/* Return the suitable frequency for passive device. */
*freq = child_freq;
-out:
- return ret;
+ return 0;
}
static int devfreq_passive_notifier_call(struct notifier_block *nb,
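
The rewritten branch above leans on the OPP core's required-opps linkage: when both devices have OPP tables and the passive device's table records (via the DT required-opps property) which parent OPP each of its OPPs depends on, dev_pm_opp_xlate_required_opp() translates the parent's chosen OPP directly into the follower's OPP, replacing the fragile frequency/index matching. A trimmed usage sketch under those assumptions; passive_pick_freq() and the parameter names are illustrative:

#include <linux/devfreq.h>
#include <linux/pm_opp.h>

static int passive_pick_freq(struct device *parent_dev,
			     struct opp_table *parent_tbl,
			     struct opp_table *child_tbl,
			     unsigned long *freq)
{
	struct dev_pm_opp *p_opp, *opp;

	/* Let the OPP core pick the parent OPP for the requested rate. */
	p_opp = devfreq_recommended_opp(parent_dev, freq, 0);
	if (IS_ERR(p_opp))
		return PTR_ERR(p_opp);

	/* Follow the required-opps link from parent OPP to child OPP. */
	opp = dev_pm_opp_xlate_required_opp(parent_tbl, child_tbl, p_opp);
	dev_pm_opp_put(p_opp);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	*freq = dev_pm_opp_get_freq(opp);
	dev_pm_opp_put(opp);

	return 0;
}
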
diff --git a/drivers/devfreq/rk3399_dmc.c b/drivers/devfreq/rk3399_dmc.c
index 2e912166a993..9e9d3b4c6d48 100644
--- a/drivers/devfreq/rk3399_dmc.c
+++ b/drivers/devfreq/rk3399_dmc.c
@@ -400,7 +400,7 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
default:
ret = -EINVAL;
goto err_edev;
- };
+ }
no_pmu:
arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, 0, 0,
diff --git a/drivers/devfreq/tegra30-devfreq.c b/drivers/devfreq/tegra30-devfreq.c
index 117cad7968ab..ce83f883ca65 100644
--- a/drivers/devfreq/tegra30-devfreq.c
+++ b/drivers/devfreq/tegra30-devfreq.c
@@ -647,7 +647,7 @@ static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
return PTR_ERR(opp);
}
- ret = dev_pm_opp_set_bw(dev, opp);
+ ret = dev_pm_opp_set_opp(dev, opp);
dev_pm_opp_put(opp);
return ret;
@@ -849,7 +849,7 @@ static int tegra_devfreq_probe(struct platform_device *pdev)
return err;
}
- err = dev_pm_opp_of_add_table(&pdev->dev);
+ err = dev_pm_opp_of_add_table_noclk(&pdev->dev, 0);
if (err) {
dev_err(&pdev->dev, "Failed to add OPP table: %d\n", err);
goto put_hw;
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 0eb80c1ecdab..9ad6397aaa97 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -76,10 +76,6 @@ static void dma_buf_release(struct dentry *dentry)
dmabuf->ops->release(dmabuf);
- mutex_lock(&db_list.lock);
- list_del(&dmabuf->list_node);
- mutex_unlock(&db_list.lock);
-
if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
dma_resv_fini(dmabuf->resv);
@@ -88,6 +84,22 @@ static void dma_buf_release(struct dentry *dentry)
kfree(dmabuf);
}
+static int dma_buf_file_release(struct inode *inode, struct file *file)
+{
+ struct dma_buf *dmabuf;
+
+ if (!is_dma_buf_file(file))
+ return -EINVAL;
+
+ dmabuf = file->private_data;
+
+ mutex_lock(&db_list.lock);
+ list_del(&dmabuf->list_node);
+ mutex_unlock(&db_list.lock);
+
+ return 0;
+}
+
static const struct dentry_operations dma_buf_dentry_ops = {
.d_dname = dmabuffs_dname,
.d_release = dma_buf_release,
@@ -413,6 +425,7 @@ static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
}
static const struct file_operations dma_buf_fops = {
+ .release = dma_buf_file_release,
.mmap = dma_buf_mmap_internal,
.llseek = dma_buf_llseek,
.poll = dma_buf_poll,
@@ -1166,9 +1179,6 @@ EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
unsigned long pgoff)
{
- struct file *oldfile;
- int ret;
-
if (WARN_ON(!dmabuf || !vma))
return -EINVAL;
@@ -1186,22 +1196,10 @@ int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
return -EINVAL;
/* readjust the vma */
- get_file(dmabuf->file);
- oldfile = vma->vm_file;
- vma->vm_file = dmabuf->file;
+ vma_set_file(vma, dmabuf->file);
vma->vm_pgoff = pgoff;
- ret = dmabuf->ops->mmap(dmabuf, vma);
- if (ret) {
- /* restore old parameters on failure */
- vma->vm_file = oldfile;
- fput(dmabuf->file);
- } else {
- if (oldfile)
- fput(oldfile);
- }
- return ret;
-
+ return dmabuf->ops->mmap(dmabuf, vma);
}
EXPORT_SYMBOL_GPL(dma_buf_mmap);
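
dma_buf_mmap() now delegates the vm_file juggling to vma_set_file(), introduced for exactly this pattern, so the error path no longer has to restore the old file by hand. Its expected semantics, as a sketch rather than the authoritative mm implementation, are a reference-safe swap:

#include <linux/fs.h>
#include <linux/mm.h>

/* Sketch: change a VMA's backing file without a refcount window. */
void vma_set_file(struct vm_area_struct *vma, struct file *file)
{
	get_file(file);			/* take a ref on the new file first */
	swap(vma->vm_file, file);	/* install it; 'file' now holds the old one */
	fput(file);			/* release the previous backing file */
}
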
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index bb5a42b10c29..6ddbeb5dfbf6 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -200,7 +200,7 @@ int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
max = max(old->shared_count + num_fences,
old->shared_max * 2);
} else {
- max = 4;
+ max = max(4ul, roundup_pow_of_two(num_fences));
}
new = dma_resv_list_alloc(max);
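
Seeding the shared-fence list at roundup_pow_of_two(num_fences) instead of a fixed 4 means a reservation for many fences is satisfied by one allocation rather than a 4, 8, 16, ... doubling chain; together with the existing grow-by-doubling branch, the number of reallocations stays logarithmic in the final count. A small illustration of the new first-allocation sizing:

#include <linux/log2.h>
#include <linux/minmax.h>

/* Sketch: e.g. num_fences = 13 -> 16 slots up front, not 4 -> 8 -> 16. */
static unsigned long first_alloc_size(unsigned int num_fences)
{
	return max(4ul, roundup_pow_of_two(num_fences));
}
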
diff --git a/drivers/dma-buf/heaps/Makefile b/drivers/dma-buf/heaps/Makefile
index 6e54cdec3da0..974467791032 100644
--- a/drivers/dma-buf/heaps/Makefile
+++ b/drivers/dma-buf/heaps/Makefile
@@ -1,4 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
-obj-y += heap-helpers.o
obj-$(CONFIG_DMABUF_HEAPS_SYSTEM) += system_heap.o
obj-$(CONFIG_DMABUF_HEAPS_CMA) += cma_heap.o
diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
index e55384dc115b..364fc2f3e499 100644
--- a/drivers/dma-buf/heaps/cma_heap.c
+++ b/drivers/dma-buf/heaps/cma_heap.c
@@ -2,76 +2,309 @@
/*
* DMABUF CMA heap exporter
*
- * Copyright (C) 2012, 2019 Linaro Ltd.
+ * Copyright (C) 2012, 2019, 2020 Linaro Ltd.
* Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
+ *
+ * Also utilizing parts of Andrew Davis' SRAM heap:
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+ * Andrew F. Davis <afd@ti.com>
*/
-
#include <linux/cma.h>
-#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/dma-map-ops.h>
#include <linux/err.h>
-#include <linux/errno.h>
#include <linux/highmem.h>
+#include <linux/io.h>
+#include <linux/mm.h>
#include <linux/module.h>
-#include <linux/slab.h>
#include <linux/scatterlist.h>
-#include <linux/sched/signal.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
-#include "heap-helpers.h"
struct cma_heap {
struct dma_heap *heap;
struct cma *cma;
};
-static void cma_heap_free(struct heap_helper_buffer *buffer)
+struct cma_heap_buffer {
+ struct cma_heap *heap;
+ struct list_head attachments;
+ struct mutex lock;
+ unsigned long len;
+ struct page *cma_pages;
+ struct page **pages;
+ pgoff_t pagecount;
+ int vmap_cnt;
+ void *vaddr;
+};
+
+struct dma_heap_attachment {
+ struct device *dev;
+ struct sg_table table;
+ struct list_head list;
+ bool mapped;
+};
+
+static int cma_heap_attach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attachment)
+{
+ struct cma_heap_buffer *buffer = dmabuf->priv;
+ struct dma_heap_attachment *a;
+ int ret;
+
+ a = kzalloc(sizeof(*a), GFP_KERNEL);
+ if (!a)
+ return -ENOMEM;
+
+ ret = sg_alloc_table_from_pages(&a->table, buffer->pages,
+ buffer->pagecount, 0,
+ buffer->pagecount << PAGE_SHIFT,
+ GFP_KERNEL);
+ if (ret) {
+ kfree(a);
+ return ret;
+ }
+
+ a->dev = attachment->dev;
+ INIT_LIST_HEAD(&a->list);
+ a->mapped = false;
+
+ attachment->priv = a;
+
+ mutex_lock(&buffer->lock);
+ list_add(&a->list, &buffer->attachments);
+ mutex_unlock(&buffer->lock);
+
+ return 0;
+}
+
+static void cma_heap_detach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attachment)
+{
+ struct cma_heap_buffer *buffer = dmabuf->priv;
+ struct dma_heap_attachment *a = attachment->priv;
+
+ mutex_lock(&buffer->lock);
+ list_del(&a->list);
+ mutex_unlock(&buffer->lock);
+
+ sg_free_table(&a->table);
+ kfree(a);
+}
+
+static struct sg_table *cma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
+ enum dma_data_direction direction)
{
- struct cma_heap *cma_heap = dma_heap_get_drvdata(buffer->heap);
- unsigned long nr_pages = buffer->pagecount;
- struct page *cma_pages = buffer->priv_virt;
+ struct dma_heap_attachment *a = attachment->priv;
+ struct sg_table *table = &a->table;
+ int ret;
+
+ ret = dma_map_sgtable(attachment->dev, table, direction, 0);
+ if (ret)
+ return ERR_PTR(-ENOMEM);
+ a->mapped = true;
+ return table;
+}
+
+static void cma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
+ struct sg_table *table,
+ enum dma_data_direction direction)
+{
+ struct dma_heap_attachment *a = attachment->priv;
+
+ a->mapped = false;
+ dma_unmap_sgtable(attachment->dev, table, direction, 0);
+}
+
+static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
+ enum dma_data_direction direction)
+{
+ struct cma_heap_buffer *buffer = dmabuf->priv;
+ struct dma_heap_attachment *a;
+
+ if (buffer->vmap_cnt)
+ invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);
+
+ mutex_lock(&buffer->lock);
+ list_for_each_entry(a, &buffer->attachments, list) {
+ if (!a->mapped)
+ continue;
+ dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
+ }
+ mutex_unlock(&buffer->lock);
+
+ return 0;
+}
+
+static int cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
+ enum dma_data_direction direction)
+{
+ struct cma_heap_buffer *buffer = dmabuf->priv;
+ struct dma_heap_attachment *a;
+
+ if (buffer->vmap_cnt)
+ flush_kernel_vmap_range(buffer->vaddr, buffer->len);
+
+ mutex_lock(&buffer->lock);
+ list_for_each_entry(a, &buffer->attachments, list) {
+ if (!a->mapped)
+ continue;
+ dma_sync_sgtable_for_device(a->dev, &a->table, direction);
+ }
+ mutex_unlock(&buffer->lock);
+
+ return 0;
+}
+
+static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct cma_heap_buffer *buffer = vma->vm_private_data;
+
+	if (vmf->pgoff >= buffer->pagecount)
+ return VM_FAULT_SIGBUS;
+
+ vmf->page = buffer->pages[vmf->pgoff];
+ get_page(vmf->page);
+
+ return 0;
+}
+
+static const struct vm_operations_struct dma_heap_vm_ops = {
+ .fault = cma_heap_vm_fault,
+};
+
+static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
+{
+ struct cma_heap_buffer *buffer = dmabuf->priv;
+
+ if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
+ return -EINVAL;
+
+ vma->vm_ops = &dma_heap_vm_ops;
+ vma->vm_private_data = buffer;
+
+ return 0;
+}
+
+static void *cma_heap_do_vmap(struct cma_heap_buffer *buffer)
+{
+ void *vaddr;
+
+ vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL);
+ if (!vaddr)
+ return ERR_PTR(-ENOMEM);
+
+ return vaddr;
+}
+
+static int cma_heap_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
+{
+ struct cma_heap_buffer *buffer = dmabuf->priv;
+ void *vaddr;
+ int ret = 0;
+
+ mutex_lock(&buffer->lock);
+ if (buffer->vmap_cnt) {
+ buffer->vmap_cnt++;
+ dma_buf_map_set_vaddr(map, buffer->vaddr);
+ goto out;
+ }
+
+ vaddr = cma_heap_do_vmap(buffer);
+ if (IS_ERR(vaddr)) {
+ ret = PTR_ERR(vaddr);
+ goto out;
+ }
+ buffer->vaddr = vaddr;
+ buffer->vmap_cnt++;
+ dma_buf_map_set_vaddr(map, buffer->vaddr);
+out:
+ mutex_unlock(&buffer->lock);
+
+ return ret;
+}
+
+static void cma_heap_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
+{
+ struct cma_heap_buffer *buffer = dmabuf->priv;
+
+ mutex_lock(&buffer->lock);
+ if (!--buffer->vmap_cnt) {
+ vunmap(buffer->vaddr);
+ buffer->vaddr = NULL;
+ }
+ mutex_unlock(&buffer->lock);
+ dma_buf_map_clear(map);
+}
+
+static void cma_heap_dma_buf_release(struct dma_buf *dmabuf)
+{
+ struct cma_heap_buffer *buffer = dmabuf->priv;
+ struct cma_heap *cma_heap = buffer->heap;
+
+ if (buffer->vmap_cnt > 0) {
+ WARN(1, "%s: buffer still mapped in the kernel\n", __func__);
+ vunmap(buffer->vaddr);
+ buffer->vaddr = NULL;
+ }
/* free page list */
kfree(buffer->pages);
/* release memory */
- cma_release(cma_heap->cma, cma_pages, nr_pages);
+ cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount);
kfree(buffer);
}
-/* dmabuf heap CMA operations functions */
+static const struct dma_buf_ops cma_heap_buf_ops = {
+ .attach = cma_heap_attach,
+ .detach = cma_heap_detach,
+ .map_dma_buf = cma_heap_map_dma_buf,
+ .unmap_dma_buf = cma_heap_unmap_dma_buf,
+ .begin_cpu_access = cma_heap_dma_buf_begin_cpu_access,
+ .end_cpu_access = cma_heap_dma_buf_end_cpu_access,
+ .mmap = cma_heap_mmap,
+ .vmap = cma_heap_vmap,
+ .vunmap = cma_heap_vunmap,
+ .release = cma_heap_dma_buf_release,
+};
+
static int cma_heap_allocate(struct dma_heap *heap,
- unsigned long len,
- unsigned long fd_flags,
- unsigned long heap_flags)
+ unsigned long len,
+ unsigned long fd_flags,
+ unsigned long heap_flags)
{
struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
- struct heap_helper_buffer *helper_buffer;
- struct page *cma_pages;
+ struct cma_heap_buffer *buffer;
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
size_t size = PAGE_ALIGN(len);
- unsigned long nr_pages = size >> PAGE_SHIFT;
+ pgoff_t pagecount = size >> PAGE_SHIFT;
unsigned long align = get_order(size);
+ struct page *cma_pages;
struct dma_buf *dmabuf;
int ret = -ENOMEM;
pgoff_t pg;
- if (align > CONFIG_CMA_ALIGNMENT)
- align = CONFIG_CMA_ALIGNMENT;
-
- helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL);
- if (!helper_buffer)
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer)
return -ENOMEM;
- init_heap_helper_buffer(helper_buffer, cma_heap_free);
- helper_buffer->heap = heap;
- helper_buffer->size = len;
+ INIT_LIST_HEAD(&buffer->attachments);
+ mutex_init(&buffer->lock);
+ buffer->len = size;
+
+ if (align > CONFIG_CMA_ALIGNMENT)
+ align = CONFIG_CMA_ALIGNMENT;
- cma_pages = cma_alloc(cma_heap->cma, nr_pages, align, false);
+ cma_pages = cma_alloc(cma_heap->cma, pagecount, align, false);
if (!cma_pages)
- goto free_buf;
+ goto free_buffer;
+ /* Clear the cma pages */
if (PageHighMem(cma_pages)) {
- unsigned long nr_clear_pages = nr_pages;
+ unsigned long nr_clear_pages = pagecount;
struct page *page = cma_pages;
while (nr_clear_pages > 0) {
@@ -85,7 +318,6 @@ static int cma_heap_allocate(struct dma_heap *heap,
*/
if (fatal_signal_pending(current))
goto free_cma;
-
page++;
nr_clear_pages--;
}
@@ -93,28 +325,30 @@ static int cma_heap_allocate(struct dma_heap *heap,
memset(page_address(cma_pages), 0, size);
}
- helper_buffer->pagecount = nr_pages;
- helper_buffer->pages = kmalloc_array(helper_buffer->pagecount,
- sizeof(*helper_buffer->pages),
- GFP_KERNEL);
- if (!helper_buffer->pages) {
+ buffer->pages = kmalloc_array(pagecount, sizeof(*buffer->pages), GFP_KERNEL);
+ if (!buffer->pages) {
ret = -ENOMEM;
goto free_cma;
}
- for (pg = 0; pg < helper_buffer->pagecount; pg++)
- helper_buffer->pages[pg] = &cma_pages[pg];
+ for (pg = 0; pg < pagecount; pg++)
+ buffer->pages[pg] = &cma_pages[pg];
+
+ buffer->cma_pages = cma_pages;
+ buffer->heap = cma_heap;
+ buffer->pagecount = pagecount;
/* create the dmabuf */
- dmabuf = heap_helper_export_dmabuf(helper_buffer, fd_flags);
+ exp_info.ops = &cma_heap_buf_ops;
+ exp_info.size = buffer->len;
+ exp_info.flags = fd_flags;
+ exp_info.priv = buffer;
+ dmabuf = dma_buf_export(&exp_info);
if (IS_ERR(dmabuf)) {
ret = PTR_ERR(dmabuf);
goto free_pages;
}
- helper_buffer->dmabuf = dmabuf;
- helper_buffer->priv_virt = cma_pages;
-
ret = dma_buf_fd(dmabuf, fd_flags);
if (ret < 0) {
dma_buf_put(dmabuf);
@@ -125,11 +359,12 @@ static int cma_heap_allocate(struct dma_heap *heap,
return ret;
free_pages:
- kfree(helper_buffer->pages);
+ kfree(buffer->pages);
free_cma:
- cma_release(cma_heap->cma, cma_pages, nr_pages);
-free_buf:
- kfree(helper_buffer);
+ cma_release(cma_heap->cma, cma_pages, pagecount);
+free_buffer:
+ kfree(buffer);
+
return ret;
}
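
Both rewritten heaps use the same refcounted-vmap idiom seen in cma_heap_vmap()/cma_heap_vunmap(): vmap_cnt is manipulated under the buffer mutex, the kernel mapping is created only on the 0 -> 1 transition, and vunmap() runs only on the 1 -> 0 transition, so concurrent dma_buf_vmap() users share a single mapping. Reduced to its core, with struct example_buffer as a hypothetical stand-in:

#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>

static void *buffer_vmap_get(struct example_buffer *b)
{
	void *vaddr;

	mutex_lock(&b->lock);
	if (!b->vmap_cnt) {
		/* First user: build the kernel mapping. */
		vaddr = vmap(b->pages, b->pagecount, VM_MAP, PAGE_KERNEL);
		if (!vaddr) {
			mutex_unlock(&b->lock);
			return ERR_PTR(-ENOMEM);
		}
		b->vaddr = vaddr;
	}
	b->vmap_cnt++;
	vaddr = b->vaddr;
	mutex_unlock(&b->lock);

	return vaddr;
}

static void buffer_vmap_put(struct example_buffer *b)
{
	mutex_lock(&b->lock);
	if (!--b->vmap_cnt) {
		/* Last user: tear the mapping down. */
		vunmap(b->vaddr);
		b->vaddr = NULL;
	}
	mutex_unlock(&b->lock);
}
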
diff --git a/drivers/dma-buf/heaps/heap-helpers.c b/drivers/dma-buf/heaps/heap-helpers.c
deleted file mode 100644
index fcf4ce3e2cbb..000000000000
--- a/drivers/dma-buf/heaps/heap-helpers.c
+++ /dev/null
@@ -1,274 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/device.h>
-#include <linux/dma-buf.h>
-#include <linux/err.h>
-#include <linux/highmem.h>
-#include <linux/idr.h>
-#include <linux/list.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/vmalloc.h>
-#include <uapi/linux/dma-heap.h>
-
-#include "heap-helpers.h"
-
-void init_heap_helper_buffer(struct heap_helper_buffer *buffer,
- void (*free)(struct heap_helper_buffer *))
-{
- buffer->priv_virt = NULL;
- mutex_init(&buffer->lock);
- buffer->vmap_cnt = 0;
- buffer->vaddr = NULL;
- buffer->pagecount = 0;
- buffer->pages = NULL;
- INIT_LIST_HEAD(&buffer->attachments);
- buffer->free = free;
-}
-
-struct dma_buf *heap_helper_export_dmabuf(struct heap_helper_buffer *buffer,
- int fd_flags)
-{
- DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
-
- exp_info.ops = &heap_helper_ops;
- exp_info.size = buffer->size;
- exp_info.flags = fd_flags;
- exp_info.priv = buffer;
-
- return dma_buf_export(&exp_info);
-}
-
-static void *dma_heap_map_kernel(struct heap_helper_buffer *buffer)
-{
- void *vaddr;
-
- vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL);
- if (!vaddr)
- return ERR_PTR(-ENOMEM);
-
- return vaddr;
-}
-
-static void dma_heap_buffer_destroy(struct heap_helper_buffer *buffer)
-{
- if (buffer->vmap_cnt > 0) {
- WARN(1, "%s: buffer still mapped in the kernel\n", __func__);
- vunmap(buffer->vaddr);
- }
-
- buffer->free(buffer);
-}
-
-static void *dma_heap_buffer_vmap_get(struct heap_helper_buffer *buffer)
-{
- void *vaddr;
-
- if (buffer->vmap_cnt) {
- buffer->vmap_cnt++;
- return buffer->vaddr;
- }
- vaddr = dma_heap_map_kernel(buffer);
- if (IS_ERR(vaddr))
- return vaddr;
- buffer->vaddr = vaddr;
- buffer->vmap_cnt++;
- return vaddr;
-}
-
-static void dma_heap_buffer_vmap_put(struct heap_helper_buffer *buffer)
-{
- if (!--buffer->vmap_cnt) {
- vunmap(buffer->vaddr);
- buffer->vaddr = NULL;
- }
-}
-
-struct dma_heaps_attachment {
- struct device *dev;
- struct sg_table table;
- struct list_head list;
-};
-
-static int dma_heap_attach(struct dma_buf *dmabuf,
- struct dma_buf_attachment *attachment)
-{
- struct dma_heaps_attachment *a;
- struct heap_helper_buffer *buffer = dmabuf->priv;
- int ret;
-
- a = kzalloc(sizeof(*a), GFP_KERNEL);
- if (!a)
- return -ENOMEM;
-
- ret = sg_alloc_table_from_pages(&a->table, buffer->pages,
- buffer->pagecount, 0,
- buffer->pagecount << PAGE_SHIFT,
- GFP_KERNEL);
- if (ret) {
- kfree(a);
- return ret;
- }
-
- a->dev = attachment->dev;
- INIT_LIST_HEAD(&a->list);
-
- attachment->priv = a;
-
- mutex_lock(&buffer->lock);
- list_add(&a->list, &buffer->attachments);
- mutex_unlock(&buffer->lock);
-
- return 0;
-}
-
-static void dma_heap_detach(struct dma_buf *dmabuf,
- struct dma_buf_attachment *attachment)
-{
- struct dma_heaps_attachment *a = attachment->priv;
- struct heap_helper_buffer *buffer = dmabuf->priv;
-
- mutex_lock(&buffer->lock);
- list_del(&a->list);
- mutex_unlock(&buffer->lock);
-
- sg_free_table(&a->table);
- kfree(a);
-}
-
-static
-struct sg_table *dma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
- enum dma_data_direction direction)
-{
- struct dma_heaps_attachment *a = attachment->priv;
- struct sg_table *table = &a->table;
- int ret;
-
- ret = dma_map_sgtable(attachment->dev, table, direction, 0);
- if (ret)
- table = ERR_PTR(ret);
- return table;
-}
-
-static void dma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
- struct sg_table *table,
- enum dma_data_direction direction)
-{
- dma_unmap_sgtable(attachment->dev, table, direction, 0);
-}
-
-static vm_fault_t dma_heap_vm_fault(struct vm_fault *vmf)
-{
- struct vm_area_struct *vma = vmf->vma;
- struct heap_helper_buffer *buffer = vma->vm_private_data;
-
- if (vmf->pgoff > buffer->pagecount)
- return VM_FAULT_SIGBUS;
-
- vmf->page = buffer->pages[vmf->pgoff];
- get_page(vmf->page);
-
- return 0;
-}
-
-static const struct vm_operations_struct dma_heap_vm_ops = {
- .fault = dma_heap_vm_fault,
-};
-
-static int dma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
-{
- struct heap_helper_buffer *buffer = dmabuf->priv;
-
- if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
- return -EINVAL;
-
- vma->vm_ops = &dma_heap_vm_ops;
- vma->vm_private_data = buffer;
-
- return 0;
-}
-
-static void dma_heap_dma_buf_release(struct dma_buf *dmabuf)
-{
- struct heap_helper_buffer *buffer = dmabuf->priv;
-
- dma_heap_buffer_destroy(buffer);
-}
-
-static int dma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
- enum dma_data_direction direction)
-{
- struct heap_helper_buffer *buffer = dmabuf->priv;
- struct dma_heaps_attachment *a;
- int ret = 0;
-
- mutex_lock(&buffer->lock);
-
- if (buffer->vmap_cnt)
- invalidate_kernel_vmap_range(buffer->vaddr, buffer->size);
-
- list_for_each_entry(a, &buffer->attachments, list) {
- dma_sync_sg_for_cpu(a->dev, a->table.sgl, a->table.nents,
- direction);
- }
- mutex_unlock(&buffer->lock);
-
- return ret;
-}
-
-static int dma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
- enum dma_data_direction direction)
-{
- struct heap_helper_buffer *buffer = dmabuf->priv;
- struct dma_heaps_attachment *a;
-
- mutex_lock(&buffer->lock);
-
- if (buffer->vmap_cnt)
- flush_kernel_vmap_range(buffer->vaddr, buffer->size);
-
- list_for_each_entry(a, &buffer->attachments, list) {
- dma_sync_sg_for_device(a->dev, a->table.sgl, a->table.nents,
- direction);
- }
- mutex_unlock(&buffer->lock);
-
- return 0;
-}
-
-static int dma_heap_dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
-{
- struct heap_helper_buffer *buffer = dmabuf->priv;
- void *vaddr;
-
- mutex_lock(&buffer->lock);
- vaddr = dma_heap_buffer_vmap_get(buffer);
- mutex_unlock(&buffer->lock);
-
- if (!vaddr)
- return -ENOMEM;
- dma_buf_map_set_vaddr(map, vaddr);
-
- return 0;
-}
-
-static void dma_heap_dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
-{
- struct heap_helper_buffer *buffer = dmabuf->priv;
-
- mutex_lock(&buffer->lock);
- dma_heap_buffer_vmap_put(buffer);
- mutex_unlock(&buffer->lock);
-}
-
-const struct dma_buf_ops heap_helper_ops = {
- .map_dma_buf = dma_heap_map_dma_buf,
- .unmap_dma_buf = dma_heap_unmap_dma_buf,
- .mmap = dma_heap_mmap,
- .release = dma_heap_dma_buf_release,
- .attach = dma_heap_attach,
- .detach = dma_heap_detach,
- .begin_cpu_access = dma_heap_dma_buf_begin_cpu_access,
- .end_cpu_access = dma_heap_dma_buf_end_cpu_access,
- .vmap = dma_heap_dma_buf_vmap,
- .vunmap = dma_heap_dma_buf_vunmap,
-};
diff --git a/drivers/dma-buf/heaps/heap-helpers.h b/drivers/dma-buf/heaps/heap-helpers.h
deleted file mode 100644
index 805d2df88024..000000000000
--- a/drivers/dma-buf/heaps/heap-helpers.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * DMABUF Heaps helper code
- *
- * Copyright (C) 2011 Google, Inc.
- * Copyright (C) 2019 Linaro Ltd.
- */
-
-#ifndef _HEAP_HELPERS_H
-#define _HEAP_HELPERS_H
-
-#include <linux/dma-heap.h>
-#include <linux/list.h>
-
-/**
- * struct heap_helper_buffer - helper buffer metadata
- * @heap: back pointer to the heap the buffer came from
- * @dmabuf: backing dma-buf for this buffer
- * @size: size of the buffer
- * @priv_virt pointer to heap specific private value
- * @lock mutext to protect the data in this structure
- * @vmap_cnt count of vmap references on the buffer
- * @vaddr vmap'ed virtual address
- * @pagecount number of pages in the buffer
- * @pages list of page pointers
- * @attachments list of device attachments
- *
- * @free heap callback to free the buffer
- */
-struct heap_helper_buffer {
- struct dma_heap *heap;
- struct dma_buf *dmabuf;
- size_t size;
-
- void *priv_virt;
- struct mutex lock;
- int vmap_cnt;
- void *vaddr;
- pgoff_t pagecount;
- struct page **pages;
- struct list_head attachments;
-
- void (*free)(struct heap_helper_buffer *buffer);
-};
-
-void init_heap_helper_buffer(struct heap_helper_buffer *buffer,
- void (*free)(struct heap_helper_buffer *));
-
-struct dma_buf *heap_helper_export_dmabuf(struct heap_helper_buffer *buffer,
- int fd_flags);
-
-extern const struct dma_buf_ops heap_helper_ops;
-#endif /* _HEAP_HELPERS_H */
diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c
index 0bf688e3c023..17e0e9a68baf 100644
--- a/drivers/dma-buf/heaps/system_heap.c
+++ b/drivers/dma-buf/heaps/system_heap.c
@@ -3,7 +3,11 @@
* DMABUF System heap exporter
*
* Copyright (C) 2011 Google, Inc.
- * Copyright (C) 2019 Linaro Ltd.
+ * Copyright (C) 2019, 2020 Linaro Ltd.
+ *
+ * Portions based off of Andrew Davis' SRAM heap:
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+ * Andrew F. Davis <afd@ti.com>
*/
#include <linux/dma-buf.h>
@@ -15,87 +19,404 @@
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
-#include <linux/sched/signal.h>
-#include <asm/page.h>
+#include <linux/vmalloc.h>
+
+static struct dma_heap *sys_heap;
+
+struct system_heap_buffer {
+ struct dma_heap *heap;
+ struct list_head attachments;
+ struct mutex lock;
+ unsigned long len;
+ struct sg_table sg_table;
+ int vmap_cnt;
+ void *vaddr;
+};
+
+struct dma_heap_attachment {
+ struct device *dev;
+ struct sg_table *table;
+ struct list_head list;
+ bool mapped;
+};
+
+#define HIGH_ORDER_GFP (((GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN \
+ | __GFP_NORETRY) & ~__GFP_RECLAIM) \
+ | __GFP_COMP)
+#define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO | __GFP_COMP)
+static gfp_t order_flags[] = {HIGH_ORDER_GFP, LOW_ORDER_GFP, LOW_ORDER_GFP};
+/*
+ * The selection of the orders used for allocation (1MB, 64K, 4K) is designed
+ * to match the sizes often found in IOMMUs. Using order 4 pages instead
+ * of order 0 pages can significantly improve the performance of many IOMMUs
+ * by reducing TLB pressure and time spent updating page tables.
+ */
+static const unsigned int orders[] = {8, 4, 0};
+#define NUM_ORDERS ARRAY_SIZE(orders)
+
+static struct sg_table *dup_sg_table(struct sg_table *table)
+{
+ struct sg_table *new_table;
+ int ret, i;
+ struct scatterlist *sg, *new_sg;
+
+ new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
+ if (!new_table)
+ return ERR_PTR(-ENOMEM);
+
+ ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL);
+ if (ret) {
+ kfree(new_table);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ new_sg = new_table->sgl;
+ for_each_sgtable_sg(table, sg, i) {
+ sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset);
+ new_sg = sg_next(new_sg);
+ }
+
+ return new_table;
+}
+
+static int system_heap_attach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attachment)
+{
+ struct system_heap_buffer *buffer = dmabuf->priv;
+ struct dma_heap_attachment *a;
+ struct sg_table *table;
+
+ a = kzalloc(sizeof(*a), GFP_KERNEL);
+ if (!a)
+ return -ENOMEM;
+
+ table = dup_sg_table(&buffer->sg_table);
+ if (IS_ERR(table)) {
+ kfree(a);
+ return -ENOMEM;
+ }
+
+ a->table = table;
+ a->dev = attachment->dev;
+ INIT_LIST_HEAD(&a->list);
+ a->mapped = false;
+
+ attachment->priv = a;
+
+ mutex_lock(&buffer->lock);
+ list_add(&a->list, &buffer->attachments);
+ mutex_unlock(&buffer->lock);
+
+ return 0;
+}
+
+static void system_heap_detach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attachment)
+{
+ struct system_heap_buffer *buffer = dmabuf->priv;
+ struct dma_heap_attachment *a = attachment->priv;
+
+ mutex_lock(&buffer->lock);
+ list_del(&a->list);
+ mutex_unlock(&buffer->lock);
+
+ sg_free_table(a->table);
+ kfree(a->table);
+ kfree(a);
+}
+
+static struct sg_table *system_heap_map_dma_buf(struct dma_buf_attachment *attachment,
+ enum dma_data_direction direction)
+{
+ struct dma_heap_attachment *a = attachment->priv;
+ struct sg_table *table = a->table;
+ int ret;
+
+ ret = dma_map_sgtable(attachment->dev, table, direction, 0);
+ if (ret)
+ return ERR_PTR(ret);
+
+ a->mapped = true;
+ return table;
+}
+
+static void system_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
+ struct sg_table *table,
+ enum dma_data_direction direction)
+{
+ struct dma_heap_attachment *a = attachment->priv;
+
+ a->mapped = false;
+ dma_unmap_sgtable(attachment->dev, table, direction, 0);
+}
+
+static int system_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
+ enum dma_data_direction direction)
+{
+ struct system_heap_buffer *buffer = dmabuf->priv;
+ struct dma_heap_attachment *a;
+
+ mutex_lock(&buffer->lock);
+
+ if (buffer->vmap_cnt)
+ invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);
+
+ list_for_each_entry(a, &buffer->attachments, list) {
+ if (!a->mapped)
+ continue;
+ dma_sync_sgtable_for_cpu(a->dev, a->table, direction);
+ }
+ mutex_unlock(&buffer->lock);
+
+ return 0;
+}
+
+static int system_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
+ enum dma_data_direction direction)
+{
+ struct system_heap_buffer *buffer = dmabuf->priv;
+ struct dma_heap_attachment *a;
+
+ mutex_lock(&buffer->lock);
+
+ if (buffer->vmap_cnt)
+ flush_kernel_vmap_range(buffer->vaddr, buffer->len);
-#include "heap-helpers.h"
+ list_for_each_entry(a, &buffer->attachments, list) {
+ if (!a->mapped)
+ continue;
+ dma_sync_sgtable_for_device(a->dev, a->table, direction);
+ }
+ mutex_unlock(&buffer->lock);
+
+ return 0;
+}
+
+static int system_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
+{
+ struct system_heap_buffer *buffer = dmabuf->priv;
+ struct sg_table *table = &buffer->sg_table;
+ unsigned long addr = vma->vm_start;
+ struct sg_page_iter piter;
+ int ret;
+
+ for_each_sgtable_page(table, &piter, vma->vm_pgoff) {
+ struct page *page = sg_page_iter_page(&piter);
+
+ ret = remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE,
+ vma->vm_page_prot);
+ if (ret)
+ return ret;
+ addr += PAGE_SIZE;
+ if (addr >= vma->vm_end)
+ return 0;
+ }
+ return 0;
+}
+
+static void *system_heap_do_vmap(struct system_heap_buffer *buffer)
+{
+ struct sg_table *table = &buffer->sg_table;
+ int npages = PAGE_ALIGN(buffer->len) / PAGE_SIZE;
+ struct page **pages = vmalloc(sizeof(struct page *) * npages);
+ struct page **tmp = pages;
+ struct sg_page_iter piter;
+ void *vaddr;
+
+ if (!pages)
+ return ERR_PTR(-ENOMEM);
+
+ for_each_sgtable_page(table, &piter, 0) {
+ WARN_ON(tmp - pages >= npages);
+ *tmp++ = sg_page_iter_page(&piter);
+ }
+
+ vaddr = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
+ vfree(pages);
+
+ if (!vaddr)
+ return ERR_PTR(-ENOMEM);
+
+ return vaddr;
+}
+
+static int system_heap_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
+{
+ struct system_heap_buffer *buffer = dmabuf->priv;
+ void *vaddr;
+ int ret = 0;
+
+ mutex_lock(&buffer->lock);
+ if (buffer->vmap_cnt) {
+ buffer->vmap_cnt++;
+ dma_buf_map_set_vaddr(map, buffer->vaddr);
+ goto out;
+ }
+
+ vaddr = system_heap_do_vmap(buffer);
+ if (IS_ERR(vaddr)) {
+ ret = PTR_ERR(vaddr);
+ goto out;
+ }
+
+ buffer->vaddr = vaddr;
+ buffer->vmap_cnt++;
+ dma_buf_map_set_vaddr(map, buffer->vaddr);
+out:
+ mutex_unlock(&buffer->lock);
+
+ return ret;
+}
-struct dma_heap *sys_heap;
+static void system_heap_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
+{
+ struct system_heap_buffer *buffer = dmabuf->priv;
-static void system_heap_free(struct heap_helper_buffer *buffer)
+ mutex_lock(&buffer->lock);
+ if (!--buffer->vmap_cnt) {
+ vunmap(buffer->vaddr);
+ buffer->vaddr = NULL;
+ }
+ mutex_unlock(&buffer->lock);
+ dma_buf_map_clear(map);
+}
+
+static void system_heap_dma_buf_release(struct dma_buf *dmabuf)
{
- pgoff_t pg;
+ struct system_heap_buffer *buffer = dmabuf->priv;
+ struct sg_table *table;
+ struct scatterlist *sg;
+ int i;
+
+ table = &buffer->sg_table;
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ struct page *page = sg_page(sg);
- for (pg = 0; pg < buffer->pagecount; pg++)
- __free_page(buffer->pages[pg]);
- kfree(buffer->pages);
+ __free_pages(page, compound_order(page));
+ }
+ sg_free_table(table);
kfree(buffer);
}
+static const struct dma_buf_ops system_heap_buf_ops = {
+ .attach = system_heap_attach,
+ .detach = system_heap_detach,
+ .map_dma_buf = system_heap_map_dma_buf,
+ .unmap_dma_buf = system_heap_unmap_dma_buf,
+ .begin_cpu_access = system_heap_dma_buf_begin_cpu_access,
+ .end_cpu_access = system_heap_dma_buf_end_cpu_access,
+ .mmap = system_heap_mmap,
+ .vmap = system_heap_vmap,
+ .vunmap = system_heap_vunmap,
+ .release = system_heap_dma_buf_release,
+};
+
+static struct page *alloc_largest_available(unsigned long size,
+ unsigned int max_order)
+{
+ struct page *page;
+ int i;
+
+ for (i = 0; i < NUM_ORDERS; i++) {
+ if (size < (PAGE_SIZE << orders[i]))
+ continue;
+ if (max_order < orders[i])
+ continue;
+
+ page = alloc_pages(order_flags[i], orders[i]);
+ if (!page)
+ continue;
+ return page;
+ }
+ return NULL;
+}
+
static int system_heap_allocate(struct dma_heap *heap,
unsigned long len,
unsigned long fd_flags,
unsigned long heap_flags)
{
- struct heap_helper_buffer *helper_buffer;
+ struct system_heap_buffer *buffer;
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+ unsigned long size_remaining = len;
+ unsigned int max_order = orders[0];
struct dma_buf *dmabuf;
- int ret = -ENOMEM;
- pgoff_t pg;
+ struct sg_table *table;
+ struct scatterlist *sg;
+ struct list_head pages;
+ struct page *page, *tmp_page;
+ int i, ret = -ENOMEM;
- helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL);
- if (!helper_buffer)
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer)
return -ENOMEM;
- init_heap_helper_buffer(helper_buffer, system_heap_free);
- helper_buffer->heap = heap;
- helper_buffer->size = len;
-
- helper_buffer->pagecount = len / PAGE_SIZE;
- helper_buffer->pages = kmalloc_array(helper_buffer->pagecount,
- sizeof(*helper_buffer->pages),
- GFP_KERNEL);
- if (!helper_buffer->pages) {
- ret = -ENOMEM;
- goto err0;
- }
+ INIT_LIST_HEAD(&buffer->attachments);
+ mutex_init(&buffer->lock);
+ buffer->heap = heap;
+ buffer->len = len;
- for (pg = 0; pg < helper_buffer->pagecount; pg++) {
+ INIT_LIST_HEAD(&pages);
+ i = 0;
+ while (size_remaining > 0) {
/*
* Avoid trying to allocate memory if the process
- * has been killed by by SIGKILL
+ * has been killed by SIGKILL
*/
if (fatal_signal_pending(current))
- goto err1;
+ goto free_buffer;
+
+ page = alloc_largest_available(size_remaining, max_order);
+ if (!page)
+ goto free_buffer;
+
+ list_add_tail(&page->lru, &pages);
+ size_remaining -= page_size(page);
+ max_order = compound_order(page);
+ i++;
+ }
+
+ table = &buffer->sg_table;
+ if (sg_alloc_table(table, i, GFP_KERNEL))
+ goto free_buffer;
- helper_buffer->pages[pg] = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (!helper_buffer->pages[pg])
- goto err1;
+ sg = table->sgl;
+ list_for_each_entry_safe(page, tmp_page, &pages, lru) {
+ sg_set_page(sg, page, page_size(page), 0);
+ sg = sg_next(sg);
+ list_del(&page->lru);
}
/* create the dmabuf */
- dmabuf = heap_helper_export_dmabuf(helper_buffer, fd_flags);
+ exp_info.ops = &system_heap_buf_ops;
+ exp_info.size = buffer->len;
+ exp_info.flags = fd_flags;
+ exp_info.priv = buffer;
+ dmabuf = dma_buf_export(&exp_info);
if (IS_ERR(dmabuf)) {
ret = PTR_ERR(dmabuf);
- goto err1;
+ goto free_pages;
}
- helper_buffer->dmabuf = dmabuf;
-
ret = dma_buf_fd(dmabuf, fd_flags);
if (ret < 0) {
dma_buf_put(dmabuf);
/* just return, as put will call release and that will free */
return ret;
}
-
return ret;
-err1:
- while (pg > 0)
- __free_page(helper_buffer->pages[--pg]);
- kfree(helper_buffer->pages);
-err0:
- kfree(helper_buffer);
+free_pages:
+ for_each_sgtable_sg(table, sg, i) {
+ struct page *p = sg_page(sg);
+
+ __free_pages(p, compound_order(p));
+ }
+ sg_free_table(table);
+free_buffer:
+ list_for_each_entry_safe(page, tmp_page, &pages, lru)
+ __free_pages(page, compound_order(page));
+ kfree(buffer);
return ret;
}
@@ -107,7 +428,6 @@ static const struct dma_heap_ops system_heap_ops = {
static int system_heap_create(void)
{
struct dma_heap_export_info exp_info;
- int ret = 0;
exp_info.name = "system";
exp_info.ops = &system_heap_ops;
@@ -115,9 +435,9 @@ static int system_heap_create(void)
sys_heap = dma_heap_add(&exp_info);
if (IS_ERR(sys_heap))
- ret = PTR_ERR(sys_heap);
+ return PTR_ERR(sys_heap);
- return ret;
+ return 0;
}
module_init(system_heap_create);
MODULE_LICENSE("GPL v2");
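
For reference, buffers from the reworked heaps reach userspace through the dma-heap character device: DMA_HEAP_IOCTL_ALLOC (from <linux/dma-heap.h>) returns a dma-buf fd backed by cma_heap_allocate()/system_heap_allocate(). A minimal sketch, assuming the system heap is exposed at /dev/dma_heap/system and with error handling abbreviated:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/dma-heap.h>

int main(void)
{
	struct dma_heap_allocation_data alloc = {
		.len = 64 * 1024,		/* 64 KiB buffer */
		.fd_flags = O_RDWR | O_CLOEXEC,	/* flags for the new dma-buf fd */
	};
	int heap_fd = open("/dev/dma_heap/system", O_RDWR | O_CLOEXEC);

	if (heap_fd < 0)
		return 1;

	if (ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &alloc) < 0) {
		close(heap_fd);
		return 1;
	}

	/* alloc.fd is a dma-buf fd: mmap() it or pass it to a driver. */
	printf("allocated dma-buf fd %u\n", alloc.fd);
	close(alloc.fd);
	close(heap_fd);

	return 0;
}
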
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 90284ffda58a..d242c7632621 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -296,6 +296,16 @@ config INTEL_IDXD
If unsure, say N.
+# Config symbol that collects all the dependencies that are necessary to
+# support shared virtual memory for the devices supported by idxd.
+config INTEL_IDXD_SVM
+ bool "Accelerator Shared Virtual Memory Support"
+ depends on INTEL_IDXD
+ depends on INTEL_IOMMU_SVM
+ depends on PCI_PRI
+ depends on PCI_PASID
+ depends on PCI_IOV
+
config INTEL_IOATDMA
tristate "Intel I/OAT DMA support"
depends on PCI && X86_64
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 3b53115db268..fe45ad5d06c4 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -30,7 +30,24 @@
#define AT_XDMAC_FIFO_SZ(i) (((i) >> 5) & 0x7FF) /* Number of Bytes */
#define AT_XDMAC_NB_REQ(i) ((((i) >> 16) & 0x3F) + 1) /* Number of Peripheral Requests Minus One */
#define AT_XDMAC_GCFG 0x04 /* Global Configuration Register */
+#define AT_XDMAC_WRHP(i) (((i) & 0xF) << 4)
+#define AT_XDMAC_WRMP(i) (((i) & 0xF) << 8)
+#define AT_XDMAC_WRLP(i) (((i) & 0xF) << 12)
+#define AT_XDMAC_RDHP(i) (((i) & 0xF) << 16)
+#define AT_XDMAC_RDMP(i) (((i) & 0xF) << 20)
+#define AT_XDMAC_RDLP(i) (((i) & 0xF) << 24)
+#define AT_XDMAC_RDSG(i) (((i) & 0xF) << 28)
+#define AT_XDMAC_GCFG_M2M (AT_XDMAC_RDLP(0xF) | AT_XDMAC_WRLP(0xF))
+#define AT_XDMAC_GCFG_P2M (AT_XDMAC_RDSG(0x1) | AT_XDMAC_RDHP(0x3) | \
+ AT_XDMAC_WRHP(0x5))
#define AT_XDMAC_GWAC 0x08 /* Global Weighted Arbiter Configuration Register */
+#define AT_XDMAC_PW0(i) (((i) & 0xF) << 0)
+#define AT_XDMAC_PW1(i) (((i) & 0xF) << 4)
+#define AT_XDMAC_PW2(i) (((i) & 0xF) << 8)
+#define AT_XDMAC_PW3(i) (((i) & 0xF) << 12)
+#define AT_XDMAC_GWAC_M2M 0
+#define AT_XDMAC_GWAC_P2M (AT_XDMAC_PW0(0xF) | AT_XDMAC_PW2(0xF))
+
#define AT_XDMAC_GIE 0x0C /* Global Interrupt Enable Register */
#define AT_XDMAC_GID 0x10 /* Global Interrupt Disable Register */
#define AT_XDMAC_GIM 0x14 /* Global Interrupt Mask Register */
@@ -38,13 +55,6 @@
#define AT_XDMAC_GE 0x1C /* Global Channel Enable Register */
#define AT_XDMAC_GD 0x20 /* Global Channel Disable Register */
#define AT_XDMAC_GS 0x24 /* Global Channel Status Register */
-#define AT_XDMAC_GRS 0x28 /* Global Channel Read Suspend Register */
-#define AT_XDMAC_GWS 0x2C /* Global Write Suspend Register */
-#define AT_XDMAC_GRWS 0x30 /* Global Channel Read Write Suspend Register */
-#define AT_XDMAC_GRWR 0x34 /* Global Channel Read Write Resume Register */
-#define AT_XDMAC_GSWR 0x38 /* Global Channel Software Request Register */
-#define AT_XDMAC_GSWS 0x3C /* Global channel Software Request Status Register */
-#define AT_XDMAC_GSWF 0x40 /* Global Channel Software Flush Request Register */
#define AT_XDMAC_VERSION 0xFFC /* XDMAC Version Register */
/* Channel relative registers offsets */
@@ -150,8 +160,6 @@
#define AT_XDMAC_CSUS 0x30 /* Channel Source Microblock Stride */
#define AT_XDMAC_CDUS 0x34 /* Channel Destination Microblock Stride */
-#define AT_XDMAC_CHAN_REG_BASE 0x50 /* Channel registers base address */
-
/* Microblock control members */
#define AT_XDMAC_MBR_UBC_UBLEN_MAX 0xFFFFFFUL /* Maximum Microblock Length */
#define AT_XDMAC_MBR_UBC_NDE (0x1 << 24) /* Next Descriptor Enable */
@@ -179,6 +187,29 @@ enum atc_status {
AT_XDMAC_CHAN_IS_PAUSED,
};
+struct at_xdmac_layout {
+ /* Global Channel Read Suspend Register */
+ u8 grs;
+ /* Global Write Suspend Register */
+ u8 gws;
+ /* Global Channel Read Write Suspend Register */
+ u8 grws;
+ /* Global Channel Read Write Resume Register */
+ u8 grwr;
+ /* Global Channel Software Request Register */
+ u8 gswr;
+ /* Global channel Software Request Status Register */
+ u8 gsws;
+ /* Global Channel Software Flush Request Register */
+ u8 gswf;
+ /* Channel reg base */
+ u8 chan_cc_reg_base;
+ /* Source/Destination Interface must be specified or not */
+ bool sdif;
+ /* AXI queue priority configuration supported */
+ bool axi_config;
+};
+
/* ----- Channels ----- */
struct at_xdmac_chan {
struct dma_chan chan;
@@ -212,6 +243,7 @@ struct at_xdmac {
struct clk *clk;
u32 save_gim;
struct dma_pool *at_xdmac_desc_pool;
+ const struct at_xdmac_layout *layout;
struct at_xdmac_chan chan[];
};
@@ -244,9 +276,35 @@ struct at_xdmac_desc {
struct list_head xfer_node;
} __aligned(sizeof(u64));
+static const struct at_xdmac_layout at_xdmac_sama5d4_layout = {
+ .grs = 0x28,
+ .gws = 0x2C,
+ .grws = 0x30,
+ .grwr = 0x34,
+ .gswr = 0x38,
+ .gsws = 0x3C,
+ .gswf = 0x40,
+ .chan_cc_reg_base = 0x50,
+ .sdif = true,
+ .axi_config = false,
+};
+
+static const struct at_xdmac_layout at_xdmac_sama7g5_layout = {
+ .grs = 0x30,
+ .gws = 0x38,
+ .grws = 0x40,
+ .grwr = 0x44,
+ .gswr = 0x48,
+ .gsws = 0x4C,
+ .gswf = 0x50,
+ .chan_cc_reg_base = 0x60,
+ .sdif = false,
+ .axi_config = true,
+};
+
static inline void __iomem *at_xdmac_chan_reg_base(struct at_xdmac *atxdmac, unsigned int chan_nb)
{
- return atxdmac->regs + (AT_XDMAC_CHAN_REG_BASE + chan_nb * 0x40);
+ return atxdmac->regs + (atxdmac->layout->chan_cc_reg_base + chan_nb * 0x40);
}
#define at_xdmac_read(atxdmac, reg) readl_relaxed((atxdmac)->regs + (reg))
@@ -345,8 +403,10 @@ static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
first->active_xfer = true;
/* Tell xdmac where to get the first descriptor. */
- reg = AT_XDMAC_CNDA_NDA(first->tx_dma_desc.phys)
- | AT_XDMAC_CNDA_NDAIF(atchan->memif);
+ reg = AT_XDMAC_CNDA_NDA(first->tx_dma_desc.phys);
+ if (atxdmac->layout->sdif)
+ reg |= AT_XDMAC_CNDA_NDAIF(atchan->memif);
+
at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, reg);
/*
@@ -541,6 +601,7 @@ static int at_xdmac_compute_chan_conf(struct dma_chan *chan,
enum dma_transfer_direction direction)
{
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
+ struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
int csize, dwidth;
if (direction == DMA_DEV_TO_MEM) {
@@ -548,12 +609,14 @@ static int at_xdmac_compute_chan_conf(struct dma_chan *chan,
AT91_XDMAC_DT_PERID(atchan->perid)
| AT_XDMAC_CC_DAM_INCREMENTED_AM
| AT_XDMAC_CC_SAM_FIXED_AM
- | AT_XDMAC_CC_DIF(atchan->memif)
- | AT_XDMAC_CC_SIF(atchan->perif)
| AT_XDMAC_CC_SWREQ_HWR_CONNECTED
| AT_XDMAC_CC_DSYNC_PER2MEM
| AT_XDMAC_CC_MBSIZE_SIXTEEN
| AT_XDMAC_CC_TYPE_PER_TRAN;
+ if (atxdmac->layout->sdif)
+ atchan->cfg |= AT_XDMAC_CC_DIF(atchan->memif) |
+ AT_XDMAC_CC_SIF(atchan->perif);
+
csize = ffs(atchan->sconfig.src_maxburst) - 1;
if (csize < 0) {
dev_err(chan2dev(chan), "invalid src maxburst value\n");
@@ -571,12 +634,14 @@ static int at_xdmac_compute_chan_conf(struct dma_chan *chan,
AT91_XDMAC_DT_PERID(atchan->perid)
| AT_XDMAC_CC_DAM_FIXED_AM
| AT_XDMAC_CC_SAM_INCREMENTED_AM
- | AT_XDMAC_CC_DIF(atchan->perif)
- | AT_XDMAC_CC_SIF(atchan->memif)
| AT_XDMAC_CC_SWREQ_HWR_CONNECTED
| AT_XDMAC_CC_DSYNC_MEM2PER
| AT_XDMAC_CC_MBSIZE_SIXTEEN
| AT_XDMAC_CC_TYPE_PER_TRAN;
+ if (atxdmac->layout->sdif)
+ atchan->cfg |= AT_XDMAC_CC_DIF(atchan->perif) |
+ AT_XDMAC_CC_SIF(atchan->memif);
+
csize = ffs(atchan->sconfig.dst_maxburst) - 1;
if (csize < 0) {
dev_err(chan2dev(chan), "invalid src maxburst value\n");
@@ -866,10 +931,12 @@ at_xdmac_interleaved_queue_desc(struct dma_chan *chan,
* ERRATA: Even if useless for memory transfers, the PERID must not
* match that of another channel; otherwise it could lead to a
* spurious flag status.
+ * In the SAMA7G5x case the SIF and DIF fields are no longer used,
+ * so there is no need to set the SIF/DIF interfaces here.
+ * On SAMA5D4x and SAMA5D2x the SIF and DIF fields are already
+ * configured as zero.
*/
- u32 chan_cc = AT_XDMAC_CC_PERID(0x3f)
- | AT_XDMAC_CC_DIF(0)
- | AT_XDMAC_CC_SIF(0)
+ u32 chan_cc = AT_XDMAC_CC_PERID(0x7f)
| AT_XDMAC_CC_MBSIZE_SIXTEEN
| AT_XDMAC_CC_TYPE_MEM_TRAN;
@@ -1048,12 +1115,14 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
* ERRATA: Even if useless for memory transfers, the PERID must not
* match that of another channel; otherwise it could lead to a
* spurious flag status.
+ * In the SAMA7G5x case the SIF and DIF fields are no longer used,
+ * so there is no need to set the SIF/DIF interfaces here.
+ * On SAMA5D4x and SAMA5D2x the SIF and DIF fields are already
+ * configured as zero.
*/
- u32 chan_cc = AT_XDMAC_CC_PERID(0x3f)
+ u32 chan_cc = AT_XDMAC_CC_PERID(0x7f)
| AT_XDMAC_CC_DAM_INCREMENTED_AM
| AT_XDMAC_CC_SAM_INCREMENTED_AM
- | AT_XDMAC_CC_DIF(0)
- | AT_XDMAC_CC_SIF(0)
| AT_XDMAC_CC_MBSIZE_SIXTEEN
| AT_XDMAC_CC_TYPE_MEM_TRAN;
unsigned long irqflags;
@@ -1154,12 +1223,14 @@ static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan,
* ERRATA: Even if useless for memory transfers, the PERID must not
* match that of another channel; otherwise it could lead to a
* spurious flag status.
+ * In the SAMA7G5x case the SIF and DIF fields are no longer used,
+ * so there is no need to set the SIF/DIF interfaces here.
+ * On SAMA5D4x and SAMA5D2x the SIF and DIF fields are already
+ * configured as zero.
*/
- u32 chan_cc = AT_XDMAC_CC_PERID(0x3f)
+ u32 chan_cc = AT_XDMAC_CC_PERID(0x7f)
| AT_XDMAC_CC_DAM_UBS_AM
| AT_XDMAC_CC_SAM_INCREMENTED_AM
- | AT_XDMAC_CC_DIF(0)
- | AT_XDMAC_CC_SIF(0)
| AT_XDMAC_CC_MBSIZE_SIXTEEN
| AT_XDMAC_CC_MEMSET_HW_MODE
| AT_XDMAC_CC_TYPE_MEM_TRAN;
@@ -1438,7 +1509,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC;
value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM;
if ((desc->lld.mbr_cfg & mask) == value) {
- at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask);
+ at_xdmac_write(atxdmac, atxdmac->layout->gswf, atchan->mask);
while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
cpu_relax();
}
@@ -1496,7 +1567,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
* FIFO flush ensures that data are really written.
*/
if ((desc->lld.mbr_cfg & mask) == value) {
- at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask);
+ at_xdmac_write(atxdmac, atxdmac->layout->gswf, atchan->mask);
while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
cpu_relax();
}
@@ -1761,7 +1832,7 @@ static int at_xdmac_device_pause(struct dma_chan *chan)
return 0;
spin_lock_irqsave(&atchan->lock, flags);
- at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask);
+ at_xdmac_write(atxdmac, atxdmac->layout->grws, atchan->mask);
while (at_xdmac_chan_read(atchan, AT_XDMAC_CC)
& (AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP))
cpu_relax();
@@ -1784,7 +1855,7 @@ static int at_xdmac_device_resume(struct dma_chan *chan)
return 0;
}
- at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask);
+ at_xdmac_write(atxdmac, atxdmac->layout->grwr, atchan->mask);
clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
spin_unlock_irqrestore(&atchan->lock, flags);
@@ -1947,6 +2018,30 @@ static int atmel_xdmac_resume(struct device *dev)
}
#endif /* CONFIG_PM_SLEEP */
+static void at_xdmac_axi_config(struct platform_device *pdev)
+{
+ struct at_xdmac *atxdmac = (struct at_xdmac *)platform_get_drvdata(pdev);
+ bool dev_m2m = false;
+ u32 dma_requests;
+
+ if (!atxdmac->layout->axi_config)
+ return; /* Not supported */
+
+ if (!of_property_read_u32(pdev->dev.of_node, "dma-requests",
+ &dma_requests)) {
+ dev_info(&pdev->dev, "controller in mem2mem mode.\n");
+ dev_m2m = true;
+ }
+
+ if (dev_m2m) {
+ at_xdmac_write(atxdmac, AT_XDMAC_GCFG, AT_XDMAC_GCFG_M2M);
+ at_xdmac_write(atxdmac, AT_XDMAC_GWAC, AT_XDMAC_GWAC_M2M);
+ } else {
+ at_xdmac_write(atxdmac, AT_XDMAC_GCFG, AT_XDMAC_GCFG_P2M);
+ at_xdmac_write(atxdmac, AT_XDMAC_GWAC, AT_XDMAC_GWAC_P2M);
+ }
+}
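
The mode selection above hinges on of_property_read_u32() returning 0 when the optional "dma-requests" property is present. A hedged sketch of that decision with hypothetical register values (the real AT_XDMAC_GCFG_* and AT_XDMAC_GWAC_* encodings live in the driver header, not in this hunk):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stand-ins for the AT_XDMAC_GCFG_* and AT_XDMAC_GWAC_*
 * encodings; the real values are defined in the driver, not here. */
#define GCFG_M2M 0x1
#define GWAC_M2M 0x2
#define GCFG_P2M 0x3
#define GWAC_P2M 0x4

struct axi_cfg { uint32_t gcfg, gwac; };

/* of_property_read_u32() returns 0 when the property exists, so the
 * mere presence of "dma-requests" selects the mem2mem tuning. */
static struct axi_cfg pick_axi_cfg(bool has_dma_requests)
{
	if (has_dma_requests)
		return (struct axi_cfg){ GCFG_M2M, GWAC_M2M };
	return (struct axi_cfg){ GCFG_P2M, GWAC_P2M };
}

int main(void)
{
	assert(pick_axi_cfg(true).gcfg == GCFG_M2M);
	assert(pick_axi_cfg(false).gwac == GWAC_P2M);
	return 0;
}
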
+
static int at_xdmac_probe(struct platform_device *pdev)
{
struct at_xdmac *atxdmac;
@@ -1986,6 +2081,10 @@ static int at_xdmac_probe(struct platform_device *pdev)
atxdmac->regs = base;
atxdmac->irq = irq;
+ atxdmac->layout = of_device_get_match_data(&pdev->dev);
+ if (!atxdmac->layout)
+ return -ENODEV;
+
atxdmac->clk = devm_clk_get(&pdev->dev, "dma_clk");
if (IS_ERR(atxdmac->clk)) {
dev_err(&pdev->dev, "can't get dma_clk\n");
@@ -2087,6 +2186,8 @@ static int at_xdmac_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "%d channels, mapped at 0x%p\n",
nr_channels, atxdmac->regs);
+ at_xdmac_axi_config(pdev);
+
return 0;
err_dma_unregister:
@@ -2128,6 +2229,10 @@ static const struct dev_pm_ops atmel_xdmac_dev_pm_ops = {
static const struct of_device_id atmel_xdmac_dt_ids[] = {
{
.compatible = "atmel,sama5d4-dma",
+ .data = &at_xdmac_sama5d4_layout,
+ }, {
+ .compatible = "microchip,sama7g5-dma",
+ .data = &at_xdmac_sama7g5_layout,
}, {
/* sentinel */
}
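
Both compatibles above hand a layout back through of_device_get_match_data(), and probe bails with -ENODEV when nothing matched. A stand-alone sketch of that match-table contract (stub table and lookup, not the OF API):

#include <assert.h>
#include <stddef.h>
#include <string.h>

struct match { const char *compatible; const void *data; };

static int layout_a, layout_b; /* stand-ins for the layout structs */

static const struct match table[] = {
	{ "atmel,sama5d4-dma", &layout_a },
	{ "microchip,sama7g5-dma", &layout_b },
	{ NULL, NULL } /* sentinel, like the of_device_id table */
};

/* Returns the .data of the first matching entry, NULL otherwise --
 * the same contract the probe code depends on before returning -ENODEV. */
static const void *match_data(const char *compatible)
{
	for (const struct match *m = table; m->compatible; m++)
		if (!strcmp(m->compatible, compatible))
			return m->data;
	return NULL;
}

int main(void)
{
	assert(match_data("microchip,sama7g5-dma") == &layout_b);
	assert(match_data("unknown,dma") == NULL);
	return 0;
}
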
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index a608efaa435f..612d353648cf 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -1044,7 +1044,7 @@ static struct platform_driver jz4780_dma_driver = {
.remove = jz4780_dma_remove,
.driver = {
.name = "jz4780-dma",
- .of_match_table = of_match_ptr(jz4780_dma_dt_match),
+ .of_match_table = jz4780_dma_dt_match,
},
};
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index a3a172173e34..f696246f57fd 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -573,6 +573,7 @@ static int dmatest_func(void *data)
struct dmatest_params *params;
struct dma_chan *chan;
struct dma_device *dev;
+ struct device *dma_dev;
unsigned int error_count;
unsigned int failed_tests = 0;
unsigned int total_tests = 0;
@@ -606,6 +607,8 @@ static int dmatest_func(void *data)
params = &info->params;
chan = thread->chan;
dev = chan->device;
+ dma_dev = dmaengine_get_dma_device(chan);
+
src = &thread->src;
dst = &thread->dst;
if (thread->type == DMA_MEMCPY) {
@@ -730,7 +733,7 @@ static int dmatest_func(void *data)
filltime = ktime_add(filltime, diff);
}
- um = dmaengine_get_unmap_data(dev->dev, src->cnt + dst->cnt,
+ um = dmaengine_get_unmap_data(dma_dev, src->cnt + dst->cnt,
GFP_KERNEL);
if (!um) {
failed_tests++;
@@ -745,10 +748,10 @@ static int dmatest_func(void *data)
struct page *pg = virt_to_page(buf);
unsigned long pg_off = offset_in_page(buf);
- um->addr[i] = dma_map_page(dev->dev, pg, pg_off,
+ um->addr[i] = dma_map_page(dma_dev, pg, pg_off,
um->len, DMA_TO_DEVICE);
srcs[i] = um->addr[i] + src->off;
- ret = dma_mapping_error(dev->dev, um->addr[i]);
+ ret = dma_mapping_error(dma_dev, um->addr[i]);
if (ret) {
result("src mapping error", total_tests,
src->off, dst->off, len, ret);
@@ -763,9 +766,9 @@ static int dmatest_func(void *data)
struct page *pg = virt_to_page(buf);
unsigned long pg_off = offset_in_page(buf);
- dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len,
+ dsts[i] = dma_map_page(dma_dev, pg, pg_off, um->len,
DMA_BIDIRECTIONAL);
- ret = dma_mapping_error(dev->dev, dsts[i]);
+ ret = dma_mapping_error(dma_dev, dsts[i]);
if (ret) {
result("dst mapping error", total_tests,
src->off, dst->off, len, ret);
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
index 14c1ac26f866..e164f3295f5d 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
@@ -992,7 +992,7 @@ static struct platform_driver dw_driver = {
.remove = dw_remove,
.driver = {
.name = KBUILD_MODNAME,
- .of_match_table = of_match_ptr(dw_dma_of_id_table),
+ .of_match_table = dw_dma_of_id_table,
.pm = &dw_axi_dma_pm_ops,
},
};
diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
index b971505b8715..08d71dafa001 100644
--- a/drivers/dma/dw-edma/dw-edma-core.c
+++ b/drivers/dma/dw-edma/dw-edma-core.c
@@ -86,12 +86,12 @@ static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc)
if (desc->chunk) {
/* Create and add new element into the linked list */
- desc->chunks_alloc++;
- list_add_tail(&chunk->list, &desc->chunk->list);
if (!dw_edma_alloc_burst(chunk)) {
kfree(chunk);
return NULL;
}
+ desc->chunks_alloc++;
+ list_add_tail(&chunk->list, &desc->chunk->list);
} else {
/* List head */
chunk->burst = NULL;
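
The dw-edma reorder above follows a general rule: publish a node on a list only once every allocation it depends on has succeeded, so failure paths never have to unlink anything. A small stand-alone sketch of the pattern (illustrative types, not the dw-edma structures):

#include <stdlib.h>

struct node { struct node *next; void *payload; };

/* Returns 0 on success; on failure the list is untouched, so the
 * caller only has to free 'n' -- no unlinking required. */
static int alloc_and_link(struct node **head, size_t payload_sz)
{
	struct node *n = malloc(sizeof(*n));

	if (!n)
		return -1;
	n->payload = malloc(payload_sz);
	if (!n->payload) {
		free(n);           /* nothing was published yet */
		return -1;
	}
	n->next = *head;           /* link only after all allocs passed */
	*head = n;
	return 0;
}

int main(void)
{
	struct node *head = NULL;
	return alloc_and_link(&head, 64) ? 1 : 0;
}
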
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 7ab83fe601ed..19a23767533a 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -982,8 +982,11 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
dev_vdbg(chan2dev(chan), "%s\n", __func__);
+ pm_runtime_get_sync(dw->dma.dev);
+
/* ASSERT: channel is idle */
if (dma_readl(dw, CH_EN) & dwc->mask) {
+ pm_runtime_put_sync_suspend(dw->dma.dev);
dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
return -EIO;
}
@@ -1000,6 +1003,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
* We need controller-specific data to set up slave transfers.
*/
if (chan->private && !dw_dma_filter(chan, chan->private)) {
+ pm_runtime_put_sync_suspend(dw->dma.dev);
dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
return -EINVAL;
}
@@ -1043,6 +1047,8 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
if (!dw->in_use)
do_dw_dma_off(dw);
+ pm_runtime_put_sync_suspend(dw->dma.dev);
+
dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
}
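
The dw/core hunks keep the runtime-PM usage count balanced: one get when channel resources are allocated, a matching put on each early error return and in the free path. A self-checking sketch of that invariant, with a plain counter standing in for the pm_runtime calls:

#include <assert.h>

static int pm_refs; /* stand-in for the device's runtime-PM usage count */

static void pm_get(void) { pm_refs++; }
static void pm_put(void) { pm_refs--; }

/* Mirrors dwc_alloc_chan_resources(): take the reference first, then
 * drop it on every failure path before returning. */
static int alloc_resources(int channel_busy, int bad_private_data)
{
	pm_get();
	if (channel_busy) {
		pm_put();
		return -1;
	}
	if (bad_private_data) {
		pm_put();
		return -1;
	}
	return 0; /* success: the matching put happens in free_resources() */
}

static void free_resources(void) { pm_put(); }

int main(void)
{
	assert(alloc_resources(1, 0) < 0 && pm_refs == 0);
	assert(alloc_resources(0, 1) < 0 && pm_refs == 0);
	assert(alloc_resources(0, 0) == 0 && pm_refs == 1);
	free_resources();
	assert(pm_refs == 0);
	return 0;
}
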
diff --git a/drivers/dma/hisi_dma.c b/drivers/dma/hisi_dma.c
index e1a958ae7925..a259ee010e9b 100644
--- a/drivers/dma/hisi_dma.c
+++ b/drivers/dma/hisi_dma.c
@@ -431,9 +431,8 @@ static irqreturn_t hisi_dma_irq(int irq, void *data)
struct hisi_dma_dev *hdma_dev = chan->hdma_dev;
struct hisi_dma_desc *desc;
struct hisi_dma_cqe *cqe;
- unsigned long flags;
- spin_lock_irqsave(&chan->vc.lock, flags);
+ spin_lock(&chan->vc.lock);
desc = chan->desc;
cqe = chan->cq + chan->cq_head;
@@ -452,7 +451,7 @@ static irqreturn_t hisi_dma_irq(int irq, void *data)
chan->desc = NULL;
}
- spin_unlock_irqrestore(&chan->vc.lock, flags);
+ spin_unlock(&chan->vc.lock);
return IRQ_HANDLED;
}
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
index f5a84c846394..f4c07ad3be15 100644
--- a/drivers/dma/idma64.c
+++ b/drivers/dma/idma64.c
@@ -667,9 +667,7 @@ static int idma64_platform_remove(struct platform_device *pdev)
return idma64_remove(chip);
}
-#ifdef CONFIG_PM_SLEEP
-
-static int idma64_pm_suspend(struct device *dev)
+static int __maybe_unused idma64_pm_suspend(struct device *dev)
{
struct idma64_chip *chip = dev_get_drvdata(dev);
@@ -677,7 +675,7 @@ static int idma64_pm_suspend(struct device *dev)
return 0;
}
-static int idma64_pm_resume(struct device *dev)
+static int __maybe_unused idma64_pm_resume(struct device *dev)
{
struct idma64_chip *chip = dev_get_drvdata(dev);
@@ -685,8 +683,6 @@ static int idma64_pm_resume(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM_SLEEP */
-
static const struct dev_pm_ops idma64_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(idma64_pm_suspend, idma64_pm_resume)
};
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index c3976156db2f..0db9b82ed8cf 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -11,6 +11,7 @@
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/poll.h>
+#include <linux/iommu.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
#include "idxd.h"
@@ -27,12 +28,15 @@ struct idxd_cdev_context {
*/
static struct idxd_cdev_context ictx[IDXD_TYPE_MAX] = {
{ .name = "dsa" },
+ { .name = "iax" }
};
struct idxd_user_context {
struct idxd_wq *wq;
struct task_struct *task;
+ unsigned int pasid;
unsigned int flags;
+ struct iommu_sva *sva;
};
enum idxd_cdev_cleanup {
@@ -75,6 +79,8 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
struct idxd_wq *wq;
struct device *dev;
int rc = 0;
+ struct iommu_sva *sva;
+ unsigned int pasid;
wq = inode_wq(inode);
idxd = wq->idxd;
@@ -95,6 +101,34 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
ctx->wq = wq;
filp->private_data = ctx;
+
+ if (device_pasid_enabled(idxd)) {
+ sva = iommu_sva_bind_device(dev, current->mm, NULL);
+ if (IS_ERR(sva)) {
+ rc = PTR_ERR(sva);
+ dev_err(dev, "pasid allocation failed: %d\n", rc);
+ goto failed;
+ }
+
+ pasid = iommu_sva_get_pasid(sva);
+ if (pasid == IOMMU_PASID_INVALID) {
+ iommu_sva_unbind_device(sva);
+ rc = -EINVAL;
+ goto failed;
+ }
+
+ ctx->sva = sva;
+ ctx->pasid = pasid;
+
+ if (wq_dedicated(wq)) {
+ rc = idxd_wq_set_pasid(wq, pasid);
+ if (rc < 0) {
+ iommu_sva_unbind_device(sva);
+ dev_err(dev, "wq set pasid failed: %d\n", rc);
+ goto failed;
+ }
+ }
+ }
+
idxd_wq_get(wq);
mutex_unlock(&wq->wq_lock);
return 0;
@@ -111,13 +145,27 @@ static int idxd_cdev_release(struct inode *node, struct file *filep)
struct idxd_wq *wq = ctx->wq;
struct idxd_device *idxd = wq->idxd;
struct device *dev = &idxd->pdev->dev;
+ int rc;
dev_dbg(dev, "%s called\n", __func__);
filep->private_data = NULL;
/* Wait for in-flight operations to complete. */
- idxd_wq_drain(wq);
+ if (wq_shared(wq)) {
+ idxd_device_drain_pasid(idxd, ctx->pasid);
+ } else {
+ if (device_pasid_enabled(idxd)) {
+ /* The wq disable in the disable pasid function will drain the wq */
+ rc = idxd_wq_disable_pasid(wq);
+ if (rc < 0)
+ dev_err(dev, "wq disable pasid failed.\n");
+ } else {
+ idxd_wq_drain(wq);
+ }
+ }
+ if (ctx->sva)
+ iommu_sva_unbind_device(ctx->sva);
kfree(ctx);
mutex_lock(&wq->wq_lock);
idxd_wq_put(wq);
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index 663344987e3f..95f94a3ed6be 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -131,6 +131,8 @@ int idxd_wq_alloc_resources(struct idxd_wq *wq)
struct idxd_device *idxd = wq->idxd;
struct device *dev = &idxd->pdev->dev;
int rc, num_descs, i;
+ int align;
+ u64 tmp;
if (wq->type != IDXD_WQT_KERNEL)
return 0;
@@ -142,14 +144,27 @@ int idxd_wq_alloc_resources(struct idxd_wq *wq)
if (rc < 0)
return rc;
- wq->compls_size = num_descs * sizeof(struct dsa_completion_record);
- wq->compls = dma_alloc_coherent(dev, wq->compls_size,
- &wq->compls_addr, GFP_KERNEL);
- if (!wq->compls) {
+ if (idxd->type == IDXD_TYPE_DSA)
+ align = 32;
+ else if (idxd->type == IDXD_TYPE_IAX)
+ align = 64;
+ else
+ return -ENODEV;
+
+ wq->compls_size = num_descs * idxd->compl_size + align;
+ wq->compls_raw = dma_alloc_coherent(dev, wq->compls_size,
+ &wq->compls_addr_raw, GFP_KERNEL);
+ if (!wq->compls_raw) {
rc = -ENOMEM;
goto fail_alloc_compls;
}
+ /* Adjust alignment */
+ wq->compls_addr = (wq->compls_addr_raw + (align - 1)) & ~(align - 1);
+ tmp = (u64)wq->compls_raw;
+ tmp = (tmp + (align - 1)) & ~(align - 1);
+ wq->compls = (struct dsa_completion_record *)tmp;
+
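
The mask-based round-up above only works for power-of-two alignments (32-byte DSA records, 64-byte IAX records). A tiny self-checking sketch of the arithmetic:

#include <assert.h>
#include <stdint.h>

/* Round addr up to the next multiple of align; align must be a
 * power of two for the mask trick to be valid. */
static uint64_t align_up(uint64_t addr, unsigned int align)
{
	return (addr + align - 1) & ~(uint64_t)(align - 1);
}

int main(void)
{
	assert(align_up(0x1001, 32) == 0x1020); /* DSA record alignment */
	assert(align_up(0x1001, 64) == 0x1040); /* IAX record alignment */
	assert(align_up(0x1040, 64) == 0x1040); /* already aligned */
	return 0;
}
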
rc = alloc_descs(wq, num_descs);
if (rc < 0)
goto fail_alloc_descs;
@@ -163,9 +178,11 @@ int idxd_wq_alloc_resources(struct idxd_wq *wq)
struct idxd_desc *desc = wq->descs[i];
desc->hw = wq->hw_descs[i];
- desc->completion = &wq->compls[i];
- desc->compl_dma = wq->compls_addr +
- sizeof(struct dsa_completion_record) * i;
+ if (idxd->type == IDXD_TYPE_DSA)
+ desc->completion = &wq->compls[i];
+ else if (idxd->type == IDXD_TYPE_IAX)
+ desc->iax_completion = &wq->iax_compls[i];
+ desc->compl_dma = wq->compls_addr + idxd->compl_size * i;
desc->id = i;
desc->wq = wq;
desc->cpu = -1;
@@ -178,7 +195,8 @@ int idxd_wq_alloc_resources(struct idxd_wq *wq)
fail_sbitmap_init:
free_descs(wq);
fail_alloc_descs:
- dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
+ dma_free_coherent(dev, wq->compls_size, wq->compls_raw,
+ wq->compls_addr_raw);
fail_alloc_compls:
free_hw_descs(wq);
return rc;
@@ -193,7 +211,8 @@ void idxd_wq_free_resources(struct idxd_wq *wq)
free_hw_descs(wq);
free_descs(wq);
- dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
+ dma_free_coherent(dev, wq->compls_size, wq->compls_raw,
+ wq->compls_addr_raw);
sbitmap_queue_free(&wq->sbq);
}
@@ -273,10 +292,9 @@ int idxd_wq_map_portal(struct idxd_wq *wq)
start = pci_resource_start(pdev, IDXD_WQ_BAR);
start += idxd_get_wq_portal_full_offset(wq->id, IDXD_PORTAL_LIMITED);
- wq->dportal = devm_ioremap(dev, start, IDXD_PORTAL_SIZE);
- if (!wq->dportal)
+ wq->portal = devm_ioremap(dev, start, IDXD_PORTAL_SIZE);
+ if (!wq->portal)
return -ENOMEM;
- dev_dbg(dev, "wq %d portal mapped at %p\n", wq->id, wq->dportal);
return 0;
}
@@ -285,7 +303,61 @@ void idxd_wq_unmap_portal(struct idxd_wq *wq)
{
struct device *dev = &wq->idxd->pdev->dev;
- devm_iounmap(dev, wq->dportal);
+ devm_iounmap(dev, wq->portal);
+}
+
+int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid)
+{
+ struct idxd_device *idxd = wq->idxd;
+ int rc;
+ union wqcfg wqcfg;
+ unsigned int offset;
+ unsigned long flags;
+
+ rc = idxd_wq_disable(wq);
+ if (rc < 0)
+ return rc;
+
+ offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
+ spin_lock_irqsave(&idxd->dev_lock, flags);
+ wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
+ wqcfg.pasid_en = 1;
+ wqcfg.pasid = pasid;
+ iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+
+ rc = idxd_wq_enable(wq);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
+int idxd_wq_disable_pasid(struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = wq->idxd;
+ int rc;
+ union wqcfg wqcfg;
+ unsigned int offset;
+ unsigned long flags;
+
+ rc = idxd_wq_disable(wq);
+ if (rc < 0)
+ return rc;
+
+ offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
+ spin_lock_irqsave(&idxd->dev_lock, flags);
+ wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
+ wqcfg.pasid_en = 0;
+ wqcfg.pasid = 0;
+ iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+
+ rc = idxd_wq_enable(wq);
+ if (rc < 0)
+ return rc;
+
+ return 0;
}
void idxd_wq_disable_cleanup(struct idxd_wq *wq)
@@ -301,6 +373,7 @@ void idxd_wq_disable_cleanup(struct idxd_wq *wq)
wq->group = NULL;
wq->threshold = 0;
wq->priority = 0;
+ wq->ats_dis = 0;
clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
memset(wq->name, 0, WQ_NAME_SIZE);
@@ -468,6 +541,17 @@ void idxd_device_reset(struct idxd_device *idxd)
spin_unlock_irqrestore(&idxd->dev_lock, flags);
}
+void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid)
+{
+ struct device *dev = &idxd->pdev->dev;
+ u32 operand;
+
+ operand = pasid;
+ dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_DRAIN_PASID, operand);
+ idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_PASID, operand, NULL);
+ dev_dbg(dev, "pasid %d drained\n", pasid);
+}
+
/* Device configuration bits */
static void idxd_group_config_write(struct idxd_group *group)
{
@@ -479,24 +563,22 @@ static void idxd_group_config_write(struct idxd_group *group)
dev_dbg(dev, "Writing group %d cfg registers\n", group->id);
/* setup GRPWQCFG */
- for (i = 0; i < 4; i++) {
- grpcfg_offset = idxd->grpcfg_offset +
- group->id * 64 + i * sizeof(u64);
- iowrite64(group->grpcfg.wqs[i],
- idxd->reg_base + grpcfg_offset);
+ for (i = 0; i < GRPWQCFG_STRIDES; i++) {
+ grpcfg_offset = GRPWQCFG_OFFSET(idxd, group->id, i);
+ iowrite64(group->grpcfg.wqs[i], idxd->reg_base + grpcfg_offset);
dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n",
group->id, i, grpcfg_offset,
ioread64(idxd->reg_base + grpcfg_offset));
}
/* setup GRPENGCFG */
- grpcfg_offset = idxd->grpcfg_offset + group->id * 64 + 32;
+ grpcfg_offset = GRPENGCFG_OFFSET(idxd, group->id);
iowrite64(group->grpcfg.engines, idxd->reg_base + grpcfg_offset);
dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id,
grpcfg_offset, ioread64(idxd->reg_base + grpcfg_offset));
/* setup GRPFLAGS */
- grpcfg_offset = idxd->grpcfg_offset + group->id * 64 + 40;
+ grpcfg_offset = GRPFLGCFG_OFFSET(idxd, group->id);
iowrite32(group->grpcfg.flags.bits, idxd->reg_base + grpcfg_offset);
dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#x\n",
group->id, grpcfg_offset,
@@ -554,9 +636,24 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
/* byte 8-11 */
wq->wqcfg->priv = !!(wq->type == IDXD_WQT_KERNEL);
- wq->wqcfg->mode = 1;
+ if (wq_dedicated(wq))
+ wq->wqcfg->mode = 1;
+
+ if (device_pasid_enabled(idxd)) {
+ wq->wqcfg->pasid_en = 1;
+ if (wq->type == IDXD_WQT_KERNEL && wq_dedicated(wq))
+ wq->wqcfg->pasid = idxd->pasid;
+ }
+
wq->wqcfg->priority = wq->priority;
+ if (idxd->hw.gen_cap.block_on_fault &&
+ test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags))
+ wq->wqcfg->bof = 1;
+
+ if (idxd->hw.wq_cap.wq_ats_support)
+ wq->wqcfg->wq_ats_disable = wq->ats_dis;
+
/* bytes 12-15 */
wq->wqcfg->max_xfer_shift = ilog2(wq->max_xfer_bytes);
wq->wqcfg->max_batch_shift = ilog2(wq->max_batch_size);
@@ -664,8 +761,8 @@ static int idxd_wqs_setup(struct idxd_device *idxd)
if (!wq->size)
continue;
- if (!wq_dedicated(wq)) {
- dev_warn(dev, "No shared workqueue support.\n");
+ if (wq_shared(wq) && !device_swq_supported(idxd)) {
+ dev_warn(dev, "No shared wq support but configured.\n");
return -EINVAL;
}
diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c
index 0c892cbd72e0..8ed2773d8285 100644
--- a/drivers/dma/idxd/dma.c
+++ b/drivers/dma/idxd/dma.c
@@ -61,8 +61,6 @@ static inline void idxd_prep_desc_common(struct idxd_wq *wq,
u64 addr_f1, u64 addr_f2, u64 len,
u64 compl, u32 flags)
{
- struct idxd_device *idxd = wq->idxd;
-
hw->flags = flags;
hw->opcode = opcode;
hw->src_addr = addr_f1;
@@ -70,13 +68,6 @@ static inline void idxd_prep_desc_common(struct idxd_wq *wq,
hw->xfer_size = len;
hw->priv = !!(wq->type == IDXD_WQT_KERNEL);
hw->completion_addr = compl;
-
- /*
- * Descriptor completion vectors are 1-8 for MSIX. We will round
- * robin through the 8 vectors.
- */
- wq->vec_ptr = (wq->vec_ptr % idxd->num_wq_irqs) + 1;
- hw->int_handle = wq->vec_ptr;
}
static struct dma_async_tx_descriptor *
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index d48f193daacc..5a50e91c71bf 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -20,7 +20,8 @@ extern struct kmem_cache *idxd_desc_pool;
enum idxd_type {
IDXD_TYPE_UNKNOWN = -1,
IDXD_TYPE_DSA = 0,
- IDXD_TYPE_MAX
+ IDXD_TYPE_IAX,
+ IDXD_TYPE_MAX,
};
#define IDXD_NAME_SIZE 128
@@ -34,6 +35,11 @@ struct idxd_irq_entry {
int id;
struct llist_head pending_llist;
struct list_head work_list;
+ /*
+ * Lock to protect access between the irq thread processing
+ * descriptors and the irq thread processing error descriptors.
+ */
+ spinlock_t list_lock;
};
struct idxd_group {
@@ -59,6 +65,7 @@ enum idxd_wq_state {
enum idxd_wq_flag {
WQ_FLAG_DEDICATED = 0,
+ WQ_FLAG_BLOCK_ON_FAULT,
};
enum idxd_wq_type {
@@ -86,10 +93,11 @@ enum idxd_op_type {
enum idxd_complete_type {
IDXD_COMPLETE_NORMAL = 0,
IDXD_COMPLETE_ABORT,
+ IDXD_COMPLETE_DEV_FAIL,
};
struct idxd_wq {
- void __iomem *dportal;
+ void __iomem *portal;
struct device conf_dev;
struct idxd_cdev idxd_cdev;
struct idxd_device *idxd;
@@ -107,8 +115,13 @@ struct idxd_wq {
u32 vec_ptr; /* interrupt steering */
struct dsa_hw_desc **hw_descs;
int num_descs;
- struct dsa_completion_record *compls;
+ union {
+ struct dsa_completion_record *compls;
+ struct iax_completion_record *iax_compls;
+ };
+ void *compls_raw;
dma_addr_t compls_addr;
+ dma_addr_t compls_addr_raw;
int compls_size;
struct idxd_desc **descs;
struct sbitmap_queue sbq;
@@ -116,6 +129,7 @@ struct idxd_wq {
char name[WQ_NAME_SIZE + 1];
u64 max_xfer_bytes;
u32 max_batch_size;
+ bool ats_dis;
};
struct idxd_engine {
@@ -145,6 +159,7 @@ enum idxd_device_state {
enum idxd_device_flag {
IDXD_FLAG_CONFIGURABLE = 0,
IDXD_FLAG_CMD_RUNNING,
+ IDXD_FLAG_PASID_ENABLED,
};
struct idxd_device {
@@ -167,6 +182,9 @@ struct idxd_device {
struct idxd_wq *wqs;
struct idxd_engine *engines;
+ struct iommu_sva *sva;
+ unsigned int pasid;
+
int num_groups;
u32 msix_perm_offset;
@@ -184,6 +202,7 @@ struct idxd_device {
int token_limit;
int nr_tokens; /* non-reserved tokens */
unsigned int wqcfg_size;
+ int compl_size;
union sw_err_reg sw_err;
wait_queue_head_t cmd_waitq;
@@ -198,9 +217,15 @@ struct idxd_device {
/* IDXD software descriptor */
struct idxd_desc {
- struct dsa_hw_desc *hw;
+ union {
+ struct dsa_hw_desc *hw;
+ struct iax_hw_desc *iax_hw;
+ };
dma_addr_t desc_dma;
- struct dsa_completion_record *completion;
+ union {
+ struct dsa_completion_record *completion;
+ struct iax_completion_record *iax_completion;
+ };
dma_addr_t compl_dma;
struct dma_async_tx_descriptor txd;
struct llist_node llnode;
@@ -214,12 +239,30 @@ struct idxd_desc {
#define confdev_to_wq(dev) container_of(dev, struct idxd_wq, conf_dev)
extern struct bus_type dsa_bus_type;
+extern struct bus_type iax_bus_type;
+
+extern bool support_enqcmd;
static inline bool wq_dedicated(struct idxd_wq *wq)
{
return test_bit(WQ_FLAG_DEDICATED, &wq->flags);
}
+static inline bool wq_shared(struct idxd_wq *wq)
+{
+ return !test_bit(WQ_FLAG_DEDICATED, &wq->flags);
+}
+
+static inline bool device_pasid_enabled(struct idxd_device *idxd)
+{
+ return test_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
+}
+
+static inline bool device_swq_supported(struct idxd_device *idxd)
+{
+ return (support_enqcmd && device_pasid_enabled(idxd));
+}
+
enum idxd_portal_prot {
IDXD_PORTAL_UNLIMITED = 0,
IDXD_PORTAL_LIMITED,
@@ -242,6 +285,8 @@ static inline void idxd_set_type(struct idxd_device *idxd)
if (pdev->device == PCI_DEVICE_ID_INTEL_DSA_SPR0)
idxd->type = IDXD_TYPE_DSA;
+ else if (pdev->device == PCI_DEVICE_ID_INTEL_IAX_SPR0)
+ idxd->type = IDXD_TYPE_IAX;
else
idxd->type = IDXD_TYPE_UNKNOWN;
}
@@ -288,6 +333,7 @@ void idxd_device_reset(struct idxd_device *idxd);
void idxd_device_cleanup(struct idxd_device *idxd);
int idxd_device_config(struct idxd_device *idxd);
void idxd_device_wqs_clear_state(struct idxd_device *idxd);
+void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid);
/* work queue control */
int idxd_wq_alloc_resources(struct idxd_wq *wq);
@@ -298,6 +344,8 @@ void idxd_wq_drain(struct idxd_wq *wq);
int idxd_wq_map_portal(struct idxd_wq *wq);
void idxd_wq_unmap_portal(struct idxd_wq *wq);
void idxd_wq_disable_cleanup(struct idxd_wq *wq);
+int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid);
+int idxd_wq_disable_pasid(struct idxd_wq *wq);
/* submission */
int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc);
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 0a4432b063b5..2c051e07c34c 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -14,6 +14,8 @@
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/device.h>
#include <linux/idr.h>
+#include <linux/intel-svm.h>
+#include <linux/iommu.h>
#include <uapi/linux/idxd.h>
#include <linux/dmaengine.h>
#include "../dmaengine.h"
@@ -26,18 +28,24 @@ MODULE_AUTHOR("Intel Corporation");
#define DRV_NAME "idxd"
+bool support_enqcmd;
+
static struct idr idxd_idrs[IDXD_TYPE_MAX];
static struct mutex idxd_idr_lock;
static struct pci_device_id idxd_pci_tbl[] = {
/* DSA ver 1.0 platforms */
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_DSA_SPR0) },
+
+ /* IAX ver 1.0 platforms */
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IAX_SPR0) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);
static char *idxd_name[] = {
"dsa",
+ "iax"
};
const char *idxd_get_dev_name(struct idxd_device *idxd)
@@ -53,6 +61,7 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
struct idxd_irq_entry *irq_entry;
int i, msixcnt;
int rc = 0;
+ union msix_perm mperm;
msixcnt = pci_msix_vec_count(pdev);
if (msixcnt < 0) {
@@ -92,6 +101,7 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
for (i = 0; i < msixcnt; i++) {
idxd->irq_entries[i].id = i;
idxd->irq_entries[i].idxd = idxd;
+ spin_lock_init(&idxd->irq_entries[i].list_lock);
}
msix = &idxd->msix_entries[0];
@@ -131,6 +141,13 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
idxd_unmask_error_interrupts(idxd);
+ /* Setup MSIX permission table */
+ mperm.bits = 0;
+ mperm.pasid = idxd->pasid;
+ mperm.pasid_en = device_pasid_enabled(idxd);
+ for (i = 1; i < msixcnt; i++)
+ iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8);
+
return 0;
err_no_irq:
@@ -201,17 +218,14 @@ static void idxd_read_table_offsets(struct idxd_device *idxd)
struct device *dev = &idxd->pdev->dev;
offsets.bits[0] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET);
- offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET
- + sizeof(u64));
- idxd->grpcfg_offset = offsets.grpcfg * 0x100;
+ offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET + sizeof(u64));
+ idxd->grpcfg_offset = offsets.grpcfg * IDXD_TABLE_MULT;
dev_dbg(dev, "IDXD Group Config Offset: %#x\n", idxd->grpcfg_offset);
- idxd->wqcfg_offset = offsets.wqcfg * 0x100;
- dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n",
- idxd->wqcfg_offset);
- idxd->msix_perm_offset = offsets.msix_perm * 0x100;
- dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n",
- idxd->msix_perm_offset);
- idxd->perfmon_offset = offsets.perfmon * 0x100;
+ idxd->wqcfg_offset = offsets.wqcfg * IDXD_TABLE_MULT;
+ dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n", idxd->wqcfg_offset);
+ idxd->msix_perm_offset = offsets.msix_perm * IDXD_TABLE_MULT;
+ dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n", idxd->msix_perm_offset);
+ idxd->perfmon_offset = offsets.perfmon * IDXD_TABLE_MULT;
dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset);
}
@@ -265,8 +279,7 @@ static void idxd_read_caps(struct idxd_device *idxd)
}
}
-static struct idxd_device *idxd_alloc(struct pci_dev *pdev,
- void __iomem * const *iomap)
+static struct idxd_device *idxd_alloc(struct pci_dev *pdev)
{
struct device *dev = &pdev->dev;
struct idxd_device *idxd;
@@ -276,12 +289,45 @@ static struct idxd_device *idxd_alloc(struct pci_dev *pdev,
return NULL;
idxd->pdev = pdev;
- idxd->reg_base = iomap[IDXD_MMIO_BAR];
spin_lock_init(&idxd->dev_lock);
return idxd;
}
+static int idxd_enable_system_pasid(struct idxd_device *idxd)
+{
+ int flags;
+ unsigned int pasid;
+ struct iommu_sva *sva;
+
+ flags = SVM_FLAG_SUPERVISOR_MODE;
+
+ sva = iommu_sva_bind_device(&idxd->pdev->dev, NULL, &flags);
+ if (IS_ERR(sva)) {
+ dev_warn(&idxd->pdev->dev,
+ "iommu sva bind failed: %ld\n", PTR_ERR(sva));
+ return PTR_ERR(sva);
+ }
+
+ pasid = iommu_sva_get_pasid(sva);
+ if (pasid == IOMMU_PASID_INVALID) {
+ iommu_sva_unbind_device(sva);
+ return -ENODEV;
+ }
+
+ idxd->sva = sva;
+ idxd->pasid = pasid;
+ dev_dbg(&idxd->pdev->dev, "system pasid: %u\n", pasid);
+ return 0;
+}
+
+static void idxd_disable_system_pasid(struct idxd_device *idxd)
+{
+
+ iommu_sva_unbind_device(idxd->sva);
+ idxd->sva = NULL;
+}
+
static int idxd_probe(struct idxd_device *idxd)
{
struct pci_dev *pdev = idxd->pdev;
@@ -292,6 +338,14 @@ static int idxd_probe(struct idxd_device *idxd)
idxd_device_init_reset(idxd);
dev_dbg(dev, "IDXD reset complete\n");
+ if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM)) {
+ rc = idxd_enable_system_pasid(idxd);
+ if (rc < 0)
+ dev_warn(dev, "Failed to enable PASID. No SVA support: %d\n", rc);
+ else
+ set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
+ }
+
idxd_read_caps(idxd);
idxd_read_table_offsets(idxd);
@@ -322,29 +376,37 @@ static int idxd_probe(struct idxd_device *idxd)
idxd_mask_error_interrupts(idxd);
idxd_mask_msix_vectors(idxd);
err_setup:
+ if (device_pasid_enabled(idxd))
+ idxd_disable_system_pasid(idxd);
return rc;
}
+static void idxd_type_init(struct idxd_device *idxd)
+{
+ if (idxd->type == IDXD_TYPE_DSA)
+ idxd->compl_size = sizeof(struct dsa_completion_record);
+ else if (idxd->type == IDXD_TYPE_IAX)
+ idxd->compl_size = sizeof(struct iax_completion_record);
+}
+
static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
- void __iomem * const *iomap;
struct device *dev = &pdev->dev;
struct idxd_device *idxd;
int rc;
- unsigned int mask;
rc = pcim_enable_device(pdev);
if (rc)
return rc;
- dev_dbg(dev, "Mapping BARs\n");
- mask = (1 << IDXD_MMIO_BAR);
- rc = pcim_iomap_regions(pdev, mask, DRV_NAME);
- if (rc)
- return rc;
+ dev_dbg(dev, "Alloc IDXD context\n");
+ idxd = idxd_alloc(pdev);
+ if (!idxd)
+ return -ENOMEM;
- iomap = pcim_iomap_table(pdev);
- if (!iomap)
+ dev_dbg(dev, "Mapping BARs\n");
+ idxd->reg_base = pcim_iomap(pdev, IDXD_MMIO_BAR, 0);
+ if (!idxd->reg_base)
return -ENOMEM;
dev_dbg(dev, "Set DMA masks\n");
@@ -360,13 +422,10 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (rc)
return rc;
- dev_dbg(dev, "Alloc IDXD context\n");
- idxd = idxd_alloc(pdev, iomap);
- if (!idxd)
- return -ENOMEM;
-
idxd_set_type(idxd);
+ idxd_type_init(idxd);
+
dev_dbg(dev, "Set PCI master\n");
pci_set_master(pdev);
pci_set_drvdata(pdev, idxd);
@@ -452,6 +511,8 @@ static void idxd_remove(struct pci_dev *pdev)
dev_dbg(&pdev->dev, "%s called\n", __func__);
idxd_cleanup_sysfs(idxd);
idxd_shutdown(pdev);
+ if (device_pasid_enabled(idxd))
+ idxd_disable_system_pasid(idxd);
mutex_lock(&idxd_idr_lock);
idr_remove(&idxd_idrs[idxd->type], idxd->id);
mutex_unlock(&idxd_idr_lock);
@@ -470,7 +531,7 @@ static int __init idxd_init_module(void)
int err, i;
/*
- * If the CPU does not support write512, there's no point in
+ * If the CPU does not support MOVDIR64B or ENQCMDS, there's no point in
* enumerating the device. We can not utilize it.
*/
if (!boot_cpu_has(X86_FEATURE_MOVDIR64B)) {
@@ -478,8 +539,10 @@ static int __init idxd_init_module(void)
return -ENODEV;
}
- pr_info("%s: Intel(R) Accelerator Devices Driver %s\n",
- DRV_NAME, IDXD_DRIVER_VERSION);
+ if (!boot_cpu_has(X86_FEATURE_ENQCMD))
+ pr_warn("Platform does not have ENQCMD(S) support.\n");
+ else
+ support_enqcmd = true;
mutex_init(&idxd_idr_lock);
for (i = 0; i < IDXD_TYPE_MAX; i++)
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index 17a65a13fb64..593a2f6ed16c 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -11,6 +11,24 @@
#include "idxd.h"
#include "registers.h"
+enum irq_work_type {
+ IRQ_WORK_NORMAL = 0,
+ IRQ_WORK_PROCESS_FAULT,
+};
+
+struct idxd_fault {
+ struct work_struct work;
+ u64 addr;
+ struct idxd_device *idxd;
+};
+
+static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
+ enum irq_work_type wtype,
+ int *processed, u64 data);
+static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
+ enum irq_work_type wtype,
+ int *processed, u64 data);
+
static void idxd_device_reinit(struct work_struct *work)
{
struct idxd_device *idxd = container_of(work, struct idxd_device, work);
@@ -44,6 +62,46 @@ static void idxd_device_reinit(struct work_struct *work)
idxd_device_wqs_clear_state(idxd);
}
+static void idxd_device_fault_work(struct work_struct *work)
+{
+ struct idxd_fault *fault = container_of(work, struct idxd_fault, work);
+ struct idxd_irq_entry *ie;
+ int i;
+ int processed;
+ int irqcnt = fault->idxd->num_wq_irqs + 1;
+
+ for (i = 1; i < irqcnt; i++) {
+ ie = &fault->idxd->irq_entries[i];
+ irq_process_work_list(ie, IRQ_WORK_PROCESS_FAULT,
+ &processed, fault->addr);
+ if (processed)
+ break;
+
+ irq_process_pending_llist(ie, IRQ_WORK_PROCESS_FAULT,
+ &processed, fault->addr);
+ if (processed)
+ break;
+ }
+
+ kfree(fault);
+}
+
+static int idxd_device_schedule_fault_process(struct idxd_device *idxd,
+ u64 fault_addr)
+{
+ struct idxd_fault *fault;
+
+ fault = kmalloc(sizeof(*fault), GFP_ATOMIC);
+ if (!fault)
+ return -ENOMEM;
+
+ fault->addr = fault_addr;
+ fault->idxd = idxd;
+ INIT_WORK(&fault->work, idxd_device_fault_work);
+ queue_work(idxd->wq, &fault->work);
+ return 0;
+}
+
irqreturn_t idxd_irq_handler(int vec, void *data)
{
struct idxd_irq_entry *irq_entry = data;
@@ -125,6 +183,15 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
if (!err)
goto out;
+ /*
+ * This case should rarely happen and is typically due to a
+ * software programming error in the driver.
+ */
+ if (idxd->sw_err.valid &&
+ idxd->sw_err.desc_valid &&
+ idxd->sw_err.fault_addr)
+ idxd_device_schedule_fault_process(idxd, idxd->sw_err.fault_addr);
+
gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
if (gensts.state == IDXD_DEVICE_STATE_HALT) {
idxd->state = IDXD_DEV_HALTED;
@@ -152,57 +219,110 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
return IRQ_HANDLED;
}
+static bool process_fault(struct idxd_desc *desc, u64 fault_addr)
+{
+ /*
+ * The completion address can be bad as well. Check whether the fault
+ * address matches the descriptor address or the completion address.
+ */
+ if ((u64)desc->hw == fault_addr ||
+ (u64)desc->completion == fault_addr) {
+ idxd_dma_complete_txd(desc, IDXD_COMPLETE_DEV_FAIL);
+ return true;
+ }
+
+ return false;
+}
+
+static bool complete_desc(struct idxd_desc *desc)
+{
+ if (desc->completion->status) {
+ idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL);
+ return true;
+ }
+
+ return false;
+}
+
static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
- int *processed)
+ enum irq_work_type wtype,
+ int *processed, u64 data)
{
struct idxd_desc *desc, *t;
struct llist_node *head;
int queued = 0;
+ bool completed = false;
+ unsigned long flags;
*processed = 0;
head = llist_del_all(&irq_entry->pending_llist);
if (!head)
- return 0;
+ goto out;
llist_for_each_entry_safe(desc, t, head, llnode) {
- if (desc->completion->status) {
- idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL);
+ if (wtype == IRQ_WORK_NORMAL)
+ completed = complete_desc(desc);
+ else if (wtype == IRQ_WORK_PROCESS_FAULT)
+ completed = process_fault(desc, data);
+
+ if (completed) {
idxd_free_desc(desc->wq, desc);
(*processed)++;
+ if (wtype == IRQ_WORK_PROCESS_FAULT)
+ break;
} else {
- list_add_tail(&desc->list, &irq_entry->work_list);
+ spin_lock_irqsave(&irq_entry->list_lock, flags);
+ list_add_tail(&desc->list,
+ &irq_entry->work_list);
+ spin_unlock_irqrestore(&irq_entry->list_lock, flags);
queued++;
}
}
+ out:
return queued;
}
static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
- int *processed)
+ enum irq_work_type wtype,
+ int *processed, u64 data)
{
struct list_head *node, *next;
int queued = 0;
+ bool completed = false;
+ unsigned long flags;
*processed = 0;
+ spin_lock_irqsave(&irq_entry->list_lock, flags);
if (list_empty(&irq_entry->work_list))
- return 0;
+ goto out;
list_for_each_safe(node, next, &irq_entry->work_list) {
struct idxd_desc *desc =
container_of(node, struct idxd_desc, list);
- if (desc->completion->status) {
+ spin_unlock_irqrestore(&irq_entry->list_lock, flags);
+ if (wtype == IRQ_WORK_NORMAL)
+ completed = complete_desc(desc);
+ else if (wtype == IRQ_WORK_PROCESS_FAULT)
+ completed = process_fault(desc, data);
+
+ if (completed) {
+ spin_lock_irqsave(&irq_entry->list_lock, flags);
list_del(&desc->list);
- /* process and callback */
- idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL);
+ spin_unlock_irqrestore(&irq_entry->list_lock, flags);
idxd_free_desc(desc->wq, desc);
(*processed)++;
+ if (wtype == IRQ_WORK_PROCESS_FAULT)
+ return queued;
} else {
queued++;
}
+ spin_lock_irqsave(&irq_entry->list_lock, flags);
}
+ out:
+ spin_unlock_irqrestore(&irq_entry->list_lock, flags);
return queued;
}
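
The reworked walk above drops list_lock around the completion callback and re-takes it before touching the list again, so the callback can never deadlock against another holder of the lock. A hedged pthread sketch of the same discipline (an array stands in for the list so iteration stays safe after unlocking):

#include <assert.h>
#include <pthread.h>

#define N 4

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static int work[N] = { 1, 1, 0, 1 }; /* 1 = completed descriptor */
static int completed;

static void complete_cb(int idx)
{
	/* Runs without list_lock held, like idxd_dma_complete_txd(). */
	(void)idx;
	completed++;
}

static void process_work_list(void)
{
	pthread_mutex_lock(&list_lock);
	for (int i = 0; i < N; i++) {
		if (!work[i])
			continue;
		/* Drop the lock around the callback, then re-take it
		 * before touching the list again. */
		pthread_mutex_unlock(&list_lock);
		complete_cb(i);
		pthread_mutex_lock(&list_lock);
		work[i] = 0; /* the "list_del" happens under the lock */
	}
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	process_work_list();
	assert(completed == 3);
	return 0;
}
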
@@ -230,12 +350,14 @@ static int idxd_desc_process(struct idxd_irq_entry *irq_entry)
* 5. Repeat until no more descriptors.
*/
do {
- rc = irq_process_work_list(irq_entry, &processed);
+ rc = irq_process_work_list(irq_entry, IRQ_WORK_NORMAL,
+ &processed, 0);
total += processed;
if (rc != 0)
continue;
- rc = irq_process_pending_llist(irq_entry, &processed);
+ rc = irq_process_pending_llist(irq_entry, IRQ_WORK_NORMAL,
+ &processed, 0);
total += processed;
} while (rc != 0);
diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h
index 54390334c243..751ecb4f9f81 100644
--- a/drivers/dma/idxd/registers.h
+++ b/drivers/dma/idxd/registers.h
@@ -5,6 +5,7 @@
/* PCI Config */
#define PCI_DEVICE_ID_INTEL_DSA_SPR0 0x0b25
+#define PCI_DEVICE_ID_INTEL_IAX_SPR0 0x0cfe
#define IDXD_MMIO_BAR 0
#define IDXD_WQ_BAR 2
@@ -47,7 +48,7 @@ union wq_cap_reg {
u64 rsvd:20;
u64 shared_mode:1;
u64 dedicated_mode:1;
- u64 rsvd2:1;
+ u64 wq_ats_support:1;
u64 priority:1;
u64 occupancy:1;
u64 occupancy_int:1;
@@ -102,6 +103,8 @@ union offsets_reg {
u64 bits[2];
} __packed;
+#define IDXD_TABLE_MULT 0x100
+
#define IDXD_GENCFG_OFFSET 0x80
union gencfg_reg {
struct {
@@ -301,7 +304,8 @@ union wqcfg {
/* bytes 8-11 */
u32 mode:1; /* shared or dedicated */
u32 bof:1; /* block on fault */
- u32 rsvd2:2;
+ u32 wq_ats_disable:1;
+ u32 rsvd2:1;
u32 priority:4;
u32 pasid:20;
u32 pasid_en:1;
@@ -336,6 +340,8 @@ union wqcfg {
u32 bits[8];
} __packed;
+#define WQCFG_PASID_IDX 2
+
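
WQCFG_PASID_IDX names the third 32-bit word of the WQCFG block, the one idxd_wq_set_pasid() rewrites in place. Assuming the LSB-first bitfield layout the union above implies on little-endian targets (pasid in bits 8-27, pasid_en at bit 28), a self-checking sketch of that read-modify-write:

#include <assert.h>
#include <stdint.h>

#define PASID_SHIFT 8
#define PASID_MASK (0xFFFFFu << PASID_SHIFT) /* 20-bit pasid field */
#define PASID_EN (1u << 28)

/* Read-modify-write of the PASID word, preserving the low bits
 * (mode, bof, ats_disable, priority) the way the driver does. */
static uint32_t set_pasid(uint32_t word, uint32_t pasid)
{
	word &= ~(PASID_MASK | PASID_EN);
	word |= (pasid << PASID_SHIFT) & PASID_MASK;
	word |= PASID_EN;
	return word;
}

int main(void)
{
	uint32_t word = 0x00000031; /* e.g. dedicated wq, priority 3 */

	word = set_pasid(word, 0x42);
	assert(word == (0x00000031 | (0x42u << 8) | PASID_EN));
	return 0;
}
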
/*
* This macro calculates the offset into the WQCFG register
* idxd - struct idxd *
@@ -354,4 +360,22 @@ union wqcfg {
#define WQCFG_STRIDES(_idxd_dev) ((_idxd_dev)->wqcfg_size / sizeof(u32))
+#define GRPCFG_SIZE 64
+#define GRPWQCFG_STRIDES 4
+
+/*
+ * This macro calculates the offset into the GRPCFG register
+ * idxd - struct idxd *
+ * n - group id
+ * ofs - the index of the 64-bit qword within the GRPWQCFG portion
+ *
+ * The GRPCFG register block is laid out per group. The n index
+ * selects the block for that particular group. Each GRPWQCFG
+ * register is 64 bits; ofs selects which qword to access.
+ */
+#define GRPWQCFG_OFFSET(idxd_dev, n, ofs) ((idxd_dev)->grpcfg_offset +\
+ (n) * GRPCFG_SIZE + sizeof(u64) * (ofs))
+#define GRPENGCFG_OFFSET(idxd_dev, n) ((idxd_dev)->grpcfg_offset + (n) * GRPCFG_SIZE + 32)
+#define GRPFLGCFG_OFFSET(idxd_dev, n) ((idxd_dev)->grpcfg_offset + (n) * GRPCFG_SIZE + 40)
+
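
The three macros above carve one 64-byte GRPCFG block per group: GRPWQCFG qwords at 8-byte strides from the block start, GRPENGCFG at +32, GRPFLGCFG at +40. A self-checking sketch of the arithmetic:

#include <assert.h>
#include <stdint.h>

#define GRPCFG_SIZE 64

static uint32_t grpwqcfg_off(uint32_t base, int n, int ofs)
{
	return base + n * GRPCFG_SIZE + 8 * ofs; /* 8 == sizeof(u64) */
}

static uint32_t grpengcfg_off(uint32_t base, int n)
{
	return base + n * GRPCFG_SIZE + 32;
}

static uint32_t grpflgcfg_off(uint32_t base, int n)
{
	return base + n * GRPCFG_SIZE + 40;
}

int main(void)
{
	/* Group 1 with a grpcfg table base of 0x400. */
	assert(grpwqcfg_off(0x400, 1, 0) == 0x440);
	assert(grpwqcfg_off(0x400, 1, 3) == 0x458); /* last GRPWQCFG qword */
	assert(grpengcfg_off(0x400, 1) == 0x460);
	assert(grpflgcfg_off(0x400, 1) == 0x468);
	return 0;
}
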
#endif
diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c
index 417048e3c42a..a7a61bcc17d5 100644
--- a/drivers/dma/idxd/submit.c
+++ b/drivers/dma/idxd/submit.c
@@ -11,11 +11,22 @@
static struct idxd_desc *__get_desc(struct idxd_wq *wq, int idx, int cpu)
{
struct idxd_desc *desc;
+ struct idxd_device *idxd = wq->idxd;
desc = wq->descs[idx];
memset(desc->hw, 0, sizeof(struct dsa_hw_desc));
- memset(desc->completion, 0, sizeof(struct dsa_completion_record));
+ memset(desc->completion, 0, idxd->compl_size);
desc->cpu = cpu;
+
+ if (device_pasid_enabled(idxd))
+ desc->hw->pasid = idxd->pasid;
+
+ /*
+ * Descriptor completion vectors are 1-8 for MSIX. We will round
+ * robin through the 8 vectors.
+ */
+ wq->vec_ptr = (wq->vec_ptr % idxd->num_wq_irqs) + 1;
+ desc->hw->int_handle = wq->vec_ptr;
return desc;
}
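
The (vec_ptr % n) + 1 step moved here cycles through interrupt handles 1..n, never touching vector 0 (the misc/error vector). A quick self-check of that sequence:

#include <assert.h>

int main(void)
{
	unsigned int vec_ptr = 0, num_wq_irqs = 8;
	unsigned int expected[] = { 1, 2, 3, 4, 5, 6, 7, 8, 1, 2 };

	/* (v % n) + 1 maps 0..n-1 to 1..n and n back to 1, so the
	 * sequence round-robins over the wq completion vectors. */
	for (int i = 0; i < 10; i++) {
		vec_ptr = (vec_ptr % num_wq_irqs) + 1;
		assert(vec_ptr == expected[i]);
	}
	return 0;
}
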
@@ -70,18 +81,32 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
struct idxd_device *idxd = wq->idxd;
int vec = desc->hw->int_handle;
void __iomem *portal;
+ int rc;
if (idxd->state != IDXD_DEV_ENABLED)
return -EIO;
- portal = wq->dportal;
+ portal = wq->portal;
+
/*
- * The wmb() flushes writes to coherent DMA data before possibly
- * triggering a DMA read. The wmb() is necessary even on UP because
- * the recipient is a device.
+ * The wmb() flushes writes to coherent DMA data before
+ * possibly triggering a DMA read. The wmb() is necessary
+ * even on UP because the recipient is a device.
*/
wmb();
- iosubmit_cmds512(portal, desc->hw, 1);
+ if (wq_dedicated(wq)) {
+ iosubmit_cmds512(portal, desc->hw, 1);
+ } else {
+ /*
+ * A queue-full rejection is unlikely since descriptor allocation
+ * is gated at the wq size. If we receive -EAGAIN, something went
+ * wrong, such as the device not accepting descriptors at all.
+ */
+ rc = enqcmds(portal, desc->hw);
+ if (rc < 0)
+ return rc;
+ }
/*
* Pending the descriptor to the lockless list for the irq_entry
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index 07a5db06a29a..4dbb03c545e4 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -41,14 +41,24 @@ static struct device_type dsa_device_type = {
.release = idxd_conf_device_release,
};
+static struct device_type iax_device_type = {
+ .name = "iax",
+ .release = idxd_conf_device_release,
+};
+
static inline bool is_dsa_dev(struct device *dev)
{
return dev ? dev->type == &dsa_device_type : false;
}
+static inline bool is_iax_dev(struct device *dev)
+{
+ return dev ? dev->type == &iax_device_type : false;
+}
+
static inline bool is_idxd_dev(struct device *dev)
{
- return is_dsa_dev(dev);
+ return is_dsa_dev(dev) || is_iax_dev(dev);
}
static inline bool is_idxd_wq_dev(struct device *dev)
@@ -175,6 +185,30 @@ static int idxd_config_bus_probe(struct device *dev)
return -EINVAL;
}
+ /* Shared WQ checks */
+ if (wq_shared(wq)) {
+ if (!device_swq_supported(idxd)) {
+ dev_warn(dev,
+ "Shared WQ configured but PASID not enabled.\n");
+ mutex_unlock(&wq->wq_lock);
+ return -ENXIO;
+ }
+ /*
+ * A shared wq with a threshold of 0 means the user either did
+ * not set the threshold or transitioned from a dedicated wq
+ * without setting one. A value of 0 would effectively disable
+ * the shared wq, and the driver does not allow 0 to be set for
+ * the threshold via sysfs.
+ */
+ if (wq->threshold == 0) {
+ dev_warn(dev,
+ "Shared WQ and threshold 0.\n");
+ mutex_unlock(&wq->wq_lock);
+ return -EINVAL;
+ }
+ }
+
rc = idxd_wq_alloc_resources(wq);
if (rc < 0) {
mutex_unlock(&wq->wq_lock);
@@ -335,8 +369,17 @@ struct bus_type dsa_bus_type = {
.shutdown = idxd_config_bus_shutdown,
};
+struct bus_type iax_bus_type = {
+ .name = "iax",
+ .match = idxd_config_bus_match,
+ .probe = idxd_config_bus_probe,
+ .remove = idxd_config_bus_remove,
+ .shutdown = idxd_config_bus_shutdown,
+};
+
static struct bus_type *idxd_bus_types[] = {
- &dsa_bus_type
+ &dsa_bus_type,
+ &iax_bus_type
};
static struct idxd_device_driver dsa_drv = {
@@ -348,8 +391,18 @@ static struct idxd_device_driver dsa_drv = {
},
};
+static struct idxd_device_driver iax_drv = {
+ .drv = {
+ .name = "iax",
+ .bus = &iax_bus_type,
+ .owner = THIS_MODULE,
+ .mod_name = KBUILD_MODNAME,
+ },
+};
+
static struct idxd_device_driver *idxd_drvs[] = {
- &dsa_drv
+ &dsa_drv,
+ &iax_drv
};
struct bus_type *idxd_get_bus_type(struct idxd_device *idxd)
@@ -361,6 +414,8 @@ static struct device_type *idxd_get_device_type(struct idxd_device *idxd)
{
if (idxd->type == IDXD_TYPE_DSA)
return &dsa_device_type;
+ else if (idxd->type == IDXD_TYPE_IAX)
+ return &iax_device_type;
else
return NULL;
}
@@ -379,7 +434,7 @@ int idxd_register_driver(void)
return 0;
drv_fail:
- for (; i > 0; i--)
+ while (--i >= 0)
driver_unregister(&idxd_drvs[i]->drv);
return rc;
}
@@ -501,6 +556,9 @@ static ssize_t group_tokens_reserved_store(struct device *dev,
if (rc < 0)
return -EINVAL;
+ if (idxd->type == IDXD_TYPE_IAX)
+ return -EOPNOTSUPP;
+
if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
return -EPERM;
@@ -546,6 +604,9 @@ static ssize_t group_tokens_allowed_store(struct device *dev,
if (rc < 0)
return -EINVAL;
+ if (idxd->type == IDXD_TYPE_IAX)
+ return -EOPNOTSUPP;
+
if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
return -EPERM;
@@ -588,6 +649,9 @@ static ssize_t group_use_token_limit_store(struct device *dev,
if (rc < 0)
return -EINVAL;
+ if (idxd->type == IDXD_TYPE_IAX)
+ return -EOPNOTSUPP;
+
if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
return -EPERM;
@@ -875,6 +939,8 @@ static ssize_t wq_mode_store(struct device *dev,
if (sysfs_streq(buf, "dedicated")) {
set_bit(WQ_FLAG_DEDICATED, &wq->flags);
wq->threshold = 0;
+ } else if (sysfs_streq(buf, "shared") && device_swq_supported(idxd)) {
+ clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
} else {
return -EINVAL;
}
@@ -973,6 +1039,87 @@ static ssize_t wq_priority_store(struct device *dev,
static struct device_attribute dev_attr_wq_priority =
__ATTR(priority, 0644, wq_priority_show, wq_priority_store);
+static ssize_t wq_block_on_fault_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+ return sprintf(buf, "%u\n",
+ test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags));
+}
+
+static ssize_t wq_block_on_fault_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_device *idxd = wq->idxd;
+ bool bof;
+ int rc;
+
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
+ if (wq->state != IDXD_WQ_DISABLED)
+ return -ENXIO;
+
+ rc = kstrtobool(buf, &bof);
+ if (rc < 0)
+ return rc;
+
+ if (bof)
+ set_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
+ else
+ clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
+
+ return count;
+}
+
+static struct device_attribute dev_attr_wq_block_on_fault =
+ __ATTR(block_on_fault, 0644, wq_block_on_fault_show,
+ wq_block_on_fault_store);
+
+static ssize_t wq_threshold_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+ return sprintf(buf, "%u\n", wq->threshold);
+}
+
+static ssize_t wq_threshold_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_device *idxd = wq->idxd;
+ unsigned int val;
+ int rc;
+
+ rc = kstrtouint(buf, 0, &val);
+ if (rc < 0)
+ return -EINVAL;
+
+ if (val > wq->size || val == 0)
+ return -EINVAL;
+
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
+ if (wq->state != IDXD_WQ_DISABLED)
+ return -ENXIO;
+
+ if (test_bit(WQ_FLAG_DEDICATED, &wq->flags))
+ return -EINVAL;
+
+ wq->threshold = val;
+
+ return count;
+}
+
+static struct device_attribute dev_attr_wq_threshold =
+ __ATTR(threshold, 0644, wq_threshold_show, wq_threshold_store);
+
static ssize_t wq_type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1044,6 +1191,13 @@ static ssize_t wq_name_store(struct device *dev,
if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
return -EINVAL;
+ /*
+ * This is temporarily placed here until we have SVM support for
+ * dmaengine.
+ */
+ if (wq->type == IDXD_WQT_KERNEL && device_pasid_enabled(wq->idxd))
+ return -EOPNOTSUPP;
+
memset(wq->name, 0, WQ_NAME_SIZE + 1);
strncpy(wq->name, buf, WQ_NAME_SIZE);
strreplace(wq->name, '\n', '\0');
@@ -1147,6 +1301,39 @@ static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribu
static struct device_attribute dev_attr_wq_max_batch_size =
__ATTR(max_batch_size, 0644, wq_max_batch_size_show, wq_max_batch_size_store);
+static ssize_t wq_ats_disable_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+ return sprintf(buf, "%u\n", wq->ats_dis);
+}
+
+static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_device *idxd = wq->idxd;
+ bool ats_dis;
+ int rc;
+
+ if (wq->state != IDXD_WQ_DISABLED)
+ return -EPERM;
+
+ if (!idxd->hw.wq_cap.wq_ats_support)
+ return -EOPNOTSUPP;
+
+ rc = kstrtobool(buf, &ats_dis);
+ if (rc < 0)
+ return rc;
+
+ wq->ats_dis = ats_dis;
+
+ return count;
+}
+
+static struct device_attribute dev_attr_wq_ats_disable =
+ __ATTR(ats_disable, 0644, wq_ats_disable_show, wq_ats_disable_store);
+
static struct attribute *idxd_wq_attributes[] = {
&dev_attr_wq_clients.attr,
&dev_attr_wq_state.attr,
@@ -1154,11 +1341,14 @@ static struct attribute *idxd_wq_attributes[] = {
&dev_attr_wq_mode.attr,
&dev_attr_wq_size.attr,
&dev_attr_wq_priority.attr,
+ &dev_attr_wq_block_on_fault.attr,
+ &dev_attr_wq_threshold.attr,
&dev_attr_wq_type.attr,
&dev_attr_wq_name.attr,
&dev_attr_wq_cdev_minor.attr,
&dev_attr_wq_max_transfer_size.attr,
&dev_attr_wq_max_batch_size.attr,
+ &dev_attr_wq_ats_disable.attr,
NULL,
};
@@ -1305,6 +1495,16 @@ static ssize_t clients_show(struct device *dev,
}
static DEVICE_ATTR_RO(clients);
+static ssize_t pasid_enabled_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%u\n", device_pasid_enabled(idxd));
+}
+static DEVICE_ATTR_RO(pasid_enabled);
+
static ssize_t state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1424,6 +1624,7 @@ static struct attribute *idxd_device_attributes[] = {
&dev_attr_gen_cap.attr,
&dev_attr_configurable.attr,
&dev_attr_clients.attr,
+ &dev_attr_pasid_enabled.attr,
&dev_attr_state.attr,
&dev_attr_errors.attr,
&dev_attr_max_tokens.attr,
@@ -1639,7 +1840,7 @@ int idxd_register_bus_type(void)
return 0;
bus_err:
- for (; i > 0; i--)
+ while (--i >= 0)
bus_unregister(idxd_bus_types[i]);
return rc;
}
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 670db04b0757..7f116bbcfad2 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -191,32 +191,13 @@ struct imxdma_filter_data {
int request;
};
-static const struct platform_device_id imx_dma_devtype[] = {
- {
- .name = "imx1-dma",
- .driver_data = IMX1_DMA,
- }, {
- .name = "imx21-dma",
- .driver_data = IMX21_DMA,
- }, {
- .name = "imx27-dma",
- .driver_data = IMX27_DMA,
- }, {
- /* sentinel */
- }
-};
-MODULE_DEVICE_TABLE(platform, imx_dma_devtype);
-
static const struct of_device_id imx_dma_of_dev_id[] = {
{
- .compatible = "fsl,imx1-dma",
- .data = &imx_dma_devtype[IMX1_DMA],
+ .compatible = "fsl,imx1-dma", .data = (const void *)IMX1_DMA,
}, {
- .compatible = "fsl,imx21-dma",
- .data = &imx_dma_devtype[IMX21_DMA],
+ .compatible = "fsl,imx21-dma", .data = (const void *)IMX21_DMA,
}, {
- .compatible = "fsl,imx27-dma",
- .data = &imx_dma_devtype[IMX27_DMA],
+ .compatible = "fsl,imx27-dma", .data = (const void *)IMX27_DMA,
}, {
/* sentinel */
}
@@ -1056,20 +1037,15 @@ static int __init imxdma_probe(struct platform_device *pdev)
{
struct imxdma_engine *imxdma;
struct resource *res;
- const struct of_device_id *of_id;
int ret, i;
int irq, irq_err;
- of_id = of_match_device(imx_dma_of_dev_id, &pdev->dev);
- if (of_id)
- pdev->id_entry = of_id->data;
-
imxdma = devm_kzalloc(&pdev->dev, sizeof(*imxdma), GFP_KERNEL);
if (!imxdma)
return -ENOMEM;
imxdma->dev = &pdev->dev;
- imxdma->devtype = pdev->id_entry->driver_data;
+ imxdma->devtype = (enum imx_dma_type)of_device_get_match_data(&pdev->dev);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
imxdma->base = devm_ioremap_resource(&pdev->dev, res);
@@ -1263,7 +1239,6 @@ static struct platform_driver imxdma_driver = {
.name = "imx-dma",
.of_match_table = imx_dma_of_dev_id,
},
- .id_table = imx_dma_devtype,
.remove = imxdma_remove,
};
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 16b908c77db3..41ba21eea7c8 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -566,37 +566,6 @@ static struct sdma_driver_data sdma_imx8mq = {
.check_ratio = 1,
};
-static const struct platform_device_id sdma_devtypes[] = {
- {
- .name = "imx25-sdma",
- .driver_data = (unsigned long)&sdma_imx25,
- }, {
- .name = "imx31-sdma",
- .driver_data = (unsigned long)&sdma_imx31,
- }, {
- .name = "imx35-sdma",
- .driver_data = (unsigned long)&sdma_imx35,
- }, {
- .name = "imx51-sdma",
- .driver_data = (unsigned long)&sdma_imx51,
- }, {
- .name = "imx53-sdma",
- .driver_data = (unsigned long)&sdma_imx53,
- }, {
- .name = "imx6q-sdma",
- .driver_data = (unsigned long)&sdma_imx6q,
- }, {
- .name = "imx7d-sdma",
- .driver_data = (unsigned long)&sdma_imx7d,
- }, {
- .name = "imx8mq-sdma",
- .driver_data = (unsigned long)&sdma_imx8mq,
- }, {
- /* sentinel */
- }
-};
-MODULE_DEVICE_TABLE(platform, sdma_devtypes);
-
static const struct of_device_id sdma_dt_ids[] = {
{ .compatible = "fsl,imx6q-sdma", .data = &sdma_imx6q, },
{ .compatible = "fsl,imx53-sdma", .data = &sdma_imx53, },
@@ -1998,11 +1967,7 @@ static int sdma_probe(struct platform_device *pdev)
s32 *saddr_arr;
const struct sdma_driver_data *drvdata = NULL;
- if (of_id)
- drvdata = of_id->data;
- else if (pdev->id_entry)
- drvdata = (void *)pdev->id_entry->driver_data;
-
+ drvdata = of_id->data;
if (!drvdata) {
dev_err(&pdev->dev, "unable to find driver data\n");
return -EINVAL;
@@ -2211,7 +2176,6 @@ static struct platform_driver sdma_driver = {
.name = "imx-sdma",
.of_match_table = sdma_dt_ids,
},
- .id_table = sdma_devtypes,
.remove = sdma_remove,
.probe = sdma_probe,
};
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index 38036db284cb..104ad420abbe 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1160,14 +1160,13 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
struct idmac_tx_desc *desc, *descnew;
bool done = false;
u32 ready0, ready1, curbuf, err;
- unsigned long flags;
struct dmaengine_desc_callback cb;
/* IDMAC has cleared the respective BUFx_RDY bit, we manage the buffer */
dev_dbg(dev, "IDMAC irq %d, buf %d\n", irq, ichan->active_buffer);
- spin_lock_irqsave(&ipu_data.lock, flags);
+ spin_lock(&ipu_data.lock);
ready0 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY);
ready1 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY);
@@ -1176,7 +1175,7 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
if (err & (1 << chan_id)) {
idmac_write_ipureg(&ipu_data, 1 << chan_id, IPU_INT_STAT_4);
- spin_unlock_irqrestore(&ipu_data.lock, flags);
+ spin_unlock(&ipu_data.lock);
/*
* Doing this
* ichan->sg[0] = ichan->sg[1] = NULL;
@@ -1188,7 +1187,7 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
chan_id, ready0, ready1, curbuf);
return IRQ_HANDLED;
}
- spin_unlock_irqrestore(&ipu_data.lock, flags);
+ spin_unlock(&ipu_data.lock);
/* Other interrupts do not interfere with this channel */
spin_lock(&ichan->lock);
@@ -1251,9 +1250,9 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
if (unlikely(sgnew)) {
ipu_submit_buffer(ichan, descnew, sgnew, !ichan->active_buffer);
} else {
- spin_lock_irqsave(&ipu_data.lock, flags);
+ spin_lock(&ipu_data.lock);
ipu_ic_disable_task(&ipu_data, chan_id);
- spin_unlock_irqrestore(&ipu_data.lock, flags);
+ spin_unlock(&ipu_data.lock);
ichan->status = IPU_CHANNEL_READY;
/* Continue to check for complete descriptor */
}
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index f609a84c493c..d0b2e601e3e5 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -223,24 +223,23 @@ static irqreturn_t k3_dma_int_handler(int irq, void *dev_id)
i = __ffs(stat);
stat &= ~BIT(i);
if (likely(tc1 & BIT(i)) || (tc2 & BIT(i))) {
- unsigned long flags;
p = &d->phy[i];
c = p->vchan;
if (c && (tc1 & BIT(i))) {
- spin_lock_irqsave(&c->vc.lock, flags);
+ spin_lock(&c->vc.lock);
if (p->ds_run != NULL) {
vchan_cookie_complete(&p->ds_run->vd);
p->ds_done = p->ds_run;
p->ds_run = NULL;
}
- spin_unlock_irqrestore(&c->vc.lock, flags);
+ spin_unlock(&c->vc.lock);
}
if (c && (tc2 & BIT(i))) {
- spin_lock_irqsave(&c->vc.lock, flags);
+ spin_lock(&c->vc.lock);
if (p->ds_run != NULL)
vchan_cyclic_callback(&p->ds_run->vd);
- spin_unlock_irqrestore(&c->vc.lock, flags);
+ spin_unlock(&c->vc.lock);
}
irq_chan |= BIT(i);
}
diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c
index f133ae8dece1..6ad8afbb95f2 100644
--- a/drivers/dma/mediatek/mtk-hsdma.c
+++ b/drivers/dma/mediatek/mtk-hsdma.c
@@ -1007,6 +1007,7 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
return 0;
err_free:
+ mtk_hsdma_hw_deinit(hsdma);
of_dma_controller_free(pdev->dev.of_node);
err_unregister:
dma_async_device_unregister(dd);
diff --git a/drivers/dma/milbeaut-xdmac.c b/drivers/dma/milbeaut-xdmac.c
index 85a597228fb0..d29d01e730aa 100644
--- a/drivers/dma/milbeaut-xdmac.c
+++ b/drivers/dma/milbeaut-xdmac.c
@@ -160,10 +160,9 @@ static irqreturn_t milbeaut_xdmac_interrupt(int irq, void *dev_id)
{
struct milbeaut_xdmac_chan *mc = dev_id;
struct milbeaut_xdmac_desc *md;
- unsigned long flags;
u32 val;
- spin_lock_irqsave(&mc->vc.lock, flags);
+ spin_lock(&mc->vc.lock);
/* Ack and Stop */
val = FIELD_PREP(M10V_XDDSD_IS_MASK, 0x0);
@@ -177,7 +176,7 @@ static irqreturn_t milbeaut_xdmac_interrupt(int irq, void *dev_id)
milbeaut_xdmac_start(mc);
out:
- spin_unlock_irqrestore(&mc->vc.lock, flags);
+ spin_unlock(&mc->vc.lock);
return IRQ_HANDLED;
}
@@ -351,7 +350,7 @@ static int milbeaut_xdmac_probe(struct platform_device *pdev)
ret = dma_async_device_register(ddev);
if (ret)
- return ret;
+ goto disable_xdmac;
ret = of_dma_controller_register(dev->of_node,
of_dma_simple_xlate, mdev);
@@ -364,6 +363,8 @@ static int milbeaut_xdmac_probe(struct platform_device *pdev)
unregister_dmac:
dma_async_device_unregister(ddev);
+disable_xdmac:
+ disable_xdmac(mdev);
return ret;
}
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
index 347146a6e1d0..74755093e14b 100644
--- a/drivers/dma/moxart-dma.c
+++ b/drivers/dma/moxart-dma.c
@@ -524,7 +524,6 @@ static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
struct moxart_dmadev *mc = devid;
struct moxart_chan *ch = &mc->slave_chans[0];
unsigned int i;
- unsigned long flags;
u32 ctrl;
dev_dbg(chan2dev(&ch->vc.chan), "%s\n", __func__);
@@ -541,14 +540,14 @@ static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
if (ctrl & APB_DMA_FIN_INT_STS) {
ctrl &= ~APB_DMA_FIN_INT_STS;
if (ch->desc) {
- spin_lock_irqsave(&ch->vc.lock, flags);
+ spin_lock(&ch->vc.lock);
if (++ch->sgidx < ch->desc->sglen) {
moxart_dma_start_sg(ch, ch->sgidx);
} else {
vchan_cookie_complete(&ch->desc->vd);
moxart_dma_start_desc(&ch->vc.chan);
}
- spin_unlock_irqrestore(&ch->vc.lock, flags);
+ spin_unlock(&ch->vc.lock);
}
}
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 00cd1335eeba..23b232b57518 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -1455,7 +1455,7 @@ static struct platform_driver mv_xor_driver = {
.resume = mv_xor_resume,
.driver = {
.name = MV_XOR_NAME,
- .of_match_table = of_match_ptr(mv_xor_dt_ids),
+ .of_match_table = mv_xor_dt_ids,
},
};
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index 2753a6b916f6..9b0d463f89bb 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -771,8 +771,10 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
goto disable_clk;
msi_desc = first_msi_entry(&pdev->dev);
- if (!msi_desc)
+ if (!msi_desc) {
+ ret = -ENODEV;
goto free_msi_irqs;
+ }
xor_dev->msi_desc = msi_desc;
ret = devm_request_irq(&pdev->dev, msi_desc->irq,
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 65f816b40c32..994fc4d2aca4 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -167,29 +167,11 @@ static struct mxs_dma_type mxs_dma_types[] = {
}
};
-static const struct platform_device_id mxs_dma_ids[] = {
- {
- .name = "imx23-dma-apbh",
- .driver_data = (kernel_ulong_t) &mxs_dma_types[0],
- }, {
- .name = "imx23-dma-apbx",
- .driver_data = (kernel_ulong_t) &mxs_dma_types[1],
- }, {
- .name = "imx28-dma-apbh",
- .driver_data = (kernel_ulong_t) &mxs_dma_types[2],
- }, {
- .name = "imx28-dma-apbx",
- .driver_data = (kernel_ulong_t) &mxs_dma_types[3],
- }, {
- /* end of list */
- }
-};
-
static const struct of_device_id mxs_dma_dt_ids[] = {
- { .compatible = "fsl,imx23-dma-apbh", .data = &mxs_dma_ids[0], },
- { .compatible = "fsl,imx23-dma-apbx", .data = &mxs_dma_ids[1], },
- { .compatible = "fsl,imx28-dma-apbh", .data = &mxs_dma_ids[2], },
- { .compatible = "fsl,imx28-dma-apbx", .data = &mxs_dma_ids[3], },
+ { .compatible = "fsl,imx23-dma-apbh", .data = &mxs_dma_types[0], },
+ { .compatible = "fsl,imx23-dma-apbx", .data = &mxs_dma_types[1], },
+ { .compatible = "fsl,imx28-dma-apbh", .data = &mxs_dma_types[2], },
+ { .compatible = "fsl,imx28-dma-apbx", .data = &mxs_dma_types[3], },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mxs_dma_dt_ids);
@@ -762,8 +744,6 @@ static struct dma_chan *mxs_dma_xlate(struct of_phandle_args *dma_spec,
static int __init mxs_dma_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- const struct platform_device_id *id_entry;
- const struct of_device_id *of_id;
const struct mxs_dma_type *dma_type;
struct mxs_dma_engine *mxs_dma;
struct resource *iores;
@@ -779,13 +759,7 @@ static int __init mxs_dma_probe(struct platform_device *pdev)
return ret;
}
- of_id = of_match_device(mxs_dma_dt_ids, &pdev->dev);
- if (of_id)
- id_entry = of_id->data;
- else
- id_entry = platform_get_device_id(pdev);
-
- dma_type = (struct mxs_dma_type *)id_entry->driver_data;
+ dma_type = (struct mxs_dma_type *)of_device_get_match_data(&pdev->dev);
mxs_dma->type = dma_type->type;
mxs_dma->dev_id = dma_type->id;
@@ -865,7 +839,6 @@ static struct platform_driver mxs_dma_driver = {
.name = "mxs-dma",
.of_match_table = mxs_dma_dt_ids,
},
- .id_table = mxs_dma_ids,
};
static int __init mxs_dma_module_init(void)
diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c
index 8a4f608904b9..ec00b20ae8e4 100644
--- a/drivers/dma/of-dma.c
+++ b/drivers/dma/of-dma.c
@@ -75,8 +75,18 @@ static struct dma_chan *of_dma_router_xlate(struct of_phandle_args *dma_spec,
ofdma->dma_router->route_free(ofdma->dma_router->dev,
route_data);
} else {
+ int ret = 0;
+
chan->router = ofdma->dma_router;
chan->route_data = route_data;
+
+ if (chan->device->device_router_config)
+ ret = chan->device->device_router_config(chan);
+
+ if (ret) {
+ dma_release_channel(chan);
+ chan = ERR_PTR(ret);
+ }
}
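+ /*
+ * A router-aware controller can use this new callback to program the
+ * route once the channel and route_data are known. A minimal sketch,
+ * assuming a hypothetical "foo" driver:
+ *
+ *	static int foo_router_config(struct dma_chan *chan)
+ *	{
+ *		struct foo_chan *fchan = to_foo_chan(chan);
+ *
+ *		return foo_program_route(fchan, chan->route_data);
+ *	}
+ *
+ * registered at probe time via fdev->ddev.device_router_config.
+ */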
/*
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 0f5c19370f6d..bc0f66af0f11 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -1527,8 +1527,6 @@ static int pl330_submit_req(struct pl330_thread *thrd,
/* First dry run to check if req is acceptable */
ret = _setup_req(pl330, 1, thrd, idx, &xs);
- if (ret < 0)
- goto xfer_exit;
if (ret > pl330->mcbufsz / 2) {
dev_info(pl330->ddma.dev, "%s:%d Try increasing mcbufsz (%i/%i)\n",
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 71cdaaa8134c..df7704053d91 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -69,7 +69,7 @@ struct ppc_dma_chan_ref {
};
/* The list of channels exported by ppc440spe ADMA */
-struct list_head
+static struct list_head
ppc440spe_adma_chan_list = LIST_HEAD_INIT(ppc440spe_adma_chan_list);
/* This flag is set when want to refetch the xor chain in the interrupt
@@ -559,7 +559,6 @@ static void ppc440spe_desc_set_src_mult(struct ppc440spe_adma_desc_slot *desc,
int sg_index, unsigned char mult_value)
{
struct dma_cdb *dma_hw_desc;
- struct xor_cb *xor_hw_desc;
u32 *psgu;
switch (chan->device->id) {
@@ -590,7 +589,6 @@ static void ppc440spe_desc_set_src_mult(struct ppc440spe_adma_desc_slot *desc,
*psgu |= cpu_to_le32(mult_value << mult_index);
break;
case PPC440SPE_XOR_ID:
- xor_hw_desc = desc->hw_desc;
break;
default:
BUG();
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index 349fb312c872..4a2a796e348c 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -606,7 +606,6 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
struct pxad_chan *chan = phy->vchan;
struct virt_dma_desc *vd, *tmp;
unsigned int dcsr;
- unsigned long flags;
bool vd_completed;
dma_cookie_t last_started = 0;
@@ -616,7 +615,7 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
if (dcsr & PXA_DCSR_RUN)
return IRQ_NONE;
- spin_lock_irqsave(&chan->vc.lock, flags);
+ spin_lock(&chan->vc.lock);
list_for_each_entry_safe(vd, tmp, &chan->vc.desc_issued, node) {
vd_completed = is_desc_completed(vd);
dev_dbg(&chan->vc.chan.dev->device,
@@ -658,7 +657,7 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
pxad_launch_chan(chan, to_pxad_sw_desc(vd));
}
}
- spin_unlock_irqrestore(&chan->vc.lock, flags);
+ spin_unlock(&chan->vc.lock);
wake_up(&chan->wq_state);
return IRQ_HANDLED;
diff --git a/drivers/dma/qcom/Kconfig b/drivers/dma/qcom/Kconfig
index 3bcb689162c6..365f94eb3b08 100644
--- a/drivers/dma/qcom/Kconfig
+++ b/drivers/dma/qcom/Kconfig
@@ -1,4 +1,15 @@
# SPDX-License-Identifier: GPL-2.0-only
+config QCOM_ADM
+ tristate "Qualcomm ADM support"
+ depends on (ARCH_QCOM || COMPILE_TEST) && !PHYS_ADDR_T_64BIT
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Enable support for the Qualcomm Application Data Mover (ADM) DMA
+ controller, as present on MSM8x60, APQ8064, and IPQ8064 devices.
+ This controller provides DMA capabilities for both general purpose
+ and on-chip peripheral devices.
+
config QCOM_BAM_DMA
tristate "QCOM BAM DMA support"
depends on ARCH_QCOM || (COMPILE_TEST && OF && ARM)
@@ -8,6 +19,18 @@ config QCOM_BAM_DMA
Enable support for the QCOM BAM DMA controller. This controller
provides DMA capabilities for a variety of on-chip devices.
+config QCOM_GPI_DMA
+ tristate "Qualcomm Technologies GPI DMA support"
+ depends on ARCH_QCOM
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Enable support for the QCOM GPI DMA controller. This controller
+ provides DMA capabilities for a variety of peripheral buses such
+ as I2C, UART, and SPI. By using the GPI dmaengine driver, bus
+ drivers can use a standardized, protocol-independent interface to
+ transfer data between DDR and the peripheral.
+
config QCOM_HIDMA_MGMT
tristate "Qualcomm Technologies HIDMA Management support"
select DMA_ENGINE
diff --git a/drivers/dma/qcom/Makefile b/drivers/dma/qcom/Makefile
index 1ae92da88b0c..50f1e7014693 100644
--- a/drivers/dma/qcom/Makefile
+++ b/drivers/dma/qcom/Makefile
@@ -1,5 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_QCOM_ADM) += qcom_adm.o
obj-$(CONFIG_QCOM_BAM_DMA) += bam_dma.o
+obj-$(CONFIG_QCOM_GPI_DMA) += gpi.o
obj-$(CONFIG_QCOM_HIDMA_MGMT) += hdma_mgmt.o
hdma_mgmt-objs := hidma_mgmt.o hidma_mgmt_sys.o
obj-$(CONFIG_QCOM_HIDMA) += hdma.o
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index 4eeb8bb27279..88579857ca1d 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -630,7 +630,7 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
GFP_NOWAIT);
if (!async_desc)
- goto err_out;
+ return NULL;
if (flags & DMA_PREP_FENCE)
async_desc->flags |= DESC_FLAG_NWD;
@@ -670,10 +670,6 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
}
return vchan_tx_prep(&bchan->vc, &async_desc->vd, flags);
-
-err_out:
- kfree(async_desc);
- return NULL;
}
/**
@@ -875,7 +871,7 @@ static irqreturn_t bam_dma_irq(int irq, void *data)
ret = bam_pm_runtime_get_sync(bdev->dev);
if (ret < 0)
- return ret;
+ return IRQ_NONE;
if (srcs & BAM_IRQ) {
clr_mask = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_STTS));
diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
new file mode 100644
index 000000000000..1a0bf6b0567a
--- /dev/null
+++ b/drivers/dma/qcom/gpi.c
@@ -0,0 +1,2303 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2020, Linaro Limited
+ */
+
+#include <dt-bindings/dma/qcom-gpi.h>
+#include <linux/bitfield.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/module.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/dma/qcom-gpi-dma.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include "../dmaengine.h"
+#include "../virt-dma.h"
+
+#define TRE_TYPE_DMA 0x10
+#define TRE_TYPE_GO 0x20
+#define TRE_TYPE_CONFIG0 0x22
+
+/* TRE flags */
+#define TRE_FLAGS_CHAIN BIT(0)
+#define TRE_FLAGS_IEOB BIT(8)
+#define TRE_FLAGS_IEOT BIT(9)
+#define TRE_FLAGS_BEI BIT(10)
+#define TRE_FLAGS_LINK BIT(11)
+#define TRE_FLAGS_TYPE GENMASK(23, 16)
+
+/* SPI CONFIG0 WD0 */
+#define TRE_SPI_C0_WORD_SZ GENMASK(4, 0)
+#define TRE_SPI_C0_LOOPBACK BIT(8)
+#define TRE_SPI_C0_CS BIT(11)
+#define TRE_SPI_C0_CPHA BIT(12)
+#define TRE_SPI_C0_CPOL BIT(13)
+#define TRE_SPI_C0_TX_PACK BIT(24)
+#define TRE_SPI_C0_RX_PACK BIT(25)
+
+/* CONFIG0 WD2 */
+#define TRE_C0_CLK_DIV GENMASK(11, 0)
+#define TRE_C0_CLK_SRC GENMASK(19, 16)
+
+/* SPI GO WD0 */
+#define TRE_SPI_GO_CMD GENMASK(4, 0)
+#define TRE_SPI_GO_CS GENMASK(10, 8)
+#define TRE_SPI_GO_FRAG BIT(26)
+
+/* GO WD2 */
+#define TRE_RX_LEN GENMASK(23, 0)
+
+/* I2C Config0 WD0 */
+#define TRE_I2C_C0_TLOW GENMASK(7, 0)
+#define TRE_I2C_C0_THIGH GENMASK(15, 8)
+#define TRE_I2C_C0_TCYL GENMASK(23, 16)
+#define TRE_I2C_C0_TX_PACK BIT(24)
+#define TRE_I2C_C0_RX_PACK BIT(25)
+
+/* I2C GO WD0 */
+#define TRE_I2C_GO_CMD GENMASK(4, 0)
+#define TRE_I2C_GO_ADDR GENMASK(14, 8)
+#define TRE_I2C_GO_STRETCH BIT(26)
+
+/* DMA TRE */
+#define TRE_DMA_LEN GENMASK(23, 0)
+
+/* Register offsets from gpi-top */
+#define GPII_n_CH_k_CNTXT_0_OFFS(n, k) (0x20000 + (0x4000 * (n)) + (0x80 * (k)))
+#define GPII_n_CH_k_CNTXT_0_EL_SIZE GENMASK(31, 24)
+#define GPII_n_CH_k_CNTXT_0_CHSTATE GENMASK(23, 20)
+#define GPII_n_CH_k_CNTXT_0_ERIDX GENMASK(18, 14)
+#define GPII_n_CH_k_CNTXT_0_DIR BIT(3)
+#define GPII_n_CH_k_CNTXT_0_PROTO GENMASK(2, 0)
+
+#define GPII_n_CH_k_CNTXT_0(el_size, erindex, dir, chtype_proto) \
+ (FIELD_PREP(GPII_n_CH_k_CNTXT_0_EL_SIZE, el_size) | \
+ FIELD_PREP(GPII_n_CH_k_CNTXT_0_ERIDX, erindex) | \
+ FIELD_PREP(GPII_n_CH_k_CNTXT_0_DIR, dir) | \
+ FIELD_PREP(GPII_n_CH_k_CNTXT_0_PROTO, chtype_proto))
+
+#define GPI_CHTYPE_DIR_IN (0)
+#define GPI_CHTYPE_DIR_OUT (1)
+
+#define GPI_CHTYPE_PROTO_GPI (0x2)
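+
+/*
+ * Example: GPII_n_CH_k_CNTXT_0(0x10, 0, GPI_CHTYPE_DIR_OUT,
+ * GPI_CHTYPE_PROTO_GPI) packs to 0x10000000 | 0x0 | BIT(3) | 0x2,
+ * i.e. 0x1000000a.
+ */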
+
+#define GPII_n_CH_k_DOORBELL_0_OFFS(n, k) (0x22000 + (0x4000 * (n)) + (0x8 * (k)))
+#define GPII_n_CH_CMD_OFFS(n) (0x23008 + (0x4000 * (n)))
+#define GPII_n_CH_CMD_OPCODE GENMASK(31, 24)
+#define GPII_n_CH_CMD_CHID GENMASK(7, 0)
+#define GPII_n_CH_CMD(opcode, chid) \
+ (FIELD_PREP(GPII_n_CH_CMD_OPCODE, opcode) | \
+ FIELD_PREP(GPII_n_CH_CMD_CHID, chid))
+
+#define GPII_n_CH_CMD_ALLOCATE (0)
+#define GPII_n_CH_CMD_START (1)
+#define GPII_n_CH_CMD_STOP (2)
+#define GPII_n_CH_CMD_RESET (9)
+#define GPII_n_CH_CMD_DE_ALLOC (10)
+#define GPII_n_CH_CMD_UART_SW_STALE (32)
+#define GPII_n_CH_CMD_UART_RFR_READY (33)
+#define GPII_n_CH_CMD_UART_RFR_NOT_READY (34)
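+
+/*
+ * Example: GPII_n_CH_CMD(GPII_n_CH_CMD_START, 1) puts opcode 1 in bits
+ * 31:24 and chid 1 in bits 7:0, i.e. 0x01000001.
+ */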
+
+/* EV Context Array */
+#define GPII_n_EV_CH_k_CNTXT_0_OFFS(n, k) (0x21000 + (0x4000 * (n)) + (0x80 * (k)))
+#define GPII_n_EV_k_CNTXT_0_EL_SIZE GENMASK(31, 24)
+#define GPII_n_EV_k_CNTXT_0_CHSTATE GENMASK(23, 20)
+#define GPII_n_EV_k_CNTXT_0_INTYPE BIT(16)
+#define GPII_n_EV_k_CNTXT_0_CHTYPE GENMASK(3, 0)
+
+#define GPII_n_EV_k_CNTXT_0(el_size, inttype, chtype) \
+ (FIELD_PREP(GPII_n_EV_k_CNTXT_0_EL_SIZE, el_size) | \
+ FIELD_PREP(GPII_n_EV_k_CNTXT_0_INTYPE, inttype) | \
+ FIELD_PREP(GPII_n_EV_k_CNTXT_0_CHTYPE, chtype))
+
+#define GPI_INTTYPE_IRQ (1)
+#define GPI_CHTYPE_GPI_EV (0x2)
+
+enum CNTXT_OFFS {
+ CNTXT_0_CONFIG = 0x0,
+ CNTXT_1_R_LENGTH = 0x4,
+ CNTXT_2_RING_BASE_LSB = 0x8,
+ CNTXT_3_RING_BASE_MSB = 0xC,
+ CNTXT_4_RING_RP_LSB = 0x10,
+ CNTXT_5_RING_RP_MSB = 0x14,
+ CNTXT_6_RING_WP_LSB = 0x18,
+ CNTXT_7_RING_WP_MSB = 0x1C,
+ CNTXT_8_RING_INT_MOD = 0x20,
+ CNTXT_9_RING_INTVEC = 0x24,
+ CNTXT_10_RING_MSI_LSB = 0x28,
+ CNTXT_11_RING_MSI_MSB = 0x2C,
+ CNTXT_12_RING_RP_UPDATE_LSB = 0x30,
+ CNTXT_13_RING_RP_UPDATE_MSB = 0x34,
+};
+
+#define GPII_n_EV_CH_k_DOORBELL_0_OFFS(n, k) (0x22100 + (0x4000 * (n)) + (0x8 * (k)))
+#define GPII_n_EV_CH_CMD_OFFS(n) (0x23010 + (0x4000 * (n)))
+#define GPII_n_EV_CMD_OPCODE GENMASK(31, 24)
+#define GPII_n_EV_CMD_CHID GENMASK(7, 0)
+#define GPII_n_EV_CMD(opcode, chid) \
+ (FIELD_PREP(GPII_n_EV_CMD_OPCODE, opcode) | \
+ FIELD_PREP(GPII_n_EV_CMD_CHID, chid))
+
+#define GPII_n_EV_CH_CMD_ALLOCATE (0x00)
+#define GPII_n_EV_CH_CMD_RESET (0x09)
+#define GPII_n_EV_CH_CMD_DE_ALLOC (0x0A)
+
+#define GPII_n_CNTXT_TYPE_IRQ_OFFS(n) (0x23080 + (0x4000 * (n)))
+
+/* mask type register */
+#define GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS(n) (0x23088 + (0x4000 * (n)))
+#define GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK GENMASK(6, 0)
+#define GPII_n_CNTXT_TYPE_IRQ_MSK_GENERAL BIT(6)
+#define GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB BIT(3)
+#define GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB BIT(2)
+#define GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL BIT(1)
+#define GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL BIT(0)
+
+#define GPII_n_CNTXT_SRC_GPII_CH_IRQ_OFFS(n) (0x23090 + (0x4000 * (n)))
+#define GPII_n_CNTXT_SRC_EV_CH_IRQ_OFFS(n) (0x23094 + (0x4000 * (n)))
+
+/* Mask channel control interrupt register */
+#define GPII_n_CNTXT_SRC_CH_IRQ_MSK_OFFS(n) (0x23098 + (0x4000 * (n)))
+#define GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK GENMASK(1, 0)
+
+/* Mask event control interrupt register */
+#define GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(n) (0x2309C + (0x4000 * (n)))
+#define GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK BIT(0)
+
+#define GPII_n_CNTXT_SRC_CH_IRQ_CLR_OFFS(n) (0x230A0 + (0x4000 * (n)))
+#define GPII_n_CNTXT_SRC_EV_CH_IRQ_CLR_OFFS(n) (0x230A4 + (0x4000 * (n)))
+
+/* Mask event interrupt register */
+#define GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(n) (0x230B8 + (0x4000 * (n)))
+#define GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK BIT(0)
+
+#define GPII_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(n) (0x230C0 + (0x4000 * (n)))
+#define GPII_n_CNTXT_GLOB_IRQ_STTS_OFFS(n) (0x23100 + (0x4000 * (n)))
+#define GPI_GLOB_IRQ_ERROR_INT_MSK BIT(0)
+
+/* GPII specific Global - Enable bit register */
+#define GPII_n_CNTXT_GLOB_IRQ_EN_OFFS(n) (0x23108 + (0x4000 * (n)))
+#define GPII_n_CNTXT_GLOB_IRQ_CLR_OFFS(n) (0x23110 + (0x4000 * (n)))
+#define GPII_n_CNTXT_GPII_IRQ_STTS_OFFS(n) (0x23118 + (0x4000 * (n)))
+
+/* GPII general interrupt - Enable bit register */
+#define GPII_n_CNTXT_GPII_IRQ_EN_OFFS(n) (0x23120 + (0x4000 * (n)))
+#define GPII_n_CNTXT_GPII_IRQ_EN_BMSK GENMASK(3, 0)
+
+#define GPII_n_CNTXT_GPII_IRQ_CLR_OFFS(n) (0x23128 + (0x4000 * (n)))
+
+/* GPII Interrupt Type register */
+#define GPII_n_CNTXT_INTSET_OFFS(n) (0x23180 + (0x4000 * (n)))
+#define GPII_n_CNTXT_INTSET_BMSK BIT(0)
+
+#define GPII_n_CNTXT_MSI_BASE_LSB_OFFS(n) (0x23188 + (0x4000 * (n)))
+#define GPII_n_CNTXT_MSI_BASE_MSB_OFFS(n) (0x2318C + (0x4000 * (n)))
+#define GPII_n_CNTXT_SCRATCH_0_OFFS(n) (0x23400 + (0x4000 * (n)))
+#define GPII_n_CNTXT_SCRATCH_1_OFFS(n) (0x23404 + (0x4000 * (n)))
+
+#define GPII_n_ERROR_LOG_OFFS(n) (0x23200 + (0x4000 * (n)))
+
+/* QOS Registers */
+#define GPII_n_CH_k_QOS_OFFS(n, k) (0x2005C + (0x4000 * (n)) + (0x80 * (k)))
+
+/* Scratch registers */
+#define GPII_n_CH_k_SCRATCH_0_OFFS(n, k) (0x20060 + (0x4000 * (n)) + (0x80 * (k)))
+#define GPII_n_CH_k_SCRATCH_0_SEID GENMASK(2, 0)
+#define GPII_n_CH_k_SCRATCH_0_PROTO GENMASK(7, 4)
+#define GPII_n_CH_k_SCRATCH_0_PAIR GENMASK(20, 16)
+#define GPII_n_CH_k_SCRATCH_0(pair, proto, seid) \
+ (FIELD_PREP(GPII_n_CH_k_SCRATCH_0_PAIR, pair) | \
+ FIELD_PREP(GPII_n_CH_k_SCRATCH_0_PROTO, proto) | \
+ FIELD_PREP(GPII_n_CH_k_SCRATCH_0_SEID, seid))
+#define GPII_n_CH_k_SCRATCH_1_OFFS(n, k) (0x20064 + (0x4000 * (n)) + (0x80 * (k)))
+#define GPII_n_CH_k_SCRATCH_2_OFFS(n, k) (0x20068 + (0x4000 * (n)) + (0x80 * (k)))
+#define GPII_n_CH_k_SCRATCH_3_OFFS(n, k) (0x2006C + (0x4000 * (n)) + (0x80 * (k)))
+
+struct __packed gpi_tre {
+ u32 dword[4];
+};
+
+enum msm_gpi_tce_code {
+ MSM_GPI_TCE_SUCCESS = 1,
+ MSM_GPI_TCE_EOT = 2,
+ MSM_GPI_TCE_EOB = 4,
+ MSM_GPI_TCE_UNEXP_ERR = 16,
+};
+
+#define CMD_TIMEOUT_MS (250)
+
+#define MAX_CHANNELS_PER_GPII (2)
+#define GPI_TX_CHAN (0)
+#define GPI_RX_CHAN (1)
+#define STATE_IGNORE (U32_MAX)
+#define EV_FACTOR (2)
+#define REQ_OF_DMA_ARGS (5) /* # of arguments required from client */
+#define CHAN_TRES 64
+
+struct __packed xfer_compl_event {
+ u64 ptr;
+ u32 length:24;
+ u8 code;
+ u16 status;
+ u8 type;
+ u8 chid;
+};
+
+struct __packed immediate_data_event {
+ u8 data_bytes[8];
+ u8 length:4;
+ u8 resvd:4;
+ u16 tre_index;
+ u8 code;
+ u16 status;
+ u8 type;
+ u8 chid;
+};
+
+struct __packed qup_notif_event {
+ u32 status;
+ u32 time;
+ u32 count:24;
+ u8 resvd;
+ u16 resvd1;
+ u8 type;
+ u8 chid;
+};
+
+struct __packed gpi_ere {
+ u32 dword[4];
+};
+
+enum GPI_EV_TYPE {
+ XFER_COMPLETE_EV_TYPE = 0x22,
+ IMMEDIATE_DATA_EV_TYPE = 0x30,
+ QUP_NOTIF_EV_TYPE = 0x31,
+ STALE_EV_TYPE = 0xFF,
+};
+
+union __packed gpi_event {
+ struct __packed xfer_compl_event xfer_compl_event;
+ struct __packed immediate_data_event immediate_data_event;
+ struct __packed qup_notif_event qup_notif_event;
+ struct __packed gpi_ere gpi_ere;
+};
+
+enum gpii_irq_settings {
+ DEFAULT_IRQ_SETTINGS,
+ MASK_IEOB_SETTINGS,
+};
+
+enum gpi_ev_state {
+ DEFAULT_EV_CH_STATE = 0,
+ EV_STATE_NOT_ALLOCATED = DEFAULT_EV_CH_STATE,
+ EV_STATE_ALLOCATED,
+ MAX_EV_STATES
+};
+
+static const char *const gpi_ev_state_str[MAX_EV_STATES] = {
+ [EV_STATE_NOT_ALLOCATED] = "NOT ALLOCATED",
+ [EV_STATE_ALLOCATED] = "ALLOCATED",
+};
+
+#define TO_GPI_EV_STATE_STR(_state) (((_state) >= MAX_EV_STATES) ? \
+ "INVALID" : gpi_ev_state_str[(_state)])
+
+enum gpi_ch_state {
+ DEFAULT_CH_STATE = 0x0,
+ CH_STATE_NOT_ALLOCATED = DEFAULT_CH_STATE,
+ CH_STATE_ALLOCATED = 0x1,
+ CH_STATE_STARTED = 0x2,
+ CH_STATE_STOPPED = 0x3,
+ CH_STATE_STOP_IN_PROC = 0x4,
+ CH_STATE_ERROR = 0xf,
+ MAX_CH_STATES
+};
+
+enum gpi_cmd {
+ GPI_CH_CMD_BEGIN,
+ GPI_CH_CMD_ALLOCATE = GPI_CH_CMD_BEGIN,
+ GPI_CH_CMD_START,
+ GPI_CH_CMD_STOP,
+ GPI_CH_CMD_RESET,
+ GPI_CH_CMD_DE_ALLOC,
+ GPI_CH_CMD_UART_SW_STALE,
+ GPI_CH_CMD_UART_RFR_READY,
+ GPI_CH_CMD_UART_RFR_NOT_READY,
+ GPI_CH_CMD_END = GPI_CH_CMD_UART_RFR_NOT_READY,
+ GPI_EV_CMD_BEGIN,
+ GPI_EV_CMD_ALLOCATE = GPI_EV_CMD_BEGIN,
+ GPI_EV_CMD_RESET,
+ GPI_EV_CMD_DEALLOC,
+ GPI_EV_CMD_END = GPI_EV_CMD_DEALLOC,
+ GPI_MAX_CMD,
+};
+
+#define IS_CHAN_CMD(_cmd) ((_cmd) <= GPI_CH_CMD_END)
+
+static const char *const gpi_cmd_str[GPI_MAX_CMD] = {
+ [GPI_CH_CMD_ALLOCATE] = "CH ALLOCATE",
+ [GPI_CH_CMD_START] = "CH START",
+ [GPI_CH_CMD_STOP] = "CH STOP",
+ [GPI_CH_CMD_RESET] = "CH_RESET",
+ [GPI_CH_CMD_DE_ALLOC] = "DE ALLOC",
+ [GPI_CH_CMD_UART_SW_STALE] = "UART SW STALE",
+ [GPI_CH_CMD_UART_RFR_READY] = "UART RFR READY",
+ [GPI_CH_CMD_UART_RFR_NOT_READY] = "UART RFR NOT READY",
+ [GPI_EV_CMD_ALLOCATE] = "EV ALLOCATE",
+ [GPI_EV_CMD_RESET] = "EV RESET",
+ [GPI_EV_CMD_DEALLOC] = "EV DEALLOC",
+};
+
+#define TO_GPI_CMD_STR(_cmd) (((_cmd) >= GPI_MAX_CMD) ? "INVALID" : \
+ gpi_cmd_str[(_cmd)])
+
+/*
+ * @DISABLE_STATE: no register access allowed
+ * @CONFIG_STATE: client has configured the channel
+ * @PREPARE_HARDWARE: register access is allowed;
+ * however, no events are processed
+ * @ACTIVE_STATE: channels are fully operational
+ * @PREPARE_TERMINATE: graceful termination of channels;
+ * register access is allowed
+ * @PAUSE_STATE: channels are active, but not processing any events
+ */
+enum gpi_pm_state {
+ DISABLE_STATE,
+ CONFIG_STATE,
+ PREPARE_HARDWARE,
+ ACTIVE_STATE,
+ PREPARE_TERMINATE,
+ PAUSE_STATE,
+ MAX_PM_STATE
+};
+
+#define REG_ACCESS_VALID(_pm_state) ((_pm_state) >= PREPARE_HARDWARE)
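+
+/*
+ * Example: REG_ACCESS_VALID(CONFIG_STATE) is false, while
+ * REG_ACCESS_VALID(PREPARE_HARDWARE) and all later states are true.
+ */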
+
+static const char *const gpi_pm_state_str[MAX_PM_STATE] = {
+ [DISABLE_STATE] = "DISABLE",
+ [CONFIG_STATE] = "CONFIG",
+ [PREPARE_HARDWARE] = "PREPARE HARDWARE",
+ [ACTIVE_STATE] = "ACTIVE",
+ [PREPARE_TERMINATE] = "PREPARE TERMINATE",
+ [PAUSE_STATE] = "PAUSE",
+};
+
+#define TO_GPI_PM_STR(_state) (((_state) >= MAX_PM_STATE) ? \
+ "INVALID" : gpi_pm_state_str[(_state)])
+
+static const struct {
+ enum gpi_cmd gpi_cmd;
+ u32 opcode;
+ u32 state;
+} gpi_cmd_info[GPI_MAX_CMD] = {
+ {
+ GPI_CH_CMD_ALLOCATE,
+ GPII_n_CH_CMD_ALLOCATE,
+ CH_STATE_ALLOCATED,
+ },
+ {
+ GPI_CH_CMD_START,
+ GPII_n_CH_CMD_START,
+ CH_STATE_STARTED,
+ },
+ {
+ GPI_CH_CMD_STOP,
+ GPII_n_CH_CMD_STOP,
+ CH_STATE_STOPPED,
+ },
+ {
+ GPI_CH_CMD_RESET,
+ GPII_n_CH_CMD_RESET,
+ CH_STATE_ALLOCATED,
+ },
+ {
+ GPI_CH_CMD_DE_ALLOC,
+ GPII_n_CH_CMD_DE_ALLOC,
+ CH_STATE_NOT_ALLOCATED,
+ },
+ {
+ GPI_CH_CMD_UART_SW_STALE,
+ GPII_n_CH_CMD_UART_SW_STALE,
+ STATE_IGNORE,
+ },
+ {
+ GPI_CH_CMD_UART_RFR_READY,
+ GPII_n_CH_CMD_UART_RFR_READY,
+ STATE_IGNORE,
+ },
+ {
+ GPI_CH_CMD_UART_RFR_NOT_READY,
+ GPII_n_CH_CMD_UART_RFR_NOT_READY,
+ STATE_IGNORE,
+ },
+ {
+ GPI_EV_CMD_ALLOCATE,
+ GPII_n_EV_CH_CMD_ALLOCATE,
+ EV_STATE_ALLOCATED,
+ },
+ {
+ GPI_EV_CMD_RESET,
+ GPII_n_EV_CH_CMD_RESET,
+ EV_STATE_ALLOCATED,
+ },
+ {
+ GPI_EV_CMD_DEALLOC,
+ GPII_n_EV_CH_CMD_DE_ALLOC,
+ EV_STATE_NOT_ALLOCATED,
+ },
+};
+
+struct gpi_ring {
+ void *pre_aligned;
+ size_t alloc_size;
+ phys_addr_t phys_addr;
+ dma_addr_t dma_handle;
+ void *base;
+ void *wp;
+ void *rp;
+ u32 len;
+ u32 el_size;
+ u32 elements;
+ bool configured;
+};
+
+struct gpi_dev {
+ struct dma_device dma_device;
+ struct device *dev;
+ struct resource *res;
+ void __iomem *regs;
+ void __iomem *ee_base; /* ee register base address */
+ u32 max_gpii; /* maximum # of gpii instances available per gpi block */
+ u32 gpii_mask; /* gpii instances available for apps */
+ u32 ev_factor; /* ev ring length factor */
+ struct gpii *gpiis;
+};
+
+struct reg_info {
+ char *name;
+ u32 offset;
+ u32 val;
+};
+
+struct gchan {
+ struct virt_dma_chan vc;
+ u32 chid;
+ u32 seid;
+ u32 protocol;
+ struct gpii *gpii;
+ enum gpi_ch_state ch_state;
+ enum gpi_pm_state pm_state;
+ void __iomem *ch_cntxt_base_reg;
+ void __iomem *ch_cntxt_db_reg;
+ void __iomem *ch_cmd_reg;
+ u32 dir;
+ struct gpi_ring ch_ring;
+ void *config;
+};
+
+struct gpii {
+ u32 gpii_id;
+ struct gchan gchan[MAX_CHANNELS_PER_GPII];
+ struct gpi_dev *gpi_dev;
+ int irq;
+ void __iomem *regs; /* points to gpi top */
+ void __iomem *ev_cntxt_base_reg;
+ void __iomem *ev_cntxt_db_reg;
+ void __iomem *ev_ring_rp_lsb_reg;
+ void __iomem *ev_cmd_reg;
+ void __iomem *ieob_clr_reg;
+ struct mutex ctrl_lock;
+ enum gpi_ev_state ev_state;
+ bool configured_irq;
+ enum gpi_pm_state pm_state;
+ rwlock_t pm_lock;
+ struct gpi_ring ev_ring;
+ struct tasklet_struct ev_task; /* event processing tasklet */
+ struct completion cmd_completion;
+ enum gpi_cmd gpi_cmd;
+ u32 cntxt_type_irq_msk;
+ bool ieob_set;
+};
+
+#define MAX_TRE 3
+
+struct gpi_desc {
+ struct virt_dma_desc vd;
+ size_t len;
+ void *db; /* DB register to program */
+ struct gchan *gchan;
+ struct gpi_tre tre[MAX_TRE];
+ u32 num_tre;
+};
+
+static const u32 GPII_CHAN_DIR[MAX_CHANNELS_PER_GPII] = {
+ GPI_CHTYPE_DIR_OUT, GPI_CHTYPE_DIR_IN
+};
+
+static irqreturn_t gpi_handle_irq(int irq, void *data);
+static void gpi_ring_recycle_ev_element(struct gpi_ring *ring);
+static int gpi_ring_add_element(struct gpi_ring *ring, void **wp);
+static void gpi_process_events(struct gpii *gpii);
+
+static inline struct gchan *to_gchan(struct dma_chan *dma_chan)
+{
+ return container_of(dma_chan, struct gchan, vc.chan);
+}
+
+static inline struct gpi_desc *to_gpi_desc(struct virt_dma_desc *vd)
+{
+ return container_of(vd, struct gpi_desc, vd);
+}
+
+static inline phys_addr_t to_physical(const struct gpi_ring *const ring,
+ void *addr)
+{
+ return ring->phys_addr + (addr - ring->base);
+}
+
+static inline void *to_virtual(const struct gpi_ring *const ring, phys_addr_t addr)
+{
+ return ring->base + (addr - ring->phys_addr);
+}
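+
+/*
+ * Example: the two helpers are inverses; with ring->phys_addr 0x1000,
+ * to_physical(ring, ring->base + 0x40) is 0x1040 and
+ * to_virtual(ring, 0x1040) is ring->base + 0x40.
+ */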
+
+static inline u32 gpi_read_reg(struct gpii *gpii, void __iomem *addr)
+{
+ return readl_relaxed(addr);
+}
+
+static inline void gpi_write_reg(struct gpii *gpii, void __iomem *addr, u32 val)
+{
+ writel_relaxed(val, addr);
+}
+
+/* gpi_write_reg_field - write to specific bit field */
+static inline void gpi_write_reg_field(struct gpii *gpii, void __iomem *addr,
+ u32 mask, u32 shift, u32 val)
+{
+ u32 tmp = gpi_read_reg(gpii, addr);
+
+ tmp &= ~mask;
+ val = tmp | ((val << shift) & mask);
+ gpi_write_reg(gpii, addr, val);
+}
+
+static inline void
+gpi_update_reg(struct gpii *gpii, u32 offset, u32 mask, u32 val)
+{
+ void __iomem *addr = gpii->regs + offset;
+ u32 tmp = gpi_read_reg(gpii, addr);
+
+ tmp &= ~mask;
+ tmp |= u32_encode_bits(val, mask);
+
+ gpi_write_reg(gpii, addr, tmp);
+}
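+
+/*
+ * Example: gpi_update_reg(gpii, offset, GENMASK(3, 0), 0x5) is a
+ * read-modify-write that clears bits 3:0 and rewrites them as 0x5,
+ * leaving all other bits of the register untouched.
+ */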
+
+static void gpi_disable_interrupts(struct gpii *gpii)
+{
+ gpi_update_reg(gpii, GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS(gpii->gpii_id),
+ GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK, 0);
+ gpi_update_reg(gpii, GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(gpii->gpii_id),
+ GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK, 0);
+ gpi_update_reg(gpii, GPII_n_CNTXT_SRC_CH_IRQ_MSK_OFFS(gpii->gpii_id),
+ GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK, 0);
+ gpi_update_reg(gpii, GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(gpii->gpii_id),
+ GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK, 0);
+ gpi_update_reg(gpii, GPII_n_CNTXT_GLOB_IRQ_EN_OFFS(gpii->gpii_id),
+ GPII_n_CNTXT_GPII_IRQ_EN_BMSK, 0);
+ gpi_update_reg(gpii, GPII_n_CNTXT_GPII_IRQ_EN_OFFS(gpii->gpii_id),
+ GPII_n_CNTXT_GPII_IRQ_EN_BMSK, 0);
+ gpi_update_reg(gpii, GPII_n_CNTXT_INTSET_OFFS(gpii->gpii_id),
+ GPII_n_CNTXT_INTSET_BMSK, 0);
+
+ gpii->cntxt_type_irq_msk = 0;
+ devm_free_irq(gpii->gpi_dev->dev, gpii->irq, gpii);
+ gpii->configured_irq = false;
+}
+
+/* configure and enable interrupts */
+static int gpi_config_interrupts(struct gpii *gpii, enum gpii_irq_settings settings, bool mask)
+{
+ const u32 enable = (GPII_n_CNTXT_TYPE_IRQ_MSK_GENERAL |
+ GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB |
+ GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB |
+ GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL |
+ GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL);
+ int ret;
+
+ if (!gpii->configured_irq) {
+ ret = devm_request_irq(gpii->gpi_dev->dev, gpii->irq,
+ gpi_handle_irq, IRQF_TRIGGER_HIGH,
+ "gpi-dma", gpii);
+ if (ret < 0) {
+ dev_err(gpii->gpi_dev->dev, "error request irq:%d ret:%d\n",
+ gpii->irq, ret);
+ return ret;
+ }
+ }
+
+ if (settings == MASK_IEOB_SETTINGS) {
+ /*
+ * GPII only uses one EV ring per gpii so we can globally
+ * enable/disable IEOB interrupt
+ */
+ if (mask)
+ gpii->cntxt_type_irq_msk |= GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB;
+ else
+ gpii->cntxt_type_irq_msk &= ~(GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB);
+ gpi_update_reg(gpii, GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS(gpii->gpii_id),
+ GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK, gpii->cntxt_type_irq_msk);
+ } else {
+ gpi_update_reg(gpii, GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS(gpii->gpii_id),
+ GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK, enable);
+ gpi_update_reg(gpii, GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(gpii->gpii_id),
+ GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK,
+ GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK);
+ gpi_update_reg(gpii, GPII_n_CNTXT_SRC_CH_IRQ_MSK_OFFS(gpii->gpii_id),
+ GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK,
+ GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK);
+ gpi_update_reg(gpii, GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(gpii->gpii_id),
+ GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK,
+ GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK);
+ gpi_update_reg(gpii, GPII_n_CNTXT_GLOB_IRQ_EN_OFFS(gpii->gpii_id),
+ GPII_n_CNTXT_GPII_IRQ_EN_BMSK,
+ GPII_n_CNTXT_GPII_IRQ_EN_BMSK);
+ gpi_update_reg(gpii, GPII_n_CNTXT_GPII_IRQ_EN_OFFS(gpii->gpii_id),
+ GPII_n_CNTXT_GPII_IRQ_EN_BMSK, GPII_n_CNTXT_GPII_IRQ_EN_BMSK);
+ gpi_update_reg(gpii, GPII_n_CNTXT_MSI_BASE_LSB_OFFS(gpii->gpii_id), U32_MAX, 0);
+ gpi_update_reg(gpii, GPII_n_CNTXT_MSI_BASE_MSB_OFFS(gpii->gpii_id), U32_MAX, 0);
+ gpi_update_reg(gpii, GPII_n_CNTXT_SCRATCH_0_OFFS(gpii->gpii_id), U32_MAX, 0);
+ gpi_update_reg(gpii, GPII_n_CNTXT_SCRATCH_1_OFFS(gpii->gpii_id), U32_MAX, 0);
+ gpi_update_reg(gpii, GPII_n_CNTXT_INTSET_OFFS(gpii->gpii_id),
+ GPII_n_CNTXT_INTSET_BMSK, 1);
+ gpi_update_reg(gpii, GPII_n_ERROR_LOG_OFFS(gpii->gpii_id), U32_MAX, 0);
+
+ gpii->cntxt_type_irq_msk = enable;
+ }
+
+ gpii->configured_irq = true;
+ return 0;
+}
+
+/* Sends gpii event or channel command */
+static int gpi_send_cmd(struct gpii *gpii, struct gchan *gchan,
+ enum gpi_cmd gpi_cmd)
+{
+ u32 chid = MAX_CHANNELS_PER_GPII;
+ unsigned long timeout;
+ void __iomem *cmd_reg;
+ u32 cmd;
+
+ if (gpi_cmd >= GPI_MAX_CMD)
+ return -EINVAL;
+ if (IS_CHAN_CMD(gpi_cmd))
+ chid = gchan->chid;
+
+ dev_dbg(gpii->gpi_dev->dev,
+ "sending cmd: %s:%u\n", TO_GPI_CMD_STR(gpi_cmd), chid);
+
+ /* send opcode and wait for completion */
+ reinit_completion(&gpii->cmd_completion);
+ gpii->gpi_cmd = gpi_cmd;
+
+ cmd_reg = IS_CHAN_CMD(gpi_cmd) ? gchan->ch_cmd_reg : gpii->ev_cmd_reg;
+ cmd = IS_CHAN_CMD(gpi_cmd) ? GPII_n_CH_CMD(gpi_cmd_info[gpi_cmd].opcode, chid) :
+ GPII_n_EV_CMD(gpi_cmd_info[gpi_cmd].opcode, 0);
+ gpi_write_reg(gpii, cmd_reg, cmd);
+ timeout = wait_for_completion_timeout(&gpii->cmd_completion,
+ msecs_to_jiffies(CMD_TIMEOUT_MS));
+ if (!timeout) {
+ dev_err(gpii->gpi_dev->dev, "cmd: %s completion timeout:%u\n",
+ TO_GPI_CMD_STR(gpi_cmd), chid);
+ return -EIO;
+ }
+
+ /* confirm the new ch state is correct, if the cmd is a state-change cmd */
+ if (gpi_cmd_info[gpi_cmd].state == STATE_IGNORE)
+ return 0;
+
+ if (IS_CHAN_CMD(gpi_cmd) && gchan->ch_state == gpi_cmd_info[gpi_cmd].state)
+ return 0;
+
+ if (!IS_CHAN_CMD(gpi_cmd) && gpii->ev_state == gpi_cmd_info[gpi_cmd].state)
+ return 0;
+
+ return -EIO;
+}
+
+/* program transfer ring DB register */
+static inline void gpi_write_ch_db(struct gchan *gchan,
+ struct gpi_ring *ring, void *wp)
+{
+ struct gpii *gpii = gchan->gpii;
+ phys_addr_t p_wp;
+
+ p_wp = to_physical(ring, wp);
+ gpi_write_reg(gpii, gchan->ch_cntxt_db_reg, p_wp);
+}
+
+/* program event ring DB register */
+static inline void gpi_write_ev_db(struct gpii *gpii,
+ struct gpi_ring *ring, void *wp)
+{
+ phys_addr_t p_wp;
+
+ p_wp = ring->phys_addr + (wp - ring->base);
+ gpi_write_reg(gpii, gpii->ev_cntxt_db_reg, p_wp);
+}
+
+/* process transfer completion interrupt */
+static void gpi_process_ieob(struct gpii *gpii)
+{
+ gpi_write_reg(gpii, gpii->ieob_clr_reg, BIT(0));
+
+ gpi_config_interrupts(gpii, MASK_IEOB_SETTINGS, 0);
+ tasklet_hi_schedule(&gpii->ev_task);
+}
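+
+/*
+ * Note the NAPI-style flow: the hard irq disables IEOB here
+ * (gpi_config_interrupts(gpii, MASK_IEOB_SETTINGS, 0)) and defers to
+ * the tasklet, which drains the event ring and re-enables IEOB with
+ * the same call and a final argument of 1.
+ */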
+
+/* process channel control interrupt */
+static void gpi_process_ch_ctrl_irq(struct gpii *gpii)
+{
+ u32 gpii_id = gpii->gpii_id;
+ u32 offset = GPII_n_CNTXT_SRC_GPII_CH_IRQ_OFFS(gpii_id);
+ u32 ch_irq = gpi_read_reg(gpii, gpii->regs + offset);
+ struct gchan *gchan;
+ u32 chid, state;
+
+ /* clear the status */
+ offset = GPII_n_CNTXT_SRC_CH_IRQ_CLR_OFFS(gpii_id);
+ gpi_write_reg(gpii, gpii->regs + offset, (u32)ch_irq);
+
+ for (chid = 0; chid < MAX_CHANNELS_PER_GPII; chid++) {
+ if (!(BIT(chid) & ch_irq))
+ continue;
+
+ gchan = &gpii->gchan[chid];
+ state = gpi_read_reg(gpii, gchan->ch_cntxt_base_reg +
+ CNTXT_0_CONFIG);
+ state = FIELD_GET(GPII_n_CH_k_CNTXT_0_CHSTATE, state);
+
+ /*
+ * The CH_CMD_DE_ALLOC cmd is always successful; however, it does
+ * not change the hardware status, so overwrite the software
+ * state with the default state.
+ */
+ if (gpii->gpi_cmd == GPI_CH_CMD_DE_ALLOC)
+ state = DEFAULT_CH_STATE;
+ gchan->ch_state = state;
+
+ /*
+ * Trigger complete_all() unless ch_state is stop-in-process.
+ * Stop-in-process is a transition state, so wait for the stop
+ * interrupt before notifying.
+ */
+ if (gchan->ch_state != CH_STATE_STOP_IN_PROC)
+ complete_all(&gpii->cmd_completion);
+ }
+}
+
+/* processing gpi general error interrupts */
+static void gpi_process_gen_err_irq(struct gpii *gpii)
+{
+ u32 gpii_id = gpii->gpii_id;
+ u32 offset = GPII_n_CNTXT_GPII_IRQ_STTS_OFFS(gpii_id);
+ u32 irq_stts = gpi_read_reg(gpii, gpii->regs + offset);
+
+ /* log the status */
+ dev_dbg(gpii->gpi_dev->dev, "irq_stts:0x%x\n", irq_stts);
+
+ /* Clear the register */
+ offset = GPII_n_CNTXT_GPII_IRQ_CLR_OFFS(gpii_id);
+ gpi_write_reg(gpii, gpii->regs + offset, irq_stts);
+}
+
+/* processing gpi level error interrupts */
+static void gpi_process_glob_err_irq(struct gpii *gpii)
+{
+ u32 gpii_id = gpii->gpii_id;
+ u32 offset = GPII_n_CNTXT_GLOB_IRQ_STTS_OFFS(gpii_id);
+ u32 irq_stts = gpi_read_reg(gpii, gpii->regs + offset);
+
+ offset = GPII_n_CNTXT_GLOB_IRQ_CLR_OFFS(gpii_id);
+ gpi_write_reg(gpii, gpii->regs + offset, irq_stts);
+
+ /* only error interrupt should be set */
+ if (irq_stts & ~GPI_GLOB_IRQ_ERROR_INT_MSK) {
+ dev_err(gpii->gpi_dev->dev, "invalid error status:0x%x\n", irq_stts);
+ return;
+ }
+
+ offset = GPII_n_ERROR_LOG_OFFS(gpii_id);
+ gpi_write_reg(gpii, gpii->regs + offset, 0);
+}
+
+/* gpii interrupt handler */
+static irqreturn_t gpi_handle_irq(int irq, void *data)
+{
+ struct gpii *gpii = data;
+ u32 gpii_id = gpii->gpii_id;
+ u32 type, offset;
+ unsigned long flags;
+
+ read_lock_irqsave(&gpii->pm_lock, flags);
+
+ /*
+ * The states are out of sync if an interrupt arrives while the
+ * software state is still DISABLE; bail out.
+ */
+ if (!REG_ACCESS_VALID(gpii->pm_state)) {
+ dev_err(gpii->gpi_dev->dev, "receive interrupt while in %s state\n",
+ TO_GPI_PM_STR(gpii->pm_state));
+ goto exit_irq;
+ }
+
+ offset = GPII_n_CNTXT_TYPE_IRQ_OFFS(gpii->gpii_id);
+ type = gpi_read_reg(gpii, gpii->regs + offset);
+
+ do {
+ /* global gpii error */
+ if (type & GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB) {
+ gpi_process_glob_err_irq(gpii);
+ type &= ~(GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB);
+ }
+
+ /* transfer complete interrupt */
+ if (type & GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB) {
+ gpi_process_ieob(gpii);
+ type &= ~GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB;
+ }
+
+ /* event control irq */
+ if (type & GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL) {
+ u32 ev_state;
+ u32 ev_ch_irq;
+
+ dev_dbg(gpii->gpi_dev->dev,
+ "processing EV CTRL interrupt\n");
+ offset = GPII_n_CNTXT_SRC_EV_CH_IRQ_OFFS(gpii_id);
+ ev_ch_irq = gpi_read_reg(gpii, gpii->regs + offset);
+
+ offset = GPII_n_CNTXT_SRC_EV_CH_IRQ_CLR_OFFS
+ (gpii_id);
+ gpi_write_reg(gpii, gpii->regs + offset, ev_ch_irq);
+ ev_state = gpi_read_reg(gpii, gpii->ev_cntxt_base_reg +
+ CNTXT_0_CONFIG);
+ ev_state = FIELD_GET(GPII_n_EV_k_CNTXT_0_CHSTATE, ev_state);
+
+ /*
+ * The EV_CMD_DEALLOC cmd is always successful; however,
+ * it does not change the hardware status, so overwrite
+ * the software state with the default state.
+ */
+ if (gpii->gpi_cmd == GPI_EV_CMD_DEALLOC)
+ ev_state = DEFAULT_EV_CH_STATE;
+
+ gpii->ev_state = ev_state;
+ dev_dbg(gpii->gpi_dev->dev, "setting EV state to %s\n",
+ TO_GPI_EV_STATE_STR(gpii->ev_state));
+ complete_all(&gpii->cmd_completion);
+ type &= ~(GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL);
+ }
+
+ /* channel control irq */
+ if (type & GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL) {
+ dev_dbg(gpii->gpi_dev->dev, "process CH CTRL interrupts\n");
+ gpi_process_ch_ctrl_irq(gpii);
+ type &= ~(GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL);
+ }
+
+ if (type) {
+ dev_err(gpii->gpi_dev->dev, "Unhandled interrupt status:0x%x\n", type);
+ gpi_process_gen_err_irq(gpii);
+ goto exit_irq;
+ }
+
+ offset = GPII_n_CNTXT_TYPE_IRQ_OFFS(gpii->gpii_id);
+ type = gpi_read_reg(gpii, gpii->regs + offset);
+ } while (type);
+
+exit_irq:
+ read_unlock_irqrestore(&gpii->pm_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+/* process DMA Immediate completion data events */
+static void gpi_process_imed_data_event(struct gchan *gchan,
+ struct immediate_data_event *imed_event)
+{
+ struct gpii *gpii = gchan->gpii;
+ struct gpi_ring *ch_ring = &gchan->ch_ring;
+ void *tre = ch_ring->base + (ch_ring->el_size * imed_event->tre_index);
+ struct dmaengine_result result;
+ struct gpi_desc *gpi_desc;
+ struct virt_dma_desc *vd;
+ unsigned long flags;
+ u32 chid;
+
+ /* if the channel is not active, don't process the event */
+ if (gchan->pm_state != ACTIVE_STATE) {
+ dev_err(gpii->gpi_dev->dev, "skipping processing event because ch @ %s state\n",
+ TO_GPI_PM_STR(gchan->pm_state));
+ return;
+ }
+
+ spin_lock_irqsave(&gchan->vc.lock, flags);
+ vd = vchan_next_desc(&gchan->vc);
+ if (!vd) {
+ struct gpi_ere *gpi_ere;
+ struct gpi_tre *gpi_tre;
+
+ spin_unlock_irqrestore(&gchan->vc.lock, flags);
+ dev_dbg(gpii->gpi_dev->dev, "event without a pending descriptor!\n");
+ gpi_ere = (struct gpi_ere *)imed_event;
+ dev_dbg(gpii->gpi_dev->dev,
+ "Event: %08x %08x %08x %08x\n",
+ gpi_ere->dword[0], gpi_ere->dword[1],
+ gpi_ere->dword[2], gpi_ere->dword[3]);
+ gpi_tre = tre;
+ dev_dbg(gpii->gpi_dev->dev,
+ "Pending TRE: %08x %08x %08x %08x\n",
+ gpi_tre->dword[0], gpi_tre->dword[1],
+ gpi_tre->dword[2], gpi_tre->dword[3]);
+ return;
+ }
+ gpi_desc = to_gpi_desc(vd);
+ spin_unlock_irqrestore(&gchan->vc.lock, flags);
+
+ /*
+ * The RP carried by the event points to the last TRE processed;
+ * update the ring rp to tre + 1.
+ */
+ tre += ch_ring->el_size;
+ if (tre >= (ch_ring->base + ch_ring->len))
+ tre = ch_ring->base;
+ ch_ring->rp = tre;
+
+ /* make sure rp updates are immediately visible to all cores */
+ smp_wmb();
+
+ chid = imed_event->chid;
+ if (imed_event->code == MSM_GPI_TCE_EOT && gpii->ieob_set) {
+ if (chid == GPI_RX_CHAN)
+ goto gpi_free_desc;
+ else
+ return;
+ }
+
+ if (imed_event->code == MSM_GPI_TCE_UNEXP_ERR)
+ result.result = DMA_TRANS_ABORTED;
+ else
+ result.result = DMA_TRANS_NOERROR;
+ result.residue = gpi_desc->len - imed_event->length;
+
+ dma_cookie_complete(&vd->tx);
+ dmaengine_desc_get_callback_invoke(&vd->tx, &result);
+
+gpi_free_desc:
+ spin_lock_irqsave(&gchan->vc.lock, flags);
+ list_del(&vd->node);
+ spin_unlock_irqrestore(&gchan->vc.lock, flags);
+ kfree(gpi_desc);
+ gpi_desc = NULL;
+}
+
+/* processing transfer completion events */
+static void gpi_process_xfer_compl_event(struct gchan *gchan,
+ struct xfer_compl_event *compl_event)
+{
+ struct gpii *gpii = gchan->gpii;
+ struct gpi_ring *ch_ring = &gchan->ch_ring;
+ void *ev_rp = to_virtual(ch_ring, compl_event->ptr);
+ struct virt_dma_desc *vd;
+ struct gpi_desc *gpi_desc;
+ struct dmaengine_result result;
+ unsigned long flags;
+ u32 chid;
+
+ /* only process events on active channel */
+ if (unlikely(gchan->pm_state != ACTIVE_STATE)) {
+ dev_err(gpii->gpi_dev->dev, "skipping processing event because ch @ %s state\n",
+ TO_GPI_PM_STR(gchan->pm_state));
+ return;
+ }
+
+ spin_lock_irqsave(&gchan->vc.lock, flags);
+ vd = vchan_next_desc(&gchan->vc);
+ if (!vd) {
+ struct gpi_ere *gpi_ere;
+
+ spin_unlock_irqrestore(&gchan->vc.lock, flags);
+ dev_err(gpii->gpi_dev->dev, "Event without a pending descriptor!\n");
+ gpi_ere = (struct gpi_ere *)compl_event;
+ dev_err(gpii->gpi_dev->dev,
+ "Event: %08x %08x %08x %08x\n",
+ gpi_ere->dword[0], gpi_ere->dword[1],
+ gpi_ere->dword[2], gpi_ere->dword[3]);
+ return;
+ }
+
+ gpi_desc = to_gpi_desc(vd);
+ spin_unlock_irqrestore(&gchan->vc.lock, flags);
+
+ /*
+ * The RP carried by the event points to the last TRE processed;
+ * update the ring rp to ev_rp + 1.
+ */
+ ev_rp += ch_ring->el_size;
+ if (ev_rp >= (ch_ring->base + ch_ring->len))
+ ev_rp = ch_ring->base;
+ ch_ring->rp = ev_rp;
+
+ /* update must be visible to other cores */
+ smp_wmb();
+
+ chid = compl_event->chid;
+ if (compl_event->code == MSM_GPI_TCE_EOT && gpii->ieob_set) {
+ if (chid == GPI_RX_CHAN)
+ goto gpi_free_desc;
+ else
+ return;
+ }
+
+ if (compl_event->code == MSM_GPI_TCE_UNEXP_ERR) {
+ dev_err(gpii->gpi_dev->dev, "Error in Transaction\n");
+ result.result = DMA_TRANS_ABORTED;
+ } else {
+ dev_dbg(gpii->gpi_dev->dev, "Transaction Success\n");
+ result.result = DMA_TRANS_NOERROR;
+ }
+ result.residue = gpi_desc->len - compl_event->length;
+ dev_dbg(gpii->gpi_dev->dev, "Residue %d\n", result.residue);
+
+ dma_cookie_complete(&vd->tx);
+ dmaengine_desc_get_callback_invoke(&vd->tx, &result);
+
+gpi_free_desc:
+ spin_lock_irqsave(&gchan->vc.lock, flags);
+ list_del(&vd->node);
+ spin_unlock_irqrestore(&gchan->vc.lock, flags);
+ kfree(gpi_desc);
+ gpi_desc = NULL;
+}
+
+/* process all events */
+static void gpi_process_events(struct gpii *gpii)
+{
+ struct gpi_ring *ev_ring = &gpii->ev_ring;
+ phys_addr_t cntxt_rp;
+ void *rp;
+ union gpi_event *gpi_event;
+ struct gchan *gchan;
+ u32 chid, type;
+
+ cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
+ rp = to_virtual(ev_ring, cntxt_rp);
+
+ do {
+ while (rp != ev_ring->rp) {
+ gpi_event = ev_ring->rp;
+ chid = gpi_event->xfer_compl_event.chid;
+ type = gpi_event->xfer_compl_event.type;
+
+ dev_dbg(gpii->gpi_dev->dev,
+ "Event: CHID:%u, type:%x %08x %08x %08x %08x\n",
+ chid, type, gpi_event->gpi_ere.dword[0],
+ gpi_event->gpi_ere.dword[1], gpi_event->gpi_ere.dword[2],
+ gpi_event->gpi_ere.dword[3]);
+
+ switch (type) {
+ case XFER_COMPLETE_EV_TYPE:
+ gchan = &gpii->gchan[chid];
+ gpi_process_xfer_compl_event(gchan,
+ &gpi_event->xfer_compl_event);
+ break;
+ case STALE_EV_TYPE:
+ dev_dbg(gpii->gpi_dev->dev, "stale event, not processing\n");
+ break;
+ case IMMEDIATE_DATA_EV_TYPE:
+ gchan = &gpii->gchan[chid];
+ gpi_process_imed_data_event(gchan,
+ &gpi_event->immediate_data_event);
+ break;
+ case QUP_NOTIF_EV_TYPE:
+ dev_dbg(gpii->gpi_dev->dev, "QUP_NOTIF_EV_TYPE\n");
+ break;
+ default:
+ dev_dbg(gpii->gpi_dev->dev,
+ "not supported event type:0x%x\n", type);
+ }
+ gpi_ring_recycle_ev_element(ev_ring);
+ }
+ gpi_write_ev_db(gpii, ev_ring, ev_ring->wp);
+
+ /* clear pending IEOB events */
+ gpi_write_reg(gpii, gpii->ieob_clr_reg, BIT(0));
+
+ cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
+ rp = to_virtual(ev_ring, cntxt_rp);
+
+ } while (rp != ev_ring->rp);
+}
+
+/* processing events using tasklet */
+static void gpi_ev_tasklet(unsigned long data)
+{
+ struct gpii *gpii = (struct gpii *)data;
+
+ read_lock_bh(&gpii->pm_lock);
+ if (!REG_ACCESS_VALID(gpii->pm_state)) {
+ read_unlock_bh(&gpii->pm_lock);
+ dev_err(gpii->gpi_dev->dev, "not processing any events, pm_state:%s\n",
+ TO_GPI_PM_STR(gpii->pm_state));
+ return;
+ }
+
+ /* process the events */
+ gpi_process_events(gpii);
+
+ /* enable IEOB, switching back to interrupts */
+ gpi_config_interrupts(gpii, MASK_IEOB_SETTINGS, 1);
+ read_unlock_bh(&gpii->pm_lock);
+}
+
+/* marks all pending events for the channel as stale */
+static void gpi_mark_stale_events(struct gchan *gchan)
+{
+ struct gpii *gpii = gchan->gpii;
+ struct gpi_ring *ev_ring = &gpii->ev_ring;
+ u32 cntxt_rp, local_rp;
+ void *ev_rp;
+
+ cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
+
+ ev_rp = ev_ring->rp;
+ local_rp = (u32)to_physical(ev_ring, ev_rp);
+ while (local_rp != cntxt_rp) {
+ union gpi_event *gpi_event = ev_rp;
+ u32 chid = gpi_event->xfer_compl_event.chid;
+
+ if (chid == gchan->chid)
+ gpi_event->xfer_compl_event.type = STALE_EV_TYPE;
+ ev_rp += ev_ring->el_size;
+ if (ev_rp >= (ev_ring->base + ev_ring->len))
+ ev_rp = ev_ring->base;
+ cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
+ local_rp = (u32)to_physical(ev_ring, ev_rp);
+ }
+}
+
+/* reset sw state and issue channel reset or de-alloc */
+static int gpi_reset_chan(struct gchan *gchan, enum gpi_cmd gpi_cmd)
+{
+ struct gpii *gpii = gchan->gpii;
+ struct gpi_ring *ch_ring = &gchan->ch_ring;
+ unsigned long flags;
+ LIST_HEAD(list);
+ int ret;
+
+ ret = gpi_send_cmd(gpii, gchan, gpi_cmd);
+ if (ret) {
+ dev_err(gpii->gpi_dev->dev, "Error with cmd:%s ret:%d\n",
+ TO_GPI_CMD_STR(gpi_cmd), ret);
+ return ret;
+ }
+
+ /* initialize the local ring ptrs */
+ ch_ring->rp = ch_ring->base;
+ ch_ring->wp = ch_ring->base;
+
+ /* visible to other cores */
+ smp_wmb();
+
+ /* check event ring for any stale events */
+ write_lock_irq(&gpii->pm_lock);
+ gpi_mark_stale_events(gchan);
+
+ /* remove all async descriptors */
+ spin_lock_irqsave(&gchan->vc.lock, flags);
+ vchan_get_all_descriptors(&gchan->vc, &list);
+ spin_unlock_irqrestore(&gchan->vc.lock, flags);
+ write_unlock_irq(&gpii->pm_lock);
+ vchan_dma_desc_free_list(&gchan->vc, &list);
+
+ return 0;
+}
+
+static int gpi_start_chan(struct gchan *gchan)
+{
+ struct gpii *gpii = gchan->gpii;
+ int ret;
+
+ ret = gpi_send_cmd(gpii, gchan, GPI_CH_CMD_START);
+ if (ret) {
+ dev_err(gpii->gpi_dev->dev, "Error with cmd:%s ret:%d\n",
+ TO_GPI_CMD_STR(GPI_CH_CMD_START), ret);
+ return ret;
+ }
+
+ /* gpii CH is active now */
+ write_lock_irq(&gpii->pm_lock);
+ gchan->pm_state = ACTIVE_STATE;
+ write_unlock_irq(&gpii->pm_lock);
+
+ return 0;
+}
+
+static int gpi_stop_chan(struct gchan *gchan)
+{
+ struct gpii *gpii = gchan->gpii;
+ int ret;
+
+ ret = gpi_send_cmd(gpii, gchan, GPI_CH_CMD_STOP);
+ if (ret) {
+ dev_err(gpii->gpi_dev->dev, "Error with cmd:%s ret:%d\n",
+ TO_GPI_CMD_STR(GPI_CH_CMD_STOP), ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* allocate and configure the transfer channel */
+static int gpi_alloc_chan(struct gchan *chan, bool send_alloc_cmd)
+{
+ struct gpii *gpii = chan->gpii;
+ struct gpi_ring *ring = &chan->ch_ring;
+ int ret;
+ u32 id = gpii->gpii_id;
+ u32 chid = chan->chid;
+ u32 pair_chid = !chid;
+
+ if (send_alloc_cmd) {
+ ret = gpi_send_cmd(gpii, chan, GPI_CH_CMD_ALLOCATE);
+ if (ret) {
+ dev_err(gpii->gpi_dev->dev, "Error with cmd:%s ret:%d\n",
+ TO_GPI_CMD_STR(GPI_CH_CMD_ALLOCATE), ret);
+ return ret;
+ }
+ }
+
+ gpi_write_reg(gpii, chan->ch_cntxt_base_reg + CNTXT_0_CONFIG,
+ GPII_n_CH_k_CNTXT_0(ring->el_size, 0, chan->dir, GPI_CHTYPE_PROTO_GPI));
+ gpi_write_reg(gpii, chan->ch_cntxt_base_reg + CNTXT_1_R_LENGTH, ring->len);
+ gpi_write_reg(gpii, chan->ch_cntxt_base_reg + CNTXT_2_RING_BASE_LSB, ring->phys_addr);
+ gpi_write_reg(gpii, chan->ch_cntxt_base_reg + CNTXT_3_RING_BASE_MSB,
+ upper_32_bits(ring->phys_addr));
+ gpi_write_reg(gpii, chan->ch_cntxt_db_reg + CNTXT_5_RING_RP_MSB - CNTXT_4_RING_RP_LSB,
+ upper_32_bits(ring->phys_addr));
+ gpi_write_reg(gpii, gpii->regs + GPII_n_CH_k_SCRATCH_0_OFFS(id, chid),
+ GPII_n_CH_k_SCRATCH_0(pair_chid, chan->protocol, chan->seid));
+ gpi_write_reg(gpii, gpii->regs + GPII_n_CH_k_SCRATCH_1_OFFS(id, chid), 0);
+ gpi_write_reg(gpii, gpii->regs + GPII_n_CH_k_SCRATCH_2_OFFS(id, chid), 0);
+ gpi_write_reg(gpii, gpii->regs + GPII_n_CH_k_SCRATCH_3_OFFS(id, chid), 0);
+ gpi_write_reg(gpii, gpii->regs + GPII_n_CH_k_QOS_OFFS(id, chid), 1);
+
+ /* flush all the writes */
+ wmb();
+ return 0;
+}
+
+/* allocate and configure event ring */
+static int gpi_alloc_ev_chan(struct gpii *gpii)
+{
+ struct gpi_ring *ring = &gpii->ev_ring;
+ void __iomem *base = gpii->ev_cntxt_base_reg;
+ int ret;
+
+ ret = gpi_send_cmd(gpii, NULL, GPI_EV_CMD_ALLOCATE);
+ if (ret) {
+ dev_err(gpii->gpi_dev->dev, "error with cmd:%s ret:%d\n",
+ TO_GPI_CMD_STR(GPI_EV_CMD_ALLOCATE), ret);
+ return ret;
+ }
+
+ /* program event context */
+ gpi_write_reg(gpii, base + CNTXT_0_CONFIG,
+ GPII_n_EV_k_CNTXT_0(ring->el_size, GPI_INTTYPE_IRQ, GPI_CHTYPE_GPI_EV));
+ gpi_write_reg(gpii, base + CNTXT_1_R_LENGTH, ring->len);
+ gpi_write_reg(gpii, base + CNTXT_2_RING_BASE_LSB, lower_32_bits(ring->phys_addr));
+ gpi_write_reg(gpii, base + CNTXT_3_RING_BASE_MSB, upper_32_bits(ring->phys_addr));
+ gpi_write_reg(gpii, gpii->ev_cntxt_db_reg + CNTXT_5_RING_RP_MSB - CNTXT_4_RING_RP_LSB,
+ upper_32_bits(ring->phys_addr));
+ gpi_write_reg(gpii, base + CNTXT_8_RING_INT_MOD, 0);
+ gpi_write_reg(gpii, base + CNTXT_10_RING_MSI_LSB, 0);
+ gpi_write_reg(gpii, base + CNTXT_11_RING_MSI_MSB, 0);
+ gpi_write_reg(gpii, base + CNTXT_8_RING_INT_MOD, 0);
+ gpi_write_reg(gpii, base + CNTXT_12_RING_RP_UPDATE_LSB, 0);
+ gpi_write_reg(gpii, base + CNTXT_13_RING_RP_UPDATE_MSB, 0);
+
+ /* add events to ring */
+ ring->wp = (ring->base + ring->len - ring->el_size);
+
+ /* flush all the writes */
+ wmb();
+
+ /* gpii is active now */
+ write_lock_irq(&gpii->pm_lock);
+ gpii->pm_state = ACTIVE_STATE;
+ write_unlock_irq(&gpii->pm_lock);
+ gpi_write_ev_db(gpii, ring, ring->wp);
+
+ return 0;
+}
+
+/* calculate # of ERE/TRE available to queue */
+static int gpi_ring_num_elements_avail(const struct gpi_ring * const ring)
+{
+ int elements = 0;
+
+ if (ring->wp < ring->rp) {
+ elements = ((ring->rp - ring->wp) / ring->el_size) - 1;
+ } else {
+ elements = (ring->rp - ring->base) / ring->el_size;
+ elements += ((ring->base + ring->len - ring->wp) / ring->el_size) - 1;
+ }
+
+ return elements;
+}
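+
+/*
+ * Example: with rp == wp == base in a 4-element ring, this returns
+ * 0 + (4 - 1) = 3; one slot is always left empty so that a full ring
+ * can be told apart from an empty one.
+ */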
+
+static int gpi_ring_add_element(struct gpi_ring *ring, void **wp)
+{
+ if (gpi_ring_num_elements_avail(ring) <= 0)
+ return -ENOMEM;
+
+ *wp = ring->wp;
+ ring->wp += ring->el_size;
+ if (ring->wp >= (ring->base + ring->len))
+ ring->wp = ring->base;
+
+ /* visible to other cores */
+ smp_wmb();
+
+ return 0;
+}
+
+static void gpi_ring_recycle_ev_element(struct gpi_ring *ring)
+{
+ /* Update the WP */
+ ring->wp += ring->el_size;
+ if (ring->wp >= (ring->base + ring->len))
+ ring->wp = ring->base;
+
+ /* Update the RP */
+ ring->rp += ring->el_size;
+ if (ring->rp >= (ring->base + ring->len))
+ ring->rp = ring->base;
+
+ /* visible to other cores */
+ smp_wmb();
+}
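+
+/*
+ * Note: wp starts one element behind rp (see gpi_alloc_ev_chan()), and
+ * recycling advances both together, so each consumed event slot is
+ * immediately handed back to the hardware as writable.
+ */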
+
+static void gpi_free_ring(struct gpi_ring *ring,
+ struct gpii *gpii)
+{
+ dma_free_coherent(gpii->gpi_dev->dev, ring->alloc_size,
+ ring->pre_aligned, ring->dma_handle);
+ memset(ring, 0, sizeof(*ring));
+}
+
+/* allocate memory for transfer and event rings */
+static int gpi_alloc_ring(struct gpi_ring *ring, u32 elements,
+ u32 el_size, struct gpii *gpii)
+{
+ u64 len = elements * el_size;
+ int bit;
+
+ /* ring len must be a power of 2 */
+ bit = find_last_bit((unsigned long *)&len, 32);
+ if (((1 << bit) - 1) & len)
+ bit++;
+ len = 1 << bit;
+ ring->alloc_size = (len + (len - 1));
+ dev_dbg(gpii->gpi_dev->dev,
+ "#el:%u el_size:%u len:%u actual_len:%llu alloc_size:%zu\n",
+ elements, el_size, (elements * el_size), len,
+ ring->alloc_size);
+
+ ring->pre_aligned = dma_alloc_coherent(gpii->gpi_dev->dev,
+ ring->alloc_size,
+ &ring->dma_handle, GFP_KERNEL);
+ if (!ring->pre_aligned) {
+ dev_err(gpii->gpi_dev->dev, "could not alloc size:%zu mem for ring\n",
+ ring->alloc_size);
+ return -ENOMEM;
+ }
+
+ /* align the physical mem */
+ ring->phys_addr = (ring->dma_handle + (len - 1)) & ~(len - 1);
+ ring->base = ring->pre_aligned + (ring->phys_addr - ring->dma_handle);
+ ring->rp = ring->base;
+ ring->wp = ring->base;
+ ring->len = len;
+ ring->el_size = el_size;
+ ring->elements = ring->len / ring->el_size;
+ memset(ring->base, 0, ring->len);
+ ring->configured = true;
+
+ /* update to other cores */
+ smp_wmb();
+
+ dev_dbg(gpii->gpi_dev->dev,
+ "phy_pre:%pad phy_alig:%pa len:%u el_size:%u elements:%u\n",
+ &ring->dma_handle, &ring->phys_addr, ring->len,
+ ring->el_size, ring->elements);
+
+ return 0;
+}
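+
+/*
+ * Example: for 64 elements of 16 bytes, len is already the power of
+ * two 1024, alloc_size becomes len + (len - 1) = 2047, and phys_addr
+ * rounds dma_handle up to the next 1024-byte boundary, so the ring
+ * base is naturally aligned to its length.
+ */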
+
+/* copy tre into transfer ring */
+static void gpi_queue_xfer(struct gpii *gpii, struct gchan *gchan,
+ struct gpi_tre *gpi_tre, void **wp)
+{
+ struct gpi_tre *ch_tre;
+ int ret;
+
+ /* get next tre location we can copy */
+ ret = gpi_ring_add_element(&gchan->ch_ring, (void **)&ch_tre);
+ if (unlikely(ret)) {
+ dev_err(gpii->gpi_dev->dev, "Error adding ring element to xfer ring\n");
+ return;
+ }
+
+ /* copy the tre info */
+ memcpy(ch_tre, gpi_tre, sizeof(*ch_tre));
+ *wp = ch_tre;
+}
+
+/* reset and restart transfer channel */
+static int gpi_terminate_all(struct dma_chan *chan)
+{
+ struct gchan *gchan = to_gchan(chan);
+ struct gpii *gpii = gchan->gpii;
+ int schid, echid, i;
+ int ret = 0;
+
+ mutex_lock(&gpii->ctrl_lock);
+
+ /*
+ * Treat both channels as a group unless the protocol is UART;
+ * STOP, RESET, and START need to happen in lockstep.
+ */
+ schid = (gchan->protocol == QCOM_GPI_UART) ? gchan->chid : 0;
+ echid = (gchan->protocol == QCOM_GPI_UART) ? schid + 1 : MAX_CHANNELS_PER_GPII;
+
+ /* stop the channel */
+ for (i = schid; i < echid; i++) {
+ gchan = &gpii->gchan[i];
+
+ /* disable ch state so no more TRE processing */
+ write_lock_irq(&gpii->pm_lock);
+ gchan->pm_state = PREPARE_TERMINATE;
+ write_unlock_irq(&gpii->pm_lock);
+
+ /* send command to Stop the channel */
+ ret = gpi_stop_chan(gchan);
+ }
+
+ /* reset the channels (clears any pending tre) */
+ for (i = schid; i < echid; i++) {
+ gchan = &gpii->gchan[i];
+
+ ret = gpi_reset_chan(gchan, GPI_CH_CMD_RESET);
+ if (ret) {
+ dev_err(gpii->gpi_dev->dev, "Error resetting channel ret:%d\n", ret);
+ goto terminate_exit;
+ }
+
+ /* reprogram channel CNTXT */
+ ret = gpi_alloc_chan(gchan, false);
+ if (ret) {
+ dev_err(gpii->gpi_dev->dev, "Error alloc_channel ret:%d\n", ret);
+ goto terminate_exit;
+ }
+ }
+
+ /* restart the channels */
+ for (i = schid; i < echid; i++) {
+ gchan = &gpii->gchan[i];
+
+ ret = gpi_start_chan(gchan);
+ if (ret) {
+ dev_err(gpii->gpi_dev->dev, "Error Starting Channel ret:%d\n", ret);
+ goto terminate_exit;
+ }
+ }
+
+terminate_exit:
+ mutex_unlock(&gpii->ctrl_lock);
+ return ret;
+}
+
+/* pause dma transfer for all channels */
+static int gpi_pause(struct dma_chan *chan)
+{
+ struct gchan *gchan = to_gchan(chan);
+ struct gpii *gpii = gchan->gpii;
+ int i, ret;
+
+ mutex_lock(&gpii->ctrl_lock);
+
+ /*
+ * pause/resume are per gpii not per channel, so
+ * client needs to call pause only once
+ */
+ if (gpii->pm_state == PAUSE_STATE) {
+ dev_dbg(gpii->gpi_dev->dev, "channel is already paused\n");
+ mutex_unlock(&gpii->ctrl_lock);
+ return 0;
+ }
+
+ /* send stop command to stop the channels */
+ for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
+ ret = gpi_stop_chan(&gpii->gchan[i]);
+ if (ret) {
+ mutex_unlock(&gpii->ctrl_lock);
+ return ret;
+ }
+ }
+
+ disable_irq(gpii->irq);
+
+ /* wait for the event tasklet to finish */
+ tasklet_kill(&gpii->ev_task);
+
+ write_lock_irq(&gpii->pm_lock);
+ gpii->pm_state = PAUSE_STATE;
+ write_unlock_irq(&gpii->pm_lock);
+ mutex_unlock(&gpii->ctrl_lock);
+
+ return 0;
+}
+
+/* resume dma transfer */
+static int gpi_resume(struct dma_chan *chan)
+{
+ struct gchan *gchan = to_gchan(chan);
+ struct gpii *gpii = gchan->gpii;
+ int i, ret;
+
+ mutex_lock(&gpii->ctrl_lock);
+ if (gpii->pm_state == ACTIVE_STATE) {
+ dev_dbg(gpii->gpi_dev->dev, "channel is already active\n");
+ mutex_unlock(&gpii->ctrl_lock);
+ return 0;
+ }
+
+ enable_irq(gpii->irq);
+
+ /* send start command to start the channels */
+ for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
+ ret = gpi_send_cmd(gpii, &gpii->gchan[i], GPI_CH_CMD_START);
+ if (ret) {
+ dev_err(gpii->gpi_dev->dev, "Error starting chan, ret:%d\n", ret);
+ mutex_unlock(&gpii->ctrl_lock);
+ return ret;
+ }
+ }
+
+ write_lock_irq(&gpii->pm_lock);
+ gpii->pm_state = ACTIVE_STATE;
+ write_unlock_irq(&gpii->pm_lock);
+ mutex_unlock(&gpii->ctrl_lock);
+
+ return 0;
+}
+
+static void gpi_desc_free(struct virt_dma_desc *vd)
+{
+ struct gpi_desc *gpi_desc = to_gpi_desc(vd);
+
+ kfree(gpi_desc);
+}
+
+static int
+gpi_peripheral_config(struct dma_chan *chan, struct dma_slave_config *config)
+{
+ struct gchan *gchan = to_gchan(chan);
+ void *cfg;
+
+ if (!config->peripheral_config)
+ return -EINVAL;
+
+ /* use a temporary so the old config is not leaked if krealloc fails */
+ cfg = krealloc(gchan->config, config->peripheral_size, GFP_NOWAIT);
+ if (!cfg)
+ return -ENOMEM;
+ gchan->config = cfg;
+
+ memcpy(gchan->config, config->peripheral_config, config->peripheral_size);
+
+ return 0;
+}
+
+static int gpi_create_i2c_tre(struct gchan *chan, struct gpi_desc *desc,
+ struct scatterlist *sgl, enum dma_transfer_direction direction)
+{
+ struct gpi_i2c_config *i2c = chan->config;
+ struct device *dev = chan->gpii->gpi_dev->dev;
+ unsigned int tre_idx = 0;
+ dma_addr_t address;
+ struct gpi_tre *tre;
+ unsigned int i;
+
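+ /* an I2C transfer is built from at most three TREs: optional CONFIG0, then GO, then DMA */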
+ /* first create config tre if applicable */
+ if (i2c->set_config) {
+ tre = &desc->tre[tre_idx];
+ tre_idx++;
+
+ tre->dword[0] = u32_encode_bits(i2c->low_count, TRE_I2C_C0_TLOW);
+ tre->dword[0] |= u32_encode_bits(i2c->high_count, TRE_I2C_C0_THIGH);
+ tre->dword[0] |= u32_encode_bits(i2c->cycle_count, TRE_I2C_C0_TCYL);
+ tre->dword[0] |= u32_encode_bits(i2c->pack_enable, TRE_I2C_C0_TX_PACK);
+ tre->dword[0] |= u32_encode_bits(i2c->pack_enable, TRE_I2C_C0_RX_PACK);
+
+ tre->dword[1] = 0;
+
+ tre->dword[2] = u32_encode_bits(i2c->clk_div, TRE_C0_CLK_DIV);
+
+ tre->dword[3] = u32_encode_bits(TRE_TYPE_CONFIG0, TRE_FLAGS_TYPE);
+ tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN);
+ }
+
+ /* create the GO tre for Tx */
+ if (i2c->op == I2C_WRITE) {
+ tre = &desc->tre[tre_idx];
+ tre_idx++;
+
+ if (i2c->multi_msg)
+ tre->dword[0] = u32_encode_bits(I2C_READ, TRE_I2C_GO_CMD);
+ else
+ tre->dword[0] = u32_encode_bits(i2c->op, TRE_I2C_GO_CMD);
+
+ tre->dword[0] |= u32_encode_bits(i2c->addr, TRE_I2C_GO_ADDR);
+ tre->dword[0] |= u32_encode_bits(i2c->stretch, TRE_I2C_GO_STRETCH);
+
+ tre->dword[1] = 0;
+ tre->dword[2] = u32_encode_bits(i2c->rx_len, TRE_RX_LEN);
+
+ tre->dword[3] = u32_encode_bits(TRE_TYPE_GO, TRE_FLAGS_TYPE);
+
+ if (i2c->multi_msg)
+ tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_LINK);
+ else
+ tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN);
+ }
+
+ if (i2c->op == I2C_READ || !i2c->multi_msg) {
+ /* create the DMA TRE */
+ tre = &desc->tre[tre_idx];
+ tre_idx++;
+
+ address = sg_dma_address(sgl);
+ tre->dword[0] = lower_32_bits(address);
+ tre->dword[1] = upper_32_bits(address);
+
+ tre->dword[2] = u32_encode_bits(sg_dma_len(sgl), TRE_DMA_LEN);
+
+ tre->dword[3] = u32_encode_bits(TRE_TYPE_DMA, TRE_FLAGS_TYPE);
+ tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_IEOT);
+ }
+
+ for (i = 0; i < tre_idx; i++)
+ dev_dbg(dev, "TRE:%d %x:%x:%x:%x\n", i, desc->tre[i].dword[0],
+ desc->tre[i].dword[1], desc->tre[i].dword[2], desc->tre[i].dword[3]);
+
+ return tre_idx;
+}
+
+static int gpi_create_spi_tre(struct gchan *chan, struct gpi_desc *desc,
+ struct scatterlist *sgl, enum dma_transfer_direction direction)
+{
+ struct gpi_spi_config *spi = chan->config;
+ struct device *dev = chan->gpii->gpi_dev->dev;
+ unsigned int tre_idx = 0;
+ dma_addr_t address;
+ struct gpi_tre *tre;
+ unsigned int i;
+
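+ /* a SPI TX transfer is CONFIG0 (optional) + GO + DMA TREs; RX uses only the DMA TRE */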
+ /* first create config tre if applicable */
+ if (direction == DMA_MEM_TO_DEV && spi->set_config) {
+ tre = &desc->tre[tre_idx];
+ tre_idx++;
+
+ tre->dword[0] = u32_encode_bits(spi->word_len, TRE_SPI_C0_WORD_SZ);
+ tre->dword[0] |= u32_encode_bits(spi->loopback_en, TRE_SPI_C0_LOOPBACK);
+ tre->dword[0] |= u32_encode_bits(spi->clock_pol_high, TRE_SPI_C0_CPOL);
+ tre->dword[0] |= u32_encode_bits(spi->data_pol_high, TRE_SPI_C0_CPHA);
+ tre->dword[0] |= u32_encode_bits(spi->pack_en, TRE_SPI_C0_TX_PACK);
+ tre->dword[0] |= u32_encode_bits(spi->pack_en, TRE_SPI_C0_RX_PACK);
+
+ tre->dword[1] = 0;
+
+ tre->dword[2] = u32_encode_bits(spi->clk_div, TRE_C0_CLK_DIV);
+ tre->dword[2] |= u32_encode_bits(spi->clk_src, TRE_C0_CLK_SRC);
+
+ tre->dword[3] = u32_encode_bits(TRE_TYPE_CONFIG0, TRE_FLAGS_TYPE);
+ tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN);
+ }
+
+ /* create the GO tre for Tx */
+ if (direction == DMA_MEM_TO_DEV) {
+ tre = &desc->tre[tre_idx];
+ tre_idx++;
+
+ tre->dword[0] = u32_encode_bits(spi->fragmentation, TRE_SPI_GO_FRAG);
+ tre->dword[0] |= u32_encode_bits(spi->cs, TRE_SPI_GO_CS);
+ tre->dword[0] |= u32_encode_bits(spi->cmd, TRE_SPI_GO_CMD);
+
+ tre->dword[1] = 0;
+
+ tre->dword[2] = u32_encode_bits(spi->rx_len, TRE_RX_LEN);
+
+ tre->dword[3] = u32_encode_bits(TRE_TYPE_GO, TRE_FLAGS_TYPE);
+ if (spi->cmd == SPI_RX)
+ tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_IEOB);
+ else
+ tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN);
+ }
+
+ /* create the dma tre */
+ tre = &desc->tre[tre_idx];
+ tre_idx++;
+
+ address = sg_dma_address(sgl);
+ tre->dword[0] = lower_32_bits(address);
+ tre->dword[1] = upper_32_bits(address);
+
+ tre->dword[2] = u32_encode_bits(sg_dma_len(sgl), TRE_DMA_LEN);
+
+ tre->dword[3] = u32_encode_bits(TRE_TYPE_DMA, TRE_FLAGS_TYPE);
+ if (direction == DMA_MEM_TO_DEV)
+ tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_IEOT);
+
+ for (i = 0; i < tre_idx; i++)
+ dev_dbg(dev, "TRE:%d %x:%x:%x:%x\n", i, desc->tre[i].dword[0],
+ desc->tre[i].dword[1], desc->tre[i].dword[2], desc->tre[i].dword[3]);
+
+ return tre_idx;
+}
+
+/* prepare a slave sg transaction: build the TREs for the transfer */
+static struct dma_async_tx_descriptor *
+gpi_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct gchan *gchan = to_gchan(chan);
+ struct gpii *gpii = gchan->gpii;
+ struct device *dev = gpii->gpi_dev->dev;
+ struct gpi_ring *ch_ring = &gchan->ch_ring;
+ struct gpi_desc *gpi_desc;
+ u32 nr, nr_tre = 0;
+ u8 set_config;
+ int i;
+
+ gpii->ieob_set = false;
+ if (!is_slave_direction(direction)) {
+ dev_err(gpii->gpi_dev->dev, "invalid dma direction: %d\n", direction);
+ return NULL;
+ }
+
+ if (sg_len > 1) {
+ dev_err(dev, "Multi sg sent, we support only one atm: %d\n", sg_len);
+ return NULL;
+ }
+
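+ /* worst-case TX needs config + go + dma TREs; no config drops one, and rx needs only the dma TRE */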
+ nr_tre = 3;
+ set_config = *(u32 *)gchan->config;
+ if (!set_config)
+ nr_tre = 2;
+ if (direction == DMA_DEV_TO_MEM) /* rx */
+ nr_tre = 1;
+
+ /* calculate # of elements required & available */
+ nr = gpi_ring_num_elements_avail(ch_ring);
+ if (nr < nr_tre) {
+ dev_err(dev, "not enough space in ring, avail:%u required:%u\n", nr, nr_tre);
+ return NULL;
+ }
+
+ gpi_desc = kzalloc(sizeof(*gpi_desc), GFP_NOWAIT);
+ if (!gpi_desc)
+ return NULL;
+
+ /* create TREs for xfer */
+ if (gchan->protocol == QCOM_GPI_SPI) {
+ i = gpi_create_spi_tre(gchan, gpi_desc, sgl, direction);
+ } else if (gchan->protocol == QCOM_GPI_I2C) {
+ i = gpi_create_i2c_tre(gchan, gpi_desc, sgl, direction);
+ } else {
+ dev_err(dev, "invalid peripheral: %d\n", gchan->protocol);
+ kfree(gpi_desc);
+ return NULL;
+ }
+
+ /* set up the descriptor */
+ gpi_desc->gchan = gchan;
+ gpi_desc->len = sg_dma_len(sgl);
+ gpi_desc->num_tre = i;
+
+ return vchan_tx_prep(&gchan->vc, &gpi_desc->vd, flags);
+}
+
+/* ring the transfer ring doorbell to begin the transfer */
+static void gpi_issue_pending(struct dma_chan *chan)
+{
+ struct gchan *gchan = to_gchan(chan);
+ struct gpii *gpii = gchan->gpii;
+ unsigned long flags, pm_lock_flags;
+ struct virt_dma_desc *vd = NULL;
+ struct gpi_desc *gpi_desc;
+ struct gpi_ring *ch_ring = &gchan->ch_ring;
+ void *tre, *wp = NULL;
+ int i;
+
+ read_lock_irqsave(&gpii->pm_lock, pm_lock_flags);
+
+ /* move all submitted descriptors to the issued list */
+ spin_lock_irqsave(&gchan->vc.lock, flags);
+ if (vchan_issue_pending(&gchan->vc))
+ vd = list_last_entry(&gchan->vc.desc_issued,
+ struct virt_dma_desc, node);
+ spin_unlock_irqrestore(&gchan->vc.lock, flags);
+
+ /* nothing to do, the list is empty */
+ if (!vd) {
+ read_unlock_irqrestore(&gpii->pm_lock, pm_lock_flags);
+ return;
+ }
+
+ gpi_desc = to_gpi_desc(vd);
+ for (i = 0; i < gpi_desc->num_tre; i++) {
+ tre = &gpi_desc->tre[i];
+ gpi_queue_xfer(gpii, gchan, tre, &wp);
+ }
+
+ gpi_desc->db = ch_ring->wp;
+ gpi_write_ch_db(gchan, &gchan->ch_ring, gpi_desc->db);
+ read_unlock_irqrestore(&gpii->pm_lock, pm_lock_flags);
+}
+
+static int gpi_ch_init(struct gchan *gchan)
+{
+ struct gpii *gpii = gchan->gpii;
+ const int ev_factor = gpii->gpi_dev->ev_factor;
+ u32 elements;
+ int i = 0, ret = 0;
+
+ gchan->pm_state = CONFIG_STATE;
+
+ /* check if both channels are configured before continuing */
+ for (i = 0; i < MAX_CHANNELS_PER_GPII; i++)
+ if (gpii->gchan[i].pm_state != CONFIG_STATE)
+ goto exit_gpi_init;
+
+ /* protocol must be same for both channels */
+ if (gpii->gchan[0].protocol != gpii->gchan[1].protocol) {
+ dev_err(gpii->gpi_dev->dev, "protocol did not match protocol %u != %u\n",
+ gpii->gchan[0].protocol, gpii->gchan[1].protocol);
+ ret = -EINVAL;
+ goto exit_gpi_init;
+ }
+
+ /* allocate memory for event ring */
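+ /* scale by ev_factor to leave headroom for events from both channel rings */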
+ elements = CHAN_TRES << ev_factor;
+ ret = gpi_alloc_ring(&gpii->ev_ring, elements,
+ sizeof(union gpi_event), gpii);
+ if (ret)
+ goto exit_gpi_init;
+
+ /* configure interrupts */
+ write_lock_irq(&gpii->pm_lock);
+ gpii->pm_state = PREPARE_HARDWARE;
+ write_unlock_irq(&gpii->pm_lock);
+ ret = gpi_config_interrupts(gpii, DEFAULT_IRQ_SETTINGS, 0);
+ if (ret) {
+ dev_err(gpii->gpi_dev->dev, "error config. interrupts, ret:%d\n", ret);
+ goto error_config_int;
+ }
+
+ /* allocate event rings */
+ ret = gpi_alloc_ev_chan(gpii);
+ if (ret) {
+ dev_err(gpii->gpi_dev->dev, "error alloc_ev_chan:%d\n", ret);
+ goto error_alloc_ev_ring;
+ }
+
+ /* Allocate all channels */
+ for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
+ ret = gpi_alloc_chan(&gpii->gchan[i], true);
+ if (ret) {
+ dev_err(gpii->gpi_dev->dev, "Error allocating chan:%d\n", ret);
+ goto error_alloc_chan;
+ }
+ }
+
+ /* start channels */
+ for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
+ ret = gpi_start_chan(&gpii->gchan[i]);
+ if (ret) {
+ dev_err(gpii->gpi_dev->dev, "Error start chan:%d\n", ret);
+ goto error_start_chan;
+ }
+ }
+ return ret;
+
+error_start_chan:
+ for (i = i - 1; i >= 0; i--) {
+ gpi_stop_chan(&gpii->gchan[i]);
+ gpi_send_cmd(gpii, &gpii->gchan[i], GPI_CH_CMD_RESET);
+ }
+ i = 2;
+error_alloc_chan:
+ for (i = i - 1; i >= 0; i--)
+ gpi_reset_chan(&gpii->gchan[i], GPI_CH_CMD_DE_ALLOC);
+error_alloc_ev_ring:
+ gpi_disable_interrupts(gpii);
+error_config_int:
+ gpi_free_ring(&gpii->ev_ring, gpii);
+exit_gpi_init:
+ /* the caller (gpi_alloc_chan_resources) holds and releases ctrl_lock */
+ return ret;
+}
+
+/* release all channel resources */
+static void gpi_free_chan_resources(struct dma_chan *chan)
+{
+ struct gchan *gchan = to_gchan(chan);
+ struct gpii *gpii = gchan->gpii;
+ enum gpi_pm_state cur_state;
+ int ret, i;
+
+ mutex_lock(&gpii->ctrl_lock);
+
+ cur_state = gchan->pm_state;
+
+ /* disable ch state so no more TRE processing for this channel */
+ write_lock_irq(&gpii->pm_lock);
+ gchan->pm_state = PREPARE_TERMINATE;
+ write_unlock_irq(&gpii->pm_lock);
+
+ /* attempt to do graceful hardware shutdown */
+ if (cur_state == ACTIVE_STATE) {
+ gpi_stop_chan(gchan);
+
+ ret = gpi_send_cmd(gpii, gchan, GPI_CH_CMD_RESET);
+ if (ret)
+ dev_err(gpii->gpi_dev->dev, "error resetting channel:%d\n", ret);
+
+ gpi_reset_chan(gchan, GPI_CH_CMD_DE_ALLOC);
+ }
+
+ /* free all allocated memory */
+ gpi_free_ring(&gchan->ch_ring, gpii);
+ vchan_free_chan_resources(&gchan->vc);
+ kfree(gchan->config);
+
+ write_lock_irq(&gpii->pm_lock);
+ gchan->pm_state = DISABLE_STATE;
+ write_unlock_irq(&gpii->pm_lock);
+
+ /* if any other ring is still active, exit */
+ for (i = 0; i < MAX_CHANNELS_PER_GPII; i++)
+ if (gpii->gchan[i].ch_ring.configured)
+ goto exit_free;
+
+ /* deallocate EV Ring */
+ cur_state = gpii->pm_state;
+ write_lock_irq(&gpii->pm_lock);
+ gpii->pm_state = PREPARE_TERMINATE;
+ write_unlock_irq(&gpii->pm_lock);
+
+ /* wait for the event tasklet to finish */
+ tasklet_kill(&gpii->ev_task);
+
+ /* send command to deallocate the event ring */
+ if (cur_state == ACTIVE_STATE)
+ gpi_send_cmd(gpii, NULL, GPI_EV_CMD_DEALLOC);
+
+ gpi_free_ring(&gpii->ev_ring, gpii);
+
+ /* disable interrupts */
+ if (cur_state == ACTIVE_STATE)
+ gpi_disable_interrupts(gpii);
+
+ /* set final state to disable */
+ write_lock_irq(&gpii->pm_lock);
+ gpii->pm_state = DISABLE_STATE;
+ write_unlock_irq(&gpii->pm_lock);
+
+exit_free:
+ mutex_unlock(&gpii->ctrl_lock);
+}
+
+/* allocate channel resources */
+static int gpi_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct gchan *gchan = to_gchan(chan);
+ struct gpii *gpii = gchan->gpii;
+ int ret;
+
+ mutex_lock(&gpii->ctrl_lock);
+
+ /* allocate memory for transfer ring */
+ ret = gpi_alloc_ring(&gchan->ch_ring, CHAN_TRES,
+ sizeof(struct gpi_tre), gpii);
+ if (ret)
+ goto xfer_alloc_err;
+
+ ret = gpi_ch_init(gchan);
+
+ mutex_unlock(&gpii->ctrl_lock);
+
+ return ret;
+xfer_alloc_err:
+ mutex_unlock(&gpii->ctrl_lock);
+
+ return ret;
+}
+
+static int gpi_find_avail_gpii(struct gpi_dev *gpi_dev, u32 seid)
+{
+ struct gchan *tx_chan, *rx_chan;
+ unsigned int gpii;
+
+ /* check if same seid is already configured for another chid */
+ for (gpii = 0; gpii < gpi_dev->max_gpii; gpii++) {
+ if (!((1 << gpii) & gpi_dev->gpii_mask))
+ continue;
+
+ tx_chan = &gpi_dev->gpiis[gpii].gchan[GPI_TX_CHAN];
+ rx_chan = &gpi_dev->gpiis[gpii].gchan[GPI_RX_CHAN];
+
+ if (rx_chan->vc.chan.client_count && rx_chan->seid == seid)
+ return gpii;
+ if (tx_chan->vc.chan.client_count && tx_chan->seid == seid)
+ return gpii;
+ }
+
+ /* no channels configured with same seid, return next avail gpii */
+ for (gpii = 0; gpii < gpi_dev->max_gpii; gpii++) {
+ if (!((1 << gpii) & gpi_dev->gpii_mask))
+ continue;
+
+ tx_chan = &gpi_dev->gpiis[gpii].gchan[GPI_TX_CHAN];
+ rx_chan = &gpi_dev->gpiis[gpii].gchan[GPI_RX_CHAN];
+
+ /* check if gpii is configured */
+ if (tx_chan->vc.chan.client_count ||
+ rx_chan->vc.chan.client_count)
+ continue;
+
+ /* found a free gpii */
+ return gpii;
+ }
+
+ /* no gpii instance available to use */
+ return -EIO;
+}
+
+/* gpi_of_dma_xlate: open client requested channel */
+static struct dma_chan *gpi_of_dma_xlate(struct of_phandle_args *args,
+ struct of_dma *of_dma)
+{
+ struct gpi_dev *gpi_dev = (struct gpi_dev *)of_dma->of_dma_data;
+ u32 seid, chid;
+ int gpii;
+ struct gchan *gchan;
+
+ if (args->args_count < 3) {
+ dev_err(gpi_dev->dev, "gpii require minimum 2 args, client passed:%d args\n",
+ args->args_count);
+ return NULL;
+ }
+
+ chid = args->args[0];
+ if (chid >= MAX_CHANNELS_PER_GPII) {
+ dev_err(gpi_dev->dev, "gpii channel:%d not valid\n", chid);
+ return NULL;
+ }
+
+ seid = args->args[1];
+
+ /* find next available gpii to use */
+ gpii = gpi_find_avail_gpii(gpi_dev, seid);
+ if (gpii < 0) {
+ dev_err(gpi_dev->dev, "no available gpii instances\n");
+ return NULL;
+ }
+
+ gchan = &gpi_dev->gpiis[gpii].gchan[chid];
+ if (gchan->vc.chan.client_count) {
+ dev_err(gpi_dev->dev, "gpii:%d chid:%d seid:%d already configured\n",
+ gpii, chid, gchan->seid);
+ return NULL;
+ }
+
+ gchan->seid = seid;
+ gchan->protocol = args->args[2];
+
+ return dma_get_slave_channel(&gchan->vc.chan);
+}
+
+static int gpi_probe(struct platform_device *pdev)
+{
+ struct gpi_dev *gpi_dev;
+ unsigned int i;
+ int ret;
+
+ gpi_dev = devm_kzalloc(&pdev->dev, sizeof(*gpi_dev), GFP_KERNEL);
+ if (!gpi_dev)
+ return -ENOMEM;
+
+ gpi_dev->dev = &pdev->dev;
+ gpi_dev->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ gpi_dev->regs = devm_ioremap_resource(gpi_dev->dev, gpi_dev->res);
+ if (IS_ERR(gpi_dev->regs))
+ return PTR_ERR(gpi_dev->regs);
+ gpi_dev->ee_base = gpi_dev->regs;
+
+ ret = of_property_read_u32(gpi_dev->dev->of_node, "dma-channels",
+ &gpi_dev->max_gpii);
+ if (ret) {
+ dev_err(gpi_dev->dev, "missing 'max-no-gpii' DT node\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(gpi_dev->dev->of_node, "dma-channel-mask",
+ &gpi_dev->gpii_mask);
+ if (ret) {
+ dev_err(gpi_dev->dev, "missing 'gpii-mask' DT node\n");
+ return ret;
+ }
+
+ gpi_dev->ev_factor = EV_FACTOR;
+
+ ret = dma_set_mask(gpi_dev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ dev_err(gpi_dev->dev, "Error setting dma_mask to 64, ret:%d\n", ret);
+ return ret;
+ }
+
+ gpi_dev->gpiis = devm_kcalloc(gpi_dev->dev, gpi_dev->max_gpii,
+ sizeof(*gpi_dev->gpiis), GFP_KERNEL);
+ if (!gpi_dev->gpiis)
+ return -ENOMEM;
+
+ /* setup all the supported gpii */
+ INIT_LIST_HEAD(&gpi_dev->dma_device.channels);
+ for (i = 0; i < gpi_dev->max_gpii; i++) {
+ struct gpii *gpii = &gpi_dev->gpiis[i];
+ int chan;
+
+ if (!((1 << i) & gpi_dev->gpii_mask))
+ continue;
+
+ /* set up ev cntxt register map */
+ gpii->ev_cntxt_base_reg = gpi_dev->ee_base + GPII_n_EV_CH_k_CNTXT_0_OFFS(i, 0);
+ gpii->ev_cntxt_db_reg = gpi_dev->ee_base + GPII_n_EV_CH_k_DOORBELL_0_OFFS(i, 0);
+ gpii->ev_ring_rp_lsb_reg = gpii->ev_cntxt_base_reg + CNTXT_4_RING_RP_LSB;
+ gpii->ev_cmd_reg = gpi_dev->ee_base + GPII_n_EV_CH_CMD_OFFS(i);
+ gpii->ieob_clr_reg = gpi_dev->ee_base + GPII_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(i);
+
+ /* set up irq */
+ ret = platform_get_irq(pdev, i);
+ if (ret < 0) {
+ dev_err(gpi_dev->dev, "platform_get_irq failed for %d:%d\n", i, ret);
+ return ret;
+ }
+ gpii->irq = ret;
+
+ /* set up channel specific register info */
+ for (chan = 0; chan < MAX_CHANNELS_PER_GPII; chan++) {
+ struct gchan *gchan = &gpii->gchan[chan];
+
+ /* set up ch cntxt register map */
+ gchan->ch_cntxt_base_reg = gpi_dev->ee_base +
+ GPII_n_CH_k_CNTXT_0_OFFS(i, chan);
+ gchan->ch_cntxt_db_reg = gpi_dev->ee_base +
+ GPII_n_CH_k_DOORBELL_0_OFFS(i, chan);
+ gchan->ch_cmd_reg = gpi_dev->ee_base + GPII_n_CH_CMD_OFFS(i);
+
+ /* vchan setup */
+ vchan_init(&gchan->vc, &gpi_dev->dma_device);
+ gchan->vc.desc_free = gpi_desc_free;
+ gchan->chid = chan;
+ gchan->gpii = gpii;
+ gchan->dir = GPII_CHAN_DIR[chan];
+ }
+ mutex_init(&gpii->ctrl_lock);
+ rwlock_init(&gpii->pm_lock);
+ tasklet_init(&gpii->ev_task, gpi_ev_tasklet,
+ (unsigned long)gpii);
+ init_completion(&gpii->cmd_completion);
+ gpii->gpii_id = i;
+ gpii->regs = gpi_dev->ee_base;
+ gpii->gpi_dev = gpi_dev;
+ }
+
+ platform_set_drvdata(pdev, gpi_dev);
+
+ /* clear and set capabilities */
+ dma_cap_zero(gpi_dev->dma_device.cap_mask);
+ dma_cap_set(DMA_SLAVE, gpi_dev->dma_device.cap_mask);
+
+ /* configure dmaengine apis */
+ gpi_dev->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ gpi_dev->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+ gpi_dev->dma_device.src_addr_widths = DMA_SLAVE_BUSWIDTH_8_BYTES;
+ gpi_dev->dma_device.dst_addr_widths = DMA_SLAVE_BUSWIDTH_8_BYTES;
+ gpi_dev->dma_device.device_alloc_chan_resources = gpi_alloc_chan_resources;
+ gpi_dev->dma_device.device_free_chan_resources = gpi_free_chan_resources;
+ gpi_dev->dma_device.device_tx_status = dma_cookie_status;
+ gpi_dev->dma_device.device_issue_pending = gpi_issue_pending;
+ gpi_dev->dma_device.device_prep_slave_sg = gpi_prep_slave_sg;
+ gpi_dev->dma_device.device_config = gpi_peripheral_config;
+ gpi_dev->dma_device.device_terminate_all = gpi_terminate_all;
+ gpi_dev->dma_device.dev = gpi_dev->dev;
+ gpi_dev->dma_device.device_pause = gpi_pause;
+ gpi_dev->dma_device.device_resume = gpi_resume;
+
+ /* register with dmaengine framework */
+ ret = dma_async_device_register(&gpi_dev->dma_device);
+ if (ret) {
+ dev_err(gpi_dev->dev, "async_device_register failed ret:%d", ret);
+ return ret;
+ }
+
+ ret = of_dma_controller_register(gpi_dev->dev->of_node,
+ gpi_of_dma_xlate, gpi_dev);
+ if (ret) {
+ dev_err(gpi_dev->dev, "of_dma_controller_reg failed ret:%d", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static const struct of_device_id gpi_of_match[] = {
+ { .compatible = "qcom,sdm845-gpi-dma" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, gpi_of_match);
+
+static struct platform_driver gpi_driver = {
+ .probe = gpi_probe,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = gpi_of_match,
+ },
+};
+
+static int __init gpi_init(void)
+{
+ return platform_driver_register(&gpi_driver);
+}
+subsys_initcall(gpi_init);
+
+MODULE_DESCRIPTION("QCOM GPI DMA engine driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/qcom/qcom_adm.c b/drivers/dma/qcom/qcom_adm.c
new file mode 100644
index 000000000000..ee78bed8d60d
--- /dev/null
+++ b/drivers/dma/qcom/qcom_adm.c
@@ -0,0 +1,905 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+
+#include "../dmaengine.h"
+#include "../virt-dma.h"
+
+/* ADM registers - calculated from channel number and security domain */
+#define ADM_CHAN_MULTI 0x4
+#define ADM_CI_MULTI 0x4
+#define ADM_CRCI_MULTI 0x4
+#define ADM_EE_MULTI 0x800
+#define ADM_CHAN_OFFS(chan) (ADM_CHAN_MULTI * (chan))
+#define ADM_EE_OFFS(ee) (ADM_EE_MULTI * (ee))
+#define ADM_CHAN_EE_OFFS(chan, ee) (ADM_CHAN_OFFS(chan) + ADM_EE_OFFS(ee))
+#define ADM_CI_OFFS(ci) (ADM_CHAN_OFFS(ci))
+#define ADM_CH_CMD_PTR(chan, ee) (ADM_CHAN_EE_OFFS(chan, ee))
+#define ADM_CH_RSLT(chan, ee) (0x40 + ADM_CHAN_EE_OFFS(chan, ee))
+#define ADM_CH_FLUSH_STATE0(chan, ee) (0x80 + ADM_CHAN_EE_OFFS(chan, ee))
+#define ADM_CH_STATUS_SD(chan, ee) (0x200 + ADM_CHAN_EE_OFFS(chan, ee))
+#define ADM_CH_CONF(chan) (0x240 + ADM_CHAN_OFFS(chan))
+#define ADM_CH_RSLT_CONF(chan, ee) (0x300 + ADM_CHAN_EE_OFFS(chan, ee))
+#define ADM_SEC_DOMAIN_IRQ_STATUS(ee) (0x380 + ADM_EE_OFFS(ee))
+#define ADM_CI_CONF(ci) (0x390 + (ci) * ADM_CI_MULTI)
+#define ADM_GP_CTL 0x3d8
+#define ADM_CRCI_CTL(crci, ee) (0x400 + (crci) * ADM_CRCI_MULTI + \
+ ADM_EE_OFFS(ee))
+
+/* channel status */
+#define ADM_CH_STATUS_VALID BIT(1)
+
+/* channel result */
+#define ADM_CH_RSLT_VALID BIT(31)
+#define ADM_CH_RSLT_ERR BIT(3)
+#define ADM_CH_RSLT_FLUSH BIT(2)
+#define ADM_CH_RSLT_TPD BIT(1)
+
+/* channel conf */
+#define ADM_CH_CONF_SHADOW_EN BIT(12)
+#define ADM_CH_CONF_MPU_DISABLE BIT(11)
+#define ADM_CH_CONF_PERM_MPU_CONF BIT(9)
+#define ADM_CH_CONF_FORCE_RSLT_EN BIT(7)
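+/* the security domain field is split: ee[1:0] land in bits 5:4 and ee[2] in bit 13 */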
+#define ADM_CH_CONF_SEC_DOMAIN(ee) ((((ee) & 0x3) << 4) | (((ee) & 0x4) << 11))
+
+/* channel result conf */
+#define ADM_CH_RSLT_CONF_FLUSH_EN BIT(1)
+#define ADM_CH_RSLT_CONF_IRQ_EN BIT(0)
+
+/* CRCI CTL */
+#define ADM_CRCI_CTL_MUX_SEL BIT(18)
+#define ADM_CRCI_CTL_RST BIT(17)
+
+/* CI configuration */
+#define ADM_CI_RANGE_END(x) ((x) << 24)
+#define ADM_CI_RANGE_START(x) ((x) << 16)
+#define ADM_CI_BURST_4_WORDS BIT(2)
+#define ADM_CI_BURST_8_WORDS BIT(3)
+
+/* GP CTL */
+#define ADM_GP_CTL_LP_EN BIT(12)
+#define ADM_GP_CTL_LP_CNT(x) ((x) << 8)
+
+/* Command pointer list entry */
+#define ADM_CPLE_LP BIT(31)
+#define ADM_CPLE_CMD_PTR_LIST BIT(29)
+
+/* Command list entry */
+#define ADM_CMD_LC BIT(31)
+#define ADM_CMD_DST_CRCI(n) (((n) & 0xf) << 7)
+#define ADM_CMD_SRC_CRCI(n) (((n) & 0xf) << 3)
+
+#define ADM_CMD_TYPE_SINGLE 0x0
+#define ADM_CMD_TYPE_BOX 0x3
+
+#define ADM_CRCI_MUX_SEL BIT(4)
+#define ADM_DESC_ALIGN 8
+#define ADM_MAX_XFER (SZ_64K - 1)
+#define ADM_MAX_ROWS (SZ_64K - 1)
+#define ADM_MAX_CHANNELS 16
+
+struct adm_desc_hw_box {
+ u32 cmd;
+ u32 src_addr;
+ u32 dst_addr;
+ u32 row_len;
+ u32 num_rows;
+ u32 row_offset;
+};
+
+struct adm_desc_hw_single {
+ u32 cmd;
+ u32 src_addr;
+ u32 dst_addr;
+ u32 len;
+};
+
+struct adm_async_desc {
+ struct virt_dma_desc vd;
+ struct adm_device *adev;
+
+ size_t length;
+ enum dma_transfer_direction dir;
+ dma_addr_t dma_addr;
+ size_t dma_len;
+
+ void *cpl;
+ dma_addr_t cp_addr;
+ u32 crci;
+ u32 mux;
+ u32 blk_size;
+};
+
+struct adm_chan {
+ struct virt_dma_chan vc;
+ struct adm_device *adev;
+
+ /* parsed from DT */
+ u32 id; /* channel id */
+
+ struct adm_async_desc *curr_txd;
+ struct dma_slave_config slave;
+ struct list_head node;
+
+ int error;
+ int initialized;
+};
+
+static inline struct adm_chan *to_adm_chan(struct dma_chan *common)
+{
+ return container_of(common, struct adm_chan, vc.chan);
+}
+
+struct adm_device {
+ void __iomem *regs;
+ struct device *dev;
+ struct dma_device common;
+ struct device_dma_parameters dma_parms;
+ struct adm_chan *channels;
+
+ u32 ee;
+
+ struct clk *core_clk;
+ struct clk *iface_clk;
+
+ struct reset_control *clk_reset;
+ struct reset_control *c0_reset;
+ struct reset_control *c1_reset;
+ struct reset_control *c2_reset;
+ int irq;
+};
+
+/**
+ * adm_free_chan - Frees dma resources associated with the specific channel
+ *
+ * @chan: dma channel
+ *
+ * Free all allocated descriptors associated with this channel
+ */
+static void adm_free_chan(struct dma_chan *chan)
+{
+ /* free all queued descriptors */
+ vchan_free_chan_resources(to_virt_chan(chan));
+}
+
+/**
+ * adm_get_blksize - Get block size from burst value
+ *
+ * @burst: Burst size of transaction
+ */
+static int adm_get_blksize(unsigned int burst)
+{
+ int ret;
+
+ switch (burst) {
+ case 16:
+ case 32:
+ case 64:
+ case 128:
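+ /* bursts of 16/32/64/128 encode as log2(burst / 16) */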
+ ret = ffs(burst >> 4) - 1;
+ break;
+ case 192:
+ ret = 4;
+ break;
+ case 256:
+ ret = 5;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * adm_process_fc_descriptors - Process descriptors for flow controlled xfers
+ *
+ * @achan: ADM channel
+ * @desc: Descriptor memory pointer
+ * @sg: Scatterlist entry
+ * @crci: CRCI value
+ * @burst: Burst size of transaction
+ * @direction: DMA transfer direction
+ */
+static void *adm_process_fc_descriptors(struct adm_chan *achan, void *desc,
+ struct scatterlist *sg, u32 crci,
+ u32 burst,
+ enum dma_transfer_direction direction)
+{
+ struct adm_desc_hw_box *box_desc = NULL;
+ struct adm_desc_hw_single *single_desc;
+ u32 remainder = sg_dma_len(sg);
+ u32 rows, row_offset, crci_cmd;
+ u32 mem_addr = sg_dma_address(sg);
+ u32 *incr_addr = &mem_addr;
+ u32 *src, *dst;
+
+ if (direction == DMA_DEV_TO_MEM) {
+ crci_cmd = ADM_CMD_SRC_CRCI(crci);
+ row_offset = burst;
+ src = &achan->slave.src_addr;
+ dst = &mem_addr;
+ } else {
+ crci_cmd = ADM_CMD_DST_CRCI(crci);
+ row_offset = burst << 16;
+ src = &mem_addr;
+ dst = &achan->slave.dst_addr;
+ }
+
+ while (remainder >= burst) {
+ box_desc = desc;
+ box_desc->cmd = ADM_CMD_TYPE_BOX | crci_cmd;
+ box_desc->row_offset = row_offset;
+ box_desc->src_addr = *src;
+ box_desc->dst_addr = *dst;
+
+ rows = remainder / burst;
+ rows = min_t(u32, rows, ADM_MAX_ROWS);
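+ /* pack the same row count and row length into the src (upper) and dst (lower) halves */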
+ box_desc->num_rows = rows << 16 | rows;
+ box_desc->row_len = burst << 16 | burst;
+
+ *incr_addr += burst * rows;
+ remainder -= burst * rows;
+ desc += sizeof(*box_desc);
+ }
+
+ /* if leftover bytes, do one single descriptor */
+ if (remainder) {
+ single_desc = desc;
+ single_desc->cmd = ADM_CMD_TYPE_SINGLE | crci_cmd;
+ single_desc->len = remainder;
+ single_desc->src_addr = *src;
+ single_desc->dst_addr = *dst;
+ desc += sizeof(*single_desc);
+
+ if (sg_is_last(sg))
+ single_desc->cmd |= ADM_CMD_LC;
+ } else {
+ if (box_desc && sg_is_last(sg))
+ box_desc->cmd |= ADM_CMD_LC;
+ }
+
+ return desc;
+}
+
+/**
+ * adm_process_non_fc_descriptors - Process descriptors for non-fc xfers
+ *
+ * @achan: ADM channel
+ * @desc: Descriptor memory pointer
+ * @sg: Scatterlist entry
+ * @direction: DMA transfer direction
+ */
+static void *adm_process_non_fc_descriptors(struct adm_chan *achan, void *desc,
+ struct scatterlist *sg,
+ enum dma_transfer_direction direction)
+{
+ struct adm_desc_hw_single *single_desc;
+ u32 remainder = sg_dma_len(sg);
+ u32 mem_addr = sg_dma_address(sg);
+ u32 *incr_addr = &mem_addr;
+ u32 *src, *dst;
+
+ if (direction == DMA_DEV_TO_MEM) {
+ src = &achan->slave.src_addr;
+ dst = &mem_addr;
+ } else {
+ src = &mem_addr;
+ dst = &achan->slave.dst_addr;
+ }
+
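+ /* chop the sg entry into single-mode descriptors of at most ADM_MAX_XFER bytes each */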
+ do {
+ single_desc = desc;
+ single_desc->cmd = ADM_CMD_TYPE_SINGLE;
+ single_desc->src_addr = *src;
+ single_desc->dst_addr = *dst;
+ single_desc->len = (remainder > ADM_MAX_XFER) ?
+ ADM_MAX_XFER : remainder;
+
+ remainder -= single_desc->len;
+ *incr_addr += single_desc->len;
+ desc += sizeof(*single_desc);
+ } while (remainder);
+
+ /* set last command if this is the end of the whole transaction */
+ if (sg_is_last(sg))
+ single_desc->cmd |= ADM_CMD_LC;
+
+ return desc;
+}
+
+/**
+ * adm_prep_slave_sg - Prep slave sg transaction
+ *
+ * @chan: dma channel
+ * @sgl: scatter gather list
+ * @sg_len: length of sg
+ * @direction: DMA transfer direction
+ * @flags: DMA flags
+ * @context: transfer context (unused)
+ */
+static struct dma_async_tx_descriptor *adm_prep_slave_sg(struct dma_chan *chan,
+ struct scatterlist *sgl,
+ unsigned int sg_len,
+ enum dma_transfer_direction direction,
+ unsigned long flags,
+ void *context)
+{
+ struct adm_chan *achan = to_adm_chan(chan);
+ struct adm_device *adev = achan->adev;
+ struct adm_async_desc *async_desc;
+ struct scatterlist *sg;
+ dma_addr_t cple_addr;
+ u32 i, burst;
+ u32 single_count = 0, box_count = 0, crci = 0;
+ void *desc;
+ u32 *cple;
+ int blk_size = 0;
+
+ if (!is_slave_direction(direction)) {
+ dev_err(adev->dev, "invalid dma direction\n");
+ return NULL;
+ }
+
+ /*
+ * get burst value from slave configuration
+ */
+ burst = (direction == DMA_MEM_TO_DEV) ?
+ achan->slave.dst_maxburst :
+ achan->slave.src_maxburst;
+
+ /* if using flow control, validate burst and crci values */
+ if (achan->slave.device_fc) {
+ blk_size = adm_get_blksize(burst);
+ if (blk_size < 0) {
+ dev_err(adev->dev, "invalid burst value: %d\n",
+ burst);
+ return ERR_PTR(-EINVAL);
+ }
+
+ crci = achan->slave.slave_id & 0xf;
+ if (!crci || achan->slave.slave_id > 0x1f) {
+ dev_err(adev->dev, "invalid crci value\n");
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ /* iterate through sgs and compute allocation size of structures */
+ for_each_sg(sgl, sg, sg_len, i) {
+ if (achan->slave.device_fc) {
+ box_count += DIV_ROUND_UP(sg_dma_len(sg) / burst,
+ ADM_MAX_ROWS);
+ if (sg_dma_len(sg) % burst)
+ single_count++;
+ } else {
+ single_count += DIV_ROUND_UP(sg_dma_len(sg),
+ ADM_MAX_XFER);
+ }
+ }
+
+ async_desc = kzalloc(sizeof(*async_desc), GFP_NOWAIT);
+ if (!async_desc)
+ return ERR_PTR(-ENOMEM);
+
+ if (crci)
+ async_desc->mux = achan->slave.slave_id & ADM_CRCI_MUX_SEL ?
+ ADM_CRCI_CTL_MUX_SEL : 0;
+ async_desc->crci = crci;
+ async_desc->blk_size = blk_size;
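+ /* room for every descriptor plus one command list entry, with slack to 8-byte align both */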
+ async_desc->dma_len = single_count * sizeof(struct adm_desc_hw_single) +
+ box_count * sizeof(struct adm_desc_hw_box) +
+ sizeof(*cple) + 2 * ADM_DESC_ALIGN;
+
+ async_desc->cpl = kzalloc(async_desc->dma_len, GFP_NOWAIT);
+ if (!async_desc->cpl)
+ goto free;
+
+ async_desc->adev = adev;
+
+ /* both command list entry and descriptors must be 8 byte aligned */
+ cple = PTR_ALIGN(async_desc->cpl, ADM_DESC_ALIGN);
+ desc = PTR_ALIGN(cple + 1, ADM_DESC_ALIGN);
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ async_desc->length += sg_dma_len(sg);
+
+ if (achan->slave.device_fc)
+ desc = adm_process_fc_descriptors(achan, desc, sg, crci,
+ burst, direction);
+ else
+ desc = adm_process_non_fc_descriptors(achan, desc, sg,
+ direction);
+ }
+
+ async_desc->dma_addr = dma_map_single(adev->dev, async_desc->cpl,
+ async_desc->dma_len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(adev->dev, async_desc->dma_addr))
+ goto free;
+
+ cple_addr = async_desc->dma_addr + ((void *)cple - async_desc->cpl);
+
+ /* init cmd list */
+ dma_sync_single_for_cpu(adev->dev, cple_addr, sizeof(*cple),
+ DMA_TO_DEVICE);
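+ /* single command pointer entry flagged as last; addresses are stored in 8-byte units */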
+ *cple = ADM_CPLE_LP;
+ *cple |= (async_desc->dma_addr + ADM_DESC_ALIGN) >> 3;
+ dma_sync_single_for_device(adev->dev, cple_addr, sizeof(*cple),
+ DMA_TO_DEVICE);
+
+ return vchan_tx_prep(&achan->vc, &async_desc->vd, flags);
+
+free:
+ /* also free the command list buffer if the DMA mapping failed after allocation */
+ kfree(async_desc->cpl);
+ kfree(async_desc);
+ return ERR_PTR(-ENOMEM);
+}
+
+/**
+ * adm_terminate_all - terminate all transactions on a channel
+ * @chan: dma channel
+ *
+ * Dequeues and frees all transactions and aborts the current transaction.
+ * No callbacks are done.
+ *
+ */
+static int adm_terminate_all(struct dma_chan *chan)
+{
+ struct adm_chan *achan = to_adm_chan(chan);
+ struct adm_device *adev = achan->adev;
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&achan->vc.lock, flags);
+ vchan_get_all_descriptors(&achan->vc, &head);
+
+ /* send flush command to terminate current transaction */
+ writel_relaxed(0x0,
+ adev->regs + ADM_CH_FLUSH_STATE0(achan->id, adev->ee));
+
+ spin_unlock_irqrestore(&achan->vc.lock, flags);
+
+ vchan_dma_desc_free_list(&achan->vc, &head);
+
+ return 0;
+}
+
+static int adm_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg)
+{
+ struct adm_chan *achan = to_adm_chan(chan);
+ unsigned long flag;
+
+ spin_lock_irqsave(&achan->vc.lock, flag);
+ memcpy(&achan->slave, cfg, sizeof(struct dma_slave_config));
+ spin_unlock_irqrestore(&achan->vc.lock, flag);
+
+ return 0;
+}
+
+/**
+ * adm_start_dma - start next transaction
+ * @achan: ADM dma channel
+ */
+static void adm_start_dma(struct adm_chan *achan)
+{
+ struct virt_dma_desc *vd = vchan_next_desc(&achan->vc);
+ struct adm_device *adev = achan->adev;
+ struct adm_async_desc *async_desc;
+
+ lockdep_assert_held(&achan->vc.lock);
+
+ if (!vd)
+ return;
+
+ list_del(&vd->node);
+
+ /* pull the next descriptor off the issued list */
+ async_desc = container_of(vd, struct adm_async_desc, vd);
+ achan->curr_txd = async_desc;
+
+ /* reset channel error */
+ achan->error = 0;
+
+ if (!achan->initialized) {
+ /* enable interrupts */
+ writel(ADM_CH_CONF_SHADOW_EN |
+ ADM_CH_CONF_PERM_MPU_CONF |
+ ADM_CH_CONF_MPU_DISABLE |
+ ADM_CH_CONF_SEC_DOMAIN(adev->ee),
+ adev->regs + ADM_CH_CONF(achan->id));
+
+ writel(ADM_CH_RSLT_CONF_IRQ_EN | ADM_CH_RSLT_CONF_FLUSH_EN,
+ adev->regs + ADM_CH_RSLT_CONF(achan->id, adev->ee));
+
+ achan->initialized = 1;
+ }
+
+ /* set the crci block size if this transaction requires CRCI */
+ if (async_desc->crci) {
+ writel(async_desc->mux | async_desc->blk_size,
+ adev->regs + ADM_CRCI_CTL(async_desc->crci, adev->ee));
+ }
+
+ /* make sure IRQ enable doesn't get reordered */
+ wmb();
+
+ /* write next command list out to the CMD FIFO */
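+ /* the CMD PTR register takes the 8-byte-aligned descriptor address in 8-byte units */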
+ writel(ALIGN(async_desc->dma_addr, ADM_DESC_ALIGN) >> 3,
+ adev->regs + ADM_CH_CMD_PTR(achan->id, adev->ee));
+}
+
+/**
+ * adm_dma_irq - irq handler for ADM controller
+ * @irq: IRQ of interrupt
+ * @data: callback data
+ *
+ * IRQ handler for the ADM controller
+ */
+static irqreturn_t adm_dma_irq(int irq, void *data)
+{
+ struct adm_device *adev = data;
+ u32 srcs, i;
+ struct adm_async_desc *async_desc;
+ unsigned long flags;
+
+ srcs = readl_relaxed(adev->regs +
+ ADM_SEC_DOMAIN_IRQ_STATUS(adev->ee));
+
+ for (i = 0; i < ADM_MAX_CHANNELS; i++) {
+ struct adm_chan *achan = &adev->channels[i];
+ u32 status, result;
+
+ if (srcs & BIT(i)) {
+ status = readl_relaxed(adev->regs +
+ ADM_CH_STATUS_SD(i, adev->ee));
+
+ /* if no result present, skip */
+ if (!(status & ADM_CH_STATUS_VALID))
+ continue;
+
+ result = readl_relaxed(adev->regs +
+ ADM_CH_RSLT(i, adev->ee));
+
+ /* no valid results, skip */
+ if (!(result & ADM_CH_RSLT_VALID))
+ continue;
+
+ /* flag error if transaction was flushed or failed */
+ if (result & (ADM_CH_RSLT_ERR | ADM_CH_RSLT_FLUSH))
+ achan->error = 1;
+
+ spin_lock_irqsave(&achan->vc.lock, flags);
+ async_desc = achan->curr_txd;
+
+ achan->curr_txd = NULL;
+
+ if (async_desc) {
+ vchan_cookie_complete(&async_desc->vd);
+
+ /* kick off next DMA */
+ adm_start_dma(achan);
+ }
+
+ spin_unlock_irqrestore(&achan->vc.lock, flags);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * adm_tx_status - returns status of transaction
+ * @chan: dma channel
+ * @cookie: transaction cookie
+ * @txstate: DMA transaction state
+ *
+ * Return status of dma transaction
+ */
+static enum dma_status adm_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct adm_chan *achan = to_adm_chan(chan);
+ struct virt_dma_desc *vd;
+ enum dma_status ret;
+ unsigned long flags;
+ size_t residue = 0;
+
+ ret = dma_cookie_status(chan, cookie, txstate);
+ if (ret == DMA_COMPLETE || !txstate)
+ return ret;
+
+ spin_lock_irqsave(&achan->vc.lock, flags);
+
+ vd = vchan_find_desc(&achan->vc, cookie);
+ if (vd)
+ residue = container_of(vd, struct adm_async_desc, vd)->length;
+
+ spin_unlock_irqrestore(&achan->vc.lock, flags);
+
+ /*
+ * residue is either the full length if it is in the issued list, or 0
+ * if it is in progress. We have no reliable way of determining
+ * anything in between.
+ */
+ dma_set_residue(txstate, residue);
+
+ if (achan->error)
+ return DMA_ERROR;
+
+ return ret;
+}
+
+/**
+ * adm_issue_pending - starts pending transactions
+ * @chan: dma channel
+ *
+ * Issues all pending transactions and starts DMA
+ */
+static void adm_issue_pending(struct dma_chan *chan)
+{
+ struct adm_chan *achan = to_adm_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&achan->vc.lock, flags);
+
+ if (vchan_issue_pending(&achan->vc) && !achan->curr_txd)
+ adm_start_dma(achan);
+ spin_unlock_irqrestore(&achan->vc.lock, flags);
+}
+
+/**
+ * adm_dma_free_desc - free descriptor memory
+ * @vd: virtual descriptor
+ *
+ */
+static void adm_dma_free_desc(struct virt_dma_desc *vd)
+{
+ struct adm_async_desc *async_desc = container_of(vd,
+ struct adm_async_desc, vd);
+
+ dma_unmap_single(async_desc->adev->dev, async_desc->dma_addr,
+ async_desc->dma_len, DMA_TO_DEVICE);
+ kfree(async_desc->cpl);
+ kfree(async_desc);
+}
+
+static void adm_channel_init(struct adm_device *adev, struct adm_chan *achan,
+ u32 index)
+{
+ achan->id = index;
+ achan->adev = adev;
+
+ vchan_init(&achan->vc, &adev->common);
+ achan->vc.desc_free = adm_dma_free_desc;
+}
+
+static int adm_dma_probe(struct platform_device *pdev)
+{
+ struct adm_device *adev;
+ int ret;
+ u32 i;
+
+ adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL);
+ if (!adev)
+ return -ENOMEM;
+
+ adev->dev = &pdev->dev;
+
+ adev->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(adev->regs))
+ return PTR_ERR(adev->regs);
+
+ adev->irq = platform_get_irq(pdev, 0);
+ if (adev->irq < 0)
+ return adev->irq;
+
+ ret = of_property_read_u32(pdev->dev.of_node, "qcom,ee", &adev->ee);
+ if (ret) {
+ dev_err(adev->dev, "Execution environment unspecified\n");
+ return ret;
+ }
+
+ adev->core_clk = devm_clk_get(adev->dev, "core");
+ if (IS_ERR(adev->core_clk))
+ return PTR_ERR(adev->core_clk);
+
+ adev->iface_clk = devm_clk_get(adev->dev, "iface");
+ if (IS_ERR(adev->iface_clk))
+ return PTR_ERR(adev->iface_clk);
+
+ adev->clk_reset = devm_reset_control_get_exclusive(&pdev->dev, "clk");
+ if (IS_ERR(adev->clk_reset)) {
+ dev_err(adev->dev, "failed to get ADM0 reset\n");
+ return PTR_ERR(adev->clk_reset);
+ }
+
+ adev->c0_reset = devm_reset_control_get_exclusive(&pdev->dev, "c0");
+ if (IS_ERR(adev->c0_reset)) {
+ dev_err(adev->dev, "failed to get ADM0 C0 reset\n");
+ return PTR_ERR(adev->c0_reset);
+ }
+
+ adev->c1_reset = devm_reset_control_get_exclusive(&pdev->dev, "c1");
+ if (IS_ERR(adev->c1_reset)) {
+ dev_err(adev->dev, "failed to get ADM0 C1 reset\n");
+ return PTR_ERR(adev->c1_reset);
+ }
+
+ adev->c2_reset = devm_reset_control_get_exclusive(&pdev->dev, "c2");
+ if (IS_ERR(adev->c2_reset)) {
+ dev_err(adev->dev, "failed to get ADM0 C2 reset\n");
+ return PTR_ERR(adev->c2_reset);
+ }
+
+ ret = clk_prepare_enable(adev->core_clk);
+ if (ret) {
+ dev_err(adev->dev, "failed to prepare/enable core clock\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(adev->iface_clk);
+ if (ret) {
+ dev_err(adev->dev, "failed to prepare/enable iface clock\n");
+ goto err_disable_core_clk;
+ }
+
+ reset_control_assert(adev->clk_reset);
+ reset_control_assert(adev->c0_reset);
+ reset_control_assert(adev->c1_reset);
+ reset_control_assert(adev->c2_reset);
+
+ udelay(2);
+
+ reset_control_deassert(adev->clk_reset);
+ reset_control_deassert(adev->c0_reset);
+ reset_control_deassert(adev->c1_reset);
+ reset_control_deassert(adev->c2_reset);
+
+ adev->channels = devm_kcalloc(adev->dev, ADM_MAX_CHANNELS,
+ sizeof(*adev->channels), GFP_KERNEL);
+
+ if (!adev->channels) {
+ ret = -ENOMEM;
+ goto err_disable_clks;
+ }
+
+ /* allocate and initialize channels */
+ INIT_LIST_HEAD(&adev->common.channels);
+
+ for (i = 0; i < ADM_MAX_CHANNELS; i++)
+ adm_channel_init(adev, &adev->channels[i], i);
+
+ /* reset CRCIs */
+ for (i = 0; i < 16; i++)
+ writel(ADM_CRCI_CTL_RST, adev->regs +
+ ADM_CRCI_CTL(i, adev->ee));
+
+ /* configure client interfaces */
+ writel(ADM_CI_RANGE_START(0x40) | ADM_CI_RANGE_END(0xb0) |
+ ADM_CI_BURST_8_WORDS, adev->regs + ADM_CI_CONF(0));
+ writel(ADM_CI_RANGE_START(0x2a) | ADM_CI_RANGE_END(0x2c) |
+ ADM_CI_BURST_8_WORDS, adev->regs + ADM_CI_CONF(1));
+ writel(ADM_CI_RANGE_START(0x12) | ADM_CI_RANGE_END(0x28) |
+ ADM_CI_BURST_8_WORDS, adev->regs + ADM_CI_CONF(2));
+ writel(ADM_GP_CTL_LP_EN | ADM_GP_CTL_LP_CNT(0xf),
+ adev->regs + ADM_GP_CTL);
+
+ ret = devm_request_irq(adev->dev, adev->irq, adm_dma_irq,
+ 0, "adm_dma", adev);
+ if (ret)
+ goto err_disable_clks;
+
+ platform_set_drvdata(pdev, adev);
+
+ adev->common.dev = adev->dev;
+ adev->common.dev->dma_parms = &adev->dma_parms;
+
+ /* set capabilities */
+ dma_cap_zero(adev->common.cap_mask);
+ dma_cap_set(DMA_SLAVE, adev->common.cap_mask);
+ dma_cap_set(DMA_PRIVATE, adev->common.cap_mask);
+
+ /* initialize dmaengine apis */
+ adev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ adev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+ adev->common.src_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ adev->common.dst_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ adev->common.device_free_chan_resources = adm_free_chan;
+ adev->common.device_prep_slave_sg = adm_prep_slave_sg;
+ adev->common.device_issue_pending = adm_issue_pending;
+ adev->common.device_tx_status = adm_tx_status;
+ adev->common.device_terminate_all = adm_terminate_all;
+ adev->common.device_config = adm_slave_config;
+
+ ret = dma_async_device_register(&adev->common);
+ if (ret) {
+ dev_err(adev->dev, "failed to register dma async device\n");
+ goto err_disable_clks;
+ }
+
+ ret = of_dma_controller_register(pdev->dev.of_node,
+ of_dma_xlate_by_chan_id,
+ &adev->common);
+ if (ret)
+ goto err_unregister_dma;
+
+ return 0;
+
+err_unregister_dma:
+ dma_async_device_unregister(&adev->common);
+err_disable_clks:
+ clk_disable_unprepare(adev->iface_clk);
+err_disable_core_clk:
+ clk_disable_unprepare(adev->core_clk);
+
+ return ret;
+}
+
+static int adm_dma_remove(struct platform_device *pdev)
+{
+ struct adm_device *adev = platform_get_drvdata(pdev);
+ struct adm_chan *achan;
+ u32 i;
+
+ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(&adev->common);
+
+ for (i = 0; i < ADM_MAX_CHANNELS; i++) {
+ achan = &adev->channels[i];
+
+ /* mask IRQs for this channel/EE pair */
+ writel(0, adev->regs + ADM_CH_RSLT_CONF(achan->id, adev->ee));
+
+ tasklet_kill(&adev->channels[i].vc.task);
+ adm_terminate_all(&adev->channels[i].vc.chan);
+ }
+
+ devm_free_irq(adev->dev, adev->irq, adev);
+
+ clk_disable_unprepare(adev->core_clk);
+ clk_disable_unprepare(adev->iface_clk);
+
+ return 0;
+}
+
+static const struct of_device_id adm_of_match[] = {
+ { .compatible = "qcom,adm", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, adm_of_match);
+
+static struct platform_driver adm_dma_driver = {
+ .probe = adm_dma_probe,
+ .remove = adm_dma_remove,
+ .driver = {
+ .name = "adm-dma-engine",
+ .of_match_table = adm_of_match,
+ },
+};
+
+module_platform_driver(adm_dma_driver);
+
+MODULE_AUTHOR("Andy Gross <agross@codeaurora.org>");
+MODULE_DESCRIPTION("QCOM ADM DMA engine driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c
index 528deb5d9f31..c4c4e8575764 100644
--- a/drivers/dma/sf-pdma/sf-pdma.c
+++ b/drivers/dma/sf-pdma/sf-pdma.c
@@ -326,10 +326,9 @@ static irqreturn_t sf_pdma_done_isr(int irq, void *dev_id)
{
struct sf_pdma_chan *chan = dev_id;
struct pdma_regs *regs = &chan->regs;
- unsigned long flags;
u64 residue;
- spin_lock_irqsave(&chan->vchan.lock, flags);
+ spin_lock(&chan->vchan.lock);
writel((readl(regs->ctrl)) & ~PDMA_DONE_STATUS_MASK, regs->ctrl);
residue = readq(regs->residue);
@@ -346,7 +345,7 @@ static irqreturn_t sf_pdma_done_isr(int irq, void *dev_id)
sf_pdma_xfer_desc(chan);
}
- spin_unlock_irqrestore(&chan->vchan.lock, flags);
+ spin_unlock(&chan->vchan.lock);
return IRQ_HANDLED;
}
@@ -355,11 +354,10 @@ static irqreturn_t sf_pdma_err_isr(int irq, void *dev_id)
{
struct sf_pdma_chan *chan = dev_id;
struct pdma_regs *regs = &chan->regs;
- unsigned long flags;
- spin_lock_irqsave(&chan->lock, flags);
+ spin_lock(&chan->lock);
writel((readl(regs->ctrl)) & ~PDMA_ERR_STATUS_MASK, regs->ctrl);
- spin_unlock_irqrestore(&chan->lock, flags);
+ spin_unlock(&chan->lock);
tasklet_schedule(&chan->err_tasklet);
@@ -584,7 +582,7 @@ static struct platform_driver sf_pdma_driver = {
.remove = sf_pdma_remove,
.driver = {
.name = "sf-pdma",
- .of_match_table = of_match_ptr(sf_pdma_dt_ids),
+ .of_match_table = sf_pdma_dt_ids,
},
};
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 77ab1f4730be..4256e55bbf25 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -1643,13 +1643,12 @@ static irqreturn_t d40_handle_interrupt(int irq, void *data)
u32 row;
long chan = -1;
struct d40_chan *d40c;
- unsigned long flags;
struct d40_base *base = data;
u32 *regs = base->regs_interrupt;
struct d40_interrupt_lookup *il = base->gen_dmac.il;
u32 il_size = base->gen_dmac.il_size;
- spin_lock_irqsave(&base->interrupt_lock, flags);
+ spin_lock(&base->interrupt_lock);
/* Read interrupt status of both logical and physical channels */
for (i = 0; i < il_size; i++)
@@ -1694,7 +1693,7 @@ static irqreturn_t d40_handle_interrupt(int irq, void *data)
spin_unlock(&d40c->lock);
}
- spin_unlock_irqrestore(&base->interrupt_lock, flags);
+ spin_unlock(&base->interrupt_lock);
return IRQ_HANDLED;
}
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index d0055d2f0b9a..f54ecb123a52 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -264,9 +264,11 @@ static int stm32_dma_get_width(struct stm32_dma_chan *chan,
}
static enum dma_slave_buswidth stm32_dma_get_max_width(u32 buf_len,
+ dma_addr_t buf_addr,
u32 threshold)
{
enum dma_slave_buswidth max_width;
+ u64 addr = buf_addr;
if (threshold == STM32_DMA_FIFO_THRESHOLD_FULL)
max_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
@@ -277,6 +279,9 @@ static enum dma_slave_buswidth stm32_dma_get_max_width(u32 buf_len,
max_width > DMA_SLAVE_BUSWIDTH_1_BYTE)
max_width = max_width >> 1;
+ if (do_div(addr, max_width))
+ max_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+
return max_width;
}
@@ -648,21 +653,12 @@ static irqreturn_t stm32_dma_chan_irq(int irq, void *devid)
scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));
- if (status & STM32_DMA_TCI) {
- stm32_dma_irq_clear(chan, STM32_DMA_TCI);
- if (scr & STM32_DMA_SCR_TCIE)
- stm32_dma_handle_chan_done(chan);
- status &= ~STM32_DMA_TCI;
- }
- if (status & STM32_DMA_HTI) {
- stm32_dma_irq_clear(chan, STM32_DMA_HTI);
- status &= ~STM32_DMA_HTI;
- }
if (status & STM32_DMA_FEI) {
stm32_dma_irq_clear(chan, STM32_DMA_FEI);
status &= ~STM32_DMA_FEI;
if (sfcr & STM32_DMA_SFCR_FEIE) {
- if (!(scr & STM32_DMA_SCR_EN))
+ if (!(scr & STM32_DMA_SCR_EN) &&
+ !(status & STM32_DMA_TCI))
dev_err(chan2dev(chan), "FIFO Error\n");
else
dev_dbg(chan2dev(chan), "FIFO over/underrun\n");
@@ -674,6 +670,19 @@ static irqreturn_t stm32_dma_chan_irq(int irq, void *devid)
if (sfcr & STM32_DMA_SCR_DMEIE)
dev_dbg(chan2dev(chan), "Direct mode overrun\n");
}
+
+ if (status & STM32_DMA_TCI) {
+ stm32_dma_irq_clear(chan, STM32_DMA_TCI);
+ if (scr & STM32_DMA_SCR_TCIE)
+ stm32_dma_handle_chan_done(chan);
+ status &= ~STM32_DMA_TCI;
+ }
+
+ if (status & STM32_DMA_HTI) {
+ stm32_dma_irq_clear(chan, STM32_DMA_HTI);
+ status &= ~STM32_DMA_HTI;
+ }
+
if (status) {
stm32_dma_irq_clear(chan, status);
dev_err(chan2dev(chan), "DMA error: status=0x%08x\n", status);
@@ -703,7 +712,7 @@ static void stm32_dma_issue_pending(struct dma_chan *c)
static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
enum dma_transfer_direction direction,
enum dma_slave_buswidth *buswidth,
- u32 buf_len)
+ u32 buf_len, dma_addr_t buf_addr)
{
enum dma_slave_buswidth src_addr_width, dst_addr_width;
int src_bus_width, dst_bus_width;
@@ -735,7 +744,8 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
return dst_burst_size;
/* Set memory data size */
- src_addr_width = stm32_dma_get_max_width(buf_len, fifoth);
+ src_addr_width = stm32_dma_get_max_width(buf_len, buf_addr,
+ fifoth);
chan->mem_width = src_addr_width;
src_bus_width = stm32_dma_get_width(chan, src_addr_width);
if (src_bus_width < 0)
@@ -784,7 +794,8 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
return src_burst_size;
/* Set memory data size */
- dst_addr_width = stm32_dma_get_max_width(buf_len, fifoth);
+ dst_addr_width = stm32_dma_get_max_width(buf_len, buf_addr,
+ fifoth);
chan->mem_width = dst_addr_width;
dst_bus_width = stm32_dma_get_width(chan, dst_addr_width);
if (dst_bus_width < 0)
@@ -872,7 +883,8 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg(
for_each_sg(sgl, sg, sg_len, i) {
ret = stm32_dma_set_xfer_param(chan, direction, &buswidth,
- sg_dma_len(sg));
+ sg_dma_len(sg),
+ sg_dma_address(sg));
if (ret < 0)
goto err;
@@ -940,7 +952,8 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_cyclic(
return NULL;
}
- ret = stm32_dma_set_xfer_param(chan, direction, &buswidth, period_len);
+ ret = stm32_dma_set_xfer_param(chan, direction, &buswidth, period_len,
+ buf_addr);
if (ret < 0)
return NULL;
@@ -1216,6 +1229,8 @@ static void stm32_dma_free_chan_resources(struct dma_chan *c)
pm_runtime_put(dmadev->ddev.dev);
vchan_free_chan_resources(to_virt_chan(c));
+ stm32_dma_clear_reg(&chan->chan_reg);
+ chan->threshold = 0;
}
static void stm32_dma_desc_free(struct virt_dma_desc *vdesc)
diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c
index a10ccd964376..ef0d0555103d 100644
--- a/drivers/dma/stm32-dmamux.c
+++ b/drivers/dma/stm32-dmamux.c
@@ -168,7 +168,7 @@ error_chan_id:
return ERR_PTR(ret);
}
-static const struct of_device_id stm32_stm32dma_master_match[] = {
+static const struct of_device_id stm32_stm32dma_master_match[] __maybe_unused = {
{ .compatible = "st,stm32-dma", },
{},
};
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index 08cfbfab837b..36ba8b43e78d 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -199,7 +199,7 @@
#define STM32_MDMA_MAX_CHANNELS 63
#define STM32_MDMA_MAX_REQUESTS 256
#define STM32_MDMA_MAX_BURST 128
-#define STM32_MDMA_VERY_HIGH_PRIORITY 0x11
+#define STM32_MDMA_VERY_HIGH_PRIORITY 0x3
enum stm32_mdma_trigger_mode {
STM32_MDMA_BUFFER,
@@ -339,7 +339,7 @@ static struct stm32_mdma_desc *stm32_mdma_alloc_desc(
struct stm32_mdma_desc *desc;
int i;
- desc = kzalloc(offsetof(typeof(*desc), node[count]), GFP_NOWAIT);
+ desc = kzalloc(struct_size(desc, node, count), GFP_NOWAIT);
if (!desc)
return NULL;
@@ -1346,7 +1346,7 @@ static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid)
{
struct stm32_mdma_device *dmadev = devid;
struct stm32_mdma_chan *chan = devid;
- u32 reg, id, ien, status, flag;
+ u32 reg, id, ccr, ien, status;
/* Find out which channel generates the interrupt */
status = readl_relaxed(dmadev->base + STM32_MDMA_GISR0);
@@ -1368,67 +1368,71 @@ static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid)
chan = &dmadev->chan[id];
if (!chan) {
- dev_dbg(mdma2dev(dmadev), "MDMA channel not initialized\n");
- goto exit;
+ dev_warn(mdma2dev(dmadev), "MDMA channel not initialized\n");
+ return IRQ_NONE;
}
/* Handle interrupt for the channel */
spin_lock(&chan->vchan.lock);
- status = stm32_mdma_read(dmadev, STM32_MDMA_CISR(chan->id));
- ien = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id));
- ien &= STM32_MDMA_CCR_IRQ_MASK;
- ien >>= 1;
+ status = stm32_mdma_read(dmadev, STM32_MDMA_CISR(id));
+ /* Mask Channel ReQuest Active bit which can be set in case of MEM2MEM */
+ status &= ~STM32_MDMA_CISR_CRQA;
+ ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(id));
+ ien = (ccr & STM32_MDMA_CCR_IRQ_MASK) >> 1;
if (!(status & ien)) {
spin_unlock(&chan->vchan.lock);
- dev_dbg(chan2dev(chan),
- "spurious it (status=0x%04x, ien=0x%04x)\n",
- status, ien);
+ dev_warn(chan2dev(chan),
+ "spurious it (status=0x%04x, ien=0x%04x)\n",
+ status, ien);
return IRQ_NONE;
}
- flag = __ffs(status & ien);
- reg = STM32_MDMA_CIFCR(chan->id);
+ reg = STM32_MDMA_CIFCR(id);
- switch (1 << flag) {
- case STM32_MDMA_CISR_TEIF:
- id = chan->id;
- status = readl_relaxed(dmadev->base + STM32_MDMA_CESR(id));
- dev_err(chan2dev(chan), "Transfer Err: stat=0x%08x\n", status);
+ if (status & STM32_MDMA_CISR_TEIF) {
+ dev_err(chan2dev(chan), "Transfer Err: stat=0x%08x\n",
+ readl_relaxed(dmadev->base + STM32_MDMA_CESR(id)));
stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CTEIF);
- break;
+ status &= ~STM32_MDMA_CISR_TEIF;
+ }
- case STM32_MDMA_CISR_CTCIF:
+ if (status & STM32_MDMA_CISR_CTCIF) {
stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CCTCIF);
+ status &= ~STM32_MDMA_CISR_CTCIF;
stm32_mdma_xfer_end(chan);
- break;
+ }
- case STM32_MDMA_CISR_BRTIF:
+ if (status & STM32_MDMA_CISR_BRTIF) {
stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CBRTIF);
- break;
+ status &= ~STM32_MDMA_CISR_BRTIF;
+ }
- case STM32_MDMA_CISR_BTIF:
+ if (status & STM32_MDMA_CISR_BTIF) {
stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CBTIF);
+ status &= ~STM32_MDMA_CISR_BTIF;
chan->curr_hwdesc++;
if (chan->desc && chan->desc->cyclic) {
if (chan->curr_hwdesc == chan->desc->count)
chan->curr_hwdesc = 0;
vchan_cyclic_callback(&chan->desc->vdesc);
}
- break;
+ }
- case STM32_MDMA_CISR_TCIF:
+ if (status & STM32_MDMA_CISR_TCIF) {
stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CLTCIF);
- break;
+ status &= ~STM32_MDMA_CISR_TCIF;
+ }
- default:
- dev_err(chan2dev(chan), "it %d unhandled (status=0x%04x)\n",
- 1 << flag, status);
+ if (status) {
+ stm32_mdma_set_bits(dmadev, reg, status);
+ dev_err(chan2dev(chan), "DMA error: status=0x%08x\n", status);
+ if (!(ccr & STM32_MDMA_CCR_EN))
+ dev_err(chan2dev(chan), "chan disabled by HW\n");
}
spin_unlock(&chan->vchan.lock);
-exit:
return IRQ_HANDLED;
}
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c
index f5f9c86c50bc..5cadd4d2b824 100644
--- a/drivers/dma/sun6i-dma.c
+++ b/drivers/dma/sun6i-dma.c
@@ -1174,6 +1174,30 @@ static struct sun6i_dma_config sun50i_a64_dma_cfg = {
};
/*
+ * TODO: Add support for more than 4g physical addressing.
+ *
+ * The A100 binding uses the number of dma channels from the
+ * device tree node.
+ */
+static struct sun6i_dma_config sun50i_a100_dma_cfg = {
+ .clock_autogate_enable = sun6i_enable_clock_autogate_h3,
+ .set_burst_length = sun6i_set_burst_length_h3,
+ .set_drq = sun6i_set_drq_h6,
+ .set_mode = sun6i_set_mode_h6,
+ .src_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16),
+ .dst_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16),
+ .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES),
+ .dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES),
+ .has_mbus_clk = true,
+};
+
+/*
* The H6 binding uses the number of dma channels from the
* device tree node.
*/
@@ -1225,6 +1249,7 @@ static const struct of_device_id sun6i_dma_match[] = {
{ .compatible = "allwinner,sun8i-h3-dma", .data = &sun8i_h3_dma_cfg },
{ .compatible = "allwinner,sun8i-v3s-dma", .data = &sun8i_v3s_dma_cfg },
{ .compatible = "allwinner,sun50i-a64-dma", .data = &sun50i_a64_dma_cfg },
+ { .compatible = "allwinner,sun50i-a100-dma", .data = &sun50i_a100_dma_cfg },
{ .compatible = "allwinner,sun50i-h6-dma", .data = &sun50i_h6_dma_cfg },
{ /* sentinel */ }
};
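
The sun50i-a100 change above is purely declarative: a config struct plus a compatible entry. At probe time the driver resolves the matching entry through the OF match data; a hedged sketch of that lookup, where sketch_probe() is invented and of_device_get_match_data() is the standard helper:

#include <linux/of_device.h>
#include <linux/platform_device.h>

static int sketch_probe(struct platform_device *pdev)
{
	const struct sun6i_dma_config *cfg;

	/* returns the .data pointer of the matching of_device_id */
	cfg = of_device_get_match_data(&pdev->dev);
	if (!cfg)
		return -ENODEV;

	/* cfg->set_drq/set_mode and the burst/width masks drive setup */
	return 0;
}
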
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index c5fa2ef74abc..4735742e826d 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -408,19 +408,18 @@ static irqreturn_t tegra_adma_isr(int irq, void *dev_id)
{
struct tegra_adma_chan *tdc = dev_id;
unsigned long status;
- unsigned long flags;
- spin_lock_irqsave(&tdc->vc.lock, flags);
+ spin_lock(&tdc->vc.lock);
status = tegra_adma_irq_clear(tdc);
if (status == 0 || !tdc->desc) {
- spin_unlock_irqrestore(&tdc->vc.lock, flags);
+ spin_unlock(&tdc->vc.lock);
return IRQ_NONE;
}
vchan_cyclic_callback(&tdc->desc->vd);
- spin_unlock_irqrestore(&tdc->vc.lock, flags);
+ spin_unlock(&tdc->vc.lock);
return IRQ_HANDLED;
}
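
The tegra210-adma hunk drops the irqsave/irqrestore pair because the handler runs in hard-IRQ context, where local interrupts are already disabled; the plain lock variant is equivalent there and cheaper. A sketch of the resulting shape (field names follow the driver, the body is elided):

static irqreturn_t sketch_isr(int irq, void *dev_id)
{
	struct tegra_adma_chan *tdc = dev_id;

	spin_lock(&tdc->vc.lock);	/* no irqsave: hardirq context */
	/* ... clear status, run the cyclic callback ... */
	spin_unlock(&tdc->vc.lock);

	return IRQ_HANDLED;
}
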
diff --git a/drivers/dma/ti/Makefile b/drivers/dma/ti/Makefile
index 0c67254caee6..bd496efadff7 100644
--- a/drivers/dma/ti/Makefile
+++ b/drivers/dma/ti/Makefile
@@ -7,5 +7,6 @@ obj-$(CONFIG_TI_K3_UDMA_GLUE_LAYER) += k3-udma-glue.o
obj-$(CONFIG_TI_K3_PSIL) += k3-psil.o \
k3-psil-am654.o \
k3-psil-j721e.o \
- k3-psil-j7200.o
+ k3-psil-j7200.o \
+ k3-psil-am64.o
obj-$(CONFIG_TI_DMA_CROSSBAR) += dma-crossbar.o
diff --git a/drivers/dma/ti/dma-crossbar.c b/drivers/dma/ti/dma-crossbar.c
index 4ba8fa5d9c36..71d24fc07c00 100644
--- a/drivers/dma/ti/dma-crossbar.c
+++ b/drivers/dma/ti/dma-crossbar.c
@@ -122,7 +122,7 @@ static void *ti_am335x_xbar_route_allocate(struct of_phandle_args *dma_spec,
return map;
}
-static const struct of_device_id ti_am335x_master_match[] = {
+static const struct of_device_id ti_am335x_master_match[] __maybe_unused = {
{ .compatible = "ti,edma3-tpcc", },
{},
};
@@ -292,7 +292,7 @@ static const u32 ti_dma_offset[] = {
[TI_XBAR_SDMA_OFFSET] = 1,
};
-static const struct of_device_id ti_dra7_master_match[] = {
+static const struct of_device_id ti_dra7_master_match[] __maybe_unused = {
{
.compatible = "ti,omap4430-sdma",
.data = &ti_dma_offset[TI_XBAR_SDMA_OFFSET],
@@ -460,7 +460,7 @@ static int ti_dma_xbar_probe(struct platform_device *pdev)
static struct platform_driver ti_dma_xbar_driver = {
.driver = {
.name = "ti-dma-crossbar",
- .of_match_table = of_match_ptr(ti_dma_xbar_match),
+ .of_match_table = ti_dma_xbar_match,
},
.probe = ti_dma_xbar_probe,
};
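
The two dma-crossbar changes are complementary: of_match_ptr() evaluates to NULL when CONFIG_OF is disabled, which would leave a match table defined but unreferenced, so removing it makes the driver-level table always used, while the internal master-match tables keep their CONFIG_OF-only consumers and are tagged __maybe_unused to silence the warning. A sketch of the annotation, with a hypothetical compatible string:

/* Referenced only from CONFIG_OF-guarded code, hence __maybe_unused */
static const struct of_device_id sketch_match[] __maybe_unused = {
	{ .compatible = "vendor,example-xbar" },	/* hypothetical */
	{ /* sentinel */ }
};
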
diff --git a/drivers/dma/ti/k3-psil-am64.c b/drivers/dma/ti/k3-psil-am64.c
new file mode 100644
index 000000000000..9fdeaa11a4fc
--- /dev/null
+++ b/drivers/dma/ti/k3-psil-am64.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Texas Instruments Incorporated - https://www.ti.com
+ * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ */
+
+#include <linux/kernel.h>
+
+#include "k3-psil-priv.h"
+
+#define PSIL_PDMA_XY_TR(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ .mapped_channel_id = -1, \
+ .default_flow_id = -1, \
+ }, \
+ }
+
+#define PSIL_PDMA_XY_PKT(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ .mapped_channel_id = -1, \
+ .default_flow_id = -1, \
+ .pkt_mode = 1, \
+ }, \
+ }
+
+#define PSIL_ETHERNET(x, ch, flow_base, flow_cnt) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ .pkt_mode = 1, \
+ .needs_epib = 1, \
+ .psd_size = 16, \
+ .mapped_channel_id = ch, \
+ .flow_start = flow_base, \
+ .flow_num = flow_cnt, \
+ .default_flow_id = flow_base, \
+ }, \
+ }
+
+#define PSIL_SAUL(x, ch, flow_base, flow_cnt, default_flow, tx) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ .pkt_mode = 1, \
+ .needs_epib = 1, \
+ .psd_size = 64, \
+ .mapped_channel_id = ch, \
+ .flow_start = flow_base, \
+ .flow_num = flow_cnt, \
+ .default_flow_id = default_flow, \
+ .notdpkt = tx, \
+ }, \
+ }
+
+/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */
+static struct psil_ep am64_src_ep_map[] = {
+ /* SAUL */
+ PSIL_SAUL(0x4000, 17, 32, 8, 32, 0),
+ PSIL_SAUL(0x4001, 18, 32, 8, 33, 0),
+ PSIL_SAUL(0x4002, 19, 40, 8, 40, 0),
+ PSIL_SAUL(0x4003, 20, 40, 8, 41, 0),
+ /* ICSS_G0 */
+ PSIL_ETHERNET(0x4100, 21, 48, 16),
+ PSIL_ETHERNET(0x4101, 22, 64, 16),
+ PSIL_ETHERNET(0x4102, 23, 80, 16),
+ PSIL_ETHERNET(0x4103, 24, 96, 16),
+ /* ICSS_G1 */
+ PSIL_ETHERNET(0x4200, 25, 112, 16),
+ PSIL_ETHERNET(0x4201, 26, 128, 16),
+ PSIL_ETHERNET(0x4202, 27, 144, 16),
+ PSIL_ETHERNET(0x4203, 28, 160, 16),
+ /* PDMA_MAIN0 - SPI0-3 */
+ PSIL_PDMA_XY_PKT(0x4300),
+ PSIL_PDMA_XY_PKT(0x4301),
+ PSIL_PDMA_XY_PKT(0x4302),
+ PSIL_PDMA_XY_PKT(0x4303),
+ PSIL_PDMA_XY_PKT(0x4304),
+ PSIL_PDMA_XY_PKT(0x4305),
+ PSIL_PDMA_XY_PKT(0x4306),
+ PSIL_PDMA_XY_PKT(0x4307),
+ PSIL_PDMA_XY_PKT(0x4308),
+ PSIL_PDMA_XY_PKT(0x4309),
+ PSIL_PDMA_XY_PKT(0x430a),
+ PSIL_PDMA_XY_PKT(0x430b),
+ PSIL_PDMA_XY_PKT(0x430c),
+ PSIL_PDMA_XY_PKT(0x430d),
+ PSIL_PDMA_XY_PKT(0x430e),
+ PSIL_PDMA_XY_PKT(0x430f),
+ /* PDMA_MAIN0 - USART0-1 */
+ PSIL_PDMA_XY_PKT(0x4310),
+ PSIL_PDMA_XY_PKT(0x4311),
+ /* PDMA_MAIN1 - SPI4 */
+ PSIL_PDMA_XY_PKT(0x4400),
+ PSIL_PDMA_XY_PKT(0x4401),
+ PSIL_PDMA_XY_PKT(0x4402),
+ PSIL_PDMA_XY_PKT(0x4403),
+ /* PDMA_MAIN1 - USART2-6 */
+ PSIL_PDMA_XY_PKT(0x4404),
+ PSIL_PDMA_XY_PKT(0x4405),
+ PSIL_PDMA_XY_PKT(0x4406),
+ PSIL_PDMA_XY_PKT(0x4407),
+ PSIL_PDMA_XY_PKT(0x4408),
+ /* PDMA_MAIN1 - ADCs */
+ PSIL_PDMA_XY_TR(0x440f),
+ PSIL_PDMA_XY_TR(0x4410),
+ /* CPSW2 */
+ PSIL_ETHERNET(0x4500, 16, 16, 16),
+};
+
+/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */
+static struct psil_ep am64_dst_ep_map[] = {
+ /* SAUL */
+ PSIL_SAUL(0xc000, 24, 80, 8, 80, 1),
+ PSIL_SAUL(0xc001, 25, 88, 8, 88, 1),
+ /* ICSS_G0 */
+ PSIL_ETHERNET(0xc100, 26, 96, 1),
+ PSIL_ETHERNET(0xc101, 27, 97, 1),
+ PSIL_ETHERNET(0xc102, 28, 98, 1),
+ PSIL_ETHERNET(0xc103, 29, 99, 1),
+ PSIL_ETHERNET(0xc104, 30, 100, 1),
+ PSIL_ETHERNET(0xc105, 31, 101, 1),
+ PSIL_ETHERNET(0xc106, 32, 102, 1),
+ PSIL_ETHERNET(0xc107, 33, 103, 1),
+ /* ICSS_G1 */
+ PSIL_ETHERNET(0xc200, 34, 104, 1),
+ PSIL_ETHERNET(0xc201, 35, 105, 1),
+ PSIL_ETHERNET(0xc202, 36, 106, 1),
+ PSIL_ETHERNET(0xc203, 37, 107, 1),
+ PSIL_ETHERNET(0xc204, 38, 108, 1),
+ PSIL_ETHERNET(0xc205, 39, 109, 1),
+ PSIL_ETHERNET(0xc206, 40, 110, 1),
+ PSIL_ETHERNET(0xc207, 41, 111, 1),
+ /* CPSW2 */
+ PSIL_ETHERNET(0xc500, 16, 16, 8),
+ PSIL_ETHERNET(0xc501, 17, 24, 8),
+ PSIL_ETHERNET(0xc502, 18, 32, 8),
+ PSIL_ETHERNET(0xc503, 19, 40, 8),
+ PSIL_ETHERNET(0xc504, 20, 48, 8),
+ PSIL_ETHERNET(0xc505, 21, 56, 8),
+ PSIL_ETHERNET(0xc506, 22, 64, 8),
+ PSIL_ETHERNET(0xc507, 23, 72, 8),
+};
+
+struct psil_ep_map am64_ep_map = {
+ .name = "am64",
+ .src = am64_src_ep_map,
+ .src_count = ARRAY_SIZE(am64_src_ep_map),
+ .dst = am64_dst_ep_map,
+ .dst_count = ARRAY_SIZE(am64_dst_ep_map),
+};
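
The am64 tables above are only data; psil_get_ep_config() resolves a PSI-L thread ID against them at channel-request time. A hedged sketch of what that lookup amounts to, where find_ep() is an invented stand-in rather than the real function:

static struct psil_ep *find_ep(struct psil_ep *eps, int cnt, u32 thread_id)
{
	int i;

	for (i = 0; i < cnt; i++)
		if (eps[i].thread_id == thread_id)
			return &eps[i];

	return NULL;	/* caller turns this into an ERR_PTR */
}
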
diff --git a/drivers/dma/ti/k3-psil-priv.h b/drivers/dma/ti/k3-psil-priv.h
index b4b0fb359eff..b74e192e3c2d 100644
--- a/drivers/dma/ti/k3-psil-priv.h
+++ b/drivers/dma/ti/k3-psil-priv.h
@@ -40,5 +40,6 @@ struct psil_endpoint_config *psil_get_ep_config(u32 thread_id);
extern struct psil_ep_map am654_ep_map;
extern struct psil_ep_map j721e_ep_map;
extern struct psil_ep_map j7200_ep_map;
+extern struct psil_ep_map am64_ep_map;
#endif /* K3_PSIL_PRIV_H_ */
diff --git a/drivers/dma/ti/k3-psil.c b/drivers/dma/ti/k3-psil.c
index 837853aab95a..13ce7367d870 100644
--- a/drivers/dma/ti/k3-psil.c
+++ b/drivers/dma/ti/k3-psil.c
@@ -20,6 +20,7 @@ static const struct soc_device_attribute k3_soc_devices[] = {
{ .family = "AM65X", .data = &am654_ep_map },
{ .family = "J721E", .data = &j721e_ep_map },
{ .family = "J7200", .data = &j7200_ep_map },
+ { .family = "AM64X", .data = &am64_ep_map },
{ /* sentinel */ }
};
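
The family table is resolved once at runtime against the running SoC; a sketch of the selection, assuming the standard soc_device_match() helper (sketch_get_map() is invented):

#include <linux/sys_soc.h>

static const struct psil_ep_map *sketch_get_map(void)
{
	const struct soc_device_attribute *match;

	match = soc_device_match(k3_soc_devices);

	return match ? match->data : NULL;
}
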
diff --git a/drivers/dma/ti/k3-udma-glue.c b/drivers/dma/ti/k3-udma-glue.c
index a367584f0d7b..4fdd9f06b723 100644
--- a/drivers/dma/ti/k3-udma-glue.c
+++ b/drivers/dma/ti/k3-udma-glue.c
@@ -22,6 +22,7 @@
struct k3_udma_glue_common {
struct device *dev;
+ struct device chan_dev;
struct udma_dev *udmax;
const struct udma_tisci_rm *tisci_rm;
struct k3_ringacc *ringacc;
@@ -32,7 +33,8 @@ struct k3_udma_glue_common {
bool epib;
u32 psdata_size;
u32 swdata_size;
- u32 atype;
+ u32 atype_asel;
+ struct psil_endpoint_config *ep_config;
};
struct k3_udma_glue_tx_channel {
@@ -53,6 +55,8 @@ struct k3_udma_glue_tx_channel {
bool tx_filt_einfo;
bool tx_filt_pswords;
bool tx_supr_tdpkt;
+
+ int udma_tflow_id;
};
struct k3_udma_glue_rx_flow {
@@ -81,20 +85,26 @@ struct k3_udma_glue_rx_channel {
u32 flows_ready;
};
+static void k3_udma_chan_dev_release(struct device *dev)
+{
+ /* The struct containing the device is devm managed */
+}
+
+static struct class k3_udma_glue_devclass = {
+ .name = "k3_udma_glue_chan",
+ .dev_release = k3_udma_chan_dev_release,
+};
+
#define K3_UDMAX_TDOWN_TIMEOUT_US 1000
static int of_k3_udma_glue_parse(struct device_node *udmax_np,
struct k3_udma_glue_common *common)
{
- common->ringacc = of_k3_ringacc_get_by_phandle(udmax_np,
- "ti,ringacc");
- if (IS_ERR(common->ringacc))
- return PTR_ERR(common->ringacc);
-
common->udmax = of_xudma_dev_get(udmax_np, NULL);
if (IS_ERR(common->udmax))
return PTR_ERR(common->udmax);
+ common->ringacc = xudma_get_ringacc(common->udmax);
common->tisci_rm = xudma_dev_get_tisci_rm(common->udmax);
return 0;
@@ -104,7 +114,6 @@ static int of_k3_udma_glue_parse_chn(struct device_node *chn_np,
const char *name, struct k3_udma_glue_common *common,
bool tx_chn)
{
- struct psil_endpoint_config *ep_config;
struct of_phandle_args dma_spec;
u32 thread_id;
int ret = 0;
@@ -121,15 +130,26 @@ static int of_k3_udma_glue_parse_chn(struct device_node *chn_np,
&dma_spec))
return -ENOENT;
+ ret = of_k3_udma_glue_parse(dma_spec.np, common);
+ if (ret)
+ goto out_put_spec;
+
thread_id = dma_spec.args[0];
if (dma_spec.args_count == 2) {
- if (dma_spec.args[1] > 2) {
+ if (dma_spec.args[1] > 2 && !xudma_is_pktdma(common->udmax)) {
dev_err(common->dev, "Invalid channel atype: %u\n",
dma_spec.args[1]);
ret = -EINVAL;
goto out_put_spec;
}
- common->atype = dma_spec.args[1];
+ if (dma_spec.args[1] > 15 && xudma_is_pktdma(common->udmax)) {
+ dev_err(common->dev, "Invalid channel asel: %u\n",
+ dma_spec.args[1]);
+ ret = -EINVAL;
+ goto out_put_spec;
+ }
+
+ common->atype_asel = dma_spec.args[1];
}
if (tx_chn && !(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
@@ -143,25 +163,23 @@ static int of_k3_udma_glue_parse_chn(struct device_node *chn_np,
}
/* get psil endpoint config */
- ep_config = psil_get_ep_config(thread_id);
- if (IS_ERR(ep_config)) {
+ common->ep_config = psil_get_ep_config(thread_id);
+ if (IS_ERR(common->ep_config)) {
dev_err(common->dev,
"No configuration for psi-l thread 0x%04x\n",
thread_id);
- ret = PTR_ERR(ep_config);
+ ret = PTR_ERR(common->ep_config);
goto out_put_spec;
}
- common->epib = ep_config->needs_epib;
- common->psdata_size = ep_config->psd_size;
+ common->epib = common->ep_config->needs_epib;
+ common->psdata_size = common->ep_config->psd_size;
if (tx_chn)
common->dst_thread = thread_id;
else
common->src_thread = thread_id;
- ret = of_k3_udma_glue_parse(dma_spec.np, common);
-
out_put_spec:
of_node_put(dma_spec.np);
return ret;
@@ -227,7 +245,7 @@ static int k3_udma_glue_cfg_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
req.tx_supr_tdpkt = 1;
req.tx_fetch_size = tx_chn->common.hdesc_size >> 2;
req.txcq_qnum = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
- req.tx_atype = tx_chn->common.atype;
+ req.tx_atype = tx_chn->common.atype_asel;
return tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
}
@@ -259,8 +277,14 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
tx_chn->common.psdata_size,
tx_chn->common.swdata_size);
+ if (xudma_is_pktdma(tx_chn->common.udmax))
+ tx_chn->udma_tchan_id = tx_chn->common.ep_config->mapped_channel_id;
+ else
+ tx_chn->udma_tchan_id = -1;
+
/* request and cfg UDMAP TX channel */
- tx_chn->udma_tchanx = xudma_tchan_get(tx_chn->common.udmax, -1);
+ tx_chn->udma_tchanx = xudma_tchan_get(tx_chn->common.udmax,
+ tx_chn->udma_tchan_id);
if (IS_ERR(tx_chn->udma_tchanx)) {
ret = PTR_ERR(tx_chn->udma_tchanx);
dev_err(dev, "UDMAX tchanx get err %d\n", ret);
@@ -268,11 +292,34 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
}
tx_chn->udma_tchan_id = xudma_tchan_get_id(tx_chn->udma_tchanx);
+ tx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
+ tx_chn->common.chan_dev.parent = xudma_get_device(tx_chn->common.udmax);
+ dev_set_name(&tx_chn->common.chan_dev, "tchan%d-0x%04x",
+ tx_chn->udma_tchan_id, tx_chn->common.dst_thread);
+ ret = device_register(&tx_chn->common.chan_dev);
+ if (ret) {
+ dev_err(dev, "Channel Device registration failed %d\n", ret);
+ tx_chn->common.chan_dev.parent = NULL;
+ goto err;
+ }
+
+ if (xudma_is_pktdma(tx_chn->common.udmax)) {
+ /* prepare the channel device as coherent */
+ tx_chn->common.chan_dev.dma_coherent = true;
+ dma_coerce_mask_and_coherent(&tx_chn->common.chan_dev,
+ DMA_BIT_MASK(48));
+ }
+
atomic_set(&tx_chn->free_pkts, cfg->txcq_cfg.size);
+ if (xudma_is_pktdma(tx_chn->common.udmax))
+ tx_chn->udma_tflow_id = tx_chn->common.ep_config->default_flow_id;
+ else
+ tx_chn->udma_tflow_id = tx_chn->udma_tchan_id;
+
/* request and cfg rings */
ret = k3_ringacc_request_rings_pair(tx_chn->common.ringacc,
- tx_chn->udma_tchan_id, -1,
+ tx_chn->udma_tflow_id, -1,
&tx_chn->ringtx,
&tx_chn->ringtxcq);
if (ret) {
@@ -280,6 +327,16 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
goto err;
}
+ /* Set the dma_dev for the rings to be configured */
+ cfg->tx_cfg.dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn);
+ cfg->txcq_cfg.dma_dev = cfg->tx_cfg.dma_dev;
+
+ /* Set the ASEL value for DMA rings of PKTDMA */
+ if (xudma_is_pktdma(tx_chn->common.udmax)) {
+ cfg->tx_cfg.asel = tx_chn->common.atype_asel;
+ cfg->txcq_cfg.asel = tx_chn->common.atype_asel;
+ }
+
ret = k3_ringacc_ring_cfg(tx_chn->ringtx, &cfg->tx_cfg);
if (ret) {
dev_err(dev, "Failed to cfg ringtx %d\n", ret);
@@ -303,19 +360,6 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
goto err;
}
- ret = xudma_navss_psil_pair(tx_chn->common.udmax,
- tx_chn->common.src_thread,
- tx_chn->common.dst_thread);
- if (ret) {
- dev_err(dev, "PSI-L request err %d\n", ret);
- goto err;
- }
-
- tx_chn->psil_paired = true;
-
- /* reset TX RT registers */
- k3_udma_glue_disable_tx_chn(tx_chn);
-
k3_udma_glue_dump_tx_chn(tx_chn);
return tx_chn;
@@ -344,6 +388,11 @@ void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
if (tx_chn->ringtx)
k3_ringacc_ring_free(tx_chn->ringtx);
+
+ if (tx_chn->common.chan_dev.parent) {
+ device_unregister(&tx_chn->common.chan_dev);
+ tx_chn->common.chan_dev.parent = NULL;
+ }
}
EXPORT_SYMBOL_GPL(k3_udma_glue_release_tx_chn);
@@ -378,6 +427,18 @@ EXPORT_SYMBOL_GPL(k3_udma_glue_pop_tx_chn);
int k3_udma_glue_enable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
+ int ret;
+
+ ret = xudma_navss_psil_pair(tx_chn->common.udmax,
+ tx_chn->common.src_thread,
+ tx_chn->common.dst_thread);
+ if (ret) {
+ dev_err(tx_chn->common.dev, "PSI-L request err %d\n", ret);
+ return ret;
+ }
+
+ tx_chn->psil_paired = true;
+
xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
UDMA_PEER_RT_EN_ENABLE);
@@ -398,6 +459,13 @@ void k3_udma_glue_disable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
xudma_tchanrt_write(tx_chn->udma_tchanx,
UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis2");
+
+ if (tx_chn->psil_paired) {
+ xudma_navss_psil_unpair(tx_chn->common.udmax,
+ tx_chn->common.src_thread,
+ tx_chn->common.dst_thread);
+ tx_chn->psil_paired = false;
+ }
}
EXPORT_SYMBOL_GPL(k3_udma_glue_disable_tx_chn);
@@ -437,13 +505,10 @@ void k3_udma_glue_reset_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
void *data,
void (*cleanup)(void *data, dma_addr_t desc_dma))
{
+ struct device *dev = tx_chn->common.dev;
dma_addr_t desc_dma;
int occ_tx, i, ret;
- /* reset TXCQ as it is not input for udma - expected to be empty */
- if (tx_chn->ringtxcq)
- k3_ringacc_ring_reset(tx_chn->ringtxcq);
-
/*
 * TXQ reset needs to be done in a special way as it is input for udma and its
 * state is cached by udma, so:
@@ -452,17 +517,20 @@ void k3_udma_glue_reset_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
* 3) reset TXQ in a special way
*/
occ_tx = k3_ringacc_ring_get_occ(tx_chn->ringtx);
- dev_dbg(tx_chn->common.dev, "TX reset occ_tx %u\n", occ_tx);
+ dev_dbg(dev, "TX reset occ_tx %u\n", occ_tx);
for (i = 0; i < occ_tx; i++) {
ret = k3_ringacc_ring_pop(tx_chn->ringtx, &desc_dma);
if (ret) {
- dev_err(tx_chn->common.dev, "TX reset pop %d\n", ret);
+ if (ret != -ENODATA)
+ dev_err(dev, "TX reset pop %d\n", ret);
break;
}
cleanup(data, desc_dma);
}
+ /* reset TXCQ as it is not input for udma - expected to be empty */
+ k3_ringacc_ring_reset(tx_chn->ringtxcq);
k3_ringacc_ring_reset_dma(tx_chn->ringtx, occ_tx);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_reset_tx_chn);
@@ -481,12 +549,50 @@ EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_txcq_id);
int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn)
{
- tx_chn->virq = k3_ringacc_get_ring_irq_num(tx_chn->ringtxcq);
+ if (xudma_is_pktdma(tx_chn->common.udmax)) {
+ tx_chn->virq = xudma_pktdma_tflow_get_irq(tx_chn->common.udmax,
+ tx_chn->udma_tflow_id);
+ } else {
+ tx_chn->virq = k3_ringacc_get_ring_irq_num(tx_chn->ringtxcq);
+ }
return tx_chn->virq;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_irq);
+struct device *
+ k3_udma_glue_tx_get_dma_device(struct k3_udma_glue_tx_channel *tx_chn)
+{
+ if (xudma_is_pktdma(tx_chn->common.udmax) &&
+ (tx_chn->common.atype_asel == 14 || tx_chn->common.atype_asel == 15))
+ return &tx_chn->common.chan_dev;
+
+ return xudma_get_device(tx_chn->common.udmax);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_dma_device);
+
+void k3_udma_glue_tx_dma_to_cppi5_addr(struct k3_udma_glue_tx_channel *tx_chn,
+ dma_addr_t *addr)
+{
+ if (!xudma_is_pktdma(tx_chn->common.udmax) ||
+ !tx_chn->common.atype_asel)
+ return;
+
+ *addr |= (u64)tx_chn->common.atype_asel << K3_ADDRESS_ASEL_SHIFT;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_tx_dma_to_cppi5_addr);
+
+void k3_udma_glue_tx_cppi5_to_dma_addr(struct k3_udma_glue_tx_channel *tx_chn,
+ dma_addr_t *addr)
+{
+ if (!xudma_is_pktdma(tx_chn->common.udmax) ||
+ !tx_chn->common.atype_asel)
+ return;
+
+ *addr &= (u64)GENMASK(K3_ADDRESS_ASEL_SHIFT - 1, 0);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_tx_cppi5_to_dma_addr);
+
static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
@@ -498,8 +604,6 @@ static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
- TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
- TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID |
TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID;
req.nav_id = tisci_rm->tisci_dev_id;
@@ -511,13 +615,16 @@ static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
* req.rxcq_qnum = k3_ringacc_get_ring_id(rx_chn->flows[0].ringrx);
*/
req.rxcq_qnum = 0xFFFF;
- if (rx_chn->flow_num && rx_chn->flow_id_base != rx_chn->udma_rchan_id) {
+ if (!xudma_is_pktdma(rx_chn->common.udmax) && rx_chn->flow_num &&
+ rx_chn->flow_id_base != rx_chn->udma_rchan_id) {
/* Default flow + extra ones */
+ req.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
req.flowid_start = rx_chn->flow_id_base;
req.flowid_cnt = rx_chn->flow_num;
}
req.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
- req.rx_atype = rx_chn->common.atype;
+ req.rx_atype = rx_chn->common.atype_asel;
ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
if (ret)
@@ -571,10 +678,18 @@ static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
goto err_rflow_put;
}
+ if (xudma_is_pktdma(rx_chn->common.udmax)) {
+ rx_ringfdq_id = flow->udma_rflow_id +
+ xudma_get_rflow_ring_offset(rx_chn->common.udmax);
+ rx_ring_id = 0;
+ } else {
+ rx_ring_id = flow_cfg->ring_rxq_id;
+ rx_ringfdq_id = flow_cfg->ring_rxfdq0_id;
+ }
+
/* request and cfg rings */
ret = k3_ringacc_request_rings_pair(rx_chn->common.ringacc,
- flow_cfg->ring_rxfdq0_id,
- flow_cfg->ring_rxq_id,
+ rx_ringfdq_id, rx_ring_id,
&flow->ringrxfdq,
&flow->ringrx);
if (ret) {
@@ -582,6 +697,16 @@ static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
goto err_rflow_put;
}
+ /* Set the dma_dev for the rings to be configured */
+ flow_cfg->rx_cfg.dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn);
+ flow_cfg->rxfdq_cfg.dma_dev = flow_cfg->rx_cfg.dma_dev;
+
+ /* Set the ASEL value for DMA rings of PKTDMA */
+ if (xudma_is_pktdma(rx_chn->common.udmax)) {
+ flow_cfg->rx_cfg.asel = rx_chn->common.atype_asel;
+ flow_cfg->rxfdq_cfg.asel = rx_chn->common.atype_asel;
+ }
+
ret = k3_ringacc_ring_cfg(flow->ringrx, &flow_cfg->rx_cfg);
if (ret) {
dev_err(dev, "Failed to cfg ringrx %d\n", ret);
@@ -740,6 +865,7 @@ k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name,
struct k3_udma_glue_rx_channel_cfg *cfg)
{
struct k3_udma_glue_rx_channel *rx_chn;
+ struct psil_endpoint_config *ep_cfg;
int ret, i;
if (cfg->flow_id_num <= 0)
@@ -767,8 +893,16 @@ k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name,
rx_chn->common.psdata_size,
rx_chn->common.swdata_size);
+ ep_cfg = rx_chn->common.ep_config;
+
+ if (xudma_is_pktdma(rx_chn->common.udmax))
+ rx_chn->udma_rchan_id = ep_cfg->mapped_channel_id;
+ else
+ rx_chn->udma_rchan_id = -1;
+
/* request and cfg UDMAP RX channel */
- rx_chn->udma_rchanx = xudma_rchan_get(rx_chn->common.udmax, -1);
+ rx_chn->udma_rchanx = xudma_rchan_get(rx_chn->common.udmax,
+ rx_chn->udma_rchan_id);
if (IS_ERR(rx_chn->udma_rchanx)) {
ret = PTR_ERR(rx_chn->udma_rchanx);
dev_err(dev, "UDMAX rchanx get err %d\n", ret);
@@ -776,12 +910,48 @@ k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name,
}
rx_chn->udma_rchan_id = xudma_rchan_get_id(rx_chn->udma_rchanx);
- rx_chn->flow_num = cfg->flow_id_num;
- rx_chn->flow_id_base = cfg->flow_id_base;
+ rx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
+ rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax);
+ dev_set_name(&rx_chn->common.chan_dev, "rchan%d-0x%04x",
+ rx_chn->udma_rchan_id, rx_chn->common.src_thread);
+ ret = device_register(&rx_chn->common.chan_dev);
+ if (ret) {
+ dev_err(dev, "Channel Device registration failed %d\n", ret);
+ rx_chn->common.chan_dev.parent = NULL;
+ goto err;
+ }
- /* Use RX channel id as flow id: target dev can't generate flow_id */
- if (cfg->flow_id_use_rxchan_id)
- rx_chn->flow_id_base = rx_chn->udma_rchan_id;
+ if (xudma_is_pktdma(rx_chn->common.udmax)) {
+ /* prepare the channel device as coherent */
+ rx_chn->common.chan_dev.dma_coherent = true;
+ dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev,
+ DMA_BIT_MASK(48));
+ }
+
+ if (xudma_is_pktdma(rx_chn->common.udmax)) {
+ int flow_start = cfg->flow_id_base;
+ int flow_end;
+
+ if (flow_start == -1)
+ flow_start = ep_cfg->flow_start;
+
+ flow_end = flow_start + cfg->flow_id_num - 1;
+ if (flow_start < ep_cfg->flow_start ||
+ flow_end > (ep_cfg->flow_start + ep_cfg->flow_num - 1)) {
+ dev_err(dev, "Invalid flow range requested\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ rx_chn->flow_id_base = flow_start;
+ } else {
+ rx_chn->flow_id_base = cfg->flow_id_base;
+
+ /* Use RX channel id as flow id: target dev can't generate flow_id */
+ if (cfg->flow_id_use_rxchan_id)
+ rx_chn->flow_id_base = rx_chn->udma_rchan_id;
+ }
+
+ rx_chn->flow_num = cfg->flow_id_num;
rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
sizeof(*rx_chn->flows), GFP_KERNEL);
@@ -815,19 +985,6 @@ k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name,
goto err;
}
- ret = xudma_navss_psil_pair(rx_chn->common.udmax,
- rx_chn->common.src_thread,
- rx_chn->common.dst_thread);
- if (ret) {
- dev_err(dev, "PSI-L request err %d\n", ret);
- goto err;
- }
-
- rx_chn->psil_paired = true;
-
- /* reset RX RT registers */
- k3_udma_glue_disable_rx_chn(rx_chn);
-
k3_udma_glue_dump_rx_chn(rx_chn);
return rx_chn;
@@ -884,6 +1041,24 @@ k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name,
goto err;
}
+ rx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
+ rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax);
+ dev_set_name(&rx_chn->common.chan_dev, "rchan_remote-0x%04x",
+ rx_chn->common.src_thread);
+ ret = device_register(&rx_chn->common.chan_dev);
+ if (ret) {
+ dev_err(dev, "Channel Device registration failed %d\n", ret);
+ rx_chn->common.chan_dev.parent = NULL;
+ goto err;
+ }
+
+ if (xudma_is_pktdma(rx_chn->common.udmax)) {
+ /* prepare the channel device as coherent */
+ rx_chn->common.chan_dev.dma_coherent = true;
+ dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev,
+ DMA_BIT_MASK(48));
+ }
+
ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
if (ret)
goto err;
@@ -936,6 +1111,11 @@ void k3_udma_glue_release_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
if (!IS_ERR_OR_NULL(rx_chn->udma_rchanx))
xudma_rchan_put(rx_chn->common.udmax,
rx_chn->udma_rchanx);
+
+ if (rx_chn->common.chan_dev.parent) {
+ device_unregister(&rx_chn->common.chan_dev);
+ rx_chn->common.chan_dev.parent = NULL;
+ }
}
EXPORT_SYMBOL_GPL(k3_udma_glue_release_rx_chn);
@@ -1052,12 +1232,24 @@ EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_disable);
int k3_udma_glue_enable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
+ int ret;
+
if (rx_chn->remote)
return -EINVAL;
if (rx_chn->flows_ready < rx_chn->flow_num)
return -EINVAL;
+ ret = xudma_navss_psil_pair(rx_chn->common.udmax,
+ rx_chn->common.src_thread,
+ rx_chn->common.dst_thread);
+ if (ret) {
+ dev_err(rx_chn->common.dev, "PSI-L request err %d\n", ret);
+ return ret;
+ }
+
+ rx_chn->psil_paired = true;
+
xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG,
UDMA_CHAN_RT_CTL_EN);
@@ -1078,6 +1270,13 @@ void k3_udma_glue_disable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG, 0);
k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis2");
+
+ if (rx_chn->psil_paired) {
+ xudma_navss_psil_unpair(rx_chn->common.udmax,
+ rx_chn->common.src_thread,
+ rx_chn->common.dst_thread);
+ rx_chn->psil_paired = false;
+ }
}
EXPORT_SYMBOL_GPL(k3_udma_glue_disable_rx_chn);
@@ -1128,12 +1327,10 @@ void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
/* reset RXCQ as it is not input for udma - expected to be empty */
occ_rx = k3_ringacc_ring_get_occ(flow->ringrx);
dev_dbg(dev, "RX reset flow %u occ_rx %u\n", flow_num, occ_rx);
- if (flow->ringrx)
- k3_ringacc_ring_reset(flow->ringrx);
/* Skip RX FDQ in case one FDQ is used for the set of flows */
if (skip_fdq)
- return;
+ goto do_reset;
/*
 * RX FDQ reset needs to be done in a special way as it is input for udma and its
@@ -1148,13 +1345,17 @@ void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
for (i = 0; i < occ_rx; i++) {
ret = k3_ringacc_ring_pop(flow->ringrxfdq, &desc_dma);
if (ret) {
- dev_err(dev, "RX reset pop %d\n", ret);
+ if (ret != -ENODATA)
+ dev_err(dev, "RX reset pop %d\n", ret);
break;
}
cleanup(data, desc_dma);
}
k3_ringacc_ring_reset_dma(flow->ringrxfdq, occ_rx);
+
+do_reset:
+ k3_ringacc_ring_reset(flow->ringrx);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_reset_rx_chn);
@@ -1184,8 +1385,52 @@ int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn,
flow = &rx_chn->flows[flow_num];
- flow->virq = k3_ringacc_get_ring_irq_num(flow->ringrx);
+ if (xudma_is_pktdma(rx_chn->common.udmax)) {
+ flow->virq = xudma_pktdma_rflow_get_irq(rx_chn->common.udmax,
+ flow->udma_rflow_id);
+ } else {
+ flow->virq = k3_ringacc_get_ring_irq_num(flow->ringrx);
+ }
return flow->virq;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_irq);
+
+struct device *
+ k3_udma_glue_rx_get_dma_device(struct k3_udma_glue_rx_channel *rx_chn)
+{
+ if (xudma_is_pktdma(rx_chn->common.udmax) &&
+ (rx_chn->common.atype_asel == 14 || rx_chn->common.atype_asel == 15))
+ return &rx_chn->common.chan_dev;
+
+ return xudma_get_device(rx_chn->common.udmax);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_dma_device);
+
+void k3_udma_glue_rx_dma_to_cppi5_addr(struct k3_udma_glue_rx_channel *rx_chn,
+ dma_addr_t *addr)
+{
+ if (!xudma_is_pktdma(rx_chn->common.udmax) ||
+ !rx_chn->common.atype_asel)
+ return;
+
+ *addr |= (u64)rx_chn->common.atype_asel << K3_ADDRESS_ASEL_SHIFT;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_rx_dma_to_cppi5_addr);
+
+void k3_udma_glue_rx_cppi5_to_dma_addr(struct k3_udma_glue_rx_channel *rx_chn,
+ dma_addr_t *addr)
+{
+ if (!xudma_is_pktdma(rx_chn->common.udmax) ||
+ !rx_chn->common.atype_asel)
+ return;
+
+ *addr &= (u64)GENMASK(K3_ADDRESS_ASEL_SHIFT - 1, 0);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_rx_cppi5_to_dma_addr);
+
+static int __init k3_udma_glue_class_init(void)
+{
+ return class_register(&k3_udma_glue_devclass);
+}
+arch_initcall(k3_udma_glue_class_init);
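
The new *_dma_to_cppi5_addr()/*_cppi5_to_dma_addr() helpers above tag and untag the ASEL value in the top bits of a DMA address. A sketch of the arithmetic, assuming K3_ADDRESS_ASEL_SHIFT is 48 as in linux/dma/k3-event-router.h:

#define SKETCH_ASEL_SHIFT	48	/* assumed K3_ADDRESS_ASEL_SHIFT */

static u64 tag_addr(u64 addr, u32 asel)
{
	return addr | ((u64)asel << SKETCH_ASEL_SHIFT);
}

static u64 untag_addr(u64 addr)
{
	/* keep bits [47:0], drop the ASEL tag */
	return addr & GENMASK_ULL(SKETCH_ASEL_SHIFT - 1, 0);
}
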
diff --git a/drivers/dma/ti/k3-udma-private.c b/drivers/dma/ti/k3-udma-private.c
index 8563a392f30b..aada84f40723 100644
--- a/drivers/dma/ti/k3-udma-private.c
+++ b/drivers/dma/ti/k3-udma-private.c
@@ -50,6 +50,18 @@ struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property)
}
EXPORT_SYMBOL(of_xudma_dev_get);
+struct device *xudma_get_device(struct udma_dev *ud)
+{
+ return ud->dev;
+}
+EXPORT_SYMBOL(xudma_get_device);
+
+struct k3_ringacc *xudma_get_ringacc(struct udma_dev *ud)
+{
+ return ud->ringacc;
+}
+EXPORT_SYMBOL(xudma_get_ringacc);
+
u32 xudma_dev_get_psil_base(struct udma_dev *ud)
{
return ud->psil_base;
@@ -76,6 +88,9 @@ EXPORT_SYMBOL(xudma_free_gp_rflow_range);
bool xudma_rflow_is_gp(struct udma_dev *ud, int id)
{
+ if (!ud->rflow_gp_map)
+ return false;
+
return !test_bit(id, ud->rflow_gp_map);
}
EXPORT_SYMBOL(xudma_rflow_is_gp);
@@ -107,6 +122,12 @@ void xudma_rflow_put(struct udma_dev *ud, struct udma_rflow *p)
}
EXPORT_SYMBOL(xudma_rflow_put);
+int xudma_get_rflow_ring_offset(struct udma_dev *ud)
+{
+ return ud->tflow_cnt;
+}
+EXPORT_SYMBOL(xudma_get_rflow_ring_offset);
+
#define XUDMA_GET_RESOURCE_ID(res) \
int xudma_##res##_get_id(struct udma_##res *p) \
{ \
@@ -136,3 +157,27 @@ void xudma_##res##rt_write(struct udma_##res *p, int reg, u32 val) \
EXPORT_SYMBOL(xudma_##res##rt_write)
XUDMA_RT_IO_FUNCTIONS(tchan);
XUDMA_RT_IO_FUNCTIONS(rchan);
+
+int xudma_is_pktdma(struct udma_dev *ud)
+{
+ return ud->match_data->type == DMA_TYPE_PKTDMA;
+}
+EXPORT_SYMBOL(xudma_is_pktdma);
+
+int xudma_pktdma_tflow_get_irq(struct udma_dev *ud, int udma_tflow_id)
+{
+ const struct udma_oes_offsets *oes = &ud->soc_data->oes;
+
+ return ti_sci_inta_msi_get_virq(ud->dev, udma_tflow_id +
+ oes->pktdma_tchan_flow);
+}
+EXPORT_SYMBOL(xudma_pktdma_tflow_get_irq);
+
+int xudma_pktdma_rflow_get_irq(struct udma_dev *ud, int udma_rflow_id)
+{
+ const struct udma_oes_offsets *oes = &ud->soc_data->oes;
+
+ return ti_sci_inta_msi_get_virq(ud->dev, udma_rflow_id +
+ oes->pktdma_rchan_flow);
+}
+EXPORT_SYMBOL(xudma_pktdma_rflow_get_irq);
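
XUDMA_GET_RESOURCE_ID() and XUDMA_RT_IO_FUNCTIONS() above stamp out exported one-line wrappers per resource type. An approximate hand expansion for tchan, written out for illustration only:

/* Approximate expansion of XUDMA_GET_RESOURCE_ID(tchan); the
 * XUDMA_RT_IO_FUNCTIONS() macro emits xudma_tchanrt_read()/_write()
 * accessors for the channel's reg_rt window in the same style.
 */
int xudma_tchan_get_id(struct udma_tchan *p)
{
	return p->id;
}
EXPORT_SYMBOL(xudma_tchan_get_id);
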
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index 82cf6c77f5c9..298460438bb4 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -26,6 +26,7 @@
#include <linux/soc/ti/k3-ringacc.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <linux/soc/ti/ti_sci_inta_msi.h>
+#include <linux/dma/k3-event-router.h>
#include <linux/dma/ti-cppi5.h>
#include "../virt-dma.h"
@@ -55,14 +56,26 @@ struct udma_static_tr {
struct udma_chan;
+enum k3_dma_type {
+ DMA_TYPE_UDMA = 0,
+ DMA_TYPE_BCDMA,
+ DMA_TYPE_PKTDMA,
+};
+
enum udma_mmr {
MMR_GCFG = 0,
+ MMR_BCHANRT,
MMR_RCHANRT,
MMR_TCHANRT,
MMR_LAST,
};
-static const char * const mmr_names[] = { "gcfg", "rchanrt", "tchanrt" };
+static const char * const mmr_names[] = {
+ [MMR_GCFG] = "gcfg",
+ [MMR_BCHANRT] = "bchanrt",
+ [MMR_RCHANRT] = "rchanrt",
+ [MMR_TCHANRT] = "tchanrt",
+};
struct udma_tchan {
void __iomem *reg_rt;
@@ -70,8 +83,12 @@ struct udma_tchan {
int id;
struct k3_ring *t_ring; /* Transmit ring */
struct k3_ring *tc_ring; /* Transmit Completion ring */
+ int tflow_id; /* applicable only for PKTDMA */
+
};
+#define udma_bchan udma_tchan
+
struct udma_rflow {
int id;
struct k3_ring *fd_ring; /* Free Descriptor ring */
@@ -84,10 +101,29 @@ struct udma_rchan {
int id;
};
+struct udma_oes_offsets {
+ /* K3 UDMA Output Event Offset */
+ u32 udma_rchan;
+
+ /* BCDMA Output Event Offsets */
+ u32 bcdma_bchan_data;
+ u32 bcdma_bchan_ring;
+ u32 bcdma_tchan_data;
+ u32 bcdma_tchan_ring;
+ u32 bcdma_rchan_data;
+ u32 bcdma_rchan_ring;
+
+ /* PKTDMA Output Event Offsets */
+ u32 pktdma_tchan_flow;
+ u32 pktdma_rchan_flow;
+};
+
#define UDMA_FLAG_PDMA_ACC32 BIT(0)
#define UDMA_FLAG_PDMA_BURST BIT(1)
+#define UDMA_FLAG_TDTYPE BIT(2)
struct udma_match_data {
+ enum k3_dma_type type;
u32 psil_base;
bool enable_memcpy_support;
u32 flags;
@@ -95,7 +131,8 @@ struct udma_match_data {
};
struct udma_soc_data {
- u32 rchan_oes_offset;
+ struct udma_oes_offsets oes;
+ u32 bcdma_trigger_event_offset;
};
struct udma_hwdesc {
@@ -116,6 +153,11 @@ struct udma_rx_flush {
dma_addr_t buffer_paddr;
};
+struct udma_tpl {
+ u8 levels;
+ u32 start_idx[3];
+};
+
struct udma_dev {
struct dma_device ddev;
struct device *dev;
@@ -123,8 +165,9 @@ struct udma_dev {
const struct udma_match_data *match_data;
const struct udma_soc_data *soc_data;
- u8 tpl_levels;
- u32 tpl_start_idx[3];
+ struct udma_tpl bchan_tpl;
+ struct udma_tpl tchan_tpl;
+ struct udma_tpl rchan_tpl;
size_t desc_align; /* alignment to use for descriptors */
@@ -138,16 +181,21 @@ struct udma_dev {
struct udma_rx_flush rx_flush;
+ int bchan_cnt;
int tchan_cnt;
int echan_cnt;
int rchan_cnt;
int rflow_cnt;
+ int tflow_cnt;
+ unsigned long *bchan_map;
unsigned long *tchan_map;
unsigned long *rchan_map;
unsigned long *rflow_gp_map;
unsigned long *rflow_gp_map_allocated;
unsigned long *rflow_in_use;
+ unsigned long *tflow_map;
+ struct udma_bchan *bchans;
struct udma_tchan *tchans;
struct udma_rchan *rchans;
struct udma_rflow *rflows;
@@ -155,6 +203,7 @@ struct udma_dev {
struct udma_chan *channels;
u32 psil_base;
u32 atype;
+ u32 asel;
};
struct udma_desc {
@@ -199,6 +248,7 @@ struct udma_chan_config {
bool notdpkt; /* Suppress sending TDC packet */
int remote_thread_id;
u32 atype;
+ u32 asel;
u32 src_thread;
u32 dst_thread;
enum psil_endpoint_type ep_type;
@@ -206,6 +256,13 @@ struct udma_chan_config {
bool enable_burst;
enum udma_tp_level channel_tpl; /* Channel Throughput Level */
+ u32 tr_trigger_type;
+
+ /* PKDMA mapped channel */
+ int mapped_channel_id;
+ /* PKTDMA default tflow or rflow for mapped channel */
+ int default_flow_id;
+
enum dma_transfer_direction dir;
};
@@ -213,11 +270,13 @@ struct udma_chan {
struct virt_dma_chan vc;
struct dma_slave_config cfg;
struct udma_dev *ud;
+ struct device *dma_dev;
struct udma_desc *desc;
struct udma_desc *terminated_desc;
struct udma_static_tr static_tr;
char *name;
+ struct udma_bchan *bchan;
struct udma_tchan *tchan;
struct udma_rchan *rchan;
struct udma_rflow *rflow;
@@ -353,10 +412,36 @@ static int navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
src_thread, dst_thread);
}
+static void k3_configure_chan_coherency(struct dma_chan *chan, u32 asel)
+{
+ struct device *chan_dev = &chan->dev->device;
+
+ if (asel == 0) {
+ /* No special handling for the channel */
+ chan->dev->chan_dma_dev = false;
+
+ chan_dev->dma_coherent = false;
+ chan_dev->dma_parms = NULL;
+ } else if (asel == 14 || asel == 15) {
+ chan->dev->chan_dma_dev = true;
+
+ chan_dev->dma_coherent = true;
+ dma_coerce_mask_and_coherent(chan_dev, DMA_BIT_MASK(48));
+ chan_dev->dma_parms = chan_dev->parent->dma_parms;
+ } else {
+ dev_warn(chan->device->dev, "Invalid ASEL value: %u\n", asel);
+
+ chan_dev->dma_coherent = false;
+ chan_dev->dma_parms = NULL;
+ }
+}
+
static void udma_reset_uchan(struct udma_chan *uc)
{
memset(&uc->config, 0, sizeof(uc->config));
uc->config.remote_thread_id = -1;
+ uc->config.mapped_channel_id = -1;
+ uc->config.default_flow_id = -1;
uc->state = UDMA_CHAN_IS_IDLE;
}
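
k3_configure_chan_coherency() added above encodes the BCDMA/PKTDMA rule that ASEL values 14 and 15 select coherent ports (48-bit mask, parent dma_parms) while 0 means untagged. A hedged usage sketch, with invented sketch_* names:

static void sketch_chan_setup(struct udma_chan *uc, u32 asel)
{
	/* apply the channel's ASEL: 14/15 -> coherent channel device */
	k3_configure_chan_coherency(&uc->vc.chan, asel);
}

static void sketch_chan_teardown(struct udma_chan *uc)
{
	/* back to the default, non-coherent state */
	k3_configure_chan_coherency(&uc->vc.chan, 0);
}
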
@@ -439,9 +524,7 @@ static void udma_free_hwdesc(struct udma_chan *uc, struct udma_desc *d)
d->hwdesc[i].cppi5_desc_vaddr = NULL;
}
} else if (d->hwdesc[0].cppi5_desc_vaddr) {
- struct udma_dev *ud = uc->ud;
-
- dma_free_coherent(ud->dev, d->hwdesc[0].cppi5_desc_size,
+ dma_free_coherent(uc->dma_dev, d->hwdesc[0].cppi5_desc_size,
d->hwdesc[0].cppi5_desc_vaddr,
d->hwdesc[0].cppi5_desc_paddr);
@@ -670,8 +753,10 @@ static void udma_reset_counters(struct udma_chan *uc)
val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PCNT_REG);
udma_tchanrt_write(uc, UDMA_CHAN_RT_PCNT_REG, val);
- val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
- udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
+ if (!uc->bchan) {
+ val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
+ udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
+ }
}
if (uc->rchan) {
@@ -746,10 +831,16 @@ static void udma_start_desc(struct udma_chan *uc)
{
struct udma_chan_config *ucc = &uc->config;
- if (ucc->pkt_mode && (uc->cyclic || ucc->dir == DMA_DEV_TO_MEM)) {
+ if (uc->ud->match_data->type == DMA_TYPE_UDMA && ucc->pkt_mode &&
+ (uc->cyclic || ucc->dir == DMA_DEV_TO_MEM)) {
int i;
- /* Push all descriptors to ring for packet mode cyclic or RX */
+ /*
+	 * UDMA only: push all descriptors to the ring for packet mode
+	 * cyclic or RX.
+	 * PKTDMA supports pre-linked descriptors; cyclic mode is not
+	 * supported.
+ */
for (i = 0; i < uc->desc->sglen; i++)
udma_push_to_ring(uc, i);
} else {
@@ -1020,13 +1111,12 @@ static irqreturn_t udma_ring_irq_handler(int irq, void *data)
{
struct udma_chan *uc = data;
struct udma_desc *d;
- unsigned long flags;
dma_addr_t paddr = 0;
if (udma_pop_from_ring(uc, &paddr) || !paddr)
return IRQ_HANDLED;
- spin_lock_irqsave(&uc->vc.lock, flags);
+ spin_lock(&uc->vc.lock);
/* Teardown completion message */
if (cppi5_desc_is_tdcm(paddr)) {
@@ -1077,7 +1167,7 @@ static irqreturn_t udma_ring_irq_handler(int irq, void *data)
}
}
out:
- spin_unlock_irqrestore(&uc->vc.lock, flags);
+ spin_unlock(&uc->vc.lock);
return IRQ_HANDLED;
}
@@ -1086,9 +1176,8 @@ static irqreturn_t udma_udma_irq_handler(int irq, void *data)
{
struct udma_chan *uc = data;
struct udma_desc *d;
- unsigned long flags;
- spin_lock_irqsave(&uc->vc.lock, flags);
+ spin_lock(&uc->vc.lock);
d = uc->desc;
if (d) {
d->tr_idx = (d->tr_idx + 1) % d->sglen;
@@ -1103,7 +1192,7 @@ static irqreturn_t udma_udma_irq_handler(int irq, void *data)
}
}
- spin_unlock_irqrestore(&uc->vc.lock, flags);
+ spin_unlock(&uc->vc.lock);
return IRQ_HANDLED;
}
@@ -1181,10 +1270,12 @@ static struct udma_rflow *__udma_get_rflow(struct udma_dev *ud, int id)
if (test_bit(id, ud->rflow_in_use))
return ERR_PTR(-ENOENT);
- /* GP rflow has to be allocated first */
- if (!test_bit(id, ud->rflow_gp_map) &&
- !test_bit(id, ud->rflow_gp_map_allocated))
- return ERR_PTR(-EINVAL);
+ if (ud->rflow_gp_map) {
+ /* GP rflow has to be allocated first */
+ if (!test_bit(id, ud->rflow_gp_map) &&
+ !test_bit(id, ud->rflow_gp_map_allocated))
+ return ERR_PTR(-EINVAL);
+ }
dev_dbg(ud->dev, "get rflow%d\n", id);
set_bit(id, ud->rflow_in_use);
@@ -1215,10 +1306,10 @@ static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud, \
} else { \
int start; \
\
- if (tpl >= ud->tpl_levels) \
- tpl = ud->tpl_levels - 1; \
+ if (tpl >= ud->res##_tpl.levels) \
+ tpl = ud->res##_tpl.levels - 1; \
\
- start = ud->tpl_start_idx[tpl]; \
+ start = ud->res##_tpl.start_idx[tpl]; \
\
id = find_next_zero_bit(ud->res##_map, ud->res##_cnt, \
start); \
@@ -1231,9 +1322,39 @@ static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud, \
return &ud->res##s[id]; \
}
+UDMA_RESERVE_RESOURCE(bchan);
UDMA_RESERVE_RESOURCE(tchan);
UDMA_RESERVE_RESOURCE(rchan);
+static int bcdma_get_bchan(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+ enum udma_tp_level tpl;
+
+ if (uc->bchan) {
+ dev_dbg(ud->dev, "chan%d: already have bchan%d allocated\n",
+ uc->id, uc->bchan->id);
+ return 0;
+ }
+
+ /*
+ * Use normal channels for peripherals, and highest TPL channel for
+ * mem2mem
+ */
+ if (uc->config.tr_trigger_type)
+ tpl = 0;
+ else
+ tpl = ud->bchan_tpl.levels - 1;
+
+ uc->bchan = __udma_reserve_bchan(ud, tpl, -1);
+ if (IS_ERR(uc->bchan))
+ return PTR_ERR(uc->bchan);
+
+ uc->tchan = uc->bchan;
+
+ return 0;
+}
+
static int udma_get_tchan(struct udma_chan *uc)
{
struct udma_dev *ud = uc->ud;
@@ -1244,9 +1365,39 @@ static int udma_get_tchan(struct udma_chan *uc)
return 0;
}
- uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl, -1);
+ /*
+ * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels.
+ * For PKTDMA mapped channels it is configured to a channel which must
+ * be used to service the peripheral.
+ */
+ uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl,
+ uc->config.mapped_channel_id);
+ if (IS_ERR(uc->tchan))
+ return PTR_ERR(uc->tchan);
+
+ if (ud->tflow_cnt) {
+ int tflow_id;
+
+ /* Only PKTDMA have support for tx flows */
+ if (uc->config.default_flow_id >= 0)
+ tflow_id = uc->config.default_flow_id;
+ else
+ tflow_id = uc->tchan->id;
+
+ if (test_bit(tflow_id, ud->tflow_map)) {
+ dev_err(ud->dev, "tflow%d is in use\n", tflow_id);
+ clear_bit(uc->tchan->id, ud->tchan_map);
+ uc->tchan = NULL;
+ return -ENOENT;
+ }
- return PTR_ERR_OR_ZERO(uc->tchan);
+ uc->tchan->tflow_id = tflow_id;
+ set_bit(tflow_id, ud->tflow_map);
+ } else {
+ uc->tchan->tflow_id = -1;
+ }
+
+ return 0;
}
static int udma_get_rchan(struct udma_chan *uc)
@@ -1259,7 +1410,13 @@ static int udma_get_rchan(struct udma_chan *uc)
return 0;
}
- uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl, -1);
+ /*
+ * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels.
+ * For PKTDMA mapped channels it is configured to a channel which must
+ * be used to service the peripheral.
+ */
+ uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl,
+ uc->config.mapped_channel_id);
return PTR_ERR_OR_ZERO(uc->rchan);
}
@@ -1287,8 +1444,11 @@ static int udma_get_chan_pair(struct udma_chan *uc)
/* Can be optimized, but let's have it like this for now */
end = min(ud->tchan_cnt, ud->rchan_cnt);
- /* Try to use the highest TPL channel pair for MEM_TO_MEM channels */
- chan_id = ud->tpl_start_idx[ud->tpl_levels - 1];
+ /*
+ * Try to use the highest TPL channel pair for MEM_TO_MEM channels
+ * Note: in UDMAP the channel TPL is symmetric between tchan and rchan
+ */
+ chan_id = ud->tchan_tpl.start_idx[ud->tchan_tpl.levels - 1];
for (; chan_id < end; chan_id++) {
if (!test_bit(chan_id, ud->tchan_map) &&
!test_bit(chan_id, ud->rchan_map))
@@ -1303,6 +1463,9 @@ static int udma_get_chan_pair(struct udma_chan *uc)
uc->tchan = &ud->tchans[chan_id];
uc->rchan = &ud->rchans[chan_id];
+ /* UDMA does not use tx flows */
+ uc->tchan->tflow_id = -1;
+
return 0;
}
@@ -1326,6 +1489,19 @@ static int udma_get_rflow(struct udma_chan *uc, int flow_id)
return PTR_ERR_OR_ZERO(uc->rflow);
}
+static void bcdma_put_bchan(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+
+ if (uc->bchan) {
+ dev_dbg(ud->dev, "chan%d: put bchan%d\n", uc->id,
+ uc->bchan->id);
+ clear_bit(uc->bchan->id, ud->bchan_map);
+ uc->bchan = NULL;
+ uc->tchan = NULL;
+ }
+}
+
static void udma_put_rchan(struct udma_chan *uc)
{
struct udma_dev *ud = uc->ud;
@@ -1346,6 +1522,10 @@ static void udma_put_tchan(struct udma_chan *uc)
dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
uc->tchan->id);
clear_bit(uc->tchan->id, ud->tchan_map);
+
+ if (uc->tchan->tflow_id >= 0)
+ clear_bit(uc->tchan->tflow_id, ud->tflow_map);
+
uc->tchan = NULL;
}
}
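
The udma_get_*/udma_put_* pairs manage channels through per-type bitmaps, with PKTDMA mapped channels requesting an exact slot and everything else taking the first free one. A simplified sketch of that reserve/release core (no TPL start indices; names invented):

static int sketch_reserve(unsigned long *map, int cnt, int wanted)
{
	int id;

	if (wanted >= 0) {		/* mapped channel: exact slot */
		if (test_bit(wanted, map))
			return -EBUSY;
		id = wanted;
	} else {			/* any free slot */
		id = find_next_zero_bit(map, cnt, 0);
		if (id == cnt)
			return -EBUSY;
	}

	set_bit(id, map);
	return id;
}

static void sketch_release(unsigned long *map, int id)
{
	clear_bit(id, map);
}
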
@@ -1362,6 +1542,65 @@ static void udma_put_rflow(struct udma_chan *uc)
}
}
+static void bcdma_free_bchan_resources(struct udma_chan *uc)
+{
+ if (!uc->bchan)
+ return;
+
+ k3_ringacc_ring_free(uc->bchan->tc_ring);
+ k3_ringacc_ring_free(uc->bchan->t_ring);
+ uc->bchan->tc_ring = NULL;
+ uc->bchan->t_ring = NULL;
+ k3_configure_chan_coherency(&uc->vc.chan, 0);
+
+ bcdma_put_bchan(uc);
+}
+
+static int bcdma_alloc_bchan_resources(struct udma_chan *uc)
+{
+ struct k3_ring_cfg ring_cfg;
+ struct udma_dev *ud = uc->ud;
+ int ret;
+
+ ret = bcdma_get_bchan(uc);
+ if (ret)
+ return ret;
+
+ ret = k3_ringacc_request_rings_pair(ud->ringacc, uc->bchan->id, -1,
+ &uc->bchan->t_ring,
+ &uc->bchan->tc_ring);
+ if (ret) {
+ ret = -EBUSY;
+ goto err_ring;
+ }
+
+ memset(&ring_cfg, 0, sizeof(ring_cfg));
+ ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
+ ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
+ ring_cfg.mode = K3_RINGACC_RING_MODE_RING;
+
+ k3_configure_chan_coherency(&uc->vc.chan, ud->asel);
+ ring_cfg.asel = ud->asel;
+ ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);
+
+ ret = k3_ringacc_ring_cfg(uc->bchan->t_ring, &ring_cfg);
+ if (ret)
+ goto err_ringcfg;
+
+ return 0;
+
+err_ringcfg:
+ k3_ringacc_ring_free(uc->bchan->tc_ring);
+ uc->bchan->tc_ring = NULL;
+ k3_ringacc_ring_free(uc->bchan->t_ring);
+ uc->bchan->t_ring = NULL;
+ k3_configure_chan_coherency(&uc->vc.chan, 0);
+err_ring:
+ bcdma_put_bchan(uc);
+
+ return ret;
+}
+
static void udma_free_tx_resources(struct udma_chan *uc)
{
if (!uc->tchan)
@@ -1379,15 +1618,22 @@ static int udma_alloc_tx_resources(struct udma_chan *uc)
{
struct k3_ring_cfg ring_cfg;
struct udma_dev *ud = uc->ud;
- int ret;
+ struct udma_tchan *tchan;
+ int ring_idx, ret;
ret = udma_get_tchan(uc);
if (ret)
return ret;
- ret = k3_ringacc_request_rings_pair(ud->ringacc, uc->tchan->id, -1,
- &uc->tchan->t_ring,
- &uc->tchan->tc_ring);
+ tchan = uc->tchan;
+ if (tchan->tflow_id >= 0)
+ ring_idx = tchan->tflow_id;
+ else
+ ring_idx = ud->bchan_cnt + tchan->id;
+
+ ret = k3_ringacc_request_rings_pair(ud->ringacc, ring_idx, -1,
+ &tchan->t_ring,
+ &tchan->tc_ring);
if (ret) {
ret = -EBUSY;
goto err_ring;
@@ -1396,10 +1642,18 @@ static int udma_alloc_tx_resources(struct udma_chan *uc)
memset(&ring_cfg, 0, sizeof(ring_cfg));
ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
- ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
+ if (ud->match_data->type == DMA_TYPE_UDMA) {
+ ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
+ } else {
+ ring_cfg.mode = K3_RINGACC_RING_MODE_RING;
+
+ k3_configure_chan_coherency(&uc->vc.chan, uc->config.asel);
+ ring_cfg.asel = uc->config.asel;
+ ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);
+ }
- ret = k3_ringacc_ring_cfg(uc->tchan->t_ring, &ring_cfg);
- ret |= k3_ringacc_ring_cfg(uc->tchan->tc_ring, &ring_cfg);
+ ret = k3_ringacc_ring_cfg(tchan->t_ring, &ring_cfg);
+ ret |= k3_ringacc_ring_cfg(tchan->tc_ring, &ring_cfg);
if (ret)
goto err_ringcfg;
@@ -1452,14 +1706,23 @@ static int udma_alloc_rx_resources(struct udma_chan *uc)
if (uc->config.dir == DMA_MEM_TO_MEM)
return 0;
- ret = udma_get_rflow(uc, uc->rchan->id);
+ if (uc->config.default_flow_id >= 0)
+ ret = udma_get_rflow(uc, uc->config.default_flow_id);
+ else
+ ret = udma_get_rflow(uc, uc->rchan->id);
+
if (ret) {
ret = -EBUSY;
goto err_rflow;
}
rflow = uc->rflow;
- fd_ring_id = ud->tchan_cnt + ud->echan_cnt + uc->rchan->id;
+ if (ud->tflow_cnt)
+ fd_ring_id = ud->tflow_cnt + rflow->id;
+ else
+ fd_ring_id = ud->bchan_cnt + ud->tchan_cnt + ud->echan_cnt +
+ uc->rchan->id;
+
ret = k3_ringacc_request_rings_pair(ud->ringacc, fd_ring_id, -1,
&rflow->fd_ring, &rflow->r_ring);
if (ret) {
@@ -1469,15 +1732,25 @@ static int udma_alloc_rx_resources(struct udma_chan *uc)
memset(&ring_cfg, 0, sizeof(ring_cfg));
- if (uc->config.pkt_mode)
- ring_cfg.size = SG_MAX_SEGMENTS;
- else
+ ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
+ if (ud->match_data->type == DMA_TYPE_UDMA) {
+ if (uc->config.pkt_mode)
+ ring_cfg.size = SG_MAX_SEGMENTS;
+ else
+ ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
+
+ ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
+ } else {
ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
+ ring_cfg.mode = K3_RINGACC_RING_MODE_RING;
- ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
- ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
+ k3_configure_chan_coherency(&uc->vc.chan, uc->config.asel);
+ ring_cfg.asel = uc->config.asel;
+ ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);
+ }
ret = k3_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg);
+
ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
ret |= k3_ringacc_ring_cfg(rflow->r_ring, &ring_cfg);
@@ -1499,7 +1772,18 @@ err_rflow:
return ret;
}
-#define TISCI_TCHAN_VALID_PARAMS ( \
+#define TISCI_BCDMA_BCHAN_VALID_PARAMS ( \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_EXTENDED_CH_TYPE_VALID)
+
+#define TISCI_BCDMA_TCHAN_VALID_PARAMS ( \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID)
+
+#define TISCI_BCDMA_RCHAN_VALID_PARAMS ( \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID)
+
+#define TISCI_UDMA_TCHAN_VALID_PARAMS ( \
TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID | \
TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID | \
@@ -1509,7 +1793,7 @@ err_rflow:
TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \
TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
-#define TISCI_RCHAN_VALID_PARAMS ( \
+#define TISCI_UDMA_RCHAN_VALID_PARAMS ( \
TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \
TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \
@@ -1534,7 +1818,7 @@ static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
- req_tx.valid_params = TISCI_TCHAN_VALID_PARAMS;
+ req_tx.valid_params = TISCI_UDMA_TCHAN_VALID_PARAMS;
req_tx.nav_id = tisci_rm->tisci_dev_id;
req_tx.index = tchan->id;
req_tx.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
@@ -1548,7 +1832,7 @@ static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
return ret;
}
- req_rx.valid_params = TISCI_RCHAN_VALID_PARAMS;
+ req_rx.valid_params = TISCI_UDMA_RCHAN_VALID_PARAMS;
req_rx.nav_id = tisci_rm->tisci_dev_id;
req_rx.index = rchan->id;
req_rx.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
@@ -1563,6 +1847,27 @@ static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
return ret;
}
+static int bcdma_tisci_m2m_channel_config(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+ struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+ const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
+ struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
+ struct udma_bchan *bchan = uc->bchan;
+ int ret = 0;
+
+ req_tx.valid_params = TISCI_BCDMA_BCHAN_VALID_PARAMS;
+ req_tx.nav_id = tisci_rm->tisci_dev_id;
+ req_tx.extended_ch_type = TI_SCI_RM_BCDMA_EXTENDED_CH_TYPE_BCHAN;
+ req_tx.index = bchan->id;
+
+ ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
+ if (ret)
+ dev_err(ud->dev, "bchan%d cfg failed %d\n", bchan->id, ret);
+
+ return ret;
+}
+
static int udma_tisci_tx_channel_config(struct udma_chan *uc)
{
struct udma_dev *ud = uc->ud;
@@ -1583,7 +1888,7 @@ static int udma_tisci_tx_channel_config(struct udma_chan *uc)
fetch_size = sizeof(struct cppi5_desc_hdr_t);
}
- req_tx.valid_params = TISCI_TCHAN_VALID_PARAMS;
+ req_tx.valid_params = TISCI_UDMA_TCHAN_VALID_PARAMS;
req_tx.nav_id = tisci_rm->tisci_dev_id;
req_tx.index = tchan->id;
req_tx.tx_chan_type = mode;
@@ -1591,6 +1896,13 @@ static int udma_tisci_tx_channel_config(struct udma_chan *uc)
req_tx.tx_fetch_size = fetch_size >> 2;
req_tx.txcq_qnum = tc_ring;
req_tx.tx_atype = uc->config.atype;
+ if (uc->config.ep_type == PSIL_EP_PDMA_XY &&
+ ud->match_data->flags & UDMA_FLAG_TDTYPE) {
+ /* wait for peer to complete the teardown for PDMAs */
+ req_tx.valid_params |=
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID;
+ req_tx.tx_tdtype = 1;
+ }
ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
if (ret)
@@ -1599,6 +1911,35 @@ static int udma_tisci_tx_channel_config(struct udma_chan *uc)
return ret;
}
+static int bcdma_tisci_tx_channel_config(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+ struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+ const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
+ struct udma_tchan *tchan = uc->tchan;
+ struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
+ int ret = 0;
+
+ req_tx.valid_params = TISCI_BCDMA_TCHAN_VALID_PARAMS;
+ req_tx.nav_id = tisci_rm->tisci_dev_id;
+ req_tx.index = tchan->id;
+ req_tx.tx_supr_tdpkt = uc->config.notdpkt;
+ if (ud->match_data->flags & UDMA_FLAG_TDTYPE) {
+ /* wait for peer to complete the teardown for PDMAs */
+ req_tx.valid_params |=
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID;
+ req_tx.tx_tdtype = 1;
+ }
+
+ ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
+ if (ret)
+ dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
+
+ return ret;
+}
+
+#define pktdma_tisci_tx_channel_config bcdma_tisci_tx_channel_config
+
static int udma_tisci_rx_channel_config(struct udma_chan *uc)
{
struct udma_dev *ud = uc->ud;
@@ -1621,7 +1962,7 @@ static int udma_tisci_rx_channel_config(struct udma_chan *uc)
fetch_size = sizeof(struct cppi5_desc_hdr_t);
}
- req_rx.valid_params = TISCI_RCHAN_VALID_PARAMS;
+ req_rx.valid_params = TISCI_UDMA_RCHAN_VALID_PARAMS;
req_rx.nav_id = tisci_rm->tisci_dev_id;
req_rx.index = rchan->id;
req_rx.rx_fetch_size = fetch_size >> 2;
@@ -1680,6 +2021,72 @@ static int udma_tisci_rx_channel_config(struct udma_chan *uc)
return 0;
}
+static int bcdma_tisci_rx_channel_config(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+ struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+ const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
+ struct udma_rchan *rchan = uc->rchan;
+ struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
+ int ret = 0;
+
+ req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS;
+ req_rx.nav_id = tisci_rm->tisci_dev_id;
+ req_rx.index = rchan->id;
+
+ ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
+ if (ret)
+ dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
+
+ return ret;
+}
+
+static int pktdma_tisci_rx_channel_config(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+ struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+ const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
+ struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
+ struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
+ int ret = 0;
+
+ req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS;
+ req_rx.nav_id = tisci_rm->tisci_dev_id;
+ req_rx.index = uc->rchan->id;
+
+ ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
+ if (ret) {
+ dev_err(ud->dev, "rchan%d cfg failed %d\n", uc->rchan->id, ret);
+ return ret;
+ }
+
+ flow_req.valid_params =
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID;
+
+ flow_req.nav_id = tisci_rm->tisci_dev_id;
+ flow_req.flow_index = uc->rflow->id;
+
+ if (uc->config.needs_epib)
+ flow_req.rx_einfo_present = 1;
+ else
+ flow_req.rx_einfo_present = 0;
+ if (uc->config.psd_size)
+ flow_req.rx_psinfo_present = 1;
+ else
+ flow_req.rx_psinfo_present = 0;
+ flow_req.rx_error_handling = 1;
+
+ ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
+
+ if (ret)
+ dev_err(ud->dev, "flow%d config failed: %d\n", uc->rflow->id,
+ ret);
+
+ return ret;
+}
+
static int udma_alloc_chan_resources(struct dma_chan *chan)
{
struct udma_chan *uc = to_udma_chan(chan);
@@ -1689,6 +2096,8 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
u32 irq_udma_idx;
int ret;
+ uc->dma_dev = ud->dev;
+
if (uc->config.pkt_mode || uc->config.dir == DMA_MEM_TO_MEM) {
uc->use_dma_pool = true;
/* in case of MEM_TO_MEM we have maximum of two TRs */
@@ -1784,7 +2193,7 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
K3_PSIL_DST_THREAD_ID_OFFSET;
irq_ring = uc->rflow->r_ring;
- irq_udma_idx = soc_data->rchan_oes_offset + uc->rchan->id;
+ irq_udma_idx = soc_data->oes.udma_rchan + uc->rchan->id;
ret = udma_tisci_rx_channel_config(uc);
break;
@@ -1884,6 +2293,369 @@ err_cleanup:
return ret;
}
+static int bcdma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+ struct udma_dev *ud = to_udma_dev(chan->device);
+ const struct udma_oes_offsets *oes = &ud->soc_data->oes;
+ u32 irq_udma_idx, irq_ring_idx;
+ int ret;
+
+ /* Only TR mode is supported */
+ uc->config.pkt_mode = false;
+
+ /*
+ * Make sure that the completion is in a known state:
+ * No teardown, the channel is idle
+ */
+ reinit_completion(&uc->teardown_completed);
+ complete_all(&uc->teardown_completed);
+ uc->state = UDMA_CHAN_IS_IDLE;
+
+ switch (uc->config.dir) {
+ case DMA_MEM_TO_MEM:
+ /* Non synchronized - mem to mem type of transfer */
+ dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
+ uc->id);
+
+ ret = bcdma_alloc_bchan_resources(uc);
+ if (ret)
+ return ret;
+
+ irq_ring_idx = uc->bchan->id + oes->bcdma_bchan_ring;
+ irq_udma_idx = uc->bchan->id + oes->bcdma_bchan_data;
+
+ ret = bcdma_tisci_m2m_channel_config(uc);
+ break;
+ case DMA_MEM_TO_DEV:
+ /* Slave transfer synchronized - mem to dev (TX) transfer */
+ dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
+ uc->id);
+
+ ret = udma_alloc_tx_resources(uc);
+ if (ret) {
+ uc->config.remote_thread_id = -1;
+ return ret;
+ }
+
+ uc->config.src_thread = ud->psil_base + uc->tchan->id;
+ uc->config.dst_thread = uc->config.remote_thread_id;
+ uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
+
+ irq_ring_idx = uc->tchan->id + oes->bcdma_tchan_ring;
+ irq_udma_idx = uc->tchan->id + oes->bcdma_tchan_data;
+
+ ret = bcdma_tisci_tx_channel_config(uc);
+ break;
+ case DMA_DEV_TO_MEM:
+ /* Slave transfer synchronized - dev to mem (RX) transfer */
+ dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
+ uc->id);
+
+ ret = udma_alloc_rx_resources(uc);
+ if (ret) {
+ uc->config.remote_thread_id = -1;
+ return ret;
+ }
+
+ uc->config.src_thread = uc->config.remote_thread_id;
+ uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
+ K3_PSIL_DST_THREAD_ID_OFFSET;
+
+ irq_ring_idx = uc->rchan->id + oes->bcdma_rchan_ring;
+ irq_udma_idx = uc->rchan->id + oes->bcdma_rchan_data;
+
+ ret = bcdma_tisci_rx_channel_config(uc);
+ break;
+ default:
+ /* Cannot happen */
+ dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
+ __func__, uc->id, uc->config.dir);
+ return -EINVAL;
+ }
+
+ /* check if the channel configuration was successful */
+ if (ret)
+ goto err_res_free;
+
+ if (udma_is_chan_running(uc)) {
+ dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
+ udma_reset_chan(uc, false);
+ if (udma_is_chan_running(uc)) {
+ dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
+ ret = -EBUSY;
+ goto err_res_free;
+ }
+ }
+
+ uc->dma_dev = dmaengine_get_dma_device(chan);
+ if (uc->config.dir == DMA_MEM_TO_MEM && !uc->config.tr_trigger_type) {
+ uc->config.hdesc_size = cppi5_trdesc_calc_size(
+ sizeof(struct cppi5_tr_type15_t), 2);
+
+ uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
+ uc->config.hdesc_size,
+ ud->desc_align,
+ 0);
+ if (!uc->hdesc_pool) {
+ dev_err(ud->ddev.dev,
+ "Descriptor pool allocation failed\n");
+ uc->use_dma_pool = false;
+ ret = -ENOMEM;
+ goto err_res_free;
+ }
+
+ uc->use_dma_pool = true;
+ } else if (uc->config.dir != DMA_MEM_TO_MEM) {
+ /* PSI-L pairing */
+ ret = navss_psil_pair(ud, uc->config.src_thread,
+ uc->config.dst_thread);
+ if (ret) {
+ dev_err(ud->dev,
+ "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
+ uc->config.src_thread, uc->config.dst_thread);
+ goto err_res_free;
+ }
+
+ uc->psil_paired = true;
+ }
+
+ uc->irq_num_ring = ti_sci_inta_msi_get_virq(ud->dev, irq_ring_idx);
+ if (uc->irq_num_ring <= 0) {
+ dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
+ irq_ring_idx);
+ ret = -EINVAL;
+ goto err_psi_free;
+ }
+
+ ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
+ IRQF_TRIGGER_HIGH, uc->name, uc);
+ if (ret) {
+ dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
+ goto err_irq_free;
+ }
+
+ /* Event from BCDMA (TR events) only needed for slave channels */
+ if (is_slave_direction(uc->config.dir)) {
+ uc->irq_num_udma = ti_sci_inta_msi_get_virq(ud->dev,
+ irq_udma_idx);
+ if (uc->irq_num_udma <= 0) {
+ dev_err(ud->dev, "Failed to get bcdma irq (index: %u)\n",
+ irq_udma_idx);
+ free_irq(uc->irq_num_ring, uc);
+ ret = -EINVAL;
+ goto err_irq_free;
+ }
+
+ ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0,
+ uc->name, uc);
+ if (ret) {
+ dev_err(ud->dev, "chan%d: BCDMA irq request failed\n",
+ uc->id);
+ free_irq(uc->irq_num_ring, uc);
+ goto err_irq_free;
+ }
+ } else {
+ uc->irq_num_udma = 0;
+ }
+
+ udma_reset_rings(uc);
+
+ INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work,
+ udma_check_tx_completion);
+ return 0;
+
+err_irq_free:
+ uc->irq_num_ring = 0;
+ uc->irq_num_udma = 0;
+err_psi_free:
+ if (uc->psil_paired)
+ navss_psil_unpair(ud, uc->config.src_thread,
+ uc->config.dst_thread);
+ uc->psil_paired = false;
+err_res_free:
+ bcdma_free_bchan_resources(uc);
+ udma_free_tx_resources(uc);
+ udma_free_rx_resources(uc);
+
+ udma_reset_uchan(uc);
+
+ if (uc->use_dma_pool) {
+ dma_pool_destroy(uc->hdesc_pool);
+ uc->use_dma_pool = false;
+ }
+
+ return ret;
+}
+
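The allocation callback above is not called directly; it runs when a client requests a channel through the dmaengine core. A hedged consumer-side sketch (the channel name "rx" is an assumption about the client's dma-names):

	/* Sketch: the core resolves the dma-spec via udma_of_xlate() and
	 * invokes bcdma_alloc_chan_resources() through
	 * device_alloc_chan_resources.
	 */
	struct dma_chan *chan = dma_request_chan(dev, "rx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);
	/* direction/addresses arrive later via dmaengine_slave_config() */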
+static int bcdma_router_config(struct dma_chan *chan)
+{
+ struct k3_event_route_data *router_data = chan->route_data;
+ struct udma_chan *uc = to_udma_chan(chan);
+ u32 trigger_event;
+
+ if (!uc->bchan)
+ return -EINVAL;
+
+ if (uc->config.tr_trigger_type != 1 && uc->config.tr_trigger_type != 2)
+ return -EINVAL;
+
+ trigger_event = uc->ud->soc_data->bcdma_trigger_event_offset;
+ trigger_event += (uc->bchan->id * 2) + uc->config.tr_trigger_type - 1;
+
+ return router_data->set_event(router_data->priv, trigger_event);
+}
+
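Each bchan exposes two global triggers, so the router event number is a simple linear map; a worked example against the AM64 value defined later (bcdma_trigger_event_offset = 0xc400):

	/* trigger_event = offset + (bchan_id * 2) + (tr_trigger_type - 1)
	 * e.g. bchan 3, trigger type 2 (second global trigger):
	 *   0xc400 + 3 * 2 + 2 - 1 = 0xc407
	 */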
+static int pktdma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+ struct udma_dev *ud = to_udma_dev(chan->device);
+ const struct udma_oes_offsets *oes = &ud->soc_data->oes;
+ u32 irq_ring_idx;
+ int ret;
+
+ /*
+ * Make sure that the completion is in a known state:
+ * No teardown, the channel is idle
+ */
+ reinit_completion(&uc->teardown_completed);
+ complete_all(&uc->teardown_completed);
+ uc->state = UDMA_CHAN_IS_IDLE;
+
+ switch (uc->config.dir) {
+ case DMA_MEM_TO_DEV:
+ /* Slave transfer synchronized - mem to dev (TX) transfer */
+ dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
+ uc->id);
+
+ ret = udma_alloc_tx_resources(uc);
+ if (ret) {
+ uc->config.remote_thread_id = -1;
+ return ret;
+ }
+
+ uc->config.src_thread = ud->psil_base + uc->tchan->id;
+ uc->config.dst_thread = uc->config.remote_thread_id;
+ uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
+
+ irq_ring_idx = uc->tchan->tflow_id + oes->pktdma_tchan_flow;
+
+ ret = pktdma_tisci_tx_channel_config(uc);
+ break;
+ case DMA_DEV_TO_MEM:
+ /* Slave transfer synchronized - dev to mem (RX) transfer */
+ dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
+ uc->id);
+
+ ret = udma_alloc_rx_resources(uc);
+ if (ret) {
+ uc->config.remote_thread_id = -1;
+ return ret;
+ }
+
+ uc->config.src_thread = uc->config.remote_thread_id;
+ uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
+ K3_PSIL_DST_THREAD_ID_OFFSET;
+
+ irq_ring_idx = uc->rflow->id + oes->pktdma_rchan_flow;
+
+ ret = pktdma_tisci_rx_channel_config(uc);
+ break;
+ default:
+ /* Cannot happen */
+ dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
+ __func__, uc->id, uc->config.dir);
+ return -EINVAL;
+ }
+
+ /* check if the channel configuration was successful */
+ if (ret)
+ goto err_res_free;
+
+ if (udma_is_chan_running(uc)) {
+ dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
+ udma_reset_chan(uc, false);
+ if (udma_is_chan_running(uc)) {
+ dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
+ ret = -EBUSY;
+ goto err_res_free;
+ }
+ }
+
+ uc->dma_dev = dmaengine_get_dma_device(chan);
+ uc->hdesc_pool = dma_pool_create(uc->name, uc->dma_dev,
+ uc->config.hdesc_size, ud->desc_align,
+ 0);
+ if (!uc->hdesc_pool) {
+ dev_err(ud->ddev.dev,
+ "Descriptor pool allocation failed\n");
+ uc->use_dma_pool = false;
+ ret = -ENOMEM;
+ goto err_res_free;
+ }
+
+ uc->use_dma_pool = true;
+
+ /* PSI-L pairing */
+ ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
+ if (ret) {
+ dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
+ uc->config.src_thread, uc->config.dst_thread);
+ goto err_res_free;
+ }
+
+ uc->psil_paired = true;
+
+ uc->irq_num_ring = ti_sci_inta_msi_get_virq(ud->dev, irq_ring_idx);
+ if (uc->irq_num_ring <= 0) {
+ dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
+ irq_ring_idx);
+ ret = -EINVAL;
+ goto err_psi_free;
+ }
+
+ ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
+ IRQF_TRIGGER_HIGH, uc->name, uc);
+ if (ret) {
+ dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
+ goto err_irq_free;
+ }
+
+ uc->irq_num_udma = 0;
+
+ udma_reset_rings(uc);
+
+ INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work,
+ udma_check_tx_completion);
+
+ if (uc->tchan)
+ dev_dbg(ud->dev,
+ "chan%d: tchan%d, tflow%d, Remote thread: 0x%04x\n",
+ uc->id, uc->tchan->id, uc->tchan->tflow_id,
+ uc->config.remote_thread_id);
+ else if (uc->rchan)
+ dev_dbg(ud->dev,
+ "chan%d: rchan%d, rflow%d, Remote thread: 0x%04x\n",
+ uc->id, uc->rchan->id, uc->rflow->id,
+ uc->config.remote_thread_id);
+ return 0;
+
+err_irq_free:
+ uc->irq_num_ring = 0;
+err_psi_free:
+ navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread);
+ uc->psil_paired = false;
+err_res_free:
+ udma_free_tx_resources(uc);
+ udma_free_rx_resources(uc);
+
+ udma_reset_uchan(uc);
+
+ dma_pool_destroy(uc->hdesc_pool);
+ uc->use_dma_pool = false;
+
+ return ret;
+}
+
static int udma_slave_config(struct dma_chan *chan,
struct dma_slave_config *cfg)
{
@@ -2028,6 +2800,7 @@ udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
size_t tr_size;
int num_tr = 0;
int tr_idx = 0;
+ u64 asel;
/* estimate the number of TRs we will need */
for_each_sg(sgl, sgent, sglen, i) {
@@ -2045,6 +2818,11 @@ udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
d->sglen = sglen;
+ if (uc->ud->match_data->type == DMA_TYPE_UDMA)
+ asel = 0;
+ else
+ asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
+
tr_req = d->hwdesc[0].tr_req_base;
for_each_sg(sgl, sgent, sglen, i) {
dma_addr_t sg_addr = sg_dma_address(sgent);
@@ -2063,6 +2841,7 @@ udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT);
+ sg_addr |= asel;
tr_req[tr_idx].addr = sg_addr;
tr_req[tr_idx].icnt0 = tr0_cnt0;
tr_req[tr_idx].icnt1 = tr0_cnt1;
@@ -2092,6 +2871,205 @@ udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
return d;
}
+static struct udma_desc *
+udma_prep_slave_sg_triggered_tr(struct udma_chan *uc, struct scatterlist *sgl,
+ unsigned int sglen,
+ enum dma_transfer_direction dir,
+ unsigned long tx_flags, void *context)
+{
+ struct scatterlist *sgent;
+ struct cppi5_tr_type15_t *tr_req = NULL;
+ enum dma_slave_buswidth dev_width;
+ u16 tr_cnt0, tr_cnt1;
+ dma_addr_t dev_addr;
+ struct udma_desc *d;
+ unsigned int i;
+ size_t tr_size, sg_len;
+ int num_tr = 0;
+ int tr_idx = 0;
+ u32 burst, trigger_size, port_window;
+ u64 asel;
+
+ if (dir == DMA_DEV_TO_MEM) {
+ dev_addr = uc->cfg.src_addr;
+ dev_width = uc->cfg.src_addr_width;
+ burst = uc->cfg.src_maxburst;
+ port_window = uc->cfg.src_port_window_size;
+ } else if (dir == DMA_MEM_TO_DEV) {
+ dev_addr = uc->cfg.dst_addr;
+ dev_width = uc->cfg.dst_addr_width;
+ burst = uc->cfg.dst_maxburst;
+ port_window = uc->cfg.dst_port_window_size;
+ } else {
+ dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
+ return NULL;
+ }
+
+ if (!burst)
+ burst = 1;
+
+ if (port_window) {
+ if (port_window != burst) {
+ dev_err(uc->ud->dev,
+ "The burst must be equal to port_window\n");
+ return NULL;
+ }
+
+ tr_cnt0 = dev_width * port_window;
+ tr_cnt1 = 1;
+ } else {
+ tr_cnt0 = dev_width;
+ tr_cnt1 = burst;
+ }
+ trigger_size = tr_cnt0 * tr_cnt1;
+
+ /* estimate the number of TRs we will need */
+ for_each_sg(sgl, sgent, sglen, i) {
+ sg_len = sg_dma_len(sgent);
+
+ if (sg_len % trigger_size) {
+ dev_err(uc->ud->dev,
+ "Not aligned SG entry (%zu for %u)\n", sg_len,
+ trigger_size);
+ return NULL;
+ }
+
+ if (sg_len / trigger_size < SZ_64K)
+ num_tr++;
+ else
+ num_tr += 2;
+ }
+
+ /* Now allocate and setup the descriptor. */
+ tr_size = sizeof(struct cppi5_tr_type15_t);
+ d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir);
+ if (!d)
+ return NULL;
+
+ d->sglen = sglen;
+
+ if (uc->ud->match_data->type == DMA_TYPE_UDMA) {
+ asel = 0;
+ } else {
+ asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
+ dev_addr |= asel;
+ }
+
+ tr_req = d->hwdesc[0].tr_req_base;
+ for_each_sg(sgl, sgent, sglen, i) {
+ u16 tr0_cnt2, tr0_cnt3, tr1_cnt2;
+ dma_addr_t sg_addr = sg_dma_address(sgent);
+
+ sg_len = sg_dma_len(sgent);
+ num_tr = udma_get_tr_counters(sg_len / trigger_size, 0,
+ &tr0_cnt2, &tr0_cnt3, &tr1_cnt2);
+ if (num_tr < 0) {
+ dev_err(uc->ud->dev, "size %zu is not supported\n",
+ sg_len);
+ udma_free_hwdesc(uc, d);
+ kfree(d);
+ return NULL;
+ }
+
+ cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE15, false,
+ true, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+ cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT);
+ cppi5_tr_set_trigger(&tr_req[tr_idx].flags,
+ uc->config.tr_trigger_type,
+ CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC, 0, 0);
+
+ sg_addr |= asel;
+ if (dir == DMA_DEV_TO_MEM) {
+ tr_req[tr_idx].addr = dev_addr;
+ tr_req[tr_idx].icnt0 = tr_cnt0;
+ tr_req[tr_idx].icnt1 = tr_cnt1;
+ tr_req[tr_idx].icnt2 = tr0_cnt2;
+ tr_req[tr_idx].icnt3 = tr0_cnt3;
+ tr_req[tr_idx].dim1 = (-1) * tr_cnt0;
+
+ tr_req[tr_idx].daddr = sg_addr;
+ tr_req[tr_idx].dicnt0 = tr_cnt0;
+ tr_req[tr_idx].dicnt1 = tr_cnt1;
+ tr_req[tr_idx].dicnt2 = tr0_cnt2;
+ tr_req[tr_idx].dicnt3 = tr0_cnt3;
+ tr_req[tr_idx].ddim1 = tr_cnt0;
+ tr_req[tr_idx].ddim2 = trigger_size;
+ tr_req[tr_idx].ddim3 = trigger_size * tr0_cnt2;
+ } else {
+ tr_req[tr_idx].addr = sg_addr;
+ tr_req[tr_idx].icnt0 = tr_cnt0;
+ tr_req[tr_idx].icnt1 = tr_cnt1;
+ tr_req[tr_idx].icnt2 = tr0_cnt2;
+ tr_req[tr_idx].icnt3 = tr0_cnt3;
+ tr_req[tr_idx].dim1 = tr_cnt0;
+ tr_req[tr_idx].dim2 = trigger_size;
+ tr_req[tr_idx].dim3 = trigger_size * tr0_cnt2;
+
+ tr_req[tr_idx].daddr = dev_addr;
+ tr_req[tr_idx].dicnt0 = tr_cnt0;
+ tr_req[tr_idx].dicnt1 = tr_cnt1;
+ tr_req[tr_idx].dicnt2 = tr0_cnt2;
+ tr_req[tr_idx].dicnt3 = tr0_cnt3;
+ tr_req[tr_idx].ddim1 = (-1) * tr_cnt0;
+ }
+
+ tr_idx++;
+
+ if (num_tr == 2) {
+ cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE15,
+ false, true,
+ CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+ cppi5_tr_csf_set(&tr_req[tr_idx].flags,
+ CPPI5_TR_CSF_SUPR_EVT);
+ cppi5_tr_set_trigger(&tr_req[tr_idx].flags,
+ uc->config.tr_trigger_type,
+ CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC,
+ 0, 0);
+
+ sg_addr += trigger_size * tr0_cnt2 * tr0_cnt3;
+ if (dir == DMA_DEV_TO_MEM) {
+ tr_req[tr_idx].addr = dev_addr;
+ tr_req[tr_idx].icnt0 = tr_cnt0;
+ tr_req[tr_idx].icnt1 = tr_cnt1;
+ tr_req[tr_idx].icnt2 = tr1_cnt2;
+ tr_req[tr_idx].icnt3 = 1;
+ tr_req[tr_idx].dim1 = (-1) * tr_cnt0;
+
+ tr_req[tr_idx].daddr = sg_addr;
+ tr_req[tr_idx].dicnt0 = tr_cnt0;
+ tr_req[tr_idx].dicnt1 = tr_cnt1;
+ tr_req[tr_idx].dicnt2 = tr1_cnt2;
+ tr_req[tr_idx].dicnt3 = 1;
+ tr_req[tr_idx].ddim1 = tr_cnt0;
+ tr_req[tr_idx].ddim2 = trigger_size;
+ } else {
+ tr_req[tr_idx].addr = sg_addr;
+ tr_req[tr_idx].icnt0 = tr_cnt0;
+ tr_req[tr_idx].icnt1 = tr_cnt1;
+ tr_req[tr_idx].icnt2 = tr1_cnt2;
+ tr_req[tr_idx].icnt3 = 1;
+ tr_req[tr_idx].dim1 = tr_cnt0;
+ tr_req[tr_idx].dim2 = trigger_size;
+
+ tr_req[tr_idx].daddr = dev_addr;
+ tr_req[tr_idx].dicnt0 = tr_cnt0;
+ tr_req[tr_idx].dicnt1 = tr_cnt1;
+ tr_req[tr_idx].dicnt2 = tr1_cnt2;
+ tr_req[tr_idx].dicnt3 = 1;
+ tr_req[tr_idx].ddim1 = (-1) * tr_cnt0;
+ }
+ tr_idx++;
+ }
+
+ d->residue += sg_len;
+ }
+
+ cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags,
+ CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
+
+ return d;
+}
+
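udma_get_tr_counters() splits the per-SG trigger count into at most two TRs because the icnt2 field is 16 bits wide; a hedged worked example of the math above:

	/* If sg_len / trigger_size >= SZ_64K, one TR cannot carry the
	 * count in icnt2, so the split produces e.g.:
	 *   TR0: icnt2 = tr0_cnt2, icnt3 = tr0_cnt3 (the bulk,
	 *        tr0_cnt2 * tr0_cnt3 triggers)
	 *   TR1: icnt2 = tr1_cnt2, icnt3 = 1        (the remainder)
	 * and TR1's buffer address is advanced by
	 *   trigger_size * tr0_cnt2 * tr0_cnt3 bytes, exactly as done
	 * with sg_addr above.
	 */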
static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d,
enum dma_slave_buswidth dev_width,
u16 elcnt)
@@ -2156,6 +3134,7 @@ udma_prep_slave_sg_pkt(struct udma_chan *uc, struct scatterlist *sgl,
struct udma_desc *d;
u32 ring_id;
unsigned int i;
+ u64 asel;
d = kzalloc(struct_size(d, hwdesc, sglen), GFP_NOWAIT);
if (!d)
@@ -2169,6 +3148,11 @@ udma_prep_slave_sg_pkt(struct udma_chan *uc, struct scatterlist *sgl,
else
ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
+ if (uc->ud->match_data->type == DMA_TYPE_UDMA)
+ asel = 0;
+ else
+ asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
+
for_each_sg(sgl, sgent, sglen, i) {
struct udma_hwdesc *hwdesc = &d->hwdesc[i];
dma_addr_t sg_addr = sg_dma_address(sgent);
@@ -2203,14 +3187,16 @@ udma_prep_slave_sg_pkt(struct udma_chan *uc, struct scatterlist *sgl,
}
/* attach the sg buffer to the descriptor */
+ sg_addr |= asel;
cppi5_hdesc_attach_buf(desc, sg_addr, sg_len, sg_addr, sg_len);
/* Attach link as host buffer descriptor */
if (h_desc)
cppi5_hdesc_link_hbdesc(h_desc,
- hwdesc->cppi5_desc_paddr);
+ hwdesc->cppi5_desc_paddr | asel);
- if (dir == DMA_MEM_TO_DEV)
+ if (uc->ud->match_data->type == DMA_TYPE_PKTDMA ||
+ dir == DMA_MEM_TO_DEV)
h_desc = desc;
}
@@ -2333,7 +3319,8 @@ udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
struct udma_desc *d;
u32 burst;
- if (dir != uc->config.dir) {
+ if (dir != uc->config.dir &&
+ (uc->config.dir == DMA_MEM_TO_MEM && !uc->config.tr_trigger_type)) {
dev_err(chan->device->dev,
"%s: chan%d is for %s, not supporting %s\n",
__func__, uc->id,
@@ -2359,9 +3346,12 @@ udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
if (uc->config.pkt_mode)
d = udma_prep_slave_sg_pkt(uc, sgl, sglen, dir, tx_flags,
context);
- else
+ else if (is_slave_direction(uc->config.dir))
d = udma_prep_slave_sg_tr(uc, sgl, sglen, dir, tx_flags,
context);
+ else
+ d = udma_prep_slave_sg_triggered_tr(uc, sgl, sglen, dir,
+ tx_flags, context);
if (!d)
return NULL;
@@ -2415,7 +3405,12 @@ udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
return NULL;
tr_req = d->hwdesc[0].tr_req_base;
- period_addr = buf_addr;
+ if (uc->ud->match_data->type == DMA_TYPE_UDMA)
+ period_addr = buf_addr;
+ else
+ period_addr = buf_addr |
+ ((u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT);
+
for (i = 0; i < periods; i++) {
int tr_idx = i * num_tr;
@@ -2480,6 +3475,9 @@ udma_prep_dma_cyclic_pkt(struct udma_chan *uc, dma_addr_t buf_addr,
else
ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
+ if (uc->ud->match_data->type != DMA_TYPE_UDMA)
+ buf_addr |= (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
+
for (i = 0; i < periods; i++) {
struct udma_hwdesc *hwdesc = &d->hwdesc[i];
dma_addr_t period_addr = buf_addr + (period_len * i);
@@ -2621,6 +3619,11 @@ udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
d->tr_idx = 0;
d->residue = len;
+ if (uc->ud->match_data->type != DMA_TYPE_UDMA) {
+ src |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT;
+ dest |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT;
+ }
+
tr_req = d->hwdesc[0].tr_req_base;
cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
@@ -2978,6 +3981,7 @@ static void udma_free_chan_resources(struct dma_chan *chan)
vchan_free_chan_resources(&uc->vc);
tasklet_kill(&uc->vc.task);
+ bcdma_free_bchan_resources(uc);
udma_free_tx_resources(uc);
udma_free_rx_resources(uc);
udma_reset_uchan(uc);
@@ -2989,10 +3993,14 @@ static void udma_free_chan_resources(struct dma_chan *chan)
}
static struct platform_driver udma_driver;
+static struct platform_driver bcdma_driver;
+static struct platform_driver pktdma_driver;
struct udma_filter_param {
int remote_thread_id;
u32 atype;
+ u32 asel;
+ u32 tr_trigger_type;
};
static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
@@ -3003,7 +4011,9 @@ static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
struct udma_chan *uc;
struct udma_dev *ud;
- if (chan->device->dev->driver != &udma_driver.driver)
+ if (chan->device->dev->driver != &udma_driver.driver &&
+ chan->device->dev->driver != &bcdma_driver.driver &&
+ chan->device->dev->driver != &pktdma_driver.driver)
return false;
uc = to_udma_chan(chan);
@@ -3017,13 +4027,25 @@ static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
return false;
}
+ if (filter_param->asel > 15) {
+ dev_err(ud->dev, "Invalid channel asel: %u\n",
+ filter_param->asel);
+ return false;
+ }
+
ucc->remote_thread_id = filter_param->remote_thread_id;
ucc->atype = filter_param->atype;
+ ucc->asel = filter_param->asel;
+ ucc->tr_trigger_type = filter_param->tr_trigger_type;
- if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)
+ if (ucc->tr_trigger_type) {
+ ucc->dir = DMA_MEM_TO_MEM;
+ goto triggered_bchan;
+ } else if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET) {
ucc->dir = DMA_MEM_TO_DEV;
- else
+ } else {
ucc->dir = DMA_DEV_TO_MEM;
+ }
ep_config = psil_get_ep_config(ucc->remote_thread_id);
if (IS_ERR(ep_config)) {
@@ -3032,6 +4054,19 @@ static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
ucc->dir = DMA_MEM_TO_MEM;
ucc->remote_thread_id = -1;
ucc->atype = 0;
+ ucc->asel = 0;
+ return false;
+ }
+
+ if (ud->match_data->type == DMA_TYPE_BCDMA &&
+ ep_config->pkt_mode) {
+ dev_err(ud->dev,
+ "Only TR mode is supported (psi-l thread 0x%04x)\n",
+ ucc->remote_thread_id);
+ ucc->dir = DMA_MEM_TO_MEM;
+ ucc->remote_thread_id = -1;
+ ucc->atype = 0;
+ ucc->asel = 0;
return false;
}
@@ -3040,6 +4075,15 @@ static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
ucc->notdpkt = ep_config->notdpkt;
ucc->ep_type = ep_config->ep_type;
+ if (ud->match_data->type == DMA_TYPE_PKTDMA &&
+ ep_config->mapped_channel_id >= 0) {
+ ucc->mapped_channel_id = ep_config->mapped_channel_id;
+ ucc->default_flow_id = ep_config->default_flow_id;
+ } else {
+ ucc->mapped_channel_id = -1;
+ ucc->default_flow_id = -1;
+ }
+
if (ucc->ep_type != PSIL_EP_NATIVE) {
const struct udma_match_data *match_data = ud->match_data;
@@ -3063,6 +4107,13 @@ static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
ucc->remote_thread_id, dmaengine_get_direction_text(ucc->dir));
return true;
+
+triggered_bchan:
+ dev_dbg(ud->dev, "chan%d: triggered channel (type: %u)\n", uc->id,
+ ucc->tr_trigger_type);
+
+ return true;
}
static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
@@ -3073,14 +4124,33 @@ static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
struct udma_filter_param filter_param;
struct dma_chan *chan;
- if (dma_spec->args_count != 1 && dma_spec->args_count != 2)
- return NULL;
+ if (ud->match_data->type == DMA_TYPE_BCDMA) {
+ if (dma_spec->args_count != 3)
+ return NULL;
- filter_param.remote_thread_id = dma_spec->args[0];
- if (dma_spec->args_count == 2)
- filter_param.atype = dma_spec->args[1];
- else
+ filter_param.tr_trigger_type = dma_spec->args[0];
+ filter_param.remote_thread_id = dma_spec->args[1];
+ filter_param.asel = dma_spec->args[2];
filter_param.atype = 0;
+ } else {
+ if (dma_spec->args_count != 1 && dma_spec->args_count != 2)
+ return NULL;
+
+ filter_param.remote_thread_id = dma_spec->args[0];
+ filter_param.tr_trigger_type = 0;
+ if (dma_spec->args_count == 2) {
+ if (ud->match_data->type == DMA_TYPE_UDMA) {
+ filter_param.atype = dma_spec->args[1];
+ filter_param.asel = 0;
+ } else {
+ filter_param.atype = 0;
+ filter_param.asel = dma_spec->args[1];
+ }
+ } else {
+ filter_param.atype = 0;
+ filter_param.asel = 0;
+ }
+ }
chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param,
ofdma->of_node);
@@ -3093,28 +4163,48 @@ static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
}
static struct udma_match_data am654_main_data = {
+ .type = DMA_TYPE_UDMA,
.psil_base = 0x1000,
.enable_memcpy_support = true,
.statictr_z_mask = GENMASK(11, 0),
};
static struct udma_match_data am654_mcu_data = {
+ .type = DMA_TYPE_UDMA,
.psil_base = 0x6000,
.enable_memcpy_support = false,
.statictr_z_mask = GENMASK(11, 0),
};
static struct udma_match_data j721e_main_data = {
+ .type = DMA_TYPE_UDMA,
.psil_base = 0x1000,
.enable_memcpy_support = true,
- .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST,
+ .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
.statictr_z_mask = GENMASK(23, 0),
};
static struct udma_match_data j721e_mcu_data = {
+ .type = DMA_TYPE_UDMA,
.psil_base = 0x6000,
.enable_memcpy_support = false, /* MEM_TO_MEM is slow via MCU UDMA */
- .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST,
+ .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
+ .statictr_z_mask = GENMASK(23, 0),
+};
+
+static struct udma_match_data am64_bcdma_data = {
+ .type = DMA_TYPE_BCDMA,
+ .psil_base = 0x2000, /* for tchan and rchan, not applicable to bchan */
+ .enable_memcpy_support = true, /* Supported via bchan */
+ .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
+ .statictr_z_mask = GENMASK(23, 0),
+};
+
+static struct udma_match_data am64_pktdma_data = {
+ .type = DMA_TYPE_PKTDMA,
+ .psil_base = 0x1000,
+ .enable_memcpy_support = false, /* PKTDMA does not support MEM_TO_MEM */
+ .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
.statictr_z_mask = GENMASK(23, 0),
};
@@ -3136,30 +4226,105 @@ static const struct of_device_id udma_of_match[] = {
{ /* Sentinel */ },
};
+static const struct of_device_id bcdma_of_match[] = {
+ {
+ .compatible = "ti,am64-dmss-bcdma",
+ .data = &am64_bcdma_data,
+ },
+ { /* Sentinel */ },
+};
+
+static const struct of_device_id pktdma_of_match[] = {
+ {
+ .compatible = "ti,am64-dmss-pktdma",
+ .data = &am64_pktdma_data,
+ },
+ { /* Sentinel */ },
+};
+
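A hedged devicetree sketch of the resulting binding: BCDMA consumers use three cells (trigger type, PSI-L thread ID or ignored for triggered channels, ASEL), matching the args_count == 3 parsing in udma_of_xlate() above. Node names, unit addresses, and the thread/ASEL values are illustrative only; the real nodes live in the SoC dtsi.

	main_bcdma: dma-controller@485c0100 {
		compatible = "ti,am64-dmss-bcdma";
		#dma-cells = <3>;
	};

	client {
		/* cells: tr_trigger_type (0 = split channel),
		 * remote_thread_id, asel (0..15)
		 */
		dmas = <&main_bcdma 0 0xc500 15>;
		dma-names = "rx";
	};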
static struct udma_soc_data am654_soc_data = {
- .rchan_oes_offset = 0x200,
+ .oes = {
+ .udma_rchan = 0x200,
+ },
};
static struct udma_soc_data j721e_soc_data = {
- .rchan_oes_offset = 0x400,
+ .oes = {
+ .udma_rchan = 0x400,
+ },
};
static struct udma_soc_data j7200_soc_data = {
- .rchan_oes_offset = 0x80,
+ .oes = {
+ .udma_rchan = 0x80,
+ },
+};
+
+static struct udma_soc_data am64_soc_data = {
+ .oes = {
+ .bcdma_bchan_data = 0x2200,
+ .bcdma_bchan_ring = 0x2400,
+ .bcdma_tchan_data = 0x2800,
+ .bcdma_tchan_ring = 0x2a00,
+ .bcdma_rchan_data = 0x2e00,
+ .bcdma_rchan_ring = 0x3000,
+ .pktdma_tchan_flow = 0x1200,
+ .pktdma_rchan_flow = 0x1600,
+ },
+ .bcdma_trigger_event_offset = 0xc400,
};
static const struct soc_device_attribute k3_soc_devices[] = {
{ .family = "AM65X", .data = &am654_soc_data },
{ .family = "J721E", .data = &j721e_soc_data },
{ .family = "J7200", .data = &j7200_soc_data },
+ { .family = "AM64X", .data = &am64_soc_data },
{ /* sentinel */ }
};
static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud)
{
+ u32 cap2, cap3, cap4;
int i;
- for (i = 0; i < MMR_LAST; i++) {
+ ud->mmrs[MMR_GCFG] = devm_platform_ioremap_resource_byname(pdev, mmr_names[MMR_GCFG]);
+ if (IS_ERR(ud->mmrs[MMR_GCFG]))
+ return PTR_ERR(ud->mmrs[MMR_GCFG]);
+
+ cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
+ cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
+
+ switch (ud->match_data->type) {
+ case DMA_TYPE_UDMA:
+ ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3);
+ ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2);
+ ud->echan_cnt = UDMA_CAP2_ECHAN_CNT(cap2);
+ ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2);
+ break;
+ case DMA_TYPE_BCDMA:
+ ud->bchan_cnt = BCDMA_CAP2_BCHAN_CNT(cap2);
+ ud->tchan_cnt = BCDMA_CAP2_TCHAN_CNT(cap2);
+ ud->rchan_cnt = BCDMA_CAP2_RCHAN_CNT(cap2);
+ break;
+ case DMA_TYPE_PKTDMA:
+ cap4 = udma_read(ud->mmrs[MMR_GCFG], 0x30);
+ ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2);
+ ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2);
+ ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3);
+ ud->tflow_cnt = PKTDMA_CAP4_TFLOW_CNT(cap4);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ for (i = 1; i < MMR_LAST; i++) {
+ if (i == MMR_BCHANRT && ud->bchan_cnt == 0)
+ continue;
+ if (i == MMR_TCHANRT && ud->tchan_cnt == 0)
+ continue;
+ if (i == MMR_RCHANRT && ud->rchan_cnt == 0)
+ continue;
+
ud->mmrs[i] = devm_platform_ioremap_resource_byname(pdev, mmr_names[i]);
if (IS_ERR(ud->mmrs[i]))
return PTR_ERR(ud->mmrs[i]);
@@ -3168,48 +4333,58 @@ static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud)
return 0;
}
+static void udma_mark_resource_ranges(struct udma_dev *ud, unsigned long *map,
+ struct ti_sci_resource_desc *rm_desc,
+ char *name)
+{
+ bitmap_clear(map, rm_desc->start, rm_desc->num);
+ bitmap_clear(map, rm_desc->start_sec, rm_desc->num_sec);
+ dev_dbg(ud->dev, "ti_sci resource range for %s: %d:%d | %d:%d\n", name,
+ rm_desc->start, rm_desc->num, rm_desc->start_sec,
+ rm_desc->num_sec);
+}
+
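The helper above follows an inverted-ownership convention: the maps start bitmap_fill()ed (everything reserved) and the TISCI ranges clear the bits this host may use, so a zero bit marks an allocatable channel. A minimal sketch of the pattern used by the *_setup_resources() functions below:

	bitmap_fill(ud->tchan_map, ud->tchan_cnt);	/* reserve all */
	for (i = 0; i < rm_res->sets; i++)
		udma_mark_resource_ranges(ud, ud->tchan_map,
					  &rm_res->desc[i], "tchan");
	/* channel allocation then searches for zero bits in the map */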
+static const char * const range_names[] = {
+ [RM_RANGE_BCHAN] = "ti,sci-rm-range-bchan",
+ [RM_RANGE_TCHAN] = "ti,sci-rm-range-tchan",
+ [RM_RANGE_RCHAN] = "ti,sci-rm-range-rchan",
+ [RM_RANGE_RFLOW] = "ti,sci-rm-range-rflow",
+ [RM_RANGE_TFLOW] = "ti,sci-rm-range-tflow",
+};
+
static int udma_setup_resources(struct udma_dev *ud)
{
+ int ret, i, j;
struct device *dev = ud->dev;
- int ch_count, ret, i, j;
- u32 cap2, cap3;
- struct ti_sci_resource_desc *rm_desc;
struct ti_sci_resource *rm_res, irq_res;
struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
- static const char * const range_names[] = { "ti,sci-rm-range-tchan",
- "ti,sci-rm-range-rchan",
- "ti,sci-rm-range-rflow" };
-
- cap2 = udma_read(ud->mmrs[MMR_GCFG], UDMA_CAP_REG(2));
- cap3 = udma_read(ud->mmrs[MMR_GCFG], UDMA_CAP_REG(3));
-
- ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3);
- ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2);
- ud->echan_cnt = UDMA_CAP2_ECHAN_CNT(cap2);
- ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2);
- ch_count = ud->tchan_cnt + ud->rchan_cnt;
+ u32 cap3;
/* Set up the throughput level start indexes */
+ cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
if (of_device_is_compatible(dev->of_node,
"ti,am654-navss-main-udmap")) {
- ud->tpl_levels = 2;
- ud->tpl_start_idx[0] = 8;
+ ud->tchan_tpl.levels = 2;
+ ud->tchan_tpl.start_idx[0] = 8;
} else if (of_device_is_compatible(dev->of_node,
"ti,am654-navss-mcu-udmap")) {
- ud->tpl_levels = 2;
- ud->tpl_start_idx[0] = 2;
+ ud->tchan_tpl.levels = 2;
+ ud->tchan_tpl.start_idx[0] = 2;
} else if (UDMA_CAP3_UCHAN_CNT(cap3)) {
- ud->tpl_levels = 3;
- ud->tpl_start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3);
- ud->tpl_start_idx[0] = ud->tpl_start_idx[1] +
- UDMA_CAP3_HCHAN_CNT(cap3);
+ ud->tchan_tpl.levels = 3;
+ ud->tchan_tpl.start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3);
+ ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
} else if (UDMA_CAP3_HCHAN_CNT(cap3)) {
- ud->tpl_levels = 2;
- ud->tpl_start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
+ ud->tchan_tpl.levels = 2;
+ ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
} else {
- ud->tpl_levels = 1;
+ ud->tchan_tpl.levels = 1;
}
+ ud->rchan_tpl.levels = ud->tchan_tpl.levels;
+ ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0];
+ ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1];
+
ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
sizeof(unsigned long), GFP_KERNEL);
ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
@@ -3247,11 +4422,15 @@ static int udma_setup_resources(struct udma_dev *ud)
bitmap_set(ud->rflow_gp_map, 0, ud->rflow_cnt);
/* Get resource ranges from tisci */
- for (i = 0; i < RM_RANGE_LAST; i++)
+ for (i = 0; i < RM_RANGE_LAST; i++) {
+ if (i == RM_RANGE_BCHAN || i == RM_RANGE_TFLOW)
+ continue;
+
tisci_rm->rm_ranges[i] =
devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
tisci_rm->tisci_dev_id,
(char *)range_names[i]);
+ }
/* tchan ranges */
rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
@@ -3259,13 +4438,9 @@ static int udma_setup_resources(struct udma_dev *ud)
bitmap_zero(ud->tchan_map, ud->tchan_cnt);
} else {
bitmap_fill(ud->tchan_map, ud->tchan_cnt);
- for (i = 0; i < rm_res->sets; i++) {
- rm_desc = &rm_res->desc[i];
- bitmap_clear(ud->tchan_map, rm_desc->start,
- rm_desc->num);
- dev_dbg(dev, "ti-sci-res: tchan: %d:%d\n",
- rm_desc->start, rm_desc->num);
- }
+ for (i = 0; i < rm_res->sets; i++)
+ udma_mark_resource_ranges(ud, ud->tchan_map,
+ &rm_res->desc[i], "tchan");
}
irq_res.sets = rm_res->sets;
@@ -3275,13 +4450,9 @@ static int udma_setup_resources(struct udma_dev *ud)
bitmap_zero(ud->rchan_map, ud->rchan_cnt);
} else {
bitmap_fill(ud->rchan_map, ud->rchan_cnt);
- for (i = 0; i < rm_res->sets; i++) {
- rm_desc = &rm_res->desc[i];
- bitmap_clear(ud->rchan_map, rm_desc->start,
- rm_desc->num);
- dev_dbg(dev, "ti-sci-res: rchan: %d:%d\n",
- rm_desc->start, rm_desc->num);
- }
+ for (i = 0; i < rm_res->sets; i++)
+ udma_mark_resource_ranges(ud, ud->rchan_map,
+ &rm_res->desc[i], "rchan");
}
irq_res.sets += rm_res->sets;
@@ -3290,12 +4461,21 @@ static int udma_setup_resources(struct udma_dev *ud)
for (i = 0; i < rm_res->sets; i++) {
irq_res.desc[i].start = rm_res->desc[i].start;
irq_res.desc[i].num = rm_res->desc[i].num;
+ irq_res.desc[i].start_sec = rm_res->desc[i].start_sec;
+ irq_res.desc[i].num_sec = rm_res->desc[i].num_sec;
}
rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
for (j = 0; j < rm_res->sets; j++, i++) {
- irq_res.desc[i].start = rm_res->desc[j].start +
- ud->soc_data->rchan_oes_offset;
- irq_res.desc[i].num = rm_res->desc[j].num;
+ if (rm_res->desc[j].num) {
+ irq_res.desc[i].start = rm_res->desc[j].start +
+ ud->soc_data->oes.udma_rchan;
+ irq_res.desc[i].num = rm_res->desc[j].num;
+ }
+ if (rm_res->desc[j].num_sec) {
+ irq_res.desc[i].start_sec = rm_res->desc[j].start_sec +
+ ud->soc_data->oes.udma_rchan;
+ irq_res.desc[i].num_sec = rm_res->desc[j].num_sec;
+ }
}
ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
kfree(irq_res.desc);
@@ -3311,15 +4491,344 @@ static int udma_setup_resources(struct udma_dev *ud)
bitmap_clear(ud->rflow_gp_map, ud->rchan_cnt,
ud->rflow_cnt - ud->rchan_cnt);
} else {
+ for (i = 0; i < rm_res->sets; i++)
+ udma_mark_resource_ranges(ud, ud->rflow_gp_map,
+ &rm_res->desc[i], "gp-rflow");
+ }
+
+ return 0;
+}
+
+static int bcdma_setup_resources(struct udma_dev *ud)
+{
+ int ret, i, j;
+ struct device *dev = ud->dev;
+ struct ti_sci_resource *rm_res, irq_res;
+ struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+ const struct udma_oes_offsets *oes = &ud->soc_data->oes;
+ u32 cap;
+
+ /* Set up the throughput level start indexes */
+ cap = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
+ if (BCDMA_CAP3_UBCHAN_CNT(cap)) {
+ ud->bchan_tpl.levels = 3;
+ ud->bchan_tpl.start_idx[1] = BCDMA_CAP3_UBCHAN_CNT(cap);
+ ud->bchan_tpl.start_idx[0] = BCDMA_CAP3_HBCHAN_CNT(cap);
+ } else if (BCDMA_CAP3_HBCHAN_CNT(cap)) {
+ ud->bchan_tpl.levels = 2;
+ ud->bchan_tpl.start_idx[0] = BCDMA_CAP3_HBCHAN_CNT(cap);
+ } else {
+ ud->bchan_tpl.levels = 1;
+ }
+
+ cap = udma_read(ud->mmrs[MMR_GCFG], 0x30);
+ if (BCDMA_CAP4_URCHAN_CNT(cap)) {
+ ud->rchan_tpl.levels = 3;
+ ud->rchan_tpl.start_idx[1] = BCDMA_CAP4_URCHAN_CNT(cap);
+ ud->rchan_tpl.start_idx[0] = BCDMA_CAP4_HRCHAN_CNT(cap);
+ } else if (BCDMA_CAP4_HRCHAN_CNT(cap)) {
+ ud->rchan_tpl.levels = 2;
+ ud->rchan_tpl.start_idx[0] = BCDMA_CAP4_HRCHAN_CNT(cap);
+ } else {
+ ud->rchan_tpl.levels = 1;
+ }
+
+ if (BCDMA_CAP4_UTCHAN_CNT(cap)) {
+ ud->tchan_tpl.levels = 3;
+ ud->tchan_tpl.start_idx[1] = BCDMA_CAP4_UTCHAN_CNT(cap);
+ ud->tchan_tpl.start_idx[0] = BCDMA_CAP4_HTCHAN_CNT(cap);
+ } else if (BCDMA_CAP4_HTCHAN_CNT(cap)) {
+ ud->tchan_tpl.levels = 2;
+ ud->tchan_tpl.start_idx[0] = BCDMA_CAP4_HTCHAN_CNT(cap);
+ } else {
+ ud->tchan_tpl.levels = 1;
+ }
+
+ ud->bchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->bchan_cnt),
+ sizeof(unsigned long), GFP_KERNEL);
+ ud->bchans = devm_kcalloc(dev, ud->bchan_cnt, sizeof(*ud->bchans),
+ GFP_KERNEL);
+ ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
+ sizeof(unsigned long), GFP_KERNEL);
+ ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
+ GFP_KERNEL);
+ ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
+ sizeof(unsigned long), GFP_KERNEL);
+ ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
+ GFP_KERNEL);
+ /* BCDMA does not really have flows, but the driver expects them */
+ ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rchan_cnt),
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ ud->rflows = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rflows),
+ GFP_KERNEL);
+
+ if (!ud->bchan_map || !ud->tchan_map || !ud->rchan_map ||
+ !ud->rflow_in_use || !ud->bchans || !ud->tchans || !ud->rchans ||
+ !ud->rflows)
+ return -ENOMEM;
+
+ /* Get resource ranges from tisci */
+ for (i = 0; i < RM_RANGE_LAST; i++) {
+ if (i == RM_RANGE_RFLOW || i == RM_RANGE_TFLOW)
+ continue;
+ if (i == RM_RANGE_BCHAN && ud->bchan_cnt == 0)
+ continue;
+ if (i == RM_RANGE_TCHAN && ud->tchan_cnt == 0)
+ continue;
+ if (i == RM_RANGE_RCHAN && ud->rchan_cnt == 0)
+ continue;
+
+ tisci_rm->rm_ranges[i] =
+ devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
+ tisci_rm->tisci_dev_id,
+ (char *)range_names[i]);
+ }
+
+ irq_res.sets = 0;
+
+ /* bchan ranges */
+ if (ud->bchan_cnt) {
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
+ if (IS_ERR(rm_res)) {
+ bitmap_zero(ud->bchan_map, ud->bchan_cnt);
+ } else {
+ bitmap_fill(ud->bchan_map, ud->bchan_cnt);
+ for (i = 0; i < rm_res->sets; i++)
+ udma_mark_resource_ranges(ud, ud->bchan_map,
+ &rm_res->desc[i],
+ "bchan");
+ }
+ irq_res.sets += rm_res->sets;
+ }
+
+ /* tchan ranges */
+ if (ud->tchan_cnt) {
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
+ if (IS_ERR(rm_res)) {
+ bitmap_zero(ud->tchan_map, ud->tchan_cnt);
+ } else {
+ bitmap_fill(ud->tchan_map, ud->tchan_cnt);
+ for (i = 0; i < rm_res->sets; i++)
+ udma_mark_resource_ranges(ud, ud->tchan_map,
+ &rm_res->desc[i],
+ "tchan");
+ }
+ irq_res.sets += rm_res->sets * 2;
+ }
+
+ /* rchan ranges */
+ if (ud->rchan_cnt) {
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
+ if (IS_ERR(rm_res)) {
+ bitmap_zero(ud->rchan_map, ud->rchan_cnt);
+ } else {
+ bitmap_fill(ud->rchan_map, ud->rchan_cnt);
+ for (i = 0; i < rm_res->sets; i++)
+ udma_mark_resource_ranges(ud, ud->rchan_map,
+ &rm_res->desc[i],
+ "rchan");
+ }
+ irq_res.sets += rm_res->sets * 2;
+ }
+
+ irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
+ if (ud->bchan_cnt) {
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
for (i = 0; i < rm_res->sets; i++) {
- rm_desc = &rm_res->desc[i];
- bitmap_clear(ud->rflow_gp_map, rm_desc->start,
- rm_desc->num);
- dev_dbg(dev, "ti-sci-res: rflow: %d:%d\n",
- rm_desc->start, rm_desc->num);
+ irq_res.desc[i].start = rm_res->desc[i].start +
+ oes->bcdma_bchan_ring;
+ irq_res.desc[i].num = rm_res->desc[i].num;
+ }
+ }
+ if (ud->tchan_cnt) {
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
+ for (j = 0; j < rm_res->sets; j++, i += 2) {
+ irq_res.desc[i].start = rm_res->desc[j].start +
+ oes->bcdma_tchan_data;
+ irq_res.desc[i].num = rm_res->desc[j].num;
+
+ irq_res.desc[i + 1].start = rm_res->desc[j].start +
+ oes->bcdma_tchan_ring;
+ irq_res.desc[i + 1].num = rm_res->desc[j].num;
+ }
+ }
+ if (ud->rchan_cnt) {
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
+ for (j = 0; j < rm_res->sets; j++, i += 2) {
+ irq_res.desc[i].start = rm_res->desc[j].start +
+ oes->bcdma_rchan_data;
+ irq_res.desc[i].num = rm_res->desc[j].num;
+
+ irq_res.desc[i + 1].start = rm_res->desc[j].start +
+ oes->bcdma_rchan_ring;
+ irq_res.desc[i + 1].num = rm_res->desc[j].num;
}
}
+ ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
+ kfree(irq_res.desc);
+ if (ret) {
+ dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
+ return ret;
+ }
+
+ return 0;
+}
+
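The interrupt-range accounting above differs per channel type: bchans raise only ring-completion events (one descriptor per TISCI set), while tchans and rchans raise both a data event and a ring event (two descriptors per set). A worked count, assuming one set of each:

	/* irq_res.sets = 1 + (1 * 2) + (1 * 2) = 5 descriptors, laid out:
	 * [bchan_ring][tchan_data][tchan_ring][rchan_data][rchan_ring]
	 */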
+static int pktdma_setup_resources(struct udma_dev *ud)
+{
+ int ret, i, j;
+ struct device *dev = ud->dev;
+ struct ti_sci_resource *rm_res, irq_res;
+ struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+ const struct udma_oes_offsets *oes = &ud->soc_data->oes;
+ u32 cap3;
+
+ /* Set up the throughput level start indexes */
+ cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
+ if (UDMA_CAP3_UCHAN_CNT(cap3)) {
+ ud->tchan_tpl.levels = 3;
+ ud->tchan_tpl.start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3);
+ ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
+ } else if (UDMA_CAP3_HCHAN_CNT(cap3)) {
+ ud->tchan_tpl.levels = 2;
+ ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
+ } else {
+ ud->tchan_tpl.levels = 1;
+ }
+
+ ud->rchan_tpl.levels = ud->tchan_tpl.levels;
+ ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0];
+ ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1];
+
+ ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
+ sizeof(unsigned long), GFP_KERNEL);
+ ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
+ GFP_KERNEL);
+ ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
+ sizeof(unsigned long), GFP_KERNEL);
+ ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
+ GFP_KERNEL);
+ ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
+ GFP_KERNEL);
+ ud->tflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tflow_cnt),
+ sizeof(unsigned long), GFP_KERNEL);
+
+ if (!ud->tchan_map || !ud->rchan_map || !ud->tflow_map || !ud->tchans ||
+ !ud->rchans || !ud->rflows || !ud->rflow_in_use)
+ return -ENOMEM;
+
+ /* Get resource ranges from tisci */
+ for (i = 0; i < RM_RANGE_LAST; i++) {
+ if (i == RM_RANGE_BCHAN)
+ continue;
+
+ tisci_rm->rm_ranges[i] =
+ devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
+ tisci_rm->tisci_dev_id,
+ (char *)range_names[i]);
+ }
+
+ /* tchan ranges */
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
+ if (IS_ERR(rm_res)) {
+ bitmap_zero(ud->tchan_map, ud->tchan_cnt);
+ } else {
+ bitmap_fill(ud->tchan_map, ud->tchan_cnt);
+ for (i = 0; i < rm_res->sets; i++)
+ udma_mark_resource_ranges(ud, ud->tchan_map,
+ &rm_res->desc[i], "tchan");
+ }
+
+ /* rchan ranges */
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
+ if (IS_ERR(rm_res)) {
+ bitmap_zero(ud->rchan_map, ud->rchan_cnt);
+ } else {
+ bitmap_fill(ud->rchan_map, ud->rchan_cnt);
+ for (i = 0; i < rm_res->sets; i++)
+ udma_mark_resource_ranges(ud, ud->rchan_map,
+ &rm_res->desc[i], "rchan");
+ }
+
+ /* rflow ranges */
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
+ if (IS_ERR(rm_res)) {
+ /* all rflows are assigned exclusively to Linux */
+ bitmap_zero(ud->rflow_in_use, ud->rflow_cnt);
+ } else {
+ bitmap_fill(ud->rflow_in_use, ud->rflow_cnt);
+ for (i = 0; i < rm_res->sets; i++)
+ udma_mark_resource_ranges(ud, ud->rflow_in_use,
+ &rm_res->desc[i], "rflow");
+ }
+ irq_res.sets = rm_res->sets;
+
+ /* tflow ranges */
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
+ if (IS_ERR(rm_res)) {
+ /* all tflows are assigned exclusively to Linux */
+ bitmap_zero(ud->tflow_map, ud->tflow_cnt);
+ } else {
+ bitmap_fill(ud->tflow_map, ud->tflow_cnt);
+ for (i = 0; i < rm_res->sets; i++)
+ udma_mark_resource_ranges(ud, ud->tflow_map,
+ &rm_res->desc[i], "tflow");
+ }
+ irq_res.sets += rm_res->sets;
+
+ irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
+ for (i = 0; i < rm_res->sets; i++) {
+ irq_res.desc[i].start = rm_res->desc[i].start +
+ oes->pktdma_tchan_flow;
+ irq_res.desc[i].num = rm_res->desc[i].num;
+ }
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
+ for (j = 0; j < rm_res->sets; j++, i++) {
+ irq_res.desc[i].start = rm_res->desc[j].start +
+ oes->pktdma_rchan_flow;
+ irq_res.desc[i].num = rm_res->desc[j].num;
+ }
+ ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
+ kfree(irq_res.desc);
+ if (ret) {
+ dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
+ return ret;
+ }
+
+ return 0;
+}
+
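PKTDMA raises its completion interrupts per flow rather than per channel, which is why the ranges above come from the tflow/rflow resources with the pktdma_*_flow offsets. A worked index using the AM64 offsets defined earlier:

	/* tflow 5  -> oes->pktdma_tchan_flow + 5  = 0x1200 + 5  = 0x1205
	 * rflow 12 -> oes->pktdma_rchan_flow + 12 = 0x1600 + 12 = 0x160c
	 */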
+static int setup_resources(struct udma_dev *ud)
+{
+ struct device *dev = ud->dev;
+ int ch_count, ret;
+
+ switch (ud->match_data->type) {
+ case DMA_TYPE_UDMA:
+ ret = udma_setup_resources(ud);
+ break;
+ case DMA_TYPE_BCDMA:
+ ret = bcdma_setup_resources(ud);
+ break;
+ case DMA_TYPE_PKTDMA:
+ ret = pktdma_setup_resources(ud);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ ch_count = ud->bchan_cnt + ud->tchan_cnt + ud->rchan_cnt;
+ if (ud->bchan_cnt)
+ ch_count -= bitmap_weight(ud->bchan_map, ud->bchan_cnt);
ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
if (!ch_count)
@@ -3330,12 +4839,40 @@ static int udma_setup_resources(struct udma_dev *ud)
if (!ud->channels)
return -ENOMEM;
- dev_info(dev, "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n",
- ch_count,
- ud->tchan_cnt - bitmap_weight(ud->tchan_map, ud->tchan_cnt),
- ud->rchan_cnt - bitmap_weight(ud->rchan_map, ud->rchan_cnt),
- ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map,
- ud->rflow_cnt));
+ switch (ud->match_data->type) {
+ case DMA_TYPE_UDMA:
+ dev_info(dev,
+ "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n",
+ ch_count,
+ ud->tchan_cnt - bitmap_weight(ud->tchan_map,
+ ud->tchan_cnt),
+ ud->rchan_cnt - bitmap_weight(ud->rchan_map,
+ ud->rchan_cnt),
+ ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map,
+ ud->rflow_cnt));
+ break;
+ case DMA_TYPE_BCDMA:
+ dev_info(dev,
+ "Channels: %d (bchan: %u, tchan: %u, rchan: %u)\n",
+ ch_count,
+ ud->bchan_cnt - bitmap_weight(ud->bchan_map,
+ ud->bchan_cnt),
+ ud->tchan_cnt - bitmap_weight(ud->tchan_map,
+ ud->tchan_cnt),
+ ud->rchan_cnt - bitmap_weight(ud->rchan_map,
+ ud->rchan_cnt));
+ break;
+ case DMA_TYPE_PKTDMA:
+ dev_info(dev,
+ "Channels: %d (tchan: %u, rchan: %u)\n",
+ ch_count,
+ ud->tchan_cnt - bitmap_weight(ud->tchan_map,
+ ud->tchan_cnt),
+ ud->rchan_cnt - bitmap_weight(ud->rchan_map,
+ ud->rchan_cnt));
+ break;
+ default:
+ break;
+ }
return ch_count;
}
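The returned ch_count is the number of channels this host actually owns: the total channel count minus the bits still set (reserved) in each map. A worked example with hypothetical counts:

	/* bchan_cnt = 32, 28 bits still set -> 4 usable bchans
	 * tchan_cnt = 16, 10 bits still set -> 6 usable tchans
	 * rchan_cnt = 16, 10 bits still set -> 6 usable rchans
	 * ch_count = (32 + 16 + 16) - 28 - 10 - 10 = 16
	 */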
@@ -3444,20 +4981,33 @@ static void udma_dbg_summary_show_chan(struct seq_file *s,
seq_printf(s, " %-13s| %s", dma_chan_name(chan),
chan->dbg_client_name ?: "in-use");
- seq_printf(s, " (%s, ", dmaengine_get_direction_text(uc->config.dir));
+ if (ucc->tr_trigger_type)
+ seq_puts(s, " (triggered, ");
+ else
+ seq_printf(s, " (%s, ",
+ dmaengine_get_direction_text(uc->config.dir));
switch (uc->config.dir) {
case DMA_MEM_TO_MEM:
+ if (uc->ud->match_data->type == DMA_TYPE_BCDMA) {
+ seq_printf(s, "bchan%d)\n", uc->bchan->id);
+ return;
+ }
+
seq_printf(s, "chan%d pair [0x%04x -> 0x%04x], ", uc->tchan->id,
ucc->src_thread, ucc->dst_thread);
break;
case DMA_DEV_TO_MEM:
seq_printf(s, "rchan%d [0x%04x -> 0x%04x], ", uc->rchan->id,
ucc->src_thread, ucc->dst_thread);
+ if (uc->ud->match_data->type == DMA_TYPE_PKTDMA)
+ seq_printf(s, "rflow%d, ", uc->rflow->id);
break;
case DMA_MEM_TO_DEV:
seq_printf(s, "tchan%d [0x%04x -> 0x%04x], ", uc->tchan->id,
ucc->src_thread, ucc->dst_thread);
+ if (uc->ud->match_data->type == DMA_TYPE_PKTDMA)
+ seq_printf(s, "tflow%d, ", uc->tchan->tflow_id);
break;
default:
seq_printf(s, ")\n");
@@ -3519,6 +5069,25 @@ static int udma_probe(struct platform_device *pdev)
if (!ud)
return -ENOMEM;
+ match = of_match_node(udma_of_match, dev->of_node);
+ if (!match)
+ match = of_match_node(bcdma_of_match, dev->of_node);
+ if (!match) {
+ match = of_match_node(pktdma_of_match, dev->of_node);
+ if (!match) {
+ dev_err(dev, "No compatible match found\n");
+ return -ENODEV;
+ }
+ }
+ ud->match_data = match->data;
+
+ soc = soc_device_match(k3_soc_devices);
+ if (!soc) {
+ dev_err(dev, "No compatible SoC found\n");
+ return -ENODEV;
+ }
+ ud->soc_data = soc->data;
+
ret = udma_get_mmrs(pdev, ud);
if (ret)
return ret;
@@ -3542,16 +5111,44 @@ static int udma_probe(struct platform_device *pdev)
return ret;
}
- ret = of_property_read_u32(dev->of_node, "ti,udma-atype", &ud->atype);
- if (!ret && ud->atype > 2) {
- dev_err(dev, "Invalid atype: %u\n", ud->atype);
- return -EINVAL;
+ if (ud->match_data->type == DMA_TYPE_UDMA) {
+ ret = of_property_read_u32(dev->of_node, "ti,udma-atype",
+ &ud->atype);
+ if (!ret && ud->atype > 2) {
+ dev_err(dev, "Invalid atype: %u\n", ud->atype);
+ return -EINVAL;
+ }
+ } else {
+ ret = of_property_read_u32(dev->of_node, "ti,asel",
+ &ud->asel);
+ if (!ret && ud->asel > 15) {
+ dev_err(dev, "Invalid asel: %u\n", ud->asel);
+ return -EINVAL;
+ }
}
ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops;
ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops;
- ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc");
+ if (ud->match_data->type == DMA_TYPE_UDMA) {
+ ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc");
+ } else {
+ struct k3_ringacc_init_data ring_init_data;
+
+ ring_init_data.tisci = ud->tisci_rm.tisci;
+ ring_init_data.tisci_dev_id = ud->tisci_rm.tisci_dev_id;
+ if (ud->match_data->type == DMA_TYPE_BCDMA) {
+ ring_init_data.num_rings = ud->bchan_cnt +
+ ud->tchan_cnt +
+ ud->rchan_cnt;
+ } else {
+ ring_init_data.num_rings = ud->rflow_cnt +
+ ud->tflow_cnt;
+ }
+
+ ud->ringacc = k3_ringacc_dmarings_init(pdev, &ring_init_data);
+ }
+
if (IS_ERR(ud->ringacc))
return PTR_ERR(ud->ringacc);
@@ -3562,27 +5159,15 @@ static int udma_probe(struct platform_device *pdev)
return -EPROBE_DEFER;
}
- match = of_match_node(udma_of_match, dev->of_node);
- if (!match) {
- dev_err(dev, "No compatible match found\n");
- return -ENODEV;
- }
- ud->match_data = match->data;
-
- soc = soc_device_match(k3_soc_devices);
- if (!soc) {
- dev_err(dev, "No compatible SoC found\n");
- return -ENODEV;
- }
- ud->soc_data = soc->data;
-
dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask);
- dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask);
+ /* cyclic operation is not supported via PKTDMA */
+ if (ud->match_data->type != DMA_TYPE_PKTDMA) {
+ dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask);
+ ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic;
+ }
- ud->ddev.device_alloc_chan_resources = udma_alloc_chan_resources;
ud->ddev.device_config = udma_slave_config;
ud->ddev.device_prep_slave_sg = udma_prep_slave_sg;
- ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic;
ud->ddev.device_issue_pending = udma_issue_pending;
ud->ddev.device_tx_status = udma_tx_status;
ud->ddev.device_pause = udma_pause;
@@ -3593,7 +5178,25 @@ static int udma_probe(struct platform_device *pdev)
ud->ddev.dbg_summary_show = udma_dbg_summary_show;
#endif
+ switch (ud->match_data->type) {
+ case DMA_TYPE_UDMA:
+ ud->ddev.device_alloc_chan_resources =
+ udma_alloc_chan_resources;
+ break;
+ case DMA_TYPE_BCDMA:
+ ud->ddev.device_alloc_chan_resources =
+ bcdma_alloc_chan_resources;
+ ud->ddev.device_router_config = bcdma_router_config;
+ break;
+ case DMA_TYPE_PKTDMA:
+ ud->ddev.device_alloc_chan_resources =
+ pktdma_alloc_chan_resources;
+ break;
+ default:
+ return -EINVAL;
+ }
ud->ddev.device_free_chan_resources = udma_free_chan_resources;
+
ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS;
ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS;
ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
@@ -3601,7 +5204,8 @@ static int udma_probe(struct platform_device *pdev)
ud->ddev.copy_align = DMAENGINE_ALIGN_8_BYTES;
ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT |
DESC_METADATA_ENGINE;
- if (ud->match_data->enable_memcpy_support) {
+ if (ud->match_data->enable_memcpy_support &&
+ !(ud->match_data->type == DMA_TYPE_BCDMA && ud->bchan_cnt == 0)) {
dma_cap_set(DMA_MEMCPY, ud->ddev.cap_mask);
ud->ddev.device_prep_dma_memcpy = udma_prep_dma_memcpy;
ud->ddev.directions |= BIT(DMA_MEM_TO_MEM);
@@ -3614,7 +5218,7 @@ static int udma_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&ud->ddev.channels);
INIT_LIST_HEAD(&ud->desc_to_purge);
- ch_count = udma_setup_resources(ud);
+ ch_count = setup_resources(ud);
if (ch_count <= 0)
return ch_count;
@@ -3629,6 +5233,13 @@ static int udma_probe(struct platform_device *pdev)
if (ret)
return ret;
+ for (i = 0; i < ud->bchan_cnt; i++) {
+ struct udma_bchan *bchan = &ud->bchans[i];
+
+ bchan->id = i;
+ bchan->reg_rt = ud->mmrs[MMR_BCHANRT] + i * 0x1000;
+ }
+
for (i = 0; i < ud->tchan_cnt; i++) {
struct udma_tchan *tchan = &ud->tchans[i];
@@ -3655,9 +5266,12 @@ static int udma_probe(struct platform_device *pdev)
uc->ud = ud;
uc->vc.desc_free = udma_desc_free;
uc->id = i;
+ uc->bchan = NULL;
uc->tchan = NULL;
uc->rchan = NULL;
uc->config.remote_thread_id = -1;
+ uc->config.mapped_channel_id = -1;
+ uc->config.default_flow_id = -1;
uc->config.dir = DMA_MEM_TO_MEM;
uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d",
dev_name(dev), i);
@@ -3696,5 +5310,25 @@ static struct platform_driver udma_driver = {
};
builtin_platform_driver(udma_driver);
+static struct platform_driver bcdma_driver = {
+ .driver = {
+ .name = "ti-bcdma",
+ .of_match_table = bcdma_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = udma_probe,
+};
+builtin_platform_driver(bcdma_driver);
+
+static struct platform_driver pktdma_driver = {
+ .driver = {
+ .name = "ti-pktdma",
+ .of_match_table = pktdma_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = udma_probe,
+};
+builtin_platform_driver(pktdma_driver);
+
/* Private interfaces to UDMA */
#include "k3-udma-private.c"
diff --git a/drivers/dma/ti/k3-udma.h b/drivers/dma/ti/k3-udma.h
index 09c4529e013d..d349c6d482ae 100644
--- a/drivers/dma/ti/k3-udma.h
+++ b/drivers/dma/ti/k3-udma.h
@@ -18,7 +18,7 @@
#define UDMA_RX_FLOW_ID_FW_OES_REG 0x80
#define UDMA_RX_FLOW_ID_FW_STATUS_REG 0x88
-/* TCHANRT/RCHANRT registers */
+/* BCHANRT/TCHANRT/RCHANRT registers */
#define UDMA_CHAN_RT_CTL_REG 0x0
#define UDMA_CHAN_RT_SWTRIG_REG 0x8
#define UDMA_CHAN_RT_STDATA_REG 0x80
@@ -45,6 +45,18 @@
#define UDMA_CAP3_HCHAN_CNT(val) (((val) >> 14) & 0x1ff)
#define UDMA_CAP3_UCHAN_CNT(val) (((val) >> 23) & 0x1ff)
+#define BCDMA_CAP2_BCHAN_CNT(val) ((val) & 0x1ff)
+#define BCDMA_CAP2_TCHAN_CNT(val) (((val) >> 9) & 0x1ff)
+#define BCDMA_CAP2_RCHAN_CNT(val) (((val) >> 18) & 0x1ff)
+#define BCDMA_CAP3_HBCHAN_CNT(val) (((val) >> 14) & 0x1ff)
+#define BCDMA_CAP3_UBCHAN_CNT(val) (((val) >> 23) & 0x1ff)
+#define BCDMA_CAP4_HRCHAN_CNT(val) ((val) & 0xff)
+#define BCDMA_CAP4_URCHAN_CNT(val) (((val) >> 8) & 0xff)
+#define BCDMA_CAP4_HTCHAN_CNT(val) (((val) >> 16) & 0xff)
+#define BCDMA_CAP4_UTCHAN_CNT(val) (((val) >> 24) & 0xff)
+
+#define PKTDMA_CAP4_TFLOW_CNT(val) ((val) & 0x3fff)
+
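A worked decode of the new capability macros (the register value is hypothetical):

	/* cap2 = 0x00804020 read from GCFG + 0x28 on a BCDMA instance:
	 *   BCDMA_CAP2_BCHAN_CNT(cap2) = 0x00804020 & 0x1ff         = 32
	 *   BCDMA_CAP2_TCHAN_CNT(cap2) = (0x00804020 >> 9) & 0x1ff  = 32
	 *   BCDMA_CAP2_RCHAN_CNT(cap2) = (0x00804020 >> 18) & 0x1ff = 32
	 */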
/* UDMA_CHAN_RT_CTL_REG */
#define UDMA_CHAN_RT_CTL_EN BIT(31)
#define UDMA_CHAN_RT_CTL_TDOWN BIT(30)
@@ -82,15 +94,20 @@
*/
#define PDMA_STATIC_TR_Z(x, mask) ((x) & (mask))
+/* Address Space Select */
+#define K3_ADDRESS_ASEL_SHIFT 48
+
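ASEL rides in the address bits above the 48-bit DMA address; a minimal sketch matching the OR-ing done in the prep callbacks (the selector's meaning is SoC-interconnect specific, and the value 15 here is illustrative only):

	dma_addr_t tagged = buf_addr | ((u64)15 << K3_ADDRESS_ASEL_SHIFT);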
struct udma_dev;
struct udma_tchan;
struct udma_rchan;
struct udma_rflow;
enum udma_rm_range {
- RM_RANGE_TCHAN = 0,
+ RM_RANGE_BCHAN = 0,
+ RM_RANGE_TCHAN,
RM_RANGE_RCHAN,
RM_RANGE_RFLOW,
+ RM_RANGE_TFLOW,
RM_RANGE_LAST,
};
@@ -112,6 +129,8 @@ int xudma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
u32 dst_thread);
struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property);
+struct device *xudma_get_device(struct udma_dev *ud);
+struct k3_ringacc *xudma_get_ringacc(struct udma_dev *ud);
void xudma_dev_put(struct udma_dev *ud);
u32 xudma_dev_get_psil_base(struct udma_dev *ud);
struct udma_tisci_rm *xudma_dev_get_tisci_rm(struct udma_dev *ud);
@@ -136,5 +155,10 @@ void xudma_tchanrt_write(struct udma_tchan *tchan, int reg, u32 val);
u32 xudma_rchanrt_read(struct udma_rchan *rchan, int reg);
void xudma_rchanrt_write(struct udma_rchan *rchan, int reg, u32 val);
bool xudma_rflow_is_gp(struct udma_dev *ud, int id);
+int xudma_get_rflow_ring_offset(struct udma_dev *ud);
+
+int xudma_is_pktdma(struct udma_dev *ud);
+int xudma_pktdma_tflow_get_irq(struct udma_dev *ud, int udma_tflow_id);
+int xudma_pktdma_rflow_get_irq(struct udma_dev *ud, int udma_rflow_id);
#endif /* K3_UDMA_H_ */
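The new xudma helpers let glue code (e.g. the k3-udma-glue layer) branch on the DMA type; a hedged usage sketch, assuming flow_id and ring come from the caller:

	/* Sketch: pick the flow-based irq on PKTDMA, the ring irq otherwise */
	if (xudma_is_pktdma(ud))
		irq = xudma_pktdma_rflow_get_irq(ud, flow_id);
	else
		irq = k3_ringacc_get_ring_irq_num(ring);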
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 22faea653ea8..79777550a6ff 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -2781,7 +2781,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
has_dre = false;
if (!has_dre)
- xdev->common.copy_align = fls(width - 1);
+ xdev->common.copy_align = (enum dmaengine_alignment)fls(width - 1);
if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
@@ -2900,7 +2900,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
struct device_node *node)
{
- int ret, i, nr_channels = 1;
+ int ret, i;
+ u32 nr_channels = 1;
ret = of_property_read_u32(node, "dma-channels", &nr_channels);
if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA && ret < 0)
@@ -3112,7 +3113,11 @@ static int xilinx_dma_probe(struct platform_device *pdev)
}
/* Register the DMA engine with the core */
- dma_async_device_register(&xdev->common);
+ err = dma_async_device_register(&xdev->common);
+ if (err) {
+ dev_err(xdev->dev, "failed to register the dma device\n");
+ goto error;
+ }
err = of_dma_controller_register(node, of_dma_xilinx_xlate,
xdev);
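On the copy_align change above: fls(width - 1) yields the log2 of the bus width rounded up, which matches the dmaengine_alignment encoding where the alignment in bytes is 1 << copy_align. A quick worked sketch:

    /* fls(n) is the 1-based index of the highest set bit; fls(0) == 0.  */
    /* width =  8 bytes -> fls(7)  = 3 -> alignment 1 << 3 =  8 bytes    */
    /* width = 16 bytes -> fls(15) = 4 -> alignment 1 << 4 = 16 bytes    */
    enum dmaengine_alignment align = (enum dmaengine_alignment)fls(width - 1);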
diff --git a/drivers/firmware/arm_scmi/notify.c b/drivers/firmware/arm_scmi/notify.c
index ce336899d636..66196b293b6c 100644
--- a/drivers/firmware/arm_scmi/notify.c
+++ b/drivers/firmware/arm_scmi/notify.c
@@ -1474,17 +1474,17 @@ int scmi_notification_init(struct scmi_handle *handle)
ni->gid = gid;
ni->handle = handle;
+ ni->registered_protocols = devm_kcalloc(handle->dev, SCMI_MAX_PROTO,
+ sizeof(char *), GFP_KERNEL);
+ if (!ni->registered_protocols)
+ goto err;
+
ni->notify_wq = alloc_workqueue(dev_name(handle->dev),
WQ_UNBOUND | WQ_FREEZABLE | WQ_SYSFS,
0);
if (!ni->notify_wq)
goto err;
- ni->registered_protocols = devm_kcalloc(handle->dev, SCMI_MAX_PROTO,
- sizeof(char *), GFP_KERNEL);
- if (!ni->registered_protocols)
- goto err;
-
mutex_init(&ni->pending_mtx);
hash_init(ni->pending_events_handlers);
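The reordering above matters for the error path: registered_protocols is devm-managed and released automatically, while the workqueue must be destroyed by hand, so allocating the devm buffer first leaves nothing to unwind if it fails. A generic sketch of the idiom, with hypothetical names:

    /* Acquire automatically-managed resources before manually-managed ones. */
    static int demo_init(struct device *dev, struct demo_ctx *ctx)  /* hypothetical */
    {
        ctx->slots = devm_kcalloc(dev, DEMO_NSLOTS, sizeof(*ctx->slots), GFP_KERNEL);
        if (!ctx->slots)
            return -ENOMEM;          /* nothing manual to undo yet */

        ctx->wq = alloc_workqueue("demo", WQ_UNBOUND | WQ_FREEZABLE, 0);
        if (!ctx->wq)
            return -ENOMEM;          /* ctx->slots is freed by devm */
        return 0;
    }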
diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c
index b4232d611033..4541b891b733 100644
--- a/drivers/firmware/arm_scmi/sensors.c
+++ b/drivers/firmware/arm_scmi/sensors.c
@@ -2,21 +2,30 @@
/*
* System Control and Management Interface (SCMI) Sensor Protocol
*
- * Copyright (C) 2018 ARM Ltd.
+ * Copyright (C) 2018-2020 ARM Ltd.
*/
#define pr_fmt(fmt) "SCMI Notifications SENSOR - " fmt
+#include <linux/bitfield.h>
#include <linux/scmi_protocol.h>
#include "common.h"
#include "notify.h"
+#define SCMI_MAX_NUM_SENSOR_AXIS 63
+#define SCMIv2_SENSOR_PROTOCOL 0x10000
+
enum scmi_sensor_protocol_cmd {
SENSOR_DESCRIPTION_GET = 0x3,
SENSOR_TRIP_POINT_NOTIFY = 0x4,
SENSOR_TRIP_POINT_CONFIG = 0x5,
SENSOR_READING_GET = 0x6,
+ SENSOR_AXIS_DESCRIPTION_GET = 0x7,
+ SENSOR_LIST_UPDATE_INTERVALS = 0x8,
+ SENSOR_CONFIG_GET = 0x9,
+ SENSOR_CONFIG_SET = 0xA,
+ SENSOR_CONTINUOUS_UPDATE_NOTIFY = 0xB,
};
struct scmi_msg_resp_sensor_attributes {
@@ -28,29 +37,106 @@ struct scmi_msg_resp_sensor_attributes {
__le32 reg_size;
};
+/* v3 attributes_low macros */
+#define SUPPORTS_UPDATE_NOTIFY(x) FIELD_GET(BIT(30), (x))
+#define SENSOR_TSTAMP_EXP(x) FIELD_GET(GENMASK(14, 10), (x))
+#define SUPPORTS_TIMESTAMP(x) FIELD_GET(BIT(9), (x))
+#define SUPPORTS_EXTEND_ATTRS(x) FIELD_GET(BIT(8), (x))
+
+/* v2 attributes_high macros */
+#define SENSOR_UPDATE_BASE(x) FIELD_GET(GENMASK(31, 27), (x))
+#define SENSOR_UPDATE_SCALE(x) FIELD_GET(GENMASK(26, 22), (x))
+
+/* v3 attributes_high macros */
+#define SENSOR_AXIS_NUMBER(x) FIELD_GET(GENMASK(21, 16), (x))
+#define SUPPORTS_AXIS(x) FIELD_GET(BIT(8), (x))
+
+/* v3 resolution macros */
+#define SENSOR_RES(x) FIELD_GET(GENMASK(26, 0), (x))
+#define SENSOR_RES_EXP(x) FIELD_GET(GENMASK(31, 27), (x))
+
+struct scmi_msg_resp_attrs {
+ __le32 min_range_low;
+ __le32 min_range_high;
+ __le32 max_range_low;
+ __le32 max_range_high;
+};
+
struct scmi_msg_resp_sensor_description {
__le16 num_returned;
__le16 num_remaining;
- struct {
+ struct scmi_sensor_descriptor {
__le32 id;
__le32 attributes_low;
-#define SUPPORTS_ASYNC_READ(x) ((x) & BIT(31))
-#define NUM_TRIP_POINTS(x) ((x) & 0xff)
+/* Common attributes_low macros */
+#define SUPPORTS_ASYNC_READ(x) FIELD_GET(BIT(31), (x))
+#define NUM_TRIP_POINTS(x) FIELD_GET(GENMASK(7, 0), (x))
__le32 attributes_high;
-#define SENSOR_TYPE(x) ((x) & 0xff)
-#define SENSOR_SCALE(x) (((x) >> 11) & 0x1f)
-#define SENSOR_SCALE_SIGN BIT(4)
-#define SENSOR_SCALE_EXTEND GENMASK(7, 5)
-#define SENSOR_UPDATE_SCALE(x) (((x) >> 22) & 0x1f)
-#define SENSOR_UPDATE_BASE(x) (((x) >> 27) & 0x1f)
- u8 name[SCMI_MAX_STR_SIZE];
- } desc[0];
+/* Common attributes_high macros */
+#define SENSOR_SCALE(x) FIELD_GET(GENMASK(15, 11), (x))
+#define SENSOR_SCALE_SIGN BIT(4)
+#define SENSOR_SCALE_EXTEND GENMASK(31, 5)
+#define SENSOR_TYPE(x) FIELD_GET(GENMASK(7, 0), (x))
+ u8 name[SCMI_MAX_STR_SIZE];
+ /* only for version > 2.0 */
+ __le32 power;
+ __le32 resolution;
+ struct scmi_msg_resp_attrs scalar_attrs;
+ } desc[];
};
-struct scmi_msg_sensor_trip_point_notify {
+/* Base scmi_sensor_descriptor size excluding extended attrs after name */
+#define SCMI_MSG_RESP_SENS_DESCR_BASE_SZ 28
+
+/* Sign extend to a full s32 */
+#define S32_EXT(v) \
+ ({ \
+ int __v = (v); \
+ \
+ if (__v & SENSOR_SCALE_SIGN) \
+ __v |= SENSOR_SCALE_EXTEND; \
+ __v; \
+ })
+
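To make S32_EXT() concrete: SENSOR_SCALE() extracts a 5-bit two's-complement field, so bit 4 is the sign bit and GENMASK(31, 5) supplies the upper fill. A worked example with illustrative values:

    /* 5-bit field 0x1e is -2 in two's complement. */
    int neg = S32_EXT(0x1e);  /* 0x1e | GENMASK(31, 5) == 0xfffffffe == -2 */
    int pos = S32_EXT(0x03);  /* sign bit clear, stays 3 */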
+struct scmi_msg_sensor_axis_description_get {
+ __le32 id;
+ __le32 axis_desc_index;
+};
+
+struct scmi_msg_resp_sensor_axis_description {
+ __le32 num_axis_flags;
+#define NUM_AXIS_RETURNED(x) FIELD_GET(GENMASK(5, 0), (x))
+#define NUM_AXIS_REMAINING(x) FIELD_GET(GENMASK(31, 26), (x))
+ struct scmi_axis_descriptor {
+ __le32 id;
+ __le32 attributes_low;
+ __le32 attributes_high;
+ u8 name[SCMI_MAX_STR_SIZE];
+ __le32 resolution;
+ struct scmi_msg_resp_attrs attrs;
+ } desc[];
+};
+
+/* Base scmi_axis_descriptor size excluding extended attrs after name */
+#define SCMI_MSG_RESP_AXIS_DESCR_BASE_SZ 28
+
+struct scmi_msg_sensor_list_update_intervals {
+ __le32 id;
+ __le32 index;
+};
+
+struct scmi_msg_resp_sensor_list_update_intervals {
+ __le32 num_intervals_flags;
+#define NUM_INTERVALS_RETURNED(x) FIELD_GET(GENMASK(11, 0), (x))
+#define SEGMENTED_INTVL_FORMAT(x) FIELD_GET(BIT(12), (x))
+#define NUM_INTERVALS_REMAINING(x) FIELD_GET(GENMASK(31, 16), (x))
+ __le32 intervals[];
+};
+
+struct scmi_msg_sensor_request_notify {
__le32 id;
__le32 event_control;
-#define SENSOR_TP_NOTIFY_ALL BIT(0)
+#define SENSOR_NOTIFY_ALL BIT(0)
};
struct scmi_msg_set_sensor_trip_point {
@@ -66,18 +152,46 @@ struct scmi_msg_set_sensor_trip_point {
__le32 value_high;
};
+struct scmi_msg_sensor_config_set {
+ __le32 id;
+ __le32 sensor_config;
+};
+
struct scmi_msg_sensor_reading_get {
__le32 id;
__le32 flags;
#define SENSOR_READ_ASYNC BIT(0)
};
+struct scmi_resp_sensor_reading_complete {
+ __le32 id;
+ __le64 readings;
+};
+
+struct scmi_sensor_reading_resp {
+ __le32 sensor_value_low;
+ __le32 sensor_value_high;
+ __le32 timestamp_low;
+ __le32 timestamp_high;
+};
+
+struct scmi_resp_sensor_reading_complete_v3 {
+ __le32 id;
+ struct scmi_sensor_reading_resp readings[];
+};
+
struct scmi_sensor_trip_notify_payld {
__le32 agent_id;
__le32 sensor_id;
__le32 trip_point_desc;
};
+struct scmi_sensor_update_notify_payld {
+ __le32 agent_id;
+ __le32 sensor_id;
+ struct scmi_sensor_reading_resp readings[];
+};
+
struct sensors_info {
u32 version;
int num_sensors;
@@ -114,6 +228,194 @@ static int scmi_sensor_attributes_get(const struct scmi_handle *handle,
return ret;
}
+static inline void scmi_parse_range_attrs(struct scmi_range_attrs *out,
+ struct scmi_msg_resp_attrs *in)
+{
+ out->min_range = get_unaligned_le64((void *)&in->min_range_low);
+ out->max_range = get_unaligned_le64((void *)&in->max_range_low);
+}
+
+static int scmi_sensor_update_intervals(const struct scmi_handle *handle,
+ struct scmi_sensor_info *s)
+{
+ int ret, cnt;
+ u32 desc_index = 0;
+ u16 num_returned, num_remaining;
+ struct scmi_xfer *ti;
+ struct scmi_msg_resp_sensor_list_update_intervals *buf;
+ struct scmi_msg_sensor_list_update_intervals *msg;
+
+ ret = scmi_xfer_get_init(handle, SENSOR_LIST_UPDATE_INTERVALS,
+ SCMI_PROTOCOL_SENSOR, sizeof(*msg), 0, &ti);
+ if (ret)
+ return ret;
+
+ buf = ti->rx.buf;
+ do {
+ u32 flags;
+
+ msg = ti->tx.buf;
+ /* Set the sensor ID and the number of intervals already read */
+ msg->id = cpu_to_le32(s->id);
+ msg->index = cpu_to_le32(desc_index);
+
+ ret = scmi_do_xfer(handle, ti);
+ if (ret)
+ break;
+
+ flags = le32_to_cpu(buf->num_intervals_flags);
+ num_returned = NUM_INTERVALS_RETURNED(flags);
+ num_remaining = NUM_INTERVALS_REMAINING(flags);
+
+ /*
+ * The maximum number of intervals is not advertised anywhere,
+ * so assume it is returned + remaining.
+ */
+ if (!s->intervals.count) {
+ s->intervals.segmented = SEGMENTED_INTVL_FORMAT(flags);
+ s->intervals.count = num_returned + num_remaining;
+ /* segmented intervals are reported in one triplet */
+ if (s->intervals.segmented &&
+ (num_remaining || num_returned != 3)) {
+ dev_err(handle->dev,
+ "Sensor ID:%d advertises an invalid segmented interval (%d)\n",
+ s->id, s->intervals.count);
+ s->intervals.segmented = false;
+ s->intervals.count = 0;
+ ret = -EINVAL;
+ break;
+ }
+ /* Direct allocation when exceeding pre-allocated */
+ if (s->intervals.count >= SCMI_MAX_PREALLOC_POOL) {
+ s->intervals.desc =
+ devm_kcalloc(handle->dev,
+ s->intervals.count,
+ sizeof(*s->intervals.desc),
+ GFP_KERNEL);
+ if (!s->intervals.desc) {
+ s->intervals.segmented = false;
+ s->intervals.count = 0;
+ ret = -ENOMEM;
+ break;
+ }
+ }
+ } else if (desc_index + num_returned > s->intervals.count) {
+ dev_err(handle->dev,
+ "No. of update intervals can't exceed %d\n",
+ s->intervals.count);
+ ret = -EINVAL;
+ break;
+ }
+
+ for (cnt = 0; cnt < num_returned; cnt++)
+ s->intervals.desc[desc_index + cnt] =
+ le32_to_cpu(buf->intervals[cnt]);
+
+ desc_index += num_returned;
+
+ scmi_reset_rx_to_maxsz(handle, ti);
+ /*
+ * check for both returned and remaining to avoid infinite
+ * loop due to buggy firmware
+ */
+ } while (num_returned && num_remaining);
+
+ scmi_xfer_put(handle, ti);
+ return ret;
+}
+
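The loop above is an instance of the usual SCMI returned/remaining pagination pattern; stripped of the sensor specifics it reduces to the following skeleton (helper names are placeholders):

    /* Generic SCMI pagination: fetch chunks until nothing is left. */
    u32 index = 0;
    u16 num_returned, num_remaining;

    do {
        send_request(index);              /* hypothetical helpers */
        num_returned = parse_returned();
        num_remaining = parse_remaining();
        consume(index, num_returned);
        index += num_returned;
        /* test both counts so buggy firmware cannot loop us forever */
    } while (num_returned && num_remaining);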
+static int scmi_sensor_axis_description(const struct scmi_handle *handle,
+ struct scmi_sensor_info *s)
+{
+ int ret, cnt;
+ u32 desc_index = 0;
+ u16 num_returned, num_remaining;
+ struct scmi_xfer *te;
+ struct scmi_msg_resp_sensor_axis_description *buf;
+ struct scmi_msg_sensor_axis_description_get *msg;
+
+ s->axis = devm_kcalloc(handle->dev, s->num_axis,
+ sizeof(*s->axis), GFP_KERNEL);
+ if (!s->axis)
+ return -ENOMEM;
+
+ ret = scmi_xfer_get_init(handle, SENSOR_AXIS_DESCRIPTION_GET,
+ SCMI_PROTOCOL_SENSOR, sizeof(*msg), 0, &te);
+ if (ret)
+ return ret;
+
+ buf = te->rx.buf;
+ do {
+ u32 flags;
+ struct scmi_axis_descriptor *adesc;
+
+ msg = te->tx.buf;
+ /* Set the sensor ID and the number of axis descriptors already read */
+ msg->id = cpu_to_le32(s->id);
+ msg->axis_desc_index = cpu_to_le32(desc_index);
+
+ ret = scmi_do_xfer(handle, te);
+ if (ret)
+ break;
+
+ flags = le32_to_cpu(buf->num_axis_flags);
+ num_returned = NUM_AXIS_RETURNED(flags);
+ num_remaining = NUM_AXIS_REMAINING(flags);
+
+ if (desc_index + num_returned > s->num_axis) {
+ dev_err(handle->dev, "No. of axis can't exceed %d\n",
+ s->num_axis);
+ break;
+ }
+
+ adesc = &buf->desc[0];
+ for (cnt = 0; cnt < num_returned; cnt++) {
+ u32 attrh, attrl;
+ struct scmi_sensor_axis_info *a;
+ size_t dsize = SCMI_MSG_RESP_AXIS_DESCR_BASE_SZ;
+
+ attrl = le32_to_cpu(adesc->attributes_low);
+
+ a = &s->axis[desc_index + cnt];
+
+ a->id = le32_to_cpu(adesc->id);
+ a->extended_attrs = SUPPORTS_EXTEND_ATTRS(attrl);
+
+ attrh = le32_to_cpu(adesc->attributes_high);
+ a->scale = S32_EXT(SENSOR_SCALE(attrh));
+ a->type = SENSOR_TYPE(attrh);
+ strlcpy(a->name, adesc->name, SCMI_MAX_STR_SIZE);
+
+ if (a->extended_attrs) {
+ unsigned int ares =
+ le32_to_cpu(adesc->resolution);
+
+ a->resolution = SENSOR_RES(ares);
+ a->exponent =
+ S32_EXT(SENSOR_RES_EXP(ares));
+ dsize += sizeof(adesc->resolution);
+
+ scmi_parse_range_attrs(&a->attrs,
+ &adesc->attrs);
+ dsize += sizeof(adesc->attrs);
+ }
+
+ adesc = (typeof(adesc))((u8 *)adesc + dsize);
+ }
+
+ desc_index += num_returned;
+
+ scmi_reset_rx_to_maxsz(handle, te);
+ /*
+ * check for both returned and remaining to avoid infinite
+ * loop due to buggy firmware
+ */
+ } while (num_returned && num_remaining);
+
+ scmi_xfer_put(handle, te);
+ return ret;
+}
+
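Note how the walk advances through the response: each scmi_axis_descriptor is a fixed 28-byte base plus optional extended attributes, so the cursor moves by the computed dsize rather than by sizeof(*adesc). The essence of the pattern, assuming the same layout:

    /* Step over variable-size records packed back to back in a buffer. */
    size_t dsize = SCMI_MSG_RESP_AXIS_DESCR_BASE_SZ;

    if (has_extended_attrs)               /* per-record flag */
        dsize += sizeof(adesc->resolution) + sizeof(adesc->attrs);
    adesc = (typeof(adesc))((u8 *)adesc + dsize);   /* next record */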
static int scmi_sensor_description_get(const struct scmi_handle *handle,
struct sensors_info *si)
{
@@ -131,9 +433,10 @@ static int scmi_sensor_description_get(const struct scmi_handle *handle,
buf = t->rx.buf;
do {
+ struct scmi_sensor_descriptor *sdesc;
+
/* Set the number of sensors to be skipped/already read */
put_unaligned_le32(desc_index, t->tx.buf);
-
ret = scmi_do_xfer(handle, t);
if (ret)
break;
@@ -147,22 +450,97 @@ static int scmi_sensor_description_get(const struct scmi_handle *handle,
break;
}
+ sdesc = &buf->desc[0];
for (cnt = 0; cnt < num_returned; cnt++) {
u32 attrh, attrl;
struct scmi_sensor_info *s;
+ size_t dsize = SCMI_MSG_RESP_SENS_DESCR_BASE_SZ;
- attrl = le32_to_cpu(buf->desc[cnt].attributes_low);
- attrh = le32_to_cpu(buf->desc[cnt].attributes_high);
s = &si->sensors[desc_index + cnt];
- s->id = le32_to_cpu(buf->desc[cnt].id);
- s->type = SENSOR_TYPE(attrh);
- s->scale = SENSOR_SCALE(attrh);
- /* Sign extend to a full s8 */
- if (s->scale & SENSOR_SCALE_SIGN)
- s->scale |= SENSOR_SCALE_EXTEND;
+ s->id = le32_to_cpu(sdesc->id);
+
+ attrl = le32_to_cpu(sdesc->attributes_low);
+ /* common bitfields parsing */
s->async = SUPPORTS_ASYNC_READ(attrl);
s->num_trip_points = NUM_TRIP_POINTS(attrl);
- strlcpy(s->name, buf->desc[cnt].name, SCMI_MAX_STR_SIZE);
+ /*
+ * Only SCMIv3.0-specific bitfields below.
+ * Such bitfields are assumed to be zeroed on firmware
+ * versions where they are not defined (assuming the
+ * firmware is not buggy).
+ */
+ s->update = SUPPORTS_UPDATE_NOTIFY(attrl);
+ s->timestamped = SUPPORTS_TIMESTAMP(attrl);
+ if (s->timestamped)
+ s->tstamp_scale =
+ S32_EXT(SENSOR_TSTAMP_EXP(attrl));
+ s->extended_scalar_attrs =
+ SUPPORTS_EXTEND_ATTRS(attrl);
+
+ attrh = le32_to_cpu(sdesc->attributes_high);
+ /* common bitfields parsing */
+ s->scale = S32_EXT(SENSOR_SCALE(attrh));
+ s->type = SENSOR_TYPE(attrh);
+ /* Use pre-allocated pool wherever possible */
+ s->intervals.desc = s->intervals.prealloc_pool;
+ if (si->version == SCMIv2_SENSOR_PROTOCOL) {
+ s->intervals.segmented = false;
+ s->intervals.count = 1;
+ /*
+ * Convert SCMIv2.0 update interval format to
+ * SCMIv3.0 to be used as the common exposed
+ * descriptor, accessible via common macros.
+ */
+ s->intervals.desc[0] =
+ (SENSOR_UPDATE_BASE(attrh) << 5) |
+ SENSOR_UPDATE_SCALE(attrh);
+ } else {
+ /*
+ * From SCMIv3.0 update intervals are retrieved
+ * via a dedicated (optional) command.
+ * Since the command is optional, on error carry
+ * on without any update interval.
+ */
+ if (scmi_sensor_update_intervals(handle, s))
+ dev_dbg(handle->dev,
+ "Update Intervals not available for sensor ID:%d\n",
+ s->id);
+ }
+ /*
+ * Only bitfields specific to versions newer than
+ * SCMIv2.0 below. Such bitfields are assumed to be
+ * zeroed on firmware versions where they are not
+ * defined (assuming the firmware is not buggy).
+ */
+ s->num_axis = min_t(unsigned int,
+ SUPPORTS_AXIS(attrh) ?
+ SENSOR_AXIS_NUMBER(attrh) : 0,
+ SCMI_MAX_NUM_SENSOR_AXIS);
+ strlcpy(s->name, sdesc->name, SCMI_MAX_STR_SIZE);
+
+ if (s->extended_scalar_attrs) {
+ s->sensor_power = le32_to_cpu(sdesc->power);
+ dsize += sizeof(sdesc->power);
+ /* Only for sensors reporting scalar values */
+ if (s->num_axis == 0) {
+ unsigned int sres =
+ le32_to_cpu(sdesc->resolution);
+
+ s->resolution = SENSOR_RES(sres);
+ s->exponent =
+ S32_EXT(SENSOR_RES_EXP(sres));
+ dsize += sizeof(sdesc->resolution);
+
+ scmi_parse_range_attrs(&s->scalar_attrs,
+ &sdesc->scalar_attrs);
+ dsize += sizeof(sdesc->scalar_attrs);
+ }
+ }
+ if (s->num_axis > 0) {
+ ret = scmi_sensor_axis_description(handle, s);
+ if (ret)
+ goto out;
+ }
+
+ sdesc = (typeof(sdesc))((u8 *)sdesc + dsize);
}
desc_index += num_returned;
@@ -174,19 +552,21 @@ static int scmi_sensor_description_get(const struct scmi_handle *handle,
*/
} while (num_returned && num_remaining);
+out:
scmi_xfer_put(handle, t);
return ret;
}
-static int scmi_sensor_trip_point_notify(const struct scmi_handle *handle,
- u32 sensor_id, bool enable)
+static inline int
+scmi_sensor_request_notify(const struct scmi_handle *handle, u32 sensor_id,
+ u8 message_id, bool enable)
{
int ret;
- u32 evt_cntl = enable ? SENSOR_TP_NOTIFY_ALL : 0;
+ u32 evt_cntl = enable ? SENSOR_NOTIFY_ALL : 0;
struct scmi_xfer *t;
- struct scmi_msg_sensor_trip_point_notify *cfg;
+ struct scmi_msg_sensor_request_notify *cfg;
- ret = scmi_xfer_get_init(handle, SENSOR_TRIP_POINT_NOTIFY,
+ ret = scmi_xfer_get_init(handle, message_id,
SCMI_PROTOCOL_SENSOR, sizeof(*cfg), 0, &t);
if (ret)
return ret;
@@ -201,6 +581,23 @@ static int scmi_sensor_trip_point_notify(const struct scmi_handle *handle,
return ret;
}
+static int scmi_sensor_trip_point_notify(const struct scmi_handle *handle,
+ u32 sensor_id, bool enable)
+{
+ return scmi_sensor_request_notify(handle, sensor_id,
+ SENSOR_TRIP_POINT_NOTIFY,
+ enable);
+}
+
+static int
+scmi_sensor_continuous_update_notify(const struct scmi_handle *handle,
+ u32 sensor_id, bool enable)
+{
+ return scmi_sensor_request_notify(handle, sensor_id,
+ SENSOR_CONTINUOUS_UPDATE_NOTIFY,
+ enable);
+}
+
static int
scmi_sensor_trip_point_config(const struct scmi_handle *handle, u32 sensor_id,
u8 trip_id, u64 trip_value)
@@ -227,6 +624,75 @@ scmi_sensor_trip_point_config(const struct scmi_handle *handle, u32 sensor_id,
return ret;
}
+static int scmi_sensor_config_get(const struct scmi_handle *handle,
+ u32 sensor_id, u32 *sensor_config)
+{
+ int ret;
+ struct scmi_xfer *t;
+
+ ret = scmi_xfer_get_init(handle, SENSOR_CONFIG_GET,
+ SCMI_PROTOCOL_SENSOR, sizeof(__le32),
+ sizeof(__le32), &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(sensor_id, t->tx.buf);
+ ret = scmi_do_xfer(handle, t);
+ if (!ret) {
+ struct sensors_info *si = handle->sensor_priv;
+ struct scmi_sensor_info *s = si->sensors + sensor_id;
+
+ *sensor_config = get_unaligned_le32(t->rx.buf);
+ s->sensor_config = *sensor_config;
+ }
+
+ scmi_xfer_put(handle, t);
+ return ret;
+}
+
+static int scmi_sensor_config_set(const struct scmi_handle *handle,
+ u32 sensor_id, u32 sensor_config)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_sensor_config_set *msg;
+
+ ret = scmi_xfer_get_init(handle, SENSOR_CONFIG_SET,
+ SCMI_PROTOCOL_SENSOR, sizeof(*msg), 0, &t);
+ if (ret)
+ return ret;
+
+ msg = t->tx.buf;
+ msg->id = cpu_to_le32(sensor_id);
+ msg->sensor_config = cpu_to_le32(sensor_config);
+
+ ret = scmi_do_xfer(handle, t);
+ if (!ret) {
+ struct sensors_info *si = handle->sensor_priv;
+ struct scmi_sensor_info *s = si->sensors + sensor_id;
+
+ s->sensor_config = sensor_config;
+ }
+
+ scmi_xfer_put(handle, t);
+ return ret;
+}
+
+/**
+ * scmi_sensor_reading_get - Read scalar sensor value
+ * @handle: Platform handle
+ * @sensor_id: Sensor ID
+ * @value: The 64bit value sensor reading
+ *
+ * This function returns a single 64-bit reading value representing the sensor
+ * value; if the platform SCMI Protocol implementation and the sensor support
+ * multiple axes and timestamped reads, this just returns the first axis and
+ * drops the timestamp value.
+ * Use scmi_sensor_reading_get_timestamped() instead to retrieve the array of
+ * timestamped multi-axis values.
+ *
+ * Return: 0 on Success
+ */
static int scmi_sensor_reading_get(const struct scmi_handle *handle,
u32 sensor_id, u64 *value)
{
@@ -237,20 +703,24 @@ static int scmi_sensor_reading_get(const struct scmi_handle *handle,
struct scmi_sensor_info *s = si->sensors + sensor_id;
ret = scmi_xfer_get_init(handle, SENSOR_READING_GET,
- SCMI_PROTOCOL_SENSOR, sizeof(*sensor),
- sizeof(u64), &t);
+ SCMI_PROTOCOL_SENSOR, sizeof(*sensor), 0, &t);
if (ret)
return ret;
sensor = t->tx.buf;
sensor->id = cpu_to_le32(sensor_id);
-
if (s->async) {
sensor->flags = cpu_to_le32(SENSOR_READ_ASYNC);
ret = scmi_do_xfer_with_response(handle, t);
- if (!ret)
- *value = get_unaligned_le64((void *)
- ((__le32 *)t->rx.buf + 1));
+ if (!ret) {
+ struct scmi_resp_sensor_reading_complete *resp;
+
+ resp = t->rx.buf;
+ if (le32_to_cpu(resp->id) == sensor_id)
+ *value = get_unaligned_le64(&resp->readings);
+ else
+ ret = -EPROTO;
+ }
} else {
sensor->flags = cpu_to_le32(0);
ret = scmi_do_xfer(handle, t);
@@ -262,6 +732,84 @@ static int scmi_sensor_reading_get(const struct scmi_handle *handle,
return ret;
}
+static inline void
+scmi_parse_sensor_readings(struct scmi_sensor_reading *out,
+ const struct scmi_sensor_reading_resp *in)
+{
+ out->value = get_unaligned_le64((void *)&in->sensor_value_low);
+ out->timestamp = get_unaligned_le64((void *)&in->timestamp_low);
+}
+
+/**
+ * scmi_sensor_reading_get_timestamped - Read multiple-axis timestamped values
+ * @handle: Platform handle
+ * @sensor_id: Sensor ID
+ * @count: The length of the provided @readings array
+ * @readings: An array of elements each representing a timestamped per-axis
+ * reading of type @struct scmi_sensor_reading.
+ * Returned readings are ordered like the @axis descriptors array
+ * included in @struct scmi_sensor_info, and at most
+ * min(@count, @num_axis) elements are returned; ideally the
+ * provided array should have @count equal to @num_axis.
+ *
+ * Return: 0 on Success
+ */
+static int
+scmi_sensor_reading_get_timestamped(const struct scmi_handle *handle,
+ u32 sensor_id, u8 count,
+ struct scmi_sensor_reading *readings)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_sensor_reading_get *sensor;
+ struct sensors_info *si = handle->sensor_priv;
+ struct scmi_sensor_info *s = si->sensors + sensor_id;
+
+ if (!count || !readings ||
+ (!s->num_axis && count > 1) || (s->num_axis && count > s->num_axis))
+ return -EINVAL;
+
+ ret = scmi_xfer_get_init(handle, SENSOR_READING_GET,
+ SCMI_PROTOCOL_SENSOR, sizeof(*sensor), 0, &t);
+ if (ret)
+ return ret;
+
+ sensor = t->tx.buf;
+ sensor->id = cpu_to_le32(sensor_id);
+ if (s->async) {
+ sensor->flags = cpu_to_le32(SENSOR_READ_ASYNC);
+ ret = scmi_do_xfer_with_response(handle, t);
+ if (!ret) {
+ int i;
+ struct scmi_resp_sensor_reading_complete_v3 *resp;
+
+ resp = t->rx.buf;
+ /* Retrieve only the requested number of axes anyway */
+ if (le32_to_cpu(resp->id) == sensor_id)
+ for (i = 0; i < count; i++)
+ scmi_parse_sensor_readings(&readings[i],
+ &resp->readings[i]);
+ else
+ ret = -EPROTO;
+ }
+ } else {
+ sensor->flags = cpu_to_le32(0);
+ ret = scmi_do_xfer(handle, t);
+ if (!ret) {
+ int i;
+ struct scmi_sensor_reading_resp *resp_readings;
+
+ resp_readings = t->rx.buf;
+ for (i = 0; i < count; i++)
+ scmi_parse_sensor_readings(&readings[i],
+ &resp_readings[i]);
+ }
+ }
+
+ scmi_xfer_put(handle, t);
+ return ret;
+}
+
static const struct scmi_sensor_info *
scmi_sensor_info_get(const struct scmi_handle *handle, u32 sensor_id)
{
@@ -282,6 +830,9 @@ static const struct scmi_sensor_ops sensor_ops = {
.info_get = scmi_sensor_info_get,
.trip_point_config = scmi_sensor_trip_point_config,
.reading_get = scmi_sensor_reading_get,
+ .reading_get_timestamped = scmi_sensor_reading_get_timestamped,
+ .config_get = scmi_sensor_config_get,
+ .config_set = scmi_sensor_config_set,
};
static int scmi_sensor_set_notify_enabled(const struct scmi_handle *handle,
@@ -289,7 +840,19 @@ static int scmi_sensor_set_notify_enabled(const struct scmi_handle *handle,
{
int ret;
- ret = scmi_sensor_trip_point_notify(handle, src_id, enable);
+ switch (evt_id) {
+ case SCMI_EVENT_SENSOR_TRIP_POINT_EVENT:
+ ret = scmi_sensor_trip_point_notify(handle, src_id, enable);
+ break;
+ case SCMI_EVENT_SENSOR_UPDATE:
+ ret = scmi_sensor_continuous_update_notify(handle, src_id,
+ enable);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
if (ret)
pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
evt_id, src_id, ret);
@@ -302,20 +865,59 @@ static void *scmi_sensor_fill_custom_report(const struct scmi_handle *handle,
const void *payld, size_t payld_sz,
void *report, u32 *src_id)
{
- const struct scmi_sensor_trip_notify_payld *p = payld;
- struct scmi_sensor_trip_point_report *r = report;
+ void *rep = NULL;
+
+ switch (evt_id) {
+ case SCMI_EVENT_SENSOR_TRIP_POINT_EVENT:
+ {
+ const struct scmi_sensor_trip_notify_payld *p = payld;
+ struct scmi_sensor_trip_point_report *r = report;
- if (evt_id != SCMI_EVENT_SENSOR_TRIP_POINT_EVENT ||
- sizeof(*p) != payld_sz)
- return NULL;
+ if (sizeof(*p) != payld_sz)
+ break;
- r->timestamp = timestamp;
- r->agent_id = le32_to_cpu(p->agent_id);
- r->sensor_id = le32_to_cpu(p->sensor_id);
- r->trip_point_desc = le32_to_cpu(p->trip_point_desc);
- *src_id = r->sensor_id;
+ r->timestamp = timestamp;
+ r->agent_id = le32_to_cpu(p->agent_id);
+ r->sensor_id = le32_to_cpu(p->sensor_id);
+ r->trip_point_desc = le32_to_cpu(p->trip_point_desc);
+ *src_id = r->sensor_id;
+ rep = r;
+ break;
+ }
+ case SCMI_EVENT_SENSOR_UPDATE:
+ {
+ int i;
+ struct scmi_sensor_info *s;
+ const struct scmi_sensor_update_notify_payld *p = payld;
+ struct scmi_sensor_update_report *r = report;
+ struct sensors_info *sinfo = handle->sensor_priv;
+
+ /* payld_sz is variable for this event */
+ r->sensor_id = le32_to_cpu(p->sensor_id);
+ if (r->sensor_id >= sinfo->num_sensors)
+ break;
+ r->timestamp = timestamp;
+ r->agent_id = le32_to_cpu(p->agent_id);
+ s = &sinfo->sensors[r->sensor_id];
+ /*
+ * The generated report r (@struct scmi_sensor_update_report)
+ * was pre-allocated to contain up to SCMI_MAX_NUM_SENSOR_AXIS
+ * readings: here it is filled with the effective @num_axis
+ * readings defined for this sensor or 1 for scalar sensors.
+ */
+ r->readings_count = s->num_axis ?: 1;
+ for (i = 0; i < r->readings_count; i++)
+ scmi_parse_sensor_readings(&r->readings[i],
+ &p->readings[i]);
+ *src_id = r->sensor_id;
+ rep = r;
+ break;
+ }
+ default:
+ break;
+ }
- return r;
+ return rep;
}
static const struct scmi_event sensor_events[] = {
@@ -324,6 +926,16 @@ static const struct scmi_event sensor_events[] = {
.max_payld_sz = sizeof(struct scmi_sensor_trip_notify_payld),
.max_report_sz = sizeof(struct scmi_sensor_trip_point_report),
},
+ {
+ .id = SCMI_EVENT_SENSOR_UPDATE,
+ .max_payld_sz =
+ sizeof(struct scmi_sensor_update_notify_payld) +
+ SCMI_MAX_NUM_SENSOR_AXIS *
+ sizeof(struct scmi_sensor_reading_resp),
+ .max_report_sz = sizeof(struct scmi_sensor_update_report) +
+ SCMI_MAX_NUM_SENSOR_AXIS *
+ sizeof(struct scmi_sensor_reading),
+ },
};
static const struct scmi_event_ops sensor_event_ops = {
@@ -334,6 +946,7 @@ static const struct scmi_event_ops sensor_event_ops = {
static int scmi_sensors_protocol_init(struct scmi_handle *handle)
{
u32 version;
+ int ret;
struct sensors_info *sinfo;
scmi_version_get(handle, SCMI_PROTOCOL_SENSOR, &version);
@@ -344,15 +957,19 @@ static int scmi_sensors_protocol_init(struct scmi_handle *handle)
sinfo = devm_kzalloc(handle->dev, sizeof(*sinfo), GFP_KERNEL);
if (!sinfo)
return -ENOMEM;
+ sinfo->version = version;
- scmi_sensor_attributes_get(handle, sinfo);
-
+ ret = scmi_sensor_attributes_get(handle, sinfo);
+ if (ret)
+ return ret;
sinfo->sensors = devm_kcalloc(handle->dev, sinfo->num_sensors,
sizeof(*sinfo->sensors), GFP_KERNEL);
if (!sinfo->sensors)
return -ENOMEM;
- scmi_sensor_description_get(handle, sinfo);
+ ret = scmi_sensor_description_get(handle, sinfo);
+ if (ret)
+ return ret;
scmi_register_protocol_events(handle,
SCMI_PROTOCOL_SENSOR, SCMI_PROTO_QUEUE_SZ,
@@ -360,9 +977,8 @@ static int scmi_sensors_protocol_init(struct scmi_handle *handle)
ARRAY_SIZE(sensor_events),
sinfo->num_sensors);
- sinfo->version = version;
- handle->sensor_ops = &sensor_ops;
handle->sensor_priv = sinfo;
+ handle->sensor_ops = &sensor_ops;
return 0;
}
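A hedged sketch of how a sensor client might exercise the extended ops through the handle once this protocol has initialized; the sensor ID is illustrative and error handling is elided:

    /* Read all axes of sensor 0 with timestamps, if it is multi-axis. */
    struct scmi_sensor_reading readings[4];
    const struct scmi_sensor_info *info;

    info = handle->sensor_ops->info_get(handle, 0);
    if (info && info->num_axis) {
        u8 count = min_t(u8, info->num_axis, ARRAY_SIZE(readings));

        handle->sensor_ops->reading_get_timestamped(handle, 0, count, readings);
    }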
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index d9895491ff34..2c3dac5ecb36 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -122,7 +122,7 @@ config EFI_ARMSTUB_DTB_LOADER
config EFI_GENERIC_STUB_INITRD_CMDLINE_LOADER
bool "Enable the command line initrd loader" if !X86
depends on EFI_STUB && (EFI_GENERIC_STUB || X86)
- default y
+ default y if X86
depends on !RISCV
help
Select this config option to add support for the initrd= command
@@ -147,7 +147,7 @@ config EFI_BOOTLOADER_CONTROL
config EFI_CAPSULE_LOADER
tristate "EFI capsule loader"
- depends on EFI
+ depends on EFI && !IA64
help
This option exposes a loader interface "/dev/efi_capsule_loader" for
users to load EFI capsules. This driver requires working runtime
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile
index d6ca2da19339..467e94259679 100644
--- a/drivers/firmware/efi/Makefile
+++ b/drivers/firmware/efi/Makefile
@@ -12,7 +12,10 @@ KASAN_SANITIZE_runtime-wrappers.o := n
obj-$(CONFIG_ACPI_BGRT) += efi-bgrt.o
obj-$(CONFIG_EFI) += efi.o vars.o reboot.o memattr.o tpm.o
-obj-$(CONFIG_EFI) += capsule.o memmap.o
+obj-$(CONFIG_EFI) += memmap.o
+ifneq ($(CONFIG_EFI_CAPSULE_LOADER),)
+obj-$(CONFIG_EFI) += capsule.o
+endif
obj-$(CONFIG_EFI_PARAMS_FROM_FDT) += fdtparams.o
obj-$(CONFIG_EFI_VARS) += efivars.o
obj-$(CONFIG_EFI_ESRT) += esrt.o
diff --git a/drivers/firmware/efi/apple-properties.c b/drivers/firmware/efi/apple-properties.c
index 34f53d898acb..e1926483ae2f 100644
--- a/drivers/firmware/efi/apple-properties.c
+++ b/drivers/firmware/efi/apple-properties.c
@@ -3,8 +3,9 @@
* apple-properties.c - EFI device properties on Macs
* Copyright (C) 2016 Lukas Wunner <lukas@wunner.de>
*
- * Note, all properties are considered as u8 arrays.
- * To get a value of any of them the caller must use device_property_read_u8_array().
+ * Properties are stored either as:
+ * u8 arrays which can be retrieved with device_property_read_u8_array() or
+ * booleans which can be queried with device_property_present().
*/
#define pr_fmt(fmt) "apple-properties: " fmt
@@ -88,8 +89,12 @@ static void __init unmarshal_key_value_pairs(struct dev_header *dev_header,
entry_data = ptr + key_len + sizeof(val_len);
entry_len = val_len - sizeof(val_len);
- entry[i] = PROPERTY_ENTRY_U8_ARRAY_LEN(key, entry_data,
- entry_len);
+ if (entry_len)
+ entry[i] = PROPERTY_ENTRY_U8_ARRAY_LEN(key, entry_data,
+ entry_len);
+ else
+ entry[i] = PROPERTY_ENTRY_BOOL(key);
+
if (dump_properties) {
dev_info(dev, "property: %s\n", key);
print_hex_dump(KERN_INFO, pr_fmt(), DUMP_PREFIX_OFFSET,
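With this change a consumer has to pick the accessor matching how the property was marshalled: zero-length values become booleans, everything else stays a u8 array. A brief sketch with made-up property names:

    /* Query an EFI device property exposed by apple-properties. */
    if (device_property_present(dev, "example-flag"))
        enable_quirk();                               /* hypothetical */

    u8 mac[6];
    if (!device_property_read_u8_array(dev, "example-mac", mac, sizeof(mac)))
        use_mac(mac);                                 /* hypothetical */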
diff --git a/drivers/firmware/efi/capsule.c b/drivers/firmware/efi/capsule.c
index 598b7800d14e..768430293669 100644
--- a/drivers/firmware/efi/capsule.c
+++ b/drivers/firmware/efi/capsule.c
@@ -12,6 +12,7 @@
#include <linux/highmem.h>
#include <linux/efi.h>
#include <linux/vmalloc.h>
+#include <asm/efi.h>
#include <asm/io.h>
typedef struct {
@@ -244,7 +245,7 @@ int efi_capsule_update(efi_capsule_header_t *capsule, phys_addr_t *pages)
for (i = 0; i < sg_count; i++) {
efi_capsule_block_desc_t *sglist;
- sglist = kmap(sg_pages[i]);
+ sglist = kmap_atomic(sg_pages[i]);
for (j = 0; j < SGLIST_PER_PAGE && count > 0; j++) {
u64 sz = min_t(u64, imagesize,
@@ -265,7 +266,18 @@ int efi_capsule_update(efi_capsule_header_t *capsule, phys_addr_t *pages)
else
sglist[j].data = page_to_phys(sg_pages[i + 1]);
- kunmap(sg_pages[i]);
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+ /*
+ * At runtime, the firmware has no way to find out where the
+ * sglist elements are mapped, if they are mapped in the first
+ * place. Therefore, on architectures that can only perform
+ * cache maintenance by virtual address, the firmware is unable
+ * to perform this maintenance, and so it is up to the OS to do
+ * it instead.
+ */
+ efi_capsule_flush_cache_range(sglist, PAGE_SIZE);
+#endif
+ kunmap_atomic(sglist);
}
mutex_lock(&capsule_mutex);
diff --git a/drivers/firmware/efi/libstub/efi-stub.c b/drivers/firmware/efi/libstub/efi-stub.c
index 914a343c7785..ec2f3985bef3 100644
--- a/drivers/firmware/efi/libstub/efi-stub.c
+++ b/drivers/firmware/efi/libstub/efi-stub.c
@@ -273,7 +273,6 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
install_memreserve_table();
status = allocate_new_fdt_and_exit_boot(handle, &fdt_addr,
- efi_get_max_fdt_addr(image_addr),
initrd_addr, initrd_size,
cmdline_ptr, fdt_addr, fdt_size);
if (status != EFI_SUCCESS)
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index 2d7abcd99de9..b50a6c67d9bd 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -750,7 +750,6 @@ efi_status_t efi_exit_boot_services(void *handle,
efi_status_t allocate_new_fdt_and_exit_boot(void *handle,
unsigned long *new_fdt_addr,
- unsigned long max_addr,
u64 initrd_addr, u64 initrd_size,
char *cmdline_ptr,
unsigned long fdt_addr,
@@ -848,4 +847,6 @@ asmlinkage void __noreturn efi_enter_kernel(unsigned long entrypoint,
void efi_handle_post_ebs_state(void);
+enum efi_secureboot_mode efi_get_secureboot(void);
+
#endif
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
index 368cd60000ee..365c3a43a198 100644
--- a/drivers/firmware/efi/libstub/fdt.c
+++ b/drivers/firmware/efi/libstub/fdt.c
@@ -238,7 +238,6 @@ static efi_status_t exit_boot_func(struct efi_boot_memmap *map,
efi_status_t allocate_new_fdt_and_exit_boot(void *handle,
unsigned long *new_fdt_addr,
- unsigned long max_addr,
u64 initrd_addr, u64 initrd_size,
char *cmdline_ptr,
unsigned long fdt_addr,
@@ -275,7 +274,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(void *handle,
efi_info("Exiting boot services and installing virtual address map...\n");
map.map = &memory_map;
- status = efi_allocate_pages(MAX_FDT_SIZE, new_fdt_addr, max_addr);
+ status = efi_allocate_pages(MAX_FDT_SIZE, new_fdt_addr, ULONG_MAX);
if (status != EFI_SUCCESS) {
efi_err("Unable to allocate memory for new device tree.\n");
goto fail;
diff --git a/drivers/firmware/efi/libstub/secureboot.c b/drivers/firmware/efi/libstub/secureboot.c
index 5efc524b14be..8a18930f3eb6 100644
--- a/drivers/firmware/efi/libstub/secureboot.c
+++ b/drivers/firmware/efi/libstub/secureboot.c
@@ -12,44 +12,34 @@
#include "efistub.h"
-/* BIOS variables */
-static const efi_guid_t efi_variable_guid = EFI_GLOBAL_VARIABLE_GUID;
-static const efi_char16_t efi_SecureBoot_name[] = L"SecureBoot";
-static const efi_char16_t efi_SetupMode_name[] = L"SetupMode";
-
/* SHIM variables */
static const efi_guid_t shim_guid = EFI_SHIM_LOCK_GUID;
static const efi_char16_t shim_MokSBState_name[] = L"MokSBState";
+static efi_status_t get_var(efi_char16_t *name, efi_guid_t *vendor, u32 *attr,
+ unsigned long *data_size, void *data)
+{
+ return get_efi_var(name, vendor, attr, data_size, data);
+}
+
/*
* Determine whether we're in secure boot mode.
- *
- * Please keep the logic in sync with
- * arch/x86/xen/efi.c:xen_efi_get_secureboot().
*/
enum efi_secureboot_mode efi_get_secureboot(void)
{
u32 attr;
- u8 secboot, setupmode, moksbstate;
unsigned long size;
+ enum efi_secureboot_mode mode;
efi_status_t status;
+ u8 moksbstate;
- size = sizeof(secboot);
- status = get_efi_var(efi_SecureBoot_name, &efi_variable_guid,
- NULL, &size, &secboot);
- if (status == EFI_NOT_FOUND)
- return efi_secureboot_mode_disabled;
- if (status != EFI_SUCCESS)
- goto out_efi_err;
-
- size = sizeof(setupmode);
- status = get_efi_var(efi_SetupMode_name, &efi_variable_guid,
- NULL, &size, &setupmode);
- if (status != EFI_SUCCESS)
- goto out_efi_err;
-
- if (secboot == 0 || setupmode == 1)
- return efi_secureboot_mode_disabled;
+ mode = efi_get_secureboot_mode(get_var);
+ if (mode == efi_secureboot_mode_unknown) {
+ efi_err("Could not determine UEFI Secure Boot status.\n");
+ return efi_secureboot_mode_unknown;
+ }
+ if (mode != efi_secureboot_mode_enabled)
+ return mode;
/*
* See if a user has put the shim into insecure mode. If so, and if the
@@ -69,8 +59,4 @@ enum efi_secureboot_mode efi_get_secureboot(void)
secure_boot_enabled:
efi_info("UEFI Secure Boot is enabled.\n");
return efi_secureboot_mode_enabled;
-
-out_efi_err:
- efi_err("Could not determine UEFI Secure Boot status.\n");
- return efi_secureboot_mode_unknown;
}
diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
index 3672539cb96e..f14c4ff5839f 100644
--- a/drivers/firmware/efi/libstub/x86-stub.c
+++ b/drivers/firmware/efi/libstub/x86-stub.c
@@ -715,8 +715,11 @@ unsigned long efi_main(efi_handle_t handle,
(IS_ENABLED(CONFIG_X86_32) && buffer_end > KERNEL_IMAGE_SIZE) ||
(IS_ENABLED(CONFIG_X86_64) && buffer_end > MAXMEM_X86_64_4LEVEL) ||
(image_offset == 0)) {
+ extern char _bss[];
+
status = efi_relocate_kernel(&bzimage_addr,
- hdr->init_size, hdr->init_size,
+ (unsigned long)_bss - bzimage_addr,
+ hdr->init_size,
hdr->pref_address,
hdr->kernel_alignment,
LOAD_PHYSICAL_ADDR);
diff --git a/drivers/firmware/efi/test/efi_test.c b/drivers/firmware/efi/test/efi_test.c
index ddf9eae396fe..47d67bb0a516 100644
--- a/drivers/firmware/efi/test/efi_test.c
+++ b/drivers/firmware/efi/test/efi_test.c
@@ -663,6 +663,19 @@ out:
return rv;
}
+static long efi_runtime_get_supported_mask(unsigned long arg)
+{
+ unsigned int __user *supported_mask;
+ int rv = 0;
+
+ supported_mask = (unsigned int __user *)arg;
+
+ if (put_user(efi.runtime_supported_mask, supported_mask))
+ rv = -EFAULT;
+
+ return rv;
+}
+
static long efi_test_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
@@ -699,6 +712,9 @@ static long efi_test_ioctl(struct file *file, unsigned int cmd,
case EFI_RUNTIME_RESET_SYSTEM:
return efi_runtime_reset_system(arg);
+
+ case EFI_RUNTIME_GET_SUPPORTED_MASK:
+ return efi_runtime_get_supported_mask(arg);
}
return -ENOTTY;
diff --git a/drivers/firmware/efi/test/efi_test.h b/drivers/firmware/efi/test/efi_test.h
index f2446aa1c2e3..117349e57993 100644
--- a/drivers/firmware/efi/test/efi_test.h
+++ b/drivers/firmware/efi/test/efi_test.h
@@ -118,4 +118,7 @@ struct efi_resetsystem {
#define EFI_RUNTIME_RESET_SYSTEM \
_IOW('p', 0x0B, struct efi_resetsystem)
+#define EFI_RUNTIME_GET_SUPPORTED_MASK \
+ _IOR('p', 0x0C, unsigned int)
+
#endif /* _DRIVERS_FIRMWARE_EFI_TEST_H_ */
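From userspace the new ioctl would be exercised roughly as follows; this is a sketch that assumes the driver's usual /dev/efi_test misc node and elides error handling:

    /* Query which EFI runtime services the firmware supports. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    /* EFI_RUNTIME_GET_SUPPORTED_MASK comes from the efi_test.h UAPI above. */

    unsigned int mask = 0;
    int fd = open("/dev/efi_test", O_RDWR);

    if (fd >= 0 && ioctl(fd, EFI_RUNTIME_GET_SUPPORTED_MASK, &mask) == 0)
        printf("runtime services mask: 0x%x\n", mask);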
diff --git a/drivers/firmware/imx/Kconfig b/drivers/firmware/imx/Kconfig
index 1d2e5b85d7ca..c027d99f2a59 100644
--- a/drivers/firmware/imx/Kconfig
+++ b/drivers/firmware/imx/Kconfig
@@ -13,6 +13,7 @@ config IMX_DSP
config IMX_SCU
bool "IMX SCU Protocol driver"
depends on IMX_MBOX
+ select SOC_BUS
help
The System Controller Firmware (SCFW) is a low-level system function
which runs on a dedicated Cortex-M core to provide power, clock, and
diff --git a/drivers/firmware/imx/imx-dsp.c b/drivers/firmware/imx/imx-dsp.c
index 4265e9dbed84..a6c06d7476c3 100644
--- a/drivers/firmware/imx/imx-dsp.c
+++ b/drivers/firmware/imx/imx-dsp.c
@@ -60,22 +60,40 @@ static void imx_dsp_handle_rx(struct mbox_client *c, void *msg)
}
}
-static int imx_dsp_probe(struct platform_device *pdev)
+struct mbox_chan *imx_dsp_request_channel(struct imx_dsp_ipc *dsp_ipc, int idx)
{
- struct device *dev = &pdev->dev;
- struct imx_dsp_ipc *dsp_ipc;
+ struct imx_dsp_chan *dsp_chan;
+
+ if (idx >= DSP_MU_CHAN_NUM)
+ return ERR_PTR(-EINVAL);
+
+ dsp_chan = &dsp_ipc->chans[idx];
+ dsp_chan->ch = mbox_request_channel_byname(&dsp_chan->cl, dsp_chan->name);
+ return dsp_chan->ch;
+}
+EXPORT_SYMBOL(imx_dsp_request_channel);
+
+void imx_dsp_free_channel(struct imx_dsp_ipc *dsp_ipc, int idx)
+{
+ struct imx_dsp_chan *dsp_chan;
+
+ if (idx >= DSP_MU_CHAN_NUM)
+ return;
+
+ dsp_chan = &dsp_ipc->chans[idx];
+ mbox_free_channel(dsp_chan->ch);
+}
+EXPORT_SYMBOL(imx_dsp_free_channel);
+
+static int imx_dsp_setup_channels(struct imx_dsp_ipc *dsp_ipc)
+{
+ struct device *dev = dsp_ipc->dev;
struct imx_dsp_chan *dsp_chan;
struct mbox_client *cl;
char *chan_name;
int ret;
int i, j;
- device_set_of_node_from_dev(&pdev->dev, pdev->dev.parent);
-
- dsp_ipc = devm_kzalloc(dev, sizeof(*dsp_ipc), GFP_KERNEL);
- if (!dsp_ipc)
- return -ENOMEM;
-
for (i = 0; i < DSP_MU_CHAN_NUM; i++) {
if (i < 2)
chan_name = kasprintf(GFP_KERNEL, "txdb%d", i);
@@ -86,6 +104,7 @@ static int imx_dsp_probe(struct platform_device *pdev)
return -ENOMEM;
dsp_chan = &dsp_ipc->chans[i];
+ dsp_chan->name = chan_name;
cl = &dsp_chan->cl;
cl->dev = dev;
cl->tx_block = false;
@@ -104,27 +123,43 @@ static int imx_dsp_probe(struct platform_device *pdev)
}
dev_dbg(dev, "request mbox chan %s\n", chan_name);
- /* chan_name is not used anymore by framework */
- kfree(chan_name);
}
- dsp_ipc->dev = dev;
-
- dev_set_drvdata(dev, dsp_ipc);
-
- dev_info(dev, "NXP i.MX DSP IPC initialized\n");
-
return 0;
out:
- kfree(chan_name);
for (j = 0; j < i; j++) {
dsp_chan = &dsp_ipc->chans[j];
mbox_free_channel(dsp_chan->ch);
+ kfree(dsp_chan->name);
}
return ret;
}
+static int imx_dsp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct imx_dsp_ipc *dsp_ipc;
+ int ret;
+
+ device_set_of_node_from_dev(&pdev->dev, pdev->dev.parent);
+
+ dsp_ipc = devm_kzalloc(dev, sizeof(*dsp_ipc), GFP_KERNEL);
+ if (!dsp_ipc)
+ return -ENOMEM;
+
+ dsp_ipc->dev = dev;
+ dev_set_drvdata(dev, dsp_ipc);
+
+ ret = imx_dsp_setup_channels(dsp_ipc);
+ if (ret < 0)
+ return ret;
+
+ dev_info(dev, "NXP i.MX DSP IPC initialized\n");
+
+ return 0;
+}
+
static int imx_dsp_remove(struct platform_device *pdev)
{
struct imx_dsp_chan *dsp_chan;
@@ -136,6 +171,7 @@ static int imx_dsp_remove(struct platform_device *pdev)
for (i = 0; i < DSP_MU_CHAN_NUM; i++) {
dsp_chan = &dsp_ipc->chans[i];
mbox_free_channel(dsp_chan->ch);
+ kfree(dsp_chan->name);
}
return 0;
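A sketch of how a client driver might use the newly exported helpers to grab a mailbox channel on demand and release it afterwards; the channel index is illustrative:

    /* Request one DSP mailbox channel by index, use it, then free it. */
    struct mbox_chan *ch = imx_dsp_request_channel(dsp_ipc, 2);

    if (!IS_ERR(ch)) {
        /* ... exchange messages with the DSP firmware ... */
        imx_dsp_free_channel(dsp_ipc, 2);
    }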
diff --git a/drivers/firmware/imx/scu-pd.c b/drivers/firmware/imx/scu-pd.c
index 946eea292b52..08533ee67626 100644
--- a/drivers/firmware/imx/scu-pd.c
+++ b/drivers/firmware/imx/scu-pd.c
@@ -160,12 +160,18 @@ static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = {
{ "mipi0-pwm0", IMX_SC_R_MIPI_0_PWM_0, 1, false, 0 },
{ "mipi0-i2c", IMX_SC_R_MIPI_0_I2C_0, 2, true, 0 },
+ { "mipi1", IMX_SC_R_MIPI_1, 1, false, 0 },
+ { "mipi1-pwm0", IMX_SC_R_MIPI_1_PWM_0, 1, false, 0 },
+ { "mipi1-i2c", IMX_SC_R_MIPI_1_I2C_0, 2, true, 0 },
+
/* LVDS SS */
{ "lvds0", IMX_SC_R_LVDS_0, 1, false, 0 },
+ { "lvds1", IMX_SC_R_LVDS_1, 1, false, 0 },
/* DC SS */
{ "dc0", IMX_SC_R_DC_0, 1, false, 0 },
{ "dc0-pll", IMX_SC_R_DC_0_PLL_0, 2, true, 0 },
+ { "dc0-video", IMX_SC_R_DC_0_VIDEO0, 2, true, 0 },
/* CM40 SS */
{ "cm40-i2c", IMX_SC_R_M4_0_I2C, 1, false, 0 },
@@ -180,6 +186,12 @@ static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = {
{ "cm41-pid", IMX_SC_R_M4_1_PID0, 5, true, 0},
{ "cm41-mu-a1", IMX_SC_R_M4_1_MU_1A, 1, false, 0},
{ "cm41-lpuart", IMX_SC_R_M4_1_UART, 1, false, 0},
+
+ /* IMAGE SS */
+ { "img-jpegdec-mp", IMX_SC_R_MJPEG_DEC_MP, 1, false, 0 },
+ { "img-jpegdec-s0", IMX_SC_R_MJPEG_DEC_S0, 4, true, 0 },
+ { "img-jpegenc-mp", IMX_SC_R_MJPEG_ENC_MP, 1, false, 0 },
+ { "img-jpegenc-s0", IMX_SC_R_MJPEG_ENC_S0, 4, true, 0 },
};
static const struct imx_sc_pd_soc imx8qxp_scu_pd = {
diff --git a/drivers/firmware/meson/Kconfig b/drivers/firmware/meson/Kconfig
index 2671dcd0ad92..f2fdd3756648 100644
--- a/drivers/firmware/meson/Kconfig
+++ b/drivers/firmware/meson/Kconfig
@@ -3,8 +3,9 @@
# Amlogic Secure Monitor driver
#
config MESON_SM
- bool
- default ARCH_MESON
+ tristate "Amlogic Secure Monitor driver"
+ depends on ARCH_MESON || COMPILE_TEST
+ default y
depends on ARM64_4K_PAGES
help
Say y here to enable the Amlogic secure monitor driver
diff --git a/drivers/firmware/meson/meson_sm.c b/drivers/firmware/meson/meson_sm.c
index 2854b56f6e0b..77aa5c6398aa 100644
--- a/drivers/firmware/meson/meson_sm.c
+++ b/drivers/firmware/meson/meson_sm.c
@@ -331,3 +331,4 @@ static struct platform_driver meson_sm_driver = {
},
};
module_platform_driver_probe(meson_sm_driver, meson_sm_probe);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c
index 00af99b6f97c..f5fc429cae3f 100644
--- a/drivers/firmware/psci/psci.c
+++ b/drivers/firmware/psci/psci.c
@@ -58,15 +58,12 @@ typedef unsigned long (psci_fn)(unsigned long, unsigned long,
unsigned long, unsigned long);
static psci_fn *invoke_psci_fn;
-enum psci_function {
- PSCI_FN_CPU_SUSPEND,
- PSCI_FN_CPU_ON,
- PSCI_FN_CPU_OFF,
- PSCI_FN_MIGRATE,
- PSCI_FN_MAX,
-};
+static struct psci_0_1_function_ids psci_0_1_function_ids;
-static u32 psci_function_id[PSCI_FN_MAX];
+struct psci_0_1_function_ids get_psci_0_1_function_ids(void)
+{
+ return psci_0_1_function_ids;
+}
#define PSCI_0_2_POWER_STATE_MASK \
(PSCI_0_2_POWER_STATE_ID_MASK | \
@@ -146,7 +143,12 @@ static int psci_to_linux_errno(int errno)
return -EINVAL;
}
-static u32 psci_get_version(void)
+static u32 psci_0_1_get_version(void)
+{
+ return PSCI_VERSION(0, 1);
+}
+
+static u32 psci_0_2_get_version(void)
{
return invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
}
@@ -163,46 +165,80 @@ int psci_set_osi_mode(bool enable)
return psci_to_linux_errno(err);
}
-static int psci_cpu_suspend(u32 state, unsigned long entry_point)
+static int __psci_cpu_suspend(u32 fn, u32 state, unsigned long entry_point)
{
int err;
- u32 fn;
- fn = psci_function_id[PSCI_FN_CPU_SUSPEND];
err = invoke_psci_fn(fn, state, entry_point, 0);
return psci_to_linux_errno(err);
}
-static int psci_cpu_off(u32 state)
+static int psci_0_1_cpu_suspend(u32 state, unsigned long entry_point)
+{
+ return __psci_cpu_suspend(psci_0_1_function_ids.cpu_suspend,
+ state, entry_point);
+}
+
+static int psci_0_2_cpu_suspend(u32 state, unsigned long entry_point)
+{
+ return __psci_cpu_suspend(PSCI_FN_NATIVE(0_2, CPU_SUSPEND),
+ state, entry_point);
+}
+
+static int __psci_cpu_off(u32 fn, u32 state)
{
int err;
- u32 fn;
- fn = psci_function_id[PSCI_FN_CPU_OFF];
err = invoke_psci_fn(fn, state, 0, 0);
return psci_to_linux_errno(err);
}
-static int psci_cpu_on(unsigned long cpuid, unsigned long entry_point)
+static int psci_0_1_cpu_off(u32 state)
+{
+ return __psci_cpu_off(psci_0_1_function_ids.cpu_off, state);
+}
+
+static int psci_0_2_cpu_off(u32 state)
+{
+ return __psci_cpu_off(PSCI_0_2_FN_CPU_OFF, state);
+}
+
+static int __psci_cpu_on(u32 fn, unsigned long cpuid, unsigned long entry_point)
{
int err;
- u32 fn;
- fn = psci_function_id[PSCI_FN_CPU_ON];
err = invoke_psci_fn(fn, cpuid, entry_point, 0);
return psci_to_linux_errno(err);
}
-static int psci_migrate(unsigned long cpuid)
+static int psci_0_1_cpu_on(unsigned long cpuid, unsigned long entry_point)
+{
+ return __psci_cpu_on(psci_0_1_function_ids.cpu_on, cpuid, entry_point);
+}
+
+static int psci_0_2_cpu_on(unsigned long cpuid, unsigned long entry_point)
+{
+ return __psci_cpu_on(PSCI_FN_NATIVE(0_2, CPU_ON), cpuid, entry_point);
+}
+
+static int __psci_migrate(u32 fn, unsigned long cpuid)
{
int err;
- u32 fn;
- fn = psci_function_id[PSCI_FN_MIGRATE];
err = invoke_psci_fn(fn, cpuid, 0, 0);
return psci_to_linux_errno(err);
}
+static int psci_0_1_migrate(unsigned long cpuid)
+{
+ return __psci_migrate(psci_0_1_function_ids.migrate, cpuid);
+}
+
+static int psci_0_2_migrate(unsigned long cpuid)
+{
+ return __psci_migrate(PSCI_FN_NATIVE(0_2, MIGRATE), cpuid);
+}
+
static int psci_affinity_info(unsigned long target_affinity,
unsigned long lowest_affinity_level)
{
@@ -347,7 +383,7 @@ static void __init psci_init_system_suspend(void)
static void __init psci_init_cpu_suspend(void)
{
- int feature = psci_features(psci_function_id[PSCI_FN_CPU_SUSPEND]);
+ int feature = psci_features(PSCI_FN_NATIVE(0_2, CPU_SUSPEND));
if (feature != PSCI_RET_NOT_SUPPORTED)
psci_cpu_suspend_feature = feature;
@@ -421,24 +457,16 @@ static void __init psci_init_smccc(void)
static void __init psci_0_2_set_functions(void)
{
pr_info("Using standard PSCI v0.2 function IDs\n");
- psci_ops.get_version = psci_get_version;
-
- psci_function_id[PSCI_FN_CPU_SUSPEND] =
- PSCI_FN_NATIVE(0_2, CPU_SUSPEND);
- psci_ops.cpu_suspend = psci_cpu_suspend;
-
- psci_function_id[PSCI_FN_CPU_OFF] = PSCI_0_2_FN_CPU_OFF;
- psci_ops.cpu_off = psci_cpu_off;
-
- psci_function_id[PSCI_FN_CPU_ON] = PSCI_FN_NATIVE(0_2, CPU_ON);
- psci_ops.cpu_on = psci_cpu_on;
- psci_function_id[PSCI_FN_MIGRATE] = PSCI_FN_NATIVE(0_2, MIGRATE);
- psci_ops.migrate = psci_migrate;
-
- psci_ops.affinity_info = psci_affinity_info;
-
- psci_ops.migrate_info_type = psci_migrate_info_type;
+ psci_ops = (struct psci_operations){
+ .get_version = psci_0_2_get_version,
+ .cpu_suspend = psci_0_2_cpu_suspend,
+ .cpu_off = psci_0_2_cpu_off,
+ .cpu_on = psci_0_2_cpu_on,
+ .migrate = psci_0_2_migrate,
+ .affinity_info = psci_affinity_info,
+ .migrate_info_type = psci_migrate_info_type,
+ };
arm_pm_restart = psci_sys_reset;
@@ -450,7 +478,7 @@ static void __init psci_0_2_set_functions(void)
*/
static int __init psci_probe(void)
{
- u32 ver = psci_get_version();
+ u32 ver = psci_0_2_get_version();
pr_info("PSCIv%d.%d detected in firmware.\n",
PSCI_VERSION_MAJOR(ver),
@@ -514,24 +542,26 @@ static int __init psci_0_1_init(struct device_node *np)
pr_info("Using PSCI v0.1 Function IDs from DT\n");
+ psci_ops.get_version = psci_0_1_get_version;
+
if (!of_property_read_u32(np, "cpu_suspend", &id)) {
- psci_function_id[PSCI_FN_CPU_SUSPEND] = id;
- psci_ops.cpu_suspend = psci_cpu_suspend;
+ psci_0_1_function_ids.cpu_suspend = id;
+ psci_ops.cpu_suspend = psci_0_1_cpu_suspend;
}
if (!of_property_read_u32(np, "cpu_off", &id)) {
- psci_function_id[PSCI_FN_CPU_OFF] = id;
- psci_ops.cpu_off = psci_cpu_off;
+ psci_0_1_function_ids.cpu_off = id;
+ psci_ops.cpu_off = psci_0_1_cpu_off;
}
if (!of_property_read_u32(np, "cpu_on", &id)) {
- psci_function_id[PSCI_FN_CPU_ON] = id;
- psci_ops.cpu_on = psci_cpu_on;
+ psci_0_1_function_ids.cpu_on = id;
+ psci_ops.cpu_on = psci_0_1_cpu_on;
}
if (!of_property_read_u32(np, "migrate", &id)) {
- psci_function_id[PSCI_FN_MIGRATE] = id;
- psci_ops.migrate = psci_migrate;
+ psci_0_1_function_ids.migrate = id;
+ psci_ops.migrate = psci_0_1_migrate;
}
return 0;
diff --git a/drivers/firmware/tegra/bpmp-debugfs.c b/drivers/firmware/tegra/bpmp-debugfs.c
index c1bbba9ee93a..440d99c63638 100644
--- a/drivers/firmware/tegra/bpmp-debugfs.c
+++ b/drivers/firmware/tegra/bpmp-debugfs.c
@@ -412,16 +412,12 @@ static int bpmp_populate_debugfs_inband(struct tegra_bpmp *bpmp,
goto out;
}
- len = strlen(ppath) + strlen(name) + 1;
+ len = snprintf(pathbuf, pathlen, "%s%s/", ppath, name);
if (len >= pathlen) {
err = -EINVAL;
goto out;
}
- strncpy(pathbuf, ppath, pathlen);
- strncat(pathbuf, name, strlen(name));
- strcat(pathbuf, "/");
-
err = bpmp_populate_debugfs_inband(bpmp, dentry,
pathbuf);
if (err < 0)
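The rewrite relies on snprintf() returning the length the fully formatted string would have had, so one call both builds the path and detects truncation. Restated as a minimal idiom:

    /* snprintf() returns the untruncated length: >= size means it did not fit. */
    len = snprintf(pathbuf, pathlen, "%s%s/", ppath, name);
    if (len >= pathlen)
        return -EINVAL;     /* would have been truncated */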
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
index 896f53ec7857..235c7e7869aa 100644
--- a/drivers/firmware/ti_sci.c
+++ b/drivers/firmware/ti_sci.c
@@ -1703,14 +1703,14 @@ fail:
* @subtype: Resource assignment subtype that is being requested
* from the given device.
* @s_host: Host processor ID to which the resources are allocated
- * @range_start: Start index of the resource range
- * @range_num: Number of resources in the range
+ * @desc: Pointer to ti_sci_resource_desc to be updated with the
+ * resource range start index and number of resources
*
* Return: 0 if all went fine, else return appropriate error.
*/
static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
u32 dev_id, u8 subtype, u8 s_host,
- u16 *range_start, u16 *range_num)
+ struct ti_sci_resource_desc *desc)
{
struct ti_sci_msg_resp_get_resource_range *resp;
struct ti_sci_msg_req_get_resource_range *req;
@@ -1721,7 +1721,7 @@ static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
if (IS_ERR(handle))
return PTR_ERR(handle);
- if (!handle)
+ if (!handle || !desc)
return -EINVAL;
info = handle_to_ti_sci_info(handle);
@@ -1751,11 +1751,14 @@ static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
if (!ti_sci_is_response_ack(resp)) {
ret = -ENODEV;
- } else if (!resp->range_start && !resp->range_num) {
+ } else if (!resp->range_num && !resp->range_num_sec) {
+ /* Neither of the two resource ranges is valid */
ret = -ENODEV;
} else {
- *range_start = resp->range_start;
- *range_num = resp->range_num;
+ desc->start = resp->range_start;
+ desc->num = resp->range_num;
+ desc->start_sec = resp->range_start_sec;
+ desc->num_sec = resp->range_num_sec;
};
fail:
@@ -1771,18 +1774,18 @@ fail:
* @dev_id: TISCI device ID.
* @subtype: Resource assignment subtype that is being requested
* from the given device.
- * @range_start: Start index of the resource range
- * @range_num: Number of resources in the range
+ * @desc: Pointer to ti_sci_resource_desc to be updated with the
+ * resource range start index and number of resources
*
* Return: 0 if all went fine, else return appropriate error.
*/
static int ti_sci_cmd_get_resource_range(const struct ti_sci_handle *handle,
u32 dev_id, u8 subtype,
- u16 *range_start, u16 *range_num)
+ struct ti_sci_resource_desc *desc)
{
return ti_sci_get_resource_range(handle, dev_id, subtype,
TI_SCI_IRQ_SECONDARY_HOST_INVALID,
- range_start, range_num);
+ desc);
}
/**
@@ -1793,18 +1796,17 @@ static int ti_sci_cmd_get_resource_range(const struct ti_sci_handle *handle,
* @subtype: Resource assignment subtype that is being requested
* from the given device.
* @s_host: Host processor ID to which the resources are allocated
- * @range_start: Start index of the resource range
- * @range_num: Number of resources in the range
+ * @desc: Pointer to ti_sci_resource_desc to be updated with the
+ * resource range start index and number of resources
*
* Return: 0 if all went fine, else return appropriate error.
*/
static
int ti_sci_cmd_get_resource_range_from_shost(const struct ti_sci_handle *handle,
u32 dev_id, u8 subtype, u8 s_host,
- u16 *range_start, u16 *range_num)
+ struct ti_sci_resource_desc *desc)
{
- return ti_sci_get_resource_range(handle, dev_id, subtype, s_host,
- range_start, range_num);
+ return ti_sci_get_resource_range(handle, dev_id, subtype, s_host, desc);
}
/**
@@ -2047,28 +2049,17 @@ static int ti_sci_cmd_free_event_map(const struct ti_sci_handle *handle,
}
/**
- * ti_sci_cmd_ring_config() - configure RA ring
- * @handle: Pointer to TI SCI handle.
- * @valid_params: Bitfield defining validity of ring configuration
- * parameters
- * @nav_id: Device ID of Navigator Subsystem from which the ring is
- * allocated
- * @index: Ring index
- * @addr_lo: The ring base address lo 32 bits
- * @addr_hi: The ring base address hi 32 bits
- * @count: Number of ring elements
- * @mode: The mode of the ring
- * @size: The ring element size.
- * @order_id: Specifies the ring's bus order ID
+ * ti_sci_cmd_rm_ring_cfg() - Configure a NAVSS ring
+ * @handle: Pointer to TI SCI handle.
+ * @params: Pointer to ti_sci_msg_rm_ring_cfg ring config structure
*
* Return: 0 if all went well, else returns appropriate error value.
*
- * See @ti_sci_msg_rm_ring_cfg_req for more info.
+ * See @ti_sci_msg_rm_ring_cfg and @ti_sci_msg_rm_ring_cfg_req for
+ * more info.
*/
-static int ti_sci_cmd_ring_config(const struct ti_sci_handle *handle,
- u32 valid_params, u16 nav_id, u16 index,
- u32 addr_lo, u32 addr_hi, u32 count,
- u8 mode, u8 size, u8 order_id)
+static int ti_sci_cmd_rm_ring_cfg(const struct ti_sci_handle *handle,
+ const struct ti_sci_msg_rm_ring_cfg *params)
{
struct ti_sci_msg_rm_ring_cfg_req *req;
struct ti_sci_msg_hdr *resp;
@@ -2092,15 +2083,17 @@ static int ti_sci_cmd_ring_config(const struct ti_sci_handle *handle,
return ret;
}
req = (struct ti_sci_msg_rm_ring_cfg_req *)xfer->xfer_buf;
- req->valid_params = valid_params;
- req->nav_id = nav_id;
- req->index = index;
- req->addr_lo = addr_lo;
- req->addr_hi = addr_hi;
- req->count = count;
- req->mode = mode;
- req->size = size;
- req->order_id = order_id;
+ req->valid_params = params->valid_params;
+ req->nav_id = params->nav_id;
+ req->index = params->index;
+ req->addr_lo = params->addr_lo;
+ req->addr_hi = params->addr_hi;
+ req->count = params->count;
+ req->mode = params->mode;
+ req->size = params->size;
+ req->order_id = params->order_id;
+ req->virtid = params->virtid;
+ req->asel = params->asel;
ret = ti_sci_do_xfer(info, xfer);
if (ret) {
@@ -2109,90 +2102,11 @@ static int ti_sci_cmd_ring_config(const struct ti_sci_handle *handle,
}
resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
- ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
-
-fail:
- ti_sci_put_one_xfer(&info->minfo, xfer);
- dev_dbg(dev, "RM_RA:config ring %u ret:%d\n", index, ret);
- return ret;
-}
-
-/**
- * ti_sci_cmd_ring_get_config() - get RA ring configuration
- * @handle: Pointer to TI SCI handle.
- * @nav_id: Device ID of Navigator Subsystem from which the ring is
- * allocated
- * @index: Ring index
- * @addr_lo: Returns ring's base address lo 32 bits
- * @addr_hi: Returns ring's base address hi 32 bits
- * @count: Returns number of ring elements
- * @mode: Returns mode of the ring
- * @size: Returns ring element size
- * @order_id: Returns ring's bus order ID
- *
- * Return: 0 if all went well, else returns appropriate error value.
- *
- * See @ti_sci_msg_rm_ring_get_cfg_req for more info.
- */
-static int ti_sci_cmd_ring_get_config(const struct ti_sci_handle *handle,
- u32 nav_id, u32 index, u8 *mode,
- u32 *addr_lo, u32 *addr_hi,
- u32 *count, u8 *size, u8 *order_id)
-{
- struct ti_sci_msg_rm_ring_get_cfg_resp *resp;
- struct ti_sci_msg_rm_ring_get_cfg_req *req;
- struct ti_sci_xfer *xfer;
- struct ti_sci_info *info;
- struct device *dev;
- int ret = 0;
-
- if (IS_ERR_OR_NULL(handle))
- return -EINVAL;
-
- info = handle_to_ti_sci_info(handle);
- dev = info->dev;
-
- xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_RING_GET_CFG,
- TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
- sizeof(*req), sizeof(*resp));
- if (IS_ERR(xfer)) {
- ret = PTR_ERR(xfer);
- dev_err(dev,
- "RM_RA:Message get config failed(%d)\n", ret);
- return ret;
- }
- req = (struct ti_sci_msg_rm_ring_get_cfg_req *)xfer->xfer_buf;
- req->nav_id = nav_id;
- req->index = index;
-
- ret = ti_sci_do_xfer(info, xfer);
- if (ret) {
- dev_err(dev, "RM_RA:Mbox get config send fail %d\n", ret);
- goto fail;
- }
-
- resp = (struct ti_sci_msg_rm_ring_get_cfg_resp *)xfer->xfer_buf;
-
- if (!ti_sci_is_response_ack(resp)) {
- ret = -ENODEV;
- } else {
- if (mode)
- *mode = resp->mode;
- if (addr_lo)
- *addr_lo = resp->addr_lo;
- if (addr_hi)
- *addr_hi = resp->addr_hi;
- if (count)
- *count = resp->count;
- if (size)
- *size = resp->size;
- if (order_id)
- *order_id = resp->order_id;
- };
+ ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
fail:
ti_sci_put_one_xfer(&info->minfo, xfer);
- dev_dbg(dev, "RM_RA:get config ring %u ret:%d\n", index, ret);
+ dev_dbg(dev, "RM_RA:config ring %u ret:%d\n", params->index, ret);
return ret;
}
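A minimal sketch of a caller under the consolidated interface (ringacc_dev_id, ring_id and ring_mode are placeholders; the ops member name is assumed from the rops assignment later in this patch):

	struct ti_sci_msg_rm_ring_cfg ring_cfg = { 0 };

	ring_cfg.nav_id = ringacc_dev_id;	/* Navigator Subsystem device ID */
	ring_cfg.index = ring_id;		/* ring to configure */
	ring_cfg.mode = ring_mode;
	ring_cfg.valid_params = BIT(3);		/* bit 3 = "mode" valid, see ti_sci.h below */

	ret = handle->ops.rm_ring_ops.set_cfg(handle, &ring_cfg);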
@@ -2362,6 +2276,8 @@ static int ti_sci_cmd_rm_udmap_tx_ch_cfg(const struct ti_sci_handle *handle,
req->fdepth = params->fdepth;
req->tx_sched_priority = params->tx_sched_priority;
req->tx_burst_size = params->tx_burst_size;
+ req->tx_tdtype = params->tx_tdtype;
+ req->extended_ch_type = params->extended_ch_type;
ret = ti_sci_do_xfer(info, xfer);
if (ret) {
@@ -2921,8 +2837,7 @@ static void ti_sci_setup_ops(struct ti_sci_info *info)
iops->free_irq = ti_sci_cmd_free_irq;
iops->free_event_map = ti_sci_cmd_free_event_map;
- rops->config = ti_sci_cmd_ring_config;
- rops->get_config = ti_sci_cmd_ring_get_config;
+ rops->set_cfg = ti_sci_cmd_rm_ring_cfg;
psilops->pair = ti_sci_cmd_rm_psil_pair;
psilops->unpair = ti_sci_cmd_rm_psil_unpair;
@@ -3157,12 +3072,18 @@ u16 ti_sci_get_free_resource(struct ti_sci_resource *res)
raw_spin_lock_irqsave(&res->lock, flags);
for (set = 0; set < res->sets; set++) {
- free_bit = find_first_zero_bit(res->desc[set].res_map,
- res->desc[set].num);
- if (free_bit != res->desc[set].num) {
- set_bit(free_bit, res->desc[set].res_map);
+ struct ti_sci_resource_desc *desc = &res->desc[set];
+ int res_count = desc->num + desc->num_sec;
+
+ free_bit = find_first_zero_bit(desc->res_map, res_count);
+ if (free_bit != res_count) {
+ set_bit(free_bit, desc->res_map);
raw_spin_unlock_irqrestore(&res->lock, flags);
- return res->desc[set].start + free_bit;
+
+ if (desc->num && free_bit < desc->num)
+ return desc->start + free_bit;
+ else
+ return desc->start_sec + free_bit;
}
}
raw_spin_unlock_irqrestore(&res->lock, flags);
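For instance, a descriptor with only a secondary range (num = 0, num_sec = 2, start_sec = 100) hands out ids 100 and 101: primary-range bits come first in the bitmap, and any free bit at or beyond num is resolved against start_sec rather than start.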
@@ -3183,10 +3104,14 @@ void ti_sci_release_resource(struct ti_sci_resource *res, u16 id)
raw_spin_lock_irqsave(&res->lock, flags);
for (set = 0; set < res->sets; set++) {
- if (res->desc[set].start <= id &&
- (res->desc[set].num + res->desc[set].start) > id)
- clear_bit(id - res->desc[set].start,
- res->desc[set].res_map);
+ struct ti_sci_resource_desc *desc = &res->desc[set];
+
+ if (desc->num && desc->start <= id &&
+ (desc->start + desc->num) > id)
+ clear_bit(id - desc->start, desc->res_map);
+ else if (desc->num_sec && desc->start_sec <= id &&
+ (desc->start_sec + desc->num_sec) > id)
+ clear_bit(id - desc->start_sec, desc->res_map);
}
raw_spin_unlock_irqrestore(&res->lock, flags);
}
@@ -3203,7 +3128,7 @@ u32 ti_sci_get_num_resources(struct ti_sci_resource *res)
u32 set, count = 0;
for (set = 0; set < res->sets; set++)
- count += res->desc[set].num;
+ count += res->desc[set].num + res->desc[set].num_sec;
return count;
}
@@ -3227,7 +3152,7 @@ devm_ti_sci_get_resource_sets(const struct ti_sci_handle *handle,
{
struct ti_sci_resource *res;
bool valid_set = false;
- int i, ret;
+ int i, ret, res_count;
res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
if (!res)
@@ -3242,23 +3167,23 @@ devm_ti_sci_get_resource_sets(const struct ti_sci_handle *handle,
for (i = 0; i < res->sets; i++) {
ret = handle->ops.rm_core_ops.get_range(handle, dev_id,
sub_types[i],
- &res->desc[i].start,
- &res->desc[i].num);
+ &res->desc[i]);
if (ret) {
dev_dbg(dev, "dev = %d subtype %d not allocated for this host\n",
dev_id, sub_types[i]);
- res->desc[i].start = 0;
- res->desc[i].num = 0;
+ memset(&res->desc[i], 0, sizeof(res->desc[i]));
continue;
}
- dev_dbg(dev, "dev = %d, subtype = %d, start = %d, num = %d\n",
+ dev_dbg(dev, "dev/sub_type: %d/%d, start/num: %d/%d | %d/%d\n",
dev_id, sub_types[i], res->desc[i].start,
- res->desc[i].num);
+ res->desc[i].num, res->desc[i].start_sec,
+ res->desc[i].num_sec);
valid_set = true;
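+ /* A single bitmap covers both ranges: primary bits first, then the secondary range */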
+ res_count = res->desc[i].num + res->desc[i].num_sec;
res->desc[i].res_map =
- devm_kzalloc(dev, BITS_TO_LONGS(res->desc[i].num) *
+ devm_kzalloc(dev, BITS_TO_LONGS(res_count) *
sizeof(*res->desc[i].res_map), GFP_KERNEL);
if (!res->desc[i].res_map)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/firmware/ti_sci.h b/drivers/firmware/ti_sci.h
index 57cd04062994..ef3a8214d002 100644
--- a/drivers/firmware/ti_sci.h
+++ b/drivers/firmware/ti_sci.h
@@ -49,7 +49,6 @@
#define TI_SCI_MSG_RM_RING_RECONFIG 0x1102
#define TI_SCI_MSG_RM_RING_RESET 0x1103
#define TI_SCI_MSG_RM_RING_CFG 0x1110
-#define TI_SCI_MSG_RM_RING_GET_CFG 0x1111
/* PSI-L requests */
#define TI_SCI_MSG_RM_PSIL_PAIR 0x1280
@@ -574,8 +573,10 @@ struct ti_sci_msg_req_get_resource_range {
/**
* struct ti_sci_msg_resp_get_resource_range - Response to resource get range.
* @hdr: Generic Header
- * @range_start: Start index of the resource range.
- * @range_num: Number of resources in the range.
+ * @range_start: Start index of the first resource range.
+ * @range_num: Number of resources in the first range.
+ * @range_start_sec: Start index of the second resource range.
+ * @range_num_sec: Number of resources in the second range.
*
* Response to request TI_SCI_MSG_GET_RESOURCE_RANGE.
*/
@@ -583,6 +584,8 @@ struct ti_sci_msg_resp_get_resource_range {
struct ti_sci_msg_hdr hdr;
u16 range_start;
u16 range_num;
+ u16 range_start_sec;
+ u16 range_num_sec;
} __packed;
/**
@@ -656,6 +659,8 @@ struct ti_sci_msg_req_manage_irq {
* 3 - Valid bit for @tisci_msg_rm_ring_cfg_req mode
* 4 - Valid bit for @tisci_msg_rm_ring_cfg_req size
* 5 - Valid bit for @tisci_msg_rm_ring_cfg_req order_id
+ * 6 - Valid bit for @tisci_msg_rm_ring_cfg_req virtid
+ * 7 - Valid bit for @tisci_msg_rm_ring_cfg_req ASEL
* @nav_id: Device ID of Navigator Subsystem from which the ring is allocated
* @index: ring index to be configured.
* @addr_lo: 32 LSBs of ring base address to be programmed into the ring's
@@ -669,6 +674,9 @@ struct ti_sci_msg_req_manage_irq {
* the formula (log2(size_bytes) - 2), where size_bytes cannot be
* greater than 256.
* @order_id: Specifies the ring's bus order ID.
+ * @virtid: Ring virt ID value
+ * @asel: Ring ASEL (address select) value to be set into the ASEL field of the
+ * ring's RING_BA_HI register.
*/
struct ti_sci_msg_rm_ring_cfg_req {
struct ti_sci_msg_hdr hdr;
@@ -681,49 +689,8 @@ struct ti_sci_msg_rm_ring_cfg_req {
u8 mode;
u8 size;
u8 order_id;
-} __packed;
-
-/**
- * struct ti_sci_msg_rm_ring_get_cfg_req - Get RA ring's configuration
- *
- * Gets the configuration of the non-real-time register fields of a ring. The
- * host, or a supervisor of the host, who owns the ring must be the requesting
- * host. The values of the non-real-time registers are returned in
- * @ti_sci_msg_rm_ring_get_cfg_resp.
- *
- * @hdr: Generic Header
- * @nav_id: Device ID of Navigator Subsystem from which the ring is allocated
- * @index: ring index.
- */
-struct ti_sci_msg_rm_ring_get_cfg_req {
- struct ti_sci_msg_hdr hdr;
- u16 nav_id;
- u16 index;
-} __packed;
-
-/**
- * struct ti_sci_msg_rm_ring_get_cfg_resp - Ring get configuration response
- *
- * Response received by host processor after RM has handled
- * @ti_sci_msg_rm_ring_get_cfg_req. The response contains the ring's
- * non-real-time register values.
- *
- * @hdr: Generic Header
- * @addr_lo: Ring 32 LSBs of base address
- * @addr_hi: Ring 16 MSBs of base address.
- * @count: Ring number of elements.
- * @mode: Ring mode.
- * @size: encoded Ring element size
- * @order_id: ing order ID.
- */
-struct ti_sci_msg_rm_ring_get_cfg_resp {
- struct ti_sci_msg_hdr hdr;
- u32 addr_lo;
- u32 addr_hi;
- u32 count;
- u8 mode;
- u8 size;
- u8 order_id;
+ u16 virtid;
+ u8 asel;
} __packed;
/**
@@ -910,6 +877,8 @@ struct rm_ti_sci_msg_udmap_rx_flow_opt_cfg {
* 12 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_credit_count
* 13 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::fdepth
* 14 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_burst_size
+ * 15 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_tdtype
+ * 16 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::extended_ch_type
*
* @nav_id: SoC device ID of Navigator Subsystem where tx channel is located
*
@@ -973,6 +942,15 @@ struct rm_ti_sci_msg_udmap_rx_flow_opt_cfg {
*
* @tx_burst_size: UDMAP transmit channel burst size configuration to be
* programmed into the tx_burst_size field of the TCHAN_TCFG register.
+ *
+ * @tx_tdtype: UDMAP transmit channel teardown type configuration to be
+ * programmed into the tdtype field of the TCHAN_TCFG register:
+ * 0 - Return immediately
+ * 1 - Wait for completion message from remote peer
+ *
+ * @extended_ch_type: Valid for BCDMA.
+ * 0 - the channel is a split tx channel (tchan)
+ * 1 - the channel is a block copy channel (bchan)
*/
struct ti_sci_msg_rm_udmap_tx_ch_cfg_req {
struct ti_sci_msg_hdr hdr;
@@ -994,6 +972,8 @@ struct ti_sci_msg_rm_udmap_tx_ch_cfg_req {
u16 fdepth;
u8 tx_sched_priority;
u8 tx_burst_size;
+ u8 tx_tdtype;
+ u8 extended_ch_type;
} __packed;
/**
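A hedged illustration of a DMA client opting into the two new fields. The client-side struct name is taken from the @ref targets above; no TI_SCI_MSG_VALUE_* macros appear in this patch, so raw BIT() numbers stand in for valid bits 15 and 16:

	struct ti_sci_msg_rm_udmap_tx_ch_cfg tx_cfg = { 0 };

	tx_cfg.tx_tdtype = 1;			/* teardown waits for the remote peer */
	tx_cfg.extended_ch_type = 1;		/* BCDMA block copy channel (bchan) */
	tx_cfg.valid_params = BIT(15) | BIT(16);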
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index fd95edeb702b..7eb9958662dd 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -615,13 +615,13 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_get_pll_frac_data);
/**
* zynqmp_pm_set_sd_tapdelay() - Set tap delay for the SD device
*
- * @node_id Node ID of the device
- * @type Type of tap delay to set (input/output)
- * @value Value to set fot the tap delay
+ * @node_id: Node ID of the device
+ * @type: Type of tap delay to set (input/output)
+ * @value: Value to set for the tap delay
*
* This function sets input/output tap delay for the SD device.
*
- * @return Returns status, either success or error+reason
+ * Return: Returns status, either success or error+reason
*/
int zynqmp_pm_set_sd_tapdelay(u32 node_id, u32 type, u32 value)
{
@@ -633,12 +633,12 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_set_sd_tapdelay);
/**
* zynqmp_pm_sd_dll_reset() - Reset DLL logic
*
- * @node_id Node ID of the device
- * @type Reset type
+ * @node_id: Node ID of the device
+ * @type: Reset type
*
* This function resets DLL logic for the SD device.
*
- * @return Returns status, either success or error+reason
+ * Return: Returns status, either success or error+reason
*/
int zynqmp_pm_sd_dll_reset(u32 node_id, u32 type)
{
@@ -649,12 +649,12 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_sd_dll_reset);
/**
* zynqmp_pm_write_ggs() - PM API for writing global general storage (ggs)
- * @index GGS register index
- * @value Register value to be written
+ * @index: GGS register index
+ * @value: Register value to be written
*
* This function writes value to GGS register.
*
- * @return Returns status, either success or error+reason
+ * Return: Returns status, either success or error+reason
*/
int zynqmp_pm_write_ggs(u32 index, u32 value)
{
@@ -665,12 +665,12 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_write_ggs);
/**
* zynqmp_pm_read_ggs() - PM API for reading global general storage (ggs)
- * @index GGS register index
- * @value Register value to be written
+ * @index: GGS register index
+ * @value: Register value to be read
*
* This function returns GGS register value.
*
- * @return Returns status, either success or error+reason
+ * Return: Returns status, either success or error+reason
*/
int zynqmp_pm_read_ggs(u32 index, u32 *value)
{
@@ -682,12 +682,12 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_read_ggs);
/**
* zynqmp_pm_write_pggs() - PM API for writing persistent global general
* storage (pggs)
- * @index PGGS register index
- * @value Register value to be written
+ * @index: PGGS register index
+ * @value: Register value to be written
*
* This function writes value to PGGS register.
*
- * @return Returns status, either success or error+reason
+ * Return: Returns status, either success or error+reason
*/
int zynqmp_pm_write_pggs(u32 index, u32 value)
{
@@ -699,12 +699,12 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_write_pggs);
/**
* zynqmp_pm_read_pggs() - PM API for reading persistent global general
* storage (pggs)
- * @index PGGS register index
- * @value Register value to be written
+ * @index: PGGS register index
+ * @value: Register value to be read
*
* This function returns PGGS register value.
*
- * @return Returns status, either success or error+reason
+ * Return: Returns status, either success or error+reason
*/
int zynqmp_pm_read_pggs(u32 index, u32 *value)
{
@@ -715,12 +715,12 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_read_pggs);
/**
* zynqmp_pm_set_boot_health_status() - PM API for setting healthy boot status
- * @value Status value to be written
+ * @value: Status value to be written
*
* This function sets healthy bit value to indicate boot health status
* to firmware.
*
- * @return Returns status, either success or error+reason
+ * Return: Returns status, either success or error+reason
*/
int zynqmp_pm_set_boot_health_status(u32 value)
{
@@ -815,10 +815,10 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_fpga_get_status);
* zynqmp_pm_init_finalize() - PM call to inform firmware that the caller
* master has initialized its own power management
*
+ * Return: Returns status, either success or error+reason
+ *
* This API function is to be used to notify the power management controller
* about the completed power management initialization.
- *
- * Return: Returns status, either success or error+reason
*/
int zynqmp_pm_init_finalize(void)
{
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 5d4de5cd6759..dea65d85594f 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -59,8 +59,9 @@ config DEBUG_GPIO
that are most common when setting up new platforms or boards.
config GPIO_SYSFS
- bool "/sys/class/gpio/... (sysfs interface)"
+ bool "/sys/class/gpio/... (sysfs interface)" if EXPERT
depends on SYSFS
+ select GPIO_CDEV # We need to encourage the new ABI
help
Say Y here to add the legacy sysfs interface for GPIOs.
@@ -255,6 +256,7 @@ config GPIO_EP93XX
config GPIO_EXAR
tristate "Support for GPIO pins on XR17V352/354/358"
depends on SERIAL_8250_EXAR
+ select REGMAP_MMIO
help
Selecting this option will enable handling of GPIO pins present
on Exar XR17V352/354/358 chips.
@@ -296,6 +298,17 @@ config GPIO_GRGPIO
Select this to support Aeroflex Gaisler GRGPIO cores from the GRLIB
VHDL IP core library.
+config GPIO_HISI
+ tristate "HiSilicon GPIO controller driver"
+ depends on (ARM64 && ACPI) || COMPILE_TEST
+ select GPIO_GENERIC
+ select GPIOLIB_IRQCHIP
+ help
+ Say Y or M here to build support for the HiSilicon GPIO controller
+ driver.
+ This GPIO controller supports double-edge interrupts and multi-core
+ concurrent access.
+
config GPIO_HLWD
tristate "Nintendo Wii (Hollywood) GPIO"
depends on OF_GPIO
@@ -508,7 +521,8 @@ config GPIO_SAMA5D2_PIOBU
config GPIO_SIFIVE
bool "SiFive GPIO support"
- depends on OF_GPIO && IRQ_DOMAIN_HIERARCHY
+ depends on OF_GPIO
+ select IRQ_DOMAIN_HIERARCHY
select GPIO_GENERIC
select GPIOLIB_IRQCHIP
select REGMAP_MMIO
@@ -584,6 +598,8 @@ config GPIO_TEGRA
default ARCH_TEGRA
depends on ARCH_TEGRA || COMPILE_TEST
depends on OF_GPIO
+ select GPIOLIB_IRQCHIP
+ select IRQ_DOMAIN_HIERARCHY
help
Say yes here to support GPIO pins on NVIDIA Tegra SoCs.
@@ -737,6 +753,17 @@ config GPIO_AMD_FCH
Note: This driver doesn't register itself automatically, as it
needs to be provided with platform specific configuration.
(See eg. CONFIG_PCENGINES_APU2.)
+
+config GPIO_MSC313
+ bool "MStar MSC313 GPIO support"
+ depends on ARCH_MSTARV7
+ default ARCH_MSTARV7
+ select GPIOLIB_IRQCHIP
+ select IRQ_DOMAIN_HIERARCHY
+ help
+ Say Y here to support the main GPIO block on MStar/SigmaStar
+ ARMv7 based SoCs.
+
endmenu
menu "Port-mapped I/O GPIO drivers"
@@ -1590,6 +1617,8 @@ config GPIO_VIPERBOARD
endmenu
+menu "Virtual GPIO drivers"
+
config GPIO_AGGREGATOR
tristate "GPIO Aggregator"
help
@@ -1613,4 +1642,6 @@ config GPIO_MOCKUP
tools/testing/selftests/gpio/gpio-mockup.sh. Reference the usage in
it.
+endmenu
+
endif
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 09dada80ac34..35e3b6026665 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -63,6 +63,7 @@ obj-$(CONFIG_GPIO_GE_FPGA) += gpio-ge.o
obj-$(CONFIG_GPIO_GPIO_MM) += gpio-gpio-mm.o
obj-$(CONFIG_GPIO_GRGPIO) += gpio-grgpio.o
obj-$(CONFIG_GPIO_GW_PLD) += gpio-gw-pld.o
+obj-$(CONFIG_GPIO_HISI) += gpio-hisi.o
obj-$(CONFIG_GPIO_HLWD) += gpio-hlwd.o
obj-$(CONFIG_HTC_EGPIO) += gpio-htc-egpio.o
obj-$(CONFIG_GPIO_ICH) += gpio-ich.o
@@ -101,6 +102,7 @@ obj-$(CONFIG_GPIO_MOCKUP) += gpio-mockup.o
obj-$(CONFIG_GPIO_MOXTET) += gpio-moxtet.o
obj-$(CONFIG_GPIO_MPC5200) += gpio-mpc5200.o
obj-$(CONFIG_GPIO_MPC8XXX) += gpio-mpc8xxx.o
+obj-$(CONFIG_GPIO_MSC313) += gpio-msc313.o
obj-$(CONFIG_GPIO_MSIC) += gpio-msic.o
obj-$(CONFIG_GPIO_MT7621) += gpio-mt7621.o
obj-$(CONFIG_GPIO_MVEBU) += gpio-mvebu.o
diff --git a/drivers/gpio/TODO b/drivers/gpio/TODO
index e560e45e84f8..0229fa79499e 100644
--- a/drivers/gpio/TODO
+++ b/drivers/gpio/TODO
@@ -129,58 +129,9 @@ GPIOLIB irqchip
The GPIOLIB irqchip is a helper irqchip for "simple cases" that should
try to cover any generic kind of irqchip cascaded from a GPIO.
-- Convert all the GPIOLIB_IRQCHIP users to pass an irqchip template,
- parent and flags before calling [devm_]gpiochip_add[_data]().
- Currently we set up the irqchip after setting up the gpiochip
- using gpiochip_irqchip_add() and gpiochip_set_[chained|nested]_irqchip().
- This is too complex, so convert all users over to just set up
- the irqchip before registering the gpio_chip, typical example:
-
- /* Typical state container with dynamic irqchip */
- struct my_gpio {
- struct gpio_chip gc;
- struct irq_chip irq;
- };
-
- int irq; /* from platform etc */
- struct my_gpio *g;
- struct gpio_irq_chip *girq;
-
- /* Set up the irqchip dynamically */
- g->irq.name = "my_gpio_irq";
- g->irq.irq_ack = my_gpio_ack_irq;
- g->irq.irq_mask = my_gpio_mask_irq;
- g->irq.irq_unmask = my_gpio_unmask_irq;
- g->irq.irq_set_type = my_gpio_set_irq_type;
-
- /* Get a pointer to the gpio_irq_chip */
- girq = &g->gc.irq;
- girq->chip = &g->irq;
- girq->parent_handler = ftgpio_gpio_irq_handler;
- girq->num_parents = 1;
- girq->parents = devm_kcalloc(dev, 1, sizeof(*girq->parents),
- GFP_KERNEL);
- if (!girq->parents)
- return -ENOMEM;
- girq->default_type = IRQ_TYPE_NONE;
- girq->handler = handle_bad_irq;
- girq->parents[0] = irq;
-
- When this is done, we will delete the old APIs for instatiating
- GPIOLIB_IRQCHIP and simplify the code.
-
- Look over and identify any remaining easily converted drivers and
- dry-code conversions to gpiolib irqchip for maintainers to test
-- Drop gpiochip_set_chained_irqchip() when all the chained irqchips
- have been converted to the above infrastructure.
-
-- Add more infrastructure to make it possible to also pass a threaded
- irqchip in struct gpio_irq_chip.
-
-- Drop gpiochip_irqchip_add_nested() when all the chained irqchips
- have been converted to the above infrastructure.
-
Increase integration with pin control
@@ -191,3 +142,39 @@ use of the global GPIO numbers. Once the above is complete, it may
make sense to simply join the subsystems into one and make pin
multiplexing, pin configuration, GPIO, etc selectable options in one
and the same pin control and GPIO subsystem.
+
+
+Debugfs in place of sysfs
+
+The old sysfs code that enables simple uses of GPIOs from the
+command line is still popular despite the existence of the proper
+character device. The reason is that it is simple to use on
+root filesystems where you only have a minimal set of tools such
+as "cat", "echo" etc.
+
+The old sysfs interface still needs to be strongly deprecated and
+removed, as it relies on the global GPIO numberspace, which assumes
+a strict ordering of global GPIO numbers that does not change
+between boots and is independent of probe order.
+
+To solve this and provide an ABI that people can use for hacks
+and development, implement a debugfs interface to manipulate
+GPIO lines that can do everything that sysfs can do today: one
+directory per gpiochip and one file entry per line:
+
+/sys/kernel/debug/gpiochip/gpiochip0
+/sys/kernel/debug/gpiochip/gpiochip0/gpio0
+/sys/kernel/debug/gpiochip/gpiochip0/gpio1
+/sys/kernel/debug/gpiochip/gpiochip0/gpio2
+/sys/kernel/debug/gpiochip/gpiochip0/gpio3
+...
+/sys/kernel/debug/gpiochip/gpiochip1
+/sys/kernel/debug/gpiochip/gpiochip1/gpio0
+/sys/kernel/debug/gpiochip/gpiochip1/gpio1
+...
+
+The exact files and design of the debugfs interface can be
+discussed but the idea is to provide a low-level access point
+for debugging and hacking and to expose all lines without the
+need of any exporting. Also provide ample ammunition to shoot
+oneself in the foot, because this is debugfs after all.
diff --git a/drivers/gpio/gpio-104-idi-48.c b/drivers/gpio/gpio-104-idi-48.c
index 94c3a9bc4e75..b132afaf7d99 100644
--- a/drivers/gpio/gpio-104-idi-48.c
+++ b/drivers/gpio/gpio-104-idi-48.c
@@ -132,8 +132,7 @@ static void idi_48_irq_mask(struct irq_data *data)
outb(idi48gpio->cos_enb, idi48gpio->base + 7);
- raw_spin_unlock_irqrestore(&idi48gpio->lock,
- flags);
+ raw_spin_unlock_irqrestore(&idi48gpio->lock, flags);
}
return;
@@ -166,8 +165,7 @@ static void idi_48_irq_unmask(struct irq_data *data)
outb(idi48gpio->cos_enb, idi48gpio->base + 7);
- raw_spin_unlock_irqrestore(&idi48gpio->lock,
- flags);
+ raw_spin_unlock_irqrestore(&idi48gpio->lock, flags);
}
return;
diff --git a/drivers/gpio/gpio-amd8111.c b/drivers/gpio/gpio-amd8111.c
index fdcebe59510d..14e6b3e64add 100644
--- a/drivers/gpio/gpio-amd8111.c
+++ b/drivers/gpio/gpio-amd8111.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPIO driver for AMD 8111 south bridges
*
@@ -20,10 +21,6 @@
* Hardware driver for Intel i810 Random Number Generator (RNG)
* Copyright 2000,2001 Jeff Garzik <jgarzik@pobox.com>
* Copyright 2000,2001 Philipp Rumpf <prumpf@mandrakesoft.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/ioport.h>
#include <linux/module.h>
@@ -179,7 +176,6 @@ static int __init amd_gpio_init(void)
struct pci_dev *pdev = NULL;
const struct pci_device_id *ent;
-
/* We look for our device - AMD South Bridge
* I don't know about a system with two such bridges,
* so we can assume that there is max. one device.
@@ -223,11 +219,10 @@ found:
spin_lock_init(&gp.lock);
- printk(KERN_INFO "AMD-8111 GPIO detected\n");
+ dev_info(&pdev->dev, "AMD-8111 GPIO detected\n");
err = gpiochip_add_data(&gp.chip, &gp);
if (err) {
- printk(KERN_ERR "GPIO registering failed (%d)\n",
- err);
+ dev_err(&pdev->dev, "GPIO registering failed (%d)\n", err);
ioport_unmap(gp.pm);
goto out;
}
diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c
index d5359341cc6b..678ddd375891 100644
--- a/drivers/gpio/gpio-ath79.c
+++ b/drivers/gpio/gpio-ath79.c
@@ -123,6 +123,7 @@ static int ath79_gpio_irq_set_type(struct irq_data *data,
switch (flow_type) {
case IRQ_TYPE_EDGE_RISING:
polarity |= mask;
+ fallthrough;
case IRQ_TYPE_EDGE_FALLING:
case IRQ_TYPE_EDGE_BOTH:
break;
diff --git a/drivers/gpio/gpio-bt8xx.c b/drivers/gpio/gpio-bt8xx.c
index a6f30ad6750f..7920cf256798 100644
--- a/drivers/gpio/gpio-bt8xx.c
+++ b/drivers/gpio/gpio-bt8xx.c
@@ -175,13 +175,13 @@ static int bt8xxgpio_probe(struct pci_dev *dev,
err = pci_enable_device(dev);
if (err) {
- printk(KERN_ERR "bt8xxgpio: Can't enable device.\n");
+ dev_err(&dev->dev, "can't enable device.\n");
return err;
}
if (!devm_request_mem_region(&dev->dev, pci_resource_start(dev, 0),
pci_resource_len(dev, 0),
"bt8xxgpio")) {
- printk(KERN_WARNING "bt8xxgpio: Can't request iomem (0x%llx).\n",
+ dev_warn(&dev->dev, "can't request iomem (0x%llx).\n",
(unsigned long long)pci_resource_start(dev, 0));
err = -EBUSY;
goto err_disable;
@@ -191,7 +191,7 @@ static int bt8xxgpio_probe(struct pci_dev *dev,
bg->mmio = devm_ioremap(&dev->dev, pci_resource_start(dev, 0), 0x1000);
if (!bg->mmio) {
- printk(KERN_ERR "bt8xxgpio: ioremap() failed\n");
+ dev_err(&dev->dev, "ioremap() failed\n");
err = -EIO;
goto err_disable;
}
@@ -207,7 +207,7 @@ static int bt8xxgpio_probe(struct pci_dev *dev,
bt8xxgpio_gpio_setup(bg);
err = gpiochip_add_data(&bg->gpio, bg);
if (err) {
- printk(KERN_ERR "bt8xxgpio: Failed to register GPIOs\n");
+ dev_err(&dev->dev, "failed to register GPIOs\n");
goto err_disable;
}
diff --git a/drivers/gpio/gpio-cs5535.c b/drivers/gpio/gpio-cs5535.c
index 53b24e3ae7de..6da3a247614a 100644
--- a/drivers/gpio/gpio-cs5535.c
+++ b/drivers/gpio/gpio-cs5535.c
@@ -345,12 +345,8 @@ static int cs5535_gpio_probe(struct platform_device *pdev)
mask_orig, mask);
/* finally, register with the generic GPIO API */
- err = devm_gpiochip_add_data(&pdev->dev, &cs5535_gpio_chip.chip,
- &cs5535_gpio_chip);
- if (err)
- return err;
-
- return 0;
+ return devm_gpiochip_add_data(&pdev->dev, &cs5535_gpio_chip.chip,
+ &cs5535_gpio_chip);
}
static struct platform_driver cs5535_gpio_driver = {
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
index 4275c18a097a..d3233cc4b76b 100644
--- a/drivers/gpio/gpio-dwapb.c
+++ b/drivers/gpio/gpio-dwapb.c
@@ -616,10 +616,9 @@ static int dwapb_get_reset(struct dwapb_gpio *gpio)
int err;
gpio->rst = devm_reset_control_get_optional_shared(gpio->dev, NULL);
- if (IS_ERR(gpio->rst)) {
- dev_err(gpio->dev, "Cannot get reset descriptor\n");
- return PTR_ERR(gpio->rst);
- }
+ if (IS_ERR(gpio->rst))
+ return dev_err_probe(gpio->dev, PTR_ERR(gpio->rst),
+ "Cannot get reset descriptor\n");
err = reset_control_deassert(gpio->rst);
if (err) {
diff --git a/drivers/gpio/gpio-exar.c b/drivers/gpio/gpio-exar.c
index b1accfba017d..d37de78247a6 100644
--- a/drivers/gpio/gpio-exar.c
+++ b/drivers/gpio/gpio-exar.c
@@ -4,14 +4,17 @@
*
* Copyright (C) 2015 Sudip Mukherjee <sudip.mukherjee@codethink.co.uk>
*/
+
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/gpio/driver.h>
+#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#define EXAR_OFFSET_MPIOLVL_LO 0x90
#define EXAR_OFFSET_MPIOSEL_LO 0x93
@@ -24,60 +27,39 @@ static DEFINE_IDA(ida_index);
struct exar_gpio_chip {
struct gpio_chip gpio_chip;
- struct mutex lock;
+ struct regmap *regmap;
int index;
- void __iomem *regs;
char name[20];
unsigned int first_pin;
};
-static void exar_update(struct gpio_chip *chip, unsigned int reg, int val,
- unsigned int offset)
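+/*
+ * The MPIO lines are split across _LO (pins 0-7) and _HI (pins 8-15)
+ * registers; these helpers derive the register from the absolute pin
+ * number while exar_offset_to_bit() picks the bit inside it.
+ */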
+static unsigned int
+exar_offset_to_sel_addr(struct exar_gpio_chip *exar_gpio, unsigned int offset)
{
- struct exar_gpio_chip *exar_gpio = gpiochip_get_data(chip);
- int temp;
-
- mutex_lock(&exar_gpio->lock);
- temp = readb(exar_gpio->regs + reg);
- temp &= ~BIT(offset);
- if (val)
- temp |= BIT(offset);
- writeb(temp, exar_gpio->regs + reg);
- mutex_unlock(&exar_gpio->lock);
+ return (offset + exar_gpio->first_pin) / 8 ? EXAR_OFFSET_MPIOSEL_HI
+ : EXAR_OFFSET_MPIOSEL_LO;
}
-static int exar_set_direction(struct gpio_chip *chip, int direction,
- unsigned int offset)
+static unsigned int
+exar_offset_to_lvl_addr(struct exar_gpio_chip *exar_gpio, unsigned int offset)
{
- struct exar_gpio_chip *exar_gpio = gpiochip_get_data(chip);
- unsigned int addr = (offset + exar_gpio->first_pin) / 8 ?
- EXAR_OFFSET_MPIOSEL_HI : EXAR_OFFSET_MPIOSEL_LO;
- unsigned int bit = (offset + exar_gpio->first_pin) % 8;
-
- exar_update(chip, addr, direction, bit);
- return 0;
+ return (offset + exar_gpio->first_pin) / 8 ? EXAR_OFFSET_MPIOLVL_HI
+ : EXAR_OFFSET_MPIOLVL_LO;
}
-static int exar_get(struct gpio_chip *chip, unsigned int reg)
+static unsigned int
+exar_offset_to_bit(struct exar_gpio_chip *exar_gpio, unsigned int offset)
{
- struct exar_gpio_chip *exar_gpio = gpiochip_get_data(chip);
- int value;
-
- mutex_lock(&exar_gpio->lock);
- value = readb(exar_gpio->regs + reg);
- mutex_unlock(&exar_gpio->lock);
-
- return value;
+ return (offset + exar_gpio->first_pin) % 8;
}
static int exar_get_direction(struct gpio_chip *chip, unsigned int offset)
{
struct exar_gpio_chip *exar_gpio = gpiochip_get_data(chip);
- unsigned int addr = (offset + exar_gpio->first_pin) / 8 ?
- EXAR_OFFSET_MPIOSEL_HI : EXAR_OFFSET_MPIOSEL_LO;
- unsigned int bit = (offset + exar_gpio->first_pin) % 8;
+ unsigned int addr = exar_offset_to_sel_addr(exar_gpio, offset);
+ unsigned int bit = exar_offset_to_bit(exar_gpio, offset);
- if (exar_get(chip, addr) & BIT(bit))
+ if (regmap_test_bits(exar_gpio->regmap, addr, BIT(bit)))
return GPIO_LINE_DIRECTION_IN;
return GPIO_LINE_DIRECTION_OUT;
@@ -86,39 +68,66 @@ static int exar_get_direction(struct gpio_chip *chip, unsigned int offset)
static int exar_get_value(struct gpio_chip *chip, unsigned int offset)
{
struct exar_gpio_chip *exar_gpio = gpiochip_get_data(chip);
- unsigned int addr = (offset + exar_gpio->first_pin) / 8 ?
- EXAR_OFFSET_MPIOLVL_HI : EXAR_OFFSET_MPIOLVL_LO;
- unsigned int bit = (offset + exar_gpio->first_pin) % 8;
+ unsigned int addr = exar_offset_to_lvl_addr(exar_gpio, offset);
+ unsigned int bit = exar_offset_to_bit(exar_gpio, offset);
- return !!(exar_get(chip, addr) & BIT(bit));
+ return !!(regmap_test_bits(exar_gpio->regmap, addr, BIT(bit)));
}
static void exar_set_value(struct gpio_chip *chip, unsigned int offset,
int value)
{
struct exar_gpio_chip *exar_gpio = gpiochip_get_data(chip);
- unsigned int addr = (offset + exar_gpio->first_pin) / 8 ?
- EXAR_OFFSET_MPIOLVL_HI : EXAR_OFFSET_MPIOLVL_LO;
- unsigned int bit = (offset + exar_gpio->first_pin) % 8;
+ unsigned int addr = exar_offset_to_lvl_addr(exar_gpio, offset);
+ unsigned int bit = exar_offset_to_bit(exar_gpio, offset);
- exar_update(chip, addr, value, bit);
+ if (value)
+ regmap_set_bits(exar_gpio->regmap, addr, BIT(bit));
+ else
+ regmap_clear_bits(exar_gpio->regmap, addr, BIT(bit));
}
static int exar_direction_output(struct gpio_chip *chip, unsigned int offset,
int value)
{
+ struct exar_gpio_chip *exar_gpio = gpiochip_get_data(chip);
+ unsigned int addr = exar_offset_to_sel_addr(exar_gpio, offset);
+ unsigned int bit = exar_offset_to_bit(exar_gpio, offset);
+
exar_set_value(chip, offset, value);
- return exar_set_direction(chip, 0, offset);
+ regmap_clear_bits(exar_gpio->regmap, addr, BIT(bit));
+
+ return 0;
}
static int exar_direction_input(struct gpio_chip *chip, unsigned int offset)
{
- return exar_set_direction(chip, 1, offset);
+ struct exar_gpio_chip *exar_gpio = gpiochip_get_data(chip);
+ unsigned int addr = exar_offset_to_sel_addr(exar_gpio, offset);
+ unsigned int bit = exar_offset_to_bit(exar_gpio, offset);
+
+ regmap_set_bits(exar_gpio->regmap, addr, BIT(bit));
+
+ return 0;
}
+static void exar_devm_ida_free(void *data)
+{
+ struct exar_gpio_chip *exar_gpio = data;
+
+ ida_free(&ida_index, exar_gpio->index);
+}
+
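+/*
+ * The MPIO registers hold 8-bit values addressed by a 16-bit offset in
+ * the device's memory window, hence reg_bits = 16 and val_bits = 8.
+ */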
+static const struct regmap_config exar_regmap_config = {
+ .name = "exar-gpio",
+ .reg_bits = 16,
+ .val_bits = 8,
+};
+
static int gpio_exar_probe(struct platform_device *pdev)
{
- struct pci_dev *pcidev = to_pci_dev(pdev->dev.parent);
+ struct device *dev = &pdev->dev;
+ struct pci_dev *pcidev = to_pci_dev(dev->parent);
struct exar_gpio_chip *exar_gpio;
u32 first_pin, ngpios;
void __iomem *p;
@@ -132,30 +141,37 @@ static int gpio_exar_probe(struct platform_device *pdev)
if (!p)
return -ENOMEM;
- ret = device_property_read_u32(&pdev->dev, "exar,first-pin",
- &first_pin);
+ ret = device_property_read_u32(dev, "exar,first-pin", &first_pin);
if (ret)
return ret;
- ret = device_property_read_u32(&pdev->dev, "ngpios", &ngpios);
+ ret = device_property_read_u32(dev, "ngpios", &ngpios);
if (ret)
return ret;
- exar_gpio = devm_kzalloc(&pdev->dev, sizeof(*exar_gpio), GFP_KERNEL);
+ exar_gpio = devm_kzalloc(dev, sizeof(*exar_gpio), GFP_KERNEL);
if (!exar_gpio)
return -ENOMEM;
- mutex_init(&exar_gpio->lock);
+ /*
+ * We don't need to check the return values of mmio regmap operations (unless
+ * the regmap has a clock attached, which is not the case here).
+ */
+ exar_gpio->regmap = devm_regmap_init_mmio(dev, p, &exar_regmap_config);
+ if (IS_ERR(exar_gpio->regmap))
+ return PTR_ERR(exar_gpio->regmap);
+
+ index = ida_alloc(&ida_index, GFP_KERNEL);
+ if (index < 0)
+ return index;
- index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL);
- if (index < 0) {
- ret = index;
- goto err_mutex_destroy;
- }
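+ /* Give the IDA index back automatically on probe failure or driver unbind */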
+ ret = devm_add_action_or_reset(dev, exar_devm_ida_free, exar_gpio);
+ if (ret)
+ return ret;
sprintf(exar_gpio->name, "exar_gpio%d", index);
exar_gpio->gpio_chip.label = exar_gpio->name;
- exar_gpio->gpio_chip.parent = &pdev->dev;
+ exar_gpio->gpio_chip.parent = dev;
exar_gpio->gpio_chip.direction_output = exar_direction_output;
exar_gpio->gpio_chip.direction_input = exar_direction_input;
exar_gpio->gpio_chip.get_direction = exar_get_direction;
@@ -163,39 +179,20 @@ static int gpio_exar_probe(struct platform_device *pdev)
exar_gpio->gpio_chip.set = exar_set_value;
exar_gpio->gpio_chip.base = -1;
exar_gpio->gpio_chip.ngpio = ngpios;
- exar_gpio->regs = p;
exar_gpio->index = index;
exar_gpio->first_pin = first_pin;
- ret = devm_gpiochip_add_data(&pdev->dev,
- &exar_gpio->gpio_chip, exar_gpio);
+ ret = devm_gpiochip_add_data(dev, &exar_gpio->gpio_chip, exar_gpio);
if (ret)
- goto err_destroy;
+ return ret;
platform_set_drvdata(pdev, exar_gpio);
return 0;
-
-err_destroy:
- ida_simple_remove(&ida_index, index);
-err_mutex_destroy:
- mutex_destroy(&exar_gpio->lock);
- return ret;
-}
-
-static int gpio_exar_remove(struct platform_device *pdev)
-{
- struct exar_gpio_chip *exar_gpio = platform_get_drvdata(pdev);
-
- ida_simple_remove(&ida_index, exar_gpio->index);
- mutex_destroy(&exar_gpio->lock);
-
- return 0;
}
static struct platform_driver gpio_exar_driver = {
.probe = gpio_exar_probe,
- .remove = gpio_exar_remove,
.driver = {
.name = DRIVER_NAME,
},
diff --git a/drivers/gpio/gpio-hisi.c b/drivers/gpio/gpio-hisi.c
new file mode 100644
index 000000000000..ad3d4da25160
--- /dev/null
+++ b/drivers/gpio/gpio-hisi.c
@@ -0,0 +1,323 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2020 HiSilicon Limited. */
+#include <linux/gpio/driver.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+
+#define HISI_GPIO_SWPORT_DR_SET_WX 0x000
+#define HISI_GPIO_SWPORT_DR_CLR_WX 0x004
+#define HISI_GPIO_SWPORT_DDR_SET_WX 0x010
+#define HISI_GPIO_SWPORT_DDR_CLR_WX 0x014
+#define HISI_GPIO_SWPORT_DDR_ST_WX 0x018
+#define HISI_GPIO_INTEN_SET_WX 0x020
+#define HISI_GPIO_INTEN_CLR_WX 0x024
+#define HISI_GPIO_INTMASK_SET_WX 0x030
+#define HISI_GPIO_INTMASK_CLR_WX 0x034
+#define HISI_GPIO_INTTYPE_EDGE_SET_WX 0x040
+#define HISI_GPIO_INTTYPE_EDGE_CLR_WX 0x044
+#define HISI_GPIO_INT_POLARITY_SET_WX 0x050
+#define HISI_GPIO_INT_POLARITY_CLR_WX 0x054
+#define HISI_GPIO_DEBOUNCE_SET_WX 0x060
+#define HISI_GPIO_DEBOUNCE_CLR_WX 0x064
+#define HISI_GPIO_INTSTATUS_WX 0x070
+#define HISI_GPIO_PORTA_EOI_WX 0x078
+#define HISI_GPIO_EXT_PORT_WX 0x080
+#define HISI_GPIO_INTCOMB_MASK_WX 0x0a0
+#define HISI_GPIO_INT_DEDGE_SET 0x0b0
+#define HISI_GPIO_INT_DEDGE_CLR 0x0b4
+#define HISI_GPIO_INT_DEDGE_ST 0x0b8
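+
+/*
+ * Most controls are exposed as set/clear register pairs (_SET_WX /
+ * _CLR_WX): writing BIT(line) updates only that line, so no
+ * read-modify-write cycle or lock is needed for multi-core
+ * concurrent access.
+ */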
+
+#define HISI_GPIO_LINE_NUM_MAX 32
+#define HISI_GPIO_DRIVER_NAME "gpio-hisi"
+
+struct hisi_gpio {
+ struct gpio_chip chip;
+ struct device *dev;
+ void __iomem *reg_base;
+ unsigned int line_num;
+ struct irq_chip irq_chip;
+ int irq;
+};
+
+static inline u32 hisi_gpio_read_reg(struct gpio_chip *chip,
+ unsigned int off)
+{
+ struct hisi_gpio *hisi_gpio =
+ container_of(chip, struct hisi_gpio, chip);
+ void __iomem *reg = hisi_gpio->reg_base + off;
+
+ return readl(reg);
+}
+
+static inline void hisi_gpio_write_reg(struct gpio_chip *chip,
+ unsigned int off, u32 val)
+{
+ struct hisi_gpio *hisi_gpio =
+ container_of(chip, struct hisi_gpio, chip);
+ void __iomem *reg = hisi_gpio->reg_base + off;
+
+ writel(val, reg);
+}
+
+static void hisi_gpio_set_debounce(struct gpio_chip *chip, unsigned int off,
+ u32 debounce)
+{
+ if (debounce)
+ hisi_gpio_write_reg(chip, HISI_GPIO_DEBOUNCE_SET_WX, BIT(off));
+ else
+ hisi_gpio_write_reg(chip, HISI_GPIO_DEBOUNCE_CLR_WX, BIT(off));
+}
+
+static int hisi_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
+ unsigned long config)
+{
+ u32 config_para = pinconf_to_config_param(config);
+ u32 config_arg;
+
+ switch (config_para) {
+ case PIN_CONFIG_INPUT_DEBOUNCE:
+ config_arg = pinconf_to_config_argument(config);
+ hisi_gpio_set_debounce(chip, offset, config_arg);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static void hisi_gpio_set_ack(struct irq_data *d)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+
+ hisi_gpio_write_reg(chip, HISI_GPIO_PORTA_EOI_WX, BIT(irqd_to_hwirq(d)));
+}
+
+static void hisi_gpio_irq_set_mask(struct irq_data *d)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+
+ hisi_gpio_write_reg(chip, HISI_GPIO_INTMASK_SET_WX, BIT(irqd_to_hwirq(d)));
+}
+
+static void hisi_gpio_irq_clr_mask(struct irq_data *d)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+
+ hisi_gpio_write_reg(chip, HISI_GPIO_INTMASK_CLR_WX, BIT(irqd_to_hwirq(d)));
+}
+
+static int hisi_gpio_irq_set_type(struct irq_data *d, u32 type)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+ unsigned int mask = BIT(irqd_to_hwirq(d));
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_BOTH:
+ hisi_gpio_write_reg(chip, HISI_GPIO_INT_DEDGE_SET, mask);
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ hisi_gpio_write_reg(chip, HISI_GPIO_INTTYPE_EDGE_SET_WX, mask);
+ hisi_gpio_write_reg(chip, HISI_GPIO_INT_POLARITY_SET_WX, mask);
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ hisi_gpio_write_reg(chip, HISI_GPIO_INTTYPE_EDGE_SET_WX, mask);
+ hisi_gpio_write_reg(chip, HISI_GPIO_INT_POLARITY_CLR_WX, mask);
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ hisi_gpio_write_reg(chip, HISI_GPIO_INTTYPE_EDGE_CLR_WX, mask);
+ hisi_gpio_write_reg(chip, HISI_GPIO_INT_POLARITY_SET_WX, mask);
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ hisi_gpio_write_reg(chip, HISI_GPIO_INTTYPE_EDGE_CLR_WX, mask);
+ hisi_gpio_write_reg(chip, HISI_GPIO_INT_POLARITY_CLR_WX, mask);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * The dual-edge interrupt registers and the other interrupt
+ * registers do not take effect at the same time. The dual-edge
+ * registers have the higher priority, so the dual-edge
+ * configuration must be disabled before configuring any other
+ * kind of interrupt.
+ */
+ if (type != IRQ_TYPE_EDGE_BOTH) {
+ unsigned int both = hisi_gpio_read_reg(chip, HISI_GPIO_INT_DEDGE_ST);
+
+ if (both & mask)
+ hisi_gpio_write_reg(chip, HISI_GPIO_INT_DEDGE_CLR, mask);
+ }
+
+ if (type & IRQ_TYPE_LEVEL_MASK)
+ irq_set_handler_locked(d, handle_level_irq);
+ else if (type & IRQ_TYPE_EDGE_BOTH)
+ irq_set_handler_locked(d, handle_edge_irq);
+
+ return 0;
+}
+
+static void hisi_gpio_irq_enable(struct irq_data *d)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+
+ hisi_gpio_irq_clr_mask(d);
+ hisi_gpio_write_reg(chip, HISI_GPIO_INTEN_SET_WX, BIT(irqd_to_hwirq(d)));
+}
+
+static void hisi_gpio_irq_disable(struct irq_data *d)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+
+ hisi_gpio_irq_set_mask(d);
+ hisi_gpio_write_reg(chip, HISI_GPIO_INTEN_CLR_WX, BIT(irqd_to_hwirq(d)));
+}
+
+static void hisi_gpio_irq_handler(struct irq_desc *desc)
+{
+ struct hisi_gpio *hisi_gpio = irq_desc_get_handler_data(desc);
+ unsigned long irq_msk = hisi_gpio_read_reg(&hisi_gpio->chip,
+ HISI_GPIO_INTSTATUS_WX);
+ struct irq_chip *irq_c = irq_desc_get_chip(desc);
+ int hwirq;
+
+ chained_irq_enter(irq_c, desc);
+ for_each_set_bit(hwirq, &irq_msk, HISI_GPIO_LINE_NUM_MAX)
+ generic_handle_irq(irq_find_mapping(hisi_gpio->chip.irq.domain,
+ hwirq));
+ chained_irq_exit(irq_c, desc);
+}
+
+static void hisi_gpio_init_irq(struct hisi_gpio *hisi_gpio)
+{
+ struct gpio_chip *chip = &hisi_gpio->chip;
+ struct gpio_irq_chip *girq_chip = &chip->irq;
+
+ /* Set hooks for irq_chip */
+ hisi_gpio->irq_chip.irq_ack = hisi_gpio_set_ack;
+ hisi_gpio->irq_chip.irq_mask = hisi_gpio_irq_set_mask;
+ hisi_gpio->irq_chip.irq_unmask = hisi_gpio_irq_clr_mask;
+ hisi_gpio->irq_chip.irq_set_type = hisi_gpio_irq_set_type;
+ hisi_gpio->irq_chip.irq_enable = hisi_gpio_irq_enable;
+ hisi_gpio->irq_chip.irq_disable = hisi_gpio_irq_disable;
+
+ girq_chip->chip = &hisi_gpio->irq_chip;
+ girq_chip->default_type = IRQ_TYPE_NONE;
+ girq_chip->num_parents = 1;
+ girq_chip->parents = &hisi_gpio->irq;
+ girq_chip->parent_handler = hisi_gpio_irq_handler;
+ girq_chip->parent_handler_data = hisi_gpio;
+
+ /* Clear Mask of GPIO controller combine IRQ */
+ hisi_gpio_write_reg(chip, HISI_GPIO_INTCOMB_MASK_WX, 1);
+}
+
+static const struct acpi_device_id hisi_gpio_acpi_match[] = {
+ {"HISI0184", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, hisi_gpio_acpi_match);
+
+static void hisi_gpio_get_pdata(struct device *dev,
+ struct hisi_gpio *hisi_gpio)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct fwnode_handle *fwnode;
+ int idx = 0;
+
+ device_for_each_child_node(dev, fwnode) {
+ /* The loop runs only once, so no array is needed to save line_num */
+ if (fwnode_property_read_u32(fwnode, "ngpios",
+ &hisi_gpio->line_num)) {
+ dev_err(dev,
+ "failed to get number of lines for port%d and use default value instead\n",
+ idx);
+ hisi_gpio->line_num = HISI_GPIO_LINE_NUM_MAX;
+ }
+
+ if (WARN_ON(hisi_gpio->line_num > HISI_GPIO_LINE_NUM_MAX))
+ hisi_gpio->line_num = HISI_GPIO_LINE_NUM_MAX;
+
+ hisi_gpio->irq = platform_get_irq(pdev, idx);
+
+ dev_info(dev,
+ "get hisi_gpio[%d] with %d lines\n", idx,
+ hisi_gpio->line_num);
+
+ idx++;
+ }
+}
+
+static int hisi_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct hisi_gpio *hisi_gpio;
+ int port_num;
+ int ret;
+
+ /*
+ * One GPIO controller owns one port currently; if we get more
+ * from the ACPI table, return an error.
+ */
+ port_num = device_get_child_node_count(dev);
+ if (WARN_ON(port_num != 1))
+ return -ENODEV;
+
+ hisi_gpio = devm_kzalloc(dev, sizeof(*hisi_gpio), GFP_KERNEL);
+ if (!hisi_gpio)
+ return -ENOMEM;
+
+ hisi_gpio->reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(hisi_gpio->reg_base))
+ return PTR_ERR(hisi_gpio->reg_base);
+
+ hisi_gpio_get_pdata(dev, hisi_gpio);
+
+ hisi_gpio->dev = dev;
+
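+ /*
+ * Data and direction go through the generic MMIO GPIO helper using
+ * the separate set/clear register pairs defined above;
+ * BGPIOF_NO_SET_ON_INPUT makes direction_output program the
+ * direction before the value, as value writes presumably do not
+ * latch while a line is still an input.
+ */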
+ ret = bgpio_init(&hisi_gpio->chip, hisi_gpio->dev, 0x4,
+ hisi_gpio->reg_base + HISI_GPIO_EXT_PORT_WX,
+ hisi_gpio->reg_base + HISI_GPIO_SWPORT_DR_SET_WX,
+ hisi_gpio->reg_base + HISI_GPIO_SWPORT_DR_CLR_WX,
+ hisi_gpio->reg_base + HISI_GPIO_SWPORT_DDR_SET_WX,
+ hisi_gpio->reg_base + HISI_GPIO_SWPORT_DDR_CLR_WX,
+ BGPIOF_NO_SET_ON_INPUT);
+ if (ret) {
+ dev_err(dev, "failed to init, ret = %d\n", ret);
+ return ret;
+ }
+
+ hisi_gpio->chip.set_config = hisi_gpio_set_config;
+ hisi_gpio->chip.ngpio = hisi_gpio->line_num;
+ hisi_gpio->chip.bgpio_dir_unreadable = 1;
+ hisi_gpio->chip.base = -1;
+
+ if (hisi_gpio->irq > 0)
+ hisi_gpio_init_irq(hisi_gpio);
+
+ ret = devm_gpiochip_add_data(dev, &hisi_gpio->chip, hisi_gpio);
+ if (ret) {
+ dev_err(dev, "failed to register gpiochip, ret = %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct platform_driver hisi_gpio_driver = {
+ .driver = {
+ .name = HISI_GPIO_DRIVER_NAME,
+ .acpi_match_table = hisi_gpio_acpi_match,
+ },
+ .probe = hisi_gpio_probe,
+};
+
+module_platform_driver(hisi_gpio_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Luo Jiaxing <luojiaxing@huawei.com>");
+MODULE_DESCRIPTION("HiSilicon GPIO controller driver");
+MODULE_ALIAS("platform:" HISI_GPIO_DRIVER_NAME);
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index 67ed4f238d43..28b757d34046 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -15,6 +15,7 @@
#include <linux/irq.h>
#include <linux/irq_sim.h>
#include <linux/irqdomain.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/property.h>
@@ -460,9 +461,16 @@ static int gpio_mockup_probe(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id gpio_mockup_of_match[] = {
+ { .compatible = "gpio-mockup", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, gpio_mockup_of_match);
+
static struct platform_driver gpio_mockup_driver = {
.driver = {
.name = "gpio-mockup",
+ .of_match_table = gpio_mockup_of_match,
},
.probe = gpio_mockup_probe,
};
@@ -556,8 +564,7 @@ static int __init gpio_mockup_init(void)
{
int i, num_chips, err;
- if ((gpio_mockup_num_ranges < 2) ||
- (gpio_mockup_num_ranges % 2) ||
+ if ((gpio_mockup_num_ranges % 2) ||
(gpio_mockup_num_ranges > GPIO_MOCKUP_MAX_RANGES))
return -EINVAL;
diff --git a/drivers/gpio/gpio-msc313.c b/drivers/gpio/gpio-msc313.c
new file mode 100644
index 000000000000..da31a5ff7a2b
--- /dev/null
+++ b/drivers/gpio/gpio-msc313.c
@@ -0,0 +1,460 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2020 Daniel Palmer <daniel@thingy.jp> */
+
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/gpio/driver.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/gpio/msc313-gpio.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#define DRIVER_NAME "gpio-msc313"
+
+#define MSC313_GPIO_IN BIT(0)
+#define MSC313_GPIO_OUT BIT(4)
+#define MSC313_GPIO_OEN BIT(5)
+
+/*
+ * These bits need to be saved to correctly restore the
+ * gpio state when resuming from suspend to memory.
+ */
+#define MSC313_GPIO_BITSTOSAVE (MSC313_GPIO_OUT | MSC313_GPIO_OEN)
+
+/* pad names for fuart, same for all SoCs so far */
+#define MSC313_PINNAME_FUART_RX "fuart_rx"
+#define MSC313_PINNAME_FUART_TX "fuart_tx"
+#define MSC313_PINNAME_FUART_CTS "fuart_cts"
+#define MSC313_PINNAME_FUART_RTS "fuart_rts"
+
+/* pad names for sr, mercury5 is different */
+#define MSC313_PINNAME_SR_IO2 "sr_io2"
+#define MSC313_PINNAME_SR_IO3 "sr_io3"
+#define MSC313_PINNAME_SR_IO4 "sr_io4"
+#define MSC313_PINNAME_SR_IO5 "sr_io5"
+#define MSC313_PINNAME_SR_IO6 "sr_io6"
+#define MSC313_PINNAME_SR_IO7 "sr_io7"
+#define MSC313_PINNAME_SR_IO8 "sr_io8"
+#define MSC313_PINNAME_SR_IO9 "sr_io9"
+#define MSC313_PINNAME_SR_IO10 "sr_io10"
+#define MSC313_PINNAME_SR_IO11 "sr_io11"
+#define MSC313_PINNAME_SR_IO12 "sr_io12"
+#define MSC313_PINNAME_SR_IO13 "sr_io13"
+#define MSC313_PINNAME_SR_IO14 "sr_io14"
+#define MSC313_PINNAME_SR_IO15 "sr_io15"
+#define MSC313_PINNAME_SR_IO16 "sr_io16"
+#define MSC313_PINNAME_SR_IO17 "sr_io17"
+
+/* pad names for sd, same for all SoCs so far */
+#define MSC313_PINNAME_SD_CLK "sd_clk"
+#define MSC313_PINNAME_SD_CMD "sd_cmd"
+#define MSC313_PINNAME_SD_D0 "sd_d0"
+#define MSC313_PINNAME_SD_D1 "sd_d1"
+#define MSC313_PINNAME_SD_D2 "sd_d2"
+#define MSC313_PINNAME_SD_D3 "sd_d3"
+
+/* pad names for i2c1, same for all SoCs so far */
+#define MSC313_PINNAME_I2C1_SCL "i2c1_scl"
+#define MSC313_PINNAME_I2C1_SCA "i2c1_sda"
+
+/* pad names for spi0, same for all SoCs so far */
+#define MSC313_PINNAME_SPI0_CZ "spi0_cz"
+#define MSC313_PINNAME_SPI0_CK "spi0_ck"
+#define MSC313_PINNAME_SPI0_DI "spi0_di"
+#define MSC313_PINNAME_SPI0_DO "spi0_do"
+
+#define FUART_NAMES \
+ MSC313_PINNAME_FUART_RX, \
+ MSC313_PINNAME_FUART_TX, \
+ MSC313_PINNAME_FUART_CTS, \
+ MSC313_PINNAME_FUART_RTS
+
+#define OFF_FUART_RX 0x50
+#define OFF_FUART_TX 0x54
+#define OFF_FUART_CTS 0x58
+#define OFF_FUART_RTS 0x5c
+
+#define FUART_OFFSETS \
+ OFF_FUART_RX, \
+ OFF_FUART_TX, \
+ OFF_FUART_CTS, \
+ OFF_FUART_RTS
+
+#define SR_NAMES \
+ MSC313_PINNAME_SR_IO2, \
+ MSC313_PINNAME_SR_IO3, \
+ MSC313_PINNAME_SR_IO4, \
+ MSC313_PINNAME_SR_IO5, \
+ MSC313_PINNAME_SR_IO6, \
+ MSC313_PINNAME_SR_IO7, \
+ MSC313_PINNAME_SR_IO8, \
+ MSC313_PINNAME_SR_IO9, \
+ MSC313_PINNAME_SR_IO10, \
+ MSC313_PINNAME_SR_IO11, \
+ MSC313_PINNAME_SR_IO12, \
+ MSC313_PINNAME_SR_IO13, \
+ MSC313_PINNAME_SR_IO14, \
+ MSC313_PINNAME_SR_IO15, \
+ MSC313_PINNAME_SR_IO16, \
+ MSC313_PINNAME_SR_IO17
+
+#define OFF_SR_IO2 0x88
+#define OFF_SR_IO3 0x8c
+#define OFF_SR_IO4 0x90
+#define OFF_SR_IO5 0x94
+#define OFF_SR_IO6 0x98
+#define OFF_SR_IO7 0x9c
+#define OFF_SR_IO8 0xa0
+#define OFF_SR_IO9 0xa4
+#define OFF_SR_IO10 0xa8
+#define OFF_SR_IO11 0xac
+#define OFF_SR_IO12 0xb0
+#define OFF_SR_IO13 0xb4
+#define OFF_SR_IO14 0xb8
+#define OFF_SR_IO15 0xbc
+#define OFF_SR_IO16 0xc0
+#define OFF_SR_IO17 0xc4
+
+#define SR_OFFSETS \
+ OFF_SR_IO2, \
+ OFF_SR_IO3, \
+ OFF_SR_IO4, \
+ OFF_SR_IO5, \
+ OFF_SR_IO6, \
+ OFF_SR_IO7, \
+ OFF_SR_IO8, \
+ OFF_SR_IO9, \
+ OFF_SR_IO10, \
+ OFF_SR_IO11, \
+ OFF_SR_IO12, \
+ OFF_SR_IO13, \
+ OFF_SR_IO14, \
+ OFF_SR_IO15, \
+ OFF_SR_IO16, \
+ OFF_SR_IO17
+
+#define SD_NAMES \
+ MSC313_PINNAME_SD_CLK, \
+ MSC313_PINNAME_SD_CMD, \
+ MSC313_PINNAME_SD_D0, \
+ MSC313_PINNAME_SD_D1, \
+ MSC313_PINNAME_SD_D2, \
+ MSC313_PINNAME_SD_D3
+
+#define OFF_SD_CLK 0x140
+#define OFF_SD_CMD 0x144
+#define OFF_SD_D0 0x148
+#define OFF_SD_D1 0x14c
+#define OFF_SD_D2 0x150
+#define OFF_SD_D3 0x154
+
+#define SD_OFFSETS \
+ OFF_SD_CLK, \
+ OFF_SD_CMD, \
+ OFF_SD_D0, \
+ OFF_SD_D1, \
+ OFF_SD_D2, \
+ OFF_SD_D3
+
+#define I2C1_NAMES \
+ MSC313_PINNAME_I2C1_SCL, \
+ MSC313_PINNAME_I2C1_SCA
+
+#define OFF_I2C1_SCL 0x188
+#define OFF_I2C1_SCA 0x18c
+
+#define I2C1_OFFSETS \
+ OFF_I2C1_SCL, \
+ OFF_I2C1_SCA
+
+#define SPI0_NAMES \
+ MSC313_PINNAME_SPI0_CZ, \
+ MSC313_PINNAME_SPI0_CK, \
+ MSC313_PINNAME_SPI0_DI, \
+ MSC313_PINNAME_SPI0_DO
+
+#define OFF_SPI0_CZ 0x1c0
+#define OFF_SPI0_CK 0x1c4
+#define OFF_SPI0_DI 0x1c8
+#define OFF_SPI0_DO 0x1cc
+
+#define SPI0_OFFSETS \
+ OFF_SPI0_CZ, \
+ OFF_SPI0_CK, \
+ OFF_SPI0_DI, \
+ OFF_SPI0_DO
+
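+/*
+ * Each supported SoC provides parallel tables of pad names and register
+ * offsets; index i in both tables describes GPIO line i of the chip.
+ */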
+struct msc313_gpio_data {
+ const char * const *names;
+ const unsigned int *offsets;
+ const unsigned int num;
+};
+
+#define MSC313_GPIO_CHIPDATA(_chip) \
+static const struct msc313_gpio_data _chip##_data = { \
+ .names = _chip##_names, \
+ .offsets = _chip##_offsets, \
+ .num = ARRAY_SIZE(_chip##_offsets), \
+}
+
+#ifdef CONFIG_MACH_INFINITY
+static const char * const msc313_names[] = {
+ FUART_NAMES,
+ SR_NAMES,
+ SD_NAMES,
+ I2C1_NAMES,
+ SPI0_NAMES,
+};
+
+static const unsigned int msc313_offsets[] = {
+ FUART_OFFSETS,
+ SR_OFFSETS,
+ SD_OFFSETS,
+ I2C1_OFFSETS,
+ SPI0_OFFSETS,
+};
+
+MSC313_GPIO_CHIPDATA(msc313);
+#endif
+
+struct msc313_gpio {
+ void __iomem *base;
+ const struct msc313_gpio_data *gpio_data;
+ u8 *saved;
+};
+
+static void msc313_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+{
+ struct msc313_gpio *gpio = gpiochip_get_data(chip);
+ u8 gpioreg = readb_relaxed(gpio->base + gpio->gpio_data->offsets[offset]);
+
+ if (value)
+ gpioreg |= MSC313_GPIO_OUT;
+ else
+ gpioreg &= ~MSC313_GPIO_OUT;
+
+ writeb_relaxed(gpioreg, gpio->base + gpio->gpio_data->offsets[offset]);
+}
+
+static int msc313_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct msc313_gpio *gpio = gpiochip_get_data(chip);
+
+ return readb_relaxed(gpio->base + gpio->gpio_data->offsets[offset]) & MSC313_GPIO_IN;
+}
+
+static int msc313_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
+{
+ struct msc313_gpio *gpio = gpiochip_get_data(chip);
+ u8 gpioreg = readb_relaxed(gpio->base + gpio->gpio_data->offsets[offset]);
+
+ gpioreg |= MSC313_GPIO_OEN;
+ writeb_relaxed(gpioreg, gpio->base + gpio->gpio_data->offsets[offset]);
+
+ return 0;
+}
+
+static int msc313_gpio_direction_output(struct gpio_chip *chip, unsigned int offset, int value)
+{
+ struct msc313_gpio *gpio = gpiochip_get_data(chip);
+ u8 gpioreg = readb_relaxed(gpio->base + gpio->gpio_data->offsets[offset]);
+
+ gpioreg &= ~MSC313_GPIO_OEN;
+ if (value)
+ gpioreg |= MSC313_GPIO_OUT;
+ else
+ gpioreg &= ~MSC313_GPIO_OUT;
+ writeb_relaxed(gpioreg, gpio->base + gpio->gpio_data->offsets[offset]);
+
+ return 0;
+}
+
+/*
+ * The interrupt handling happens in the parent interrupt controller,
+ * so we don't do anything here.
+ */
+static struct irq_chip msc313_gpio_irqchip = {
+ .name = "GPIO",
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_set_type = irq_chip_set_type_parent,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+};
+
+/*
+ * The parent interrupt controller needs the GIC interrupt type set to
+ * GIC_SPI, so we need to provide the fwspec ourselves. This is essentially
+ * gpiochip_populate_parent_fwspec_twocell() with GIC_SPI put into the
+ * first cell.
+ */
+static void *msc313_gpio_populate_parent_fwspec(struct gpio_chip *gc,
+ unsigned int parent_hwirq,
+ unsigned int parent_type)
+{
+ struct irq_fwspec *fwspec;
+
+ fwspec = kmalloc(sizeof(*fwspec), GFP_KERNEL);
+ if (!fwspec)
+ return NULL;
+
+ fwspec->fwnode = gc->irq.parent_domain->fwnode;
+ fwspec->param_count = 3;
+ fwspec->param[0] = GIC_SPI;
+ fwspec->param[1] = parent_hwirq;
+ fwspec->param[2] = parent_type;
+
+ return fwspec;
+}
+
+static int msc313e_gpio_child_to_parent_hwirq(struct gpio_chip *chip,
+ unsigned int child,
+ unsigned int child_type,
+ unsigned int *parent,
+ unsigned int *parent_type)
+{
+ struct msc313_gpio *priv = gpiochip_get_data(chip);
+ unsigned int offset = priv->gpio_data->offsets[child];
+
+ /*
+ * Only the spi0 pins have interrupts on the parent interrupt
+ * controller on all of the known chips, and so far they are all
+ * mapped to the same place: the per-pin registers are 4 bytes
+ * apart and the parent hwirq of the first spi0 pin is 28.
+ */
+ if (offset >= OFF_SPI0_CZ && offset <= OFF_SPI0_DO) {
+ *parent_type = child_type;
+ *parent = ((offset - OFF_SPI0_CZ) >> 2) + 28;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int msc313_gpio_probe(struct platform_device *pdev)
+{
+ const struct msc313_gpio_data *match_data;
+ struct msc313_gpio *gpio;
+ struct gpio_chip *gpiochip;
+ struct gpio_irq_chip *gpioirqchip;
+ struct irq_domain *parent_domain;
+ struct device_node *parent_node;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ match_data = of_device_get_match_data(dev);
+ if (!match_data)
+ return -EINVAL;
+
+ parent_node = of_irq_find_parent(dev->of_node);
+ if (!parent_node)
+ return -ENODEV;
+
+ parent_domain = irq_find_host(parent_node);
+ if (!parent_domain)
+ return -ENODEV;
+
+ gpio = devm_kzalloc(dev, sizeof(*gpio), GFP_KERNEL);
+ if (!gpio)
+ return -ENOMEM;
+
+ gpio->gpio_data = match_data;
+
+ gpio->saved = devm_kcalloc(dev, gpio->gpio_data->num, sizeof(*gpio->saved), GFP_KERNEL);
+ if (!gpio->saved)
+ return -ENOMEM;
+
+ gpio->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(gpio->base))
+ return PTR_ERR(gpio->base);
+
+ platform_set_drvdata(pdev, gpio);
+
+ gpiochip = devm_kzalloc(dev, sizeof(*gpiochip), GFP_KERNEL);
+ if (!gpiochip)
+ return -ENOMEM;
+
+ gpiochip->label = DRIVER_NAME;
+ gpiochip->parent = dev;
+ gpiochip->request = gpiochip_generic_request;
+ gpiochip->free = gpiochip_generic_free;
+ gpiochip->direction_input = msc313_gpio_direction_input;
+ gpiochip->direction_output = msc313_gpio_direction_output;
+ gpiochip->get = msc313_gpio_get;
+ gpiochip->set = msc313_gpio_set;
+ gpiochip->base = -1;
+ gpiochip->ngpio = gpio->gpio_data->num;
+ gpiochip->names = gpio->gpio_data->names;
+
+ gpioirqchip = &gpiochip->irq;
+ gpioirqchip->chip = &msc313_gpio_irqchip;
+ gpioirqchip->fwnode = of_node_to_fwnode(dev->of_node);
+ gpioirqchip->parent_domain = parent_domain;
+ gpioirqchip->child_to_parent_hwirq = msc313e_gpio_child_to_parent_hwirq;
+ gpioirqchip->populate_parent_alloc_arg = msc313_gpio_populate_parent_fwspec;
+ gpioirqchip->handler = handle_bad_irq;
+ gpioirqchip->default_type = IRQ_TYPE_NONE;
+
+ ret = devm_gpiochip_add_data(dev, gpiochip, gpio);
+ return ret;
+}
+
+static int msc313_gpio_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static const struct of_device_id msc313_gpio_of_match[] = {
+#ifdef CONFIG_MACH_INFINITY
+ {
+ .compatible = "mstar,msc313-gpio",
+ .data = &msc313_data,
+ },
+#endif
+ { }
+};
+
+/*
+ * The GPIO controller loses the state of the registers when the
+ * SoC goes into suspend to memory mode so we need to save some
+ * of the register bits before suspending and put it back when resuming
+ */
+static int __maybe_unused msc313_gpio_suspend(struct device *dev)
+{
+ struct msc313_gpio *gpio = dev_get_drvdata(dev);
+ int i;
+
+ for (i = 0; i < gpio->gpio_data->num; i++)
+ gpio->saved[i] = readb_relaxed(gpio->base + gpio->gpio_data->offsets[i]) & MSC313_GPIO_BITSTOSAVE;
+
+ return 0;
+}
+
+static int __maybe_unused msc313_gpio_resume(struct device *dev)
+{
+ struct msc313_gpio *gpio = dev_get_drvdata(dev);
+ int i;
+
+ for (i = 0; i < gpio->gpio_data->num; i++)
+ writeb_relaxed(gpio->saved[i], gpio->base + gpio->gpio_data->offsets[i]);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(msc313_gpio_ops, msc313_gpio_suspend, msc313_gpio_resume);
+
+static struct platform_driver msc313_gpio_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = msc313_gpio_of_match,
+ .pm = &msc313_gpio_ops,
+ },
+ .probe = msc313_gpio_probe,
+ .remove = msc313_gpio_remove,
+};
+
+builtin_platform_driver(msc313_gpio_driver);
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 2f245594a90a..a912a8fed197 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -78,8 +78,7 @@
/*
* The Armada XP has per-CPU registers for interrupt cause, interrupt
- * mask and interrupt level mask. Those are relative to the
- * percpu_membase.
+ * mask and interrupt level mask. Those are in percpu_regs range.
*/
#define GPIO_EDGE_CAUSE_ARMADAXP_OFF(cpu) ((cpu) * 0x4)
#define GPIO_EDGE_MASK_ARMADAXP_OFF(cpu) (0x10 + (cpu) * 0x4)
@@ -93,7 +92,7 @@
#define MVEBU_MAX_GPIO_PER_BANK 32
struct mvebu_pwm {
- void __iomem *membase;
+ struct regmap *regs;
unsigned long clk_rate;
struct gpio_desc *gpiod;
struct pwm_chip chip;
@@ -279,17 +278,17 @@ mvebu_gpio_write_level_mask(struct mvebu_gpio_chip *mvchip, u32 val)
}
/*
- * Functions returning addresses of individual registers for a given
+ * Functions returning offsets of individual registers for a given
* PWM controller.
*/
-static void __iomem *mvebu_pwmreg_blink_on_duration(struct mvebu_pwm *mvpwm)
+static unsigned int mvebu_pwmreg_blink_on_duration(struct mvebu_pwm *mvpwm)
{
- return mvpwm->membase + PWM_BLINK_ON_DURATION_OFF;
+ return PWM_BLINK_ON_DURATION_OFF;
}
-static void __iomem *mvebu_pwmreg_blink_off_duration(struct mvebu_pwm *mvpwm)
+static unsigned int mvebu_pwmreg_blink_off_duration(struct mvebu_pwm *mvpwm)
{
- return mvpwm->membase + PWM_BLINK_OFF_DURATION_OFF;
+ return PWM_BLINK_OFF_DURATION_OFF;
}
/*
@@ -600,6 +599,13 @@ static void mvebu_gpio_irq_handler(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
+static const struct regmap_config mvebu_gpio_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+};
+
/*
* Functions implementing the pwm_chip methods
*/
@@ -660,9 +666,8 @@ static void mvebu_pwm_get_state(struct pwm_chip *chip,
spin_lock_irqsave(&mvpwm->lock, flags);
- val = (unsigned long long)
- readl_relaxed(mvebu_pwmreg_blink_on_duration(mvpwm));
- val *= NSEC_PER_SEC;
+ regmap_read(mvpwm->regs, mvebu_pwmreg_blink_on_duration(mvpwm), &u);
+ val = (unsigned long long) u * NSEC_PER_SEC;
do_div(val, mvpwm->clk_rate);
if (val > UINT_MAX)
state->duty_cycle = UINT_MAX;
@@ -671,21 +676,17 @@ static void mvebu_pwm_get_state(struct pwm_chip *chip,
else
state->duty_cycle = 1;
- val = (unsigned long long)
- readl_relaxed(mvebu_pwmreg_blink_off_duration(mvpwm));
+ val = (unsigned long long) u; /* on duration */
+ regmap_read(mvpwm->regs, mvebu_pwmreg_blink_off_duration(mvpwm), &u);
+ val += (unsigned long long) u; /* period = on + off duration */
val *= NSEC_PER_SEC;
do_div(val, mvpwm->clk_rate);
- if (val < state->duty_cycle) {
+ if (val > UINT_MAX)
+ state->period = UINT_MAX;
+ else if (val)
+ state->period = val;
+ else
state->period = 1;
- } else {
- val -= state->duty_cycle;
- if (val > UINT_MAX)
- state->period = UINT_MAX;
- else if (val)
- state->period = val;
- else
- state->period = 1;
- }
regmap_read(mvchip->regs, GPIO_BLINK_EN_OFF + mvchip->offset, &u);
if (u)
@@ -726,8 +727,8 @@ static int mvebu_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
spin_lock_irqsave(&mvpwm->lock, flags);
- writel_relaxed(on, mvebu_pwmreg_blink_on_duration(mvpwm));
- writel_relaxed(off, mvebu_pwmreg_blink_off_duration(mvpwm));
+ regmap_write(mvpwm->regs, mvebu_pwmreg_blink_on_duration(mvpwm), on);
+ regmap_write(mvpwm->regs, mvebu_pwmreg_blink_off_duration(mvpwm), off);
if (state->enabled)
mvebu_gpio_blink(&mvchip->chip, pwm->hwpwm, 1);
else
@@ -752,10 +753,10 @@ static void __maybe_unused mvebu_pwm_suspend(struct mvebu_gpio_chip *mvchip)
regmap_read(mvchip->regs, GPIO_BLINK_CNT_SELECT_OFF + mvchip->offset,
&mvpwm->blink_select);
- mvpwm->blink_on_duration =
- readl_relaxed(mvebu_pwmreg_blink_on_duration(mvpwm));
- mvpwm->blink_off_duration =
- readl_relaxed(mvebu_pwmreg_blink_off_duration(mvpwm));
+ regmap_read(mvpwm->regs, mvebu_pwmreg_blink_on_duration(mvpwm),
+ &mvpwm->blink_on_duration);
+ regmap_read(mvpwm->regs, mvebu_pwmreg_blink_off_duration(mvpwm),
+ &mvpwm->blink_off_duration);
}
static void __maybe_unused mvebu_pwm_resume(struct mvebu_gpio_chip *mvchip)
@@ -764,10 +765,10 @@ static void __maybe_unused mvebu_pwm_resume(struct mvebu_gpio_chip *mvchip)
regmap_write(mvchip->regs, GPIO_BLINK_CNT_SELECT_OFF + mvchip->offset,
mvpwm->blink_select);
- writel_relaxed(mvpwm->blink_on_duration,
- mvebu_pwmreg_blink_on_duration(mvpwm));
- writel_relaxed(mvpwm->blink_off_duration,
- mvebu_pwmreg_blink_off_duration(mvpwm));
+ regmap_write(mvpwm->regs, mvebu_pwmreg_blink_on_duration(mvpwm),
+ mvpwm->blink_on_duration);
+ regmap_write(mvpwm->regs, mvebu_pwmreg_blink_off_duration(mvpwm),
+ mvpwm->blink_off_duration);
}
static int mvebu_pwm_probe(struct platform_device *pdev,
@@ -776,6 +777,7 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
{
struct device *dev = &pdev->dev;
struct mvebu_pwm *mvpwm;
+ void __iomem *base;
u32 set;
if (!of_device_is_compatible(mvchip->chip.of_node,
@@ -813,9 +815,14 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
mvchip->mvpwm = mvpwm;
mvpwm->mvchip = mvchip;
- mvpwm->membase = devm_platform_ioremap_resource_byname(pdev, "pwm");
- if (IS_ERR(mvpwm->membase))
- return PTR_ERR(mvpwm->membase);
+ base = devm_platform_ioremap_resource_byname(pdev, "pwm");
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ mvpwm->regs = devm_regmap_init_mmio(&pdev->dev, base,
+ &mvebu_gpio_regmap_config);
+ if (IS_ERR(mvpwm->regs))
+ return PTR_ERR(mvpwm->regs);
mvpwm->clk_rate = clk_get_rate(mvchip->clk);
if (!mvpwm->clk_rate) {
@@ -1022,13 +1029,6 @@ static int mvebu_gpio_resume(struct platform_device *pdev)
return 0;
}
-static const struct regmap_config mvebu_gpio_regmap_config = {
- .reg_bits = 32,
- .reg_stride = 4,
- .val_bits = 32,
- .fast_io = true,
-};
-
static int mvebu_gpio_probe_raw(struct platform_device *pdev,
struct mvebu_gpio_chip *mvchip)
{
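
The mvebu conversion above swaps raw readl_relaxed()/writel_relaxed() accesses for a regmap. A minimal sketch of the pattern, with illustrative names, assuming a 32-bit MMIO register file:

#include <linux/regmap.h>

static const struct regmap_config example_regmap_config = {
	.reg_bits   = 32,
	.reg_stride = 4,
	.val_bits   = 32,
	.fast_io    = true,	/* spinlock-protected, suitable for MMIO */
};

static int example_regs_init(struct device *dev, void __iomem *base,
			     struct regmap **regs)
{
	*regs = devm_regmap_init_mmio(dev, base, &example_regmap_config);
	return PTR_ERR_OR_ZERO(*regs);
}

Reads and writes then go through regmap_read()/regmap_write() with plain register offsets rather than pre-computed addresses, which is what lets the mvebu_pwmreg_blink_*_duration() helpers above return offsets instead of pointers.
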
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index 643f4c557ac2..157106e1e438 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -24,13 +24,6 @@
#include <linux/of_device.h>
#include <linux/bug.h>
-enum mxc_gpio_hwtype {
- IMX1_GPIO, /* runs on i.mx1 */
- IMX21_GPIO, /* runs on i.mx21 and i.mx27 */
- IMX31_GPIO, /* runs on i.mx31 */
- IMX35_GPIO, /* runs on all other i.mx */
-};
-
/* device type dependent stuff */
struct mxc_gpio_hwdata {
unsigned dr_reg;
@@ -68,6 +61,7 @@ struct mxc_gpio_port {
u32 both_edges;
struct mxc_gpio_reg_saved gpio_saved_reg;
bool power_off;
+ const struct mxc_gpio_hwdata *hwdata;
};
static struct mxc_gpio_hwdata imx1_imx21_gpio_hwdata = {
@@ -115,48 +109,27 @@ static struct mxc_gpio_hwdata imx35_gpio_hwdata = {
.fall_edge = 0x03,
};
-static enum mxc_gpio_hwtype mxc_gpio_hwtype;
-static struct mxc_gpio_hwdata *mxc_gpio_hwdata;
-
-#define GPIO_DR (mxc_gpio_hwdata->dr_reg)
-#define GPIO_GDIR (mxc_gpio_hwdata->gdir_reg)
-#define GPIO_PSR (mxc_gpio_hwdata->psr_reg)
-#define GPIO_ICR1 (mxc_gpio_hwdata->icr1_reg)
-#define GPIO_ICR2 (mxc_gpio_hwdata->icr2_reg)
-#define GPIO_IMR (mxc_gpio_hwdata->imr_reg)
-#define GPIO_ISR (mxc_gpio_hwdata->isr_reg)
-#define GPIO_EDGE_SEL (mxc_gpio_hwdata->edge_sel_reg)
-
-#define GPIO_INT_LOW_LEV (mxc_gpio_hwdata->low_level)
-#define GPIO_INT_HIGH_LEV (mxc_gpio_hwdata->high_level)
-#define GPIO_INT_RISE_EDGE (mxc_gpio_hwdata->rise_edge)
-#define GPIO_INT_FALL_EDGE (mxc_gpio_hwdata->fall_edge)
+#define GPIO_DR (port->hwdata->dr_reg)
+#define GPIO_GDIR (port->hwdata->gdir_reg)
+#define GPIO_PSR (port->hwdata->psr_reg)
+#define GPIO_ICR1 (port->hwdata->icr1_reg)
+#define GPIO_ICR2 (port->hwdata->icr2_reg)
+#define GPIO_IMR (port->hwdata->imr_reg)
+#define GPIO_ISR (port->hwdata->isr_reg)
+#define GPIO_EDGE_SEL (port->hwdata->edge_sel_reg)
+
+#define GPIO_INT_LOW_LEV (port->hwdata->low_level)
+#define GPIO_INT_HIGH_LEV (port->hwdata->high_level)
+#define GPIO_INT_RISE_EDGE (port->hwdata->rise_edge)
+#define GPIO_INT_FALL_EDGE (port->hwdata->fall_edge)
#define GPIO_INT_BOTH_EDGES 0x4
-static const struct platform_device_id mxc_gpio_devtype[] = {
- {
- .name = "imx1-gpio",
- .driver_data = IMX1_GPIO,
- }, {
- .name = "imx21-gpio",
- .driver_data = IMX21_GPIO,
- }, {
- .name = "imx31-gpio",
- .driver_data = IMX31_GPIO,
- }, {
- .name = "imx35-gpio",
- .driver_data = IMX35_GPIO,
- }, {
- /* sentinel */
- }
-};
-
static const struct of_device_id mxc_gpio_dt_ids[] = {
- { .compatible = "fsl,imx1-gpio", .data = &mxc_gpio_devtype[IMX1_GPIO], },
- { .compatible = "fsl,imx21-gpio", .data = &mxc_gpio_devtype[IMX21_GPIO], },
- { .compatible = "fsl,imx31-gpio", .data = &mxc_gpio_devtype[IMX31_GPIO], },
- { .compatible = "fsl,imx35-gpio", .data = &mxc_gpio_devtype[IMX35_GPIO], },
- { .compatible = "fsl,imx7d-gpio", .data = &mxc_gpio_devtype[IMX35_GPIO], },
+ { .compatible = "fsl,imx1-gpio", .data = &imx1_imx21_gpio_hwdata },
+ { .compatible = "fsl,imx21-gpio", .data = &imx1_imx21_gpio_hwdata },
+ { .compatible = "fsl,imx31-gpio", .data = &imx31_gpio_hwdata },
+ { .compatible = "fsl,imx35-gpio", .data = &imx35_gpio_hwdata },
+ { .compatible = "fsl,imx7d-gpio", .data = &imx35_gpio_hwdata },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mxc_gpio_dt_ids);
@@ -372,36 +345,6 @@ static int mxc_gpio_init_gc(struct mxc_gpio_port *port, int irq_base)
return rv;
}
-static void mxc_gpio_get_hw(struct platform_device *pdev)
-{
- const struct of_device_id *of_id =
- of_match_device(mxc_gpio_dt_ids, &pdev->dev);
- enum mxc_gpio_hwtype hwtype;
-
- if (of_id)
- pdev->id_entry = of_id->data;
- hwtype = pdev->id_entry->driver_data;
-
- if (mxc_gpio_hwtype) {
- /*
- * The driver works with a reasonable presupposition,
- * that is all gpio ports must be the same type when
- * running on one soc.
- */
- BUG_ON(mxc_gpio_hwtype != hwtype);
- return;
- }
-
- if (hwtype == IMX35_GPIO)
- mxc_gpio_hwdata = &imx35_gpio_hwdata;
- else if (hwtype == IMX31_GPIO)
- mxc_gpio_hwdata = &imx31_gpio_hwdata;
- else
- mxc_gpio_hwdata = &imx1_imx21_gpio_hwdata;
-
- mxc_gpio_hwtype = hwtype;
-}
-
static int mxc_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
{
struct mxc_gpio_port *port = gpiochip_get_data(gc);
@@ -417,14 +360,14 @@ static int mxc_gpio_probe(struct platform_device *pdev)
int irq_base;
int err;
- mxc_gpio_get_hw(pdev);
-
port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL);
if (!port)
return -ENOMEM;
port->dev = &pdev->dev;
+ port->hwdata = device_get_match_data(&pdev->dev);
+
port->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(port->base))
return PTR_ERR(port->base);
@@ -461,7 +404,7 @@ static int mxc_gpio_probe(struct platform_device *pdev)
writel(0, port->base + GPIO_IMR);
writel(~0, port->base + GPIO_ISR);
- if (mxc_gpio_hwtype == IMX21_GPIO) {
+ if (of_device_is_compatible(np, "fsl,imx21-gpio")) {
/*
* Setup one handler for all GPIO interrupts. Actually setting
* the handler is needed only once, but doing it for every port
@@ -596,7 +539,6 @@ static struct platform_driver mxc_gpio_driver = {
.suppress_bind_attrs = true,
},
.probe = mxc_gpio_probe,
- .id_table = mxc_gpio_devtype,
};
static int __init gpio_mxc_init(void)
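
The mxc rework above drops the platform id_table plumbing in favor of per-compatible match data. A sketch of the idiom with hypothetical names:

#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/property.h>

struct example_hwdata {
	unsigned int dr_reg;
};

static const struct example_hwdata example_v1_hwdata = { .dr_reg = 0x00 };

static const struct of_device_id example_dt_ids[] = {
	{ .compatible = "vendor,example-v1", .data = &example_v1_hwdata },
	{ /* sentinel */ }
};

static int example_probe(struct platform_device *pdev)
{
	const struct example_hwdata *hw = device_get_match_data(&pdev->dev);

	if (!hw)
		return -ENODEV;

	/* ... use hw->dr_reg instead of global hwtype state ... */
	return 0;
}
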
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index c4a314c68555..dfc0c1eb1b33 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -254,19 +254,6 @@ static int mxs_gpio_get_direction(struct gpio_chip *gc, unsigned offset)
return GPIO_LINE_DIRECTION_IN;
}
-static const struct platform_device_id mxs_gpio_ids[] = {
- {
- .name = "imx23-gpio",
- .driver_data = IMX23_GPIO,
- }, {
- .name = "imx28-gpio",
- .driver_data = IMX28_GPIO,
- }, {
- /* sentinel */
- }
-};
-MODULE_DEVICE_TABLE(platform, mxs_gpio_ids);
-
static const struct of_device_id mxs_gpio_dt_ids[] = {
{ .compatible = "fsl,imx23-gpio", .data = (void *) IMX23_GPIO, },
{ .compatible = "fsl,imx28-gpio", .data = (void *) IMX28_GPIO, },
@@ -370,7 +357,6 @@ static struct platform_driver mxs_gpio_driver = {
.suppress_bind_attrs = true,
},
.probe = mxs_gpio_probe,
- .id_table = mxs_gpio_ids,
};
static int __init mxs_gpio_init(void)
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index f7ceb2b11afc..41952bb818ad 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -1049,11 +1049,8 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
irq->first = irq_base;
ret = gpiochip_add_data(&bank->chip, bank);
- if (ret) {
- dev_err(bank->chip.parent,
- "Could not register gpio chip %d\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(bank->chip.parent, ret, "Could not register gpio chip\n");
ret = devm_request_irq(bank->chip.parent, bank->irq,
omap_gpio_irq_handler,
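
dev_err_probe(), used in the omap hunk above, folds the error print and the return into one call and stays quiet for -EPROBE_DEFER, recording the deferral reason instead. A sketch, with example_get_resource() as a hypothetical helper:

static int example_probe_step(struct device *dev)
{
	int ret = example_get_resource(dev);	/* hypothetical helper */

	if (ret)
		return dev_err_probe(dev, ret, "failed to get resource\n");

	return 0;
}
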
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index 3ef19cef8da9..0b572dbc4a36 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -32,6 +32,11 @@ struct gpio_rcar_bank_info {
u32 intmsk;
};
+struct gpio_rcar_info {
+ bool has_outdtsel;
+ bool has_both_edge_trigger;
+};
+
struct gpio_rcar_priv {
void __iomem *base;
spinlock_t lock;
@@ -40,24 +45,23 @@ struct gpio_rcar_priv {
struct irq_chip irq_chip;
unsigned int irq_parent;
atomic_t wakeup_path;
- bool has_outdtsel;
- bool has_both_edge_trigger;
+ struct gpio_rcar_info info;
struct gpio_rcar_bank_info bank_info;
};
-#define IOINTSEL 0x00 /* General IO/Interrupt Switching Register */
-#define INOUTSEL 0x04 /* General Input/Output Switching Register */
-#define OUTDT 0x08 /* General Output Register */
-#define INDT 0x0c /* General Input Register */
-#define INTDT 0x10 /* Interrupt Display Register */
-#define INTCLR 0x14 /* Interrupt Clear Register */
-#define INTMSK 0x18 /* Interrupt Mask Register */
-#define MSKCLR 0x1c /* Interrupt Mask Clear Register */
-#define POSNEG 0x20 /* Positive/Negative Logic Select Register */
-#define EDGLEVEL 0x24 /* Edge/level Select Register */
-#define FILONOFF 0x28 /* Chattering Prevention On/Off Register */
-#define OUTDTSEL 0x40 /* Output Data Select Register */
-#define BOTHEDGE 0x4c /* One Edge/Both Edge Select Register */
+#define IOINTSEL 0x00 /* General IO/Interrupt Switching Register */
+#define INOUTSEL 0x04 /* General Input/Output Switching Register */
+#define OUTDT 0x08 /* General Output Register */
+#define INDT 0x0c /* General Input Register */
+#define INTDT 0x10 /* Interrupt Display Register */
+#define INTCLR 0x14 /* Interrupt Clear Register */
+#define INTMSK 0x18 /* Interrupt Mask Register */
+#define MSKCLR 0x1c /* Interrupt Mask Clear Register */
+#define POSNEG 0x20 /* Positive/Negative Logic Select Register */
+#define EDGLEVEL 0x24 /* Edge/level Select Register */
+#define FILONOFF 0x28 /* Chattering Prevention On/Off Register */
+#define OUTDTSEL 0x40 /* Output Data Select Register */
+#define BOTHEDGE 0x4c /* One Edge/Both Edge Select Register */
#define RCAR_MAX_GPIO_PER_BANK 32
@@ -123,7 +127,7 @@ static void gpio_rcar_config_interrupt_input_mode(struct gpio_rcar_priv *p,
gpio_rcar_modify_bit(p, EDGLEVEL, hwirq, !level_trigger);
/* Select one edge or both edges in BOTHEDGE */
- if (p->has_both_edge_trigger)
+ if (p->info.has_both_edge_trigger)
gpio_rcar_modify_bit(p, BOTHEDGE, hwirq, both);
/* Select "Interrupt Input Mode" in IOINTSEL */
@@ -162,7 +166,7 @@ static int gpio_rcar_irq_set_type(struct irq_data *d, unsigned int type)
false);
break;
case IRQ_TYPE_EDGE_BOTH:
- if (!p->has_both_edge_trigger)
+ if (!p->info.has_both_edge_trigger)
return -EINVAL;
gpio_rcar_config_interrupt_input_mode(p, hwirq, true, false,
true);
@@ -238,7 +242,7 @@ static void gpio_rcar_config_general_input_output_mode(struct gpio_chip *chip,
gpio_rcar_modify_bit(p, INOUTSEL, gpio, output);
/* Select General Output Register to output data in OUTDTSEL */
- if (p->has_outdtsel && output)
+ if (p->info.has_outdtsel && output)
gpio_rcar_modify_bit(p, OUTDTSEL, gpio, false);
spin_unlock_irqrestore(&p->lock, flags);
@@ -295,14 +299,44 @@ static int gpio_rcar_direction_input(struct gpio_chip *chip, unsigned offset)
static int gpio_rcar_get(struct gpio_chip *chip, unsigned offset)
{
+ struct gpio_rcar_priv *p = gpiochip_get_data(chip);
u32 bit = BIT(offset);
/* testing on r8a7790 shows that INDT does not show correct pin state
* when configured as output, so use OUTDT in case of output pins */
- if (gpio_rcar_read(gpiochip_get_data(chip), INOUTSEL) & bit)
- return !!(gpio_rcar_read(gpiochip_get_data(chip), OUTDT) & bit);
+ if (gpio_rcar_read(p, INOUTSEL) & bit)
+ return !!(gpio_rcar_read(p, OUTDT) & bit);
else
- return !!(gpio_rcar_read(gpiochip_get_data(chip), INDT) & bit);
+ return !!(gpio_rcar_read(p, INDT) & bit);
+}
+
+static int gpio_rcar_get_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct gpio_rcar_priv *p = gpiochip_get_data(chip);
+ u32 bankmask, outputs, m, val = 0;
+ unsigned long flags;
+
+ bankmask = mask[0] & GENMASK(chip->ngpio - 1, 0);
+ if (chip->valid_mask)
+ bankmask &= chip->valid_mask[0];
+
+ if (!bankmask)
+ return 0;
+
+ spin_lock_irqsave(&p->lock, flags);
+ outputs = gpio_rcar_read(p, INOUTSEL);
+ m = outputs & bankmask;
+ if (m)
+ val |= gpio_rcar_read(p, OUTDT) & m;
+
+ m = ~outputs & bankmask;
+ if (m)
+ val |= gpio_rcar_read(p, INDT) & m;
+ spin_unlock_irqrestore(&p->lock, flags);
+
+ bits[0] = val;
+ return 0;
}
static void gpio_rcar_set(struct gpio_chip *chip, unsigned offset, int value)
@@ -346,11 +380,6 @@ static int gpio_rcar_direction_output(struct gpio_chip *chip, unsigned offset,
return 0;
}
-struct gpio_rcar_info {
- bool has_outdtsel;
- bool has_both_edge_trigger;
-};
-
static const struct gpio_rcar_info gpio_rcar_info_gen1 = {
.has_outdtsel = false,
.has_both_edge_trigger = false,
@@ -417,8 +446,7 @@ static int gpio_rcar_parse_dt(struct gpio_rcar_priv *p, unsigned int *npins)
int ret;
info = of_device_get_match_data(p->dev);
- p->has_outdtsel = info->has_outdtsel;
- p->has_both_edge_trigger = info->has_both_edge_trigger;
+ p->info = *info;
ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, &args);
*npins = ret == 0 ? args.args[2] : RCAR_MAX_GPIO_PER_BANK;
@@ -479,6 +507,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
gpio_chip->get_direction = gpio_rcar_get_direction;
gpio_chip->direction_input = gpio_rcar_direction_input;
gpio_chip->get = gpio_rcar_get;
+ gpio_chip->get_multiple = gpio_rcar_get_multiple;
gpio_chip->direction_output = gpio_rcar_direction_output;
gpio_chip->set = gpio_rcar_set;
gpio_chip->set_multiple = gpio_rcar_set_multiple;
@@ -552,7 +581,7 @@ static int gpio_rcar_suspend(struct device *dev)
p->bank_info.intmsk = gpio_rcar_read(p, INTMSK);
p->bank_info.posneg = gpio_rcar_read(p, POSNEG);
p->bank_info.edglevel = gpio_rcar_read(p, EDGLEVEL);
- if (p->has_both_edge_trigger)
+ if (p->info.has_both_edge_trigger)
p->bank_info.bothedge = gpio_rcar_read(p, BOTHEDGE);
if (atomic_read(&p->wakeup_path))
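
With the get_multiple() hook added above, gpiolib can read a whole bank in a single register access when a consumer asks for an array of values. A consumer-side sketch, assuming a hypothetical "sense" con_id:

#include <linux/gpio/consumer.h>

static int example_read_bank(struct device *dev)
{
	struct gpio_descs *descs;
	DECLARE_BITMAP(values, 32);

	/* "sense" is an illustrative con_id, not from this driver */
	descs = devm_gpiod_get_array(dev, "sense", GPIOD_IN);
	if (IS_ERR(descs))
		return PTR_ERR(descs);

	return gpiod_get_array_value(descs->ndescs, descs->desc,
				     descs->info, values);
}
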
diff --git a/drivers/gpio/gpio-sifive.c b/drivers/gpio/gpio-sifive.c
index d5eb9ca11901..403f9e833d6a 100644
--- a/drivers/gpio/gpio-sifive.c
+++ b/drivers/gpio/gpio-sifive.c
@@ -29,7 +29,6 @@
#define SIFIVE_GPIO_OUTPUT_XOR 0x40
#define SIFIVE_GPIO_MAX 32
-#define SIFIVE_GPIO_IRQ_OFFSET 7
struct sifive_gpio {
void __iomem *base;
@@ -37,7 +36,7 @@ struct sifive_gpio {
struct regmap *regs;
unsigned long irq_state;
unsigned int trigger[SIFIVE_GPIO_MAX];
- unsigned int irq_parent[SIFIVE_GPIO_MAX];
+ unsigned int irq_number[SIFIVE_GPIO_MAX];
};
static void sifive_gpio_set_ie(struct sifive_gpio *chip, unsigned int offset)
@@ -128,6 +127,16 @@ static void sifive_gpio_irq_eoi(struct irq_data *d)
irq_chip_eoi_parent(d);
}
+static int sifive_gpio_irq_set_affinity(struct irq_data *data,
+ const struct cpumask *dest,
+ bool force)
+{
+ if (data->parent_data)
+ return irq_chip_set_affinity_parent(data, dest, force);
+
+ return -EINVAL;
+}
+
static struct irq_chip sifive_gpio_irqchip = {
.name = "sifive-gpio",
.irq_set_type = sifive_gpio_irq_set_type,
@@ -136,6 +145,7 @@ static struct irq_chip sifive_gpio_irqchip = {
.irq_enable = sifive_gpio_irq_enable,
.irq_disable = sifive_gpio_irq_disable,
.irq_eoi = sifive_gpio_irq_eoi,
+ .irq_set_affinity = sifive_gpio_irq_set_affinity,
};
static int sifive_gpio_child_to_parent_hwirq(struct gpio_chip *gc,
@@ -144,8 +154,12 @@ static int sifive_gpio_child_to_parent_hwirq(struct gpio_chip *gc,
unsigned int *parent,
unsigned int *parent_type)
{
+ struct sifive_gpio *chip = gpiochip_get_data(gc);
+ struct irq_data *d = irq_get_irq_data(chip->irq_number[child]);
+
*parent_type = IRQ_TYPE_NONE;
- *parent = child + SIFIVE_GPIO_IRQ_OFFSET;
+ *parent = irqd_to_hwirq(d);
+
return 0;
}
@@ -165,7 +179,7 @@ static int sifive_gpio_probe(struct platform_device *pdev)
struct irq_domain *parent;
struct gpio_irq_chip *girq;
struct sifive_gpio *chip;
- int ret, ngpio;
+ int ret, ngpio, i;
chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
@@ -200,6 +214,9 @@ static int sifive_gpio_probe(struct platform_device *pdev)
return -ENODEV;
}
+ for (i = 0; i < ngpio; i++)
+ chip->irq_number[i] = platform_get_irq(pdev, i);
+
ret = bgpio_init(&chip->gc, dev, 4,
chip->base + SIFIVE_GPIO_INPUT_VAL,
chip->base + SIFIVE_GPIO_OUTPUT_VAL,
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
index b0155d6007c8..b94ef8181428 100644
--- a/drivers/gpio/gpio-stmpe.c
+++ b/drivers/gpio/gpio-stmpe.c
@@ -474,15 +474,6 @@ static int stmpe_gpio_probe(struct platform_device *pdev)
stmpe_gpio->chip.parent = &pdev->dev;
stmpe_gpio->chip.of_node = np;
stmpe_gpio->chip.base = -1;
- /*
- * REVISIT: this makes sure the valid mask gets allocated and
- * filled in when adding the gpio_chip, but the rest of the
- * gpio_irqchip is still filled in using the old method
- * in gpiochip_irqchip_add_nested() so clean this up once we
- * get the gpio_irqchip to initialize while adding the
- * gpio_chip also for threaded irqchips.
- */
- stmpe_gpio->chip.irq.init_valid_mask = stmpe_init_irq_valid_mask;
if (IS_ENABLED(CONFIG_DEBUG_FS))
stmpe_gpio->chip.dbg_show = stmpe_dbg_show;
@@ -520,6 +511,7 @@ static int stmpe_gpio_probe(struct platform_device *pdev)
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_simple_irq;
girq->threaded = true;
+ girq->init_valid_mask = stmpe_init_irq_valid_mask;
}
ret = gpiochip_add_data(&stmpe_gpio->chip, stmpe_gpio);
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 86568154cdb3..e19ebff6018c 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -61,8 +61,16 @@ struct tegra_gpio_info;
struct tegra_gpio_bank {
unsigned int bank;
unsigned int irq;
- spinlock_t lvl_lock[4];
- spinlock_t dbc_lock[4]; /* Lock for updating debounce count register */
+
+ /*
+ * The IRQ core uses raw locking, so nested locking also needs
+ * to be raw in order not to trip spinlock debug warnings.
+ */
+ raw_spinlock_t lvl_lock[4];
+
+ /* Lock for updating debounce count register */
+ spinlock_t dbc_lock[4];
+
#ifdef CONFIG_PM_SLEEP
u32 cnf[4];
u32 out[4];
@@ -334,14 +342,14 @@ static int tegra_gpio_irq_set_type(struct irq_data *d, unsigned int type)
return -EINVAL;
}
- spin_lock_irqsave(&bank->lvl_lock[port], flags);
+ raw_spin_lock_irqsave(&bank->lvl_lock[port], flags);
val = tegra_gpio_readl(tgi, GPIO_INT_LVL(tgi, gpio));
val &= ~(GPIO_INT_LVL_MASK << GPIO_BIT(gpio));
val |= lvl_type << GPIO_BIT(gpio);
tegra_gpio_writel(tgi, val, GPIO_INT_LVL(tgi, gpio));
- spin_unlock_irqrestore(&bank->lvl_lock[port], flags);
+ raw_spin_unlock_irqrestore(&bank->lvl_lock[port], flags);
tegra_gpio_mask_write(tgi, GPIO_MSK_OE(tgi, gpio), gpio, 0);
tegra_gpio_enable(tgi, gpio);
@@ -560,6 +568,9 @@ static const struct dev_pm_ops tegra_gpio_pm_ops = {
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(tegra_gpio_suspend, tegra_gpio_resume)
};
+static struct lock_class_key gpio_lock_class;
+static struct lock_class_key gpio_request_class;
+
static int tegra_gpio_probe(struct platform_device *pdev)
{
struct tegra_gpio_info *tgi;
@@ -661,6 +672,7 @@ static int tegra_gpio_probe(struct platform_device *pdev)
bank = &tgi->bank_info[GPIO_BANK(gpio)];
irq_set_chip_data(irq, bank);
+ irq_set_lockdep_class(irq, &gpio_lock_class, &gpio_request_class);
irq_set_chip_and_handler(irq, &tgi->ic, handle_simple_irq);
}
@@ -671,7 +683,7 @@ static int tegra_gpio_probe(struct platform_device *pdev)
tegra_gpio_irq_handler, bank);
for (j = 0; j < 4; j++) {
- spin_lock_init(&bank->lvl_lock[j]);
+ raw_spin_lock_init(&bank->lvl_lock[j]);
spin_lock_init(&bank->dbc_lock[j]);
}
}
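
The tegra hunk also gives the GPIO interrupts their own lockdep classes, which keeps lockdep from conflating the gpiochip's IRQ locks with the parent chip's when interrupts nest. The idiom in isolation:

static struct lock_class_key example_lock_class;
static struct lock_class_key example_request_class;

static void example_setup_irq(unsigned int irq)
{
	/* Distinct classes per irqchip avoid false nesting reports */
	irq_set_lockdep_class(irq, &example_lock_class,
			      &example_request_class);
}
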
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
index 9500074b1f1b..286e0b1f46e4 100644
--- a/drivers/gpio/gpio-tegra186.c
+++ b/drivers/gpio/gpio-tegra186.c
@@ -444,6 +444,16 @@ static int tegra186_irq_set_wake(struct irq_data *data, unsigned int on)
return 0;
}
+static int tegra186_irq_set_affinity(struct irq_data *data,
+ const struct cpumask *dest,
+ bool force)
+{
+ if (data->parent_data)
+ return irq_chip_set_affinity_parent(data, dest, force);
+
+ return -EINVAL;
+}
+
static void tegra186_gpio_irq(struct irq_desc *desc)
{
struct tegra_gpio *gpio = irq_desc_get_handler_data(desc);
@@ -690,6 +700,7 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
gpio->intc.irq_unmask = tegra186_irq_unmask;
gpio->intc.irq_set_type = tegra186_irq_set_type;
gpio->intc.irq_set_wake = tegra186_irq_set_wake;
+ gpio->intc.irq_set_affinity = tegra186_irq_set_affinity;
irq = &gpio->gpio.irq;
irq->chip = &gpio->intc;
diff --git a/drivers/gpio/gpio-tps65910.c b/drivers/gpio/gpio-tps65910.c
index 0c785b0fd161..0c0b445c75c0 100644
--- a/drivers/gpio/gpio-tps65910.c
+++ b/drivers/gpio/gpio-tps65910.c
@@ -28,7 +28,7 @@ static int tps65910_gpio_get(struct gpio_chip *gc, unsigned offset)
struct tps65910 *tps65910 = tps65910_gpio->tps65910;
unsigned int val;
- tps65910_reg_read(tps65910, TPS65910_GPIO0 + offset, &val);
+ regmap_read(tps65910->regmap, TPS65910_GPIO0 + offset, &val);
if (val & GPIO_STS_MASK)
return 1;
@@ -43,10 +43,10 @@ static void tps65910_gpio_set(struct gpio_chip *gc, unsigned offset,
struct tps65910 *tps65910 = tps65910_gpio->tps65910;
if (value)
- tps65910_reg_set_bits(tps65910, TPS65910_GPIO0 + offset,
+ regmap_set_bits(tps65910->regmap, TPS65910_GPIO0 + offset,
GPIO_SET_MASK);
else
- tps65910_reg_clear_bits(tps65910, TPS65910_GPIO0 + offset,
+ regmap_clear_bits(tps65910->regmap, TPS65910_GPIO0 + offset,
GPIO_SET_MASK);
}
@@ -59,7 +59,7 @@ static int tps65910_gpio_output(struct gpio_chip *gc, unsigned offset,
/* Set the initial value */
tps65910_gpio_set(gc, offset, value);
- return tps65910_reg_set_bits(tps65910, TPS65910_GPIO0 + offset,
+ return regmap_set_bits(tps65910->regmap, TPS65910_GPIO0 + offset,
GPIO_CFG_MASK);
}
@@ -68,7 +68,7 @@ static int tps65910_gpio_input(struct gpio_chip *gc, unsigned offset)
struct tps65910_gpio *tps65910_gpio = gpiochip_get_data(gc);
struct tps65910 *tps65910 = tps65910_gpio->tps65910;
- return tps65910_reg_clear_bits(tps65910, TPS65910_GPIO0 + offset,
+ return regmap_clear_bits(tps65910->regmap, TPS65910_GPIO0 + offset,
GPIO_CFG_MASK);
}
@@ -157,7 +157,7 @@ static int tps65910_gpio_probe(struct platform_device *pdev)
if (!pdata->en_gpio_sleep[i])
continue;
- ret = tps65910_reg_set_bits(tps65910,
+ ret = regmap_set_bits(tps65910->regmap,
TPS65910_GPIO0 + i, GPIO_SLEEP_MASK);
if (ret < 0)
dev_warn(tps65910->dev,
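
The tps65910 conversion relies on regmap_set_bits()/regmap_clear_bits(), which are thin wrappers around regmap_update_bits(); roughly:

#include <linux/regmap.h>

static inline int example_set_bits(struct regmap *map, unsigned int reg,
				   unsigned int bits)
{
	return regmap_update_bits(map, reg, bits, bits);	/* set */
}

static inline int example_clear_bits(struct regmap *map, unsigned int reg,
				     unsigned int bits)
{
	return regmap_update_bits(map, reg, bits, 0);		/* clear */
}
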
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index 67f9f82e0db0..be539381fd82 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -6,13 +6,14 @@
*/
#include <linux/bitops.h>
-#include <linux/init.h>
+#include <linux/clk.h>
#include <linux/errno.h>
+#include <linux/gpio/driver.h>
+#include <linux/init.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
-#include <linux/io.h>
-#include <linux/gpio/driver.h>
#include <linux/slab.h>
/* Register Offset Definitions */
@@ -38,6 +39,7 @@
* @gpio_state: GPIO state shadow register
* @gpio_dir: GPIO direction shadow register
* @gpio_lock: Lock used for synchronization
+ * @clk: clock resource for this driver
*/
struct xgpio_instance {
struct gpio_chip gc;
@@ -46,6 +48,7 @@ struct xgpio_instance {
u32 gpio_state[2];
u32 gpio_dir[2];
spinlock_t gpio_lock[2];
+ struct clk *clk;
};
static inline int xgpio_index(struct xgpio_instance *chip, int gpio)
@@ -257,6 +260,23 @@ static void xgpio_save_regs(struct xgpio_instance *chip)
}
/**
+ * xgpio_remove - Remove method for the GPIO device.
+ * @pdev: pointer to the platform device
+ *
+ * This function removes the gpiochip and frees all the allocated resources.
+ *
+ * Return: 0 always
+ */
+static int xgpio_remove(struct platform_device *pdev)
+{
+ struct xgpio_instance *gpio = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(gpio->clk);
+
+ return 0;
+}
+
+/**
* xgpio_of_probe - Probe method for the GPIO device.
* @pdev: pointer to the platform device
*
@@ -278,7 +298,8 @@ static int xgpio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, chip);
/* Update GPIO state shadow register with default value */
- of_property_read_u32(np, "xlnx,dout-default", &chip->gpio_state[0]);
+ if (of_property_read_u32(np, "xlnx,dout-default", &chip->gpio_state[0]))
+ chip->gpio_state[0] = 0x0;
/* Update GPIO direction shadow register with default value */
if (of_property_read_u32(np, "xlnx,tri-default", &chip->gpio_dir[0]))
@@ -298,8 +319,9 @@ static int xgpio_probe(struct platform_device *pdev)
if (is_dual) {
/* Update GPIO state shadow register with default value */
- of_property_read_u32(np, "xlnx,dout-default-2",
- &chip->gpio_state[1]);
+ if (of_property_read_u32(np, "xlnx,dout-default-2",
+ &chip->gpio_state[1]))
+ chip->gpio_state[1] = 0x0;
/* Update GPIO direction shadow register with default value */
if (of_property_read_u32(np, "xlnx,tri-default-2",
@@ -334,11 +356,25 @@ static int xgpio_probe(struct platform_device *pdev)
return PTR_ERR(chip->regs);
}
+ chip->clk = devm_clk_get_optional(&pdev->dev, NULL);
+ if (IS_ERR(chip->clk)) {
+ if (PTR_ERR(chip->clk) != -EPROBE_DEFER)
+ dev_dbg(&pdev->dev, "Input clock not found\n");
+ return PTR_ERR(chip->clk);
+ }
+
+ status = clk_prepare_enable(chip->clk);
+ if (status < 0) {
+ dev_err(&pdev->dev, "Failed to prepare clk\n");
+ return status;
+ }
+
xgpio_save_regs(chip);
status = devm_gpiochip_add_data(&pdev->dev, &chip->gc, chip);
if (status) {
dev_err(&pdev->dev, "failed to add GPIO chip\n");
+ clk_disable_unprepare(chip->clk);
return status;
}
@@ -354,6 +390,7 @@ MODULE_DEVICE_TABLE(of, xgpio_of_match);
static struct platform_driver xgpio_plat_driver = {
.probe = xgpio_probe,
+ .remove = xgpio_remove,
.driver = {
.name = "gpio-xilinx",
.of_match_table = xgpio_of_match,
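
The xilinx clock handling above leans on the optional-clock idiom: devm_clk_get_optional() returns NULL instead of an error when no clock is described, and the clk API treats a NULL clk as a no-op, so the enable path needs no special-casing. A sketch:

#include <linux/clk.h>

static int example_enable_clk(struct device *dev, struct clk **out)
{
	struct clk *clk = devm_clk_get_optional(dev, NULL);

	if (IS_ERR(clk))
		return PTR_ERR(clk);	/* e.g. -EPROBE_DEFER */

	*out = clk;
	return clk_prepare_enable(clk);	/* NULL clk: returns 0 */
}
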
diff --git a/drivers/gpio/gpio-xra1403.c b/drivers/gpio/gpio-xra1403.c
index e2cac12092af..49c878cfd5c6 100644
--- a/drivers/gpio/gpio-xra1403.c
+++ b/drivers/gpio/gpio-xra1403.c
@@ -186,15 +186,7 @@ static int xra1403_probe(struct spi_device *spi)
return ret;
}
- ret = devm_gpiochip_add_data(&spi->dev, &xra->chip, xra);
- if (ret < 0) {
- dev_err(&spi->dev, "Unable to register gpiochip\n");
- return ret;
- }
-
- spi_set_drvdata(spi, xra);
-
- return 0;
+ return devm_gpiochip_add_data(&spi->dev, &xra->chip, xra);
}
static const struct spi_device_id xra1403_ids[] = {
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 834a12f3219e..e37a57d0a2f0 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -205,6 +205,68 @@ static void acpi_gpiochip_request_irqs(struct acpi_gpio_chip *acpi_gpio)
acpi_gpiochip_request_irq(acpi_gpio, event);
}
+static enum gpiod_flags
+acpi_gpio_to_gpiod_flags(const struct acpi_resource_gpio *agpio, int polarity)
+{
+ /* GpioInt() implies input configuration */
+ if (agpio->connection_type == ACPI_RESOURCE_GPIO_TYPE_INT)
+ return GPIOD_IN;
+
+ switch (agpio->io_restriction) {
+ case ACPI_IO_RESTRICT_INPUT:
+ return GPIOD_IN;
+ case ACPI_IO_RESTRICT_OUTPUT:
+ /*
+ * ACPI GPIO resources don't contain an initial value for the
+ * GPIO. Therefore we deduce that value from the pull field
+ * and the polarity instead. If the pin is pulled up we assume
+ * the default to be high, if it is pulled down we assume the
+ * default to be low, otherwise we leave the pin untouched. For
+ * active-low polarity the values are switched. See also
+ * Documentation/firmware-guide/acpi/gpio-properties.rst.
+ */
+ switch (agpio->pin_config) {
+ case ACPI_PIN_CONFIG_PULLUP:
+ return polarity == GPIO_ACTIVE_LOW ? GPIOD_OUT_LOW : GPIOD_OUT_HIGH;
+ case ACPI_PIN_CONFIG_PULLDOWN:
+ return polarity == GPIO_ACTIVE_LOW ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * Assume that the BIOS has configured the direction and pull
+ * accordingly.
+ */
+ return GPIOD_ASIS;
+}
+
+static struct gpio_desc *acpi_request_own_gpiod(struct gpio_chip *chip,
+ struct acpi_resource_gpio *agpio,
+ unsigned int index,
+ const char *label)
+{
+ int polarity = GPIO_ACTIVE_HIGH;
+ enum gpiod_flags flags = acpi_gpio_to_gpiod_flags(agpio, polarity);
+ unsigned int pin = agpio->pin_table[index];
+ struct gpio_desc *desc;
+ int ret;
+
+ desc = gpiochip_request_own_desc(chip, pin, label, polarity, flags);
+ if (IS_ERR(desc))
+ return desc;
+
+ ret = gpio_set_debounce_timeout(desc, agpio->debounce_timeout);
+ if (ret)
+ gpiochip_free_own_desc(desc);
+
+ return ret ? ERR_PTR(ret) : desc;
+}
+
static bool acpi_gpio_in_ignore_list(const char *controller_in, int pin_in)
{
const char *controller, *pin_str;
@@ -290,8 +352,7 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
if (!handler)
return AE_OK;
- desc = gpiochip_request_own_desc(chip, pin, "ACPI:Event",
- GPIO_ACTIVE_HIGH, GPIOD_IN);
+ desc = acpi_request_own_gpiod(chip, agpio, 0, "ACPI:Event");
if (IS_ERR(desc)) {
dev_err(chip->parent,
"Failed to request GPIO for pin 0x%04X, err %ld\n",
@@ -526,39 +587,6 @@ static bool acpi_get_driver_gpio_data(struct acpi_device *adev,
return false;
}
-static enum gpiod_flags
-acpi_gpio_to_gpiod_flags(const struct acpi_resource_gpio *agpio)
-{
- switch (agpio->io_restriction) {
- case ACPI_IO_RESTRICT_INPUT:
- return GPIOD_IN;
- case ACPI_IO_RESTRICT_OUTPUT:
- /*
- * ACPI GPIO resources don't contain an initial value for the
- * GPIO. Therefore we deduce that value from the pull field
- * instead. If the pin is pulled up we assume default to be
- * high, if it is pulled down we assume default to be low,
- * otherwise we leave pin untouched.
- */
- switch (agpio->pin_config) {
- case ACPI_PIN_CONFIG_PULLUP:
- return GPIOD_OUT_HIGH;
- case ACPI_PIN_CONFIG_PULLDOWN:
- return GPIOD_OUT_LOW;
- default:
- break;
- }
- default:
- break;
- }
-
- /*
- * Assume that the BIOS has configured the direction and pull
- * accordingly.
- */
- return GPIOD_ASIS;
-}
-
static int
__acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags, enum gpiod_flags update)
{
@@ -633,7 +661,7 @@ int acpi_gpio_update_gpiod_lookup_flags(unsigned long *lookupflags,
struct acpi_gpio_lookup {
struct acpi_gpio_info info;
int index;
- int pin_index;
+ u16 pin_index;
bool active_low;
struct gpio_desc *desc;
int n;
@@ -649,7 +677,7 @@ static int acpi_populate_gpio_lookup(struct acpi_resource *ares, void *data)
if (!lookup->desc) {
const struct acpi_resource_gpio *agpio = &ares->data.gpio;
bool gpioint = agpio->connection_type == ACPI_RESOURCE_GPIO_TYPE_INT;
- int pin_index;
+ u16 pin_index;
if (lookup->info.quirks & ACPI_GPIO_QUIRK_ONLY_GPIOIO && gpioint)
lookup->index++;
@@ -664,6 +692,7 @@ static int acpi_populate_gpio_lookup(struct acpi_resource *ares, void *data)
lookup->desc = acpi_get_gpiod(agpio->resource_source.string_ptr,
agpio->pin_table[pin_index]);
lookup->info.pin_config = agpio->pin_config;
+ lookup->info.debounce = agpio->debounce_timeout;
lookup->info.gpioint = gpioint;
/*
@@ -674,13 +703,13 @@ static int acpi_populate_gpio_lookup(struct acpi_resource *ares, void *data)
* - ACPI_ACTIVE_HIGH == GPIO_ACTIVE_HIGH
*/
if (lookup->info.gpioint) {
- lookup->info.flags = GPIOD_IN;
lookup->info.polarity = agpio->polarity;
lookup->info.triggering = agpio->triggering;
} else {
- lookup->info.flags = acpi_gpio_to_gpiod_flags(agpio);
lookup->info.polarity = lookup->active_low;
}
+
+ lookup->info.flags = acpi_gpio_to_gpiod_flags(agpio, lookup->info.polarity);
}
return 1;
@@ -794,7 +823,7 @@ static struct gpio_desc *acpi_get_gpiod_by_index(struct acpi_device *adev,
if (ret)
return ERR_PTR(ret);
- dev_dbg(&adev->dev, "GPIO: _DSD returned %s %d %d %u\n",
+ dev_dbg(&adev->dev, "GPIO: _DSD returned %s %d %u %u\n",
dev_name(&lookup.info.adev->dev), lookup.index,
lookup.pin_index, lookup.active_low);
} else {
@@ -942,6 +971,7 @@ int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
if (info.gpioint && idx++ == index) {
unsigned long lflags = GPIO_LOOKUP_FLAGS_DEFAULT;
+ enum gpiod_flags dflags = GPIOD_ASIS;
char label[32];
int irq;
@@ -952,11 +982,18 @@ int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
if (irq < 0)
return irq;
+ acpi_gpio_update_gpiod_flags(&dflags, &info);
+ acpi_gpio_update_gpiod_lookup_flags(&lflags, &info);
+
snprintf(label, sizeof(label), "GpioInt() %d", index);
- ret = gpiod_configure_flags(desc, label, lflags, info.flags);
+ ret = gpiod_configure_flags(desc, label, lflags, dflags);
if (ret < 0)
return ret;
+ ret = gpio_set_debounce_timeout(desc, info.debounce);
+ if (ret)
+ return ret;
+
irq_flags = acpi_dev_get_irq_type(info.triggering,
info.polarity);
@@ -982,7 +1019,7 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
struct gpio_chip *chip = achip->chip;
struct acpi_resource_gpio *agpio;
struct acpi_resource *ares;
- int pin_index = (int)address;
+ u16 pin_index = address;
acpi_status status;
int length;
int i;
@@ -1005,7 +1042,7 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
return AE_BAD_PARAMETER;
}
- length = min(agpio->pin_table_length, (u16)(pin_index + bits));
+ length = min_t(u16, agpio->pin_table_length, pin_index + bits);
for (i = pin_index; i < length; ++i) {
int pin = agpio->pin_table[i];
struct acpi_gpio_connection *conn;
@@ -1042,23 +1079,18 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
}
if (!found) {
- enum gpiod_flags flags = acpi_gpio_to_gpiod_flags(agpio);
- const char *label = "ACPI:OpRegion";
-
- desc = gpiochip_request_own_desc(chip, pin, label,
- GPIO_ACTIVE_HIGH,
- flags);
+ desc = acpi_request_own_gpiod(chip, agpio, i, "ACPI:OpRegion");
if (IS_ERR(desc)) {
- status = AE_ERROR;
mutex_unlock(&achip->conn_lock);
+ status = AE_ERROR;
goto out;
}
conn = kzalloc(sizeof(*conn), GFP_KERNEL);
if (!conn) {
- status = AE_NO_MEMORY;
gpiochip_free_own_desc(desc);
mutex_unlock(&achip->conn_lock);
+ status = AE_NO_MEMORY;
goto out;
}
@@ -1070,8 +1102,7 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
mutex_unlock(&achip->conn_lock);
if (function == ACPI_WRITE)
- gpiod_set_raw_value_cansleep(desc,
- !!((1 << i) & *value));
+ gpiod_set_raw_value_cansleep(desc, !!(*value & BIT(i)));
else
*value |= (u64)gpiod_get_raw_value_cansleep(desc) << i;
}
@@ -1132,7 +1163,7 @@ acpi_gpiochip_parse_own_gpio(struct acpi_gpio_chip *achip,
int ret;
*lflags = GPIO_LOOKUP_FLAGS_DEFAULT;
- *dflags = 0;
+ *dflags = GPIOD_ASIS;
*name = NULL;
ret = fwnode_property_read_u32_array(fwnode, "gpios", gpios,
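
The deduction in acpi_gpio_to_gpiod_flags() above combines the pull with the polarity: the physical default level follows the pull, and GPIO_ACTIVE_LOW inverts the logical flag handed to gpiolib. A worked restatement of the same logic:

static enum gpiod_flags example_deduce(int pin_config, int polarity)
{
	bool pulled_high = pin_config == ACPI_PIN_CONFIG_PULLUP;
	bool active_low = polarity == GPIO_ACTIVE_LOW;

	if (pin_config != ACPI_PIN_CONFIG_PULLUP &&
	    pin_config != ACPI_PIN_CONFIG_PULLDOWN)
		return GPIOD_ASIS;

	/* logical high when the physical level matches the polarity */
	return pulled_high != active_low ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
}
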
diff --git a/drivers/gpio/gpiolib-acpi.h b/drivers/gpio/gpiolib-acpi.h
index 1c6d65cf0629..e2edb632b2cc 100644
--- a/drivers/gpio/gpiolib-acpi.h
+++ b/drivers/gpio/gpiolib-acpi.h
@@ -18,6 +18,7 @@ struct acpi_device;
* @pin_config: pin bias as provided by ACPI
* @polarity: interrupt polarity as provided by ACPI
* @triggering: triggering type as provided by ACPI
+ * @debounce: debounce timeout as provided by ACPI
* @quirks: Linux specific quirks as provided by struct acpi_gpio_mapping
*/
struct acpi_gpio_info {
@@ -27,6 +28,7 @@ struct acpi_gpio_info {
int pin_config;
int polarity;
int triggering;
+ unsigned int debounce;
unsigned int quirks;
};
diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index e9faeaf65d14..1631727bf0da 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -428,6 +428,12 @@ struct line {
*/
struct linereq *req;
unsigned int irq;
+ /*
+ * eflags is set by edge_detector_setup(), edge_detector_stop() and
+ * edge_detector_update(), which are themselves mutually exclusive,
+ * and is accessed by edge_irq_thread() and debounce_work_func(),
+ * which can both live with a slightly stale value.
+ */
u64 eflags;
/*
* timestamp_ns and req_seqno are accessed only by
@@ -504,11 +510,14 @@ struct linereq {
(GPIO_V2_LINE_FLAG_EDGE_RISING | \
GPIO_V2_LINE_FLAG_EDGE_FALLING)
+#define GPIO_V2_LINE_FLAG_EDGE_BOTH GPIO_V2_LINE_EDGE_FLAGS
+
#define GPIO_V2_LINE_VALID_FLAGS \
(GPIO_V2_LINE_FLAG_ACTIVE_LOW | \
GPIO_V2_LINE_DIRECTION_FLAGS | \
GPIO_V2_LINE_DRIVE_FLAGS | \
GPIO_V2_LINE_EDGE_FLAGS | \
+ GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME | \
GPIO_V2_LINE_BIAS_FLAGS)
static void linereq_put_event(struct linereq *lr,
@@ -529,11 +538,20 @@ static void linereq_put_event(struct linereq *lr,
pr_debug_ratelimited("event FIFO is full - event dropped\n");
}
+static u64 line_event_timestamp(struct line *line)
+{
+ if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &line->desc->flags))
+ return ktime_get_real_ns();
+
+ return ktime_get_ns();
+}
+
static irqreturn_t edge_irq_thread(int irq, void *p)
{
struct line *line = p;
struct linereq *lr = line->req;
struct gpio_v2_line_event le;
+ u64 eflags;
/* Do not leak kernel stack to userspace */
memset(&le, 0, sizeof(le));
@@ -546,14 +564,14 @@ static irqreturn_t edge_irq_thread(int irq, void *p)
* which case we didn't get the timestamp from
* edge_irq_handler().
*/
- le.timestamp_ns = ktime_get_ns();
+ le.timestamp_ns = line_event_timestamp(line);
if (lr->num_lines != 1)
line->req_seqno = atomic_inc_return(&lr->seqno);
}
line->timestamp_ns = 0;
- if (line->eflags == (GPIO_V2_LINE_FLAG_EDGE_RISING |
- GPIO_V2_LINE_FLAG_EDGE_FALLING)) {
+ eflags = READ_ONCE(line->eflags);
+ if (eflags == GPIO_V2_LINE_FLAG_EDGE_BOTH) {
int level = gpiod_get_value_cansleep(line->desc);
if (level)
@@ -562,10 +580,10 @@ static irqreturn_t edge_irq_thread(int irq, void *p)
else
/* Emit high-to-low event */
le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
- } else if (line->eflags == GPIO_V2_LINE_FLAG_EDGE_RISING) {
+ } else if (eflags == GPIO_V2_LINE_FLAG_EDGE_RISING) {
/* Emit low-to-high event */
le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
- } else if (line->eflags == GPIO_V2_LINE_FLAG_EDGE_FALLING) {
+ } else if (eflags == GPIO_V2_LINE_FLAG_EDGE_FALLING) {
/* Emit high-to-low event */
le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
} else {
@@ -590,7 +608,7 @@ static irqreturn_t edge_irq_handler(int irq, void *p)
* Just store the timestamp in hardirq context so we get it as
* close in time as possible to the actual event.
*/
- line->timestamp_ns = ktime_get_ns();
+ line->timestamp_ns = line_event_timestamp(line);
if (lr->num_lines != 1)
line->req_seqno = atomic_inc_return(&lr->seqno);
@@ -634,6 +652,7 @@ static void debounce_work_func(struct work_struct *work)
struct line *line = container_of(work, struct line, work.work);
struct linereq *lr;
int level;
+ u64 eflags;
level = gpiod_get_raw_value_cansleep(line->desc);
if (level < 0) {
@@ -647,7 +666,8 @@ static void debounce_work_func(struct work_struct *work)
WRITE_ONCE(line->level, level);
/* -- edge detection -- */
- if (!line->eflags)
+ eflags = READ_ONCE(line->eflags);
+ if (!eflags)
return;
/* switch from physical level to logical - if they differ */
@@ -655,15 +675,15 @@ static void debounce_work_func(struct work_struct *work)
level = !level;
/* ignore edges that are not being monitored */
- if (((line->eflags == GPIO_V2_LINE_FLAG_EDGE_RISING) && !level) ||
- ((line->eflags == GPIO_V2_LINE_FLAG_EDGE_FALLING) && level))
+ if (((eflags == GPIO_V2_LINE_FLAG_EDGE_RISING) && !level) ||
+ ((eflags == GPIO_V2_LINE_FLAG_EDGE_FALLING) && level))
return;
/* Do not leak kernel stack to userspace */
memset(&le, 0, sizeof(le));
lr = line->req;
- le.timestamp_ns = ktime_get_ns();
+ le.timestamp_ns = line_event_timestamp(line);
le.offset = gpio_chip_hwgpio(line->desc);
line->line_seqno++;
le.line_seqno = line->line_seqno;
@@ -755,7 +775,9 @@ static void edge_detector_stop(struct line *line)
cancel_delayed_work_sync(&line->work);
WRITE_ONCE(line->sw_debounced, 0);
- line->eflags = 0;
+ WRITE_ONCE(line->eflags, 0);
+ if (line->desc)
+ WRITE_ONCE(line->desc->debounce_period_us, 0);
/* do not change line->level - see comment in debounced_value() */
}
@@ -774,7 +796,7 @@ static int edge_detector_setup(struct line *line,
if (ret)
return ret;
}
- line->eflags = eflags;
+ WRITE_ONCE(line->eflags, eflags);
if (gpio_v2_line_config_debounced(lc, line_idx)) {
debounce_period_us = gpio_v2_line_config_debounce_period(lc, line_idx);
ret = debounce_setup(line, debounce_period_us);
@@ -817,13 +839,13 @@ static int edge_detector_update(struct line *line,
unsigned int debounce_period_us =
gpio_v2_line_config_debounce_period(lc, line_idx);
- if ((line->eflags == eflags) && !polarity_change &&
+ if ((READ_ONCE(line->eflags) == eflags) && !polarity_change &&
(READ_ONCE(line->desc->debounce_period_us) == debounce_period_us))
return 0;
/* sw debounced and still will be...*/
if (debounce_period_us && READ_ONCE(line->sw_debounced)) {
- line->eflags = eflags;
+ WRITE_ONCE(line->eflags, eflags);
WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
return 0;
}
@@ -967,6 +989,9 @@ static void gpio_v2_line_config_flags_to_desc_flags(u64 flags,
flags & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN);
assign_bit(FLAG_BIAS_DISABLE, flagsp,
flags & GPIO_V2_LINE_FLAG_BIAS_DISABLED);
+
+ assign_bit(FLAG_EVENT_CLOCK_REALTIME, flagsp,
+ flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME);
}
static long linereq_get_values(struct linereq *lr, void __user *ip)
@@ -1479,21 +1504,10 @@ static __poll_t lineevent_poll(struct file *file,
return events;
}
-static ssize_t lineevent_get_size(void)
-{
-#if defined(CONFIG_X86_64) && !defined(CONFIG_UML)
- /* i386 has no padding after 'id' */
- if (in_ia32_syscall()) {
- struct compat_gpioeevent_data {
- compat_u64 timestamp;
- u32 id;
- };
-
- return sizeof(struct compat_gpioeevent_data);
- }
-#endif
- return sizeof(struct gpioevent_data);
-}
+struct compat_gpioeevent_data {
+ compat_u64 timestamp;
+ u32 id;
+};
static ssize_t lineevent_read(struct file *file,
char __user *buf,
@@ -1515,7 +1529,10 @@ static ssize_t lineevent_read(struct file *file,
* actual sizeof() and pass this as an argument to copy_to_user() to
* drop unneeded bytes from the output.
*/
- ge_size = lineevent_get_size();
+ if (compat_need_64bit_alignment_fixup())
+ ge_size = sizeof(struct compat_gpioeevent_data);
+ else
+ ge_size = sizeof(struct gpioevent_data);
if (count < ge_size)
return -EINVAL;
@@ -1910,6 +1927,7 @@ static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
test_bit(FLAG_USED_AS_IRQ, &desc->flags) ||
test_bit(FLAG_EXPORT, &desc->flags) ||
test_bit(FLAG_SYSFS, &desc->flags) ||
+ !gpiochip_line_is_valid(gc, info->offset) ||
!ok_for_pinctrl)
info->flags |= GPIO_V2_LINE_FLAG_USED;
@@ -1938,6 +1956,9 @@ static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
if (test_bit(FLAG_EDGE_FALLING, &desc->flags))
info->flags |= GPIO_V2_LINE_FLAG_EDGE_FALLING;
+ if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &desc->flags))
+ info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME;
+
debounce_period_us = READ_ONCE(desc->debounce_period_us);
if (debounce_period_us) {
info->attrs[num_attrs].id = GPIO_V2_LINE_ATTR_ID_DEBOUNCE;
@@ -1960,6 +1981,21 @@ struct gpio_chardev_data {
#endif
};
+static int chipinfo_get(struct gpio_chardev_data *cdev, void __user *ip)
+{
+ struct gpio_device *gdev = cdev->gdev;
+ struct gpiochip_info chipinfo;
+
+ memset(&chipinfo, 0, sizeof(chipinfo));
+
+ strscpy(chipinfo.name, dev_name(&gdev->dev), sizeof(chipinfo.name));
+ strscpy(chipinfo.label, gdev->label, sizeof(chipinfo.label));
+ chipinfo.lines = gdev->ngpio;
+ if (copy_to_user(ip, &chipinfo, sizeof(chipinfo)))
+ return -EFAULT;
+ return 0;
+}
+
#ifdef CONFIG_GPIO_CDEV_V1
/*
* returns 0 if the versions match, else the previously selected ABI version
@@ -1974,6 +2010,41 @@ static int lineinfo_ensure_abi_version(struct gpio_chardev_data *cdata,
return abiv;
}
+
+static int lineinfo_get_v1(struct gpio_chardev_data *cdev, void __user *ip,
+ bool watch)
+{
+ struct gpio_desc *desc;
+ struct gpioline_info lineinfo;
+ struct gpio_v2_line_info lineinfo_v2;
+
+ if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
+ return -EFAULT;
+
+ /* this doubles as a range check on line_offset */
+ desc = gpiochip_get_desc(cdev->gdev->chip, lineinfo.line_offset);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ if (watch) {
+ if (lineinfo_ensure_abi_version(cdev, 1))
+ return -EPERM;
+
+ if (test_and_set_bit(lineinfo.line_offset, cdev->watched_lines))
+ return -EBUSY;
+ }
+
+ gpio_desc_to_lineinfo(desc, &lineinfo_v2);
+ gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo);
+
+ if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
+ if (watch)
+ clear_bit(lineinfo.line_offset, cdev->watched_lines);
+ return -EFAULT;
+ }
+
+ return 0;
+}
#endif
static int lineinfo_get(struct gpio_chardev_data *cdev, void __user *ip,
@@ -2011,6 +2082,22 @@ static int lineinfo_get(struct gpio_chardev_data *cdev, void __user *ip,
return 0;
}
+static int lineinfo_unwatch(struct gpio_chardev_data *cdev, void __user *ip)
+{
+ __u32 offset;
+
+ if (copy_from_user(&offset, ip, sizeof(offset)))
+ return -EFAULT;
+
+ if (offset >= cdev->gdev->ngpio)
+ return -EINVAL;
+
+ if (!test_and_clear_bit(offset, cdev->watched_lines))
+ return -EBUSY;
+
+ return 0;
+}
+
/*
* gpio_ioctl() - ioctl handler for the GPIO chardev
*/
@@ -2018,80 +2105,24 @@ static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct gpio_chardev_data *cdev = file->private_data;
struct gpio_device *gdev = cdev->gdev;
- struct gpio_chip *gc = gdev->chip;
void __user *ip = (void __user *)arg;
- __u32 offset;
/* We fail any subsequent ioctl():s when the chip is gone */
- if (!gc)
+ if (!gdev->chip)
return -ENODEV;
/* Fill in the struct and pass to userspace */
if (cmd == GPIO_GET_CHIPINFO_IOCTL) {
- struct gpiochip_info chipinfo;
-
- memset(&chipinfo, 0, sizeof(chipinfo));
-
- strscpy(chipinfo.name, dev_name(&gdev->dev),
- sizeof(chipinfo.name));
- strscpy(chipinfo.label, gdev->label,
- sizeof(chipinfo.label));
- chipinfo.lines = gdev->ngpio;
- if (copy_to_user(ip, &chipinfo, sizeof(chipinfo)))
- return -EFAULT;
- return 0;
+ return chipinfo_get(cdev, ip);
#ifdef CONFIG_GPIO_CDEV_V1
- } else if (cmd == GPIO_GET_LINEINFO_IOCTL) {
- struct gpio_desc *desc;
- struct gpioline_info lineinfo;
- struct gpio_v2_line_info lineinfo_v2;
-
- if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
- return -EFAULT;
-
- /* this doubles as a range check on line_offset */
- desc = gpiochip_get_desc(gc, lineinfo.line_offset);
- if (IS_ERR(desc))
- return PTR_ERR(desc);
-
- gpio_desc_to_lineinfo(desc, &lineinfo_v2);
- gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo);
-
- if (copy_to_user(ip, &lineinfo, sizeof(lineinfo)))
- return -EFAULT;
- return 0;
} else if (cmd == GPIO_GET_LINEHANDLE_IOCTL) {
return linehandle_create(gdev, ip);
} else if (cmd == GPIO_GET_LINEEVENT_IOCTL) {
return lineevent_create(gdev, ip);
- } else if (cmd == GPIO_GET_LINEINFO_WATCH_IOCTL) {
- struct gpio_desc *desc;
- struct gpioline_info lineinfo;
- struct gpio_v2_line_info lineinfo_v2;
-
- if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
- return -EFAULT;
-
- /* this doubles as a range check on line_offset */
- desc = gpiochip_get_desc(gc, lineinfo.line_offset);
- if (IS_ERR(desc))
- return PTR_ERR(desc);
-
- if (lineinfo_ensure_abi_version(cdev, 1))
- return -EPERM;
-
- if (test_and_set_bit(lineinfo.line_offset, cdev->watched_lines))
- return -EBUSY;
-
- gpio_desc_to_lineinfo(desc, &lineinfo_v2);
- gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo);
-
- if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
- clear_bit(lineinfo.line_offset, cdev->watched_lines);
- return -EFAULT;
- }
-
- return 0;
+ } else if (cmd == GPIO_GET_LINEINFO_IOCTL ||
+ cmd == GPIO_GET_LINEINFO_WATCH_IOCTL) {
+ return lineinfo_get_v1(cdev, ip,
+ cmd == GPIO_GET_LINEINFO_WATCH_IOCTL);
#endif /* CONFIG_GPIO_CDEV_V1 */
} else if (cmd == GPIO_V2_GET_LINEINFO_IOCTL ||
cmd == GPIO_V2_GET_LINEINFO_WATCH_IOCTL) {
@@ -2100,16 +2131,7 @@ static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
} else if (cmd == GPIO_V2_GET_LINE_IOCTL) {
return linereq_create(gdev, ip);
} else if (cmd == GPIO_GET_LINEINFO_UNWATCH_IOCTL) {
- if (copy_from_user(&offset, ip, sizeof(offset)))
- return -EFAULT;
-
- if (offset >= cdev->gdev->ngpio)
- return -EINVAL;
-
- if (!test_and_clear_bit(offset, cdev->watched_lines))
- return -EBUSY;
-
- return 0;
+ return lineinfo_unwatch(cdev, ip);
}
return -EINVAL;
}
diff --git a/drivers/gpio/gpiolib-devres.c b/drivers/gpio/gpiolib-devres.c
index 7dbce4c4ebdf..4a517e5dedf0 100644
--- a/drivers/gpio/gpiolib-devres.c
+++ b/drivers/gpio/gpiolib-devres.c
@@ -246,10 +246,8 @@ struct gpio_desc *__must_check devm_gpiod_get_index_optional(struct device *dev,
struct gpio_desc *desc;
desc = devm_gpiod_get_index(dev, con_id, index, flags);
- if (IS_ERR(desc)) {
- if (PTR_ERR(desc) == -ENOENT)
- return NULL;
- }
+ if (gpiod_not_found(desc))
+ return NULL;
return desc;
}
@@ -308,7 +306,7 @@ devm_gpiod_get_array_optional(struct device *dev, const char *con_id,
struct gpio_descs *descs;
descs = devm_gpiod_get_array(dev, con_id, flags);
- if (PTR_ERR(descs) == -ENOENT)
+ if (gpiod_not_found(descs))
return NULL;
return descs;
@@ -479,9 +477,9 @@ void devm_gpio_free(struct device *dev, unsigned int gpio)
}
EXPORT_SYMBOL_GPL(devm_gpio_free);
-static void devm_gpio_chip_release(struct device *dev, void *res)
+static void devm_gpio_chip_release(void *data)
{
- struct gpio_chip *gc = *(struct gpio_chip **)res;
+ struct gpio_chip *gc = data;
gpiochip_remove(gc);
}
@@ -507,23 +505,12 @@ int devm_gpiochip_add_data_with_key(struct device *dev, struct gpio_chip *gc, vo
struct lock_class_key *lock_key,
struct lock_class_key *request_key)
{
- struct gpio_chip **ptr;
int ret;
- ptr = devres_alloc(devm_gpio_chip_release, sizeof(*ptr),
- GFP_KERNEL);
- if (!ptr)
- return -ENOMEM;
-
ret = gpiochip_add_data_with_key(gc, data, lock_key, request_key);
- if (ret < 0) {
- devres_free(ptr);
+ if (ret < 0)
return ret;
- }
- *ptr = gc;
- devres_add(dev, ptr);
-
- return 0;
+ return devm_add_action_or_reset(dev, devm_gpio_chip_release, gc);
}
EXPORT_SYMBOL_GPL(devm_gpiochip_add_data_with_key);
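
devm_add_action_or_reset(), adopted above, removes the hand-rolled devres boilerplate: on success the callback runs at driver unbind, and if the registration itself fails the callback runs immediately, so no manual unwind is needed. The shape of the idiom, with hypothetical init/teardown helpers:

static void example_release(void *data)
{
	example_teardown(data);		/* hypothetical cleanup */
}

static int example_setup(struct device *dev, struct example *e)
{
	int ret = example_init(e);	/* hypothetical init */

	if (ret)
		return ret;

	/* runs example_release(e) on failure here, or at unbind */
	return devm_add_action_or_reset(dev, example_release, e);
}
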
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 2f895a2b8411..b4a71119a4b0 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -509,31 +509,31 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
desc = of_get_named_gpiod_flags(dev->of_node, prop_name, idx,
&of_flags);
- if (!IS_ERR(desc) || PTR_ERR(desc) != -ENOENT)
+ if (!gpiod_not_found(desc))
break;
}
- if (PTR_ERR(desc) == -ENOENT) {
+ if (gpiod_not_found(desc)) {
/* Special handling for SPI GPIOs if used */
desc = of_find_spi_gpio(dev, con_id, &of_flags);
}
- if (PTR_ERR(desc) == -ENOENT) {
+ if (gpiod_not_found(desc)) {
/* This quirk looks up flags and all */
desc = of_find_spi_cs_gpio(dev, con_id, idx, flags);
if (!IS_ERR(desc))
return desc;
}
- if (PTR_ERR(desc) == -ENOENT) {
+ if (gpiod_not_found(desc)) {
/* Special handling for regulator GPIOs if used */
desc = of_find_regulator_gpio(dev, con_id, &of_flags);
}
- if (PTR_ERR(desc) == -ENOENT)
+ if (gpiod_not_found(desc))
desc = of_find_arizona_gpio(dev, con_id, &of_flags);
- if (PTR_ERR(desc) == -ENOENT)
+ if (gpiod_not_found(desc))
desc = of_find_usb_gpio(dev, con_id, &of_flags);
if (IS_ERR(desc))
@@ -593,7 +593,7 @@ static struct gpio_desc *of_parse_own_gpio(struct device_node *np,
xlate_flags = 0;
*lflags = GPIO_LOOKUP_FLAGS_DEFAULT;
- *dflags = 0;
+ *dflags = GPIOD_ASIS;
ret = of_property_read_u32(chip_np, "#gpio-cells", &tmp);
if (ret)
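
The repeated PTR_ERR(desc) == -ENOENT tests above are collapsed into a gpiod_not_found() helper; its assumed shape (the helper itself lives in the gpiolib internals, outside this hunk, and takes a void pointer so it also works on gpio_descs arrays):

static inline bool gpiod_not_found(void *desc)
{
	return IS_ERR(desc) && PTR_ERR(desc) == -ENOENT;
}
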
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index 728f6c687182..26c5466b8179 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -476,7 +476,7 @@ static ssize_t export_store(struct class *class,
*/
status = gpiod_request(desc, "sysfs");
- if (status < 0) {
+ if (status) {
if (status == -EPROBE_DEFER)
status = -ENODEV;
goto done;
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 6e3c4d7a7d14..97eec8d8dbdc 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+
#include <linux/bitmap.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -119,7 +120,7 @@ struct gpio_desc *gpio_to_desc(unsigned gpio)
spin_unlock_irqrestore(&gpio_lock, flags);
if (!gpio_is_valid(gpio))
- WARN(1, "invalid GPIO %d\n", gpio);
+ pr_warn("invalid GPIO %d\n", gpio);
return NULL;
}
@@ -211,7 +212,7 @@ static int gpiochip_find_base(int ngpio)
int gpiod_get_direction(struct gpio_desc *desc)
{
struct gpio_chip *gc;
- unsigned offset;
+ unsigned int offset;
int ret;
gc = gpiod_to_chip(desc);
@@ -602,7 +603,11 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
ret = gdev->id;
goto err_free_gdev;
}
- dev_set_name(&gdev->dev, GPIOCHIP_NAME "%d", gdev->id);
+
+ ret = dev_set_name(&gdev->dev, GPIOCHIP_NAME "%d", gdev->id);
+ if (ret)
+ goto err_free_ida;
+
device_initialize(&gdev->dev);
dev_set_drvdata(&gdev->dev, gdev);
if (gc->parent && gc->parent->driver)
@@ -616,7 +621,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
gdev->descs = kcalloc(gc->ngpio, sizeof(gdev->descs[0]), GFP_KERNEL);
if (!gdev->descs) {
ret = -ENOMEM;
- goto err_free_ida;
+ goto err_free_dev_name;
}
if (gc->ngpio == 0) {
@@ -767,13 +772,17 @@ err_free_label:
kfree_const(gdev->label);
err_free_descs:
kfree(gdev->descs);
+err_free_dev_name:
+ kfree(dev_name(&gdev->dev));
err_free_ida:
ida_free(&gpio_ida, gdev->id);
err_free_gdev:
/* failures here can mean systems won't boot... */
- pr_err("%s: GPIOs %d..%d (%s) failed to register, %d\n", __func__,
- gdev->base, gdev->base + gdev->ngpio - 1,
- gc->label ? : "generic", ret);
+ if (ret != -EPROBE_DEFER) {
+ pr_err("%s: GPIOs %d..%d (%s) failed to register, %d\n", __func__,
+ gdev->base, gdev->base + gdev->ngpio - 1,
+ gc->label ? : "generic", ret);
+ }
kfree(gdev);
return ret;
}
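Editor's note: a condensed sketch of the unwind rule the new err_free_dev_name label encodes (struct my_gdev and my_ida are illustrative stand-ins). dev_set_name() kmallocs the name, so an error path that bails out before the device is ever handed to put_device() has to free it by hand:

static int my_add(struct my_gdev *gdev, int id)
{
        int ret;

        ret = dev_set_name(&gdev->dev, "gpiochip%d", id);
        if (ret)
                goto err_free_ida;

        gdev->descs = kcalloc(gdev->ngpio, sizeof(*gdev->descs), GFP_KERNEL);
        if (!gdev->descs) {
                ret = -ENOMEM;
                goto err_free_dev_name;
        }
        return 0;

err_free_dev_name:
        kfree(dev_name(&gdev->dev));    /* dev_set_name() allocated it */
err_free_ida:
        ida_free(&my_ida, id);
        return ret;
}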
@@ -936,67 +945,6 @@ bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gc,
}
EXPORT_SYMBOL_GPL(gpiochip_irqchip_irq_valid);
-/**
- * gpiochip_set_cascaded_irqchip() - connects a cascaded irqchip to a gpiochip
- * @gc: the gpiochip to set the irqchip chain to
- * @parent_irq: the irq number corresponding to the parent IRQ for this
- * cascaded irqchip
- * @parent_handler: the parent interrupt handler for the accumulated IRQ
- * coming out of the gpiochip. If the interrupt is nested rather than
- * cascaded, pass NULL in this handler argument
- */
-static void gpiochip_set_cascaded_irqchip(struct gpio_chip *gc,
- unsigned int parent_irq,
- irq_flow_handler_t parent_handler)
-{
- struct gpio_irq_chip *girq = &gc->irq;
- struct device *dev = &gc->gpiodev->dev;
-
- if (!girq->domain) {
- chip_err(gc, "called %s before setting up irqchip\n",
- __func__);
- return;
- }
-
- if (parent_handler) {
- if (gc->can_sleep) {
- chip_err(gc,
- "you cannot have chained interrupts on a chip that may sleep\n");
- return;
- }
- girq->parents = devm_kcalloc(dev, 1,
- sizeof(*girq->parents),
- GFP_KERNEL);
- if (!girq->parents) {
- chip_err(gc, "out of memory allocating parent IRQ\n");
- return;
- }
- girq->parents[0] = parent_irq;
- girq->num_parents = 1;
- /*
- * The parent irqchip is already using the chip_data for this
- * irqchip, so our callbacks simply use the handler_data.
- */
- irq_set_chained_handler_and_data(parent_irq, parent_handler,
- gc);
- }
-}
-
-/**
- * gpiochip_set_nested_irqchip() - connects a nested irqchip to a gpiochip
- * @gc: the gpiochip to set the irqchip nested handler to
- * @irqchip: the irqchip to nest to the gpiochip
- * @parent_irq: the irq number corresponding to the parent IRQ for this
- * nested irqchip
- */
-void gpiochip_set_nested_irqchip(struct gpio_chip *gc,
- struct irq_chip *irqchip,
- unsigned int parent_irq)
-{
- gpiochip_set_cascaded_irqchip(gc, parent_irq, NULL);
-}
-EXPORT_SYMBOL_GPL(gpiochip_set_nested_irqchip);
-
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
/**
@@ -1394,7 +1342,7 @@ void gpiochip_irq_domain_deactivate(struct irq_domain *domain,
}
EXPORT_SYMBOL_GPL(gpiochip_irq_domain_deactivate);
-static int gpiochip_to_irq(struct gpio_chip *gc, unsigned offset)
+static int gpiochip_to_irq(struct gpio_chip *gc, unsigned int offset)
{
struct irq_domain *domain = gc->irq.domain;
@@ -1477,7 +1425,8 @@ static void gpiochip_set_irq_hooks(struct gpio_chip *gc)
if (WARN_ON(gc->irq.irq_enable))
return;
/* Check if the irqchip already has this hook... */
- if (irqchip->irq_enable == gpiochip_irq_enable) {
+ if (irqchip->irq_enable == gpiochip_irq_enable ||
+ irqchip->irq_mask == gpiochip_irq_mask) {
/*
* ...and if so, give a gentle warning that this is bad
* practice.
@@ -1546,6 +1495,9 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc,
type = IRQ_TYPE_NONE;
}
+ if (gc->to_irq)
+ chip_warn(gc, "to_irq is redefined in %s and you shouldn't rely on it\n", __func__);
+
gc->to_irq = gpiochip_to_irq;
gc->irq.default_type = type;
gc->irq.lock_key = lock_key;
@@ -1648,98 +1600,6 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gc)
}
/**
- * gpiochip_irqchip_add_key() - adds an irqchip to a gpiochip
- * @gc: the gpiochip to add the irqchip to
- * @irqchip: the irqchip to add to the gpiochip
- * @first_irq: if not dynamically assigned, the base (first) IRQ to
- * allocate gpiochip irqs from
- * @handler: the irq handler to use (often a predefined irq core function)
- * @type: the default type for IRQs on this irqchip, pass IRQ_TYPE_NONE
- * to have the core avoid setting up any default type in the hardware.
- * @threaded: whether this irqchip uses a nested thread handler
- * @lock_key: lockdep class for IRQ lock
- * @request_key: lockdep class for IRQ request
- *
- * This function closely associates a certain irqchip with a certain
- * gpiochip, providing an irq domain to translate the local IRQs to
- * global irqs in the gpiolib core, and making sure that the gpiochip
- * is passed as chip data to all related functions. Driver callbacks
- * need to use gpiochip_get_data() to get their local state containers back
- * from the gpiochip passed as chip data. An irqdomain will be stored
- * in the gpiochip that shall be used by the driver to handle IRQ number
- * translation. The gpiochip will need to be initialized and registered
- * before calling this function.
- *
- * This function will handle two cell:ed simple IRQs and assumes all
- * the pins on the gpiochip can generate a unique IRQ. Everything else
- * need to be open coded.
- */
-int gpiochip_irqchip_add_key(struct gpio_chip *gc,
- struct irq_chip *irqchip,
- unsigned int first_irq,
- irq_flow_handler_t handler,
- unsigned int type,
- bool threaded,
- struct lock_class_key *lock_key,
- struct lock_class_key *request_key)
-{
- struct device_node *of_node;
-
- if (!gc || !irqchip)
- return -EINVAL;
-
- if (!gc->parent) {
- chip_err(gc, "missing gpiochip .dev parent pointer\n");
- return -EINVAL;
- }
- gc->irq.threaded = threaded;
- of_node = gc->parent->of_node;
-#ifdef CONFIG_OF_GPIO
- /*
- * If the gpiochip has an assigned OF node this takes precedence
- * FIXME: get rid of this and use gc->parent->of_node
- * everywhere
- */
- if (gc->of_node)
- of_node = gc->of_node;
-#endif
- /*
- * Specifying a default trigger is a terrible idea if DT or ACPI is
- * used to configure the interrupts, as you may end-up with
- * conflicting triggers. Tell the user, and reset to NONE.
- */
- if (WARN(of_node && type != IRQ_TYPE_NONE,
- "%pOF: Ignoring %d default trigger\n", of_node, type))
- type = IRQ_TYPE_NONE;
- if (has_acpi_companion(gc->parent) && type != IRQ_TYPE_NONE) {
- acpi_handle_warn(ACPI_HANDLE(gc->parent),
- "Ignoring %d default trigger\n", type);
- type = IRQ_TYPE_NONE;
- }
-
- gc->irq.chip = irqchip;
- gc->irq.handler = handler;
- gc->irq.default_type = type;
- gc->to_irq = gpiochip_to_irq;
- gc->irq.lock_key = lock_key;
- gc->irq.request_key = request_key;
- gc->irq.domain = irq_domain_add_simple(of_node,
- gc->ngpio, first_irq,
- &gpiochip_domain_ops, gc);
- if (!gc->irq.domain) {
- gc->irq.chip = NULL;
- return -EINVAL;
- }
-
- gpiochip_set_irq_hooks(gc);
-
- acpi_gpiochip_request_interrupts(gc);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(gpiochip_irqchip_add_key);
-
-/**
* gpiochip_irqchip_add_domain() - adds an irqdomain to a gpiochip
* @gc: the gpiochip to add the irqchip to
* @domain: the irqdomain to add to the gpiochip
@@ -1788,7 +1648,7 @@ static inline void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gc)
* @gc: the gpiochip owning the GPIO
* @offset: the offset of the GPIO to request for GPIO function
*/
-int gpiochip_generic_request(struct gpio_chip *gc, unsigned offset)
+int gpiochip_generic_request(struct gpio_chip *gc, unsigned int offset)
{
#ifdef CONFIG_PINCTRL
if (list_empty(&gc->gpiodev->pin_ranges))
@@ -1804,7 +1664,7 @@ EXPORT_SYMBOL_GPL(gpiochip_generic_request);
* @gc: the gpiochip to request the gpio function for
* @offset: the offset of the GPIO to free from GPIO function
*/
-void gpiochip_generic_free(struct gpio_chip *gc, unsigned offset)
+void gpiochip_generic_free(struct gpio_chip *gc, unsigned int offset)
{
#ifdef CONFIG_PINCTRL
if (list_empty(&gc->gpiodev->pin_ranges))
@@ -1821,7 +1681,7 @@ EXPORT_SYMBOL_GPL(gpiochip_generic_free);
* @offset: the offset of the GPIO to apply the configuration
* @config: the configuration to be applied
*/
-int gpiochip_generic_config(struct gpio_chip *gc, unsigned offset,
+int gpiochip_generic_config(struct gpio_chip *gc, unsigned int offset,
unsigned long config)
{
return pinctrl_gpio_set_config(gc->gpiodev->base + offset, config);
@@ -1985,11 +1845,9 @@ static int gpiod_request_commit(struct gpio_desc *desc, const char *label)
if (test_and_set_bit(FLAG_REQUESTED, &desc->flags) == 0) {
desc_set_label(desc, label ? : "?");
- ret = 0;
} else {
- kfree_const(label);
ret = -EBUSY;
- goto done;
+ goto out_free_unlock;
}
if (gc->request) {
@@ -2002,11 +1860,10 @@ static int gpiod_request_commit(struct gpio_desc *desc, const char *label)
ret = -EINVAL;
spin_lock_irqsave(&gpio_lock, flags);
- if (ret < 0) {
+ if (ret) {
desc_set_label(desc, NULL);
- kfree_const(label);
clear_bit(FLAG_REQUESTED, &desc->flags);
- goto done;
+ goto out_free_unlock;
}
}
if (gc->get_direction) {
@@ -2015,8 +1872,12 @@ static int gpiod_request_commit(struct gpio_desc *desc, const char *label)
gpiod_get_direction(desc);
spin_lock_irqsave(&gpio_lock, flags);
}
-done:
spin_unlock_irqrestore(&gpio_lock, flags);
+ return 0;
+
+out_free_unlock:
+ spin_unlock_irqrestore(&gpio_lock, flags);
+ kfree_const(label);
return ret;
}
@@ -2068,7 +1929,7 @@ int gpiod_request(struct gpio_desc *desc, const char *label)
if (try_module_get(gdev->owner)) {
ret = gpiod_request_commit(desc, label);
- if (ret < 0)
+ if (ret)
module_put(gdev->owner);
else
get_device(&gdev->dev);
@@ -2151,7 +2012,7 @@ void gpiod_free(struct gpio_desc *desc)
* help with diagnostics, and knowing that the signal is used as a GPIO
* can help avoid accidentally multiplexing it to another controller.
*/
-const char *gpiochip_is_requested(struct gpio_chip *gc, unsigned offset)
+const char *gpiochip_is_requested(struct gpio_chip *gc, unsigned int offset)
{
struct gpio_desc *desc;
@@ -2251,30 +2112,49 @@ static int gpio_do_set_config(struct gpio_chip *gc, unsigned int offset,
return gc->set_config(gc, offset, config);
}
-static int gpio_set_config(struct gpio_desc *desc, enum pin_config_param mode)
+static int gpio_set_config_with_argument(struct gpio_desc *desc,
+ enum pin_config_param mode,
+ u32 argument)
{
struct gpio_chip *gc = desc->gdev->chip;
unsigned long config;
- unsigned arg;
+
+ config = pinconf_to_config_packed(mode, argument);
+ return gpio_do_set_config(gc, gpio_chip_hwgpio(desc), config);
+}
+
+static int gpio_set_config_with_argument_optional(struct gpio_desc *desc,
+ enum pin_config_param mode,
+ u32 argument)
+{
+ struct device *dev = &desc->gdev->dev;
+ int gpio = gpio_chip_hwgpio(desc);
+ int ret;
+
+ ret = gpio_set_config_with_argument(desc, mode, argument);
+ if (ret != -ENOTSUPP)
+ return ret;
switch (mode) {
- case PIN_CONFIG_BIAS_PULL_DOWN:
- case PIN_CONFIG_BIAS_PULL_UP:
- arg = 1;
+ case PIN_CONFIG_PERSIST_STATE:
+ dev_dbg(dev, "Persistence not supported for GPIO %d\n", gpio);
break;
-
default:
- arg = 0;
+ break;
}
- config = PIN_CONF_PACKED(mode, arg);
- return gpio_do_set_config(gc, gpio_chip_hwgpio(desc), config);
+ return 0;
+}
+
+static int gpio_set_config(struct gpio_desc *desc, enum pin_config_param mode)
+{
+ return gpio_set_config_with_argument(desc, mode, 0);
}
static int gpio_set_bias(struct gpio_desc *desc)
{
- int bias = 0;
- int ret = 0;
+ enum pin_config_param bias;
+ unsigned int arg;
if (test_bit(FLAG_BIAS_DISABLE, &desc->flags))
bias = PIN_CONFIG_BIAS_DISABLE;
@@ -2282,13 +2162,28 @@ static int gpio_set_bias(struct gpio_desc *desc)
bias = PIN_CONFIG_BIAS_PULL_UP;
else if (test_bit(FLAG_PULL_DOWN, &desc->flags))
bias = PIN_CONFIG_BIAS_PULL_DOWN;
+ else
+ return 0;
- if (bias) {
- ret = gpio_set_config(desc, bias);
- if (ret != -ENOTSUPP)
- return ret;
+ switch (bias) {
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ case PIN_CONFIG_BIAS_PULL_UP:
+ arg = 1;
+ break;
+
+ default:
+ arg = 0;
+ break;
}
- return 0;
+
+ return gpio_set_config_with_argument_optional(desc, bias, arg);
+}
+
+int gpio_set_debounce_timeout(struct gpio_desc *desc, unsigned int debounce)
+{
+ return gpio_set_config_with_argument_optional(desc,
+ PIN_CONFIG_INPUT_DEBOUNCE,
+ debounce);
}
/**
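Editor's note: for reference, a sketch of the packed config word that gpio_set_config_with_argument() forwards to a driver's .set_config() hook; pinconf_to_config_param()/pinconf_to_config_argument() are the matching generic unpack helpers from pinconf-generic.h:

/* Pack a 5 ms (5000 us) debounce request into one unsigned long. */
unsigned long cfg = pinconf_to_config_packed(PIN_CONFIG_INPUT_DEBOUNCE, 5000);

/* A .set_config() implementation unpacks it the same way: */
enum pin_config_param param = pinconf_to_config_param(cfg); /* PIN_CONFIG_INPUT_DEBOUNCE */
u32 arg = pinconf_to_config_argument(cfg);                  /* 5000 */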
@@ -2510,7 +2405,7 @@ EXPORT_SYMBOL_GPL(gpiod_set_config);
* 0 on success, %-ENOTSUPP if the controller doesn't support setting the
* debounce time.
*/
-int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
+int gpiod_set_debounce(struct gpio_desc *desc, unsigned int debounce)
{
unsigned long config;
@@ -2529,11 +2424,6 @@ EXPORT_SYMBOL_GPL(gpiod_set_debounce);
*/
int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
{
- struct gpio_chip *gc;
- unsigned long packed;
- int gpio;
- int rc;
-
VALIDATE_DESC(desc);
/*
* Handle FLAG_TRANSITORY first, enabling queries to gpiolib for
@@ -2542,21 +2432,9 @@ int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
assign_bit(FLAG_TRANSITORY, &desc->flags, transitory);
/* If the driver supports it, set the persistence state now */
- gc = desc->gdev->chip;
- if (!gc->set_config)
- return 0;
-
- packed = pinconf_to_config_packed(PIN_CONFIG_PERSIST_STATE,
- !transitory);
- gpio = gpio_chip_hwgpio(desc);
- rc = gpio_do_set_config(gc, gpio, packed);
- if (rc == -ENOTSUPP) {
- dev_dbg(&desc->gdev->dev, "Persistence not supported for GPIO %d\n",
- gpio);
- return 0;
- }
-
- return rc;
+ return gpio_set_config_with_argument_optional(desc,
+ PIN_CONFIG_PERSIST_STATE,
+ !transitory);
}
EXPORT_SYMBOL_GPL(gpiod_set_transitory);
@@ -2679,7 +2557,7 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep,
struct gpio_chip *gc = desc_array[i]->gdev->chip;
unsigned long fastpath[2 * BITS_TO_LONGS(FASTPATH_NGPIO)];
unsigned long *mask, *bits;
- int first, j, ret;
+ int first, j;
if (likely(gc->ngpio <= FASTPATH_NGPIO)) {
mask = fastpath;
@@ -3784,7 +3662,7 @@ struct gpio_desc *fwnode_gpiod_get_index(struct fwnode_handle *fwnode,
desc = fwnode_get_named_gpiod(fwnode, prop_name, index, flags,
label);
- if (!IS_ERR(desc) || (PTR_ERR(desc) != -ENOENT))
+ if (!gpiod_not_found(desc))
break;
}
@@ -3960,7 +3838,7 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
* Either we are not using DT or ACPI, or their lookup did not return
* a result. In that case, use platform lookup as a fallback.
*/
- if (!desc || desc == ERR_PTR(-ENOENT)) {
+ if (!desc || gpiod_not_found(desc)) {
dev_dbg(dev, "using lookup tables for GPIO lookup\n");
desc = gpiod_find(dev, con_id, idx, &lookupflags);
}
@@ -3975,7 +3853,7 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
* the device name as label
*/
ret = gpiod_request(desc, con_id ? con_id : devname);
- if (ret < 0) {
+ if (ret) {
if (ret == -EBUSY && flags & GPIOD_FLAGS_BIT_NONEXCLUSIVE) {
/*
* This happens when there are several consumers for
@@ -4095,10 +3973,8 @@ struct gpio_desc *__must_check gpiod_get_index_optional(struct device *dev,
struct gpio_desc *desc;
desc = gpiod_get_index(dev, con_id, index, flags);
- if (IS_ERR(desc)) {
- if (PTR_ERR(desc) == -ENOENT)
- return NULL;
- }
+ if (gpiod_not_found(desc))
+ return NULL;
return desc;
}
@@ -4300,7 +4176,7 @@ struct gpio_descs *__must_check gpiod_get_array_optional(struct device *dev,
struct gpio_descs *descs;
descs = gpiod_get_array(dev, con_id, flags);
- if (PTR_ERR(descs) == -ENOENT)
+ if (gpiod_not_found(descs))
return NULL;
return descs;
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index b674b5bb980e..30bc3f80f83e 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -116,6 +116,7 @@ struct gpio_desc {
#define FLAG_BIAS_DISABLE 15 /* GPIO has pull disabled */
#define FLAG_EDGE_RISING 16 /* GPIO CDEV detects rising edge events */
#define FLAG_EDGE_FALLING 17 /* GPIO CDEV detects falling edge events */
+#define FLAG_EVENT_CLOCK_REALTIME 18 /* GPIO CDEV reports REALTIME timestamps in events */
/* Connection label */
const char *label;
@@ -130,10 +131,13 @@ struct gpio_desc {
#endif
};
+#define gpiod_not_found(desc) (IS_ERR(desc) && PTR_ERR(desc) == -ENOENT)
+
int gpiod_request(struct gpio_desc *desc, const char *label);
void gpiod_free(struct gpio_desc *desc);
int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
unsigned long lflags, enum gpiod_flags dflags);
+int gpio_set_debounce_timeout(struct gpio_desc *desc, unsigned int debounce);
int gpiod_hog(struct gpio_desc *desc, const char *name,
unsigned long lflags, enum gpiod_flags dflags);
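Editor's note: a small sketch of the contract the new gpiod_not_found() helper captures (my_lookup() is a hypothetical lookup function). ERR_PTR(-ENOENT) means "no entry, fall through to the next quirk or treat the GPIO as optional", while any other ERR_PTR must be propagated:

static int my_get_optional(struct device *dev, struct gpio_desc **out)
{
        struct gpio_desc *desc = my_lookup(dev);        /* hypothetical */

        if (gpiod_not_found(desc)) {
                *out = NULL;            /* absent: fine for optional GPIOs */
                return 0;
        }
        if (IS_ERR(desc))
                return PTR_ERR(desc);   /* real error, e.g. -EPROBE_DEFER */

        *out = desc;
        return 0;
}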
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 6e2953233231..5993dd0fdd8e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1024,6 +1024,7 @@ struct amdgpu_device {
/* enable runtime pm on the device */
bool runpm;
bool in_runpm;
+ bool has_pr3;
bool pm_sysfs_en;
bool ucode_sysfs_en;
@@ -1230,6 +1231,7 @@ void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
const u32 *registers,
const u32 array_size);
+bool amdgpu_device_supports_atpx(struct drm_device *dev);
bool amdgpu_device_supports_boco(struct drm_device *dev);
bool amdgpu_device_supports_baco(struct drm_device *dev);
bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
@@ -1280,6 +1282,8 @@ int amdgpu_enable_vblank_kms(struct drm_crtc *crtc);
void amdgpu_disable_vblank_kms(struct drm_crtc *crtc);
long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
+int amdgpu_info_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
/*
* functions used by amdgpu_encoder.c
@@ -1311,11 +1315,11 @@ int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
void amdgpu_acpi_get_backlight_caps(struct amdgpu_device *adev,
struct amdgpu_dm_backlight_caps *caps);
-bool amdgpu_acpi_is_s0ix_supported(void);
+bool amdgpu_acpi_is_s0ix_supported(struct amdgpu_device *adev);
#else
static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
-static inline bool amdgpu_acpi_is_s0ix_supported(void) { return false; }
+static inline bool amdgpu_acpi_is_s0ix_supported(struct amdgpu_device *adev) { return false; }
#endif
int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 4f4fda53c08a..8155c54392c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -901,10 +901,12 @@ void amdgpu_acpi_fini(struct amdgpu_device *adev)
*
* returns true if supported, false if not.
*/
-bool amdgpu_acpi_is_s0ix_supported(void)
+bool amdgpu_acpi_is_s0ix_supported(struct amdgpu_device *adev)
{
- if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)
- return true;
+ if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) {
+ if (adev->flags & AMD_IS_APU)
+ return true;
+ }
return false;
}
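Editor's note: the new gate is equivalent to the single expression below (sketch, same identifiers as the hunk): S0ix support is only reported when the firmware sets the FADT low-power-S0 flag and the GPU is an APU.

return (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) &&
       (adev->flags & AMD_IS_APU);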
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 7791d074bd32..d1ed4f8df2b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -26,6 +26,7 @@
#include <linux/sched/task.h>
#include "amdgpu_object.h"
+#include "amdgpu_gem.h"
#include "amdgpu_vm.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_dma_buf.h"
@@ -1152,7 +1153,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
struct sg_table *sg = NULL;
uint64_t user_addr = 0;
struct amdgpu_bo *bo;
- struct amdgpu_bo_param bp;
+ struct drm_gem_object *gobj;
u32 domain, alloc_domain;
u64 alloc_flags;
int ret;
@@ -1213,26 +1214,21 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg);
if (ret) {
- pr_debug("Insufficient system memory\n");
+ pr_debug("Insufficient memory\n");
goto err_reserve_limit;
}
pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
va, size, domain_string(alloc_domain));
- memset(&bp, 0, sizeof(bp));
- bp.size = size;
- bp.byte_align = 1;
- bp.domain = alloc_domain;
- bp.flags = alloc_flags;
- bp.type = bo_type;
- bp.resv = NULL;
- ret = amdgpu_bo_create(adev, &bp, &bo);
+ ret = amdgpu_gem_object_create(adev, size, 1, alloc_domain, alloc_flags,
+ bo_type, NULL, &gobj);
if (ret) {
pr_debug("Failed to create BO on domain %s. ret %d\n",
- domain_string(alloc_domain), ret);
+ domain_string(alloc_domain), ret);
goto err_bo_create;
}
+ bo = gem_to_amdgpu_bo(gobj);
if (bo_type == ttm_bo_type_sg) {
bo->tbo.sg = sg;
bo->tbo.ttm->sg = sg;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index 306077884a67..6107ac91db25 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -112,6 +112,7 @@ int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
union igp_info {
struct atom_integrated_system_info_v1_11 v11;
struct atom_integrated_system_info_v1_12 v12;
+ struct atom_integrated_system_info_v2_1 v21;
};
union umc_info {
@@ -209,24 +210,42 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
if (adev->flags & AMD_IS_APU) {
igp_info = (union igp_info *)
(mode_info->atom_context->bios + data_offset);
- switch (crev) {
- case 11:
- mem_channel_number = igp_info->v11.umachannelnumber;
- /* channel width is 64 */
- if (vram_width)
- *vram_width = mem_channel_number * 64;
- mem_type = igp_info->v11.memorytype;
- if (vram_type)
- *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ switch (frev) {
+ case 1:
+ switch (crev) {
+ case 11:
+ case 12:
+ mem_channel_number = igp_info->v11.umachannelnumber;
+ if (!mem_channel_number)
+ mem_channel_number = 1;
+ /* channel width is 64 */
+ if (vram_width)
+ *vram_width = mem_channel_number * 64;
+ mem_type = igp_info->v11.memorytype;
+ if (vram_type)
+ *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ break;
+ default:
+ return -EINVAL;
+ }
break;
- case 12:
- mem_channel_number = igp_info->v12.umachannelnumber;
- /* channel width is 64 */
- if (vram_width)
- *vram_width = mem_channel_number * 64;
- mem_type = igp_info->v12.memorytype;
- if (vram_type)
- *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ case 2:
+ switch (crev) {
+ case 1:
+ case 2:
+ mem_channel_number = igp_info->v21.umachannelnumber;
+ if (!mem_channel_number)
+ mem_channel_number = 1;
+ /* channel width is 64 */
+ if (vram_width)
+ *vram_width = mem_channel_number * 64;
+ mem_type = igp_info->v21.memorytype;
+ if (vram_type)
+ *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ break;
+ default:
+ return -EINVAL;
+ }
break;
default:
return -EINVAL;
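Editor's note: a worked instance of the width math both revision branches share. With the fixed 64-bit channel width, the reported VRAM bus width is the UMA channel count times 64, and the new zero-check keeps a bogus BIOS value from reporting a 0-bit bus:

unsigned int mem_channel_number = 2;    /* example value from the IGP table */
unsigned int vram_width;

if (!mem_channel_number)
        mem_channel_number = 1;         /* guard added by the hunk */

vram_width = mem_channel_number * 64;   /* 2 channels * 64 bits = 128-bit bus */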
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 65d1b23d7e74..b9c11c2b2885 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -1414,10 +1414,12 @@ out:
pm_runtime_put_autosuspend(connector->dev->dev);
}
- drm_dp_set_subconnector_property(&amdgpu_connector->base,
- ret,
- amdgpu_dig_connector->dpcd,
- amdgpu_dig_connector->downstream_ports);
+ if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+ connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+ drm_dp_set_subconnector_property(&amdgpu_connector->base,
+ ret,
+ amdgpu_dig_connector->dpcd,
+ amdgpu_dig_connector->downstream_ports);
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 79dd85f71fab..cab1ebaf6d62 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -81,7 +81,6 @@ MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vangogh_gpu_info.bin");
-MODULE_FIRMWARE("amdgpu/green_sardine_gpu_info.bin");
#define AMDGPU_RESUME_MS 2000
@@ -212,14 +211,14 @@ static DEVICE_ATTR(serial_number, S_IRUGO,
amdgpu_device_get_serial_number, NULL);
/**
- * amdgpu_device_supports_boco - Is the device a dGPU with HG/PX power control
+ * amdgpu_device_supports_atpx - Is the device a dGPU with HG/PX power control
*
* @dev: drm_device pointer
*
* Returns true if the device is a dGPU with HG/PX power control,
* otherwise return false.
*/
-bool amdgpu_device_supports_boco(struct drm_device *dev)
+bool amdgpu_device_supports_atpx(struct drm_device *dev)
{
struct amdgpu_device *adev = drm_to_adev(dev);
@@ -229,6 +228,23 @@ bool amdgpu_device_supports_boco(struct drm_device *dev)
}
/**
+ * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
+ *
+ * @dev: drm_device pointer
+ *
+ * Returns true if the device is a dGPU with ACPI power resources (_PR3),
+ * otherwise return false.
+ */
+bool amdgpu_device_supports_boco(struct drm_device *dev)
+{
+ struct amdgpu_device *adev = drm_to_adev(dev);
+
+ if (adev->has_pr3)
+ return true;
+ return false;
+}
+
+/**
* amdgpu_device_supports_baco - Does the device support BACO
*
* @dev: drm_device pointer
@@ -1398,7 +1414,7 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
struct drm_device *dev = pci_get_drvdata(pdev);
int r;
- if (amdgpu_device_supports_boco(dev) && state == VGA_SWITCHEROO_OFF)
+ if (amdgpu_device_supports_atpx(dev) && state == VGA_SWITCHEROO_OFF)
return;
if (state == VGA_SWITCHEROO_ON) {
@@ -2531,11 +2547,11 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
if (adev->gmc.xgmi.num_physical_nodes > 1)
amdgpu_xgmi_remove_device(adev);
- amdgpu_amdkfd_device_fini(adev);
-
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
+ amdgpu_amdkfd_device_fini(adev);
+
/* need to disable SMC first */
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.hw)
@@ -2650,7 +2666,7 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
{
int i, r;
- if (!amdgpu_acpi_is_s0ix_supported() || amdgpu_in_reset(adev)) {
+ if (!amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev)) {
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
}
@@ -3017,7 +3033,7 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
#endif
default:
if (amdgpu_dc > 0)
- DRM_INFO("Display Core has been requested via kernel parameter "
+ DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
"but isn't supported by ASIC, ignoring\n");
return false;
}
@@ -3177,7 +3193,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
struct drm_device *ddev = adev_to_drm(adev);
struct pci_dev *pdev = adev->pdev;
int r, i;
- bool boco = false;
+ bool atpx = false;
u32 max_MBps;
adev->shutdown = false;
@@ -3349,15 +3365,15 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
- if (amdgpu_device_supports_boco(ddev))
- boco = true;
+ if (amdgpu_device_supports_atpx(ddev))
+ atpx = true;
if (amdgpu_has_atpx() &&
(amdgpu_is_atpx_hybrid() ||
amdgpu_has_atpx_dgpu_power_cntl()) &&
!pci_is_thunderbolt_attached(adev->pdev))
vga_switcheroo_register_client(adev->pdev,
- &amdgpu_switcheroo_ops, boco);
- if (boco)
+ &amdgpu_switcheroo_ops, atpx);
+ if (atpx)
vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
if (amdgpu_emu_mode == 1) {
@@ -3540,7 +3556,7 @@ fence_driver_init:
failed:
amdgpu_vf_error_trans_all(adev);
- if (boco)
+ if (atpx)
vga_switcheroo_fini_domain_pm_ops(adev->dev);
failed_unmap:
@@ -3604,7 +3620,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
amdgpu_has_atpx_dgpu_power_cntl()) &&
!pci_is_thunderbolt_attached(adev->pdev))
vga_switcheroo_unregister_client(adev->pdev);
- if (amdgpu_device_supports_boco(adev_to_drm(adev)))
+ if (amdgpu_device_supports_atpx(adev_to_drm(adev)))
vga_switcheroo_fini_domain_pm_ops(adev->dev);
if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
vga_client_register(adev->pdev, NULL, NULL, NULL);
@@ -3710,7 +3726,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
amdgpu_fence_driver_suspend(adev);
- if (!amdgpu_acpi_is_s0ix_supported() || amdgpu_in_reset(adev))
+ if (!amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev))
r = amdgpu_device_ip_suspend_phase2(adev);
else
amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry);
@@ -3744,7 +3760,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- if (amdgpu_acpi_is_s0ix_supported())
+ if (amdgpu_acpi_is_s0ix_supported(adev))
amdgpu_gfx_state_change_set(adev, sGpuChangeState_D0Entry);
/* post card */
@@ -5052,8 +5068,7 @@ out:
* @pdev: pointer to PCI device
*
* Called when the error recovery driver tells us that it's
- * OK to resume normal operation. Use completion to allow
- * halted scsi ops to resume.
+ * OK to resume normal operation.
*/
void amdgpu_pci_resume(struct pci_dev *pdev)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index f764803c53a4..48cb33e5b382 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -926,8 +926,10 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
- struct drm_gem_object *obj;
struct amdgpu_framebuffer *amdgpu_fb;
+ struct drm_gem_object *obj;
+ struct amdgpu_bo *bo;
+ uint32_t domains;
int ret;
obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
@@ -938,7 +940,9 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
}
/* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
- if (obj->import_attach) {
+ bo = gem_to_amdgpu_bo(obj);
+ domains = amdgpu_display_supported_domains(drm_to_adev(dev), bo->flags);
+ if (obj->import_attach && !(domains & AMDGPU_GEM_DOMAIN_GTT)) {
drm_dbg_kms(dev, "Cannot create framebuffer from imported dma_buf\n");
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index ebdab31f9de9..7169fb5e3d9c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -1085,6 +1085,8 @@ static const struct pci_device_id pciidlist[] = {
/* Renoir */
{0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
+ {0x1002, 0x1638, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
+ {0x1002, 0x164C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
/* Navi12 */
{0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12},
@@ -1340,7 +1342,7 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
}
adev->in_runpm = true;
- if (amdgpu_device_supports_boco(drm_dev))
+ if (amdgpu_device_supports_atpx(drm_dev))
drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
drm_kms_helper_poll_disable(drm_dev);
@@ -1348,13 +1350,11 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
if (ret)
return ret;
- if (amdgpu_device_supports_boco(drm_dev)) {
+ if (amdgpu_device_supports_atpx(drm_dev)) {
/* Only need to handle PCI state in the driver for ATPX
* PCI core handles it for _PR3.
*/
- if (amdgpu_is_atpx_hybrid()) {
- pci_ignore_hotplug(pdev);
- } else {
+ if (!amdgpu_is_atpx_hybrid()) {
amdgpu_device_cache_pci_state(pdev);
pci_disable_device(pdev);
pci_ignore_hotplug(pdev);
@@ -1378,28 +1378,31 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
if (!adev->runpm)
return -EINVAL;
- if (amdgpu_device_supports_boco(drm_dev)) {
+ if (amdgpu_device_supports_atpx(drm_dev)) {
drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
/* Only need to handle PCI state in the driver for ATPX
* PCI core handles it for _PR3.
*/
- if (amdgpu_is_atpx_hybrid()) {
- pci_set_master(pdev);
- } else {
+ if (!amdgpu_is_atpx_hybrid()) {
pci_set_power_state(pdev, PCI_D0);
amdgpu_device_load_pci_state(pdev);
ret = pci_enable_device(pdev);
if (ret)
return ret;
- pci_set_master(pdev);
}
+ pci_set_master(pdev);
+ } else if (amdgpu_device_supports_boco(drm_dev)) {
+ /* Only need to handle PCI state in the driver for ATPX
+ * PCI core handles it for _PR3.
+ */
+ pci_set_master(pdev);
} else if (amdgpu_device_supports_baco(drm_dev)) {
amdgpu_device_baco_exit(drm_dev);
}
ret = amdgpu_device_resume(drm_dev, false);
drm_kms_helper_poll_enable(drm_dev);
- if (amdgpu_device_supports_boco(drm_dev))
+ if (amdgpu_device_supports_atpx(drm_dev))
drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
adev->in_runpm = false;
return 0;
@@ -1533,8 +1536,6 @@ int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv)
return 0;
}
-int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
-
const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index d0a1fee1f5f6..174a73eb23f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -269,8 +269,8 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
resv = vm->root.base.bo->tbo.base.resv;
}
-retry:
initial_domain = (u32)(0xffffffff & args->in.domains);
+retry:
r = amdgpu_gem_object_create(adev, size, args->in.alignment,
initial_domain,
flags, ttm_bo_type_device, resv, &gobj);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 02af47ddddbc..6e679db5e46f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -496,13 +496,15 @@ void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev)
break;
}
- if (!amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_DCE))
+ if (amdgpu_sriov_vf(adev) ||
+ !amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_DCE)) {
size = 0;
- else
+ } else {
size = amdgpu_gmc_get_vbios_fb_size(adev);
- if (adev->mman.keep_stolen_vga_memory)
- size = max(size, (unsigned)AMDGPU_VBIOS_VGA_ALLOCATION);
+ if (adev->mman.keep_stolen_vga_memory)
+ size = max(size, (unsigned)AMDGPU_VBIOS_VGA_ALLOCATION);
+ }
/* set to 0 if the pre-OS buffer uses up most of vram */
if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index fc12fc72366f..b16b32797624 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -133,6 +133,7 @@ void amdgpu_register_gpu_instance(struct amdgpu_device *adev)
int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
{
struct drm_device *dev;
+ struct pci_dev *parent;
int r, acpi_status;
dev = adev_to_drm(adev);
@@ -144,6 +145,9 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
!pci_is_thunderbolt_attached(dev->pdev))
flags |= AMD_IS_PX;
+ parent = pci_upstream_bridge(adev->pdev);
+ adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
+
/* amdgpu_device_init should report only fatal error
* like memory allocation failure or iomapping failure,
* or memory manager initialization failure, it must
@@ -156,9 +160,14 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
goto out;
}
- if (amdgpu_device_supports_boco(dev) &&
- (amdgpu_runtime_pm != 0)) { /* enable runpm by default for boco */
+ if (amdgpu_device_supports_atpx(dev) &&
+ (amdgpu_runtime_pm != 0)) { /* enable runpm by default for atpx */
+ adev->runpm = true;
+ dev_info(adev->dev, "Using ATPX for runtime pm\n");
+ } else if (amdgpu_device_supports_boco(dev) &&
+ (amdgpu_runtime_pm != 0)) { /* enable runpm by default for boco */
adev->runpm = true;
+ dev_info(adev->dev, "Using BOCO for runtime pm\n");
} else if (amdgpu_device_supports_baco(dev) &&
(amdgpu_runtime_pm != 0)) {
switch (adev->asic_type) {
@@ -180,6 +189,8 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
adev->runpm = true;
break;
}
+ if (adev->runpm)
+ dev_info(adev->dev, "Using BACO for runtime pm\n");
}
/* Call ACPI methods: require modeset init
@@ -192,7 +203,7 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
if (adev->runpm) {
/* only need to skip on ATPX */
- if (amdgpu_device_supports_boco(dev) &&
+ if (amdgpu_device_supports_atpx(dev) &&
!amdgpu_is_atpx_hybrid())
dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
pm_runtime_use_autosuspend(dev->dev);
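Editor's note: the runtime-pm backend selection order the hunks above establish, condensed (function names from the diff; the string is purely illustrative):

const char *backend = NULL;

if (amdgpu_device_supports_atpx(dev))
        backend = "ATPX";       /* vendor ACPI methods on HG/PX dGPUs */
else if (amdgpu_device_supports_boco(dev))
        backend = "BOCO";       /* standard ACPI _PR3 power resources */
else if (amdgpu_device_supports_baco(dev))
        backend = "BACO";       /* bus active, chip off, driven by the GPU itself */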
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 25ec4d57333f..b4c8e5d5c763 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -897,7 +897,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
return -EINVAL;
/* A shared bo cannot be migrated to VRAM */
- if (bo->prime_shared_count) {
+ if (bo->prime_shared_count || bo->tbo.base.import_attach) {
if (domain & AMDGPU_GEM_DOMAIN_GTT)
domain = AMDGPU_GEM_DOMAIN_GTT;
else
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 523d22db094b..347fec669424 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -563,7 +563,7 @@ static int psp_asd_load(struct psp_context *psp)
* add workaround to bypass it for sriov now.
* TODO: add version check to make it common
*/
- if (amdgpu_sriov_vf(psp->adev) || !psp->asd_fw)
+ if (amdgpu_sriov_vf(psp->adev) || !psp->asd_ucode_size)
return 0;
cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
@@ -1315,8 +1315,12 @@ static int psp_hdcp_terminate(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev))
return 0;
- if (!psp->hdcp_context.hdcp_initialized)
- return 0;
+ if (!psp->hdcp_context.hdcp_initialized) {
+ if (psp->hdcp_context.hdcp_shared_buf)
+ goto out;
+ else
+ return 0;
+ }
ret = psp_hdcp_unload(psp);
if (ret)
@@ -1324,6 +1328,7 @@ static int psp_hdcp_terminate(struct psp_context *psp)
psp->hdcp_context.hdcp_initialized = false;
+out:
/* free hdcp shared memory */
amdgpu_bo_free_kernel(&psp->hdcp_context.hdcp_shared_bo,
&psp->hdcp_context.hdcp_shared_mc_addr,
@@ -1462,8 +1467,12 @@ static int psp_dtm_terminate(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev))
return 0;
- if (!psp->dtm_context.dtm_initialized)
- return 0;
+ if (!psp->dtm_context.dtm_initialized) {
+ if (psp->dtm_context.dtm_shared_buf)
+ goto out;
+ else
+ return 0;
+ }
ret = psp_dtm_unload(psp);
if (ret)
@@ -1471,6 +1480,7 @@ static int psp_dtm_terminate(struct psp_context *psp)
psp->dtm_context.dtm_initialized = false;
+out:
/* free hdcp shared memory */
amdgpu_bo_free_kernel(&psp->dtm_context.dtm_shared_bo,
&psp->dtm_context.dtm_shared_mc_addr,
@@ -2589,11 +2599,10 @@ static int parse_ta_bin_descriptor(struct psp_context *psp,
switch (desc->fw_type) {
case TA_FW_TYPE_PSP_ASD:
- psp->asd_fw_version = le32_to_cpu(desc->fw_version);
+ psp->asd_fw_version = le32_to_cpu(desc->fw_version);
psp->asd_feature_version = le32_to_cpu(desc->fw_version);
- psp->asd_ucode_size = le32_to_cpu(desc->size_bytes);
+ psp->asd_ucode_size = le32_to_cpu(desc->size_bytes);
psp->asd_start_addr = ucode_start_addr;
- psp->asd_fw = psp->ta_fw;
break;
case TA_FW_TYPE_PSP_XGMI:
psp->ta_xgmi_ucode_version = le32_to_cpu(desc->fw_version);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index c136bd449744..82e952696d24 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -1518,7 +1518,7 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_err_handler_data *data;
int i = 0;
- int ret = 0;
+ int ret = 0, status;
if (!con || !con->eh_data || !bps || !count)
return -EINVAL;
@@ -1543,12 +1543,12 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
.size = AMDGPU_GPU_PAGE_SIZE,
.flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
};
- ret = amdgpu_vram_mgr_query_page_status(
+ status = amdgpu_vram_mgr_query_page_status(
ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM),
data->bps[i].retired_page);
- if (ret == -EBUSY)
+ if (status == -EBUSY)
(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
- else if (ret == -ENOENT)
+ else if (status == -ENOENT)
(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index 1dd040166c63..19d9aa76cfbf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -30,6 +30,7 @@
#define EEPROM_I2C_TARGET_ADDR_VEGA20 0xA0
#define EEPROM_I2C_TARGET_ADDR_ARCTURUS 0xA8
#define EEPROM_I2C_TARGET_ADDR_ARCTURUS_D342 0xA0
+#define EEPROM_I2C_TARGET_ADDR_SIENNA_CICHLID 0xA0
/*
* The 2 macros below represent the actual size in bytes that
@@ -62,7 +63,8 @@
static bool __is_ras_eeprom_supported(struct amdgpu_device *adev)
{
if ((adev->asic_type == CHIP_VEGA20) ||
- (adev->asic_type == CHIP_ARCTURUS))
+ (adev->asic_type == CHIP_ARCTURUS) ||
+ (adev->asic_type == CHIP_SIENNA_CICHLID))
return true;
return false;
@@ -100,6 +102,10 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
case CHIP_ARCTURUS:
return __get_eeprom_i2c_addr_arct(adev, i2c_addr);
+ case CHIP_SIENNA_CICHLID:
+ *i2c_addr = EEPROM_I2C_TARGET_ADDR_SIENNA_CICHLID;
+ break;
+
default:
return false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 324d5e3f3579..6752d8b13118 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -358,10 +358,11 @@ TRACE_EVENT(amdgpu_vm_update_ptes,
}
),
TP_printk("pid:%u vm_ctx:0x%llx start:0x%010llx end:0x%010llx,"
- " flags:0x%llx, incr:%llu, dst:\n%s", __entry->pid,
+ " flags:0x%llx, incr:%llu, dst:\n%s%s", __entry->pid,
__entry->vm_ctx, __entry->start, __entry->end,
__entry->flags, __entry->incr, __print_array(
- __get_dynamic_array(dst), __entry->nptes, 8))
+ __get_dynamic_array(dst), min(__entry->nptes, 32u), 8),
+ __entry->nptes > 32 ? "..." : "")
);
TRACE_EVENT(amdgpu_vm_set_ptes,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 7c5b60e53482..8b989670ed66 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -240,7 +240,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
- DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
+ DRM_INFO("Found UVD firmware Version: %u.%u Family ID: %u\n",
version_major, version_minor, family_id);
/*
@@ -267,7 +267,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
dec_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
enc_minor = (le32_to_cpu(hdr->ucode_version) >> 24) & 0x3f;
enc_major = (le32_to_cpu(hdr->ucode_version) >> 30) & 0x3;
- DRM_INFO("Found UVD firmware ENC: %hu.%hu DEC: .%hu Family ID: %hu\n",
+ DRM_INFO("Found UVD firmware ENC: %u.%u DEC: .%u Family ID: %u\n",
enc_major, enc_minor, dec_minor, family_id);
adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;
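Editor's note: why the %hu-to-%u conversions in these hunks are the right fix (sketch). The computed values are plain unsigned int, and varargs promote shorter types to int anyway, so %u describes what is actually passed while %hu mismatches it and trips format warnings:

unsigned int version_major = 1, version_minor = 77;

printk("Found UVD firmware Version: %u.%u\n", version_major, version_minor);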
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 9791a4057e8b..0d5284b936e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -179,7 +179,7 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
version_major = (ucode_version >> 20) & 0xfff;
version_minor = (ucode_version >> 8) & 0xfff;
binary_id = ucode_version & 0xff;
- DRM_INFO("Found VCE firmware Version: %hhd.%hhd Binary ID: %hhd\n",
+ DRM_INFO("Found VCE firmware Version: %d.%d Binary ID: %d\n",
version_major, version_minor, binary_id);
adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) |
(binary_id << 8));
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 1c97244e0d74..4a77c7424dfc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -181,7 +181,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
enc_major = fw_check;
dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
- DRM_INFO("Found VCN firmware Version ENC: %hu.%hu DEC: %hu VEP: %hu Revision: %hu\n",
+ DRM_INFO("Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
enc_major, enc_minor, dec_ver, vep, fw_rev);
} else {
unsigned int version_major, version_minor, family_id;
@@ -189,7 +189,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
- DRM_INFO("Found VCN firmware Version: %hu.%hu Family ID: %hu\n",
+ DRM_INFO("Found VCN firmware Version: %u.%u Family ID: %u\n",
version_major, version_minor, family_id);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index ba1086784525..d86b42a36560 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -99,6 +99,14 @@
#define mmGCR_GENERAL_CNTL_Sienna_Cichlid 0x1580
#define mmGCR_GENERAL_CNTL_Sienna_Cichlid_BASE_IDX 0
+#define mmCGTS_TCC_DISABLE_Vangogh 0x5006
+#define mmCGTS_TCC_DISABLE_Vangogh_BASE_IDX 1
+#define mmCGTS_USER_TCC_DISABLE_Vangogh 0x5007
+#define mmCGTS_USER_TCC_DISABLE_Vangogh_BASE_IDX 1
+#define mmGOLDEN_TSC_COUNT_UPPER_Vangogh 0x0025
+#define mmGOLDEN_TSC_COUNT_UPPER_Vangogh_BASE_IDX 1
+#define mmGOLDEN_TSC_COUNT_LOWER_Vangogh 0x0026
+#define mmGOLDEN_TSC_COUNT_LOWER_Vangogh_BASE_IDX 1
#define mmSPI_CONFIG_CNTL_1_Vangogh 0x2441
#define mmSPI_CONFIG_CNTL_1_Vangogh_BASE_IDX 1
#define mmVGT_TF_MEMORY_BASE_HI_Vangogh 0x2261
@@ -115,6 +123,8 @@
#define mmVGT_ESGS_RING_SIZE_Vangogh_BASE_IDX 1
#define mmSPI_CONFIG_CNTL_Vangogh 0x2440
#define mmSPI_CONFIG_CNTL_Vangogh_BASE_IDX 1
+#define mmGCR_GENERAL_CNTL_Vangogh 0x1580
+#define mmGCR_GENERAL_CNTL_Vangogh_BASE_IDX 0
#define mmCP_HYP_PFP_UCODE_ADDR 0x5814
#define mmCP_HYP_PFP_UCODE_ADDR_BASE_IDX 1
@@ -160,6 +170,9 @@
#define mmGCVM_L2_CGTT_CLK_CTRL_Sienna_Cichlid 0x15db
#define mmGCVM_L2_CGTT_CLK_CTRL_Sienna_Cichlid_BASE_IDX 0
+#define mmGC_THROTTLE_CTRL_Sienna_Cichlid 0x2030
+#define mmGC_THROTTLE_CTRL_Sienna_Cichlid_BASE_IDX 0
+
MODULE_FIRMWARE("amdgpu/navi10_ce.bin");
MODULE_FIRMWARE("amdgpu/navi10_pfp.bin");
MODULE_FIRMWARE("amdgpu/navi10_me.bin");
@@ -3237,7 +3250,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_vangogh[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_EXCEPTION_CONTROL, 0x7fff0f1f, 0x00b80000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1807ff, 0x00000142),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff1ffff, 0x00000500),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Vangogh, 0x1ff1ffff, 0x00000500),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0x000000ff, 0x000000e4),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77777777, 0x32103210),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77777777, 0x32103210),
@@ -3324,6 +3337,7 @@ static void gfx_v10_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume);
static void gfx_v10_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, bool secure);
static u32 gfx_v10_3_get_disabled_sa(struct amdgpu_device *adev);
static void gfx_v10_3_program_pbb_mode(struct amdgpu_device *adev);
+static void gfx_v10_3_set_power_brake_sequence(struct amdgpu_device *adev);
static void gfx10_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
{
@@ -4926,8 +4940,18 @@ static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev)
static void gfx_v10_0_get_tcc_info(struct amdgpu_device *adev)
{
/* TCCs are global (not instanced). */
- uint32_t tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE) |
- RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE);
+ uint32_t tcc_disable;
+
+ switch (adev->asic_type) {
+ case CHIP_VANGOGH:
+ tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE_Vangogh) |
+ RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE_Vangogh);
+ break;
+ default:
+ tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE) |
+ RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE);
+ break;
+ }
adev->gfx.config.tcc_disabled_mask =
REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, TCC_DISABLE) |
@@ -7192,6 +7216,9 @@ static int gfx_v10_0_hw_init(void *handle)
if (adev->asic_type == CHIP_SIENNA_CICHLID)
gfx_v10_3_program_pbb_mode(adev);
+ if (adev->asic_type >= CHIP_SIENNA_CICHLID)
+ gfx_v10_3_set_power_brake_sequence(adev);
+
return r;
}
@@ -7377,8 +7404,16 @@ static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev)
amdgpu_gfx_off_ctrl(adev, false);
mutex_lock(&adev->gfx.gpu_clock_mutex);
- clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER) |
- ((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER) << 32ULL);
+ switch (adev->asic_type) {
+ case CHIP_VANGOGH:
+ clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh) |
+ ((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh) << 32ULL);
+ break;
+ default:
+ clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER) |
+ ((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER) << 32ULL);
+ break;
+ }
mutex_unlock(&adev->gfx.gpu_clock_mutex);
amdgpu_gfx_off_ctrl(adev, true);
return clock;
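Editor's note: the per-ASIC branches above all assemble the 64-bit counter the same way (sketch with hypothetical read helpers): two 32-bit MMIO reads, with the upper word shifted into place.

u32 lo = read_lower_32();       /* hypothetical MMIO read */
u32 hi = read_upper_32();       /* hypothetical MMIO read */
u64 clock = (u64)lo | ((u64)hi << 32);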
@@ -9169,6 +9204,31 @@ static void gfx_v10_3_program_pbb_mode(struct amdgpu_device *adev)
}
}
+static void gfx_v10_3_set_power_brake_sequence(struct amdgpu_device *adev)
+{
+ WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX,
+ (0x1 << GRBM_GFX_INDEX__SA_BROADCAST_WRITES__SHIFT) |
+ (0x1 << GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES__SHIFT) |
+ (0x1 << GRBM_GFX_INDEX__SE_BROADCAST_WRITES__SHIFT));
+
+ WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, ixPWRBRK_STALL_PATTERN_CTRL);
+ WREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA,
+ (0x1 << PWRBRK_STALL_PATTERN_CTRL__PWRBRK_STEP_INTERVAL__SHIFT) |
+ (0x12 << PWRBRK_STALL_PATTERN_CTRL__PWRBRK_BEGIN_STEP__SHIFT) |
+ (0x13 << PWRBRK_STALL_PATTERN_CTRL__PWRBRK_END_STEP__SHIFT) |
+ (0xf << PWRBRK_STALL_PATTERN_CTRL__PWRBRK_THROTTLE_PATTERN_BIT_NUMS__SHIFT));
+
+ WREG32_SOC15(GC, 0, mmGC_THROTTLE_CTRL_Sienna_Cichlid,
+ (0x1 << GC_THROTTLE_CTRL__PWRBRK_STALL_EN__SHIFT) |
+ (0x1 << GC_THROTTLE_CTRL__PATTERN_MODE__SHIFT) |
+ (0x5 << GC_THROTTLE_CTRL__RELEASE_STEP_INTERVAL__SHIFT));
+
+ WREG32_SOC15(GC, 0, mmDIDT_IND_INDEX, ixDIDT_SQ_THROTTLE_CTRL);
+
+ WREG32_SOC15(GC, 0, mmDIDT_IND_DATA,
+ (0x1 << DIDT_SQ_THROTTLE_CTRL__PWRBRK_STALL_EN__SHIFT));
+}
+
const struct amdgpu_ip_block_version gfx_v10_0_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_GFX,
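Editor's note: the register writes in gfx_v10_3_set_power_brake_sequence() use the usual shift-and-OR field composition; a minimal illustration with field values taken from the hunk:

/* Each field value is shifted by its __SHIFT constant, then OR'ed
 * together so the register is programmed with a single write. */
u32 v = (0x1  << PWRBRK_STALL_PATTERN_CTRL__PWRBRK_STEP_INTERVAL__SHIFT) |
        (0x12 << PWRBRK_STALL_PATTERN_CTRL__PWRBRK_BEGIN_STEP__SHIFT) |
        (0x13 << PWRBRK_STALL_PATTERN_CTRL__PWRBRK_END_STEP__SHIFT);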
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index fc9bb94eaaf4..5f4805e4d04a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1647,7 +1647,7 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
}
/* No CPG in Arcturus */
- if (adev->asic_type != CHIP_ARCTURUS) {
+ if (adev->gfx.num_gfx_rings) {
r = gfx_v9_0_init_cp_gfx_microcode(adev, chip_name);
if (r)
return r;
@@ -2633,7 +2633,14 @@ static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
bool enable)
{
- u32 tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);
+ u32 tmp;
+
+ /* don't toggle interrupts that are only applicable
+ * to me0 pipe0 on ASICs that have me0 removed */
+ if (!adev->gfx.num_gfx_rings)
+ return;
+
+ tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
@@ -3822,7 +3829,7 @@ static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
gfx_v9_0_enable_gui_idle_interrupt(adev, false);
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
- if (adev->asic_type != CHIP_ARCTURUS) {
+ if (adev->gfx.num_gfx_rings) {
/* legacy firmware loading */
r = gfx_v9_0_cp_gfx_load_microcode(adev);
if (r)
@@ -3838,7 +3845,7 @@ static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
if (r)
return r;
- if (adev->asic_type != CHIP_ARCTURUS) {
+ if (adev->gfx.num_gfx_rings) {
r = gfx_v9_0_cp_gfx_resume(adev);
if (r)
return r;
@@ -3848,7 +3855,7 @@ static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
if (r)
return r;
- if (adev->asic_type != CHIP_ARCTURUS) {
+ if (adev->gfx.num_gfx_rings) {
ring = &adev->gfx.gfx_ring[0];
r = amdgpu_ring_test_helper(ring);
if (r)
@@ -3884,7 +3891,7 @@ static void gfx_v9_0_init_tcp_config(struct amdgpu_device *adev)
static void gfx_v9_0_cp_enable(struct amdgpu_device *adev, bool enable)
{
- if (adev->asic_type != CHIP_ARCTURUS)
+ if (adev->gfx.num_gfx_rings)
gfx_v9_0_cp_gfx_enable(adev, enable);
gfx_v9_0_cp_compute_enable(adev, enable);
}
@@ -4025,7 +4032,7 @@ static int gfx_v9_0_soft_reset(void *handle)
/* stop the rlc */
adev->gfx.rlc.funcs->stop(adev);
- if (adev->asic_type != CHIP_ARCTURUS)
+ if (adev->gfx.num_gfx_rings)
/* Disable GFX parsing/prefetching */
gfx_v9_0_cp_gfx_enable(adev, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index e1531d97f486..e22268f9dba7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1577,13 +1577,10 @@ static int gmc_v9_0_hw_init(void *handle)
gmc_v9_0_init_golden_registers(adev);
if (adev->mode_info.num_crtc) {
- if (adev->asic_type != CHIP_ARCTURUS) {
- /* Lockout access through VGA aperture*/
- WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
-
- /* disable VGA render */
- WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
- }
+ /* Lockout access through VGA aperture*/
+ WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
+ /* disable VGA render */
+ WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
}
amdgpu_device_program_register_sequence(adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
index 092ff2c43658..f107385faba2 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
@@ -136,6 +136,7 @@ mmhub_v2_0_print_l2_protection_fault_status(struct amdgpu_device *adev,
break;
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
+ case CHIP_DIMGREY_CAVEFISH:
mmhub_cid = mmhub_client_ids_sienna_cichlid[cid][rw];
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
index b72c8e4ca36b..1961745e89c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
@@ -310,7 +310,7 @@ static void mmhub_v2_3_setup_vmid_config(struct amdgpu_device *adev)
/* Send no-retry XNACK on fault to suppress VM fault storm. */
tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
- !amdgpu_noretry);
+ !adev->gmc.noretry);
WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_CNTL,
i * hub->ctx_distance, tmp);
WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
@@ -491,12 +491,11 @@ mmhub_v2_3_update_medium_grain_clock_gating(struct amdgpu_device *adev,
{
uint32_t def, data, def1, data1;
- def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG);
+ def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL);
def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
- data |= MM_ATC_L2_MISC_CG__ENABLE_MASK;
-
+ data &= ~MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK;
data1 &= ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
@@ -505,8 +504,7 @@ mmhub_v2_3_update_medium_grain_clock_gating(struct amdgpu_device *adev,
DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
} else {
- data &= ~MM_ATC_L2_MISC_CG__ENABLE_MASK;
-
+ data |= MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK;
data1 |= (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
@@ -516,7 +514,7 @@ mmhub_v2_3_update_medium_grain_clock_gating(struct amdgpu_device *adev,
}
if (def != data)
- WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG, data);
+ WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL, data);
if (def1 != data1)
WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2, data1);
}
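Note the inverted polarity here: the old MM_ATC_L2_MISC_CG register had an ENABLE bit that was set to turn gating on, while the new MM_ATC_L2_CGTT_CLK_CTRL exposes a SOFT_OVERRIDE bit that must be cleared to let the clock gate. A minimal standalone sketch of the read-modify-write-on-change idiom, with an illustrative mask value and a plain variable standing in for the MMIO register:

#include <stdint.h>
#include <stdio.h>

#define SOFT_OVERRIDE_MASK 0x00000001u /* illustrative bit position */

static uint32_t cgtt_clk_ctrl; /* stand-in for the MMIO register */

static void update_mgcg(int enable)
{
        uint32_t def, data;

        def = data = cgtt_clk_ctrl;          /* read once */
        if (enable)
                data &= ~SOFT_OVERRIDE_MASK; /* clear override: gating active */
        else
                data |= SOFT_OVERRIDE_MASK;  /* set override: gating forced off */

        if (def != data)                     /* skip redundant MMIO writes */
                cgtt_clk_ctrl = data;
}

int main(void)
{
        update_mgcg(0);
        printf("cgtt_clk_ctrl=0x%08x\n", cgtt_clk_ctrl);
        return 0;
}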
@@ -525,17 +523,44 @@ static void
mmhub_v2_3_update_medium_grain_light_sleep(struct amdgpu_device *adev,
bool enable)
{
- uint32_t def, data;
-
- def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG);
-
- if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
- data |= MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
- else
- data &= ~MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
+ uint32_t def, data, def1, data1, def2, data2;
+
+ def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL);
+ def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_WR_CGTT_CLK_CTRL);
+ def2 = data2 = RREG32_SOC15(MMHUB, 0, mmDAGB0_RD_CGTT_CLK_CTRL);
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) {
+ data &= ~MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK;
+ data1 &= ~(DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
+ data2 &= ~(DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
+ } else {
+ data |= MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK;
+ data1 |= (DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
+ data2 |= (DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
+ }
if (def != data)
- WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG, data);
+ WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL, data);
+ if (def1 != data1)
+ WREG32_SOC15(MMHUB, 0, mmDAGB0_WR_CGTT_CLK_CTRL, data1);
+ if (def2 != data2)
+ WREG32_SOC15(MMHUB, 0, mmDAGB0_RD_CGTT_CLK_CTRL, data2);
}
static int mmhub_v2_3_set_clockgating(struct amdgpu_device *adev,
@@ -554,26 +579,39 @@ static int mmhub_v2_3_set_clockgating(struct amdgpu_device *adev,
static void mmhub_v2_3_get_clockgating(struct amdgpu_device *adev, u32 *flags)
{
- int data, data1;
+ int data, data1, data2, data3;
if (amdgpu_sriov_vf(adev))
*flags = 0;
- data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG);
- data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
+ data = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
+ data1 = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL);
+ data2 = RREG32_SOC15(MMHUB, 0, mmDAGB0_WR_CGTT_CLK_CTRL);
+ data3 = RREG32_SOC15(MMHUB, 0, mmDAGB0_RD_CGTT_CLK_CTRL);
/* AMD_CG_SUPPORT_MC_MGCG */
- if ((data & MM_ATC_L2_MISC_CG__ENABLE_MASK) &&
- !(data1 & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
+ if (!(data & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
- DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK)))
- *flags |= AMD_CG_SUPPORT_MC_MGCG;
+ DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK))
+ && !(data1 & MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK)) {
+ *flags |= AMD_CG_SUPPORT_MC_MGCG;
+ }
/* AMD_CG_SUPPORT_MC_LS */
- if (data & MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
+ if (!(data1 & MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK)
+ && !(data2 & (DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK))
+ && !(data3 & (DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK)))
*flags |= AMD_CG_SUPPORT_MC_LS;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index f5ce9a9f4cf5..7767ccca526b 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -187,7 +187,16 @@ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
static int xgpu_ai_request_reset(struct amdgpu_device *adev)
{
- return xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
+ int ret, i = 0;
+
+ while (i < AI_MAILBOX_POLL_MSG_REP_MAX) {
+ ret = xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
+ if (!ret)
+ break;
+ i++;
+ }
+
+ return ret;
}
static int xgpu_ai_request_full_gpu_access(struct amdgpu_device *adev,
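The reset request above is now retried up to AI_MAILBOX_POLL_MSG_REP_MAX times, pairing with the halved per-attempt timeout in the header below so a transient mailbox failure no longer aborts recovery on the first try. A compilable sketch of the same bounded-retry shape, with a hypothetical send_request() standing in for the mailbox call:

#include <stdio.h>

#define REQ_RETRY_MAX 11 /* mirrors AI_MAILBOX_POLL_MSG_REP_MAX */

/* Hypothetical stand-in that fails twice, then succeeds. */
static int send_request(void)
{
        static int attempts;

        return ++attempts < 3 ? -1 : 0;
}

int main(void)
{
        int ret = -1, i;

        for (i = 0; i < REQ_RETRY_MAX; i++) {
                ret = send_request();
                if (!ret)
                        break; /* host acknowledged the request */
        }
        printf("ret=%d after %d attempt(s)\n", ret, i + 1);
        return 0;
}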
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
index 83b453f5d717..50572635d0f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
@@ -25,8 +25,9 @@
#define __MXGPU_AI_H__
#define AI_MAILBOX_POLL_ACK_TIMEDOUT 500
-#define AI_MAILBOX_POLL_MSG_TIMEDOUT 12000
+#define AI_MAILBOX_POLL_MSG_TIMEDOUT 6000
#define AI_MAILBOX_POLL_FLR_TIMEDOUT 5000
+#define AI_MAILBOX_POLL_MSG_REP_MAX 11
enum idh_request {
IDH_REQ_GPU_INIT_ACCESS = 1,
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index 666ed99cc14b..dd5c1e6ce009 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -200,7 +200,16 @@ static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
static int xgpu_nv_request_reset(struct amdgpu_device *adev)
{
- return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
+ int ret, i = 0;
+
+ while (i < NV_MAILBOX_POLL_MSG_REP_MAX) {
+ ret = xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
+ if (!ret)
+ break;
+ i++;
+ }
+
+ return ret;
}
static int xgpu_nv_request_full_gpu_access(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
index 52605e14a1a5..9f5808616174 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
@@ -27,6 +27,7 @@
#define NV_MAILBOX_POLL_ACK_TIMEDOUT 500
#define NV_MAILBOX_POLL_MSG_TIMEDOUT 6000
#define NV_MAILBOX_POLL_FLR_TIMEDOUT 5000
+#define NV_MAILBOX_POLL_MSG_REP_MAX 11
enum idh_request {
IDH_REQ_GPU_INIT_ACCESS = 1,
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index ac02dd707c44..6bee3677394a 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -362,6 +362,7 @@ nv_asic_reset_method(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
+ case CHIP_DIMGREY_CAVEFISH:
return AMD_RESET_METHOD_MODE1;
default:
if (smu_baco_is_support(smu))
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
index d65a5339d354..3ba7bdfde65d 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
@@ -47,7 +47,7 @@ enum psp_gfx_crtl_cmd_id
GFX_CTRL_CMD_ID_DISABLE_INT = 0x00060000, /* disable PSP-to-Gfx interrupt */
GFX_CTRL_CMD_ID_MODE1_RST = 0x00070000, /* trigger the Mode 1 reset */
GFX_CTRL_CMD_ID_GBR_IH_SET = 0x00080000, /* set Gbr IH_RB_CNTL registers */
- GFX_CTRL_CMD_ID_CONSUME_CMD = 0x000A0000, /* send interrupt to psp for updating write pointer of vf */
+ GFX_CTRL_CMD_ID_CONSUME_CMD = 0x00090000, /* send interrupt to psp for updating write pointer of vf */
GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING = 0x000C0000, /* destroy GPCOM ring */
GFX_CTRL_CMD_ID_MAX = 0x000F0000, /* max command ID */
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index 39e17aae655f..f1ba36a094da 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -153,6 +153,9 @@ static int sdma_v5_2_init_microcode(struct amdgpu_device *adev)
struct amdgpu_firmware_info *info = NULL;
const struct common_firmware_header *header = NULL;
+ if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_SIENNA_CICHLID))
+ return 0;
+
DRM_DEBUG("\n");
switch (adev->asic_type) {
@@ -807,6 +810,37 @@ static int sdma_v5_2_load_microcode(struct amdgpu_device *adev)
return 0;
}
+static int sdma_v5_2_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 grbm_soft_reset;
+ u32 tmp;
+ int i;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ grbm_soft_reset = REG_SET_FIELD(0,
+ GRBM_SOFT_RESET, SOFT_RESET_SDMA0,
+ 1);
+ grbm_soft_reset <<= i;
+
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ DRM_DEBUG("GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~grbm_soft_reset;
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+
+ udelay(50);
+ }
+
+ return 0;
+}
+
/**
* sdma_v5_2_start - setup and start the async dma engines
*
@@ -838,6 +872,7 @@ static int sdma_v5_2_start(struct amdgpu_device *adev)
msleep(1000);
}
+ sdma_v5_2_soft_reset(adev);
/* unhalt the MEs */
sdma_v5_2_enable(adev, true);
/* enable sdma ring preemption */
@@ -1366,13 +1401,6 @@ static int sdma_v5_2_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static int sdma_v5_2_soft_reset(void *handle)
-{
- /* todo */
-
- return 0;
-}
-
static int sdma_v5_2_ring_preempt_ib(struct amdgpu_ring *ring)
{
int i, r = 0;
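sdma_v5_2_soft_reset() above replaces the old stub and is called from sdma_v5_2_start() before the engines are unhalted: each instance's SOFT_RESET_SDMAn bit is asserted, read back to post the write, held for 50us, then deasserted. A standalone sketch of that assert/settle/deassert sequence with mocked register accessors (the real bit position differs):

#include <stdint.h>
#include <stdio.h>

#define SOFT_RESET_SDMA0 (1u << 20) /* illustrative bit position */

static uint32_t grbm_soft_reset; /* stand-in for GRBM_SOFT_RESET */

static uint32_t rreg(void) { return grbm_soft_reset; }
static void wreg(uint32_t v) { grbm_soft_reset = v; }
static void udelay(unsigned int us) { (void)us; /* settle time elided */ }

static void sdma_soft_reset(int num_instances)
{
        uint32_t tmp;
        int i;

        for (i = 0; i < num_instances; i++) {
                tmp = rreg();
                tmp |= SOFT_RESET_SDMA0 << i;    /* assert reset, instance i */
                wreg(tmp);
                tmp = rreg();                    /* read back to post write */
                udelay(50);

                tmp &= ~(SOFT_RESET_SDMA0 << i); /* deassert */
                wreg(tmp);
                (void)rreg();
                udelay(50);
        }
}

int main(void)
{
        sdma_soft_reset(2);
        printf("GRBM_SOFT_RESET=0x%08x\n", grbm_soft_reset);
        return 0;
}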
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 8a23636ecc27..0b3516c4eefb 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -1239,7 +1239,8 @@ static int soc15_common_early_init(void *handle)
break;
case CHIP_RENOIR:
adev->asic_funcs = &soc15_asic_funcs;
- if (adev->pdev->device == 0x1636)
+ if ((adev->pdev->device == 0x1636) ||
+ (adev->pdev->device == 0x164c))
adev->apu_flags |= AMD_APU_IS_RENOIR;
else
adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig
index b3672d10ea54..e8fb10c41f16 100644
--- a/drivers/gpu/drm/amd/amdkfd/Kconfig
+++ b/drivers/gpu/drm/amd/amdkfd/Kconfig
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: MIT
#
-# Heterogenous system architecture configuration
+# Heterogeneous system architecture configuration
#
config HSA_AMD
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index 8cac497c2c45..a5640a6138cf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -1040,11 +1040,14 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
(struct crat_subtype_iolink *)sub_type_hdr);
if (ret < 0)
return ret;
- crat_table->length += (sub_type_hdr->length * entries);
- crat_table->total_entries += entries;
- sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
- sub_type_hdr->length * entries);
+ if (entries) {
+ crat_table->length += (sub_type_hdr->length * entries);
+ crat_table->total_entries += entries;
+
+ sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
+ sub_type_hdr->length * entries);
+ }
#else
pr_info("IO link not available for non x86 platforms\n");
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 50922ff2927b..72c893fff61a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -422,7 +422,7 @@ static const struct kfd_device_info navi10_device_info = {
.mqd_size_aligned = MQD_SIZE_ALIGNED,
.needs_iommu_device = false,
.supports_cwsr = true,
- .needs_pci_atomics = false,
+ .needs_pci_atomics = true,
.num_sdma_engines = 2,
.num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 8,
@@ -440,7 +440,7 @@ static const struct kfd_device_info navi12_device_info = {
.mqd_size_aligned = MQD_SIZE_ALIGNED,
.needs_iommu_device = false,
.supports_cwsr = true,
- .needs_pci_atomics = false,
+ .needs_pci_atomics = true,
.num_sdma_engines = 2,
.num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 8,
@@ -458,7 +458,7 @@ static const struct kfd_device_info navi14_device_info = {
.mqd_size_aligned = MQD_SIZE_ALIGNED,
.needs_iommu_device = false,
.supports_cwsr = true,
- .needs_pci_atomics = false,
+ .needs_pci_atomics = true,
.num_sdma_engines = 2,
.num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 8,
@@ -476,7 +476,7 @@ static const struct kfd_device_info sienna_cichlid_device_info = {
.mqd_size_aligned = MQD_SIZE_ALIGNED,
.needs_iommu_device = false,
.supports_cwsr = true,
- .needs_pci_atomics = false,
+ .needs_pci_atomics = true,
.num_sdma_engines = 4,
.num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 8,
@@ -494,7 +494,7 @@ static const struct kfd_device_info navy_flounder_device_info = {
.mqd_size_aligned = MQD_SIZE_ALIGNED,
.needs_iommu_device = false,
.supports_cwsr = true,
- .needs_pci_atomics = false,
+ .needs_pci_atomics = true,
.num_sdma_engines = 2,
.num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 8,
@@ -530,7 +530,7 @@ static const struct kfd_device_info dimgrey_cavefish_device_info = {
.mqd_size_aligned = MQD_SIZE_ALIGNED,
.needs_iommu_device = false,
.supports_cwsr = true,
- .needs_pci_atomics = false,
+ .needs_pci_atomics = true,
.num_sdma_engines = 2,
.num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 8,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index f0a6f6665c81..e686ce2bf3b3 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -72,8 +72,8 @@ enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
{
int i;
- int pipe_offset = mec * dqm->dev->shared_resources.num_pipe_per_mec
- + pipe * dqm->dev->shared_resources.num_queue_per_pipe;
+ int pipe_offset = (mec * dqm->dev->shared_resources.num_pipe_per_mec
+ + pipe) * dqm->dev->shared_resources.num_queue_per_pipe;
/* queue is available for KFD usage if bit is 1 */
for (i = 0; i < dqm->dev->shared_resources.num_queue_per_pipe; ++i)
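The corrected pipe_offset gives every pipe a contiguous run of queue bits: pipe (mec, pipe) starts at (mec * pipes_per_mec + pipe) * queues_per_pipe, whereas the old expression mixed a pipe stride with a queue stride and indexed the wrong bits whenever the two differed. A tiny self-contained check of the arithmetic, with toy dimensions in place of shared_resources:

#include <stdio.h>

#define NUM_PIPE_PER_MEC 4
#define NUM_QUEUE_PER_PIPE 8

/* First bit of (mec, pipe)'s queue run in the flattened bitmap. */
static int pipe_offset(int mec, int pipe)
{
        return (mec * NUM_PIPE_PER_MEC + pipe) * NUM_QUEUE_PER_PIPE;
}

int main(void)
{
        /* mec 1 must start right after mec 0's 4 * 8 = 32 bits. */
        printf("%d %d\n", pipe_offset(0, 3), pipe_offset(1, 0)); /* 24 32 */
        return 0;
}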
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index 797b5d4b43e5..e509a175ed17 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -6,7 +6,7 @@ config DRM_AMD_DC
bool "AMD DC - Enable new display engine"
default y
select SND_HDA_COMPONENT if SND_HDA_CORE
- select DRM_AMD_DC_DCN if (X86 || PPC64 || (ARM64 && KERNEL_MODE_NEON)) && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS)
+ select DRM_AMD_DC_DCN if (X86 || PPC64) && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS)
help
Choose this option if you want to use the new display engine
support for AMDGPU. This adds required support for Vega and
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index c23896207e9d..961abf1cf040 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -196,10 +196,6 @@ static int amdgpu_dm_encoder_init(struct drm_device *dev,
static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
-static int amdgpu_dm_atomic_commit(struct drm_device *dev,
- struct drm_atomic_state *state,
- bool nonblock);
-
static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
static int amdgpu_dm_atomic_check(struct drm_device *dev,
@@ -943,41 +939,6 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
}
#endif
-#ifdef CONFIG_DEBUG_FS
-static int create_crtc_crc_properties(struct amdgpu_display_manager *dm)
-{
- dm->crc_win_x_start_property =
- drm_property_create_range(adev_to_drm(dm->adev),
- DRM_MODE_PROP_ATOMIC,
- "AMD_CRC_WIN_X_START", 0, U16_MAX);
- if (!dm->crc_win_x_start_property)
- return -ENOMEM;
-
- dm->crc_win_y_start_property =
- drm_property_create_range(adev_to_drm(dm->adev),
- DRM_MODE_PROP_ATOMIC,
- "AMD_CRC_WIN_Y_START", 0, U16_MAX);
- if (!dm->crc_win_y_start_property)
- return -ENOMEM;
-
- dm->crc_win_x_end_property =
- drm_property_create_range(adev_to_drm(dm->adev),
- DRM_MODE_PROP_ATOMIC,
- "AMD_CRC_WIN_X_END", 0, U16_MAX);
- if (!dm->crc_win_x_end_property)
- return -ENOMEM;
-
- dm->crc_win_y_end_property =
- drm_property_create_range(adev_to_drm(dm->adev),
- DRM_MODE_PROP_ATOMIC,
- "AMD_CRC_WIN_Y_END", 0, U16_MAX);
- if (!dm->crc_win_y_end_property)
- return -ENOMEM;
-
- return 0;
-}
-#endif
-
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
struct dc_init_data init_data;
@@ -1125,10 +1086,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
dc_init_callbacks(adev->dm.dc, &init_params);
}
#endif
-#ifdef CONFIG_DEBUG_FS
- if (create_crtc_crc_properties(&adev->dm))
- DRM_ERROR("amdgpu: failed to create crc property.\n");
-#endif
if (amdgpu_dm_initialize_drm_device(adev)) {
DRM_ERROR(
"amdgpu: failed to initialize sw for display support.\n");
@@ -1876,8 +1833,8 @@ static void emulated_link_detect(struct dc_link *link)
link->type = dc_connection_none;
prev_sink = link->local_sink;
- if (prev_sink != NULL)
- dc_sink_retain(prev_sink);
+ if (prev_sink)
+ dc_sink_release(prev_sink);
switch (link->connector_signal) {
case SIGNAL_TYPE_HDMI_TYPE_A: {
@@ -1977,7 +1934,7 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,
dc_commit_updates_for_stream(
dm->dc, bundle->surface_updates,
dc_state->stream_status->plane_count,
- dc_state->streams[k], &bundle->stream_update, dc_state);
+ dc_state->streams[k], &bundle->stream_update);
}
cleanup:
@@ -2008,8 +1965,7 @@ static void dm_set_dpms_off(struct dc_link *link)
stream_update.stream = stream_state;
dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
- stream_state, &stream_update,
- stream_state->ctx->dc->current_state);
+ stream_state, &stream_update);
mutex_unlock(&adev->dm.dc_lock);
}
@@ -2212,7 +2168,7 @@ static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
.get_format_info = amd_get_format_info,
.output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = amdgpu_dm_atomic_check,
- .atomic_commit = amdgpu_dm_atomic_commit,
+ .atomic_commit = drm_atomic_helper_commit,
};
static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
@@ -2373,8 +2329,10 @@ void amdgpu_dm_update_connector_after_detect(
* TODO: check if we still need the S3 mode update workaround.
* If yes, put it here.
*/
- if (aconnector->dc_sink)
+ if (aconnector->dc_sink) {
amdgpu_dm_update_freesync_caps(connector, NULL);
+ dc_sink_release(aconnector->dc_sink);
+ }
aconnector->dc_sink = sink;
dc_sink_retain(aconnector->dc_sink);
@@ -2390,8 +2348,6 @@ void amdgpu_dm_update_connector_after_detect(
drm_connector_update_edid_property(connector,
aconnector->edid);
- drm_add_edid_modes(connector, aconnector->edid);
-
if (aconnector->dc_link->aux_mode)
drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
aconnector->edid);
@@ -5124,9 +5080,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
int preferred_refresh = 0;
#if defined(CONFIG_DRM_AMD_DC_DCN)
struct dsc_dec_dpcd_caps dsc_caps;
-#endif
uint32_t link_bandwidth_kbps;
-
+#endif
struct dc_sink *sink = NULL;
if (aconnector == NULL) {
DRM_ERROR("aconnector is NULL!\n");
@@ -5208,11 +5163,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
&dsc_caps);
-#endif
link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
dc_link_get_link_cap(aconnector->dc_link));
-#if defined(CONFIG_DRM_AMD_DC_DCN)
if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
/* Set DSC policy according to dsc_clock_en */
dc_dsc_policy_set_enable_dsc_when_not_needed(
@@ -5340,64 +5293,12 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
state->crc_src = cur->crc_src;
state->cm_has_degamma = cur->cm_has_degamma;
state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
-#ifdef CONFIG_DEBUG_FS
- state->crc_window = cur->crc_window;
-#endif
+
/* TODO Duplicate dc_stream after the stream object is flattened */
return &state->base;
}
-#ifdef CONFIG_DEBUG_FS
-int amdgpu_dm_crtc_atomic_set_property(struct drm_crtc *crtc,
- struct drm_crtc_state *crtc_state,
- struct drm_property *property,
- uint64_t val)
-{
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct dm_crtc_state *dm_new_state =
- to_dm_crtc_state(crtc_state);
-
- if (property == adev->dm.crc_win_x_start_property)
- dm_new_state->crc_window.x_start = val;
- else if (property == adev->dm.crc_win_y_start_property)
- dm_new_state->crc_window.y_start = val;
- else if (property == adev->dm.crc_win_x_end_property)
- dm_new_state->crc_window.x_end = val;
- else if (property == adev->dm.crc_win_y_end_property)
- dm_new_state->crc_window.y_end = val;
- else
- return -EINVAL;
-
- return 0;
-}
-
-int amdgpu_dm_crtc_atomic_get_property(struct drm_crtc *crtc,
- const struct drm_crtc_state *state,
- struct drm_property *property,
- uint64_t *val)
-{
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct dm_crtc_state *dm_state =
- to_dm_crtc_state(state);
-
- if (property == adev->dm.crc_win_x_start_property)
- *val = dm_state->crc_window.x_start;
- else if (property == adev->dm.crc_win_y_start_property)
- *val = dm_state->crc_window.y_start;
- else if (property == adev->dm.crc_win_x_end_property)
- *val = dm_state->crc_window.x_end;
- else if (property == adev->dm.crc_win_y_end_property)
- *val = dm_state->crc_window.y_end;
- else
- return -EINVAL;
-
- return 0;
-}
-#endif
-
static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
{
enum dc_irq_source irq_source;
@@ -5464,10 +5365,6 @@ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
.enable_vblank = dm_enable_vblank,
.disable_vblank = dm_disable_vblank,
.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
-#ifdef CONFIG_DEBUG_FS
- .atomic_set_property = amdgpu_dm_crtc_atomic_set_property,
- .atomic_get_property = amdgpu_dm_crtc_atomic_get_property,
-#endif
};
static enum drm_connector_status
@@ -6669,25 +6566,6 @@ static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
return 0;
}
-#ifdef CONFIG_DEBUG_FS
-static void attach_crtc_crc_properties(struct amdgpu_display_manager *dm,
- struct amdgpu_crtc *acrtc)
-{
- drm_object_attach_property(&acrtc->base.base,
- dm->crc_win_x_start_property,
- 0);
- drm_object_attach_property(&acrtc->base.base,
- dm->crc_win_y_start_property,
- 0);
- drm_object_attach_property(&acrtc->base.base,
- dm->crc_win_x_end_property,
- 0);
- drm_object_attach_property(&acrtc->base.base,
- dm->crc_win_y_end_property,
- 0);
-}
-#endif
-
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
struct drm_plane *plane,
uint32_t crtc_index)
@@ -6735,9 +6613,7 @@ static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
true, MAX_COLOR_LUT_ENTRIES);
drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
-#ifdef CONFIG_DEBUG_FS
- attach_crtc_crc_properties(dm, acrtc);
-#endif
+
return 0;
fail:
@@ -7672,7 +7548,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
struct drm_crtc *pcrtc,
bool wait_for_vblank)
{
- uint32_t i;
+ int i;
uint64_t timestamp_ns;
struct drm_plane *plane;
struct drm_plane_state *old_plane_state, *new_plane_state;
@@ -7713,7 +7589,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
amdgpu_dm_commit_cursors(state);
/* update planes when needed */
- for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
+ for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
struct drm_crtc *crtc = new_plane_state->crtc;
struct drm_crtc_state *new_crtc_state;
struct drm_framebuffer *fb = new_plane_state->fb;
@@ -7936,8 +7812,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
bundle->surface_updates,
planes_count,
acrtc_state->stream,
- &bundle->stream_update,
- dc_state);
+ &bundle->stream_update);
/**
* Enable or disable the interrupts on the backend.
@@ -8070,20 +7945,6 @@ static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_stat
stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
}
-static int amdgpu_dm_atomic_commit(struct drm_device *dev,
- struct drm_atomic_state *state,
- bool nonblock)
-{
- /*
- * Add check here for SoC's that support hardware cursor plane, to
- * unset legacy_cursor_update
- */
-
- return drm_atomic_helper_commit(dev, state, nonblock);
-
- /*TODO Handle EINTR, reenable IRQ*/
-}
-
/**
* amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
* @state: The atomic state to commit
@@ -8287,13 +8148,13 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
- struct dc_surface_update dummy_updates[MAX_SURFACES];
+ struct dc_surface_update surface_updates[MAX_SURFACES];
struct dc_stream_update stream_update;
struct dc_info_packet hdr_packet;
struct dc_stream_status *status = NULL;
bool abm_changed, hdr_changed, scaling_changed;
- memset(&dummy_updates, 0, sizeof(dummy_updates));
+ memset(&surface_updates, 0, sizeof(surface_updates));
memset(&stream_update, 0, sizeof(stream_update));
if (acrtc) {
@@ -8350,16 +8211,15 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
* To fix this, DC should permit updating only stream properties.
*/
for (j = 0; j < status->plane_count; j++)
- dummy_updates[j].surface = status->plane_states[0];
+ surface_updates[j].surface = status->plane_states[j];
mutex_lock(&dm->dc_lock);
dc_commit_updates_for_stream(dm->dc,
- dummy_updates,
+ surface_updates,
status->plane_count,
dm_new_crtc_state->stream,
- &stream_update,
- dc_state);
+ &stream_update);
mutex_unlock(&dm->dc_lock);
}
@@ -8388,7 +8248,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
*/
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
- bool configure_crc = false;
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
@@ -8398,30 +8257,21 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
dc_stream_retain(dm_new_crtc_state->stream);
acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
manage_dm_interrupts(adev, acrtc, true);
- }
+
#ifdef CONFIG_DEBUG_FS
- if (new_crtc_state->active &&
- amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
/**
* Frontend may have changed so reapply the CRC capture
* settings for the stream.
*/
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
- dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
- if (amdgpu_dm_crc_window_is_default(dm_new_crtc_state)) {
- if (!old_crtc_state->active || drm_atomic_crtc_needs_modeset(new_crtc_state))
- configure_crc = true;
- } else {
- if (amdgpu_dm_crc_window_changed(dm_new_crtc_state, dm_old_crtc_state))
- configure_crc = true;
- }
-
- if (configure_crc)
+ if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
amdgpu_dm_crtc_configure_crc_source(
- crtc, dm_new_crtc_state, dm_new_crtc_state->crc_src);
- }
+ crtc, dm_new_crtc_state,
+ dm_new_crtc_state->crc_src);
+ }
#endif
+ }
}
for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
@@ -8506,14 +8356,14 @@ static int dm_force_atomic_commit(struct drm_connector *connector)
ret = PTR_ERR_OR_ZERO(conn_state);
if (ret)
- goto err;
+ goto out;
/* Attach crtc to drm_atomic_state*/
crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
ret = PTR_ERR_OR_ZERO(crtc_state);
if (ret)
- goto err;
+ goto out;
/* force a restore */
crtc_state->mode_changed = true;
@@ -8523,17 +8373,15 @@ static int dm_force_atomic_commit(struct drm_connector *connector)
ret = PTR_ERR_OR_ZERO(plane_state);
if (ret)
- goto err;
-
+ goto out;
/* Call commit internally with the state we just constructed */
ret = drm_atomic_commit(state);
- if (!ret)
- return 0;
-err:
- DRM_ERROR("Restoring old state failed with %i\n", ret);
+out:
drm_atomic_state_put(state);
+ if (ret)
+ DRM_ERROR("Restoring old state failed with %i\n", ret);
return ret;
}
@@ -9388,7 +9236,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
- if (dm_old_crtc_state->dsc_force_changed && new_crtc_state)
+ if (dm_old_crtc_state->dsc_force_changed)
new_crtc_state->mode_changed = true;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 0b31779a0485..1182dafcef02 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -336,13 +336,6 @@ struct amdgpu_display_manager {
*/
const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box;
-#ifdef CONFIG_DEBUG_FS
- /* set the crc calculation window*/
- struct drm_property *crc_win_x_start_property;
- struct drm_property *crc_win_y_start_property;
- struct drm_property *crc_win_x_end_property;
- struct drm_property *crc_win_y_end_property;
-#endif
/**
* @mst_encoders:
*
@@ -429,15 +422,6 @@ struct dm_plane_state {
struct dc_plane_state *dc_state;
};
-#ifdef CONFIG_DEBUG_FS
-struct crc_rec {
- uint16_t x_start;
- uint16_t y_start;
- uint16_t x_end;
- uint16_t y_end;
- };
-#endif
-
struct dm_crtc_state {
struct drm_crtc_state base;
struct dc_stream_state *stream;
@@ -460,9 +444,6 @@ struct dm_crtc_state {
struct dc_info_packet vrr_infopacket;
int abm_level;
-#ifdef CONFIG_DEBUG_FS
- struct crc_rec crc_window;
-#endif
};
#define to_dm_crtc_state(x) container_of(x, struct dm_crtc_state, base)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
index ff6db26626ea..66cb8730586b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -81,33 +81,6 @@ const char *const *amdgpu_dm_crtc_get_crc_sources(struct drm_crtc *crtc,
return pipe_crc_sources;
}
-bool amdgpu_dm_crc_window_is_default(struct dm_crtc_state *dm_crtc_state)
-{
- bool ret = true;
-
- if ((dm_crtc_state->crc_window.x_start != 0) ||
- (dm_crtc_state->crc_window.y_start != 0) ||
- (dm_crtc_state->crc_window.x_end != 0) ||
- (dm_crtc_state->crc_window.y_end != 0))
- ret = false;
-
- return ret;
-}
-
-bool amdgpu_dm_crc_window_changed(struct dm_crtc_state *dm_new_crtc_state,
- struct dm_crtc_state *dm_old_crtc_state)
-{
- bool ret = false;
-
- if ((dm_new_crtc_state->crc_window.x_start != dm_old_crtc_state->crc_window.x_start) ||
- (dm_new_crtc_state->crc_window.y_start != dm_old_crtc_state->crc_window.y_start) ||
- (dm_new_crtc_state->crc_window.x_end != dm_old_crtc_state->crc_window.x_end) ||
- (dm_new_crtc_state->crc_window.y_end != dm_old_crtc_state->crc_window.y_end))
- ret = true;
-
- return ret;
-}
-
int
amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc, const char *src_name,
size_t *values_cnt)
@@ -132,7 +105,6 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
struct dc_stream_state *stream_state = dm_crtc_state->stream;
bool enable = amdgpu_dm_is_valid_crc_source(source);
int ret = 0;
- struct crc_params *crc_window = NULL, tmp_window;
/* Configuration will be deferred to stream enable. */
if (!stream_state)
@@ -141,22 +113,9 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
mutex_lock(&adev->dm.dc_lock);
/* Enable CRTC CRC generation if necessary. */
- if (dm_is_crc_source_crtc(source)) {
- if (!amdgpu_dm_crc_window_is_default(dm_crtc_state)) {
- crc_window = &tmp_window;
-
- tmp_window.windowa_x_start = dm_crtc_state->crc_window.x_start;
- tmp_window.windowa_y_start = dm_crtc_state->crc_window.y_start;
- tmp_window.windowa_x_end = dm_crtc_state->crc_window.x_end;
- tmp_window.windowa_y_end = dm_crtc_state->crc_window.y_end;
- tmp_window.windowb_x_start = dm_crtc_state->crc_window.x_start;
- tmp_window.windowb_y_start = dm_crtc_state->crc_window.y_start;
- tmp_window.windowb_x_end = dm_crtc_state->crc_window.x_end;
- tmp_window.windowb_y_end = dm_crtc_state->crc_window.y_end;
- }
-
+ if (dm_is_crc_source_crtc(source) || source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE) {
if (!dc_stream_configure_crc(stream_state->ctx->dc,
- stream_state, crc_window, enable, enable)) {
+ stream_state, NULL, enable, enable)) {
ret = -EINVAL;
goto unlock;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
index 0235bfb246e5..f7d731797d3f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
@@ -47,9 +47,6 @@ static inline bool amdgpu_dm_is_valid_crc_source(enum amdgpu_dm_pipe_crc_source
/* amdgpu_dm_crc.c */
#ifdef CONFIG_DEBUG_FS
-bool amdgpu_dm_crc_window_is_default(struct dm_crtc_state *dm_crtc_state);
-bool amdgpu_dm_crc_window_changed(struct dm_crtc_state *dm_new_crtc_state,
- struct dm_crtc_state *dm_old_crtc_state);
int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
struct dm_crtc_state *dm_crtc_state,
enum amdgpu_dm_pipe_crc_source source);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index 357778556b06..26ed70e5538a 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -165,7 +165,10 @@ static struct list_head *remove_irq_handler(struct amdgpu_device *adev,
handler = list_entry(entry, struct amdgpu_dm_irq_handler_data,
list);
- if (ih == handler) {
+ if (handler == NULL)
+ continue;
+
+ if (ih == handler->handler) {
/* Found our handler. Remove it from the list. */
list_del(&handler->list);
handler_removed = true;
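The old test compared the caller's pointer against the wrapper node itself rather than the callback stored inside it; the list holds amdgpu_dm_irq_handler_data entries whose ->handler member carries the registered callback, so that is the field to match. A small standalone sketch of removing a list node by its stored callback:

#include <stdio.h>
#include <stdlib.h>

typedef void (*irq_fn)(void *);

struct node {
        irq_fn handler; /* the registered callback */
        struct node *next;
};

static void remove_handler(struct node **head, irq_fn ih)
{
        struct node **pp = head;

        while (*pp) {
                if ((*pp)->handler == ih) { /* compare stored callback */
                        struct node *victim = *pp;

                        *pp = victim->next;
                        free(victim);
                        return;
                }
                pp = &(*pp)->next;
        }
}

static void cb(void *arg) { (void)arg; }

int main(void)
{
        struct node *head = calloc(1, sizeof(*head));

        head->handler = cb;
        remove_handler(&head, cb);
        printf("%s\n", head ? "still present" : "removed");
        return 0;
}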
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 6f975c16779d..f2d8cf34be46 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -24,6 +24,7 @@
*/
#include <linux/version.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_dp_helper.h>
@@ -252,8 +253,10 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
static struct drm_encoder *
dm_mst_atomic_best_encoder(struct drm_connector *connector,
- struct drm_connector_state *connector_state)
+ struct drm_atomic_state *state)
{
+ struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state,
+ connector);
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(connector_state->crtc);
@@ -830,6 +833,9 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
if (computed_streams[i])
continue;
+ if (dcn20_remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK)
+ return false;
+
mutex_lock(&aconnector->mst_mgr.lock);
if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link)) {
mutex_unlock(&aconnector->mst_mgr.lock);
@@ -847,7 +853,8 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
stream = dc_state->streams[i];
if (stream->timing.flags.DSC == 1)
- dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream);
+ if (dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream) != DC_OK)
+ return false;
}
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/Makefile b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
index 64f515d74410..f3c00f479e1c 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
@@ -33,10 +33,6 @@ ifdef CONFIG_PPC64
calcs_ccflags := -mhard-float -maltivec
endif
-ifdef CONFIG_ARM64
-calcs_rcflags := -mgeneral-regs-only
-endif
-
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
index d59b380e7b7f..ff96bee57bfc 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
@@ -104,13 +104,6 @@ ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/clk_mgr/dcn21/rn_clk_mgr.o := $(call cc-option,-mno-gnu-attribute)
endif
-# prevent build errors:
-# ...: '-mgeneral-regs-only' is incompatible with the use of floating-point types
-# this file is unused on arm64, just like on ppc64
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/clk_mgr/dcn21/rn_clk_mgr.o := -mgeneral-regs-only
-endif
-
AMD_DAL_CLK_MGR_DCN21 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn21/,$(CLK_MGR_DCN21))
AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN21)
@@ -125,13 +118,6 @@ ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/clk_mgr/dcn30/dcn30_clk_mgr.o := $(call cc-option,-mno-gnu-attribute)
endif
-# prevent build errors:
-# ...: '-mgeneral-regs-only' is incompatible with the use of floating-point types
-# this file is unused on arm64, just like on ppc64
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/clk_mgr/dcn30/dcn30_clk_mgr.o := -mgeneral-regs-only
-endif
-
AMD_DAL_CLK_MGR_DCN30 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn30/,$(CLK_MGR_DCN30))
AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN30)
@@ -146,13 +132,6 @@ ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/clk_mgr/dcn301/vg_clk_mgr.o := $(call cc-option,-mno-gnu-attribute)
endif
-# prevent build errors:
-# ...: '-mgeneral-regs-only' is incompatible with the use of floating-point types
-# this file is unused on arm64, just like on ppc64
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/clk_mgr/dcn301/vg_clk_mgr.o := -mgeneral-regs-only
-endif
-
AMD_DAL_CLK_MGR_DCN301 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn301/,$(CLK_MGR_DCN301))
AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN301)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index 6f4fe8fce6b7..01b1853b7750 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -75,15 +75,8 @@ int rn_get_active_display_cnt_wa(
for (i = 0; i < dc->link_count; i++) {
const struct dc_link *link = dc->links[i];
- /*
- * Only notify active stream or virtual stream.
- * Need to notify virtual stream to work around
- * headless case. HPD does not fire when system is in
- * S0i2.
- */
/* abusing the fact that the dig and phy are coupled to see if the phy is enabled */
- if (link->connector_signal == SIGNAL_TYPE_VIRTUAL ||
- link->link_enc->funcs->is_dig_enabled(link->link_enc))
+ if (link->link_enc->funcs->is_dig_enabled(link->link_enc))
display_count++;
}
@@ -234,12 +227,11 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
rn_vbios_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
// always update dtos unless clock is lowered and not safe to lower
- if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz)
- rn_update_clocks_update_dpp_dto(
- clk_mgr,
- context,
- clk_mgr_base->clks.actual_dppclk_khz,
- safe_to_lower);
+ rn_update_clocks_update_dpp_dto(
+ clk_mgr,
+ context,
+ clk_mgr_base->clks.actual_dppclk_khz,
+ safe_to_lower);
}
if (update_dispclk &&
@@ -738,32 +730,32 @@ static struct wm_table ddr4_wm_table_rn = {
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
- .sr_exit_time_us = 9.09,
- .sr_enter_plus_exit_time_us = 10.14,
+ .sr_exit_time_us = 11.90,
+ .sr_enter_plus_exit_time_us = 12.80,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
- .sr_exit_time_us = 10.12,
- .sr_enter_plus_exit_time_us = 11.48,
+ .sr_exit_time_us = 13.18,
+ .sr_enter_plus_exit_time_us = 14.30,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
- .sr_exit_time_us = 10.12,
- .sr_enter_plus_exit_time_us = 11.48,
+ .sr_exit_time_us = 13.18,
+ .sr_enter_plus_exit_time_us = 14.30,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
- .sr_exit_time_us = 10.12,
- .sr_enter_plus_exit_time_us = 11.48,
+ .sr_exit_time_us = 13.18,
+ .sr_enter_plus_exit_time_us = 14.30,
.valid = true,
},
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
index 11a7b583d561..7deeec9d1c7c 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
@@ -99,7 +99,7 @@ int rn_vbios_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, unsigned
/* Trigger the message transaction by writing the message ID */
REG_WRITE(MP1_SMN_C2PMSG_67, msg_id);
- result = rn_smu_wait_for_response(clk_mgr, 10, 1000);
+ result = rn_smu_wait_for_response(clk_mgr, 10, 200000);
ASSERT(result == VBIOSSMC_Result_OK || result == VBIOSSMC_Result_UnknownCmd);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
index 5b466f440d67..ab98c259ef69 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
@@ -251,6 +251,7 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
bool force_reset = false;
bool update_uclk = false;
+ bool p_state_change_support;
if (dc->work_arounds.skip_clock_update || !clk_mgr->smu_present)
return;
@@ -291,8 +292,9 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
clk_mgr_base->clks.socclk_khz = new_clocks->socclk_khz;
clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
- if (should_update_pstate_support(safe_to_lower, new_clocks->p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
- clk_mgr_base->clks.p_state_change_support = new_clocks->p_state_change_support;
+ p_state_change_support = new_clocks->p_state_change_support || (display_count == 0);
+ if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
+ clk_mgr_base->clks.p_state_change_support = p_state_change_support;
/* to disable P-State switching, set UCLK min = max */
if (!clk_mgr_base->clks.p_state_change_support)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
index 9a8e66bba9c0..991b9c5beaa3 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
@@ -74,15 +74,8 @@ int vg_get_active_display_cnt_wa(
for (i = 0; i < dc->link_count; i++) {
const struct dc_link *link = dc->links[i];
- /*
- * Only notify active stream or virtual stream.
- * Need to notify virtual stream to work around
- * headless case. HPD does not fire when system is in
- * S0i2.
- */
/* abusing the fact that the dig and phy are coupled to see if the phy is enabled */
- if (link->connector_signal == SIGNAL_TYPE_VIRTUAL ||
- link->link_enc->funcs->is_dig_enabled(link->link_enc))
+ if (link->link_enc->funcs->is_dig_enabled(link->link_enc))
display_count++;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 58eb0d69873a..6cf1a5a2a5ec 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -2679,8 +2679,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
struct dc_surface_update *srf_updates,
int surface_count,
struct dc_stream_state *stream,
- struct dc_stream_update *stream_update,
- struct dc_state *state)
+ struct dc_stream_update *stream_update)
{
const struct dc_stream_status *stream_status;
enum surface_update_type update_type;
@@ -2699,6 +2698,12 @@ void dc_commit_updates_for_stream(struct dc *dc,
if (update_type >= UPDATE_TYPE_FULL) {
+ struct dc_plane_state *new_planes[MAX_SURFACES];
+
+ memset(new_planes, 0, sizeof(new_planes));
+
+ for (i = 0; i < surface_count; i++)
+ new_planes[i] = srf_updates[i].surface;
/* initialize scratch memory for building context */
context = dc_create_state(dc);
@@ -2707,15 +2712,21 @@ void dc_commit_updates_for_stream(struct dc *dc,
return;
}
- dc_resource_state_copy_construct(state, context);
+ dc_resource_state_copy_construct(
+ dc->current_state, context);
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
- struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ /* remove old surfaces from context */
+ if (!dc_rem_all_planes_for_stream(dc, stream, context)) {
+ DC_ERROR("Failed to remove old planes from new validate context!\n");
+ return;
+ }
- if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
- new_pipe->plane_state->force_full_update = true;
+ /* add surface to context */
+ if (!dc_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) {
+ DC_ERROR("Failed to add planes to new validate context!\n");
+ return;
}
+
}
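With the dc_state parameter dropped from dc_commit_updates_for_stream(), the full-update path now derives its own validation context from dc->current_state: copy-construct the current state, strip the stream's old planes, then attach the surfaces named in the updates. A simplified standalone sketch of that copy/strip/re-add flow (stand-in types, not the DC API):

#include <stdio.h>
#include <string.h>

#define MAX_SURFACES 3

struct state {
        const char *planes[MAX_SURFACES];
        int plane_count;
};

static void rebuild_context(struct state *ctx, const struct state *cur,
                            const char *const new_planes[], int n)
{
        *ctx = *cur;                /* dc_resource_state_copy_construct() */
        memset(ctx->planes, 0, sizeof(ctx->planes));
        ctx->plane_count = 0;       /* dc_rem_all_planes_for_stream() */
        for (int i = 0; i < n; i++) /* dc_add_all_planes_for_stream() */
                ctx->planes[ctx->plane_count++] = new_planes[i];
}

int main(void)
{
        struct state cur = { { "old0", "old1" }, 2 }, ctx;
        const char *const new_planes[] = { "new0" };

        rebuild_context(&ctx, &cur, new_planes, 1);
        printf("%d plane(s), first=%s\n", ctx.plane_count, ctx.planes[0]);
        return 0;
}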
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index a901baf2aaef..f4a2088ab179 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -2487,9 +2487,14 @@ enum dc_status dc_link_validate_mode_timing(
static struct abm *get_abm_from_stream_res(const struct dc_link *link)
{
int i;
- struct dc *dc = link->ctx->dc;
+ struct dc *dc = NULL;
struct abm *abm = NULL;
+ if (!link || !link->ctx)
+ return NULL;
+
+ dc = link->ctx->dc;
+
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx pipe_ctx = dc->current_state->res_ctx.pipe_ctx[i];
struct dc_stream_state *stream = pipe_ctx.stream;
@@ -3267,9 +3272,6 @@ void core_link_enable_stream(
}
}
-#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
-#endif
-
/* turn off otg test pattern if enable */
if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 6b11d4af54af..1e4794e2825c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -892,13 +892,13 @@ static uint32_t translate_training_aux_read_interval(uint32_t dpcd_aux_read_inte
switch (dpcd_aux_read_interval) {
case 0x01:
- aux_rd_interval_us = 400;
+ aux_rd_interval_us = 4000;
break;
case 0x02:
- aux_rd_interval_us = 4000;
+ aux_rd_interval_us = 8000;
break;
case 0x03:
- aux_rd_interval_us = 8000;
+ aux_rd_interval_us = 12000;
break;
case 0x04:
aux_rd_interval_us = 16000;
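The corrected values follow the DPCD TRAINING_AUX_RD_INTERVAL encoding: code 0 keeps the 400us default and codes 1 through 4 mean code * 4ms, so the previous table was one step short for each non-zero code. The whole switch collapses to a single expression; a runnable sketch:

#include <stdio.h>

/* DPCD TRAINING_AUX_RD_INTERVAL: 0 -> 400us, 1..4 -> code * 4ms. */
static unsigned int aux_rd_interval_us(unsigned int code)
{
        return code ? code * 4000 : 400;
}

int main(void)
{
        for (unsigned int code = 0; code <= 4; code++)
                printf("0x%02x -> %u us\n", code, aux_rd_interval_us(code));
        return 0;
}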
@@ -2399,6 +2399,9 @@ static bool decide_dp_link_settings(struct dc_link *link, struct dc_link_setting
initial_link_setting;
uint32_t link_bw;
+ if (req_bw > dc_link_bandwidth_kbps(link, &link->verified_link_cap))
+ return false;
+
/* search for the minimum link setting that:
* 1. is supported according to the link training result
* 2. could support the b/w requested by the timing
@@ -3045,14 +3048,14 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
- pipe_ctx->stream->link == link)
+ pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe)
core_link_disable_stream(pipe_ctx);
}
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
- pipe_ctx->stream->link == link)
+ pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe)
core_link_enable_stream(link->dc->current_state, pipe_ctx);
}
@@ -3173,13 +3176,7 @@ static void get_active_converter_info(
}
/* DPCD 0x5 bit 0 = 1, it indicate it's branch device */
- if (ds_port.fields.PORT_TYPE == DOWNSTREAM_DP) {
- link->dpcd_caps.is_branch_dev = false;
- }
-
- else {
- link->dpcd_caps.is_branch_dev = ds_port.fields.PORT_PRESENT;
- }
+ link->dpcd_caps.is_branch_dev = ds_port.fields.PORT_PRESENT;
switch (ds_port.fields.PORT_TYPE) {
case DOWNSTREAM_VGA:
@@ -3998,7 +3995,7 @@ bool dc_link_dp_set_test_pattern(
unsigned int cust_pattern_size)
{
struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx;
- struct pipe_ctx *pipe_ctx = &pipes[0];
+ struct pipe_ctx *pipe_ctx = NULL;
unsigned int lane;
unsigned int i;
unsigned char link_qual_pattern[LANE_COUNT_DP_MAX] = {0};
@@ -4008,12 +4005,18 @@ bool dc_link_dp_set_test_pattern(
memset(&training_pattern, 0, sizeof(training_pattern));
for (i = 0; i < MAX_PIPES; i++) {
+ if (pipes[i].stream == NULL)
+ continue;
+
if (pipes[i].stream->link == link && !pipes[i].top_pipe && !pipes[i].prev_odm_pipe) {
pipe_ctx = &pipes[i];
break;
}
}
+ if (pipe_ctx == NULL)
+ return false;
+
/* Reset CRTC Test Pattern if it is currently running and request is VideoMode */
if (link->test_pattern_enabled && test_pattern ==
DP_TEST_PATTERN_VIDEO_MODE) {
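dc_link_dp_set_test_pattern() no longer assumes pipe 0 drives the link: it scans for the primary pipe (no top pipe, no previous ODM pipe), skips unpopulated slots whose stream is NULL, and returns false when nothing matches instead of dereferencing a stale entry. A compact sketch of that find-or-bail pattern:

#include <stddef.h>
#include <stdio.h>

#define MAX_PIPES 6

struct pipe { const char *stream; int is_primary; };

static struct pipe *find_primary_pipe(struct pipe pipes[], const char *link)
{
        for (int i = 0; i < MAX_PIPES; i++) {
                if (pipes[i].stream == NULL) /* skip unused slots */
                        continue;
                if (pipes[i].stream == link && pipes[i].is_primary)
                        return &pipes[i];
        }
        return NULL; /* caller bails out instead of using pipes[0] */
}

int main(void)
{
        const char *link = "linkA";
        struct pipe pipes[MAX_PIPES] = { 0 };

        pipes[1] = (struct pipe){ link, 1 };
        printf("%s\n", find_primary_pipe(pipes, link) ? "found" : "none");
        return 0;
}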
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index b8f1e2d33423..3aedadb34548 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -42,7 +42,7 @@
#include "inc/hw/dmcu.h"
#include "dml/display_mode_lib.h"
-#define DC_VER "3.2.115"
+#define DC_VER "3.2.116"
#define MAX_SURFACES 3
#define MAX_PLANES 6
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index b7910976b81a..e243c01b9672 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -283,8 +283,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
struct dc_surface_update *srf_updates,
int surface_count,
struct dc_stream_state *stream,
- struct dc_stream_update *stream_update,
- struct dc_state *state);
+ struct dc_stream_update *stream_update);
/*
* Log the current stream state.
*/
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index b409f6b2bfd8..210466b2d863 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -119,7 +119,8 @@ static const struct link_encoder_funcs dce110_lnk_enc_funcs = {
.disable_hpd = dce110_link_encoder_disable_hpd,
.is_dig_enabled = dce110_is_dig_enabled,
.destroy = dce110_link_encoder_destroy,
- .get_max_link_cap = dce110_link_encoder_get_max_link_cap
+ .get_max_link_cap = dce110_link_encoder_get_max_link_cap,
+ .get_dig_frontend = dce110_get_dig_frontend,
};
static enum bp_result link_transmitter_control(
@@ -235,6 +236,44 @@ static void set_link_training_complete(
}
+unsigned int dce110_get_dig_frontend(struct link_encoder *enc)
+{
+ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+ u32 value;
+ enum engine_id result;
+
+ REG_GET(DIG_BE_CNTL, DIG_FE_SOURCE_SELECT, &value);
+
+ switch (value) {
+ case DCE110_DIG_FE_SOURCE_SELECT_DIGA:
+ result = ENGINE_ID_DIGA;
+ break;
+ case DCE110_DIG_FE_SOURCE_SELECT_DIGB:
+ result = ENGINE_ID_DIGB;
+ break;
+ case DCE110_DIG_FE_SOURCE_SELECT_DIGC:
+ result = ENGINE_ID_DIGC;
+ break;
+ case DCE110_DIG_FE_SOURCE_SELECT_DIGD:
+ result = ENGINE_ID_DIGD;
+ break;
+ case DCE110_DIG_FE_SOURCE_SELECT_DIGE:
+ result = ENGINE_ID_DIGE;
+ break;
+ case DCE110_DIG_FE_SOURCE_SELECT_DIGF:
+ result = ENGINE_ID_DIGF;
+ break;
+ case DCE110_DIG_FE_SOURCE_SELECT_DIGG:
+ result = ENGINE_ID_DIGG;
+ break;
+ default:
+ /* invalid source select DIG */
+ result = ENGINE_ID_UNKNOWN;
+ }
+
+ return result;
+}
+
void dce110_link_encoder_set_dp_phy_pattern_training_pattern(
struct link_encoder *enc,
uint32_t index)
@@ -1665,7 +1704,8 @@ static const struct link_encoder_funcs dce60_lnk_enc_funcs = {
.disable_hpd = dce110_link_encoder_disable_hpd,
.is_dig_enabled = dce110_is_dig_enabled,
.destroy = dce110_link_encoder_destroy,
- .get_max_link_cap = dce110_link_encoder_get_max_link_cap
+ .get_max_link_cap = dce110_link_encoder_get_max_link_cap,
+ .get_dig_frontend = dce110_get_dig_frontend
};
void dce60_link_encoder_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
index cb714a48b171..fc6ade824c23 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
@@ -295,6 +295,8 @@ void dce110_link_encoder_connect_dig_be_to_fe(
enum engine_id engine,
bool connect);
+unsigned int dce110_get_dig_frontend(struct link_encoder *enc);
+
void dce110_link_encoder_set_dp_phy_pattern_training_pattern(
struct link_encoder *enc,
uint32_t index);
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
index 82bc4e192bbf..915fbb8e8168 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
@@ -1268,7 +1268,7 @@ void dce120_timing_generator_construct(
tg110->min_h_front_porch = 0;
tg110->min_h_back_porch = 0;
- tg110->min_h_sync_width = 8;
+ tg110->min_h_sync_width = 4;
tg110->min_v_sync_width = 1;
tg110->min_v_blank = 3;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
index 733e6e6e43bd..62ad1a11bff9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
@@ -31,11 +31,4 @@ DCN10 = dcn10_init.o dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o \
AMD_DAL_DCN10 = $(addprefix $(AMDDALPATH)/dc/dcn10/,$(DCN10))
-# fix:
-# ...: '-mgeneral-regs-only' is incompatible with the use of floating-point types
-# aarch64 does not support soft-float, so use hard-float and handle this in code
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn10/dcn10_resource.o := -mgeneral-regs-only
-endif
-
AMD_DISPLAY_FILES += $(AMD_DAL_DCN10)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
index 75637c291e75..6f42d10dd772 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
@@ -124,11 +124,11 @@ bool hubbub1_verify_allow_pstate_change_high(
* still not asserted, we are probably stuck and going to hang
*
* TODO: Figure out why it takes ~100us on linux
- * pstate takes around ~100us on linux. Unknown currently as to
- * why it takes that long on linux
+ * pstate takes around ~100us (up to 200us) on linux. Unknown currently
+ * as to why it takes that long on linux
*/
const unsigned int pstate_wait_timeout_us = 200;
- const unsigned int pstate_wait_expected_timeout_us = 40;
+ const unsigned int pstate_wait_expected_timeout_us = 180;
static unsigned int max_sampled_pstate_wait_us; /* data collection */
static bool forced_pstate_allow; /* help with revert wa */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 41679ad531c5..9e796dfeac20 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -1241,6 +1241,22 @@ void hubp1_vtg_sel(struct hubp *hubp, uint32_t otg_inst)
REG_UPDATE(DCHUBP_CNTL, HUBP_VTG_SEL, otg_inst);
}
+bool hubp1_in_blank(struct hubp *hubp)
+{
+ uint32_t in_blank;
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+
+ REG_GET(DCHUBP_CNTL, HUBP_IN_BLANK, &in_blank);
+ return in_blank ? true : false;
+}
+
+void hubp1_soft_reset(struct hubp *hubp, bool reset)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+
+ REG_UPDATE(DCHUBP_CNTL, HUBP_DISABLE, reset ? 1 : 0);
+}
+
void hubp1_init(struct hubp *hubp)
{
//do nothing
@@ -1272,6 +1288,8 @@ static const struct hubp_funcs dcn10_hubp_funcs = {
.dmdata_set_attributes = NULL,
.dmdata_load = NULL,
+ .hubp_soft_reset = hubp1_soft_reset,
+ .hubp_in_blank = hubp1_in_blank,
};
/*****************************************/
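hubp1_in_blank() and hubp1_soft_reset() above are wired into the funcs table, but not every DCN generation fills these members. A minimal usage sketch, assuming callers must treat both hooks as optional; the reset-while-blanked policy shown is illustrative, not taken from the driver:

static void toggle_hubp_reset_if_blanked(struct hubp *hubp)
{
	/* Hooks may be NULL on generations that never set them. */
	if (!hubp->funcs->hubp_in_blank || !hubp->funcs->hubp_soft_reset)
		return;

	if (hubp->funcs->hubp_in_blank(hubp)) {
		hubp->funcs->hubp_soft_reset(hubp, true);   /* assert HUBP_DISABLE */
		hubp->funcs->hubp_soft_reset(hubp, false);  /* release it again    */
	}
}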
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
index 780af5b3c16f..a9a6ed7f4f99 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
@@ -260,6 +260,7 @@
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_NO_OUTSTANDING_REQ, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_VTG_SEL, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_DISABLE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_IN_BLANK, mask_sh),\
HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_PIPES, mask_sh),\
HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_BANKS, mask_sh),\
HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, PIPE_INTERLEAVE, mask_sh),\
@@ -455,6 +456,7 @@
type HUBP_VTG_SEL;\
type HUBP_UNDERFLOW_STATUS;\
type HUBP_UNDERFLOW_CLEAR;\
+ type HUBP_IN_BLANK;\
type NUM_PIPES;\
type NUM_BANKS;\
type PIPE_INTERLEAVE;\
@@ -772,5 +774,7 @@ void hubp1_vready_workaround(struct hubp *hubp,
void hubp1_init(struct hubp *hubp);
void hubp1_read_state_common(struct hubp *hubp);
+bool hubp1_in_blank(struct hubp *hubp);
+void hubp1_soft_reset(struct hubp *hubp, bool reset);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 9f7d6b087553..017b67b830e6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -647,8 +647,13 @@ static void power_on_plane(
if (REG(DC_IP_REQUEST_CNTL)) {
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 1);
- hws->funcs.dpp_pg_control(hws, plane_id, true);
- hws->funcs.hubp_pg_control(hws, plane_id, true);
+
+ if (hws->funcs.dpp_pg_control)
+ hws->funcs.dpp_pg_control(hws, plane_id, true);
+
+ if (hws->funcs.hubp_pg_control)
+ hws->funcs.hubp_pg_control(hws, plane_id, true);
+
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 0);
DC_LOG_DEBUG(
@@ -1082,8 +1087,13 @@ void dcn10_plane_atomic_power_down(struct dc *dc,
if (REG(DC_IP_REQUEST_CNTL)) {
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 1);
- hws->funcs.dpp_pg_control(hws, dpp->inst, false);
- hws->funcs.hubp_pg_control(hws, hubp->inst, false);
+
+ if (hws->funcs.dpp_pg_control)
+ hws->funcs.dpp_pg_control(hws, dpp->inst, false);
+
+ if (hws->funcs.hubp_pg_control)
+ hws->funcs.hubp_pg_control(hws, hubp->inst, false);
+
dpp->funcs->dpp_reset(dpp);
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 0);
@@ -2736,7 +2746,7 @@ static void dcn10_program_all_pipe_in_tree(
pipe_ctx->pipe_dlg_param.vupdate_width);
pipe_ctx->stream_res.tg->funcs->set_vtg_params(
- pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
+ pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);
if (hws->funcs.setup_vupdate_interrupt)
hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
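Both hunks above replace unconditional calls with NULL-guarded dispatch, so an ASIC that lacks per-plane power gating can simply leave the hooks unset. A minimal sketch of the pattern, with a hypothetical power_gate_hook member standing in for dpp_pg_control/hubp_pg_control:

struct example_hw_funcs {
	/* left NULL by resource tables that do not support the step */
	void (*power_gate_hook)(void *ctx, int inst, bool power_on);
};

static void power_gate_plane(struct example_hw_funcs *funcs, void *ctx,
			     int inst, bool power_on)
{
	if (funcs->power_gate_hook)   /* opt-in, not an error when absent */
		funcs->power_gate_hook(ctx, inst, power_on);
}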
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
index 3fcd408e9103..b096011acb49 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
@@ -467,6 +467,17 @@ void mpc1_cursor_lock(struct mpc *mpc, int opp_id, bool lock)
REG_SET(CUR[opp_id], 0, CUR_VUPDATE_LOCK_SET, lock ? 1 : 0);
}
+unsigned int mpc1_get_mpc_out_mux(struct mpc *mpc, int opp_id)
+{
+ struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
+ uint32_t val = 0xf;
+
+ if (opp_id < MAX_OPP && REG(MUX[opp_id]))
+ REG_GET(MUX[opp_id], MPC_OUT_MUX, &val);
+
+ return val;
+}
+
static const struct mpc_funcs dcn10_mpc_funcs = {
.read_mpcc_state = mpc1_read_mpcc_state,
.insert_plane = mpc1_insert_plane,
@@ -483,6 +494,7 @@ static const struct mpc_funcs dcn10_mpc_funcs = {
.set_denorm_clamp = NULL,
.set_output_csc = NULL,
.set_output_gamma = NULL,
+ .get_mpc_out_mux = mpc1_get_mpc_out_mux,
};
void dcn10_mpc_construct(struct dcn10_mpc *mpc10,
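mpc1_get_mpc_out_mux() above bounds-checks opp_id and falls back to 0xf when the MUX register is absent. A minimal consumer sketch, assuming 0xf is treated as the "no MPCC connected" sentinel (mpc and opp_id are illustrative locals):

unsigned int mux = 0xf;   /* sentinel: unknown / disabled */

if (mpc->funcs->get_mpc_out_mux)
	mux = mpc->funcs->get_mpc_out_mux(mpc, opp_id);

if (mux != 0xf)
	DC_LOG_DEBUG("OPP%d fed by MPCC %u\n", opp_id, mux);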
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h
index 66a4719c22a0..dbfffc6383dc 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h
@@ -200,4 +200,5 @@ void mpc1_read_mpcc_state(
void mpc1_cursor_lock(struct mpc *mpc, int opp_id, bool lock);
+unsigned int mpc1_get_mpc_out_mux(struct mpc *mpc, int opp_id);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index a125d3f05c81..f033397a84e9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -272,7 +272,7 @@ void optc1_program_timing(
vupdate_offset,
vupdate_width);
- optc->funcs->set_vtg_params(optc, dc_crtc_timing);
+ optc->funcs->set_vtg_params(optc, dc_crtc_timing, true);
/* TODO
* patched_crtc_timing.flags.HORZ_COUNT_BY_TWO == 1
@@ -312,7 +312,7 @@ void optc1_program_timing(
}
void optc1_set_vtg_params(struct timing_generator *optc,
- const struct dc_crtc_timing *dc_crtc_timing)
+ const struct dc_crtc_timing *dc_crtc_timing, bool program_fp2)
{
struct dc_crtc_timing patched_crtc_timing;
uint32_t asic_blank_end;
@@ -348,9 +348,12 @@ void optc1_set_vtg_params(struct timing_generator *optc,
}
}
- REG_UPDATE_2(CONTROL,
- VTG0_FP2, v_fp2,
- VTG0_VCOUNT_INIT, v_init);
+ if (program_fp2)
+ REG_UPDATE_2(CONTROL,
+ VTG0_FP2, v_fp2,
+ VTG0_VCOUNT_INIT, v_init);
+ else
+ REG_UPDATE(CONTROL, VTG0_VCOUNT_INIT, v_init);
}
void optc1_set_blank_data_double_buffer(struct timing_generator *optc, bool enable)
@@ -1540,7 +1543,7 @@ void dcn10_timing_generator_init(struct optc *optc1)
optc1->min_h_blank = 32;
optc1->min_v_blank = 3;
optc1->min_v_blank_interlace = 5;
- optc1->min_h_sync_width = 8;
+ optc1->min_h_sync_width = 4;
optc1->min_v_sync_width = 1;
}
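set_vtg_params() now takes a program_fp2 flag; per the dcn20_hwseq.c hunks later in this patch, full pipe programming passes true while dcn20_update_bandwidth() passes false, so VTG0_FP2 is left untouched on an active stream. In short (tg and timing are illustrative locals):

/* full mode programming: refresh both FP2 and VCOUNT_INIT */
tg->funcs->set_vtg_params(tg, timing, true);

/* bandwidth-only update: reprogram VCOUNT_INIT, keep FP2 as-is */
tg->funcs->set_vtg_params(tg, timing, false);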
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
index 344eb487219e..b12bd9aae52f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
@@ -700,6 +700,6 @@ bool optc1_get_crc(struct timing_generator *optc,
bool optc1_is_two_pixels_per_containter(const struct dc_crtc_timing *timing);
void optc1_set_vtg_params(struct timing_generator *optc,
- const struct dc_crtc_timing *dc_crtc_timing);
+ const struct dc_crtc_timing *dc_crtc_timing, bool program_fp2);
#endif /* __DC_TIMING_GENERATOR_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index bdc37831535e..90e912fef2b3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -608,8 +608,8 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_pplib_clock_request = false,
.disable_pplib_wm_range = false,
.pplib_wm_report_mode = WM_REPORT_DEFAULT,
- .pipe_split_policy = MPC_SPLIT_DYNAMIC,
- .force_single_disp_pipe_split = true,
+ .pipe_split_policy = MPC_SPLIT_AVOID,
+ .force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.voltage_align_fclk = true,
.disable_stereo_support = true,
@@ -1534,15 +1534,8 @@ static bool dcn10_resource_construct(
memcpy(dc->dcn_ip, &dcn10_ip_defaults, sizeof(dcn10_ip_defaults));
memcpy(dc->dcn_soc, &dcn10_soc_defaults, sizeof(dcn10_soc_defaults));
-#if defined(CONFIG_ARM64)
- /* Aarch64 does not support -msoft-float/-mfloat-abi=soft */
- DC_FP_START();
- dcn10_resource_construct_fp(dc);
- DC_FP_END();
-#else
/* Other architectures we build for build this with soft-float */
dcn10_resource_construct_fp(dc);
-#endif
pool->base.pp_smu = dcn10_pp_smu_create(ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
index 9e38c37c1d73..76b334644f9e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
@@ -81,7 +81,9 @@
SRI(DP_MSE_RATE_UPDATE, DP, id), \
SRI(DP_PIXEL_FORMAT, DP, id), \
SRI(DP_SEC_CNTL, DP, id), \
+ SRI(DP_SEC_CNTL1, DP, id), \
SRI(DP_SEC_CNTL2, DP, id), \
+ SRI(DP_SEC_CNTL5, DP, id), \
SRI(DP_SEC_CNTL6, DP, id), \
SRI(DP_STEER_FIFO, DP, id), \
SRI(DP_VID_M, DP, id), \
@@ -126,7 +128,9 @@ struct dcn10_stream_enc_registers {
uint32_t DP_MSE_RATE_UPDATE;
uint32_t DP_PIXEL_FORMAT;
uint32_t DP_SEC_CNTL;
+ uint32_t DP_SEC_CNTL1;
uint32_t DP_SEC_CNTL2;
+ uint32_t DP_SEC_CNTL5;
uint32_t DP_SEC_CNTL6;
uint32_t DP_STEER_FIFO;
uint32_t DP_VID_M;
@@ -411,6 +415,8 @@ struct dcn10_stream_enc_registers {
type DP_SEC_GSP3_ENABLE;\
type DP_SEC_GSP4_ENABLE;\
type DP_SEC_GSP5_ENABLE;\
+ type DP_SEC_GSP5_LINE_NUM;\
+ type DP_SEC_GSP5_LINE_REFERENCE;\
type DP_SEC_GSP6_ENABLE;\
type DP_SEC_GSP7_ENABLE;\
type DP_SEC_GSP7_PPS;\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
index 624cb1341ef1..5fcaf78334ff 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
@@ -17,10 +17,6 @@ ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mhard-float -maltivec
endif
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mgeneral-regs-only
-endif
-
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
index b7e44e53a342..0df0da2e6a4d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
@@ -1595,6 +1595,8 @@ static struct hubp_funcs dcn20_hubp_funcs = {
.hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl,
.hubp_init = hubp1_init,
.validate_dml_output = hubp2_validate_dml_output,
+ .hubp_in_blank = hubp1_in_blank,
+ .hubp_soft_reset = hubp1_soft_reset,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index abcb06044e6e..480d928cb1ca 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1062,8 +1062,13 @@ static void dcn20_power_on_plane(
if (REG(DC_IP_REQUEST_CNTL)) {
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 1);
- dcn20_dpp_pg_control(hws, pipe_ctx->plane_res.dpp->inst, true);
- dcn20_hubp_pg_control(hws, pipe_ctx->plane_res.hubp->inst, true);
+
+ if (hws->funcs.dpp_pg_control)
+ hws->funcs.dpp_pg_control(hws, pipe_ctx->plane_res.dpp->inst, true);
+
+ if (hws->funcs.hubp_pg_control)
+ hws->funcs.hubp_pg_control(hws, pipe_ctx->plane_res.hubp->inst, true);
+
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 0);
DC_LOG_DEBUG(
@@ -1586,7 +1591,10 @@ static void dcn20_program_pipe(
&& !pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe)
hws->funcs.blank_pixel_data(dc, pipe_ctx, !pipe_ctx->plane_state->visible);
- if (pipe_ctx->update_flags.bits.global_sync) {
+ /* Only update TG on top pipe */
+ if (pipe_ctx->update_flags.bits.global_sync && !pipe_ctx->top_pipe
+ && !pipe_ctx->prev_odm_pipe) {
+
pipe_ctx->stream_res.tg->funcs->program_global_sync(
pipe_ctx->stream_res.tg,
pipe_ctx->pipe_dlg_param.vready_offset,
@@ -1594,8 +1602,11 @@ static void dcn20_program_pipe(
pipe_ctx->pipe_dlg_param.vupdate_offset,
pipe_ctx->pipe_dlg_param.vupdate_width);
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+
pipe_ctx->stream_res.tg->funcs->set_vtg_params(
- pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
+ pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);
if (hws->funcs.setup_vupdate_interrupt)
hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
@@ -1695,14 +1706,6 @@ void dcn20_program_front_end_for_ctx(
&& context->res_ctx.pipe_ctx[i].stream)
hws->funcs.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true);
- /* wait for outstanding pending changes before adding or removing planes */
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable ||
- context->res_ctx.pipe_ctx[i].update_flags.bits.enable) {
- dc->hwss.wait_for_pending_cleared(dc, context);
- break;
- }
- }
/* Disconnect mpcc */
for (i = 0; i < dc->res_pool->pipe_count; i++)
@@ -1856,7 +1859,7 @@ bool dcn20_update_bandwidth(
pipe_ctx->pipe_dlg_param.vupdate_width);
pipe_ctx->stream_res.tg->funcs->set_vtg_params(
- pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
+ pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, false);
if (pipe_ctx->prev_odm_pipe == NULL)
hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
@@ -2251,11 +2254,11 @@ void dcn20_get_mpctree_visual_confirm_color(
{
const struct tg_color pipe_colors[6] = {
{MAX_TG_COLOR_VALUE, 0, 0}, // red
- {MAX_TG_COLOR_VALUE, 0, MAX_TG_COLOR_VALUE}, // yellow
- {0, MAX_TG_COLOR_VALUE, 0}, // blue
+ {MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE / 4, 0}, // orange
+ {MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE, 0}, // yellow
+ {0, MAX_TG_COLOR_VALUE, 0}, // green
+ {0, 0, MAX_TG_COLOR_VALUE}, // blue
{MAX_TG_COLOR_VALUE / 2, 0, MAX_TG_COLOR_VALUE / 2}, // purple
- {0, 0, MAX_TG_COLOR_VALUE}, // green
- {MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE * 2 / 3, 0}, // orange
};
struct pipe_ctx *top_pipe = pipe_ctx;
@@ -2280,14 +2283,11 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
// input to MPCC is always RGB, by default leave black_color at 0
if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) {
- hws->funcs.get_hdr_visual_confirm_color(
- pipe_ctx, &blnd_cfg.black_color);
+ hws->funcs.get_hdr_visual_confirm_color(pipe_ctx, &blnd_cfg.black_color);
} else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) {
- hws->funcs.get_surface_visual_confirm_color(
- pipe_ctx, &blnd_cfg.black_color);
+ hws->funcs.get_surface_visual_confirm_color(pipe_ctx, &blnd_cfg.black_color);
} else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE) {
- dcn20_get_mpctree_visual_confirm_color(
- pipe_ctx, &blnd_cfg.black_color);
+ dcn20_get_mpctree_visual_confirm_color(pipe_ctx, &blnd_cfg.black_color);
}
if (per_pixel_alpha)
@@ -2581,4 +2581,4 @@ void dcn20_set_disp_pattern_generator(const struct dc *dc,
{
pipe_ctx->stream_res.opp->funcs->opp_set_disp_pattern_generator(pipe_ctx->stream_res.opp, test_pattern,
color_space, color_depth, solid_color, width, height, offset);
-} \ No newline at end of file
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
index 99cc095dc33c..6a99fdd55e8c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
@@ -556,6 +556,7 @@ const struct mpc_funcs dcn20_mpc_funcs = {
.set_ocsc_default = mpc2_set_ocsc_default,
.set_output_gamma = mpc2_set_output_gamma,
.power_on_mpc_mem_pwr = mpc20_power_on_ogam_lut,
+ .get_mpc_out_mux = mpc1_get_mpc_out_mux,
};
void dcn20_mpc_construct(struct dcn20_mpc *mpc20,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index ff36db5edf6c..5ed18cac57e8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -1933,7 +1933,7 @@ bool dcn20_split_stream_for_odm(
next_odm_pipe->stream_res.opp = pool->opps[next_odm_pipe->pipe_idx];
else
next_odm_pipe->stream_res.opp = next_odm_pipe->top_pipe->stream_res.opp;
- if (next_odm_pipe->stream->timing.flags.DSC == 1) {
+ if (next_odm_pipe->stream->timing.flags.DSC == 1 && !next_odm_pipe->top_pipe) {
dcn20_acquire_dsc(dc, res_ctx, &next_odm_pipe->stream_res.dsc, next_odm_pipe->pipe_idx);
ASSERT(next_odm_pipe->stream_res.dsc);
if (next_odm_pipe->stream_res.dsc == NULL)
@@ -2517,8 +2517,7 @@ struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
* if this primary pipe has a bottom pipe in prev. state
* and if the bottom pipe is still available (which it should be),
* pick that pipe as secondary
- * Same logic applies for ODM pipes. Since mpo is not allowed with odm
- * check in else case.
+ * Same logic applies for ODM pipes
*/
if (dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].bottom_pipe) {
preferred_pipe_idx = dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].bottom_pipe->pipe_idx;
@@ -2526,7 +2525,9 @@ struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
secondary_pipe->pipe_idx = preferred_pipe_idx;
}
- } else if (dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe) {
+ }
+ if (secondary_pipe == NULL &&
+ dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe) {
preferred_pipe_idx = dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe->pipe_idx;
if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) {
secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
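Turning the else-if into a separate NULL-guarded if makes the ODM neighbour a fallback rather than an alternative: it is now consulted whenever the MPC bottom pipe did not yield a free pipe. A minimal sketch of the resulting selection order, with hypothetical helpers:

extern struct pipe_ctx *free_pipe_below(struct pipe_ctx *primary);
extern struct pipe_ctx *free_pipe_next_odm(struct pipe_ctx *primary);

struct pipe_ctx *pick_secondary(struct pipe_ctx *primary)
{
	struct pipe_ctx *secondary = free_pipe_below(primary);

	if (secondary == NULL)                 /* fall through, don't skip */
		secondary = free_pipe_next_odm(primary);
	return secondary;
}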
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h
index d2a805bd4573..9a881e639709 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h
@@ -83,6 +83,8 @@
SE_SF(DIG0_HDMI_METADATA_PACKET_CONTROL, HDMI_METADATA_PACKET_LINE, mask_sh),\
SE_SF(DIG0_DIG_FE_CNTL, DOLBY_VISION_EN, mask_sh),\
SE_SF(DP0_DP_PIXEL_FORMAT, DP_PIXEL_COMBINE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL1, DP_SEC_GSP5_LINE_REFERENCE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL5, DP_SEC_GSP5_LINE_NUM, mask_sh),\
SE_SF(DP0_DP_SEC_FRAMING4, DP_SST_SDP_SPLITTING, mask_sh)
void dcn20_stream_encoder_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
index 1ee5fc03b7b3..bb8c95141082 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
@@ -13,10 +13,6 @@ ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -maltivec
endif
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mgeneral-regs-only
-endif
-
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.h
index b7efa777ec73..e44a37491c1e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.h
@@ -32,5 +32,6 @@ struct dccg *dccg21_create(
const struct dccg_shift *dccg_shift,
const struct dccg_mask *dccg_mask);
+void dccg21_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk);
#endif /* __DCN21_DCCG_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index 1c88d2edd381..674376428916 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -296,7 +296,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
.num_banks = 8,
.num_chans = 4,
.vmm_page_size_bytes = 4096,
- .dram_clock_change_latency_us = 23.84,
+ .dram_clock_change_latency_us = 11.72,
.return_bus_width_bytes = 64,
.dispclk_dppclk_vco_speed_mhz = 3600,
.xfc_bus_transport_time_us = 4,
@@ -906,6 +906,8 @@ enum dcn20_clk_src_array_id {
DCN20_CLK_SRC_PLL0,
DCN20_CLK_SRC_PLL1,
DCN20_CLK_SRC_PLL2,
+ DCN20_CLK_SRC_PLL3,
+ DCN20_CLK_SRC_PLL4,
DCN20_CLK_SRC_TOTAL_DCN21
};
@@ -2030,6 +2032,14 @@ static bool dcn21_resource_construct(
dcn21_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL2,
&clk_src_regs[2], false);
+ pool->base.clock_sources[DCN20_CLK_SRC_PLL3] =
+ dcn21_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL3,
+ &clk_src_regs[3], false);
+ pool->base.clock_sources[DCN20_CLK_SRC_PLL4] =
+ dcn21_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL4,
+ &clk_src_regs[4], false);
pool->base.clk_src_count = DCN20_CLK_SRC_TOTAL_DCN21;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile
index 248c2711aace..dfd77b3cc84d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile
@@ -32,8 +32,8 @@ DCN30 = dcn30_init.o dcn30_hubbub.o dcn30_hubp.o dcn30_dpp.o dcn30_optc.o \
ifdef CONFIG_X86
-CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o := -mhard-float -msse
-CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -mhard-float -msse
+CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o := -msse
+CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -msse
endif
ifdef CONFIG_PPC64
@@ -41,15 +41,12 @@ CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o := -mhard-float -maltivec
CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -mhard-float -maltivec
endif
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o := -mgeneral-regs-only
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -mgeneral-regs-only
-endif
-
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
endif
+CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o += -mhard-float
+CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o += -mhard-float
endif
ifdef CONFIG_X86
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c
index 2ae159e2dd6e..46ea39f5ef8d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c
@@ -51,7 +51,7 @@
(enc10->link_regs->index)
-static bool dcn30_link_encoder_validate_output_with_stream(
+bool dcn30_link_encoder_validate_output_with_stream(
struct link_encoder *enc,
const struct dc_stream_state *stream)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h
index 2fbf879cd327..f2d90f2b8bf1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h
@@ -78,4 +78,8 @@ void dcn30_link_encoder_construct(
void enc3_hw_init(struct link_encoder *enc);
+bool dcn30_link_encoder_validate_output_with_stream(
+ struct link_encoder *enc,
+ const struct dc_stream_state *stream);
+
#endif /* __DC_LINK_ENCODER__DCN30_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
index af462fe4260d..88ffa9ff1ed1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
@@ -509,6 +509,8 @@ static struct hubp_funcs dcn30_hubp_funcs = {
.hubp_clear_underflow = hubp2_clear_underflow,
.hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl,
.hubp_init = hubp3_init,
+ .hubp_in_blank = hubp1_in_blank,
+ .hubp_soft_reset = hubp1_soft_reset,
};
bool hubp3_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
index 283995ab9eeb..3deb3fb1724d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
@@ -668,7 +668,7 @@ void dcn30_update_info_frame(struct pipe_ctx *pipe_ctx)
is_hdmi_tmds = dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal);
is_dp = dc_is_dp_signal(pipe_ctx->stream->signal);
- if (!is_hdmi_tmds)
+ if (!is_hdmi_tmds && !is_dp)
return;
if (is_hdmi_tmds)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
index d7d053fc6e91..3e6f76096119 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
@@ -1428,6 +1428,7 @@ const struct mpc_funcs dcn30_mpc_funcs = {
.program_3dlut = mpc3_program_3dlut,
.release_rmu = mpcc3_release_rmu,
.power_on_mpc_mem_pwr = mpc20_power_on_ogam_lut,
+ .get_mpc_out_mux = mpc1_get_mpc_out_mux,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
index b1f228fc119a..3ba3991ee612 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
@@ -350,7 +350,7 @@ void dcn30_timing_generator_init(struct optc *optc1)
optc1->min_h_blank = 32;
optc1->min_v_blank = 3;
optc1->min_v_blank_interlace = 5;
- optc1->min_h_sync_width = 8;
+ optc1->min_h_sync_width = 4;
optc1->min_v_sync_width = 1;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
index 2fd5d34e4ba6..09264716d1dc 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
@@ -14,21 +14,18 @@ DCN301 = dcn301_init.o dcn301_resource.o dcn301_dccg.o \
dcn301_dio_link_encoder.o dcn301_hwseq.o dcn301_panel_cntl.o dcn301_hubbub.o
ifdef CONFIG_X86
-CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o := -mhard-float -msse
+CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o := -msse
endif
ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o := -mhard-float -maltivec
endif
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o := -mgeneral-regs-only
-endif
-
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
endif
+CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o += -mhard-float
endif
ifdef CONFIG_X86
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
index 4825c5c1c6ed..35f5bf08ae96 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
@@ -1731,6 +1731,7 @@ static struct resource_funcs dcn301_res_pool_funcs = {
.populate_dml_pipes = dcn30_populate_dml_pipes_from_context,
.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
+ .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
.populate_dml_writeback_from_context = dcn30_populate_dml_writeback_from_context,
.set_mcif_arb_params = dcn30_set_mcif_arb_params,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/Makefile b/drivers/gpu/drm/amd/display/dc/dcn302/Makefile
index 36e44e1b07fa..101620a8867a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn302/Makefile
@@ -13,21 +13,18 @@
DCN3_02 = dcn302_init.o dcn302_hwseq.o dcn302_resource.o
ifdef CONFIG_X86
-CFLAGS_$(AMDDALPATH)/dc/dcn302/dcn302_resource.o := -mhard-float -msse
+CFLAGS_$(AMDDALPATH)/dc/dcn302/dcn302_resource.o := -msse
endif
ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/dcn302/dcn302_resource.o := -mhard-float -maltivec
endif
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn302/dcn302_resource.o := -mgeneral-regs-only
-endif
-
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
endif
+CFLAGS_$(AMDDALPATH)/dc/dcn302/dcn302_resource.o += -mhard-float
endif
ifdef CONFIG_X86
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index a02a33dcd70b..6bb7f2905821 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -33,10 +33,6 @@ ifdef CONFIG_PPC64
dml_ccflags := -mhard-float -maltivec
endif
-ifdef CONFIG_ARM64
-dml_rcflags := -mgeneral-regs-only
-endif
-
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
index 860e72a51534..80170f9721ce 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
@@ -2635,14 +2635,15 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP
}
if (mode_lib->vba.DRAMClockChangeSupportsVActive &&
- mode_lib->vba.MinActiveDRAMClockChangeMargin > 60 &&
- mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) {
+ mode_lib->vba.MinActiveDRAMClockChangeMargin > 60) {
mode_lib->vba.DRAMClockChangeWatermark += 25;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
- if (mode_lib->vba.DRAMClockChangeWatermark >
- dml_max(mode_lib->vba.StutterEnterPlusExitWatermark, mode_lib->vba.UrgentWatermark))
- mode_lib->vba.MinTTUVBlank[k] += 25;
+ if (mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) {
+ if (mode_lib->vba.DRAMClockChangeWatermark >
+ dml_max(mode_lib->vba.StutterEnterPlusExitWatermark, mode_lib->vba.UrgentWatermark))
+ mode_lib->vba.MinTTUVBlank[k] += 25;
+ }
}
mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/Makefile b/drivers/gpu/drm/amd/display/dc/dsc/Makefile
index f2624a1156e5..8d31eb75c6a6 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dsc/Makefile
@@ -10,10 +10,6 @@ ifdef CONFIG_PPC64
dsc_ccflags := -mhard-float -maltivec
endif
-ifdef CONFIG_ARM64
-dsc_rcflags := -mgeneral-regs-only
-endif
-
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index 315e3061c592..22f3f643ed1b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -188,6 +188,8 @@ struct hubp_funcs {
void (*set_unbounded_requesting)(
struct hubp *hubp,
bool enable);
+ bool (*hubp_in_blank)(struct hubp *hubp);
+ void (*hubp_soft_reset)(struct hubp *hubp, bool reset);
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
index 879f502ae530..75c77ad9cbfe 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
@@ -359,6 +359,10 @@ struct mpc_funcs {
int (*release_rmu)(struct mpc *mpc, int mpcc_id);
+ unsigned int (*get_mpc_out_mux)(
+ struct mpc *mpc,
+ int opp_id);
+
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 12d5718caea8..f7632fe25976 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -271,7 +271,7 @@ struct timing_generator_funcs {
struct dc_crtc_timing *hw_crtc_timing);
void (*set_vtg_params)(struct timing_generator *optc,
- const struct dc_crtc_timing *dc_crtc_timing);
+ const struct dc_crtc_timing *dc_crtc_timing, bool program_fp2);
void (*set_dsc_config)(struct timing_generator *optc,
enum optc_dsc_mode dsc_mode,
diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h
index 95cb56929e79..126c2f3a4dd3 100644
--- a/drivers/gpu/drm/amd/display/dc/os_types.h
+++ b/drivers/gpu/drm/amd/display/dc/os_types.h
@@ -55,10 +55,6 @@
#include <asm/fpu/api.h>
#define DC_FP_START() kernel_fpu_begin()
#define DC_FP_END() kernel_fpu_end()
-#elif defined(CONFIG_ARM64)
-#include <asm/neon.h>
-#define DC_FP_START() kernel_neon_begin()
-#define DC_FP_END() kernel_neon_end()
#elif defined(CONFIG_PPC64)
#include <asm/switch_to.h>
#include <asm/cputable.h>
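With the ARM64 branch removed, DC_FP_START()/DC_FP_END() resolve only to kernel_fpu_begin()/kernel_fpu_end() on x86 (and the PPC64 equivalents below). Any DC code that touches float or double must still be bracketed; a minimal sketch, with calc_something_fp() as an illustrative FP-using helper:

DC_FP_START();              /* save FP state, allow FP in kernel */
calc_something_fp(dc);      /* uses float/double internally      */
DC_FP_END();                /* restore FP state                  */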
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index b20a39f488ae..249a076d6f69 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -47,10 +47,10 @@
/* Firmware versioning. */
#ifdef DMUB_EXPOSE_VERSION
-#define DMUB_FW_VERSION_GIT_HASH 0x931573111
+#define DMUB_FW_VERSION_GIT_HASH 0xf51b86a
#define DMUB_FW_VERSION_MAJOR 0
#define DMUB_FW_VERSION_MINOR 0
-#define DMUB_FW_VERSION_REVISION 45
+#define DMUB_FW_VERSION_REVISION 47
#define DMUB_FW_VERSION_TEST 0
#define DMUB_FW_VERSION_VBIOS 0
#define DMUB_FW_VERSION_HOTFIX 0
@@ -514,12 +514,20 @@ enum dp_aux_request_action {
enum aux_return_code_type {
AUX_RET_SUCCESS = 0,
+ AUX_RET_ERROR_UNKNOWN,
+ AUX_RET_ERROR_INVALID_REPLY,
AUX_RET_ERROR_TIMEOUT,
- AUX_RET_ERROR_NO_DATA,
+ AUX_RET_ERROR_HPD_DISCON,
+ AUX_RET_ERROR_ENGINE_ACQUIRE,
AUX_RET_ERROR_INVALID_OPERATION,
AUX_RET_ERROR_PROTOCOL_ERROR,
};
+enum aux_channel_type {
+ AUX_CHANNEL_LEGACY_DDC,
+ AUX_CHANNEL_DPIA
+};
+
/* DP AUX command */
struct aux_transaction_parameters {
uint8_t is_i2c_over_aux;
@@ -532,9 +540,10 @@ struct aux_transaction_parameters {
struct dmub_cmd_dp_aux_control_data {
uint32_t handle;
- uint8_t port_index;
+ uint8_t instance;
uint8_t sw_crc_enabled;
uint16_t timeout;
+ enum aux_channel_type type;
struct aux_transaction_parameters dpaux;
};
@@ -558,7 +567,7 @@ struct aux_reply_data {
struct aux_reply_control_data {
uint32_t handle;
- uint8_t phy_port_index;
+ uint8_t instance;
uint8_t result;
uint16_t pad;
};
@@ -581,7 +590,7 @@ enum dp_hpd_status {
};
struct dp_hpd_data {
- uint8_t phy_port_index;
+ uint8_t instance;
uint8_t hpd_type;
uint8_t hpd_status;
uint8_t pad;
@@ -732,27 +741,30 @@ enum dmub_cmd_abm_type {
struct abm_config_table {
/* Parameters for crgb conversion */
uint16_t crgb_thresh[NUM_POWER_FN_SEGS]; // 0B
- uint16_t crgb_offset[NUM_POWER_FN_SEGS]; // 15B
- uint16_t crgb_slope[NUM_POWER_FN_SEGS]; // 31B
+ uint16_t crgb_offset[NUM_POWER_FN_SEGS]; // 16B
+ uint16_t crgb_slope[NUM_POWER_FN_SEGS]; // 32B
/* Parameters for custom curve */
- uint16_t backlight_thresholds[NUM_BL_CURVE_SEGS]; // 47B
- uint16_t backlight_offsets[NUM_BL_CURVE_SEGS]; // 79B
-
- uint16_t ambient_thresholds_lux[NUM_AMBI_LEVEL]; // 111B
- uint16_t min_abm_backlight; // 121B
-
- uint8_t min_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; // 123B
- uint8_t max_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; // 143B
- uint8_t bright_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; // 163B
- uint8_t dark_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; // 183B
- uint8_t hybrid_factor[NUM_AGGR_LEVEL]; // 203B
- uint8_t contrast_factor[NUM_AGGR_LEVEL]; // 207B
- uint8_t deviation_gain[NUM_AGGR_LEVEL]; // 211B
- uint8_t min_knee[NUM_AGGR_LEVEL]; // 215B
- uint8_t max_knee[NUM_AGGR_LEVEL]; // 219B
- uint8_t iir_curve[NUM_AMBI_LEVEL]; // 223B
- uint8_t pad3[3]; // 228B
+ uint16_t backlight_thresholds[NUM_BL_CURVE_SEGS]; // 48B
+ uint16_t backlight_offsets[NUM_BL_CURVE_SEGS]; // 78B
+
+ uint16_t ambient_thresholds_lux[NUM_AMBI_LEVEL]; // 112B
+ uint16_t min_abm_backlight; // 122B
+
+ uint8_t min_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; // 124B
+ uint8_t max_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; // 144B
+ uint8_t bright_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; // 164B
+ uint8_t dark_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; // 184B
+ uint8_t hybrid_factor[NUM_AGGR_LEVEL]; // 204B
+ uint8_t contrast_factor[NUM_AGGR_LEVEL]; // 208B
+ uint8_t deviation_gain[NUM_AGGR_LEVEL]; // 212B
+ uint8_t min_knee[NUM_AGGR_LEVEL]; // 216B
+ uint8_t max_knee[NUM_AGGR_LEVEL]; // 220B
+ uint8_t iir_curve[NUM_AMBI_LEVEL]; // 224B
+ uint8_t pad3[3]; // 229B
+
+ uint16_t blRampReduction[NUM_AGGR_LEVEL]; // 232B
+ uint16_t blRampStart[NUM_AGGR_LEVEL]; // 240B
};
struct dmub_cmd_abm_set_pipe_data {
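The offset comments above were off by one byte per field (each uint16_t[8] array is 16 bytes, so crgb_offset starts at 16B, not 15B), and the two blRamp arrays grow the table. A hedged sketch of how the corrected offsets could be pinned at build time, assuming NUM_POWER_FN_SEGS == 8 and a packed layout; this check is illustrative, not part of the driver:

#include <stddef.h>

_Static_assert(offsetof(struct abm_config_table, crgb_offset) == 16,
	       "crgb_offset starts at 16B");
_Static_assert(offsetof(struct abm_config_table, crgb_slope) == 32,
	       "crgb_slope starts at 32B");
_Static_assert(offsetof(struct abm_config_table, backlight_thresholds) == 48,
	       "backlight_thresholds starts at 48B");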
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index eced40a2fce4..5c67e12b2e55 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -30,6 +30,14 @@
#include "opp.h"
#include "color_gamma.h"
+/* When calculating LUT values, the first region and at least one
+ * subsequent region are calculated with full precision. These defines
+ * mark where that second precise region starts and ends; the values
+ * are hardcoded to avoid recalculating them in loops.
+ */
+#define PRECISE_LUT_REGION_START 224
+#define PRECISE_LUT_REGION_END 239
+
static struct hw_x_point coordinates_x[MAX_HW_POINTS + 2];
// these are helpers for calculations to reduce stack usage
@@ -346,7 +354,13 @@ static struct fixed31_32 translate_from_linear_space(
dc_fixpt_recip(args->gamma));
}
scratch_1 = dc_fixpt_add(one, args->a3);
- if (cal_buffer->buffer_index < 16)
+ /* In the first region (first 16 points) and in the
+ * region delimited by START/END we calculate with
+ * full precision to avoid error accumulation.
+ */
+ if ((cal_buffer->buffer_index >= PRECISE_LUT_REGION_START &&
+ cal_buffer->buffer_index <= PRECISE_LUT_REGION_END) ||
+ (cal_buffer->buffer_index < 16))
scratch_2 = dc_fixpt_pow(args->arg,
dc_fixpt_recip(args->gamma));
else
@@ -397,9 +411,7 @@ static struct fixed31_32 translate_from_linear_space_long(
dc_fixpt_recip(args->gamma))),
args->a2);
else
- return dc_fixpt_mul(
- args->arg,
- args->a1);
+ return dc_fixpt_mul(args->arg, args->a1);
}
static struct fixed31_32 calculate_gamma22(struct fixed31_32 arg, bool use_eetf, struct calculate_buffer *cal_buffer)
@@ -717,7 +729,6 @@ static struct fixed31_32 calculate_mapped_value(
BREAK_TO_DEBUGGER();
result = dc_fixpt_zero;
} else {
- BREAK_TO_DEBUGGER();
result = dc_fixpt_one;
}
@@ -976,6 +987,7 @@ static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma,
cal_buffer->buffer_index = 0; // see var definition for more info
rgb += 32; // first 32 points have problems with fixed point, too small
coord_x += 32;
+
for (i = 32; i <= hw_points_num; i++) {
if (!is_clipped) {
if (use_eetf) {
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
index f244b72e74e0..73ca49f05bd3 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
@@ -128,8 +128,12 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp)
static inline enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
{
- /* device count must be greater than or equal to tracked hdcp displays */
- return (get_device_count(hdcp) < get_active_display_count(hdcp)) ?
+ /* Some MST displays may report the internal panel as an HDCP RX.
+ * Add 1 to the device count, since the immediate repeater's internal
+ * panel is possibly not included in DEVICE_COUNT.
+ * The device count must be greater than or equal to the number of
+ * tracked hdcp displays.
+ */
+ return ((1 + get_device_count(hdcp)) < get_active_display_count(hdcp)) ?
MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE :
MOD_HDCP_STATUS_SUCCESS;
}
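A worked example of the relaxed check: an MST repeater that omits its internal panel reports DEVICE_COUNT = 2 while the driver tracks 3 active displays; (1 + 2) >= 3 now passes where the old comparison failed. As a standalone sketch (values illustrative):

#include <stdbool.h>
#include <stdint.h>

uint8_t device_count = 2;   /* repeater's DEVICE_COUNT, panel omitted */
uint8_t tracked = 3;        /* displays tracked by the hdcp session   */

bool mismatch = ((1 + device_count) < tracked);   /* false: 3 >= 3 */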
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
index 549c113abcf7..a0895a7efda2 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
@@ -207,8 +207,11 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp)
static enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
{
- /* device count must be greater than or equal to tracked hdcp displays */
- return (get_device_count(hdcp) < get_active_display_count(hdcp)) ?
+ /* Some MST displays may report the internal panel as an HDCP RX. */
+ /* Add 1 to the device count, since the immediate repeater's internal */
+ /* panel is possibly not included in DEVICE_COUNT. */
+ /* The device count must be greater than or equal to tracked hdcp displays. */
+ return ((1 + get_device_count(hdcp)) < get_active_display_count(hdcp)) ?
MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE :
MOD_HDCP_STATUS_SUCCESS;
}
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index cc983f662157..4fd8bce95d84 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -82,22 +82,24 @@ struct abm_parameters {
unsigned char deviation_gain;
unsigned char min_knee;
unsigned char max_knee;
+ unsigned short blRampReduction;
+ unsigned short blRampStart;
};
static const struct abm_parameters abm_settings_config0[abm_defines_max_level] = {
-// min_red max_red bright_pos dark_pos brightness_gain contrast deviation min_knee max_knee
- {0xff, 0xbf, 0x20, 0x00, 0xff, 0x99, 0xb3, 0x40, 0xe0},
- {0xde, 0x85, 0x20, 0x00, 0xff, 0x90, 0xa8, 0x40, 0xdf},
- {0xb0, 0x50, 0x20, 0x00, 0xc0, 0x88, 0x78, 0x70, 0xa0},
- {0x82, 0x40, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70},
+// min_red max_red bright_pos dark_pos bright_gain contrast dev min_knee max_knee blStart blRed
+ {0xff, 0xbf, 0x20, 0x00, 0xff, 0x99, 0xb3, 0x40, 0xe0, 0xCCCC, 0xCCCC},
+ {0xde, 0x85, 0x20, 0x00, 0xff, 0x90, 0xa8, 0x40, 0xdf, 0xCCCC, 0xCCCC},
+ {0xb0, 0x50, 0x20, 0x00, 0xc0, 0x88, 0x78, 0x70, 0xa0, 0xCCCC, 0xCCCC},
+ {0x82, 0x40, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70, 0xCCCC, 0xCCCC},
};
static const struct abm_parameters abm_settings_config1[abm_defines_max_level] = {
-// min_red max_red bright_pos dark_pos brightness_gain contrast deviation min_knee max_knee
- {0xf0, 0xd9, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70},
- {0xcd, 0xa5, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70},
- {0x99, 0x65, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70},
- {0x82, 0x4d, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70},
+// min_red max_red bright_pos dark_pos bright_gain contrast dev min_knee max_knee blStart blRed
+ {0xf0, 0xd9, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70, 0xCCCC, 0xCCCC},
+ {0xcd, 0xa5, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70, 0xCCCC, 0xCCCC},
+ {0x99, 0x65, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70, 0xCCCC, 0xCCCC},
+ {0x82, 0x4d, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70, 0xCCCC, 0xCCCC},
};
static const struct abm_parameters * const abm_settings[] = {
@@ -662,6 +664,7 @@ bool dmub_init_abm_config(struct resource_pool *res_pool,
{
struct iram_table_v_2_2 ram_table;
struct abm_config_table config;
+ unsigned int set = params.set;
bool result = false;
uint32_t i, j = 0;
@@ -710,6 +713,18 @@ bool dmub_init_abm_config(struct resource_pool *res_pool,
config.max_knee[i] = ram_table.max_knee[i];
}
+ if (params.backlight_ramping_override) {
+ for (i = 0; i < NUM_AGGR_LEVEL; i++) {
+ config.blRampReduction[i] = params.backlight_ramping_reduction;
+ config.blRampStart[i] = params.backlight_ramping_start;
+ }
+ } else {
+ for (i = 0; i < NUM_AGGR_LEVEL; i++) {
+ config.blRampReduction[i] = abm_settings[set][i].blRampReduction;
+ config.blRampStart[i] = abm_settings[set][i].blRampStart;
+ }
+ }
+
config.min_abm_backlight = ram_table.min_abm_backlight;
#if defined(CONFIG_DRM_AMD_DC_DCN)
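Callers opt into the new override path by setting backlight_ramping_override; the same reduction/start pair is then applied to every aggressiveness level instead of the per-level abm_settings defaults. A minimal usage sketch (field values illustrative; any trailing arguments to dmub_init_abm_config omitted):

struct dmcu_iram_parameters params = { 0 };

params.backlight_ramping_override = true;
params.backlight_ramping_reduction = 0xA000;   /* uniform blRampReduction */
params.backlight_ramping_start = 0xCCCC;       /* uniform blRampStart     */
/* ... populate the LUT and remaining fields, then: */
dmub_init_abm_config(res_pool, params);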
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
index fa4728d88092..6f2eecce6baa 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
@@ -39,6 +39,7 @@ enum abm_defines {
struct dmcu_iram_parameters {
unsigned int *backlight_lut_array;
unsigned int backlight_lut_array_size;
+ bool backlight_ramping_override;
unsigned int backlight_ramping_reduction;
unsigned int backlight_ramping_start;
unsigned int min_abm_backlight;
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index c38635992101..3cb8d4c5c1a3 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -499,6 +499,7 @@ enum atombios_firmware_capability
ATOM_FIRMWARE_CAP_HWEMU_UMC_CFG = 0x00000100,
ATOM_FIRMWARE_CAP_SRAM_ECC = 0x00000200,
ATOM_FIRMWARE_CAP_ENABLE_2STAGE_BIST_TRAINING = 0x00000400,
+ ATOM_FIRMWARE_CAP_ENABLE_2ND_USB20PORT = 0x0008000,
};
enum atom_cooling_solution_id{
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
index 89be49a43500..0d797fa9f5cc 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
@@ -227,6 +227,7 @@ struct smu_bios_boot_up_values
uint32_t content_revision;
uint32_t fclk;
uint32_t lclk;
+ uint32_t firmware_caps;
};
enum smu_table_id
@@ -552,6 +553,7 @@ struct pptable_funcs {
*clock_req);
uint32_t (*get_fan_control_mode)(struct smu_context *smu);
int (*set_fan_control_mode)(struct smu_context *smu, uint32_t mode);
+ int (*set_fan_speed_percent)(struct smu_context *smu, uint32_t speed);
int (*set_fan_speed_rpm)(struct smu_context *smu, uint32_t speed);
int (*set_xgmi_pstate)(struct smu_context *smu, uint32_t pstate);
int (*gfx_off_control)(struct smu_context *smu, bool enable);
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_types.h b/drivers/gpu/drm/amd/pm/inc/smu_types.h
index 4a6d1381df16..720d15612fe1 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_types.h
@@ -178,7 +178,7 @@
__SMU_DUMMY_MAP(SET_DRIVER_DUMMY_TABLE_DRAM_ADDR_LOW), \
__SMU_DUMMY_MAP(GET_UMC_FW_WA), \
__SMU_DUMMY_MAP(Mode1Reset), \
- __SMU_DUMMY_MAP(Spare), \
+ __SMU_DUMMY_MAP(RlcPowerNotify), \
__SMU_DUMMY_MAP(SetHardMinIspiclkByFreq), \
__SMU_DUMMY_MAP(SetHardMinIspxclkByFreq), \
__SMU_DUMMY_MAP(SetSoftMinSocclkByFreq), \
@@ -209,6 +209,8 @@
__SMU_DUMMY_MAP(SetSoftMinCclk), \
__SMU_DUMMY_MAP(SetSoftMaxCclk), \
__SMU_DUMMY_MAP(SetGpoFeaturePMask), \
+ __SMU_DUMMY_MAP(DisallowGpo), \
+ __SMU_DUMMY_MAP(Enable2ndUSB20Port), \
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
index e5aa0725147c..5d0b29653ffa 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
@@ -30,7 +30,7 @@
#define SMU11_DRIVER_IF_VERSION_NV10 0x36
#define SMU11_DRIVER_IF_VERSION_NV12 0x36
#define SMU11_DRIVER_IF_VERSION_NV14 0x36
-#define SMU11_DRIVER_IF_VERSION_Sienna_Cichlid 0x3B
+#define SMU11_DRIVER_IF_VERSION_Sienna_Cichlid 0x3D
#define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0xC
#define SMU11_DRIVER_IF_VERSION_VANGOGH 0x02
#define SMU11_DRIVER_IF_VERSION_Dimgrey_Cavefish 0xF
@@ -203,6 +203,9 @@ int
smu_v11_0_set_fan_control_mode(struct smu_context *smu,
uint32_t mode);
+int
+smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed);
+
int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
uint32_t speed);
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_0_7_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/smu_v11_0_7_ppsmc.h
index 35dd6072cc45..d2e10a724560 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v11_0_7_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v11_0_7_ppsmc.h
@@ -134,6 +134,10 @@
#define PPSMC_MSG_SetGpoFeaturePMask 0x45
#define PPSMC_MSG_SetSMBUSInterrupt 0x46
-#define PPSMC_Message_Count 0x47
+#define PPSMC_MSG_DisallowGpo 0x56
+
+#define PPSMC_MSG_Enable2ndUSB20Port 0x57
+
+#define PPSMC_Message_Count 0x58
#endif
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_5_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/smu_v11_5_ppsmc.h
index 7e69b3bd311b..55d7892e4e0e 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v11_5_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v11_5_ppsmc.h
@@ -41,7 +41,7 @@
#define PPSMC_MSG_PowerUpIspByTile 0x7
#define PPSMC_MSG_PowerDownVcn 0x8 // VCN is power gated by default
#define PPSMC_MSG_PowerUpVcn 0x9
-#define PPSMC_MSG_spare 0xA
+#define PPSMC_MSG_RlcPowerNotify 0xA
#define PPSMC_MSG_SetHardMinVcn 0xB // For wireless display
#define PPSMC_MSG_SetSoftMinGfxclk 0xC //Sets SoftMin for GFXCLK. Arg is in MHz
#define PPSMC_MSG_ActiveProcessNotify 0xD
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
index e57e64bbacdc..88322781e447 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
@@ -251,7 +251,7 @@ static int smu10_set_hard_min_gfxclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t cl
smu10_data->gfx_actual_soft_min_freq = clock;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
- smu10_data->gfx_actual_soft_min_freq,
+ clock,
NULL);
}
return 0;
@@ -558,7 +558,8 @@ static int smu10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
/* enable the pp_od_clk_voltage sysfs file */
hwmgr->od_enabled = 1;
-
+ /* disable fine grain tuning by default */
+ data->fine_grain_enabled = 0;
return result;
}
@@ -597,6 +598,7 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
uint32_t min_mclk = hwmgr->display_config->min_mem_set_clock/100;
uint32_t index_fclk = data->clock_vol_info.vdd_dep_on_fclk->count - 1;
uint32_t index_socclk = data->clock_vol_info.vdd_dep_on_socclk->count - 1;
+ uint32_t fine_grain_min_freq = 0, fine_grain_max_freq = 0;
if (hwmgr->smu_version < 0x1E3700) {
pr_info("smu firmware version too old, can not set dpm level\n");
@@ -613,6 +615,14 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+ data->fine_grain_enabled = 0;
+
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
+
+ data->gfx_actual_soft_min_freq = fine_grain_min_freq;
+ data->gfx_actual_soft_max_freq = fine_grain_max_freq;
+
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
data->gfx_max_freq_limit/100,
@@ -648,6 +658,14 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
NULL);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+ data->fine_grain_enabled = 0;
+
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
+
+ data->gfx_actual_soft_min_freq = fine_grain_min_freq;
+ data->gfx_actual_soft_max_freq = fine_grain_max_freq;
+
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
min_sclk,
@@ -658,6 +676,14 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
NULL);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+ data->fine_grain_enabled = 0;
+
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
+
+ data->gfx_actual_soft_min_freq = fine_grain_min_freq;
+ data->gfx_actual_soft_max_freq = fine_grain_max_freq;
+
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
min_mclk,
@@ -668,6 +694,14 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
NULL);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+ data->fine_grain_enabled = 0;
+
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
+
+ data->gfx_actual_soft_min_freq = fine_grain_min_freq;
+ data->gfx_actual_soft_max_freq = fine_grain_max_freq;
+
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
SMU10_UMD_PSTATE_GFXCLK,
@@ -703,6 +737,14 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
NULL);
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
+ data->fine_grain_enabled = 0;
+
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
+
+ data->gfx_actual_soft_min_freq = fine_grain_min_freq;
+ data->gfx_actual_soft_max_freq = fine_grain_max_freq;
+
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
min_sclk,
@@ -741,6 +783,14 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
NULL);
break;
case AMD_DPM_FORCED_LEVEL_LOW:
+ data->fine_grain_enabled = 0;
+
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
+
+ data->gfx_actual_soft_min_freq = fine_grain_min_freq;
+ data->gfx_actual_soft_max_freq = fine_grain_max_freq;
+
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
data->gfx_min_freq_limit/100,
@@ -759,6 +809,7 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
NULL);
break;
case AMD_DPM_FORCED_LEVEL_MANUAL:
+ data->fine_grain_enabled = 1;
case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
default:
break;
@@ -948,6 +999,8 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
struct smu10_voltage_dependency_table *mclk_table =
data->clock_vol_info.vdd_dep_on_fclk;
uint32_t i, now, size = 0;
+ uint32_t min_freq, max_freq = 0;
+ uint32_t ret = 0;
switch (type) {
case PP_SCLK:
@@ -983,18 +1036,28 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
break;
case OD_SCLK:
if (hwmgr->od_enabled) {
- size = sprintf(buf, "%s:\n", "OD_SCLK");
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
+ if (ret)
+ return ret;
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
+ if (ret)
+ return ret;
+ size = sprintf(buf, "%s:\n", "OD_SCLK");
size += sprintf(buf + size, "0: %10uMhz\n",
- (data->gfx_actual_soft_min_freq > 0) ? data->gfx_actual_soft_min_freq : data->gfx_min_freq_limit/100);
- size += sprintf(buf + size, "1: %10uMhz\n", data->gfx_max_freq_limit/100);
+ (data->gfx_actual_soft_min_freq > 0) ? data->gfx_actual_soft_min_freq : min_freq);
+ size += sprintf(buf + size, "1: %10uMhz\n",
+ (data->gfx_actual_soft_max_freq > 0) ? data->gfx_actual_soft_max_freq : max_freq);
}
break;
case OD_RANGE:
if (hwmgr->od_enabled) {
- uint32_t min_freq, max_freq = 0;
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
+ if (ret)
+ return ret;
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
+ if (ret)
+ return ret;
size = sprintf(buf, "%s:\n", "OD_RANGE");
size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
@@ -1414,23 +1477,96 @@ static int smu10_set_fine_grain_clk_vol(struct pp_hwmgr *hwmgr,
enum PP_OD_DPM_TABLE_COMMAND type,
long *input, uint32_t size)
{
+ uint32_t min_freq, max_freq = 0;
+ struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+ int ret = 0;
+
if (!hwmgr->od_enabled) {
pr_err("Fine grain not support\n");
return -EINVAL;
}
- if (size != 2) {
- pr_err("Input parameter number not correct\n");
+ if (!smu10_data->fine_grain_enabled) {
+ pr_err("Fine grain not started\n");
return -EINVAL;
}
if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
- if (input[0] == 0)
- smu10_set_hard_min_gfxclk_by_freq(hwmgr, input[1]);
- else if (input[0] == 1)
- smu10_set_soft_max_gfxclk_by_freq(hwmgr, input[1]);
- else
+ if (size != 2) {
+ pr_err("Input parameter number not correct\n");
return -EINVAL;
+ }
+
+ if (input[0] == 0) {
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
+ if (input[1] < min_freq) {
+ pr_err("Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
+ input[1], min_freq);
+ return -EINVAL;
+ }
+ smu10_data->gfx_actual_soft_min_freq = input[1];
+ } else if (input[0] == 1) {
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
+ if (input[1] > max_freq) {
+ pr_err("Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
+ input[1], max_freq);
+ return -EINVAL;
+ }
+ smu10_data->gfx_actual_soft_max_freq = input[1];
+ } else {
+ return -EINVAL;
+ }
+ } else if (type == PP_OD_RESTORE_DEFAULT_TABLE) {
+ if (size != 0) {
+ pr_err("Input parameter number not correct\n");
+ return -EINVAL;
+ }
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
+
+ smu10_data->gfx_actual_soft_min_freq = min_freq;
+ smu10_data->gfx_actual_soft_max_freq = max_freq;
+
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinGfxClk,
+ min_freq,
+ NULL);
+ if (ret)
+ return ret;
+
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxGfxClk,
+ max_freq,
+ NULL);
+ if (ret)
+ return ret;
+ } else if (type == PP_OD_COMMIT_DPM_TABLE) {
+ if (size != 0) {
+ pr_err("Input parameter number not correct\n");
+ return -EINVAL;
+ }
+
+ if (smu10_data->gfx_actual_soft_min_freq > smu10_data->gfx_actual_soft_max_freq) {
+ pr_err("The setting minimun sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
+ smu10_data->gfx_actual_soft_min_freq, smu10_data->gfx_actual_soft_max_freq);
+ return -EINVAL;
+ }
+
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinGfxClk,
+ smu10_data->gfx_actual_soft_min_freq,
+ NULL);
+ if (ret)
+ return ret;
+
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxGfxClk,
+ smu10_data->gfx_actual_soft_max_freq,
+ NULL);
+ if (ret)
+ return ret;
+ } else {
+ return -EINVAL;
}
return 0;
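
For orientation, a minimal userspace sketch of how this handler is reached; the
sysfs path and command strings are assumed from the usual amdgpu pp_od_clk_voltage
convention, not taken from this patch:

    #include <stdio.h>

    /* Hypothetical helper: one overdrive command per write. */
    static void od_write(const char *cmd)
    {
            FILE *f = fopen("/sys/class/drm/card0/device/pp_od_clk_voltage", "w");

            if (!f)
                    return;
            fputs(cmd, f);          /* parsed into (type, input[], size) for the handler above */
            fclose(f);
    }

    int main(void)
    {
            od_write("s 0 600\n");  /* input[0] == 0: soft minimum sclk, checked against GetMinGfxclkFrequency */
            od_write("s 1 1100\n"); /* input[0] == 1: soft maximum sclk, checked against GetMaxGfxclkFrequency */
            od_write("c\n");        /* PP_OD_COMMIT_DPM_TABLE: min <= max check, then SetHardMinGfxClk/SetSoftMaxGfxClk */
            od_write("r\n");        /* PP_OD_RESTORE_DEFAULT_TABLE: reset both limits to the hardware range */
            return 0;
    }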
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.h
index 6c9b5f060902..808e0ecbe1f0 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.h
@@ -283,6 +283,7 @@ struct smu10_hwmgr {
uint32_t vclk_soft_min;
uint32_t dclk_soft_min;
uint32_t gfx_actual_soft_min_freq;
+ uint32_t gfx_actual_soft_max_freq;
uint32_t gfx_min_freq_limit;
uint32_t gfx_max_freq_limit; /* in 10KHz */
@@ -299,6 +300,8 @@ struct smu10_hwmgr {
bool need_min_deep_sleep_dcefclk;
uint32_t deep_sleep_dcefclk;
uint32_t num_active_display;
+
+ bool fine_grain_enabled;
};
struct pp_hwmgr;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index cf999b7a2164..e84c737e3967 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -847,12 +847,10 @@ static int smu_sw_init(void *handle)
smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
- if (!amdgpu_sriov_vf(adev) || (adev->asic_type != CHIP_NAVI12)) {
- ret = smu_init_microcode(smu);
- if (ret) {
- dev_err(adev->dev, "Failed to load smu firmware!\n");
- return ret;
- }
+ ret = smu_init_microcode(smu);
+ if (ret) {
+ dev_err(adev->dev, "Failed to load smu firmware!\n");
+ return ret;
}
ret = smu_smc_table_sw_init(smu);
@@ -2153,19 +2151,14 @@ int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
{
int ret = 0;
- uint32_t rpm;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
- if (smu->ppt_funcs->set_fan_speed_rpm) {
- if (speed > 100)
- speed = 100;
- rpm = speed * smu->fan_max_rpm / 100;
- ret = smu->ppt_funcs->set_fan_speed_rpm(smu, rpm);
- }
+ if (smu->ppt_funcs->set_fan_speed_percent)
+ ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed);
mutex_unlock(&smu->mutex);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index cd7b411457ff..16db0b506b0d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -2326,6 +2326,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
.get_fan_control_mode = smu_v11_0_get_fan_control_mode,
.set_fan_control_mode = smu_v11_0_set_fan_control_mode,
+ .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
.set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
.set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
.gfx_off_control = smu_v11_0_gfx_off_control,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 51e83123f72a..cd7efa923195 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -2456,6 +2456,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
.get_fan_control_mode = smu_v11_0_get_fan_control_mode,
.set_fan_control_mode = smu_v11_0_set_fan_control_mode,
+ .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
.set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
.set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
.gfx_off_control = smu_v11_0_gfx_off_control,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 3f20f77afdd2..d68d3dfee51d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -128,6 +128,8 @@ static struct cmn2asic_msg_mapping sienna_cichlid_message_map[SMU_MSG_MAX_COUNT]
MSG_MAP(Mode1Reset, PPSMC_MSG_Mode1Reset, 0),
MSG_MAP(SetMGpuFanBoostLimitRpm, PPSMC_MSG_SetMGpuFanBoostLimitRpm, 0),
MSG_MAP(SetGpoFeaturePMask, PPSMC_MSG_SetGpoFeaturePMask, 0),
+ MSG_MAP(DisallowGpo, PPSMC_MSG_DisallowGpo, 0),
+ MSG_MAP(Enable2ndUSB20Port, PPSMC_MSG_Enable2ndUSB20Port, 0),
};
static struct cmn2asic_mapping sienna_cichlid_clk_map[SMU_CLK_COUNT] = {
@@ -302,6 +304,9 @@ static int sienna_cichlid_check_powerplay_table(struct smu_context *smu)
table_context->power_play_table;
struct smu_baco_context *smu_baco = &smu->smu_baco;
+ if (powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_HARDWAREDC)
+ smu->dc_controlled_by_gpio = true;
+
if (powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_BACO ||
powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_MACO)
smu_baco->platform_support = true;
@@ -377,7 +382,7 @@ static int sienna_cichlid_tables_init(struct smu_context *smu)
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
- SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
+ SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetricsExternal_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
@@ -386,10 +391,10 @@ static int sienna_cichlid_tables_init(struct smu_context *smu)
SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU11_TOOL_SIZE,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF,
- sizeof(DpmActivityMonitorCoeffInt_t), PAGE_SIZE,
+ sizeof(DpmActivityMonitorCoeffIntExternal_t), PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM);
- smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
+ smu_table->metrics_table = kzalloc(sizeof(SmuMetricsExternal_t), GFP_KERNEL);
if (!smu_table->metrics_table)
goto err0_out;
smu_table->metrics_time = 0;
@@ -418,7 +423,8 @@ static int sienna_cichlid_get_smu_metrics_data(struct smu_context *smu,
uint32_t *value)
{
struct smu_table_context *smu_table= &smu->smu_table;
- SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
+ SmuMetrics_t *metrics =
+ &(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics);
int ret = 0;
mutex_lock(&smu->metrics_lock);
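
The External wrapper's shape is implied by the change (the real definition lives in
the PMFW interface headers; the padding member shown here is illustrative):

    /*
     * typedef struct {
     *         SmuMetrics_t SmuMetrics;
     *         uint32_t     Padding[8];
     * } SmuMetricsExternal_t;
     *
     * &ext->SmuMetrics aliases the start of the buffer, while allocation and
     * table transfers use the padded external size.
     */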
@@ -1065,12 +1071,18 @@ static int sienna_cichlid_populate_umd_state_clk(struct smu_context *smu)
pstate_table->gfxclk_pstate.min = gfx_table->min;
pstate_table->gfxclk_pstate.peak = gfx_table->max;
+ if (gfx_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK)
+ pstate_table->gfxclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK;
pstate_table->uclk_pstate.min = mem_table->min;
pstate_table->uclk_pstate.peak = mem_table->max;
+ if (mem_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK)
+ pstate_table->uclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK;
pstate_table->socclk_pstate.min = soc_table->min;
pstate_table->socclk_pstate.peak = soc_table->max;
+ if (soc_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK)
+ pstate_table->socclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK;
return 0;
}
@@ -1156,7 +1168,9 @@ static int sienna_cichlid_get_fan_parameters(struct smu_context *smu)
static int sienna_cichlid_get_power_profile_mode(struct smu_context *smu, char *buf)
{
- DpmActivityMonitorCoeffInt_t activity_monitor;
+ DpmActivityMonitorCoeffIntExternal_t activity_monitor_external;
+ DpmActivityMonitorCoeffInt_t *activity_monitor =
+ &(activity_monitor_external.DpmActivityMonitorCoeffInt);
uint32_t i, size = 0;
int16_t workload_type = 0;
static const char *profile_name[] = {
@@ -1198,7 +1212,7 @@ static int sienna_cichlid_get_power_profile_mode(struct smu_context *smu, char *
result = smu_cmn_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF, workload_type,
- (void *)(&activity_monitor), false);
+ (void *)(&activity_monitor_external), false);
if (result) {
dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
return result;
@@ -1211,43 +1225,43 @@ static int sienna_cichlid_get_power_profile_mode(struct smu_context *smu, char *
" ",
0,
"GFXCLK",
- activity_monitor.Gfx_FPS,
- activity_monitor.Gfx_MinFreqStep,
- activity_monitor.Gfx_MinActiveFreqType,
- activity_monitor.Gfx_MinActiveFreq,
- activity_monitor.Gfx_BoosterFreqType,
- activity_monitor.Gfx_BoosterFreq,
- activity_monitor.Gfx_PD_Data_limit_c,
- activity_monitor.Gfx_PD_Data_error_coeff,
- activity_monitor.Gfx_PD_Data_error_rate_coeff);
+ activity_monitor->Gfx_FPS,
+ activity_monitor->Gfx_MinFreqStep,
+ activity_monitor->Gfx_MinActiveFreqType,
+ activity_monitor->Gfx_MinActiveFreq,
+ activity_monitor->Gfx_BoosterFreqType,
+ activity_monitor->Gfx_BoosterFreq,
+ activity_monitor->Gfx_PD_Data_limit_c,
+ activity_monitor->Gfx_PD_Data_error_coeff,
+ activity_monitor->Gfx_PD_Data_error_rate_coeff);
size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
" ",
1,
"SOCCLK",
- activity_monitor.Fclk_FPS,
- activity_monitor.Fclk_MinFreqStep,
- activity_monitor.Fclk_MinActiveFreqType,
- activity_monitor.Fclk_MinActiveFreq,
- activity_monitor.Fclk_BoosterFreqType,
- activity_monitor.Fclk_BoosterFreq,
- activity_monitor.Fclk_PD_Data_limit_c,
- activity_monitor.Fclk_PD_Data_error_coeff,
- activity_monitor.Fclk_PD_Data_error_rate_coeff);
+ activity_monitor->Fclk_FPS,
+ activity_monitor->Fclk_MinFreqStep,
+ activity_monitor->Fclk_MinActiveFreqType,
+ activity_monitor->Fclk_MinActiveFreq,
+ activity_monitor->Fclk_BoosterFreqType,
+ activity_monitor->Fclk_BoosterFreq,
+ activity_monitor->Fclk_PD_Data_limit_c,
+ activity_monitor->Fclk_PD_Data_error_coeff,
+ activity_monitor->Fclk_PD_Data_error_rate_coeff);
size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
" ",
2,
"MEMLK",
- activity_monitor.Mem_FPS,
- activity_monitor.Mem_MinFreqStep,
- activity_monitor.Mem_MinActiveFreqType,
- activity_monitor.Mem_MinActiveFreq,
- activity_monitor.Mem_BoosterFreqType,
- activity_monitor.Mem_BoosterFreq,
- activity_monitor.Mem_PD_Data_limit_c,
- activity_monitor.Mem_PD_Data_error_coeff,
- activity_monitor.Mem_PD_Data_error_rate_coeff);
+ activity_monitor->Mem_FPS,
+ activity_monitor->Mem_MinFreqStep,
+ activity_monitor->Mem_MinActiveFreqType,
+ activity_monitor->Mem_MinActiveFreq,
+ activity_monitor->Mem_BoosterFreqType,
+ activity_monitor->Mem_BoosterFreq,
+ activity_monitor->Mem_PD_Data_limit_c,
+ activity_monitor->Mem_PD_Data_error_coeff,
+ activity_monitor->Mem_PD_Data_error_rate_coeff);
}
return size;
@@ -1255,7 +1269,10 @@ static int sienna_cichlid_get_power_profile_mode(struct smu_context *smu, char *
static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
{
- DpmActivityMonitorCoeffInt_t activity_monitor;
+ DpmActivityMonitorCoeffIntExternal_t activity_monitor_external;
+ DpmActivityMonitorCoeffInt_t *activity_monitor =
+ &(activity_monitor_external.DpmActivityMonitorCoeffInt);
int workload_type, ret = 0;
smu->power_profile_mode = input[size];
@@ -1269,7 +1286,7 @@ static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu, long *
ret = smu_cmn_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
- (void *)(&activity_monitor), false);
+ (void *)(&activity_monitor_external), false);
if (ret) {
dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
return ret;
@@ -1277,43 +1294,43 @@ static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu, long *
switch (input[0]) {
case 0: /* Gfxclk */
- activity_monitor.Gfx_FPS = input[1];
- activity_monitor.Gfx_MinFreqStep = input[2];
- activity_monitor.Gfx_MinActiveFreqType = input[3];
- activity_monitor.Gfx_MinActiveFreq = input[4];
- activity_monitor.Gfx_BoosterFreqType = input[5];
- activity_monitor.Gfx_BoosterFreq = input[6];
- activity_monitor.Gfx_PD_Data_limit_c = input[7];
- activity_monitor.Gfx_PD_Data_error_coeff = input[8];
- activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9];
+ activity_monitor->Gfx_FPS = input[1];
+ activity_monitor->Gfx_MinFreqStep = input[2];
+ activity_monitor->Gfx_MinActiveFreqType = input[3];
+ activity_monitor->Gfx_MinActiveFreq = input[4];
+ activity_monitor->Gfx_BoosterFreqType = input[5];
+ activity_monitor->Gfx_BoosterFreq = input[6];
+ activity_monitor->Gfx_PD_Data_limit_c = input[7];
+ activity_monitor->Gfx_PD_Data_error_coeff = input[8];
+ activity_monitor->Gfx_PD_Data_error_rate_coeff = input[9];
break;
case 1: /* Socclk */
- activity_monitor.Fclk_FPS = input[1];
- activity_monitor.Fclk_MinFreqStep = input[2];
- activity_monitor.Fclk_MinActiveFreqType = input[3];
- activity_monitor.Fclk_MinActiveFreq = input[4];
- activity_monitor.Fclk_BoosterFreqType = input[5];
- activity_monitor.Fclk_BoosterFreq = input[6];
- activity_monitor.Fclk_PD_Data_limit_c = input[7];
- activity_monitor.Fclk_PD_Data_error_coeff = input[8];
- activity_monitor.Fclk_PD_Data_error_rate_coeff = input[9];
+ activity_monitor->Fclk_FPS = input[1];
+ activity_monitor->Fclk_MinFreqStep = input[2];
+ activity_monitor->Fclk_MinActiveFreqType = input[3];
+ activity_monitor->Fclk_MinActiveFreq = input[4];
+ activity_monitor->Fclk_BoosterFreqType = input[5];
+ activity_monitor->Fclk_BoosterFreq = input[6];
+ activity_monitor->Fclk_PD_Data_limit_c = input[7];
+ activity_monitor->Fclk_PD_Data_error_coeff = input[8];
+ activity_monitor->Fclk_PD_Data_error_rate_coeff = input[9];
break;
case 2: /* Memlk */
- activity_monitor.Mem_FPS = input[1];
- activity_monitor.Mem_MinFreqStep = input[2];
- activity_monitor.Mem_MinActiveFreqType = input[3];
- activity_monitor.Mem_MinActiveFreq = input[4];
- activity_monitor.Mem_BoosterFreqType = input[5];
- activity_monitor.Mem_BoosterFreq = input[6];
- activity_monitor.Mem_PD_Data_limit_c = input[7];
- activity_monitor.Mem_PD_Data_error_coeff = input[8];
- activity_monitor.Mem_PD_Data_error_rate_coeff = input[9];
+ activity_monitor->Mem_FPS = input[1];
+ activity_monitor->Mem_MinFreqStep = input[2];
+ activity_monitor->Mem_MinActiveFreqType = input[3];
+ activity_monitor->Mem_MinActiveFreq = input[4];
+ activity_monitor->Mem_BoosterFreqType = input[5];
+ activity_monitor->Mem_BoosterFreq = input[6];
+ activity_monitor->Mem_PD_Data_limit_c = input[7];
+ activity_monitor->Mem_PD_Data_error_coeff = input[8];
+ activity_monitor->Mem_PD_Data_error_rate_coeff = input[9];
break;
}
ret = smu_cmn_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
- (void *)(&activity_monitor), true);
+ (void *)(&activity_monitor_external), true);
if (ret) {
dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
return ret;
@@ -2355,7 +2372,7 @@ static void sienna_cichlid_fill_i2c_req(SwI2cRequest_t *req, bool write,
{
int i;
- req->I2CcontrollerPort = 0;
+ req->I2CcontrollerPort = 1;
req->I2CSpeed = 2;
req->SlaveAddress = address;
req->NumCmds = numbytes;
@@ -2582,52 +2599,54 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
struct smu_table_context *smu_table = &smu->smu_table;
struct gpu_metrics_v1_0 *gpu_metrics =
(struct gpu_metrics_v1_0 *)smu_table->gpu_metrics_table;
- SmuMetrics_t metrics;
+ SmuMetricsExternal_t metrics_external;
+ SmuMetrics_t *metrics =
+ &(metrics_external.SmuMetrics);
int ret = 0;
ret = smu_cmn_get_metrics_table(smu,
- &metrics,
+ &metrics_external,
true);
if (ret)
return ret;
smu_v11_0_init_gpu_metrics_v1_0(gpu_metrics);
- gpu_metrics->temperature_edge = metrics.TemperatureEdge;
- gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
- gpu_metrics->temperature_mem = metrics.TemperatureMem;
- gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx;
- gpu_metrics->temperature_vrsoc = metrics.TemperatureVrSoc;
- gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem0;
+ gpu_metrics->temperature_edge = metrics->TemperatureEdge;
+ gpu_metrics->temperature_hotspot = metrics->TemperatureHotspot;
+ gpu_metrics->temperature_mem = metrics->TemperatureMem;
+ gpu_metrics->temperature_vrgfx = metrics->TemperatureVrGfx;
+ gpu_metrics->temperature_vrsoc = metrics->TemperatureVrSoc;
+ gpu_metrics->temperature_vrmem = metrics->TemperatureVrMem0;
- gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
- gpu_metrics->average_umc_activity = metrics.AverageUclkActivity;
- gpu_metrics->average_mm_activity = metrics.VcnActivityPercentage;
+ gpu_metrics->average_gfx_activity = metrics->AverageGfxActivity;
+ gpu_metrics->average_umc_activity = metrics->AverageUclkActivity;
+ gpu_metrics->average_mm_activity = metrics->VcnActivityPercentage;
- gpu_metrics->average_socket_power = metrics.AverageSocketPower;
- gpu_metrics->energy_accumulator = metrics.EnergyAccumulator;
+ gpu_metrics->average_socket_power = metrics->AverageSocketPower;
+ gpu_metrics->energy_accumulator = metrics->EnergyAccumulator;
- if (metrics.AverageGfxActivity <= SMU_11_0_7_GFX_BUSY_THRESHOLD)
- gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequencyPostDs;
+ if (metrics->AverageGfxActivity <= SMU_11_0_7_GFX_BUSY_THRESHOLD)
+ gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPostDs;
else
- gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequencyPreDs;
- gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequencyPostDs;
- gpu_metrics->average_vclk0_frequency = metrics.AverageVclk0Frequency;
- gpu_metrics->average_dclk0_frequency = metrics.AverageDclk0Frequency;
- gpu_metrics->average_vclk1_frequency = metrics.AverageVclk1Frequency;
- gpu_metrics->average_dclk1_frequency = metrics.AverageDclk1Frequency;
+ gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPreDs;
+ gpu_metrics->average_uclk_frequency = metrics->AverageUclkFrequencyPostDs;
+ gpu_metrics->average_vclk0_frequency = metrics->AverageVclk0Frequency;
+ gpu_metrics->average_dclk0_frequency = metrics->AverageDclk0Frequency;
+ gpu_metrics->average_vclk1_frequency = metrics->AverageVclk1Frequency;
+ gpu_metrics->average_dclk1_frequency = metrics->AverageDclk1Frequency;
- gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK];
- gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK];
- gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK];
- gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK_0];
- gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK_0];
- gpu_metrics->current_vclk1 = metrics.CurrClock[PPCLK_VCLK_1];
- gpu_metrics->current_dclk1 = metrics.CurrClock[PPCLK_DCLK_1];
+ gpu_metrics->current_gfxclk = metrics->CurrClock[PPCLK_GFXCLK];
+ gpu_metrics->current_socclk = metrics->CurrClock[PPCLK_SOCCLK];
+ gpu_metrics->current_uclk = metrics->CurrClock[PPCLK_UCLK];
+ gpu_metrics->current_vclk0 = metrics->CurrClock[PPCLK_VCLK_0];
+ gpu_metrics->current_dclk0 = metrics->CurrClock[PPCLK_DCLK_0];
+ gpu_metrics->current_vclk1 = metrics->CurrClock[PPCLK_VCLK_1];
+ gpu_metrics->current_dclk1 = metrics->CurrClock[PPCLK_DCLK_1];
- gpu_metrics->throttle_status = metrics.ThrottlerStatus;
+ gpu_metrics->throttle_status = metrics->ThrottlerStatus;
- gpu_metrics->current_fan_speed = metrics.CurrFanSpeed;
+ gpu_metrics->current_fan_speed = metrics->CurrFanSpeed;
gpu_metrics->pcie_link_width =
smu_v11_0_get_current_pcie_link_width(smu);
@@ -2650,23 +2669,82 @@ static int sienna_cichlid_enable_mgpu_fan_boost(struct smu_context *smu)
static int sienna_cichlid_gpo_control(struct smu_context *smu,
bool enablement)
{
+ uint32_t smu_version;
int ret = 0;
+
if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_GFX_GPO_BIT)) {
- if (enablement)
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_SetGpoFeaturePMask,
- GFX_GPO_PACE_MASK | GFX_GPO_DEM_MASK,
- NULL);
- else
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_SetGpoFeaturePMask,
- 0,
- NULL);
+ ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
+ if (ret)
+ return ret;
+
+ if (enablement) {
+ if (smu_version < 0x003a2500) {
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetGpoFeaturePMask,
+ GFX_GPO_PACE_MASK | GFX_GPO_DEM_MASK,
+ NULL);
+ } else {
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_DisallowGpo,
+ 0,
+ NULL);
+ }
+ } else {
+ if (smu_version < 0x003a2500) {
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetGpoFeaturePMask,
+ 0,
+ NULL);
+ } else {
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_DisallowGpo,
+ 1,
+ NULL);
+ }
+ }
}
return ret;
}
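
Note the inverted polarity of the newer message (the 0x003a2500 threshold reads as
PMFW 58.37 by the same hex-to-version convention as the 58.45 comment below):

    PMFW <  0x003a2500: SetGpoFeaturePMask, GFX_GPO_PACE_MASK | GFX_GPO_DEM_MASK enables, 0 disables
    PMFW >= 0x003a2500: DisallowGpo,        0 enables (GPO allowed), 1 disables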
+
+static int sienna_cichlid_notify_2nd_usb20_port(struct smu_context *smu)
+{
+ uint32_t smu_version;
+ int ret = 0;
+
+ ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
+ if (ret)
+ return ret;
+
+ /*
+ * Message SMU_MSG_Enable2ndUSB20Port is supported from PMFW
+ * 58.45 onwards.
+ */
+ if (smu_version < 0x003A2D00)
+ return 0;
+
+ return smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_Enable2ndUSB20Port,
+ smu->smu_table.boot_values.firmware_caps & ATOM_FIRMWARE_CAP_ENABLE_2ND_USB20PORT ?
+ 1 : 0,
+ NULL);
+}
+
+static int sienna_cichlid_system_features_control(struct smu_context *smu,
+ bool en)
+{
+ int ret = 0;
+
+ if (en) {
+ ret = sienna_cichlid_notify_2nd_usb20_port(smu);
+ if (ret)
+ return ret;
+ }
+
+ return smu_v11_0_system_features_control(smu, en);
+}
+
static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.get_allowed_feature_mask = sienna_cichlid_get_allowed_feature_mask,
.set_default_dpm_table = sienna_cichlid_set_default_dpm_table,
@@ -2707,7 +2785,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.set_driver_table_location = smu_v11_0_set_driver_table_location,
.set_tool_table_location = smu_v11_0_set_tool_table_location,
.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
- .system_features_control = smu_v11_0_system_features_control,
+ .system_features_control = sienna_cichlid_system_features_control,
.send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
.send_smc_msg = smu_cmn_send_smc_msg,
.init_display_count = NULL,
@@ -2724,6 +2802,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
.get_fan_control_mode = smu_v11_0_get_fan_control_mode,
.set_fan_control_mode = smu_v11_0_set_fan_control_mode,
+ .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
.set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
.set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
.gfx_off_control = smu_v11_0_gfx_off_control,
@@ -2740,6 +2819,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.get_dpm_ultimate_freq = sienna_cichlid_get_dpm_ultimate_freq,
.set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
.run_btc = sienna_cichlid_run_btc,
+ .set_power_source = smu_v11_0_set_power_source,
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
.get_gpu_metrics = sienna_cichlid_get_gpu_metrics,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h
index 57e120c440ea..38cd0ece24f6 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h
@@ -29,6 +29,10 @@ typedef enum {
POWER_SOURCE_COUNT,
} POWER_SOURCE_e;
+#define SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK 1825
+#define SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK 960
+#define SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK 1000
+
extern void sienna_cichlid_set_ppt_funcs(struct smu_context *smu);
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index 624065d3c079..5aeb5f5a0447 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -91,6 +91,11 @@ int smu_v11_0_init_microcode(struct smu_context *smu)
const struct common_firmware_header *header;
struct amdgpu_firmware_info *ucode = NULL;
+ if (amdgpu_sriov_vf(adev) &&
+ ((adev->asic_type == CHIP_NAVI12) ||
+ (adev->asic_type == CHIP_SIENNA_CICHLID)))
+ return 0;
+
switch (adev->asic_type) {
case CHIP_ARCTURUS:
chip_name = "arcturus";
@@ -554,6 +559,7 @@ int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu)
smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
smu->smu_table.boot_values.pp_table_id = 0;
+ smu->smu_table.boot_values.firmware_caps = v_3_1->firmware_capability;
break;
case 3:
default:
@@ -569,6 +575,7 @@ int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu)
smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id;
+ smu->smu_table.boot_values.firmware_caps = v_3_3->firmware_capability;
}
smu->smu_table.boot_values.format_revision = header->format_revision;
@@ -929,9 +936,13 @@ int smu_v11_0_get_current_power_limit(struct smu_context *smu,
if (power_src < 0)
return -EINVAL;
+ /*
+ * BIT 24-31: ControllerId (only PPT0 is supported for now)
+ * BIT 16-23: PowerSource
+ */
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_GetPptLimit,
- power_src << 16,
+ (0 << 24) | (power_src << 16),
power_limit);
if (ret)
dev_err(smu->adev->dev, "[%s] get PPT limit failed!", __func__);
@@ -941,6 +952,7 @@ int smu_v11_0_get_current_power_limit(struct smu_context *smu,
int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
{
+ int power_src;
int ret = 0;
if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
@@ -948,6 +960,22 @@ int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
return -EOPNOTSUPP;
}
+ power_src = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_PWR,
+ smu->adev->pm.ac_power ?
+ SMU_POWER_SOURCE_AC :
+ SMU_POWER_SOURCE_DC);
+ if (power_src < 0)
+ return -EINVAL;
+
+ /*
+ * BIT 24-31: ControllerId (only PPT0 is supported for now)
+ * BIT 16-23: PowerSource
+ * BIT 0-15: PowerLimit
+ */
+ n &= 0xFFFF;
+ n |= 0 << 24;
+ n |= (power_src) << 16;
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n, NULL);
if (ret) {
dev_err(smu->adev->dev, "[%s] Set power limit Failed!\n", __func__);
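
The packing added above, checked standalone (field values illustrative):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t n = 220;               /* requested limit, low 16 bits */
            uint32_t power_src = 1;         /* ASIC-specific index, e.g. DC */

            n &= 0xFFFF;                    /* BIT 0-15:  PowerLimit */
            n |= 0 << 24;                   /* BIT 24-31: ControllerId (PPT0) */
            n |= power_src << 16;           /* BIT 16-23: PowerSource */

            assert(n == 0x000100DC);        /* 220 == 0xDC */
            return 0;
    }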
@@ -1146,6 +1174,35 @@ smu_v11_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode)
}
int
+smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t duty100, duty;
+ uint64_t tmp64;
+
+ if (speed > 100)
+ speed = 100;
+
+ if (smu_v11_0_auto_fan_control(smu, 0))
+ return -EINVAL;
+
+ duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
+ CG_FDO_CTRL1, FMAX_DUTY100);
+ if (!duty100)
+ return -EINVAL;
+
+ tmp64 = (uint64_t)speed * duty100;
+ do_div(tmp64, 100);
+ duty = (uint32_t)tmp64;
+
+ WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0,
+ REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL0),
+ CG_FDO_CTRL0, FDO_STATIC_DUTY, duty));
+
+ return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC);
+}
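
The percent-to-duty arithmetic above, as a standalone check (register value assumed):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t speed = 40;                     /* requested percent */
            uint32_t duty100 = 255;                  /* FMAX_DUTY100 read from CG_FDO_CTRL1 (illustrative) */
            uint64_t tmp64 = (uint64_t)speed * duty100;
            uint32_t duty = (uint32_t)(tmp64 / 100); /* the kernel code uses do_div() */

            assert(duty == 102);                     /* 40% of a 255-step duty range */
            return 0;
    }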
+
+int
smu_v11_0_set_fan_control_mode(struct smu_context *smu,
uint32_t mode)
{
@@ -1153,7 +1210,7 @@ smu_v11_0_set_fan_control_mode(struct smu_context *smu,
switch (mode) {
case AMD_FAN_CTRL_NONE:
- ret = smu_v11_0_set_fan_speed_rpm(smu, smu->fan_max_rpm);
+ ret = smu_v11_0_set_fan_speed_percent(smu, 100);
break;
case AMD_FAN_CTRL_MANUAL:
ret = smu_v11_0_auto_fan_control(smu, 0);
@@ -2064,6 +2121,22 @@ int smu_v11_0_deep_sleep_control(struct smu_context *smu,
}
}
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_UCLK_BIT)) {
+ ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_UCLK_BIT, enablement);
+ if (ret) {
+ dev_err(adev->dev, "Failed to %s UCLK DS!\n", enablement ? "enable" : "disable");
+ return ret;
+ }
+ }
+
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_FCLK_BIT)) {
+ ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_FCLK_BIT, enablement);
+ if (ret) {
+ dev_err(adev->dev, "Failed to %s FCLK DS!\n", enablement ? "enable" : "disable");
+ return ret;
+ }
+ }
+
if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_SOCCLK_BIT)) {
ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_SOCCLK_BIT, enablement);
if (ret) {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index a81e5c823211..92ad2cdbae10 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -64,7 +64,7 @@ static struct cmn2asic_msg_mapping vangogh_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(PowerUpIspByTile, PPSMC_MSG_PowerUpIspByTile, 0),
MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 0),
MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 0),
- MSG_MAP(Spare, PPSMC_MSG_spare, 0),
+ MSG_MAP(RlcPowerNotify, PPSMC_MSG_RlcPowerNotify, 0),
MSG_MAP(SetHardMinVcn, PPSMC_MSG_SetHardMinVcn, 0),
MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxclk, 0),
MSG_MAP(ActiveProcessNotify, PPSMC_MSG_ActiveProcessNotify, 0),
@@ -252,7 +252,8 @@ static int vangogh_get_smu_metrics_data(struct smu_context *smu,
*value = metrics->UvdActivity;
break;
case METRICS_AVERAGE_SOCKETPOWER:
- *value = metrics->CurrentSocketPower;
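+ /* e.g. 15000 mW -> (15000 << 8) / 1000 = 3840, i.e. 15.0 W in 24.8 fixed point (unit assumed mW) */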
+ *value = (metrics->CurrentSocketPower << 8) / 1000;
break;
case METRICS_TEMPERATURE_EDGE:
*value = metrics->GfxTemperature / 100 *
@@ -590,14 +591,17 @@ static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->average_socket_power = metrics.CurrentSocketPower;
gpu_metrics->average_cpu_power = metrics.Power[0];
gpu_metrics->average_soc_power = metrics.Power[1];
+ gpu_metrics->average_gfx_power = metrics.Power[2];
memcpy(&gpu_metrics->average_core_power[0],
&metrics.CorePower[0],
sizeof(uint16_t) * 8);
gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency;
gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency;
+ gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency;
gpu_metrics->average_fclk_frequency = metrics.MemclkFrequency;
gpu_metrics->average_vclk_frequency = metrics.VclkFrequency;
+ gpu_metrics->average_dclk_frequency = metrics.DclkFrequency;
memcpy(&gpu_metrics->current_coreclk[0],
&metrics.CoreFrequency[0],
@@ -722,6 +726,17 @@ static int vangogh_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
return 0;
}
+static int vangogh_system_features_control(struct smu_context *smu, bool en)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ if (adev->pm.fw_version >= 0x43f1700)
+ return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RlcPowerNotify,
+ en ? RLC_STATUS_NORMAL : RLC_STATUS_OFF, NULL);
+ else
+ return 0;
+}
+
static const struct pptable_funcs vangogh_ppt_funcs = {
.check_fw_status = smu_v11_0_check_fw_status,
@@ -749,6 +764,7 @@ static const struct pptable_funcs vangogh_ppt_funcs = {
.print_clk_levels = vangogh_print_fine_grain_clk,
.set_default_dpm_table = vangogh_set_default_dpm_tables,
.set_fine_grain_gfx_freq_parameters = vangogh_set_fine_grain_gfx_freq_parameters,
+ .system_features_control = vangogh_system_features_control,
};
void vangogh_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.h
index 8756766296cd..eab455493076 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.h
@@ -32,4 +32,8 @@ extern void vangogh_set_ppt_funcs(struct smu_context *smu);
#define VANGOGH_UMD_PSTATE_SOCCLK 678
#define VANGOGH_UMD_PSTATE_FCLK 800
+/* RLC Power Status */
+#define RLC_STATUS_OFF 0
+#define RLC_STATUS_NORMAL 1
+
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index dc75db8af371..9a9697038016 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -188,6 +188,7 @@ static int renoir_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_type
return -EINVAL;
*freq = clk_table->SocClocks[dpm_level].Freq;
break;
+ case SMU_UCLK:
case SMU_MCLK:
if (dpm_level >= NUM_FCLK_DPM_LEVELS)
return -EINVAL;
@@ -1120,7 +1121,7 @@ static ssize_t renoir_get_gpu_metrics(struct smu_context *smu,
static int renoir_gfx_state_change_set(struct smu_context *smu, uint32_t state)
{
- return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GpuChangeState, state, NULL);
+ return 0;
}
static const struct pptable_funcs renoir_ppt_funcs = {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
index 522d55004655..06abf2a7ce9e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
@@ -225,6 +225,7 @@ int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
break;
case SMU_FCLK:
case SMU_MCLK:
+ case SMU_UCLK:
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min, NULL);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
index 1f8195bad536..ca891ae14d36 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
@@ -152,7 +152,6 @@ static int komeda_parse_dt(struct device *dev, struct komeda_dev *mdev)
ret = of_reserved_mem_device_init(dev);
if (ret && ret != -ENODEV)
return ret;
- ret = 0;
for_each_available_child_of_node(np, child) {
if (of_node_name_eq(child, "pipeline")) {
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
index 6b99df696384..034ee08482e0 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
@@ -81,10 +81,10 @@ static void komeda_kms_commit_tail(struct drm_atomic_state *old_state)
drm_atomic_helper_commit_modeset_enables(dev, old_state);
- drm_atomic_helper_wait_for_flip_done(dev, old_state);
-
drm_atomic_helper_commit_hw_done(old_state);
+ drm_atomic_helper_wait_for_flip_done(dev, old_state);
+
drm_atomic_helper_cleanup_planes(dev, old_state);
}
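
This restores the ordering used by the stock drm_atomic_helper_commit_tail():
signal hw_done as soon as the hardware is programmed, then wait for the flip, so
a slow flip cannot leave dependent commits blocked on hw_done.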
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
index 452e505a1fd3..719a79728e24 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
@@ -137,9 +137,10 @@ komeda_pipeline_get_first_component(struct komeda_pipeline *pipe,
u32 comp_mask)
{
struct komeda_component *c = NULL;
+ unsigned long comp_mask_local = (unsigned long)comp_mask;
int id;
- id = find_first_bit((unsigned long *)&comp_mask, 32);
+ id = find_first_bit(&comp_mask_local, 32);
if (id < 32)
c = komeda_pipeline_get_component(pipe, id);
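
Why the local copy matters: find_first_bit() dereferences a full unsigned long, so
the old (unsigned long *)&comp_mask cast read 4 bytes past the u32 on 64-bit
targets, and on a 64-bit big-endian machine it scanned the wrong half of the word
entirely. Widening into a properly typed local is the portable fix.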
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
index 8f32ae7c25d0..5c085116de3f 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
@@ -704,10 +704,10 @@ komeda_compiz_set_input(struct komeda_compiz *compiz,
cin->layer_alpha = dflow->layer_alpha;
old_st = komeda_component_get_old_state(&compiz->base, drm_st);
- WARN_ON(!old_st);
/* compare with old to check if this input has been changed */
- if (memcmp(&(to_compiz_st(old_st)->cins[idx]), cin, sizeof(*cin)))
+ if (WARN_ON(!old_st) ||
+ memcmp(&(to_compiz_st(old_st)->cins[idx]), cin, sizeof(*cin)))
c_st->changed_active_inputs |= BIT(idx);
komeda_component_add_input(c_st, &dflow->input, idx);
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
index 0c98d27f84ac..fee27952ec6d 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
@@ -14,6 +14,7 @@
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/wait.h>
+#include <linux/workqueue.h>
#include <sound/hdmi-codec.h>
@@ -36,6 +37,7 @@ struct lt9611uxc {
struct mutex ocm_lock;
struct wait_queue_head wq;
+ struct work_struct work;
struct device_node *dsi0_node;
struct device_node *dsi1_node;
@@ -52,6 +54,8 @@ struct lt9611uxc {
bool hpd_supported;
bool edid_read;
+ /* can be accessed from different threads, so protect this with ocm_lock */
+ bool hdmi_connected;
uint8_t fw_version;
};
@@ -143,21 +147,41 @@ static irqreturn_t lt9611uxc_irq_thread_handler(int irq, void *dev_id)
if (irq_status)
regmap_write(lt9611uxc->regmap, 0xb022, 0);
- lt9611uxc_unlock(lt9611uxc);
-
- if (irq_status & BIT(0))
+ if (irq_status & BIT(0)) {
lt9611uxc->edid_read = !!(hpd_status & BIT(0));
+ wake_up_all(&lt9611uxc->wq);
+ }
if (irq_status & BIT(1)) {
- if (lt9611uxc->connector.dev)
- drm_kms_helper_hotplug_event(lt9611uxc->connector.dev);
- else
- drm_bridge_hpd_notify(&lt9611uxc->bridge, !!(hpd_status & BIT(1)));
+ lt9611uxc->hdmi_connected = hpd_status & BIT(1);
+ schedule_work(&lt9611uxc->work);
}
+ lt9611uxc_unlock(lt9611uxc);
+
return IRQ_HANDLED;
}
+static void lt9611uxc_hpd_work(struct work_struct *work)
+{
+ struct lt9611uxc *lt9611uxc = container_of(work, struct lt9611uxc, work);
+ bool connected;
+
+ if (lt9611uxc->connector.dev)
+ drm_kms_helper_hotplug_event(lt9611uxc->connector.dev);
+ else {
+ mutex_lock(&lt9611uxc->ocm_lock);
+ connected = lt9611uxc->hdmi_connected;
+ mutex_unlock(&lt9611uxc->ocm_lock);
+
+ drm_bridge_hpd_notify(&lt9611uxc->bridge,
+ connected ?
+ connector_status_connected :
+ connector_status_disconnected);
+ }
+}
+
static void lt9611uxc_reset(struct lt9611uxc *lt9611uxc)
{
gpiod_set_value_cansleep(lt9611uxc->reset_gpio, 1);
@@ -445,18 +469,21 @@ static enum drm_connector_status lt9611uxc_bridge_detect(struct drm_bridge *brid
struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge);
unsigned int reg_val = 0;
int ret;
- int connected = 1;
+ bool connected = true;
+
+ lt9611uxc_lock(lt9611uxc);
if (lt9611uxc->hpd_supported) {
- lt9611uxc_lock(lt9611uxc);
ret = regmap_read(lt9611uxc->regmap, 0xb023, &reg_val);
- lt9611uxc_unlock(lt9611uxc);
if (ret)
dev_err(lt9611uxc->dev, "failed to read hpd status: %d\n", ret);
else
connected = reg_val & BIT(1);
}
+ lt9611uxc->hdmi_connected = connected;
+
+ lt9611uxc_unlock(lt9611uxc);
return connected ? connector_status_connected :
connector_status_disconnected;
@@ -465,7 +492,7 @@ static enum drm_connector_status lt9611uxc_bridge_detect(struct drm_bridge *brid
static int lt9611uxc_wait_for_edid(struct lt9611uxc *lt9611uxc)
{
return wait_event_interruptible_timeout(lt9611uxc->wq, lt9611uxc->edid_read,
- msecs_to_jiffies(100));
+ msecs_to_jiffies(500));
}
static int lt9611uxc_get_edid_block(void *data, u8 *buf, unsigned int block, size_t len)
@@ -503,7 +530,10 @@ static struct edid *lt9611uxc_bridge_get_edid(struct drm_bridge *bridge,
ret = lt9611uxc_wait_for_edid(lt9611uxc);
if (ret < 0) {
dev_err(lt9611uxc->dev, "wait for EDID failed: %d\n", ret);
- return ERR_PTR(ret);
+ return NULL;
+ } else if (ret == 0) {
+ dev_err(lt9611uxc->dev, "wait for EDID timeout\n");
+ return NULL;
}
return drm_do_get_edid(connector, lt9611uxc_get_edid_block, lt9611uxc);
@@ -926,6 +956,8 @@ retry:
lt9611uxc->fw_version = ret;
init_waitqueue_head(&lt9611uxc->wq);
+ INIT_WORK(&lt9611uxc->work, lt9611uxc_hpd_work);
+
ret = devm_request_threaded_irq(dev, client->irq, NULL,
lt9611uxc_irq_thread_handler,
IRQF_ONESHOT, "lt9611uxc", lt9611uxc);
@@ -962,6 +994,7 @@ static int lt9611uxc_remove(struct i2c_client *client)
struct lt9611uxc *lt9611uxc = i2c_get_clientdata(client);
disable_irq(client->irq);
+ flush_scheduled_work();
lt9611uxc_audio_exit(lt9611uxc);
drm_bridge_remove(&lt9611uxc->bridge);
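
Ordering note: the IRQ thread is the only producer of this work item, so disabling
the interrupt first guarantees that flush_scheduled_work() drains the last HPD
notification with nothing re-queued behind it.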
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index ddd0e3239150..4a8cbec832bc 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -122,7 +122,8 @@ static int handle_conflicting_encoders(struct drm_atomic_state *state,
continue;
if (funcs->atomic_best_encoder)
- new_encoder = funcs->atomic_best_encoder(connector, new_conn_state);
+ new_encoder = funcs->atomic_best_encoder(connector,
+ state);
else if (funcs->best_encoder)
new_encoder = funcs->best_encoder(connector);
else
@@ -345,8 +346,7 @@ update_connector_routing(struct drm_atomic_state *state,
funcs = connector->helper_private;
if (funcs->atomic_best_encoder)
- new_encoder = funcs->atomic_best_encoder(connector,
- new_connector_state);
+ new_encoder = funcs->atomic_best_encoder(connector, state);
else if (funcs->best_encoder)
new_encoder = funcs->best_encoder(connector);
else
@@ -1313,7 +1313,7 @@ static void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
if (new_conn_state->writeback_job && new_conn_state->writeback_job->fb) {
WARN_ON(connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
- funcs->atomic_commit(connector, new_conn_state);
+ funcs->atomic_commit(connector, old_state);
}
}
}
@@ -3021,7 +3021,7 @@ int drm_atomic_helper_set_config(struct drm_mode_set *set,
ret = handle_conflicting_encoders(state, true);
if (ret)
- return ret;
+ goto fail;
ret = drm_atomic_commit(state);
diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c
index ae2234aae93d..5c2141e9a9f4 100644
--- a/drivers/gpu/drm/drm_blend.c
+++ b/drivers/gpu/drm/drm_blend.c
@@ -196,10 +196,10 @@
* exposed and assumed to be black).
*
* SCALING_FILTER:
- *
* Indicates scaling filter to be used for plane scaler
*
* The value of this property can be one of the following:
+ *
* Default:
* Driver's default scaling filter
* Nearest Neighbor:
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 7a01d0918861..aeb1327e3077 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -77,6 +77,7 @@ static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
if ((entry->map->offset & 0xffffffff) ==
(map->offset & 0xffffffff))
return entry;
+ break;
default: /* Make gcc happy */
;
}
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index fe573acf1067..ce45e380f4a2 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -314,9 +314,6 @@ drm_client_buffer_vmap(struct drm_client_buffer *buffer, struct dma_buf_map *map
struct dma_buf_map *map = &buffer->map;
int ret;
- if (dma_buf_map_is_set(map))
- goto out;
-
/*
* FIXME: The dependency on GEM here isn't required, we could
* convert the driver handle to a dma-buf instead and use the
@@ -329,7 +326,6 @@ drm_client_buffer_vmap(struct drm_client_buffer *buffer, struct dma_buf_map *map
if (ret)
return ret;
-out:
*map_copy = *map;
return 0;
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index f927976eca50..74090fc3aa55 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -230,14 +230,14 @@ struct dma_fence *drm_crtc_create_fence(struct drm_crtc *crtc)
*
* Setting MODE_ID to 0 will release reserved resources for the CRTC.
* SCALING_FILTER:
- * Atomic property for setting the scaling filter for CRTC scaler
+ * Atomic property for setting the scaling filter for CRTC scaler
*
- * The value of this property can be one of the following:
- * Default:
- * Driver's default scaling filter
- * Nearest Neighbor:
- * Nearest Neighbor scaling filter
+ * The value of this property can be one of the following:
*
+ * Default:
+ * Driver's default scaling filter
+ * Nearest Neighbor:
+ * Nearest Neighbor scaling filter
*/
/**
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 0401b2f47500..8781deefeae3 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -3629,14 +3629,26 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
return 0;
}
-static int drm_dp_get_vc_payload_bw(u8 dp_link_bw, u8 dp_link_count)
+/**
+ * drm_dp_get_vc_payload_bw - get the VC payload BW for an MST link
+ * @link_rate: link rate in 10kbits/s units
+ * @link_lane_count: lane count
+ *
+ * Calculate the total bandwidth of a MultiStream Transport link. The returned
+ * value is in units of PBNs/(timeslots/1 MTP). This value can be used to
+ * convert the number of PBNs required for a given stream to the number of
+ * timeslots this stream requires in each MTP.
+ */
+int drm_dp_get_vc_payload_bw(int link_rate, int link_lane_count)
{
- if (dp_link_bw == 0 || dp_link_count == 0)
- DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
- dp_link_bw, dp_link_count);
+ if (link_rate == 0 || link_lane_count == 0)
+ DRM_DEBUG_KMS("invalid link rate/lane count: (%d / %d)\n",
+ link_rate, link_lane_count);
- return dp_link_bw * dp_link_count / 2;
+ /* See DP v2.0 2.6.4.2, VCPayload_Bandwidth_for_OneTimeSlotPer_MTP_Allocation */
+ return link_rate * link_lane_count / 54000;
}
+EXPORT_SYMBOL(drm_dp_get_vc_payload_bw);
/**
* drm_dp_read_mst_cap() - check whether or not a sink supports MST
@@ -3692,7 +3704,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
goto out_unlock;
}
- mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr->dpcd[1],
+ mgr->pbn_div = drm_dp_get_vc_payload_bw(drm_dp_bw_code_to_link_rate(mgr->dpcd[1]),
mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK);
if (mgr->pbn_div == 0) {
ret = -EINVAL;
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 74f5a3197214..e95cce8e736d 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -3102,6 +3102,8 @@ static int drm_cvt_modes(struct drm_connector *connector,
height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 4) + 1) * 2;
switch (cvt->code[1] & 0x0c) {
+ /* default - because compiler doesn't see that we've enumerated all cases */
+ default:
case 0x00:
width = height * 4 / 3;
break;
@@ -3114,8 +3116,6 @@ static int drm_cvt_modes(struct drm_connector *connector,
case 0x0c:
width = height * 15 / 9;
break;
- default:
- unreachable();
}
for (j = 1; j < 5; j++) {
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 25edf670867c..4b8119510687 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -371,9 +371,9 @@ static void drm_fb_helper_resume_worker(struct work_struct *work)
console_unlock();
}
-static void drm_fb_helper_dirty_blit_real(struct drm_fb_helper *fb_helper,
- struct drm_clip_rect *clip,
- struct dma_buf_map *dst)
+static void drm_fb_helper_damage_blit_real(struct drm_fb_helper *fb_helper,
+ struct drm_clip_rect *clip,
+ struct dma_buf_map *dst)
{
struct drm_framebuffer *fb = fb_helper->fb;
unsigned int cpp = fb->format->cpp[0];
@@ -391,40 +391,86 @@ static void drm_fb_helper_dirty_blit_real(struct drm_fb_helper *fb_helper,
}
}
-static void drm_fb_helper_dirty_work(struct work_struct *work)
+static int drm_fb_helper_damage_blit(struct drm_fb_helper *fb_helper,
+ struct drm_clip_rect *clip)
+{
+ struct drm_client_buffer *buffer = fb_helper->buffer;
+ struct dma_buf_map map, dst;
+ int ret;
+
+ /*
+ * We have to pin the client buffer to its current location while
+ * flushing the shadow buffer. In the general case, concurrent
+ * modesetting operations could try to move the buffer and would
+ * fail. The modeset has to be serialized by acquiring the reservation
+ * object of the underlying BO here.
+ *
+ * For fbdev emulation, we only have to protect against fbdev modeset
+ * operations. Nothing else will involve the client buffer's BO. So it
+ * is sufficient to acquire struct drm_fb_helper.lock here.
+ */
+ mutex_lock(&fb_helper->lock);
+
+ ret = drm_client_buffer_vmap(buffer, &map);
+ if (ret)
+ goto out;
+
+ dst = map;
+ drm_fb_helper_damage_blit_real(fb_helper, clip, &dst);
+
+ drm_client_buffer_vunmap(buffer);
+
+out:
+ mutex_unlock(&fb_helper->lock);
+
+ return ret;
+}
+
+static void drm_fb_helper_damage_work(struct work_struct *work)
{
struct drm_fb_helper *helper = container_of(work, struct drm_fb_helper,
- dirty_work);
- struct drm_clip_rect *clip = &helper->dirty_clip;
+ damage_work);
+ struct drm_device *dev = helper->dev;
+ struct drm_clip_rect *clip = &helper->damage_clip;
struct drm_clip_rect clip_copy;
unsigned long flags;
- struct dma_buf_map map;
int ret;
- spin_lock_irqsave(&helper->dirty_lock, flags);
+ spin_lock_irqsave(&helper->damage_lock, flags);
clip_copy = *clip;
clip->x1 = clip->y1 = ~0;
clip->x2 = clip->y2 = 0;
- spin_unlock_irqrestore(&helper->dirty_lock, flags);
+ spin_unlock_irqrestore(&helper->damage_lock, flags);
- /* call dirty callback only when it has been really touched */
- if (clip_copy.x1 < clip_copy.x2 && clip_copy.y1 < clip_copy.y2) {
-
- /* Generic fbdev uses a shadow buffer */
- if (helper->buffer) {
- ret = drm_client_buffer_vmap(helper->buffer, &map);
- if (ret)
- return;
- drm_fb_helper_dirty_blit_real(helper, &clip_copy, &map);
- }
+ /* Call damage handlers only if necessary */
+ if (!(clip_copy.x1 < clip_copy.x2 && clip_copy.y1 < clip_copy.y2))
+ return;
- if (helper->fb->funcs->dirty)
- helper->fb->funcs->dirty(helper->fb, NULL, 0, 0,
- &clip_copy, 1);
+ if (helper->buffer) {
+ ret = drm_fb_helper_damage_blit(helper, &clip_copy);
+ if (drm_WARN_ONCE(dev, ret, "Damage blitter failed: ret=%d\n", ret))
+ goto err;
+ }
- if (helper->buffer)
- drm_client_buffer_vunmap(helper->buffer);
+ if (helper->fb->funcs->dirty) {
+ ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1);
+ if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
+ goto err;
}
+
+ return;
+
+err:
+ /*
+ * Restore damage clip rectangle on errors. The next run
+ * of the damage worker will perform the update.
+ */
+ spin_lock_irqsave(&helper->damage_lock, flags);
+ clip->x1 = min_t(u32, clip->x1, clip_copy.x1);
+ clip->y1 = min_t(u32, clip->y1, clip_copy.y1);
+ clip->x2 = max_t(u32, clip->x2, clip_copy.x2);
+ clip->y2 = max_t(u32, clip->y2, clip_copy.y2);
+ spin_unlock_irqrestore(&helper->damage_lock, flags);
}
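
The worker relies on the inverted-rect convention for "no damage"; a standalone
sketch of the accumulate/union protocol (helper names hypothetical):

    #include <assert.h>
    #include <stdint.h>

    struct clip { uint32_t x1, y1, x2, y2; };

    static void clip_add(struct clip *c, uint32_t x, uint32_t y, uint32_t w, uint32_t h)
    {
            if (x < c->x1) c->x1 = x;
            if (y < c->y1) c->y1 = y;
            if (x + w > c->x2) c->x2 = x + w;
            if (y + h > c->y2) c->y2 = y + h;
    }

    int main(void)
    {
            struct clip c = { ~0u, ~0u, 0, 0 };     /* "empty": x1 > x2, y1 > y2 */

            assert(!(c.x1 < c.x2 && c.y1 < c.y2));  /* the worker would return early */
            clip_add(&c, 10, 20, 100, 50);
            clip_add(&c, 5, 40, 30, 60);
            assert(c.x1 == 5 && c.y1 == 20 && c.x2 == 110 && c.y2 == 100);
            return 0;
    }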
/**
@@ -440,10 +486,10 @@ void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
const struct drm_fb_helper_funcs *funcs)
{
INIT_LIST_HEAD(&helper->kernel_fb_list);
- spin_lock_init(&helper->dirty_lock);
+ spin_lock_init(&helper->damage_lock);
INIT_WORK(&helper->resume_work, drm_fb_helper_resume_worker);
- INIT_WORK(&helper->dirty_work, drm_fb_helper_dirty_work);
- helper->dirty_clip.x1 = helper->dirty_clip.y1 = ~0;
+ INIT_WORK(&helper->damage_work, drm_fb_helper_damage_work);
+ helper->damage_clip.x1 = helper->damage_clip.y1 = ~0;
mutex_init(&helper->lock);
helper->funcs = funcs;
helper->dev = dev;
@@ -579,7 +625,7 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
return;
cancel_work_sync(&fb_helper->resume_work);
- cancel_work_sync(&fb_helper->dirty_work);
+ cancel_work_sync(&fb_helper->damage_work);
info = fb_helper->fbdev;
if (info) {
@@ -614,30 +660,30 @@ static bool drm_fbdev_use_shadow_fb(struct drm_fb_helper *fb_helper)
fb->funcs->dirty;
}
-static void drm_fb_helper_dirty(struct fb_info *info, u32 x, u32 y,
- u32 width, u32 height)
+static void drm_fb_helper_damage(struct fb_info *info, u32 x, u32 y,
+ u32 width, u32 height)
{
struct drm_fb_helper *helper = info->par;
- struct drm_clip_rect *clip = &helper->dirty_clip;
+ struct drm_clip_rect *clip = &helper->damage_clip;
unsigned long flags;
if (!drm_fbdev_use_shadow_fb(helper))
return;
- spin_lock_irqsave(&helper->dirty_lock, flags);
+ spin_lock_irqsave(&helper->damage_lock, flags);
clip->x1 = min_t(u32, clip->x1, x);
clip->y1 = min_t(u32, clip->y1, y);
clip->x2 = max_t(u32, clip->x2, x + width);
clip->y2 = max_t(u32, clip->y2, y + height);
- spin_unlock_irqrestore(&helper->dirty_lock, flags);
+ spin_unlock_irqrestore(&helper->damage_lock, flags);
- schedule_work(&helper->dirty_work);
+ schedule_work(&helper->damage_work);
}
/**
* drm_fb_helper_deferred_io() - fbdev deferred_io callback function
* @info: fb_info struct pointer
- * @pagelist: list of dirty mmap framebuffer pages
+ * @pagelist: list of mmap framebuffer pages that have to be flushed
*
* This function is used as the &fb_deferred_io.deferred_io
* callback function for flushing the fbdev mmap writes.
@@ -662,7 +708,7 @@ void drm_fb_helper_deferred_io(struct fb_info *info,
y1 = min / info->fix.line_length;
y2 = min_t(u32, DIV_ROUND_UP(max, info->fix.line_length),
info->var.yres);
- drm_fb_helper_dirty(info, 0, y1, info->var.xres, y2 - y1);
+ drm_fb_helper_damage(info, 0, y1, info->var.xres, y2 - y1);
}
}
EXPORT_SYMBOL(drm_fb_helper_deferred_io);
@@ -699,8 +745,7 @@ ssize_t drm_fb_helper_sys_write(struct fb_info *info, const char __user *buf,
ret = fb_sys_write(info, buf, count, ppos);
if (ret > 0)
- drm_fb_helper_dirty(info, 0, 0, info->var.xres,
- info->var.yres);
+ drm_fb_helper_damage(info, 0, 0, info->var.xres, info->var.yres);
return ret;
}
@@ -717,8 +762,7 @@ void drm_fb_helper_sys_fillrect(struct fb_info *info,
const struct fb_fillrect *rect)
{
sys_fillrect(info, rect);
- drm_fb_helper_dirty(info, rect->dx, rect->dy,
- rect->width, rect->height);
+ drm_fb_helper_damage(info, rect->dx, rect->dy, rect->width, rect->height);
}
EXPORT_SYMBOL(drm_fb_helper_sys_fillrect);
@@ -733,8 +777,7 @@ void drm_fb_helper_sys_copyarea(struct fb_info *info,
const struct fb_copyarea *area)
{
sys_copyarea(info, area);
- drm_fb_helper_dirty(info, area->dx, area->dy,
- area->width, area->height);
+ drm_fb_helper_damage(info, area->dx, area->dy, area->width, area->height);
}
EXPORT_SYMBOL(drm_fb_helper_sys_copyarea);
@@ -749,8 +792,7 @@ void drm_fb_helper_sys_imageblit(struct fb_info *info,
const struct fb_image *image)
{
sys_imageblit(info, image);
- drm_fb_helper_dirty(info, image->dx, image->dy,
- image->width, image->height);
+ drm_fb_helper_damage(info, image->dx, image->dy, image->width, image->height);
}
EXPORT_SYMBOL(drm_fb_helper_sys_imageblit);
@@ -765,8 +807,7 @@ void drm_fb_helper_cfb_fillrect(struct fb_info *info,
const struct fb_fillrect *rect)
{
cfb_fillrect(info, rect);
- drm_fb_helper_dirty(info, rect->dx, rect->dy,
- rect->width, rect->height);
+ drm_fb_helper_damage(info, rect->dx, rect->dy, rect->width, rect->height);
}
EXPORT_SYMBOL(drm_fb_helper_cfb_fillrect);
@@ -781,8 +822,7 @@ void drm_fb_helper_cfb_copyarea(struct fb_info *info,
const struct fb_copyarea *area)
{
cfb_copyarea(info, area);
- drm_fb_helper_dirty(info, area->dx, area->dy,
- area->width, area->height);
+ drm_fb_helper_damage(info, area->dx, area->dy, area->width, area->height);
}
EXPORT_SYMBOL(drm_fb_helper_cfb_copyarea);
@@ -797,8 +837,7 @@ void drm_fb_helper_cfb_imageblit(struct fb_info *info,
const struct fb_image *image)
{
cfb_imageblit(info, image);
- drm_fb_helper_dirty(info, image->dx, image->dy,
- image->width, image->height);
+ drm_fb_helper_damage(info, image->dx, image->dy, image->width, image->height);
}
EXPORT_SYMBOL(drm_fb_helper_cfb_imageblit);
@@ -1988,14 +2027,19 @@ static void drm_fbdev_cleanup(struct drm_fb_helper *fb_helper)
if (!fb_helper->dev)
return;
- if (fbi && fbi->fbdefio) {
- fb_deferred_io_cleanup(fbi);
- shadow = fbi->screen_buffer;
+ if (fbi) {
+ if (fbi->fbdefio)
+ fb_deferred_io_cleanup(fbi);
+ if (drm_fbdev_use_shadow_fb(fb_helper))
+ shadow = fbi->screen_buffer;
}
drm_fb_helper_fini(fb_helper);
- vfree(shadow);
+ if (shadow)
+ vfree(shadow);
+ else
+ drm_client_buffer_vunmap(fb_helper->buffer);
drm_client_framebuffer_delete(fb_helper->buffer);
}
@@ -2189,6 +2233,9 @@ static ssize_t drm_fbdev_fb_write(struct fb_info *info, const char __user *buf,
if (ret > 0)
*ppos += ret;
+ if (ret > 0)
+ drm_fb_helper_damage(info, 0, 0, info->var.xres_virtual, info->var.yres_virtual);
+
return ret ? ret : err;
}
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 499189c48f0b..9825c378dfa6 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -51,13 +51,17 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
if (!obj)
return ERR_PTR(-ENOMEM);
+ shmem = to_drm_gem_shmem_obj(obj);
+
if (!obj->funcs)
obj->funcs = &drm_gem_shmem_funcs;
- if (private)
+ if (private) {
drm_gem_private_object_init(dev, obj, size);
- else
+ shmem->map_wc = false; /* dma-buf mappings always use writecombine */
+ } else {
ret = drm_gem_object_init(dev, obj, size);
+ }
if (ret)
goto err_free;
@@ -65,7 +69,6 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
if (ret)
goto err_release;
- shmem = to_drm_gem_shmem_obj(obj);
mutex_init(&shmem->pages_lock);
mutex_init(&shmem->vmap_lock);
INIT_LIST_HEAD(&shmem->madv_list);
@@ -284,7 +287,7 @@ static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem, struct
if (ret)
goto err_zero_use;
- if (!shmem->map_cached)
+ if (shmem->map_wc)
prot = pgprot_writecombine(prot);
shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
VM_MAP, prot);
@@ -477,33 +480,6 @@ bool drm_gem_shmem_purge(struct drm_gem_object *obj)
EXPORT_SYMBOL(drm_gem_shmem_purge);
/**
- * drm_gem_shmem_create_object_cached - Create a shmem buffer object with
- * cached mappings
- * @dev: DRM device
- * @size: Size of the object to allocate
- *
- * By default, shmem buffer objects use writecombine mappings. This
- * function implements struct drm_driver.gem_create_object for shmem
- * buffer objects with cached mappings.
- *
- * Returns:
- * A struct drm_gem_shmem_object * on success or NULL negative on failure.
- */
-struct drm_gem_object *
-drm_gem_shmem_create_object_cached(struct drm_device *dev, size_t size)
-{
- struct drm_gem_shmem_object *shmem;
-
- shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
- if (!shmem)
- return NULL;
- shmem->map_cached = true;
-
- return &shmem->base;
-}
-EXPORT_SYMBOL(drm_gem_shmem_create_object_cached);
-
-/**
* drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
* @file: DRM file structure to create the dumb buffer for
* @dev: DRM device
@@ -626,7 +602,7 @@ int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
- if (!shmem->map_cached)
+ if (shmem->map_wc)
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
vma->vm_ops = &drm_gem_shmem_vm_ops;
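
With map_cached inverted into map_wc, cached CPU mappings become the shmem default, which is why drm_gem_shmem_create_object_cached() is dropped above. A driver that still wants write-combined mappings now opts in from its own gem_create_object hook; a minimal sketch, with my_gem_create_object as a hypothetical callback:

#include <linux/slab.h>
#include <drm/drm_gem_shmem_helper.h>

static struct drm_gem_object *
my_gem_create_object(struct drm_device *dev, size_t size)
{
	struct drm_gem_shmem_object *shmem;

	shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
	if (!shmem)
		return NULL;

	shmem->map_wc = true;	/* opt back in to write-combine */

	return &shmem->base;
}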
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index 02ca22e90290..0b232a73c1b7 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -387,9 +387,16 @@ static int drm_gem_vram_kmap_locked(struct drm_gem_vram_object *gbo,
if (gbo->vmap_use_count > 0)
goto out;
- ret = ttm_bo_vmap(&gbo->bo, &gbo->map);
- if (ret)
- return ret;
+ /*
+ * VRAM helpers unmap the BO only on demand. So the previous
+ * page mapping might still be around. Only vmap if there's
+ * no mapping present.
+ */
+ if (dma_buf_map_is_null(&gbo->map)) {
+ ret = ttm_bo_vmap(&gbo->bo, &gbo->map);
+ if (ret)
+ return ret;
+ }
out:
++gbo->vmap_use_count;
@@ -577,6 +584,7 @@ static void drm_gem_vram_bo_driver_move_notify(struct drm_gem_vram_object *gbo,
return;
ttm_bo_vunmap(bo, &gbo->map);
+ dma_buf_map_clear(&gbo->map); /* explicitly clear mapping for next vmap call */
}
static int drm_gem_vram_bo_driver_move(struct drm_gem_vram_object *gbo,
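
The struct dma_buf_map accessors are what make this lazy-vmap scheme workable: ttm_bo_vunmap() alone leaves a stale vaddr behind, so the move-notify path has to clear the mapping before dma_buf_map_is_null() can report it as gone. The accessors in isolation (a sketch, not VRAM-helper code):

#include <linux/dma-buf-map.h>

static void demo_dma_buf_map(void *vaddr)
{
	struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(NULL);	/* starts "null" */

	if (dma_buf_map_is_null(&map))		/* no live mapping yet */
		dma_buf_map_set_vaddr(&map, vaddr); /* e.g. filled by ttm_bo_vmap() */

	/* Unmapping alone would leave the old vaddr in place; clear it
	 * so the next dma_buf_map_is_null() check triggers a fresh vmap. */
	dma_buf_map_clear(&map);
}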
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index e6231947f987..a0cb746bcb0a 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -1163,7 +1163,14 @@ retry:
if (ret)
goto out;
- if (old_fb->format != fb->format) {
+ /*
+ * Only check the FOURCC format code, excluding modifiers. This is
+ * enough for all legacy drivers. Atomic drivers have their own
+ * checks in their ->atomic_check implementation, which will
+ * return -EINVAL if any hw or driver constraint is violated due
+ * to modifier changes.
+ */
+ if (old_fb->format->format != fb->format->format) {
DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n");
ret = -EINVAL;
goto out;
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 6e74e6745eca..349146049849 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -388,19 +388,18 @@ int drm_syncobj_find_fence(struct drm_file *file_private,
return -ENOENT;
*fence = drm_syncobj_fence_get(syncobj);
- drm_syncobj_put(syncobj);
if (*fence) {
ret = dma_fence_chain_find_seqno(fence, point);
if (!ret)
- return 0;
+ goto out;
dma_fence_put(*fence);
} else {
ret = -EINVAL;
}
if (!(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
- return ret;
+ goto out;
memset(&wait, 0, sizeof(wait));
wait.task = current;
@@ -432,6 +431,9 @@ int drm_syncobj_find_fence(struct drm_file *file_private,
if (wait.node.next)
drm_syncobj_remove_wait(syncobj, &wait);
+out:
+ drm_syncobj_put(syncobj);
+
return ret;
}
EXPORT_SYMBOL(drm_syncobj_find_fence);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index bbd235473645..6d38c5c17f23 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -145,10 +145,8 @@ static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
* address_space (so unmap_mapping_range does what we want,
* in particular in the case of mmap'd dmabufs)
*/
- fput(vma->vm_file);
- get_file(etnaviv_obj->base.filp);
vma->vm_pgoff = 0;
- vma->vm_file = etnaviv_obj->base.filp;
+ vma_set_file(vma, etnaviv_obj->base.filp);
vma->vm_page_prot = vm_page_prot;
}
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index e5574e506a5c..6d9e81ea67f4 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -38,6 +38,7 @@ i915-y += i915_drv.o \
i915_config.o \
i915_irq.o \
i915_getparam.o \
+ i915_mitigations.o \
i915_params.o \
i915_pci.o \
i915_scatterlist.o \
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index a9439b415603..b3533a32f8ba 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -1616,10 +1616,6 @@ static void gen11_dsi_get_power_domains(struct intel_encoder *encoder,
get_dsi_io_power_domains(i915,
enc_to_intel_dsi(encoder));
-
- if (crtc_state->dsc.compression_enable)
- intel_display_power_get(i915,
- intel_dsc_power_domain(crtc_state));
}
static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 92940a0c5ef8..dc13d1814d95 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -2754,13 +2754,15 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
int n_entries, ln;
u32 val;
+ if (enc_to_dig_port(encoder)->tc_mode == TC_PORT_TBT_ALT)
+ return;
+
ddi_translations = icl_get_mg_buf_trans(encoder, crtc_state, &n_entries);
- /* The table does not have values for level 3 and level 9. */
- if (level >= n_entries || level == 3 || level == 9) {
+ if (level >= n_entries) {
drm_dbg_kms(&dev_priv->drm,
"DDI translation not found for level %d. Using %d instead.",
- level, n_entries - 2);
- level = n_entries - 2;
+ level, n_entries - 1);
+ level = n_entries - 1;
}
/* Set MG_TX_LINK_PARAMS cri_use_fs32 to 0. */
@@ -2891,6 +2893,9 @@ tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
u32 val, dpcnt_mask, dpcnt_val;
int n_entries, ln;
+ if (enc_to_dig_port(encoder)->tc_mode == TC_PORT_TBT_ALT)
+ return;
+
ddi_translations = tgl_get_dkl_buf_trans(encoder, crtc_state, &n_entries);
if (level >= n_entries)
@@ -3532,6 +3537,23 @@ static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
}
+static void intel_ddi_power_up_lanes(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ if (intel_phy_is_combo(i915, phy)) {
+ bool lane_reversal =
+ dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
+
+ intel_combo_phy_power_up_lanes(i915, phy, false,
+ crtc_state->lane_count,
+ lane_reversal);
+ }
+}
+
static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
@@ -3621,14 +3643,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
* 7.f Combo PHY: Configure PORT_CL_DW10 Static Power Down to power up
* the used lanes of the DDI.
*/
- if (intel_phy_is_combo(dev_priv, phy)) {
- bool lane_reversal =
- dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
-
- intel_combo_phy_power_up_lanes(dev_priv, phy, false,
- crtc_state->lane_count,
- lane_reversal);
- }
+ intel_ddi_power_up_lanes(encoder, crtc_state);
/*
* 7.g Configure and enable DDI_BUF_CTL
@@ -3713,19 +3728,12 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
else
intel_prepare_dp_ddi_buffers(encoder, crtc_state);
- if (intel_phy_is_combo(dev_priv, phy)) {
- bool lane_reversal =
- dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
-
- intel_combo_phy_power_up_lanes(dev_priv, phy, false,
- crtc_state->lane_count,
- lane_reversal);
- }
+ intel_ddi_power_up_lanes(encoder, crtc_state);
intel_ddi_init_dp_buf_reg(encoder, crtc_state);
if (!is_mst)
intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
- intel_dp_configure_protocol_converter(intel_dp);
+ intel_dp_configure_protocol_converter(intel_dp, crtc_state);
intel_dp_sink_set_decompression_state(intel_dp, crtc_state,
true);
intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
@@ -4206,6 +4214,8 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
intel_de_write(dev_priv, reg, val);
}
+ intel_ddi_power_up_lanes(encoder, crtc_state);
+
/* In HDMI/DVI mode, the port width, and swing/emphasis values
* are ignored so nothing special needs to be done besides
* enabling the port.
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 53a00cf3fa32..61be6bed9162 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -2309,7 +2309,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
*/
ret = i915_vma_pin_fence(vma);
if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
- i915_gem_object_unpin_from_display_plane(vma);
+ i915_vma_unpin(vma);
vma = ERR_PTR(ret);
goto err;
}
@@ -2327,12 +2327,9 @@ err:
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
- i915_gem_object_lock(vma->obj, NULL);
if (flags & PLANE_HAS_FENCE)
i915_vma_unpin_fence(vma);
- i915_gem_object_unpin_from_display_plane(vma);
- i915_gem_object_unlock(vma->obj);
-
+ i915_vma_unpin(vma);
i915_vma_put(vma);
}
@@ -4807,6 +4804,8 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
} else if (fb->format->is_yuv) {
plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
+ if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+ plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
}
return plane_color_ctl;
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index ce82d654d0f2..34d78c654df3 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -1436,6 +1436,9 @@ struct intel_dp {
bool ycbcr_444_to_420;
} dfp;
+ /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
+ struct pm_qos_request pm_qos;
+
/* Display stream compression testing */
bool force_dsc_en;
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 2165398d2c7c..8a26307c4896 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -1489,7 +1489,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
* lowest possible wakeup latency and so prevent the cpu from going into
* deep sleep states.
*/
- cpu_latency_qos_update_request(&i915->pm_qos, 0);
+ cpu_latency_qos_update_request(&intel_dp->pm_qos, 0);
intel_dp_check_edp(intel_dp);
@@ -1622,7 +1622,7 @@ done:
ret = recv_bytes;
out:
- cpu_latency_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE);
+ cpu_latency_qos_update_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
if (vdd)
edp_panel_vdd_off(intel_dp, false);
@@ -1898,6 +1898,9 @@ static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index)
static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
+ if (cpu_latency_qos_request_active(&intel_dp->pm_qos))
+ cpu_latency_qos_remove_request(&intel_dp->pm_qos);
+
kfree(intel_dp->aux.name);
}
@@ -1950,6 +1953,7 @@ intel_dp_aux_init(struct intel_dp *intel_dp)
encoder->base.name);
intel_dp->aux.transfer = intel_dp_aux_transfer;
+ cpu_latency_qos_add_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
}
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
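
Moving the QoS request from the device-global i915->pm_qos into each intel_dp keeps one encoder's AUX traffic from holding the CPU out of deep idle on behalf of every other port. The cpu_latency_qos_*() calls follow a common lifecycle pattern; a minimal sketch around a hypothetical driver struct:

#include <linux/pm_qos.h>

struct my_port {
	struct pm_qos_request pm_qos;
};

static void my_port_init(struct my_port *p)
{
	/* Registered once; DEFAULT_VALUE imposes no constraint yet. */
	cpu_latency_qos_add_request(&p->pm_qos, PM_QOS_DEFAULT_VALUE);
}

static void my_port_do_transfer(struct my_port *p)
{
	/* Forbid deep C-states for the duration of the transfer. */
	cpu_latency_qos_update_request(&p->pm_qos, 0);

	/* ... irq-driven transfer happens here ... */

	cpu_latency_qos_update_request(&p->pm_qos, PM_QOS_DEFAULT_VALUE);
}

static void my_port_fini(struct my_port *p)
{
	if (cpu_latency_qos_request_active(&p->pm_qos))
		cpu_latency_qos_remove_request(&p->pm_qos);
}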
@@ -4010,7 +4014,8 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp,
intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
-void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp)
+void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 tmp;
@@ -4029,8 +4034,8 @@ void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp)
drm_dbg_kms(&i915->drm, "Failed to set protocol converter HDMI mode to %s\n",
enableddisabled(intel_dp->has_hdmi_sink));
- tmp = intel_dp->dfp.ycbcr_444_to_420 ?
- DP_CONVERSION_TO_YCBCR420_ENABLE : 0;
+ tmp = crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 &&
+ intel_dp->dfp.ycbcr_444_to_420 ? DP_CONVERSION_TO_YCBCR420_ENABLE : 0;
if (drm_dp_dpcd_writeb(&intel_dp->aux,
DP_PROTOCOL_CONVERTER_CONTROL_1, tmp) != 1)
@@ -4084,7 +4089,7 @@ static void intel_enable_dp(struct intel_atomic_state *state,
}
intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
- intel_dp_configure_protocol_converter(intel_dp);
+ intel_dp_configure_protocol_converter(intel_dp, pipe_config);
intel_dp_start_link_train(intel_dp, pipe_config);
intel_dp_stop_link_train(intel_dp, pipe_config);
@@ -4632,24 +4637,6 @@ ivb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp,
intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
-void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u8 train_set = intel_dp->train_set[0];
-
- drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s\n",
- train_set & DP_TRAIN_VOLTAGE_SWING_MASK,
- train_set & DP_TRAIN_MAX_SWING_REACHED ? " (max)" : "");
- drm_dbg_kms(&dev_priv->drm, "Using pre-emphasis level %d%s\n",
- (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
- DP_TRAIN_PRE_EMPHASIS_SHIFT,
- train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ?
- " (max)" : "");
-
- intel_dp->set_signal_levels(intel_dp, crtc_state);
-}
-
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
@@ -5698,7 +5685,7 @@ static void intel_dp_process_phy_request(struct intel_dp *intel_dp,
intel_dp_autotest_phy_ddi_disable(intel_dp, crtc_state);
- intel_dp_set_signal_levels(intel_dp, crtc_state);
+ intel_dp_set_signal_levels(intel_dp, crtc_state, DP_PHY_DPRX);
intel_dp_phy_pattern_update(intel_dp, crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index b871a09b6901..6620f9efdcbb 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -51,7 +51,8 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
int intel_dp_retrain_link(struct intel_encoder *encoder,
struct drm_modeset_acquire_ctx *ctx);
void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode);
-void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp);
+void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
bool enable);
@@ -95,9 +96,6 @@ void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
u8 dp_train_pat);
-void
-intel_dp_set_signal_levels(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state);
void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
u8 *link_bw, u8 *rate_select);
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 91d3979902d0..d8c6d7054d11 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -334,6 +334,27 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
return drm_dp_dpcd_write(&intel_dp->aux, reg, buf, len) == len;
}
+void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ enum drm_dp_phy dp_phy)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u8 train_set = intel_dp->train_set[0];
+ char phy_name[10];
+
+ drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s, pre-emphasis level %d%s, at %s\n",
+ train_set & DP_TRAIN_VOLTAGE_SWING_MASK,
+ train_set & DP_TRAIN_MAX_SWING_REACHED ? " (max)" : "",
+ (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
+ DP_TRAIN_PRE_EMPHASIS_SHIFT,
+ train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ?
+ " (max)" : "",
+ intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)));
+
+ if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
+ intel_dp->set_signal_levels(intel_dp, crtc_state);
+}
+
static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
@@ -341,7 +362,7 @@ intel_dp_reset_link_train(struct intel_dp *intel_dp,
u8 dp_train_pat)
{
memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
- intel_dp_set_signal_levels(intel_dp, crtc_state);
+ intel_dp_set_signal_levels(intel_dp, crtc_state, dp_phy);
return intel_dp_set_link_train(intel_dp, crtc_state, dp_phy, dp_train_pat);
}
@@ -355,7 +376,7 @@ intel_dp_update_link_train(struct intel_dp *intel_dp,
DP_TRAINING_LANE0_SET_PHY_REPEATER(dp_phy);
int ret;
- intel_dp_set_signal_levels(intel_dp, crtc_state);
+ intel_dp_set_signal_levels(intel_dp, crtc_state, dp_phy);
ret = drm_dp_dpcd_write(&intel_dp->aux, reg,
intel_dp->train_set, crtc_state->lane_count);
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.h b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
index 86905aa24db7..6a1f76bd8c75 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
@@ -17,6 +17,9 @@ void intel_dp_get_adjust_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy,
const u8 link_status[DP_LINK_STATUS_SIZE]);
+void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ enum drm_dp_phy dp_phy);
void intel_dp_start_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
void intel_dp_stop_link_train(struct intel_dp *intel_dp,
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 0c8684634fca..3286b232be0b 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -23,6 +23,7 @@
*
*/
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
@@ -68,7 +69,9 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr,
connector->port,
- crtc_state->pbn, 0);
+ crtc_state->pbn,
+ drm_dp_get_vc_payload_bw(crtc_state->port_clock,
+ crtc_state->lane_count));
if (slots == -EDEADLK)
return slots;
if (slots >= 0)
@@ -719,11 +722,13 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
}
static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *connector,
- struct drm_connector_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state,
+ connector);
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_dp *intel_dp = intel_connector->mst_port;
- struct intel_crtc *crtc = to_intel_crtc(state->crtc);
+ struct intel_crtc *crtc = to_intel_crtc(connector_state->crtc);
return &intel_dp->mst_encoders[crtc->pipe]->base.base;
}
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index b2a4bbcfdcd2..b9d8825e2bb1 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -2210,6 +2210,7 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
if (content_protection_type_changed) {
mutex_lock(&hdcp->mutex);
hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ drm_connector_get(&connector->base);
schedule_work(&hdcp->prop_work);
mutex_unlock(&hdcp->mutex);
}
@@ -2221,6 +2222,14 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
desired_and_not_enabled =
hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED;
mutex_unlock(&hdcp->mutex);
+ /*
+ * If HDCP is already ENABLED and the CP property is DESIRED, schedule
+ * prop_work to report the correct CP property to user space.
+ */
+ if (!desired_and_not_enabled && !content_protection_type_changed) {
+ drm_connector_get(&connector->base);
+ schedule_work(&hdcp->prop_work);
+ }
}
if (desired_and_not_enabled || content_protection_type_changed)
diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.c b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
index ad5cc13037ae..1c939f9c9bc9 100644
--- a/drivers/gpu/drm/i915/display/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
@@ -297,13 +297,9 @@ int intel_lpe_audio_init(struct drm_i915_private *dev_priv)
*/
void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv)
{
- struct irq_desc *desc;
-
if (!HAS_LPE_AUDIO(dev_priv))
return;
- desc = irq_to_desc(dev_priv->lpe_audio.irq);
-
lpe_audio_platdev_destroy(dev_priv);
irq_free_desc(dev_priv->lpe_audio.irq);
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 52b4f6193b4c..0095c8cac9b4 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -359,7 +359,7 @@ static void intel_overlay_release_old_vma(struct intel_overlay *overlay)
intel_frontbuffer_flip_complete(overlay->i915,
INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
- i915_gem_object_unpin_from_display_plane(vma);
+ i915_vma_unpin(vma);
i915_vma_put(vma);
}
@@ -860,7 +860,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
return 0;
out_unpin:
- i915_gem_object_unpin_from_display_plane(vma);
+ i915_vma_unpin(vma);
out_pin_section:
atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index 9f23bac0d792..d64fce1a17cb 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -1650,16 +1650,13 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
val = pch_get_backlight(connector);
else
val = lpt_get_backlight(connector);
- val = intel_panel_compute_brightness(connector, val);
- panel->backlight.level = clamp(val, panel->backlight.min,
- panel->backlight.max);
if (cpu_mode) {
drm_dbg_kms(&dev_priv->drm,
"CPU backlight register was enabled, switching to PCH override\n");
/* Write converted CPU PWM value to PCH override register */
- lpt_set_backlight(connector->base.state, panel->backlight.level);
+ lpt_set_backlight(connector->base.state, val);
intel_de_write(dev_priv, BLC_PWM_PCH_CTL1,
pch_ctl1 | BLM_PCH_OVERRIDE_ENABLE);
@@ -1667,6 +1664,10 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
cpu_ctl2 & ~BLM_PWM_ENABLE);
}
+ val = intel_panel_compute_brightness(connector, val);
+ panel->backlight.level = clamp(val, panel->backlight.min,
+ panel->backlight.max);
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index 019a2d6d807a..3da2544fa1c0 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -618,13 +618,19 @@ skl_program_scaler(struct intel_plane *plane,
/* Preoffset values for YUV to RGB Conversion */
#define PREOFF_YUV_TO_RGB_HI 0x1800
-#define PREOFF_YUV_TO_RGB_ME 0x1F00
+#define PREOFF_YUV_TO_RGB_ME 0x0000
#define PREOFF_YUV_TO_RGB_LO 0x1800
#define ROFF(x) (((x) & 0xffff) << 16)
#define GOFF(x) (((x) & 0xffff) << 0)
#define BOFF(x) (((x) & 0xffff) << 16)
+/*
+ * Programs the input color space conversion stage for ICL HDR planes.
+ * Note that it is assumed that this stage always happens after YUV
+ * range correction. Thus, the input to this stage is assumed to be
+ * in full-range YCbCr.
+ */
static void
icl_program_input_csc(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
@@ -672,52 +678,7 @@ icl_program_input_csc(struct intel_plane *plane,
0x0, 0x7800, 0x7F10,
},
};
-
- /* Matrix for Limited Range to Full Range Conversion */
- static const u16 input_csc_matrix_lr[][9] = {
- /*
- * BT.601 Limted range YCbCr -> full range RGB
- * The matrix required is :
- * [1.164384, 0.000, 1.596027,
- * 1.164384, -0.39175, -0.812813,
- * 1.164384, 2.017232, 0.0000]
- */
- [DRM_COLOR_YCBCR_BT601] = {
- 0x7CC8, 0x7950, 0x0,
- 0x8D00, 0x7950, 0x9C88,
- 0x0, 0x7950, 0x6810,
- },
- /*
- * BT.709 Limited range YCbCr -> full range RGB
- * The matrix required is :
- * [1.164384, 0.000, 1.792741,
- * 1.164384, -0.213249, -0.532909,
- * 1.164384, 2.112402, 0.0000]
- */
- [DRM_COLOR_YCBCR_BT709] = {
- 0x7E58, 0x7950, 0x0,
- 0x8888, 0x7950, 0xADA8,
- 0x0, 0x7950, 0x6870,
- },
- /*
- * BT.2020 Limited range YCbCr -> full range RGB
- * The matrix required is :
- * [1.164, 0.000, 1.678,
- * 1.164, -0.1873, -0.6504,
- * 1.164, 2.1417, 0.0000]
- */
- [DRM_COLOR_YCBCR_BT2020] = {
- 0x7D70, 0x7950, 0x0,
- 0x8A68, 0x7950, 0xAC00,
- 0x0, 0x7950, 0x6890,
- },
- };
- const u16 *csc;
-
- if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
- csc = input_csc_matrix[plane_state->hw.color_encoding];
- else
- csc = input_csc_matrix_lr[plane_state->hw.color_encoding];
+ const u16 *csc = input_csc_matrix[plane_state->hw.color_encoding];
intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0),
ROFF(csc[0]) | GOFF(csc[1]));
@@ -734,14 +695,8 @@ icl_program_input_csc(struct intel_plane *plane,
intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0),
PREOFF_YUV_TO_RGB_HI);
- if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
- intel_de_write_fw(dev_priv,
- PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
- 0);
- else
- intel_de_write_fw(dev_priv,
- PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
- PREOFF_YUV_TO_RGB_ME);
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
+ PREOFF_YUV_TO_RGB_ME);
intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2),
PREOFF_YUV_TO_RGB_LO);
intel_de_write_fw(dev_priv,
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index d52f9c177908..f94025ec603a 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -812,10 +812,20 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
intel_dsi_prepare(encoder, pipe_config);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON);
- intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
- /* Deassert reset */
- intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
+ /*
+ * Give the panel time to power on and then deassert its reset.
+ * Depending on the VBT MIPI sequences version the deassert-seq
+ * may contain the necessary delay; intel_dsi_msleep() will skip
+ * the delay in that case. If there is no deassert-seq, then an
+ * unconditional msleep is used to give the panel time to power on.
+ */
+ if (dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET]) {
+ intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
+ } else {
+ msleep(intel_dsi->panel_on_delay);
+ }
if (IS_GEMINILAKE(dev_priv)) {
glk_cold_boot = glk_dsi_enable_io(encoder);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 0dd477e56573..04e9c04545ad 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -114,8 +114,7 @@ static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *
if (ret)
return ret;
- fput(vma->vm_file);
- vma->vm_file = get_file(obj->base.filp);
+ vma_set_file(vma, obj->base.filp);
return 0;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index fcce6909f201..3d435bfff764 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -387,48 +387,6 @@ err:
return vma;
}
-static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
-{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_vma *vma;
-
- if (list_empty(&obj->vma.list))
- return;
-
- mutex_lock(&i915->ggtt.vm.mutex);
- spin_lock(&obj->vma.lock);
- for_each_ggtt_vma(vma, obj) {
- if (!drm_mm_node_allocated(&vma->node))
- continue;
-
- GEM_BUG_ON(vma->vm != &i915->ggtt.vm);
- list_move_tail(&vma->vm_link, &vma->vm->bound_list);
- }
- spin_unlock(&obj->vma.lock);
- mutex_unlock(&i915->ggtt.vm.mutex);
-
- if (i915_gem_object_is_shrinkable(obj)) {
- unsigned long flags;
-
- spin_lock_irqsave(&i915->mm.obj_lock, flags);
-
- if (obj->mm.madv == I915_MADV_WILLNEED &&
- !atomic_read(&obj->mm.shrink_pin))
- list_move_tail(&obj->mm.link, &i915->mm.shrink_list);
-
- spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
- }
-}
-
-void
-i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
-{
- /* Bump the LRU to try and avoid premature eviction whilst flipping */
- i915_gem_object_bump_inactive_ggtt(vma->obj);
-
- i915_vma_unpin(vma);
-}
-
/**
* Moves a single object to the CPU read, and possibly write domain.
* @obj: object to act on
@@ -569,9 +527,6 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
else
err = i915_gem_object_set_to_cpu_domain(obj, write_domain);
- /* And bump the LRU for this access */
- i915_gem_object_bump_inactive_ggtt(obj);
-
i915_gem_object_unlock(obj);
if (write_domain)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index b07dc1156a0e..bd3046e5a934 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -382,7 +382,7 @@ eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
return true;
if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) &&
- (vma->node.start + vma->node.size - 1) >> 32)
+ (vma->node.start + vma->node.size + 4095) >> 32)
return true;
if (flags & __EXEC_OBJECT_NEEDS_MAP &&
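
The widened bound in eb_vma_misplaced() is easiest to see with illustrative numbers. An object lacking EXEC_OBJECT_SUPPORTS_48B_ADDRESS that ends exactly at the 4GiB boundary used to be left in place and is now flagged for rebinding:

/*
 * node.start = 0xffffe000, node.size = 0x2000 (ends at 0x100000000):
 *
 *   old: (0xffffe000 + 0x2000 - 1)    >> 32  ==  0xffffffff  >> 32  ==  0
 *   new: (0xffffe000 + 0x2000 + 4095) >> 32  ==  0x100000fff >> 32  ==  1
 *
 * i.e. such objects must now also leave the last page below 4GiB unused.
 */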
@@ -1046,7 +1046,7 @@ static void reloc_gpu_flush(struct i915_execbuffer *eb, struct reloc_cache *cach
GEM_BUG_ON(cache->rq_size >= obj->base.size / sizeof(u32));
cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;
- __i915_gem_object_flush_map(obj, 0, sizeof(u32) * (cache->rq_size + 1));
+ i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj);
intel_gt_chipset_flush(cache->rq->engine->gt);
@@ -1296,6 +1296,8 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
goto err_pool;
}
+ memset32(cmd, 0, pool->obj->base.size / sizeof(u32));
+
batch = i915_vma_instance(pool->obj, vma->vm, NULL);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 3d69e51f3e4d..ec28a6cde49b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -893,8 +893,9 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
* requires avoiding extraneous references to their filp, hence why
* we prefer to use an anonymous file for their mmaps.
*/
- fput(vma->vm_file);
- vma->vm_file = anon;
+ vma_set_file(vma, anon);
+ /* Drop the initial creation reference; the vma now holds one. */
+ fput(anon);
switch (mmo->mmap_type) {
case I915_MMAP_TYPE_WC:
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index be14486f63a7..4556afe18f16 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -486,7 +486,6 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
const struct i915_ggtt_view *view,
unsigned int flags);
-void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);
void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
diff --git a/drivers/gpu/drm/i915/gt/gen7_renderclear.c b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
index d93d85cd3027..e961ad6a3129 100644
--- a/drivers/gpu/drm/i915/gt/gen7_renderclear.c
+++ b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
@@ -7,8 +7,6 @@
#include "i915_drv.h"
#include "intel_gpu_commands.h"
-#define MAX_URB_ENTRIES 64
-#define STATE_SIZE (4 * 1024)
#define GT3_INLINE_DATA_DELAYS 0x1E00
#define batch_advance(Y, CS) GEM_BUG_ON((Y)->end != (CS))
@@ -34,38 +32,59 @@ struct batch_chunk {
};
struct batch_vals {
- u32 max_primitives;
- u32 max_urb_entries;
- u32 cmd_size;
- u32 state_size;
+ u32 max_threads;
u32 state_start;
- u32 batch_size;
+ u32 surface_start;
u32 surface_height;
u32 surface_width;
- u32 scratch_size;
- u32 max_size;
+ u32 size;
};
+static inline int num_primitives(const struct batch_vals *bv)
+{
+ /*
+ * We need to saturate the GPU with work in order to dispatch
+ * a shader on every HW thread, and clear the thread-local registers.
+ * In short, we have to dispatch work faster than the shaders can
+ * run in order to fill the EU and occupy each HW thread.
+ */
+ return bv->max_threads;
+}
+
static void
batch_get_defaults(struct drm_i915_private *i915, struct batch_vals *bv)
{
if (IS_HASWELL(i915)) {
- bv->max_primitives = 280;
- bv->max_urb_entries = MAX_URB_ENTRIES;
+ switch (INTEL_INFO(i915)->gt) {
+ default:
+ case 1:
+ bv->max_threads = 70;
+ break;
+ case 2:
+ bv->max_threads = 140;
+ break;
+ case 3:
+ bv->max_threads = 280;
+ break;
+ }
bv->surface_height = 16 * 16;
bv->surface_width = 32 * 2 * 16;
} else {
- bv->max_primitives = 128;
- bv->max_urb_entries = MAX_URB_ENTRIES / 2;
+ switch (INTEL_INFO(i915)->gt) {
+ default:
+ case 1: /* including vlv */
+ bv->max_threads = 36;
+ break;
+ case 2:
+ bv->max_threads = 128;
+ break;
+ }
bv->surface_height = 16 * 8;
bv->surface_width = 32 * 16;
}
- bv->cmd_size = bv->max_primitives * 4096;
- bv->state_size = STATE_SIZE;
- bv->state_start = bv->cmd_size;
- bv->batch_size = bv->cmd_size + bv->state_size;
- bv->scratch_size = bv->surface_height * bv->surface_width;
- bv->max_size = bv->batch_size + bv->scratch_size;
+ bv->state_start = round_up(SZ_1K + num_primitives(bv) * 64, SZ_4K);
+ bv->surface_start = bv->state_start + SZ_4K;
+ bv->size = bv->surface_start + bv->surface_height * bv->surface_width;
}
static void batch_init(struct batch_chunk *bc,
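
The rewritten batch_get_defaults() packs the command, state and surface chunks tightly instead of budgeting a fixed 4KiB per primitive. Plugging in the Haswell GT3 values gives a worked example of the resulting layout:

/*
 * HSW GT3: max_threads = 280, surface = (16 * 16) rows x (32 * 2 * 16) bytes
 *
 *   state_start   = round_up(SZ_1K + 280 * 64, SZ_4K)
 *                 = round_up(1024 + 17920, 4096)  = 20480  (0x5000)
 *   surface_start = state_start + SZ_4K           = 24576  (0x6000)
 *   size          = surface_start + 256 * 1024    = 286720 (0x46000)
 *
 * versus 280 * 4096 = 1146880 bytes for the old command chunk alone.
 */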
@@ -155,7 +174,8 @@ static u32
gen7_fill_binding_table(struct batch_chunk *state,
const struct batch_vals *bv)
{
- u32 surface_start = gen7_fill_surface_state(state, bv->batch_size, bv);
+ u32 surface_start =
+ gen7_fill_surface_state(state, bv->surface_start, bv);
u32 *cs = batch_alloc_items(state, 32, 8);
u32 offset = batch_offset(state, cs);
@@ -214,9 +234,9 @@ static void
gen7_emit_state_base_address(struct batch_chunk *batch,
u32 surface_state_base)
{
- u32 *cs = batch_alloc_items(batch, 0, 12);
+ u32 *cs = batch_alloc_items(batch, 0, 10);
- *cs++ = STATE_BASE_ADDRESS | (12 - 2);
+ *cs++ = STATE_BASE_ADDRESS | (10 - 2);
/* general */
*cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
/* surface */
@@ -233,8 +253,6 @@ gen7_emit_state_base_address(struct batch_chunk *batch,
*cs++ = BASE_ADDRESS_MODIFY;
*cs++ = 0;
*cs++ = BASE_ADDRESS_MODIFY;
- *cs++ = 0;
- *cs++ = 0;
batch_advance(batch, cs);
}
@@ -244,8 +262,7 @@ gen7_emit_vfe_state(struct batch_chunk *batch,
u32 urb_size, u32 curbe_size,
u32 mode)
{
- u32 urb_entries = bv->max_urb_entries;
- u32 threads = bv->max_primitives - 1;
+ u32 threads = bv->max_threads - 1;
u32 *cs = batch_alloc_items(batch, 32, 8);
*cs++ = MEDIA_VFE_STATE | (8 - 2);
@@ -254,7 +271,7 @@ gen7_emit_vfe_state(struct batch_chunk *batch,
*cs++ = 0;
/* number of threads & urb entries for GPGPU vs Media Mode */
- *cs++ = threads << 16 | urb_entries << 8 | mode << 2;
+ *cs++ = threads << 16 | 1 << 8 | mode << 2;
*cs++ = 0;
@@ -293,17 +310,12 @@ gen7_emit_media_object(struct batch_chunk *batch,
{
unsigned int x_offset = (media_object_index % 16) * 64;
unsigned int y_offset = (media_object_index / 16) * 16;
- unsigned int inline_data_size;
- unsigned int media_batch_size;
- unsigned int i;
+ unsigned int pkt = 6 + 3;
u32 *cs;
- inline_data_size = 112 * 8;
- media_batch_size = inline_data_size + 6;
-
- cs = batch_alloc_items(batch, 8, media_batch_size);
+ cs = batch_alloc_items(batch, 8, pkt);
- *cs++ = MEDIA_OBJECT | (media_batch_size - 2);
+ *cs++ = MEDIA_OBJECT | (pkt - 2);
/* interface descriptor offset */
*cs++ = 0;
@@ -317,25 +329,44 @@ gen7_emit_media_object(struct batch_chunk *batch,
*cs++ = 0;
/* inline */
- *cs++ = (y_offset << 16) | (x_offset);
+ *cs++ = y_offset << 16 | x_offset;
*cs++ = 0;
*cs++ = GT3_INLINE_DATA_DELAYS;
- for (i = 3; i < inline_data_size; i++)
- *cs++ = 0;
batch_advance(batch, cs);
}
static void gen7_emit_pipeline_flush(struct batch_chunk *batch)
{
- u32 *cs = batch_alloc_items(batch, 0, 5);
+ u32 *cs = batch_alloc_items(batch, 0, 4);
- *cs++ = GFX_OP_PIPE_CONTROL(5);
- *cs++ = PIPE_CONTROL_STATE_CACHE_INVALIDATE |
- PIPE_CONTROL_GLOBAL_GTT_IVB;
+ *cs++ = GFX_OP_PIPE_CONTROL(4);
+ *cs++ = PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ PIPE_CONTROL_DC_FLUSH_ENABLE |
+ PIPE_CONTROL_CS_STALL;
*cs++ = 0;
*cs++ = 0;
+
+ batch_advance(batch, cs);
+}
+
+static void gen7_emit_pipeline_invalidate(struct batch_chunk *batch)
+{
+ u32 *cs = batch_alloc_items(batch, 0, 8);
+
+ /* ivb: Stall before STATE_CACHE_INVALIDATE */
+ *cs++ = GFX_OP_PIPE_CONTROL(4);
+ *cs++ = PIPE_CONTROL_STALL_AT_SCOREBOARD |
+ PIPE_CONTROL_CS_STALL;
+ *cs++ = 0;
+ *cs++ = 0;
+
+ *cs++ = GFX_OP_PIPE_CONTROL(4);
+ *cs++ = PIPE_CONTROL_STATE_CACHE_INVALIDATE;
*cs++ = 0;
+ *cs++ = 0;
+
batch_advance(batch, cs);
}
@@ -344,34 +375,46 @@ static void emit_batch(struct i915_vma * const vma,
const struct batch_vals *bv)
{
struct drm_i915_private *i915 = vma->vm->i915;
- unsigned int desc_count = 64;
- const u32 urb_size = 112;
+ const unsigned int desc_count = 1;
+ const unsigned int urb_size = 1;
struct batch_chunk cmds, state;
- u32 interface_descriptor;
+ u32 descriptors;
unsigned int i;
- batch_init(&cmds, vma, start, 0, bv->cmd_size);
- batch_init(&state, vma, start, bv->state_start, bv->state_size);
-
- interface_descriptor =
- gen7_fill_interface_descriptor(&state, bv,
- IS_HASWELL(i915) ?
- &cb_kernel_hsw :
- &cb_kernel_ivb,
- desc_count);
+ batch_init(&cmds, vma, start, 0, bv->state_start);
+ batch_init(&state, vma, start, bv->state_start, SZ_4K);
+
+ descriptors = gen7_fill_interface_descriptor(&state, bv,
+ IS_HASWELL(i915) ?
+ &cb_kernel_hsw :
+ &cb_kernel_ivb,
+ desc_count);
+
+ /* Reset inherited context registers */
+ gen7_emit_pipeline_invalidate(&cmds);
+ batch_add(&cmds, MI_LOAD_REGISTER_IMM(2));
+ batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_0_GEN7));
+ batch_add(&cmds, 0xffff0000);
+ batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_1));
+ batch_add(&cmds, 0xffff0000 | PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
gen7_emit_pipeline_flush(&cmds);
+
+ /* Switch to the media pipeline and our base address */
+ gen7_emit_pipeline_invalidate(&cmds);
batch_add(&cmds, PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
batch_add(&cmds, MI_NOOP);
- gen7_emit_state_base_address(&cmds, interface_descriptor);
+ gen7_emit_pipeline_invalidate(&cmds);
+
gen7_emit_pipeline_flush(&cmds);
+ gen7_emit_state_base_address(&cmds, descriptors);
+ gen7_emit_pipeline_invalidate(&cmds);
+ /* Set the clear-residual kernel state */
gen7_emit_vfe_state(&cmds, bv, urb_size - 1, 0, 0);
+ gen7_emit_interface_descriptor_load(&cmds, descriptors, desc_count);
- gen7_emit_interface_descriptor_load(&cmds,
- interface_descriptor,
- desc_count);
-
- for (i = 0; i < bv->max_primitives; i++)
+ /* Execute the kernel on all HW threads */
+ for (i = 0; i < num_primitives(bv); i++)
gen7_emit_media_object(&cmds, i);
batch_add(&cmds, MI_BATCH_BUFFER_END);
@@ -385,15 +428,15 @@ int gen7_setup_clear_gpr_bb(struct intel_engine_cs * const engine,
batch_get_defaults(engine->i915, &bv);
if (!vma)
- return bv.max_size;
+ return bv.size;
- GEM_BUG_ON(vma->obj->base.size < bv.max_size);
+ GEM_BUG_ON(vma->obj->base.size < bv.size);
batch = i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
if (IS_ERR(batch))
return PTR_ERR(batch);
- emit_batch(vma, memset(batch, 0, bv.max_size), &bv);
+ emit_batch(vma, memset(batch, 0, bv.size), &bv);
i915_gem_object_flush_map(vma->obj);
__i915_gem_object_release_map(vma->obj);
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index a24cc1ff08a0..1d1757584f49 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -134,11 +134,6 @@ static bool remove_signaling_context(struct intel_breadcrumbs *b,
return true;
}
-static inline bool __request_completed(const struct i915_request *rq)
-{
- return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
-}
-
__maybe_unused static bool
check_signal_order(struct intel_context *ce, struct i915_request *rq)
{
@@ -192,18 +187,6 @@ static void add_retire(struct intel_breadcrumbs *b, struct intel_timeline *tl)
intel_engine_add_retire(b->irq_engine, tl);
}
-static bool __signal_request(struct i915_request *rq)
-{
- GEM_BUG_ON(test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags));
-
- if (!__dma_fence_signal(&rq->fence)) {
- i915_request_put(rq);
- return false;
- }
-
- return true;
-}
-
static struct llist_node *
slist_add(struct llist_node *node, struct llist_node *head)
{
@@ -257,7 +240,7 @@ static void signal_irq_work(struct irq_work *work)
list_for_each_entry_rcu(rq, &ce->signals, signal_link) {
bool release;
- if (!__request_completed(rq))
+ if (!__i915_request_is_complete(rq))
break;
if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL,
@@ -274,9 +257,11 @@ static void signal_irq_work(struct irq_work *work)
release = remove_signaling_context(b, ce);
spin_unlock(&ce->signal_lock);
- if (__signal_request(rq))
+ if (__dma_fence_signal(&rq->fence))
/* We own signal_node now, xfer to local list */
signal = slist_add(&rq->signal_node, signal);
+ else
+ i915_request_put(rq);
if (release) {
add_retire(b, ce->timeline);
@@ -363,6 +348,17 @@ void intel_breadcrumbs_free(struct intel_breadcrumbs *b)
kfree(b);
}
+static void irq_signal_request(struct i915_request *rq,
+ struct intel_breadcrumbs *b)
+{
+ if (!__dma_fence_signal(&rq->fence))
+ return;
+
+ i915_request_get(rq);
+ if (llist_add(&rq->signal_node, &b->signaled_requests))
+ irq_work_queue(&b->irq_work);
+}
+
static void insert_breadcrumb(struct i915_request *rq)
{
struct intel_breadcrumbs *b = READ_ONCE(rq->engine)->breadcrumbs;
@@ -372,17 +368,13 @@ static void insert_breadcrumb(struct i915_request *rq)
if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags))
return;
- i915_request_get(rq);
-
/*
* If the request is already completed, we can transfer it
* straight onto a signaled list, and queue the irq worker for
* its signal completion.
*/
- if (__request_completed(rq)) {
- if (__signal_request(rq) &&
- llist_add(&rq->signal_node, &b->signaled_requests))
- irq_work_queue(&b->irq_work);
+ if (__i915_request_is_complete(rq)) {
+ irq_signal_request(rq, b);
return;
}
@@ -413,6 +405,8 @@ static void insert_breadcrumb(struct i915_request *rq)
break;
}
}
+
+ i915_request_get(rq);
list_add_rcu(&rq->signal_link, pos);
GEM_BUG_ON(!check_signal_order(ce, rq));
GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags));
@@ -453,19 +447,25 @@ bool i915_request_enable_breadcrumb(struct i915_request *rq)
void i915_request_cancel_breadcrumb(struct i915_request *rq)
{
+ struct intel_breadcrumbs *b = READ_ONCE(rq->engine)->breadcrumbs;
struct intel_context *ce = rq->context;
bool release;
- if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags))
+ spin_lock(&ce->signal_lock);
+ if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) {
+ spin_unlock(&ce->signal_lock);
return;
+ }
- spin_lock(&ce->signal_lock);
list_del_rcu(&rq->signal_link);
- release = remove_signaling_context(rq->engine->breadcrumbs, ce);
+ release = remove_signaling_context(b, ce);
spin_unlock(&ce->signal_lock);
if (release)
intel_context_put(ce);
+ if (__i915_request_is_complete(rq))
+ irq_signal_request(rq, b);
+
i915_request_put(rq);
}
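
irq_signal_request() relies on a guarantee of llist_add(): it returns true only when the list was empty beforehand, so exactly one producer per batch queues the irq_work while later callers merely chain their nodes. The same lockless idiom in isolation (names here are illustrative):

#include <linux/llist.h>
#include <linux/irq_work.h>

struct signal_queue {
	struct llist_head signaled;
	struct irq_work work;	/* drains 'signaled' when it runs */
};

static void queue_signal(struct signal_queue *q, struct llist_node *node)
{
	/* The first node onto an empty list kicks the worker; concurrent
	 * adders from any context simply append and return. */
	if (llist_add(node, &q->signaled))
		irq_work_queue(&q->work);
}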
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index cf94525be2c1..db8c66dde655 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -526,16 +526,39 @@ static int init_ggtt(struct i915_ggtt *ggtt)
mutex_init(&ggtt->error_mutex);
if (ggtt->mappable_end) {
- /* Reserve a mappable slot for our lockless error capture */
- ret = drm_mm_insert_node_in_range(&ggtt->vm.mm,
- &ggtt->error_capture,
- PAGE_SIZE, 0,
- I915_COLOR_UNEVICTABLE,
- 0, ggtt->mappable_end,
- DRM_MM_INSERT_LOW);
- if (ret)
- return ret;
+ /*
+ * Reserve a mappable slot for our lockless error capture.
+ *
+ * We strongly prefer taking address 0x0 in order to protect
+ * other critical buffers against accidental overwrites,
+ * as writing to address 0 is a very common mistake.
+ *
+ * Since 0 may already be in use by the system (e.g. the BIOS
+ * framebuffer), we let the reservation fail quietly and hope
+ * 0 always remains reserved.
+ *
+ * If we fail to reserve 0, and then fail to find any space
+ * for an error-capture, remain silent. We can afford not
+ * to reserve an error_capture node as we have fallback
+ * paths, and we trust that 0 will remain reserved. However,
+ * the only likely reason for failure to insert is a driver
+ * bug, which we expect to cause other failures...
+ */
+ ggtt->error_capture.size = I915_GTT_PAGE_SIZE;
+ ggtt->error_capture.color = I915_COLOR_UNEVICTABLE;
+ if (drm_mm_reserve_node(&ggtt->vm.mm, &ggtt->error_capture))
+ drm_mm_insert_node_in_range(&ggtt->vm.mm,
+ &ggtt->error_capture,
+ ggtt->error_capture.size, 0,
+ ggtt->error_capture.color,
+ 0, ggtt->mappable_end,
+ DRM_MM_INSERT_LOW);
}
+ if (drm_mm_node_allocated(&ggtt->error_capture))
+ drm_dbg(&ggtt->vm.i915->drm,
+ "Reserved GGTT:[%llx, %llx] for use by error capture\n",
+ ggtt->error_capture.start,
+ ggtt->error_capture.start + ggtt->error_capture.size);
/*
* The upper portion of the GuC address space has a sizeable hole
@@ -548,9 +571,9 @@ static int init_ggtt(struct i915_ggtt *ggtt)
/* Clear any non-preallocated blocks */
drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
- drm_dbg_kms(&ggtt->vm.i915->drm,
- "clearing unused GTT space: [%lx, %lx]\n",
- hole_start, hole_end);
+ drm_dbg(&ggtt->vm.i915->drm,
+ "clearing unused GTT space: [%lx, %lx]\n",
+ hole_start, hole_end);
ggtt->vm.clear_range(&ggtt->vm, hole_start,
hole_end - hole_start);
}
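
The reserve-then-search idiom above generalizes beyond error capture: drm_mm_reserve_node() pins the node at exactly the start/size/color already filled in, and only if that address is occupied does a range search pick another spot. A condensed sketch, assuming an initialized struct drm_mm:

#include <linux/sizes.h>
#include <drm/drm_mm.h>

static int reserve_low_node(struct drm_mm *mm, struct drm_mm_node *node,
			    u64 end)
{
	node->start = 0;	/* preferred fixed offset */
	node->size = SZ_4K;

	if (!drm_mm_reserve_node(mm, node))
		return 0;

	/* Address 0 already occupied; fall back to a bottom-up search. */
	return drm_mm_insert_node_in_range(mm, node, node->size, 0,
					   node->color, 0, end,
					   DRM_MM_INSERT_LOW);
}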
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 7614a3d24fca..26c7d0a50585 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -3988,6 +3988,9 @@ err:
static void lrc_destroy_wa_ctx(struct intel_engine_cs *engine)
{
i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0);
+
+ /* Called on error unwind; clear all flags to prevent further use */
+ memset(&engine->wa_ctx, 0, sizeof(engine->wa_ctx));
}
typedef u32 *(*wa_bb_func_t)(struct intel_engine_cs *engine, u32 *batch);
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index a41b43f445b8..ecf3a6118a6d 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -32,6 +32,7 @@
#include "gen6_ppgtt.h"
#include "gen7_renderclear.h"
#include "i915_drv.h"
+#include "i915_mitigations.h"
#include "intel_breadcrumbs.h"
#include "intel_context.h"
#include "intel_gt.h"
@@ -886,7 +887,8 @@ static int switch_context(struct i915_request *rq)
GEM_BUG_ON(HAS_EXECLISTS(engine->i915));
if (engine->wa_ctx.vma && ce != engine->kernel_context) {
- if (engine->wa_ctx.vma->private != ce) {
+ if (engine->wa_ctx.vma->private != ce &&
+ i915_mitigate_clear_residuals()) {
ret = clear_residuals(rq);
if (ret)
return ret;
@@ -1290,7 +1292,7 @@ int intel_ring_submission_setup(struct intel_engine_cs *engine)
GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma);
- if (IS_HASWELL(engine->i915) && engine->class == RENDER_CLASS) {
+ if (IS_GEN(engine->i915, 7) && engine->class == RENDER_CLASS) {
err = gen7_ctx_switch_bb_init(engine);
if (err)
goto err_ring_unpin;
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index 7ea94d201fe6..8015964043eb 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -126,6 +126,10 @@ static void __rcu_cacheline_free(struct rcu_head *rcu)
struct intel_timeline_cacheline *cl =
container_of(rcu, typeof(*cl), rcu);
+ /* Must wait until after all *rq->hwsp are complete before removing */
+ i915_gem_object_unpin_map(cl->hwsp->vma->obj);
+ __idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS));
+
i915_active_fini(&cl->active);
kfree(cl);
}
@@ -133,11 +137,6 @@ static void __rcu_cacheline_free(struct rcu_head *rcu)
static void __idle_cacheline_free(struct intel_timeline_cacheline *cl)
{
GEM_BUG_ON(!i915_active_is_idle(&cl->active));
-
- i915_gem_object_unpin_map(cl->hwsp->vma->obj);
- i915_vma_put(cl->hwsp->vma);
- __idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS));
-
call_rcu(&cl->rcu, __rcu_cacheline_free);
}
@@ -179,7 +178,6 @@ cacheline_alloc(struct intel_timeline_hwsp *hwsp, unsigned int cacheline)
return ERR_CAST(vaddr);
}
- i915_vma_get(hwsp->vma);
cl->hwsp = hwsp;
cl->vaddr = page_pack_bits(vaddr, cacheline);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
index 9bbe8a795cb8..c92f2c056db4 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
@@ -134,7 +134,7 @@ static int remove_buf_file_callback(struct dentry *dentry)
}
/* relay channel callbacks */
-static struct rchan_callbacks relay_callbacks = {
+static const struct rchan_callbacks relay_callbacks = {
.subbuf_start = subbuf_start_callback,
.create_buf_file = create_buf_file_callback,
.remove_buf_file = remove_buf_file_callback,
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 180c23e2e25e..602f1a0bc587 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -53,6 +53,7 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
fw_def(ELKHARTLAKE, 0, guc_def(ehl, 49, 0, 1), huc_def(ehl, 9, 0, 0)) \
fw_def(ICELAKE, 0, guc_def(icl, 49, 0, 1), huc_def(icl, 9, 0, 0)) \
fw_def(COMETLAKE, 5, guc_def(cml, 49, 0, 1), huc_def(cml, 4, 0, 0)) \
+ fw_def(COMETLAKE, 0, guc_def(kbl, 49, 0, 1), huc_def(kbl, 4, 0, 0)) \
fw_def(COFFEELAKE, 0, guc_def(kbl, 49, 0, 1), huc_def(kbl, 4, 0, 0)) \
fw_def(GEMINILAKE, 0, guc_def(glk, 49, 0, 1), huc_def(glk, 4, 0, 0)) \
fw_def(KABYLAKE, 0, guc_def(kbl, 49, 0, 1), huc_def(kbl, 4, 0, 0)) \
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index a15f87539657..62a5b0dd2003 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -217,6 +217,15 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
DDI_BUF_CTL_ENABLE);
vgpu_vreg_t(vgpu, DDI_BUF_CTL(port)) |= DDI_BUF_IS_IDLE;
}
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
+ ~(PORTA_HOTPLUG_ENABLE | PORTA_HOTPLUG_STATUS_MASK);
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
+ ~(PORTB_HOTPLUG_ENABLE | PORTB_HOTPLUG_STATUS_MASK);
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
+ ~(PORTC_HOTPLUG_ENABLE | PORTC_HOTPLUG_STATUS_MASK);
+ /* No hpd_invert is set in the vgpu VBT, so clear the invert mask */
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &= ~BXT_DDI_HPD_INVERT_MASK;
+ vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= ~BXT_DE_PORT_HOTPLUG_MASK;
vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) &= ~(BIT(0) | BIT(1));
vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &=
@@ -273,6 +282,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)) |=
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
TRANS_DDI_FUNC_ENABLE);
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+ PORTA_HOTPLUG_ENABLE;
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
GEN8_DE_PORT_HOTPLUG(HPD_PORT_A);
}
@@ -301,6 +312,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
(PORT_B << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+ PORTB_HOTPLUG_ENABLE;
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
GEN8_DE_PORT_HOTPLUG(HPD_PORT_B);
}
@@ -329,6 +342,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
(PORT_B << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+ PORTC_HOTPLUG_ENABLE;
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
GEN8_DE_PORT_HOTPLUG(HPD_PORT_C);
}
@@ -661,44 +676,62 @@ void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected)
PORTD_HOTPLUG_STATUS_MASK;
intel_vgpu_trigger_virtual_event(vgpu, DP_D_HOTPLUG);
} else if (IS_BROXTON(i915)) {
- if (connected) {
- if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
+ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
+ if (connected) {
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
GEN8_DE_PORT_HOTPLUG(HPD_PORT_A);
+ } else {
+ vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &=
+ ~GEN8_DE_PORT_HOTPLUG(HPD_PORT_A);
}
- if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
- vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
- SFUSE_STRAP_DDIB_DETECTED;
+ vgpu_vreg_t(vgpu, GEN8_DE_PORT_IIR) |=
+ GEN8_DE_PORT_HOTPLUG(HPD_PORT_A);
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
+ ~PORTA_HOTPLUG_STATUS_MASK;
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+ PORTA_HOTPLUG_LONG_DETECT;
+ intel_vgpu_trigger_virtual_event(vgpu, DP_A_HOTPLUG);
+ }
+ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
+ if (connected) {
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
GEN8_DE_PORT_HOTPLUG(HPD_PORT_B);
- }
- if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
- SFUSE_STRAP_DDIC_DETECTED;
- vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
- GEN8_DE_PORT_HOTPLUG(HPD_PORT_C);
- }
- } else {
- if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
+ SFUSE_STRAP_DDIB_DETECTED;
+ } else {
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &=
- ~GEN8_DE_PORT_HOTPLUG(HPD_PORT_A);
- }
- if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
+ ~GEN8_DE_PORT_HOTPLUG(HPD_PORT_B);
vgpu_vreg_t(vgpu, SFUSE_STRAP) &=
~SFUSE_STRAP_DDIB_DETECTED;
- vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &=
- ~GEN8_DE_PORT_HOTPLUG(HPD_PORT_B);
}
- if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
- vgpu_vreg_t(vgpu, SFUSE_STRAP) &=
- ~SFUSE_STRAP_DDIC_DETECTED;
+ vgpu_vreg_t(vgpu, GEN8_DE_PORT_IIR) |=
+ GEN8_DE_PORT_HOTPLUG(HPD_PORT_B);
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
+ ~PORTB_HOTPLUG_STATUS_MASK;
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+ PORTB_HOTPLUG_LONG_DETECT;
+ intel_vgpu_trigger_virtual_event(vgpu, DP_B_HOTPLUG);
+ }
+ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
+ if (connected) {
+ vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
+ GEN8_DE_PORT_HOTPLUG(HPD_PORT_C);
+ vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
+ SFUSE_STRAP_DDIC_DETECTED;
+ } else {
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &=
~GEN8_DE_PORT_HOTPLUG(HPD_PORT_C);
+ vgpu_vreg_t(vgpu, SFUSE_STRAP) &=
+ ~SFUSE_STRAP_DDIC_DETECTED;
}
+ vgpu_vreg_t(vgpu, GEN8_DE_PORT_IIR) |=
+ GEN8_DE_PORT_HOTPLUG(HPD_PORT_C);
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
+ ~PORTC_HOTPLUG_STATUS_MASK;
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+ PORTC_HOTPLUG_LONG_DETECT;
+ intel_vgpu_trigger_virtual_event(vgpu, DP_C_HOTPLUG);
}
- vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
- PORTB_HOTPLUG_STATUS_MASK;
- intel_vgpu_trigger_virtual_event(vgpu, DP_B_HOTPLUG);
}
}
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index e49944fde333..cbe5931906e0 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -437,10 +437,9 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
if (ret)
goto out_clean_sched_policy;
- if (IS_BROADWELL(dev_priv))
+ if (IS_BROADWELL(dev_priv) || IS_BROXTON(dev_priv))
ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_B);
- /* FixMe: Re-enable APL/BXT once vfio_edid enabled */
- else if (!IS_BROXTON(dev_priv))
+ else
ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D);
if (ret)
goto out_clean_sched_policy;
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 10a865f3dc09..9ed19b8bca60 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -631,24 +631,26 @@ static int flush_lazy_signals(struct i915_active *ref)
int __i915_active_wait(struct i915_active *ref, int state)
{
- int err;
-
might_sleep();
- if (!i915_active_acquire_if_busy(ref))
- return 0;
-
/* Any fence added after the wait begins will not be auto-signaled */
- err = flush_lazy_signals(ref);
- i915_active_release(ref);
- if (err)
- return err;
+ if (i915_active_acquire_if_busy(ref)) {
+ int err;
- if (!i915_active_is_idle(ref) &&
- ___wait_var_event(ref, i915_active_is_idle(ref),
- state, 0, 0, schedule()))
- return -EINTR;
+ err = flush_lazy_signals(ref);
+ i915_active_release(ref);
+ if (err)
+ return err;
+ if (___wait_var_event(ref, i915_active_is_idle(ref),
+ state, 0, 0, schedule()))
+ return -EINTR;
+ }
+
+ /*
+ * After the wait is complete, the caller may free the active.
+ * We have to flush any concurrent retirement before returning.
+ */
flush_work(&ref->work);
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 93265951fdbb..b0899b665e85 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -1166,7 +1166,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
}
}
if (IS_ERR(src)) {
- unsigned long x, n;
+ unsigned long x, n, remain;
void *ptr;
/*
@@ -1177,14 +1177,15 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
* We don't care about copying too much here as we only
* validate up to the end of the batch.
*/
+ remain = length;
if (!(dst_obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
- length = round_up(length,
+ remain = round_up(remain,
boot_cpu_data.x86_clflush_size);
ptr = dst;
x = offset_in_page(offset);
- for (n = offset >> PAGE_SHIFT; length; n++) {
- int len = min(length, PAGE_SIZE - x);
+ for (n = offset >> PAGE_SHIFT; remain; n++) {
+ int len = min(remain, PAGE_SIZE - x);
src = kmap_atomic(i915_gem_object_get_page(src_obj, n));
if (needs_clflush)
@@ -1193,13 +1194,15 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
kunmap_atomic(src);
ptr += len;
- length -= len;
+ remain -= len;
x = 0;
}
}
i915_gem_object_unpin_pages(src_obj);
+ memset32(dst + length, 0, (dst_obj->base.size - length) / sizeof(u32));
+
/* dst_obj is returned with vmap pinned */
return dst;
}
@@ -1392,11 +1395,6 @@ static unsigned long *alloc_whitelist(u32 batch_length)
#define LENGTH_BIAS 2
-static bool shadow_needs_clflush(struct drm_i915_gem_object *obj)
-{
- return !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
-}
-
/**
* intel_engine_cmd_parser() - parse a batch buffer for privilege violations
* @engine: the engine on which the batch is to execute
@@ -1538,16 +1536,9 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
ret = 0; /* allow execution */
}
}
-
- if (shadow_needs_clflush(shadow->obj))
- drm_clflush_virt_range(batch_end, 8);
}
- if (shadow_needs_clflush(shadow->obj)) {
- void *ptr = page_mask_bits(shadow->obj->mm.mapping);
-
- drm_clflush_virt_range(ptr, (void *)(cmd + 1) - ptr);
- }
+ i915_gem_object_flush_map(shadow->obj);
if (!IS_ERR_OR_NULL(jump_whitelist))
kfree(jump_whitelist);
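
The copy_batch() change above keeps the user-visible length intact while copying a clflush-rounded `remain`, then pads the shadow with zeroes. A self-contained userspace sketch of that copy-then-pad idea (names and the word-based interface are assumptions for illustration):

	#include <stddef.h>
	#include <stdint.h>
	#include <string.h>

	/*
	 * Copy 'len' words but round the copy up to 'align' words (as is
	 * done above for non-coherent reads), then zero the destination
	 * tail so no stale bytes survive past the validated batch.
	 * Assumes 'src' has at least the rounded-up length available.
	 */
	static void copy_and_pad(uint32_t *dst, size_t dst_words,
				 const uint32_t *src, size_t len, size_t align)
	{
		size_t remain = (len + align - 1) / align * align; /* round_up() */

		if (remain > dst_words)
			remain = dst_words;
		memcpy(dst, src, remain * sizeof(*dst));
		while (len < dst_words)
			dst[len++] = 0; /* memset32(dst + length, ...) analogue */
	}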
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 320856b665a1..99eb0d7bbc44 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -578,8 +578,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
pci_set_master(pdev);
- cpu_latency_qos_add_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
-
intel_gt_init_workarounds(dev_priv);
/* On the 945G/GM, the chipset reports the MSI capability on the
@@ -626,7 +624,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
err_msi:
if (pdev->msi_enabled)
pci_disable_msi(pdev);
- cpu_latency_qos_remove_request(&dev_priv->pm_qos);
err_mem_regions:
intel_memory_regions_driver_release(dev_priv);
err_ggtt:
@@ -648,8 +645,6 @@ static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
if (pdev->msi_enabled)
pci_disable_msi(pdev);
-
- cpu_latency_qos_remove_request(&dev_priv->pm_qos);
}
/**
@@ -1052,6 +1047,8 @@ static void intel_shutdown_encoders(struct drm_i915_private *dev_priv)
void i915_driver_shutdown(struct drm_i915_private *i915)
{
+ disable_rpm_wakeref_asserts(&i915->runtime_pm);
+
i915_gem_suspend(i915);
drm_kms_helper_poll_disable(&i915->drm);
@@ -1065,6 +1062,8 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
intel_suspend_encoders(i915);
intel_shutdown_encoders(i915);
+
+ enable_rpm_wakeref_asserts(&i915->runtime_pm);
}
static bool suspend_to_idle(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 15be8debae54..c6964f82a1bb 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -891,9 +891,6 @@ struct drm_i915_private {
bool display_irqs_enabled;
- /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
- struct pm_qos_request pm_qos;
-
/* Sideband mailbox protection */
struct mutex sb_lock;
struct pm_qos_request sb_qos;
@@ -1349,7 +1346,7 @@ intel_subplatform(const struct intel_runtime_info *info, enum intel_platform p)
{
const unsigned int pi = __platform_mask_index(info, p);
- return info->platform_mask[pi] & INTEL_SUBPLATFORM_BITS;
+ return info->platform_mask[pi] & ((1 << INTEL_SUBPLATFORM_BITS) - 1);
}
static __always_inline bool
@@ -1579,9 +1576,9 @@ static inline const struct i915_rev_steppings *
tgl_revids_get(struct drm_i915_private *dev_priv)
{
if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv))
- return tgl_uy_revids;
+ return &tgl_uy_revids[INTEL_REVID(dev_priv)];
else
- return tgl_revids;
+ return &tgl_revids[INTEL_REVID(dev_priv)];
}
#define IS_TGL_DISP_REVID(p, since, until) \
@@ -1591,14 +1588,14 @@ tgl_revids_get(struct drm_i915_private *dev_priv)
#define IS_TGL_UY_GT_REVID(p, since, until) \
((IS_TGL_U(p) || IS_TGL_Y(p)) && \
- tgl_uy_revids->gt_stepping >= (since) && \
- tgl_uy_revids->gt_stepping <= (until))
+ tgl_uy_revids[INTEL_REVID(p)].gt_stepping >= (since) && \
+ tgl_uy_revids[INTEL_REVID(p)].gt_stepping <= (until))
#define IS_TGL_GT_REVID(p, since, until) \
(IS_TIGERLAKE(p) && \
!(IS_TGL_U(p) || IS_TGL_Y(p)) && \
- tgl_revids->gt_stepping >= (since) && \
- tgl_revids->gt_stepping <= (until))
+ tgl_revids[INTEL_REVID(p)].gt_stepping >= (since) && \
+ tgl_revids[INTEL_REVID(p)].gt_stepping <= (until))
#define RKL_REVID_A0 0x0
#define RKL_REVID_B0 0x1
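
The intel_subplatform() fix above is a width-versus-mask confusion; assuming INTEL_SUBPLATFORM_BITS is 3 (the width used in this era of the driver), the difference is:

	#define INTEL_SUBPLATFORM_BITS 3	/* a bit count, not a mask */

	/* old: mask & 3   keeps bits 0-1 only and silently drops bit 2 */
	/* new: mask & 0x7 keeps all INTEL_SUBPLATFORM_BITS bits:       */
	#define INTEL_SUBPLATFORM_MASK ((1 << INTEL_SUBPLATFORM_BITS) - 1)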
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index dc6febc63f1c..6cdb052e3850 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -60,6 +60,24 @@
* and related files, but that will be described in separate chapters.
*/
+/*
+ * Interrupt statistic for PMU. Increments the counter only if the
+ * interrupt originated from the GPU so interrupts from a device which
+ * shares the interrupt line are not accounted.
+ */
+static inline void pmu_irq_stats(struct drm_i915_private *i915,
+ irqreturn_t res)
+{
+ if (unlikely(res != IRQ_HANDLED))
+ return;
+
+ /*
+ * A clever compiler translates that into INC. A not so clever one
+ * should at least prevent store tearing.
+ */
+ WRITE_ONCE(i915->pmu.irq_count, i915->pmu.irq_count + 1);
+}
+
typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val);
typedef u32 (*hotplug_enables_func)(struct drm_i915_private *i915,
enum hpd_pin pin);
@@ -1668,6 +1686,8 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
} while (0);
+ pmu_irq_stats(dev_priv, ret);
+
enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
return ret;
@@ -1745,6 +1765,8 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
} while (0);
+ pmu_irq_stats(dev_priv, ret);
+
enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
return ret;
@@ -2155,6 +2177,8 @@ static irqreturn_t ilk_irq_handler(int irq, void *arg)
if (sde_ier)
raw_reg_write(regs, SDEIER, sde_ier);
+ pmu_irq_stats(i915, ret);
+
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
enable_rpm_wakeref_asserts(&i915->runtime_pm);
@@ -2541,6 +2565,8 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
gen8_master_intr_enable(regs);
+ pmu_irq_stats(dev_priv, IRQ_HANDLED);
+
return IRQ_HANDLED;
}
@@ -2636,6 +2662,8 @@ __gen11_irq_handler(struct drm_i915_private * const i915,
gen11_gu_misc_irq_handler(gt, gu_misc_iir);
+ pmu_irq_stats(i915, IRQ_HANDLED);
+
return IRQ_HANDLED;
}
@@ -3934,6 +3962,8 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
} while (0);
+ pmu_irq_stats(dev_priv, ret);
+
enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
return ret;
@@ -4043,6 +4073,8 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
} while (0);
+ pmu_irq_stats(dev_priv, ret);
+
enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
return ret;
@@ -4189,6 +4221,8 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
} while (0);
+ pmu_irq_stats(dev_priv, IRQ_HANDLED);
+
enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
return ret;
@@ -4242,18 +4276,21 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
*/
dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);
- if (HAS_PCH_DG1(dev_priv))
- dev_priv->display.hpd_irq_setup = dg1_hpd_irq_setup;
- else if (INTEL_GEN(dev_priv) >= 11)
- dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
- else if (IS_GEN9_LP(dev_priv))
- dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
- dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
- else if (HAS_GMCH(dev_priv) && I915_HAS_HOTPLUG(dev_priv))
- dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
- else
- dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
+ if (HAS_GMCH(dev_priv)) {
+ if (I915_HAS_HOTPLUG(dev_priv))
+ dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
+ } else {
+ if (HAS_PCH_DG1(dev_priv))
+ dev_priv->display.hpd_irq_setup = dg1_hpd_irq_setup;
+ else if (INTEL_GEN(dev_priv) >= 11)
+ dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
+ else if (IS_GEN9_LP(dev_priv))
+ dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
+ else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
+ dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
+ else
+ dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
+ }
}
/**
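
All of the handlers patched above share one accounting shape; a condensed sketch (handler body elided, names illustrative):

	/*
	 * Condensed shape of the handlers above: the PMU counter is only
	 * bumped when this device actually handled the interrupt, so a
	 * shared-line neighbour's IRQs are never counted.
	 */
	static irqreturn_t example_irq_handler(int irq, void *arg)
	{
		struct drm_i915_private *i915 = arg;
		irqreturn_t ret = IRQ_NONE;

		/* ... read/ack hardware IIR bits, set ret = IRQ_HANDLED ... */

		pmu_irq_stats(i915, ret);
		return ret;
	}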
diff --git a/drivers/gpu/drm/i915/i915_mitigations.c b/drivers/gpu/drm/i915/i915_mitigations.c
new file mode 100644
index 000000000000..84f12598d145
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_mitigations.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include "i915_drv.h"
+#include "i915_mitigations.h"
+
+static unsigned long mitigations __read_mostly = ~0UL;
+
+enum {
+ CLEAR_RESIDUALS = 0,
+};
+
+static const char * const names[] = {
+ [CLEAR_RESIDUALS] = "residuals",
+};
+
+bool i915_mitigate_clear_residuals(void)
+{
+ return READ_ONCE(mitigations) & BIT(CLEAR_RESIDUALS);
+}
+
+static int mitigations_set(const char *val, const struct kernel_param *kp)
+{
+ unsigned long new = ~0UL;
+ char *str, *sep, *tok;
+ bool first = true;
+ int err = 0;
+
+ BUILD_BUG_ON(ARRAY_SIZE(names) >= BITS_PER_TYPE(mitigations));
+
+ str = kstrdup(val, GFP_KERNEL);
+ if (!str)
+ return -ENOMEM;
+
+ for (sep = str; (tok = strsep(&sep, ","));) {
+ bool enable = true;
+ int i;
+
+ /* Be tolerant of leading/trailing whitespace */
+ tok = strim(tok);
+
+ if (first) {
+ first = false;
+
+ if (!strcmp(tok, "auto"))
+ continue;
+
+ new = 0;
+ if (!strcmp(tok, "off"))
+ continue;
+ }
+
+ if (*tok == '!') {
+ enable = !enable;
+ tok++;
+ }
+
+ if (!strncmp(tok, "no", 2)) {
+ enable = !enable;
+ tok += 2;
+ }
+
+ if (*tok == '\0')
+ continue;
+
+ for (i = 0; i < ARRAY_SIZE(names); i++) {
+ if (!strcmp(tok, names[i])) {
+ if (enable)
+ new |= BIT(i);
+ else
+ new &= ~BIT(i);
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE(names)) {
+ pr_err("Bad \"%s.mitigations=%s\", '%s' is unknown\n",
+ DRIVER_NAME, val, tok);
+ err = -EINVAL;
+ break;
+ }
+ }
+ kfree(str);
+ if (err)
+ return err;
+
+ WRITE_ONCE(mitigations, new);
+ return 0;
+}
+
+static int mitigations_get(char *buffer, const struct kernel_param *kp)
+{
+ unsigned long local = READ_ONCE(mitigations);
+ int count, i;
+ bool enable;
+
+ if (!local)
+ return scnprintf(buffer, PAGE_SIZE, "%s\n", "off");
+
+ if (local & BIT(BITS_PER_LONG - 1)) {
+ count = scnprintf(buffer, PAGE_SIZE, "%s,", "auto");
+ enable = false;
+ } else {
+ enable = true;
+ count = 0;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(names); i++) {
+ if ((local & BIT(i)) != enable)
+ continue;
+
+ count += scnprintf(buffer + count, PAGE_SIZE - count,
+ "%s%s,", enable ? "" : "!", names[i]);
+ }
+
+ buffer[count - 1] = '\n';
+ return count;
+}
+
+static const struct kernel_param_ops ops = {
+ .set = mitigations_set,
+ .get = mitigations_get,
+};
+
+module_param_cb_unsafe(mitigations, &ops, NULL, 0600);
+MODULE_PARM_DESC(mitigations,
+"Selectively enable security mitigations for all Intel® GPUs in the system.\n"
+"\n"
+" auto -- enables all mitigations required for the platform [default]\n"
+" off -- disables all mitigations\n"
+"\n"
+"Individual mitigations can be enabled by passing a comma-separated string,\n"
+"e.g. mitigations=residuals to enable only clearing residuals or\n"
+"mitigations=auto,noresiduals to disable only the clear residual mitigation.\n"
+"Either '!' or 'no' may be used to switch from enabling the mitigation to\n"
+"disabling it.\n"
+"\n"
+"Active mitigations for Ivybridge, Baytrail, Haswell:\n"
+" residuals -- clear all thread-local registers between contexts"
+);
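
A few invocations that the parser above accepts, first on the kernel command line and then through the 0600 parameter node created by module_param_cb_unsafe():

	i915.mitigations=auto               # default: all required mitigations
	i915.mitigations=off                # disable everything
	i915.mitigations=residuals          # enable only residuals clearing
	i915.mitigations=auto,noresiduals   # auto minus residuals ('!residuals' also works)

	# at runtime, as root:
	echo auto,noresiduals > /sys/module/i915/parameters/mitigations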
diff --git a/drivers/gpu/drm/i915/i915_mitigations.h b/drivers/gpu/drm/i915/i915_mitigations.h
new file mode 100644
index 000000000000..1359d8135287
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_mitigations.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __I915_MITIGATIONS_H__
+#define __I915_MITIGATIONS_H__
+
+#include <linux/types.h>
+
+bool i915_mitigate_clear_residuals(void);
+
+#endif /* __I915_MITIGATIONS_H__ */
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 3b12c8ff7182..649c26518d26 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -914,7 +914,7 @@ static int gen8_oa_read(struct i915_perf_stream *stream,
intel_uncore_rmw(uncore, oastatus_reg,
GEN8_OASTATUS_COUNTER_OVERFLOW |
GEN8_OASTATUS_REPORT_LOST,
- IS_GEN_RANGE(uncore->i915, 8, 10) ?
+ IS_GEN_RANGE(uncore->i915, 8, 11) ?
(GEN8_OASTATUS_HEAD_POINTER_WRAP |
GEN8_OASTATUS_TAIL_POINTER_WRAP) : 0);
}
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index cd786ad12be7..9856479b56d8 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -4,7 +4,6 @@
* Copyright © 2017-2018 Intel Corporation
*/
-#include <linux/irq.h>
#include <linux/pm_runtime.h>
#include "gt/intel_engine.h"
@@ -185,13 +184,24 @@ static u64 get_rc6(struct intel_gt *gt)
return val;
}
-static void park_rc6(struct drm_i915_private *i915)
+static void init_rc6(struct i915_pmu *pmu)
{
- struct i915_pmu *pmu = &i915->pmu;
+ struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
+ intel_wakeref_t wakeref;
- if (pmu->enable & config_enabled_mask(I915_PMU_RC6_RESIDENCY))
+ with_intel_runtime_pm(i915->gt.uncore->rpm, wakeref) {
pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
+ pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur =
+ pmu->sample[__I915_SAMPLE_RC6].cur;
+ pmu->sleep_last = ktime_get();
+ }
+}
+
+static void park_rc6(struct drm_i915_private *i915)
+{
+ struct i915_pmu *pmu = &i915->pmu;
+ pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
pmu->sleep_last = ktime_get();
}
@@ -202,6 +212,7 @@ static u64 get_rc6(struct intel_gt *gt)
return __get_rc6(gt);
}
+static void init_rc6(struct i915_pmu *pmu) { }
static void park_rc6(struct drm_i915_private *i915) {}
#endif
@@ -424,22 +435,6 @@ static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
return HRTIMER_RESTART;
}
-static u64 count_interrupts(struct drm_i915_private *i915)
-{
- /* open-coded kstat_irqs() */
- struct irq_desc *desc = irq_to_desc(i915->drm.pdev->irq);
- u64 sum = 0;
- int cpu;
-
- if (!desc || !desc->kstat_irqs)
- return 0;
-
- for_each_possible_cpu(cpu)
- sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
-
- return sum;
-}
-
static void i915_pmu_event_destroy(struct perf_event *event)
{
struct drm_i915_private *i915 =
@@ -590,7 +585,7 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
USEC_PER_SEC /* to MHz */);
break;
case I915_PMU_INTERRUPTS:
- val = count_interrupts(i915);
+ val = READ_ONCE(pmu->irq_count);
break;
case I915_PMU_RC6_RESIDENCY:
val = get_rc6(&i915->gt);
@@ -629,10 +624,8 @@ static void i915_pmu_enable(struct perf_event *event)
container_of(event->pmu, typeof(*i915), pmu.base);
unsigned int bit = event_enabled_bit(event);
struct i915_pmu *pmu = &i915->pmu;
- intel_wakeref_t wakeref;
unsigned long flags;
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
spin_lock_irqsave(&pmu->lock, flags);
/*
@@ -643,13 +636,6 @@ static void i915_pmu_enable(struct perf_event *event)
GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
GEM_BUG_ON(pmu->enable_count[bit] == ~0);
- if (pmu->enable_count[bit] == 0 &&
- config_enabled_mask(I915_PMU_RC6_RESIDENCY) & BIT_ULL(bit)) {
- pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = 0;
- pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
- pmu->sleep_last = ktime_get();
- }
-
pmu->enable |= BIT_ULL(bit);
pmu->enable_count[bit]++;
@@ -690,8 +676,6 @@ static void i915_pmu_enable(struct perf_event *event)
* an existing non-zero value.
*/
local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));
-
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
static void i915_pmu_disable(struct perf_event *event)
@@ -1147,6 +1131,7 @@ void i915_pmu_register(struct drm_i915_private *i915)
hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
pmu->timer.function = i915_sample;
pmu->cpuhp.cpu = -1;
+ init_rc6(pmu);
if (!is_igp(i915)) {
pmu->name = kasprintf(GFP_KERNEL,
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
index a24885ab415c..8405d6da5b9a 100644
--- a/drivers/gpu/drm/i915/i915_pmu.h
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -112,6 +112,14 @@ struct i915_pmu {
*/
ktime_t sleep_last;
/**
+ * @irq_count: Number of interrupts
+ *
+ * Intentionally unsigned long to avoid atomics or heuristics on 32bit.
+ * 4e9 interrupts are a lot and postprocessing can really deal with an
+ * occasional wraparound easily. It's 32bit after all.
+ */
+ unsigned long irq_count;
+ /**
* @events_attr_group: Device events attribute group.
*/
struct attribute_group events_attr_group;
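
As the @irq_count comment says, a wrapped counter is harmless as long as the consumer computes deltas in the counter's own width; a minimal sketch:

	#include <stdint.h>

	/*
	 * Free-running counter delta: stays correct across a single
	 * wraparound when done in the counter's width, e.g.
	 * prev = 0xFFFFFFF0, now = 0x10  ->  delta = 0x20.
	 */
	static uint32_t counter_delta(uint32_t prev, uint32_t now)
	{
		return now - prev;
	}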
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 620b6fab2c5c..92adfee30c7c 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -434,7 +434,7 @@ static inline u32 hwsp_seqno(const struct i915_request *rq)
static inline bool __i915_request_has_started(const struct i915_request *rq)
{
- return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno - 1);
+ return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno - 1);
}
/**
@@ -465,11 +465,19 @@ static inline bool __i915_request_has_started(const struct i915_request *rq)
*/
static inline bool i915_request_started(const struct i915_request *rq)
{
+ bool result;
+
if (i915_request_signaled(rq))
return true;
- /* Remember: started but may have since been preempted! */
- return __i915_request_has_started(rq);
+ result = true;
+ rcu_read_lock(); /* the HWSP may be freed at runtime */
+ if (likely(!i915_request_signaled(rq)))
+ /* Remember: started but may have since been preempted! */
+ result = __i915_request_has_started(rq);
+ rcu_read_unlock();
+
+ return result;
}
/**
@@ -482,10 +490,16 @@ static inline bool i915_request_started(const struct i915_request *rq)
*/
static inline bool i915_request_is_running(const struct i915_request *rq)
{
+ bool result;
+
if (!i915_request_is_active(rq))
return false;
- return __i915_request_has_started(rq);
+ rcu_read_lock();
+ result = __i915_request_has_started(rq) && i915_request_is_active(rq);
+ rcu_read_unlock();
+
+ return result;
}
/**
@@ -509,12 +523,25 @@ static inline bool i915_request_is_ready(const struct i915_request *rq)
return !list_empty(&rq->sched.link);
}
+static inline bool __i915_request_is_complete(const struct i915_request *rq)
+{
+ return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
+}
+
static inline bool i915_request_completed(const struct i915_request *rq)
{
+ bool result;
+
if (i915_request_signaled(rq))
return true;
- return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno);
+ result = true;
+ rcu_read_lock(); /* the HWSP may be freed at runtime */
+ if (likely(!i915_request_signaled(rq)))
+ result = __i915_request_is_complete(rq);
+ rcu_read_unlock();
+
+ return result;
}
static inline void i915_request_mark_complete(struct i915_request *rq)
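
The checks above lean on i915_seqno_passed(); the underlying idea is the usual wrap-safe signed-difference ordering test, sketched here (this is the idiom, not a copy of the driver's helper):

	#include <stdbool.h>
	#include <stdint.h>

	/*
	 * "a is at or ahead of b" for u32 seqnos, tolerant of wraparound
	 * as long as the two values are within half the counter range.
	 */
	static inline bool seqno_passed(uint32_t a, uint32_t b)
	{
		return (int32_t)(a - b) >= 0;
	}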
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index c53a222e3dec..713770fb2b92 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -1880,7 +1880,7 @@ static int igt_cs_tlb(void *arg)
vma = i915_vma_instance(out, vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
- goto out_put_batch;
+ goto out_put_out;
}
err = i915_vma_pin(vma, 0, 0,
diff --git a/drivers/gpu/drm/imx/dcss/dcss-dev.h b/drivers/gpu/drm/imx/dcss/dcss-dev.h
index c642ae17837f..1e582270c6ea 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-dev.h
+++ b/drivers/gpu/drm/imx/dcss/dcss-dev.h
@@ -7,6 +7,7 @@
#define __DCSS_PRV_H__
#include <drm/drm_fourcc.h>
+#include <drm/drm_plane.h>
#include <linux/io.h>
#include <video/videomode.h>
@@ -165,6 +166,8 @@ void dcss_ss_sync_set(struct dcss_ss *ss, struct videomode *vm,
/* SCALER */
int dcss_scaler_init(struct dcss_dev *dcss, unsigned long scaler_base);
void dcss_scaler_exit(struct dcss_scaler *scl);
+void dcss_scaler_set_filter(struct dcss_scaler *scl, int ch_num,
+ enum drm_scaling_filter scaling_filter);
void dcss_scaler_setup(struct dcss_scaler *scl, int ch_num,
const struct drm_format_info *format,
int src_xres, int src_yres, int dst_xres, int dst_yres,
diff --git a/drivers/gpu/drm/imx/dcss/dcss-plane.c b/drivers/gpu/drm/imx/dcss/dcss-plane.c
index e13652e3a115..03ba88f7f995 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-plane.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-plane.c
@@ -103,15 +103,15 @@ static bool dcss_plane_can_rotate(const struct drm_format_info *format,
bool mod_present, u64 modifier,
unsigned int rotation)
{
- bool linear_format = !mod_present ||
- (mod_present && modifier == DRM_FORMAT_MOD_LINEAR);
+ bool linear_format = !mod_present || modifier == DRM_FORMAT_MOD_LINEAR;
u32 supported_rotation = DRM_MODE_ROTATE_0;
if (!format->is_yuv && linear_format)
supported_rotation = DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
DRM_MODE_REFLECT_MASK;
else if (!format->is_yuv &&
- modifier == DRM_FORMAT_MOD_VIVANTE_TILED)
+ (modifier == DRM_FORMAT_MOD_VIVANTE_TILED ||
+ modifier == DRM_FORMAT_MOD_VIVANTE_SUPER_TILED))
supported_rotation = DRM_MODE_ROTATE_MASK |
DRM_MODE_REFLECT_MASK;
else if (format->is_yuv && linear_format &&
@@ -257,7 +257,8 @@ static bool dcss_plane_needs_setup(struct drm_plane_state *state,
state->src_h != old_state->src_h ||
fb->format->format != old_fb->format->format ||
fb->modifier != old_fb->modifier ||
- state->rotation != old_state->rotation;
+ state->rotation != old_state->rotation ||
+ state->scaling_filter != old_state->scaling_filter;
}
static void dcss_plane_atomic_update(struct drm_plane *plane,
@@ -272,6 +273,7 @@ static void dcss_plane_atomic_update(struct drm_plane *plane,
u32 src_w, src_h, dst_w, dst_h;
struct drm_rect src, dst;
bool enable = true;
+ bool is_rotation_90_or_270;
if (!fb || !state->crtc || !state->visible)
return;
@@ -309,8 +311,16 @@ static void dcss_plane_atomic_update(struct drm_plane *plane,
dcss_plane_atomic_set_base(dcss_plane);
+ is_rotation_90_or_270 = state->rotation & (DRM_MODE_ROTATE_90 |
+ DRM_MODE_ROTATE_270);
+
+ dcss_scaler_set_filter(dcss->scaler, dcss_plane->ch_num,
+ state->scaling_filter);
+
dcss_scaler_setup(dcss->scaler, dcss_plane->ch_num,
- state->fb->format, src_w, src_h,
+ state->fb->format,
+ is_rotation_90_or_270 ? src_h : src_w,
+ is_rotation_90_or_270 ? src_w : src_h,
dst_w, dst_h,
drm_mode_vrefresh(&crtc_state->mode));
@@ -388,6 +398,10 @@ struct dcss_plane *dcss_plane_init(struct drm_device *drm,
if (ret)
return ERR_PTR(ret);
+ drm_plane_create_scaling_filter_property(&dcss_plane->base,
+ BIT(DRM_SCALING_FILTER_DEFAULT) |
+ BIT(DRM_SCALING_FILTER_NEAREST_NEIGHBOR));
+
drm_plane_create_rotation_property(&dcss_plane->base,
DRM_MODE_ROTATE_0,
DRM_MODE_ROTATE_0 |
diff --git a/drivers/gpu/drm/imx/dcss/dcss-scaler.c b/drivers/gpu/drm/imx/dcss/dcss-scaler.c
index cd21905de580..47852b9dd5ea 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-scaler.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-scaler.c
@@ -77,6 +77,8 @@ struct dcss_scaler_ch {
u32 c_vstart;
u32 c_hstart;
+
+ bool use_nn_interpolation;
};
struct dcss_scaler {
@@ -243,6 +245,17 @@ static void dcss_scaler_gaussian_filter(int fc_q, bool use_5_taps,
}
}
+static void dcss_scaler_nearest_neighbor_filter(bool use_5_taps,
+ int coef[][PSC_NUM_TAPS])
+{
+ int i, j;
+
+ for (i = 0; i < PSC_STORED_PHASES; i++)
+ for (j = 0; j < PSC_NUM_TAPS; j++)
+ coef[i][j] = j == PSC_NUM_TAPS >> 1 ?
+ (1 << PSC_COEFF_PRECISION) : 0;
+}
+
/**
* dcss_scaler_filter_design() - Compute filter coefficients using
* Gaussian filter.
@@ -253,7 +266,8 @@ static void dcss_scaler_gaussian_filter(int fc_q, bool use_5_taps,
*/
static void dcss_scaler_filter_design(int src_length, int dst_length,
bool use_5_taps, bool phase0_identity,
- int coef[][PSC_NUM_TAPS])
+ int coef[][PSC_NUM_TAPS],
+ bool nn_interpolation)
{
int fc_q;
@@ -263,8 +277,11 @@ static void dcss_scaler_filter_design(int src_length, int dst_length,
else
fc_q = div_q(dst_length, src_length * PSC_NUM_PHASES);
- /* compute gaussian filter coefficients */
- dcss_scaler_gaussian_filter(fc_q, use_5_taps, phase0_identity, coef);
+ if (nn_interpolation)
+ dcss_scaler_nearest_neighbor_filter(use_5_taps, coef);
+ else
+ /* compute gaussian filter coefficients */
+ dcss_scaler_gaussian_filter(fc_q, use_5_taps, phase0_identity, coef);
}
static void dcss_scaler_write(struct dcss_scaler_ch *ch, u32 val, u32 ofs)
@@ -653,12 +670,14 @@ static void dcss_scaler_yuv_coef_set(struct dcss_scaler_ch *ch,
/* horizontal luma */
dcss_scaler_filter_design(src_xres, dst_xres, false,
- src_xres == dst_xres, coef);
+ src_xres == dst_xres, coef,
+ ch->use_nn_interpolation);
dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_HLUM, coef);
/* vertical luma */
dcss_scaler_filter_design(src_yres, dst_yres, program_5_taps,
- src_yres == dst_yres, coef);
+ src_yres == dst_yres, coef,
+ ch->use_nn_interpolation);
if (program_5_taps)
dcss_scaler_program_5_coef_set(ch, DCSS_SCALER_COEF_VLUM, coef);
@@ -678,14 +697,14 @@ static void dcss_scaler_yuv_coef_set(struct dcss_scaler_ch *ch,
/* horizontal chroma */
dcss_scaler_filter_design(src_xres, dst_xres, false,
(src_xres == dst_xres) && (ch->c_hstart == 0),
- coef);
+ coef, ch->use_nn_interpolation);
dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_HCHR, coef);
/* vertical chroma */
dcss_scaler_filter_design(src_yres, dst_yres, program_5_taps,
(src_yres == dst_yres) && (ch->c_vstart == 0),
- coef);
+ coef, ch->use_nn_interpolation);
if (program_5_taps)
dcss_scaler_program_5_coef_set(ch, DCSS_SCALER_COEF_VCHR, coef);
else
@@ -700,12 +719,14 @@ static void dcss_scaler_rgb_coef_set(struct dcss_scaler_ch *ch,
/* horizontal RGB */
dcss_scaler_filter_design(src_xres, dst_xres, false,
- src_xres == dst_xres, coef);
+ src_xres == dst_xres, coef,
+ ch->use_nn_interpolation);
dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_HLUM, coef);
/* vertical RGB */
dcss_scaler_filter_design(src_yres, dst_yres, false,
- src_yres == dst_yres, coef);
+ src_yres == dst_yres, coef,
+ ch->use_nn_interpolation);
dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_VLUM, coef);
}
@@ -751,6 +772,14 @@ static void dcss_scaler_set_rgb10_order(struct dcss_scaler_ch *ch,
ch->sdata_ctrl |= a2r10g10b10_format << A2R10G10B10_FORMAT_POS;
}
+void dcss_scaler_set_filter(struct dcss_scaler *scl, int ch_num,
+ enum drm_scaling_filter scaling_filter)
+{
+ struct dcss_scaler_ch *ch = &scl->ch[ch_num];
+
+ ch->use_nn_interpolation = scaling_filter == DRM_SCALING_FILTER_NEAREST_NEIGHBOR;
+}
+
void dcss_scaler_setup(struct dcss_scaler *scl, int ch_num,
const struct drm_format_info *format,
int src_xres, int src_yres, int dst_xres, int dst_yres,
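
For the nearest-neighbor filter added above, assuming the 7-tap, 10-bit-precision configuration implied by the 7-coef programming paths, every stored phase row collapses to a pure pick-one kernel:

	/*
	 * coef[i][j] = (j == PSC_NUM_TAPS >> 1) ? (1 << PSC_COEFF_PRECISION) : 0
	 *
	 * With 7 taps and 10-bit precision (assumed values), each phase row is
	 *
	 *     { 0, 0, 0, 1024, 0, 0, 0 }
	 *
	 * i.e. full weight on the centre tap, so each output pixel copies a
	 * single input pixel and no interpolation blur is introduced.
	 */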
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
index 832e5280a6ed..de62966243cd 100644
--- a/drivers/gpu/drm/lima/lima_gem.c
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -225,7 +225,7 @@ struct drm_gem_object *lima_gem_create_object(struct drm_device *dev, size_t siz
mutex_init(&bo->lock);
INIT_LIST_HEAD(&bo->va);
-
+ bo->base.map_wc = true;
bo->base.base.funcs = &lima_gem_funcs;
return &bo->base.base;
diff --git a/drivers/gpu/drm/mcde/Kconfig b/drivers/gpu/drm/mcde/Kconfig
index b3990126562c..71c689b573c9 100644
--- a/drivers/gpu/drm/mcde/Kconfig
+++ b/drivers/gpu/drm/mcde/Kconfig
@@ -4,6 +4,7 @@ config DRM_MCDE
depends on CMA
depends on ARM || COMPILE_TEST
depends on OF
+ depends on COMMON_CLK
select MFD_SYSCON
select DRM_MIPI_DSI
select DRM_BRIDGE
diff --git a/drivers/gpu/drm/mcde/Makefile b/drivers/gpu/drm/mcde/Makefile
index fe28f4e0fe46..15d9c89a3273 100644
--- a/drivers/gpu/drm/mcde/Makefile
+++ b/drivers/gpu/drm/mcde/Makefile
@@ -1,3 +1,3 @@
-mcde_drm-y += mcde_drv.o mcde_dsi.o mcde_display.o
+mcde_drm-y += mcde_drv.o mcde_dsi.o mcde_clk_div.o mcde_display.o
obj-$(CONFIG_DRM_MCDE) += mcde_drm.o
diff --git a/drivers/gpu/drm/mcde/mcde_clk_div.c b/drivers/gpu/drm/mcde/mcde_clk_div.c
new file mode 100644
index 000000000000..038821d2ef80
--- /dev/null
+++ b/drivers/gpu/drm/mcde/mcde_clk_div.c
@@ -0,0 +1,192 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/clk-provider.h>
+#include <linux/regulator/consumer.h>
+
+#include "mcde_drm.h"
+#include "mcde_display_regs.h"
+
+/* The MCDE internal clock dividers for FIFO A and B */
+struct mcde_clk_div {
+ struct clk_hw hw;
+ struct mcde *mcde;
+ u32 cr;
+ u32 cr_div;
+};
+
+static int mcde_clk_div_enable(struct clk_hw *hw)
+{
+ struct mcde_clk_div *cdiv = container_of(hw, struct mcde_clk_div, hw);
+ struct mcde *mcde = cdiv->mcde;
+ u32 val;
+
+ spin_lock(&mcde->fifo_crx1_lock);
+ val = readl(mcde->regs + cdiv->cr);
+ /*
+ * Select the PLL72 (LCD) clock as parent
+ * FIXME: implement other parents.
+ */
+ val &= ~MCDE_CRX1_CLKSEL_MASK;
+ val |= MCDE_CRX1_CLKSEL_CLKPLL72 << MCDE_CRX1_CLKSEL_SHIFT;
+ /* Internal clock */
+ val |= MCDE_CRA1_CLKTYPE_TVXCLKSEL1;
+
+ /* Clear then set the divider */
+ val &= ~(MCDE_CRX1_BCD | MCDE_CRX1_PCD_MASK);
+ val |= cdiv->cr_div;
+
+ writel(val, mcde->regs + cdiv->cr);
+ spin_unlock(&mcde->fifo_crx1_lock);
+
+ return 0;
+}
+
+static int mcde_clk_div_choose_div(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate, bool set_parent)
+{
+ int best_div = 1, div;
+ struct clk_hw *parent = clk_hw_get_parent(hw);
+ unsigned long best_prate = 0;
+ unsigned long best_diff = ~0ul;
+ int max_div = (1 << MCDE_CRX1_PCD_BITS) - 1;
+
+ for (div = 1; div < max_div; div++) {
+ unsigned long this_prate, div_rate, diff;
+
+ if (set_parent)
+ this_prate = clk_hw_round_rate(parent, rate * div);
+ else
+ this_prate = *prate;
+ div_rate = DIV_ROUND_UP_ULL(this_prate, div);
+ diff = abs(rate - div_rate);
+
+ if (diff < best_diff) {
+ best_div = div;
+ best_diff = diff;
+ best_prate = this_prate;
+ }
+ }
+
+ *prate = best_prate;
+ return best_div;
+}
+
+static long mcde_clk_div_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ int div = mcde_clk_div_choose_div(hw, rate, prate, true);
+
+ return DIV_ROUND_UP_ULL(*prate, div);
+}
+
+static unsigned long mcde_clk_div_recalc_rate(struct clk_hw *hw,
+ unsigned long prate)
+{
+ struct mcde_clk_div *cdiv = container_of(hw, struct mcde_clk_div, hw);
+ struct mcde *mcde = cdiv->mcde;
+ u32 cr;
+ int div;
+
+ /*
+ * If the MCDE is not powered we can't access registers.
+ * It will come up with 0 in the divider register bits, which
+ * means "divide by 2".
+ */
+ if (!regulator_is_enabled(mcde->epod))
+ return DIV_ROUND_UP_ULL(prate, 2);
+
+ cr = readl(mcde->regs + cdiv->cr);
+ if (cr & MCDE_CRX1_BCD)
+ return prate;
+
+ /* 0 in the PCD means "divide by 2", 1 means "divide by 3" etc */
+ div = cr & MCDE_CRX1_PCD_MASK;
+ div += 2;
+
+ return DIV_ROUND_UP_ULL(prate, div);
+}
+
+static int mcde_clk_div_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate)
+{
+ struct mcde_clk_div *cdiv = container_of(hw, struct mcde_clk_div, hw);
+ int div = mcde_clk_div_choose_div(hw, rate, &prate, false);
+ u32 cr = 0;
+
+ /*
+ * We cache the CR bits to set the divide in the state so that
+ * we can call this before we can even write to the hardware.
+ */
+ if (div == 1) {
+ /* Bypass clock divider */
+ cr |= MCDE_CRX1_BCD;
+ } else {
+ div -= 2;
+ cr |= div & MCDE_CRX1_PCD_MASK;
+ }
+ cdiv->cr_div = cr;
+
+ return 0;
+}
+
+static const struct clk_ops mcde_clk_div_ops = {
+ .enable = mcde_clk_div_enable,
+ .recalc_rate = mcde_clk_div_recalc_rate,
+ .round_rate = mcde_clk_div_round_rate,
+ .set_rate = mcde_clk_div_set_rate,
+};
+
+int mcde_init_clock_divider(struct mcde *mcde)
+{
+ struct device *dev = mcde->dev;
+ struct mcde_clk_div *fifoa;
+ struct mcde_clk_div *fifob;
+ const char *parent_name;
+ struct clk_init_data fifoa_init = {
+ .name = "fifoa",
+ .ops = &mcde_clk_div_ops,
+ .parent_names = &parent_name,
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ };
+ struct clk_init_data fifob_init = {
+ .name = "fifob",
+ .ops = &mcde_clk_div_ops,
+ .parent_names = &parent_name,
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ };
+ int ret;
+
+ spin_lock_init(&mcde->fifo_crx1_lock);
+ parent_name = __clk_get_name(mcde->lcd_clk);
+
+ /* Allocate 2 clocks */
+ fifoa = devm_kzalloc(dev, sizeof(*fifoa), GFP_KERNEL);
+ if (!fifoa)
+ return -ENOMEM;
+ fifob = devm_kzalloc(dev, sizeof(*fifob), GFP_KERNEL);
+ if (!fifob)
+ return -ENOMEM;
+
+ fifoa->mcde = mcde;
+ fifoa->cr = MCDE_CRA1;
+ fifoa->hw.init = &fifoa_init;
+ ret = devm_clk_hw_register(dev, &fifoa->hw);
+ if (ret) {
+ dev_err(dev, "error registering FIFO A clock divider\n");
+ return ret;
+ }
+ mcde->fifoa_clk = fifoa->hw.clk;
+
+ fifob->mcde = mcde;
+ fifob->cr = MCDE_CRB1;
+ fifob->hw.init = &fifob_init;
+ ret = devm_clk_hw_register(dev, &fifob->hw);
+ if (ret) {
+ dev_err(dev, "error registering FIFO B clock divider\n");
+ return ret;
+ }
+ mcde->fifob_clk = fifob->hw.clk;
+
+ return 0;
+}
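
A worked pass through mcde_clk_div_choose_div() and the PCD encoding, with illustrative numbers (72 MHz parent, 25 MHz target):

	/*
	 * Illustrative numbers only:
	 *   div = 2  ->  72/2 = 36 MHz, |25 - 36| = 11 MHz
	 *   div = 3  ->  72/3 = 24 MHz, |25 - 24| =  1 MHz   <- best_div
	 *
	 * mcde_clk_div_set_rate() then stores div - 2 = 1 in the PCD field,
	 * matching recalc_rate()'s "PCD 0 means divide by 2" decoding;
	 * div == 1 would instead set the MCDE_CRX1_BCD bypass bit.
	 */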
diff --git a/drivers/gpu/drm/mcde/mcde_display.c b/drivers/gpu/drm/mcde/mcde_display.c
index c271e5bf042e..7c2e0b865441 100644
--- a/drivers/gpu/drm/mcde/mcde_display.c
+++ b/drivers/gpu/drm/mcde/mcde_display.c
@@ -8,6 +8,7 @@
#include <linux/delay.h>
#include <linux/dma-buf.h>
#include <linux/regulator/consumer.h>
+#include <linux/media-bus-format.h>
#include <drm/drm_device.h>
#include <drm/drm_fb_cma_helper.h>
@@ -16,6 +17,7 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_vblank.h>
#include <video/mipi_display.h>
@@ -57,10 +59,15 @@ enum mcde_overlay {
MCDE_OVERLAY_5,
};
-enum mcde_dsi_formatter {
+enum mcde_formatter {
MCDE_DSI_FORMATTER_0 = 0,
MCDE_DSI_FORMATTER_1,
MCDE_DSI_FORMATTER_2,
+ MCDE_DSI_FORMATTER_3,
+ MCDE_DSI_FORMATTER_4,
+ MCDE_DSI_FORMATTER_5,
+ MCDE_DPI_FORMATTER_0,
+ MCDE_DPI_FORMATTER_1,
};
void mcde_display_irq(struct mcde *mcde)
@@ -81,7 +88,7 @@ void mcde_display_irq(struct mcde *mcde)
*
* TODO: Currently only one DSI link is supported.
*/
- if (mcde_dsi_irq(mcde->mdsi)) {
+ if (!mcde->dpi_output && mcde_dsi_irq(mcde->mdsi)) {
u32 val;
/*
@@ -243,73 +250,70 @@ static int mcde_configure_extsrc(struct mcde *mcde, enum mcde_extsrc src,
val = 0 << MCDE_EXTSRCXCONF_BUF_ID_SHIFT;
val |= 1 << MCDE_EXTSRCXCONF_BUF_NB_SHIFT;
val |= 0 << MCDE_EXTSRCXCONF_PRI_OVLID_SHIFT;
- /*
- * MCDE has inverse semantics from DRM on RBG/BGR which is why
- * all the modes are inversed here.
- */
+
switch (format) {
case DRM_FORMAT_ARGB8888:
val |= MCDE_EXTSRCXCONF_BPP_ARGB8888 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
- val |= MCDE_EXTSRCXCONF_BGR;
break;
case DRM_FORMAT_ABGR8888:
val |= MCDE_EXTSRCXCONF_BPP_ARGB8888 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
+ val |= MCDE_EXTSRCXCONF_BGR;
break;
case DRM_FORMAT_XRGB8888:
val |= MCDE_EXTSRCXCONF_BPP_XRGB8888 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
- val |= MCDE_EXTSRCXCONF_BGR;
break;
case DRM_FORMAT_XBGR8888:
val |= MCDE_EXTSRCXCONF_BPP_XRGB8888 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
+ val |= MCDE_EXTSRCXCONF_BGR;
break;
case DRM_FORMAT_RGB888:
val |= MCDE_EXTSRCXCONF_BPP_RGB888 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
- val |= MCDE_EXTSRCXCONF_BGR;
break;
case DRM_FORMAT_BGR888:
val |= MCDE_EXTSRCXCONF_BPP_RGB888 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
+ val |= MCDE_EXTSRCXCONF_BGR;
break;
case DRM_FORMAT_ARGB4444:
val |= MCDE_EXTSRCXCONF_BPP_ARGB4444 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
- val |= MCDE_EXTSRCXCONF_BGR;
break;
case DRM_FORMAT_ABGR4444:
val |= MCDE_EXTSRCXCONF_BPP_ARGB4444 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
+ val |= MCDE_EXTSRCXCONF_BGR;
break;
case DRM_FORMAT_XRGB4444:
val |= MCDE_EXTSRCXCONF_BPP_RGB444 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
- val |= MCDE_EXTSRCXCONF_BGR;
break;
case DRM_FORMAT_XBGR4444:
val |= MCDE_EXTSRCXCONF_BPP_RGB444 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
+ val |= MCDE_EXTSRCXCONF_BGR;
break;
case DRM_FORMAT_XRGB1555:
val |= MCDE_EXTSRCXCONF_BPP_IRGB1555 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
- val |= MCDE_EXTSRCXCONF_BGR;
break;
case DRM_FORMAT_XBGR1555:
val |= MCDE_EXTSRCXCONF_BPP_IRGB1555 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
+ val |= MCDE_EXTSRCXCONF_BGR;
break;
case DRM_FORMAT_RGB565:
val |= MCDE_EXTSRCXCONF_BPP_RGB565 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
- val |= MCDE_EXTSRCXCONF_BGR;
break;
case DRM_FORMAT_BGR565:
val |= MCDE_EXTSRCXCONF_BPP_RGB565 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
+ val |= MCDE_EXTSRCXCONF_BGR;
break;
case DRM_FORMAT_YUV422:
val |= MCDE_EXTSRCXCONF_BPP_YCBCR422 <<
@@ -556,6 +560,7 @@ static void mcde_configure_channel(struct mcde *mcde, enum mcde_channel ch,
<< MCDE_CHNLXSYNCHMOD_OUT_SYNCH_SRC_SHIFT;
break;
case MCDE_VIDEO_FORMATTER_FLOW:
+ case MCDE_DPI_FORMATTER_FLOW:
val = MCDE_CHNLXSYNCHMOD_SRC_SYNCH_HARDWARE
<< MCDE_CHNLXSYNCHMOD_SRC_SYNCH_SHIFT;
val |= MCDE_CHNLXSYNCHMOD_OUT_SYNCH_SRC_FORMATTER
@@ -564,7 +569,7 @@ static void mcde_configure_channel(struct mcde *mcde, enum mcde_channel ch,
default:
dev_err(mcde->dev, "unknown flow mode %d\n",
mcde->flow_mode);
- break;
+ return;
}
writel(val, mcde->regs + sync);
@@ -594,10 +599,35 @@ static void mcde_configure_channel(struct mcde *mcde, enum mcde_channel ch,
mcde->regs + mux);
break;
}
+
+ /*
+ * If using DPI configure the sync event.
+ * TODO: this is for LCD only, it does not cover TV out.
+ */
+ if (mcde->dpi_output) {
+ u32 stripwidth;
+
+ stripwidth = 0xF000 / (mode->vdisplay * 4);
+ dev_info(mcde->dev, "stripwidth: %d\n", stripwidth);
+
+ val = MCDE_SYNCHCONF_HWREQVEVENT_ACTIVE_VIDEO |
+ (mode->hdisplay - 1 - stripwidth) << MCDE_SYNCHCONF_HWREQVCNT_SHIFT |
+ MCDE_SYNCHCONF_SWINTVEVENT_ACTIVE_VIDEO |
+ (mode->hdisplay - 1 - stripwidth) << MCDE_SYNCHCONF_SWINTVCNT_SHIFT;
+
+ switch (fifo) {
+ case MCDE_FIFO_A:
+ writel(val, mcde->regs + MCDE_SYNCHCONFA);
+ break;
+ case MCDE_FIFO_B:
+ writel(val, mcde->regs + MCDE_SYNCHCONFB);
+ break;
+ }
+ }
}
static void mcde_configure_fifo(struct mcde *mcde, enum mcde_fifo fifo,
- enum mcde_dsi_formatter fmt,
+ enum mcde_formatter fmt,
int fifo_wtrmrk)
{
u32 val;
@@ -618,12 +648,49 @@ static void mcde_configure_fifo(struct mcde *mcde, enum mcde_fifo fifo,
}
val = fifo_wtrmrk << MCDE_CTRLX_FIFOWTRMRK_SHIFT;
- /* We only support DSI formatting for now */
- val |= MCDE_CTRLX_FORMTYPE_DSI <<
- MCDE_CTRLX_FORMTYPE_SHIFT;
- /* Select the formatter to use for this FIFO */
- val |= fmt << MCDE_CTRLX_FORMID_SHIFT;
+ /*
+ * Select the formatter to use for this FIFO
+ *
+ * The register definitions imply that different IDs should be used
+ * by the DSI formatters depending on if they are in VID or CMD
+ * mode, and the manual says they are dedicated but identical.
+ * The vendor code uses them as it sees fit.
+ */
+ switch (fmt) {
+ case MCDE_DSI_FORMATTER_0:
+ val |= MCDE_CTRLX_FORMTYPE_DSI << MCDE_CTRLX_FORMTYPE_SHIFT;
+ val |= MCDE_CTRLX_FORMID_DSI0VID << MCDE_CTRLX_FORMID_SHIFT;
+ break;
+ case MCDE_DSI_FORMATTER_1:
+ val |= MCDE_CTRLX_FORMTYPE_DSI << MCDE_CTRLX_FORMTYPE_SHIFT;
+ val |= MCDE_CTRLX_FORMID_DSI0CMD << MCDE_CTRLX_FORMID_SHIFT;
+ break;
+ case MCDE_DSI_FORMATTER_2:
+ val |= MCDE_CTRLX_FORMTYPE_DSI << MCDE_CTRLX_FORMTYPE_SHIFT;
+ val |= MCDE_CTRLX_FORMID_DSI1VID << MCDE_CTRLX_FORMID_SHIFT;
+ break;
+ case MCDE_DSI_FORMATTER_3:
+ val |= MCDE_CTRLX_FORMTYPE_DSI << MCDE_CTRLX_FORMTYPE_SHIFT;
+ val |= MCDE_CTRLX_FORMID_DSI1CMD << MCDE_CTRLX_FORMID_SHIFT;
+ break;
+ case MCDE_DSI_FORMATTER_4:
+ val |= MCDE_CTRLX_FORMTYPE_DSI << MCDE_CTRLX_FORMTYPE_SHIFT;
+ val |= MCDE_CTRLX_FORMID_DSI2VID << MCDE_CTRLX_FORMID_SHIFT;
+ break;
+ case MCDE_DSI_FORMATTER_5:
+ val |= MCDE_CTRLX_FORMTYPE_DSI << MCDE_CTRLX_FORMTYPE_SHIFT;
+ val |= MCDE_CTRLX_FORMID_DSI2CMD << MCDE_CTRLX_FORMID_SHIFT;
+ break;
+ case MCDE_DPI_FORMATTER_0:
+ val |= MCDE_CTRLX_FORMTYPE_DPITV << MCDE_CTRLX_FORMTYPE_SHIFT;
+ val |= MCDE_CTRLX_FORMID_DPIA << MCDE_CTRLX_FORMID_SHIFT;
+ break;
+ case MCDE_DPI_FORMATTER_1:
+ val |= MCDE_CTRLX_FORMTYPE_DPITV << MCDE_CTRLX_FORMTYPE_SHIFT;
+ val |= MCDE_CTRLX_FORMID_DPIB << MCDE_CTRLX_FORMID_SHIFT;
+ break;
+ }
writel(val, mcde->regs + ctrl);
/* Blend source with Alpha 0xff on FIFO */
@@ -631,17 +698,54 @@ static void mcde_configure_fifo(struct mcde *mcde, enum mcde_fifo fifo,
0xff << MCDE_CRX0_ALPHABLEND_SHIFT;
writel(val, mcde->regs + cr0);
- /* Set-up from mcde_fmtr_dsi.c, fmtr_dsi_enable_video() */
-
- /* Use the MCDE clock for this FIFO */
- val = MCDE_CRX1_CLKSEL_MCDECLK << MCDE_CRX1_CLKSEL_SHIFT;
+ spin_lock(&mcde->fifo_crx1_lock);
+ val = readl(mcde->regs + cr1);
+ /*
+ * Set-up from mcde_fmtr_dsi.c, fmtr_dsi_enable_video()
+ * FIXME: a different clock needs to be selected for TV out.
+ */
+ if (mcde->dpi_output) {
+ struct drm_connector *connector = drm_panel_bridge_connector(mcde->bridge);
+ u32 bus_format;
+
+ /* Assume RGB888 24 bit if we have no further info */
+ if (!connector->display_info.num_bus_formats) {
+ dev_info(mcde->dev, "panel does not specify bus format, assume RGB888\n");
+ bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+ } else {
+ bus_format = connector->display_info.bus_formats[0];
+ }
- /* TODO: when adding DPI support add OUTBPP etc here */
+ /*
+ * Set up the CDWIN and OUTBPP for the LCD
+ *
+ * FIXME: fill this in if you know the correspondence between the MIPI
+ * DPI specification and the media bus formats.
+ */
+ val &= ~MCDE_CRX1_CDWIN_MASK;
+ val &= ~MCDE_CRX1_OUTBPP_MASK;
+ switch (bus_format) {
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ val |= MCDE_CRX1_CDWIN_24BPP << MCDE_CRX1_CDWIN_SHIFT;
+ val |= MCDE_CRX1_OUTBPP_24BPP << MCDE_CRX1_OUTBPP_SHIFT;
+ break;
+ default:
+ dev_err(mcde->dev, "unknown bus format, assume RGB888\n");
+ val |= MCDE_CRX1_CDWIN_24BPP << MCDE_CRX1_CDWIN_SHIFT;
+ val |= MCDE_CRX1_OUTBPP_24BPP << MCDE_CRX1_OUTBPP_SHIFT;
+ break;
+ }
+ } else {
+ /* Use the MCDE clock for DSI */
+ val &= ~MCDE_CRX1_CLKSEL_MASK;
+ val |= MCDE_CRX1_CLKSEL_MCDECLK << MCDE_CRX1_CLKSEL_SHIFT;
+ }
writel(val, mcde->regs + cr1);
+ spin_unlock(&mcde->fifo_crx1_lock);
};
static void mcde_configure_dsi_formatter(struct mcde *mcde,
- enum mcde_dsi_formatter fmt,
+ enum mcde_formatter fmt,
u32 formatter_frame,
int pkt_size)
{
@@ -681,6 +785,9 @@ static void mcde_configure_dsi_formatter(struct mcde *mcde,
delay0 = MCDE_DSIVID2DELAY0;
delay1 = MCDE_DSIVID2DELAY1;
break;
+ default:
+ dev_err(mcde->dev, "tried to configure a non-DSI formatter as DSI\n");
+ return;
}
/*
@@ -700,7 +807,9 @@ static void mcde_configure_dsi_formatter(struct mcde *mcde,
MCDE_DSICONF0_PACKING_SHIFT;
break;
case MIPI_DSI_FMT_RGB666_PACKED:
- val |= MCDE_DSICONF0_PACKING_RGB666_PACKED <<
+ dev_err(mcde->dev,
+ "we cannot handle the packed RGB666 format\n");
+ val |= MCDE_DSICONF0_PACKING_RGB666 <<
MCDE_DSICONF0_PACKING_SHIFT;
break;
case MIPI_DSI_FMT_RGB565:
@@ -860,73 +969,140 @@ static int mcde_dsi_get_pkt_div(int ppl, int fifo_size)
return 1;
}
-static void mcde_display_enable(struct drm_simple_display_pipe *pipe,
- struct drm_crtc_state *cstate,
- struct drm_plane_state *plane_state)
+static void mcde_setup_dpi(struct mcde *mcde, const struct drm_display_mode *mode,
+ int *fifo_wtrmrk_lvl)
{
- struct drm_crtc *crtc = &pipe->crtc;
- struct drm_plane *plane = &pipe->plane;
- struct drm_device *drm = crtc->dev;
- struct mcde *mcde = to_mcde(drm);
- const struct drm_display_mode *mode = &cstate->mode;
- struct drm_framebuffer *fb = plane->state->fb;
- u32 format = fb->format->format;
- u32 formatter_ppl = mode->hdisplay; /* pixels per line */
- u32 formatter_lpf = mode->vdisplay; /* lines per frame */
- int pkt_size, fifo_wtrmrk;
- int cpp = fb->format->cpp[0];
- int formatter_cpp;
- struct drm_format_name_buf tmp;
- u32 formatter_frame;
- u32 pkt_div;
+ struct drm_connector *connector = drm_panel_bridge_connector(mcde->bridge);
+ u32 hsw, hfp, hbp;
+ u32 vsw, vfp, vbp;
u32 val;
- int ret;
- /* This powers up the entire MCDE block and the DSI hardware */
- ret = regulator_enable(mcde->epod);
- if (ret) {
- dev_err(drm->dev, "can't re-enable EPOD regulator\n");
- return;
- }
+ /* FIXME: we only support LCD, implement TV out */
+ hsw = mode->hsync_end - mode->hsync_start;
+ hfp = mode->hsync_start - mode->hdisplay;
+ hbp = mode->htotal - mode->hsync_end;
+ vsw = mode->vsync_end - mode->vsync_start;
+ vfp = mode->vsync_start - mode->vdisplay;
+ vbp = mode->vtotal - mode->vsync_end;
- dev_info(drm->dev, "enable MCDE, %d x %d format %s\n",
- mode->hdisplay, mode->vdisplay,
- drm_get_format_name(format, &tmp));
- if (!mcde->mdsi) {
- /* TODO: deal with this for non-DSI output */
- dev_err(drm->dev, "no DSI master attached!\n");
- return;
- }
+ dev_info(mcde->dev, "output on DPI LCD from channel A\n");
+ /* Display actual values */
+ dev_info(mcde->dev, "HSW: %d, HFP: %d, HBP: %d, VSW: %d, VFP: %d, VBP: %d\n",
+ hsw, hfp, hbp, vsw, vfp, vbp);
+
+ /*
+ * The pixel fetcher is 128 64-bit words deep = 1024 bytes.
+ * One overlay of 32bpp (4 cpp) assumed, fetch 160 pixels.
+ * 160 * 4 = 640 bytes.
+ */
+ *fifo_wtrmrk_lvl = 640;
/* Set up the main control, watermark level at 7 */
val = 7 << MCDE_CONF0_IFIFOCTRLWTRMRKLVL_SHIFT;
- /* 24 bits DPI: connect LSB Ch B to D[0:7] */
- val |= 3 << MCDE_CONF0_OUTMUX0_SHIFT;
- /* TV out: connect LSB Ch B to D[8:15] */
- val |= 3 << MCDE_CONF0_OUTMUX1_SHIFT;
+
+ /*
+ * This sets up the internal silicon muxing of the DPI
+ * lines. This is how the silicon connects out to the
+ * external pins, then the pins need to be further
+ * configured into "alternate functions" using pin control
+ * to actually get the signals out.
+ *
+ * FIXME: this is hardcoded to the only setting found in
+ * the wild. If we need to use different settings for
+ * different DPI displays, make this parameterizable from
+ * the device tree.
+ */
+ /* 24 bits DPI: connect Ch A LSB to D[0:7] */
+ val |= 0 << MCDE_CONF0_OUTMUX0_SHIFT;
+ /* 24 bits DPI: connect Ch A MID to D[8:15] */
+ val |= 1 << MCDE_CONF0_OUTMUX1_SHIFT;
/* Don't care about this muxing */
val |= 0 << MCDE_CONF0_OUTMUX2_SHIFT;
- /* 24 bits DPI: connect MID Ch B to D[24:31] */
- val |= 4 << MCDE_CONF0_OUTMUX3_SHIFT;
- /* 5: 24 bits DPI: connect MSB Ch B to D[32:39] */
- val |= 5 << MCDE_CONF0_OUTMUX4_SHIFT;
- /* Syncmux bits zero: DPI channel A and B on output pins A and B resp */
+ /* Don't care about this muxing */
+ val |= 0 << MCDE_CONF0_OUTMUX3_SHIFT;
+ /* 24 bits DPI: connect Ch A MSB to D[32:39] */
+ val |= 2 << MCDE_CONF0_OUTMUX4_SHIFT;
+ /* Syncmux bits zero: DPI channel A */
writel(val, mcde->regs + MCDE_CONF0);
- /* Clear any pending interrupts */
- mcde_display_disable_irqs(mcde);
- writel(0, mcde->regs + MCDE_IMSCERR);
- writel(0xFFFFFFFF, mcde->regs + MCDE_RISERR);
+ /* This hammers us into LCD mode */
+ writel(0, mcde->regs + MCDE_TVCRA);
+
+ /* Front porch and sync width */
+ val = (vsw << MCDE_TVBL1_BEL1_SHIFT);
+ val |= (vfp << MCDE_TVBL1_BSL1_SHIFT);
+ writel(val, mcde->regs + MCDE_TVBL1A);
+ /* The vendor driver sets the same value into TVBL2A */
+ writel(val, mcde->regs + MCDE_TVBL2A);
+
+ /* Vertical back porch */
+ val = (vbp << MCDE_TVDVO_DVO1_SHIFT);
+ /* The vendor driver sets the same value into the DVO2 field */
+ val |= (vbp << MCDE_TVDVO_DVO2_SHIFT);
+ writel(val, mcde->regs + MCDE_TVDVOA);
+
+ /* Horizontal back porch, as 0 = 1 cycle we need to subtract 1 */
+ writel((hbp - 1), mcde->regs + MCDE_TVTIM1A);
+
+ /* Horizontal sync width and horizontal front porch, 0 = 1 cycle */
+ val = ((hsw - 1) << MCDE_TVLBALW_LBW_SHIFT);
+ val |= ((hfp - 1) << MCDE_TVLBALW_ALW_SHIFT);
+ writel(val, mcde->regs + MCDE_TVLBALWA);
+
+ /* Blank some TV registers we don't use */
+ writel(0, mcde->regs + MCDE_TVISLA);
+ writel(0, mcde->regs + MCDE_TVBLUA);
+
+ /* Set up sync inversion etc */
+ val = 0;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ val |= MCDE_LCDTIM1B_IHS;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ val |= MCDE_LCDTIM1B_IVS;
+ if (connector->display_info.bus_flags & DRM_BUS_FLAG_DE_LOW)
+ val |= MCDE_LCDTIM1B_IOE;
+ if (connector->display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
+ val |= MCDE_LCDTIM1B_IPC;
+ writel(val, mcde->regs + MCDE_LCDTIM1A);
+}
- dev_info(drm->dev, "output in %s mode, format %dbpp\n",
+static void mcde_setup_dsi(struct mcde *mcde, const struct drm_display_mode *mode,
+ int cpp, int *fifo_wtrmrk_lvl, int *dsi_formatter_frame,
+ int *dsi_pkt_size)
+{
+ u32 formatter_ppl = mode->hdisplay; /* pixels per line */
+ u32 formatter_lpf = mode->vdisplay; /* lines per frame */
+ int formatter_frame;
+ int formatter_cpp;
+ int fifo_wtrmrk;
+ u32 pkt_div;
+ int pkt_size;
+ u32 val;
+
+ dev_info(mcde->dev, "output in %s mode, format %dbpp\n",
(mcde->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO) ?
"VIDEO" : "CMD",
mipi_dsi_pixel_format_to_bpp(mcde->mdsi->format));
formatter_cpp =
mipi_dsi_pixel_format_to_bpp(mcde->mdsi->format) / 8;
- dev_info(drm->dev, "overlay CPP %d bytes, DSI CPP %d bytes\n",
- cpp,
- formatter_cpp);
+ dev_info(mcde->dev, "Overlay CPP: %d bytes, DSI formatter CPP %d bytes\n",
+ cpp, formatter_cpp);
+
+ /* Set up the main control, watermark level at 7 */
+ val = 7 << MCDE_CONF0_IFIFOCTRLWTRMRKLVL_SHIFT;
+
+ /*
+ * This is the internal silicon muxing of the DPI
+ * (parallel display) lines. Since we are not using
+ * this at all (we are using DSI) these are just
+ * dummy values from the vendor tree.
+ */
+ val |= 3 << MCDE_CONF0_OUTMUX0_SHIFT;
+ val |= 3 << MCDE_CONF0_OUTMUX1_SHIFT;
+ val |= 0 << MCDE_CONF0_OUTMUX2_SHIFT;
+ val |= 4 << MCDE_CONF0_OUTMUX3_SHIFT;
+ val |= 5 << MCDE_CONF0_OUTMUX4_SHIFT;
+ writel(val, mcde->regs + MCDE_CONF0);
/* Calculations from mcde_fmtr_dsi.c, fmtr_dsi_enable_video() */
@@ -948,9 +1124,9 @@ static void mcde_display_enable(struct drm_simple_display_pipe *pipe,
/* The FIFO is 640 entries deep on this v3 hardware */
pkt_div = mcde_dsi_get_pkt_div(mode->hdisplay, 640);
}
- dev_dbg(drm->dev, "FIFO watermark after flooring: %d bytes\n",
+ dev_dbg(mcde->dev, "FIFO watermark after flooring: %d bytes\n",
fifo_wtrmrk);
- dev_dbg(drm->dev, "Packet divisor: %d bytes\n", pkt_div);
+ dev_dbg(mcde->dev, "Packet divisor: %d bytes\n", pkt_div);
/* NOTE: pkt_div is 1 for video mode */
pkt_size = (formatter_ppl * formatter_cpp) / pkt_div;
@@ -958,16 +1134,64 @@ static void mcde_display_enable(struct drm_simple_display_pipe *pipe,
if (!(mcde->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO))
pkt_size++;
- dev_dbg(drm->dev, "DSI packet size: %d * %d bytes per line\n",
+ dev_dbg(mcde->dev, "DSI packet size: %d * %d bytes per line\n",
pkt_size, pkt_div);
- dev_dbg(drm->dev, "Overlay frame size: %u bytes\n",
+ dev_dbg(mcde->dev, "Overlay frame size: %u bytes\n",
mode->hdisplay * mode->vdisplay * cpp);
- mcde->stride = mode->hdisplay * cpp;
- dev_dbg(drm->dev, "Overlay line stride: %u bytes\n",
- mcde->stride);
/* NOTE: pkt_div is 1 for video mode */
formatter_frame = pkt_size * pkt_div * formatter_lpf;
- dev_dbg(drm->dev, "Formatter frame size: %u bytes\n", formatter_frame);
+ dev_dbg(mcde->dev, "Formatter frame size: %u bytes\n", formatter_frame);
+
+ *fifo_wtrmrk_lvl = fifo_wtrmrk;
+ *dsi_pkt_size = pkt_size;
+ *dsi_formatter_frame = formatter_frame;
+}
+
+static void mcde_display_enable(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *cstate,
+ struct drm_plane_state *plane_state)
+{
+ struct drm_crtc *crtc = &pipe->crtc;
+ struct drm_plane *plane = &pipe->plane;
+ struct drm_device *drm = crtc->dev;
+ struct mcde *mcde = to_mcde(drm);
+ const struct drm_display_mode *mode = &cstate->mode;
+ struct drm_framebuffer *fb = plane->state->fb;
+ u32 format = fb->format->format;
+ int dsi_pkt_size;
+ int fifo_wtrmrk;
+ int cpp = fb->format->cpp[0];
+ struct drm_format_name_buf tmp;
+ u32 dsi_formatter_frame;
+ u32 val;
+ int ret;
+
+ /* This powers up the entire MCDE block and the DSI hardware */
+ ret = regulator_enable(mcde->epod);
+ if (ret) {
+ dev_err(drm->dev, "can't re-enable EPOD regulator\n");
+ return;
+ }
+
+ dev_info(drm->dev, "enable MCDE, %d x %d format %s\n",
+ mode->hdisplay, mode->vdisplay,
+ drm_get_format_name(format, &tmp));
+
+
+ /* Clear any pending interrupts */
+ mcde_display_disable_irqs(mcde);
+ writel(0, mcde->regs + MCDE_IMSCERR);
+ writel(0xFFFFFFFF, mcde->regs + MCDE_RISERR);
+
+ if (mcde->dpi_output)
+ mcde_setup_dpi(mcde, mode, &fifo_wtrmrk);
+ else
+ mcde_setup_dsi(mcde, mode, cpp, &fifo_wtrmrk,
+ &dsi_formatter_frame, &dsi_pkt_size);
+
+ mcde->stride = mode->hdisplay * cpp;
+ dev_dbg(drm->dev, "Overlay line stride: %u bytes\n",
+ mcde->stride);
/* Drain the FIFO A + channel 0 pipe so we have a clean slate */
mcde_drain_pipe(mcde, MCDE_FIFO_A, MCDE_CHANNEL_0);
@@ -995,29 +1219,47 @@ static void mcde_display_enable(struct drm_simple_display_pipe *pipe,
*/
mcde_configure_channel(mcde, MCDE_CHANNEL_0, MCDE_FIFO_A, mode);
- /* Configure FIFO A to use DSI formatter 0 */
- mcde_configure_fifo(mcde, MCDE_FIFO_A, MCDE_DSI_FORMATTER_0,
- fifo_wtrmrk);
+ if (mcde->dpi_output) {
+ unsigned long lcd_freq;
+
+ /* Configure FIFO A to use DPI formatter 0 */
+ mcde_configure_fifo(mcde, MCDE_FIFO_A, MCDE_DPI_FORMATTER_0,
+ fifo_wtrmrk);
+
+ /* Set up and enable the LCD clock */
+ lcd_freq = clk_round_rate(mcde->fifoa_clk, mode->clock * 1000);
+ ret = clk_set_rate(mcde->fifoa_clk, lcd_freq);
+ if (ret)
+ dev_err(mcde->dev, "failed to set LCD clock rate %lu Hz\n",
+ lcd_freq);
+ ret = clk_prepare_enable(mcde->fifoa_clk);
+ if (ret) {
+ dev_err(mcde->dev, "failed to enable FIFO A DPI clock\n");
+ return;
+ }
+ dev_info(mcde->dev, "LCD FIFO A clk rate %lu Hz\n",
+ clk_get_rate(mcde->fifoa_clk));
+ } else {
+ /* Configure FIFO A to use DSI formatter 0 */
+ mcde_configure_fifo(mcde, MCDE_FIFO_A, MCDE_DSI_FORMATTER_0,
+ fifo_wtrmrk);
- /*
- * This brings up the DSI bridge which is tightly connected
- * to the MCDE DSI formatter.
- *
- * FIXME: if we want to use another formatter, such as DPI,
- * we need to be more elaborate here and select the appropriate
- * bridge.
- */
- mcde_dsi_enable(mcde->bridge);
+ /*
+ * This brings up the DSI bridge which is tightly connected
+ * to the MCDE DSI formatter.
+ */
+ mcde_dsi_enable(mcde->bridge);
- /* Configure the DSI formatter 0 for the DSI panel output */
- mcde_configure_dsi_formatter(mcde, MCDE_DSI_FORMATTER_0,
- formatter_frame, pkt_size);
+ /* Configure the DSI formatter 0 for the DSI panel output */
+ mcde_configure_dsi_formatter(mcde, MCDE_DSI_FORMATTER_0,
+ dsi_formatter_frame, dsi_pkt_size);
+ }
switch (mcde->flow_mode) {
case MCDE_COMMAND_TE_FLOW:
case MCDE_COMMAND_BTA_TE_FLOW:
case MCDE_VIDEO_TE_FLOW:
- /* We are using TE in some comination */
+ /* We are using TE in some combination */
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
val = MCDE_VSCRC_VSPOL;
else
@@ -1069,8 +1311,12 @@ static void mcde_display_disable(struct drm_simple_display_pipe *pipe)
/* Disable FIFO A flow */
mcde_disable_fifo(mcde, MCDE_FIFO_A, true);
- /* This disables the DSI bridge */
- mcde_dsi_disable(mcde->bridge);
+ if (mcde->dpi_output) {
+ clk_disable_unprepare(mcde->fifoa_clk);
+ } else {
+ /* This disables the DSI bridge */
+ mcde_dsi_disable(mcde->bridge);
+ }
event = crtc->state->event;
if (event) {
@@ -1261,6 +1507,10 @@ int mcde_display_init(struct drm_device *drm)
DRM_FORMAT_YUV422,
};
+ ret = mcde_init_clock_divider(mcde);
+ if (ret)
+ return ret;
+
ret = drm_simple_display_pipe_init(drm, &mcde->pipe,
&mcde_display_funcs,
formats, ARRAY_SIZE(formats),
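
The DPI branch of mcde_display_enable() above brings up the pixel clock with the standard clk API sequence: round the requested rate to what the FIFO A divider can produce, program it, then ungate it. A minimal sketch of that sequence, assuming a clk handle standing in for mcde->fifoa_clk and a rate in Hz (mode->clock is in kHz, hence the * 1000 in the hunk); as in the hunk, a failed clk_set_rate() is only logged:

#include <linux/clk.h>
#include <linux/device.h>

static int pixclk_enable(struct device *dev, struct clk *clk,
			 unsigned long rate_hz)
{
	long rate;
	int ret;

	/* ask the clock tree for the closest achievable rate */
	rate = clk_round_rate(clk, rate_hz);
	if (rate < 0)
		return rate;

	ret = clk_set_rate(clk, rate);
	if (ret)
		dev_err(dev, "failed to set rate %ld Hz\n", rate);

	/* ungate; paired with clk_disable_unprepare() at disable time */
	return clk_prepare_enable(clk);
}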
diff --git a/drivers/gpu/drm/mcde/mcde_display_regs.h b/drivers/gpu/drm/mcde/mcde_display_regs.h
index d3ac7ef5ff9a..2ad78c59d627 100644
--- a/drivers/gpu/drm/mcde/mcde_display_regs.h
+++ b/drivers/gpu/drm/mcde/mcde_display_regs.h
@@ -215,6 +215,80 @@
#define MCDE_OVLXCOMP_Z_SHIFT 27
#define MCDE_OVLXCOMP_Z_MASK 0x78000000
+/* DPI/TV configuration registers, channel A and B */
+#define MCDE_TVCRA 0x00000838
+#define MCDE_TVCRB 0x00000A38
+#define MCDE_TVCR_MOD_TV BIT(0) /* 0 = LCD mode */
+#define MCDE_TVCR_INTEREN BIT(1)
+#define MCDE_TVCR_IFIELD BIT(2)
+#define MCDE_TVCR_TVMODE_SDTV_656P (0 << 3)
+#define MCDE_TVCR_TVMODE_SDTV_656P_LE (3 << 3)
+#define MCDE_TVCR_TVMODE_SDTV_656P_BE (4 << 3)
+#define MCDE_TVCR_SDTVMODE_Y0CBY1CR (0 << 6)
+#define MCDE_TVCR_SDTVMODE_CBY0CRY1 (1 << 6)
+#define MCDE_TVCR_AVRGEN BIT(8)
+#define MCDE_TVCR_CKINV BIT(9)
+
+/* TV blanking control register 1, channel A and B */
+#define MCDE_TVBL1A 0x0000083C
+#define MCDE_TVBL1B 0x00000A3C
+#define MCDE_TVBL1_BEL1_SHIFT 0 /* VFP vertical front porch 11 bits */
+#define MCDE_TVBL1_BSL1_SHIFT 16 /* VSW vertical sync pulse width 11 bits */
+
+/* Pixel processing TV start line, channel A and B */
+#define MCDE_TVISLA 0x00000840
+#define MCDE_TVISLB 0x00000A40
+#define MCDE_TVISL_FSL1_SHIFT 0 /* Field 1 identification start line 11 bits */
+#define MCDE_TVISL_FSL2_SHIFT 16 /* Field 2 identification start line 11 bits */
+
+/* Pixel processing TV DVO offset */
+#define MCDE_TVDVOA 0x00000844
+#define MCDE_TVDVOB 0x00000A44
+#define MCDE_TVDVO_DVO1_SHIFT 0 /* VBP vertical back porch 0 = 0 */
+#define MCDE_TVDVO_DVO2_SHIFT 16
+
+/*
+ * Pixel processing TV Timing 1
+ * HBP horizontal back porch 11 bits horizontal offset
+ * 0 = 1 pixel HBP, 255 = 256 pixels: the register holds the desired value minus 1
+ */
+#define MCDE_TVTIM1A 0x0000084C
+#define MCDE_TVTIM1B 0x00000A4C
+
+/* Pixel processing TV LBALW */
+/* 0 = 1 clock cycle, 255 = 256 clock cycles */
+#define MCDE_TVLBALWA 0x00000850
+#define MCDE_TVLBALWB 0x00000A50
+#define MCDE_TVLBALW_LBW_SHIFT 0 /* HSW horizontal sync width, line blanking width 11 bits */
+#define MCDE_TVLBALW_ALW_SHIFT 16 /* HFP horizontal front porch, active line width 11 bits */
+
+/* TV blanking control register 2, channel A and B */
+#define MCDE_TVBL2A 0x00000854
+#define MCDE_TVBL2B 0x00000A54
+#define MCDE_TVBL2_BEL2_SHIFT 0 /* Field 2 blanking end line 11 bits */
+#define MCDE_TVBL2_BSL2_SHIFT 16 /* Field 2 blanking start line 11 bits */
+
+/* Pixel processing TV background */
+#define MCDE_TVBLUA 0x00000858
+#define MCDE_TVBLUB 0x00000A58
+#define MCDE_TVBLU_TVBLU_SHIFT 0 /* 8 bits luminance */
+#define MCDE_TVBLU_TVBCB_SHIFT 8 /* 8 bits Cb chrominance */
+#define MCDE_TVBLU_TVBCR_SHIFT 16 /* 8 bits Cr chrominance */
+
+/* Pixel processing LCD timing 1 */
+#define MCDE_LCDTIM1A 0x00000860
+#define MCDE_LCDTIM1B 0x00000A60
+/* inverted vertical sync pulse for HRTFT, 0 = active low, 1 = active high */
+#define MCDE_LCDTIM1B_IVP BIT(19)
+/* inverted vertical sync, 0 = active high (the normal), 1 = active low */
+#define MCDE_LCDTIM1B_IVS BIT(20)
+/* inverted horizontal sync, 0 = active high (the normal), 1 = active low */
+#define MCDE_LCDTIM1B_IHS BIT(21)
+/* inverted panel clock 0 = rising edge data out, 1 = falling edge data out */
+#define MCDE_LCDTIM1B_IPC BIT(22)
+/* invert output enable 0 = active high, 1 = active low */
+#define MCDE_LCDTIM1B_IOE BIT(23)
+
#define MCDE_CRC 0x00000C00
#define MCDE_CRC_C1EN BIT(2)
#define MCDE_CRC_C2EN BIT(3)
@@ -360,6 +434,7 @@
#define MCDE_CRB1 0x00000A04
#define MCDE_CRX1_PCD_SHIFT 0
#define MCDE_CRX1_PCD_MASK 0x000003FF
+#define MCDE_CRX1_PCD_BITS 10
#define MCDE_CRX1_CLKSEL_SHIFT 10
#define MCDE_CRX1_CLKSEL_MASK 0x00001C00
#define MCDE_CRX1_CLKSEL_CLKPLL72 0
@@ -421,8 +496,20 @@
#define MCDE_ROTACONF 0x0000087C
#define MCDE_ROTBCONF 0x00000A7C
+/* Synchronization event configuration */
#define MCDE_SYNCHCONFA 0x00000880
#define MCDE_SYNCHCONFB 0x00000A80
+#define MCDE_SYNCHCONF_HWREQVEVENT_SHIFT 0
+#define MCDE_SYNCHCONF_HWREQVEVENT_VSYNC (0 << 0)
+#define MCDE_SYNCHCONF_HWREQVEVENT_BACK_PORCH (1 << 0)
+#define MCDE_SYNCHCONF_HWREQVEVENT_ACTIVE_VIDEO (2 << 0)
+#define MCDE_SYNCHCONF_HWREQVEVENT_FRONT_PORCH (3 << 0)
+#define MCDE_SYNCHCONF_HWREQVCNT_SHIFT 2 /* 14 bits */
+#define MCDE_SYNCHCONF_SWINTVEVENT_VSYNC (0 << 16)
+#define MCDE_SYNCHCONF_SWINTVEVENT_BACK_PORCH (1 << 16)
+#define MCDE_SYNCHCONF_SWINTVEVENT_ACTIVE_VIDEO (2 << 16)
+#define MCDE_SYNCHCONF_SWINTVEVENT_FRONT_PORCH (3 << 16)
+#define MCDE_SYNCHCONF_SWINTVCNT_SHIFT 18 /* 14 bits */
/* Channel A+B control registers */
#define MCDE_CTRLA 0x00000884
@@ -465,8 +552,8 @@
#define MCDE_DSICONF0_PACKING_MASK 0x00700000
#define MCDE_DSICONF0_PACKING_RGB565 0
#define MCDE_DSICONF0_PACKING_RGB666 1
-#define MCDE_DSICONF0_PACKING_RGB666_PACKED 2
-#define MCDE_DSICONF0_PACKING_RGB888 3
+#define MCDE_DSICONF0_PACKING_RGB888 2
+#define MCDE_DSICONF0_PACKING_BGR888 3
#define MCDE_DSICONF0_PACKING_HDTV 4
#define MCDE_DSIVID0FRAME 0x00000E04
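
The new DPI/TV and synchronization macros above are plain shift and bit definitions, so register values are composed by OR-ing shifted fields. A hypothetical example of programming the channel A sync configuration; the field choices here are made up for illustration, not taken from the patch:

#include <linux/io.h>

static void mcde_sync_example(struct mcde *mcde)
{
	u32 val;

	/* HW request on the front porch, SW interrupt on vsync */
	val = MCDE_SYNCHCONF_HWREQVEVENT_FRONT_PORCH |
	      (1 << MCDE_SYNCHCONF_HWREQVCNT_SHIFT) |
	      MCDE_SYNCHCONF_SWINTVEVENT_VSYNC |
	      (1 << MCDE_SYNCHCONF_SWINTVCNT_SHIFT);
	writel(val, mcde->regs + MCDE_SYNCHCONFA);
}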
diff --git a/drivers/gpu/drm/mcde/mcde_drm.h b/drivers/gpu/drm/mcde/mcde_drm.h
index 8253e2f9993e..ecb70b4b737c 100644
--- a/drivers/gpu/drm/mcde/mcde_drm.h
+++ b/drivers/gpu/drm/mcde/mcde_drm.h
@@ -62,6 +62,8 @@ enum mcde_flow_mode {
MCDE_VIDEO_TE_FLOW,
/* Video mode with the formatter itself as sync source */
MCDE_VIDEO_FORMATTER_FLOW,
+ /* DPI video with the formatter itself as sync source */
+ MCDE_DPI_FORMATTER_FLOW,
};
struct mcde {
@@ -72,6 +74,7 @@ struct mcde {
struct drm_connector *connector;
struct drm_simple_display_pipe pipe;
struct mipi_dsi_device *mdsi;
+ bool dpi_output;
s16 stride;
enum mcde_flow_mode flow_mode;
unsigned int flow_active;
@@ -82,6 +85,11 @@ struct mcde {
struct clk *mcde_clk;
struct clk *lcd_clk;
struct clk *hdmi_clk;
+ /* Handles to the clock dividers for FIFO A and B */
+ struct clk *fifoa_clk;
+ struct clk *fifob_clk;
+ /* Locks the MCDE FIFO control register A and B */
+ spinlock_t fifo_crx1_lock;
struct regulator *epod;
struct regulator *vana;
@@ -105,4 +113,6 @@ void mcde_display_irq(struct mcde *mcde);
void mcde_display_disable_irqs(struct mcde *mcde);
int mcde_display_init(struct drm_device *drm);
+int mcde_init_clock_divider(struct mcde *mcde);
+
#endif /* _MCDE_DRM_H_ */
diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c
index 9d25181bd7e2..e60566a5739c 100644
--- a/drivers/gpu/drm/mcde/mcde_drv.c
+++ b/drivers/gpu/drm/mcde/mcde_drv.c
@@ -22,13 +22,13 @@
* The hardware has four display pipes, and the layout is a little
* bit like this::
*
- * Memory -> Overlay -> Channel -> FIFO -> 5 formatters -> DSI/DPI
- * External 0..5 0..3 A,B, 3 x DSI bridge
+ * Memory -> Overlay -> Channel -> FIFO -> 8 formatters -> DSI/DPI
+ * External 0..5 0..3 A,B, 6 x DSI bridge
* source 0..9 C0,C1 2 x DPI
*
 * FIFOs A and B are for LCD and HDMI while FIFOs C0/C1 are for
* panels with embedded buffer.
- * 3 of the formatters are for DSI.
+ * 6 of the formatters are for DSI, arranged in 3 VID/CMD pairs.
* 2 of the formatters are for DPI.
*
* Behind the formatters are the DSI or DPI ports that route to
@@ -130,9 +130,37 @@ static int mcde_modeset_init(struct drm_device *drm)
struct mcde *mcde = to_mcde(drm);
int ret;
+ /*
+ * If no other bridge was found, check if we have a DPI panel or
+ * any other bridge connected directly to the MCDE DPI output.
+ * If a DSI bridge is found, DSI will take precedence.
+ *
+ * TODO: more elaborate bridge selection if we have more than one
+ * thing attached to the system.
+ */
if (!mcde->bridge) {
- dev_err(drm->dev, "no display output bridge yet\n");
- return -EPROBE_DEFER;
+ struct drm_panel *panel;
+ struct drm_bridge *bridge;
+
+ ret = drm_of_find_panel_or_bridge(drm->dev->of_node,
+ 0, 0, &panel, &bridge);
+ if (ret) {
+ dev_err(drm->dev,
+ "Could not locate any output bridge or panel\n");
+ return ret;
+ }
+ if (panel) {
+ bridge = drm_panel_bridge_add_typed(panel,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(bridge)) {
+ dev_err(drm->dev,
+ "Could not connect panel bridge\n");
+ return PTR_ERR(bridge);
+ }
+ }
+ mcde->dpi_output = true;
+ mcde->bridge = bridge;
+ mcde->flow_mode = MCDE_DPI_FORMATTER_FLOW;
}
mode_config = &drm->mode_config;
@@ -156,13 +184,7 @@ static int mcde_modeset_init(struct drm_device *drm)
return ret;
}
- /*
- * Attach the DSI bridge
- *
- * TODO: when adding support for the DPI bridge or several DSI bridges,
- * we selectively connect the bridge(s) here instead of this simple
- * attachment.
- */
+ /* Attach the bridge. */
ret = drm_simple_display_pipe_attach_bridge(&mcde->pipe,
mcde->bridge);
if (ret) {
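
drm_of_find_panel_or_bridge() fills in at most one of its two out-pointers, which is why the probe path above normalizes a bare panel into a bridge before storing it. A condensed sketch of the pattern, with port/endpoint 0,0 and the DPI connector type as in the hunk; 'np' stands in for drm->dev->of_node:

#include <linux/err.h>
#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>

static struct drm_bridge *find_output_bridge(struct device_node *np)
{
	struct drm_panel *panel;
	struct drm_bridge *bridge;
	int ret;

	ret = drm_of_find_panel_or_bridge(np, 0, 0, &panel, &bridge);
	if (ret)
		return ERR_PTR(ret);	/* often -EPROBE_DEFER early in boot */

	if (panel) {
		/* wrap the panel so callers only ever deal with bridges */
		bridge = drm_panel_bridge_add_typed(panel,
						    DRM_MODE_CONNECTOR_DPI);
	}

	return bridge;	/* may be an ERR_PTR from the panel wrapping */
}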
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index bfe994230543..bdd37eadecd5 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -829,8 +829,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
mtk_crtc->cmdq_client =
cmdq_mbox_create(mtk_crtc->mmsys_dev,
- drm_crtc_index(&mtk_crtc->base),
- 2000);
+ drm_crtc_index(&mtk_crtc->base));
if (IS_ERR(mtk_crtc->cmdq_client)) {
dev_dbg(dev, "mtk_crtc %d failed to create mailbox client, writing register by CPU now\n",
drm_crtc_index(&mtk_crtc->base));
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
index 1d9e00b69462..5aa52b7afeec 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
@@ -7,6 +7,7 @@
#define MTK_DRM_DDP_COMP_H
#include <linux/io.h>
+#include <linux/soc/mediatek/mtk-mmsys.h>
struct device;
struct device_node;
@@ -35,39 +36,6 @@ enum mtk_ddp_comp_type {
MTK_DDP_COMP_TYPE_MAX,
};
-enum mtk_ddp_comp_id {
- DDP_COMPONENT_AAL0,
- DDP_COMPONENT_AAL1,
- DDP_COMPONENT_BLS,
- DDP_COMPONENT_CCORR,
- DDP_COMPONENT_COLOR0,
- DDP_COMPONENT_COLOR1,
- DDP_COMPONENT_DITHER,
- DDP_COMPONENT_DPI0,
- DDP_COMPONENT_DPI1,
- DDP_COMPONENT_DSI0,
- DDP_COMPONENT_DSI1,
- DDP_COMPONENT_DSI2,
- DDP_COMPONENT_DSI3,
- DDP_COMPONENT_GAMMA,
- DDP_COMPONENT_OD0,
- DDP_COMPONENT_OD1,
- DDP_COMPONENT_OVL0,
- DDP_COMPONENT_OVL_2L0,
- DDP_COMPONENT_OVL_2L1,
- DDP_COMPONENT_OVL1,
- DDP_COMPONENT_PWM0,
- DDP_COMPONENT_PWM1,
- DDP_COMPONENT_PWM2,
- DDP_COMPONENT_RDMA0,
- DDP_COMPONENT_RDMA1,
- DDP_COMPONENT_RDMA2,
- DDP_COMPONENT_UFOE,
- DDP_COMPONENT_WDMA0,
- DDP_COMPONENT_WDMA1,
- DDP_COMPONENT_ID_MAX,
-};
-
struct mtk_ddp_comp;
struct cmdq_pkt;
struct mtk_ddp_comp_funcs {
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index 7f8eea494147..aad75a22dc33 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -145,8 +145,6 @@ struct meson_dw_hdmi {
struct reset_control *hdmitx_apb;
struct reset_control *hdmitx_ctrl;
struct reset_control *hdmitx_phy;
- struct clk *hdmi_pclk;
- struct clk *venci_clk;
struct regulator *hdmi_supply;
u32 irq_stat;
struct dw_hdmi *hdmi;
@@ -946,6 +944,29 @@ static void meson_disable_regulator(void *data)
regulator_disable(data);
}
+static void meson_disable_clk(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
+static int meson_enable_clk(struct device *dev, const char *name)
+{
+ struct clk *clk;
+ int ret;
+
+ clk = devm_clk_get(dev, name);
+ if (IS_ERR(clk)) {
+ dev_err(dev, "Unable to get %s pclk\n", name);
+ return PTR_ERR(clk);
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (!ret)
+ ret = devm_add_action_or_reset(dev, meson_disable_clk, clk);
+
+ return ret;
+}
+
static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
void *data)
{
@@ -1026,19 +1047,17 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
if (IS_ERR(meson_dw_hdmi->hdmitx))
return PTR_ERR(meson_dw_hdmi->hdmitx);
- meson_dw_hdmi->hdmi_pclk = devm_clk_get(dev, "isfr");
- if (IS_ERR(meson_dw_hdmi->hdmi_pclk)) {
- dev_err(dev, "Unable to get HDMI pclk\n");
- return PTR_ERR(meson_dw_hdmi->hdmi_pclk);
- }
- clk_prepare_enable(meson_dw_hdmi->hdmi_pclk);
+ ret = meson_enable_clk(dev, "isfr");
+ if (ret)
+ return ret;
- meson_dw_hdmi->venci_clk = devm_clk_get(dev, "venci");
- if (IS_ERR(meson_dw_hdmi->venci_clk)) {
- dev_err(dev, "Unable to get venci clk\n");
- return PTR_ERR(meson_dw_hdmi->venci_clk);
- }
- clk_prepare_enable(meson_dw_hdmi->venci_clk);
+ ret = meson_enable_clk(dev, "iahb");
+ if (ret)
+ return ret;
+
+ ret = meson_enable_clk(dev, "venci");
+ if (ret)
+ return ret;
dw_plat_data->regm = devm_regmap_init(dev, NULL, meson_dw_hdmi,
&meson_dw_hdmi_regmap_config);
@@ -1071,6 +1090,8 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
encoder->possible_crtcs = BIT(0);
+ meson_dw_hdmi_init(meson_dw_hdmi);
+
DRM_DEBUG_DRIVER("encoder initialized\n");
/* Bridge / Connector */
@@ -1095,8 +1116,6 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
if (IS_ERR(meson_dw_hdmi->hdmi))
return PTR_ERR(meson_dw_hdmi->hdmi);
- meson_dw_hdmi_init(meson_dw_hdmi);
-
next_bridge = of_drm_find_bridge(pdev->dev.of_node);
if (next_bridge)
drm_bridge_attach(encoder, next_bridge,
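
meson_enable_clk() above relies on devm_add_action_or_reset(), which runs the undo callback at unbind and also runs it immediately if registering the action itself fails, so no later error path can leak an enabled clock. A minimal sketch of the same pattern for any resource with a one-call undo:

#include <linux/clk.h>
#include <linux/device.h>

static void undo_clk(void *data)
{
	clk_disable_unprepare(data);
}

/* after this returns 0, no explicit cleanup is ever needed */
static int get_and_enable_clk(struct device *dev, const char *name)
{
	struct clk *clk = devm_clk_get(dev, name);
	int ret;

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	return devm_add_action_or_reset(dev, undo_clk, clk);
}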
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 0f07f259503d..a977c9f49719 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -37,7 +37,6 @@ static const struct drm_driver mgag200_driver = {
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
- .gem_create_object = drm_gem_shmem_create_object_cached,
DRM_GEM_SHMEM_DRIVER_OPS,
};
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
index 7e82c41a85f1..bdc989183c64 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
@@ -534,8 +534,10 @@ struct msm_gpu *a2xx_gpu_init(struct drm_device *dev)
if (!gpu->aspace) {
dev_err(dev->dev, "No memory protection without MMU\n");
- ret = -ENXIO;
- goto fail;
+ if (!allow_vram_carveout) {
+ ret = -ENXIO;
+ goto fail;
+ }
}
return gpu;
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 93da6683a866..4534633fe7cd 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -564,8 +564,10 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
* implement a cmdstream validator.
*/
DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n");
- ret = -ENXIO;
- goto fail;
+ if (!allow_vram_carveout) {
+ ret = -ENXIO;
+ goto fail;
+ }
}
icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem");
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index c0be3a0f36b2..82bebb40234d 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -692,8 +692,10 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
* implement a cmdstream validator.
*/
DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n");
- ret = -ENXIO;
- goto fail;
+ if (!allow_vram_carveout) {
+ ret = -ENXIO;
+ goto fail;
+ }
}
icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem");
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index e6703ae98760..05e0ef58fe32 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -134,7 +134,7 @@ void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
if (!gmu->legacy) {
a6xx_hfi_set_freq(gmu, perf_index);
- dev_pm_opp_set_bw(&gpu->pdev->dev, opp);
+ dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
pm_runtime_put(gmu->dev);
return;
}
@@ -158,7 +158,7 @@ void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
if (ret)
dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret);
- dev_pm_opp_set_bw(&gpu->pdev->dev, opp);
+ dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
pm_runtime_put(gmu->dev);
}
@@ -866,7 +866,7 @@ static void a6xx_gmu_set_initial_bw(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
if (IS_ERR_OR_NULL(gpu_opp))
return;
- dev_pm_opp_set_bw(&gpu->pdev->dev, gpu_opp);
+ dev_pm_opp_set_opp(&gpu->pdev->dev, gpu_opp);
dev_pm_opp_put(gpu_opp);
}
@@ -1072,7 +1072,7 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
a6xx_gmu_shutdown(gmu);
/* Remove the bus vote */
- dev_pm_opp_set_bw(&gpu->pdev->dev, NULL);
+ dev_pm_opp_set_opp(&gpu->pdev->dev, NULL);
/*
* Make sure the GX domain is off before turning off the GMU (CX)
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 87c8b033ad1a..12e75ba360f9 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -18,6 +18,10 @@ bool snapshot_debugbus = false;
MODULE_PARM_DESC(snapshot_debugbus, "Include debugbus sections in GPU devcoredump (if not fused off)");
module_param_named(snapshot_debugbus, snapshot_debugbus, bool, 0600);
+bool allow_vram_carveout = false;
+MODULE_PARM_DESC(allow_vram_carveout, "Allow using VRAM Carveout, in place of IOMMU");
+module_param_named(allow_vram_carveout, allow_vram_carveout, bool, 0600);
+
static const struct adreno_info gpulist[] = {
{
.rev = ADRENO_REV(2, 0, 0, 0),
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 6cf9975e951e..f09175698827 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -191,8 +191,6 @@ adreno_iommu_create_address_space(struct msm_gpu *gpu,
struct platform_device *pdev)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
- struct io_pgtable_domain_attr pgtbl_cfg;
struct iommu_domain *iommu;
struct msm_mmu *mmu;
struct msm_gem_address_space *aspace;
@@ -202,13 +200,18 @@ adreno_iommu_create_address_space(struct msm_gpu *gpu,
if (!iommu)
return NULL;
- /*
- * This allows GPU to set the bus attributes required to use system
- * cache on behalf of the iommu page table walker.
- */
- if (!IS_ERR(a6xx_gpu->htw_llc_slice)) {
- pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_ARM_OUTER_WBWA;
- iommu_domain_set_attr(iommu, DOMAIN_ATTR_IO_PGTABLE_CFG, &pgtbl_cfg);
+
+ if (adreno_is_a6xx(adreno_gpu)) {
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct io_pgtable_domain_attr pgtbl_cfg;
+ /*
+ * This allows GPU to set the bus attributes required to use system
+ * cache on behalf of the iommu page table walker.
+ */
+ if (!IS_ERR(a6xx_gpu->htw_llc_slice)) {
+ pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_ARM_OUTER_WBWA;
+ iommu_domain_set_attr(iommu, DOMAIN_ATTR_IO_PGTABLE_CFG, &pgtbl_cfg);
+ }
}
mmu = msm_iommu_new(&pdev->dev, iommu);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index c3775f79525a..b3d9a333591b 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -18,6 +18,7 @@
#include "adreno_pm4.xml.h"
extern bool snapshot_debugbus;
+extern bool allow_vram_carveout;
enum {
ADRENO_FW_PM4 = 0,
@@ -211,6 +212,11 @@ static inline int adreno_is_a540(struct adreno_gpu *gpu)
return gpu->revn == 540;
}
+static inline bool adreno_is_a6xx(struct adreno_gpu *gpu)
+{
+ return ((gpu->revn < 700 && gpu->revn > 599));
+}
+
static inline int adreno_is_a618(struct adreno_gpu *gpu)
{
return gpu->revn == 618;
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index 6e971d552911..3bc7ed21de28 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -693,6 +693,13 @@ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
return 0;
}
+ if (state == ST_CONNECT_PENDING) {
+ /* wait until ST_CONNECTED */
+ dp_add_event(dp, EV_IRQ_HPD_INT, 0, 1); /* delay = 1 */
+ mutex_unlock(&dp->event_mutex);
+ return 0;
+ }
+
ret = dp_display_usbpd_attention_cb(&dp->pdev->dev);
if (ret == -ECONNRESET) { /* cable unplugged */
dp->core_initialized = false;
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index 97dca3e378b7..d1780bcac8cc 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -167,12 +167,18 @@ int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
rc = dp_panel_read_dpcd(dp_panel);
+ if (rc) {
+ DRM_ERROR("read dpcd failed %d\n", rc);
+ return rc;
+ }
+
bw_code = drm_dp_link_rate_to_bw_code(dp_panel->link_info.rate);
- if (rc || !is_link_rate_valid(bw_code) ||
+ if (!is_link_rate_valid(bw_code) ||
!is_lane_count_valid(dp_panel->link_info.num_lanes) ||
(bw_code > dp_panel->max_bw_code)) {
- DRM_ERROR("read dpcd failed %d\n", rc);
- return rc;
+ DRM_ERROR("Illegal link rate=%d lane=%d\n", dp_panel->link_info.rate,
+ dp_panel->link_info.num_lanes);
+ return -EINVAL;
}
if (dp_panel->dfp_present) {
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 535a0263ceeb..108c405e03dd 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -457,14 +457,14 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
drm_mode_config_init(ddev);
- /* Bind all our sub-components: */
- ret = component_bind_all(dev, ddev);
+ ret = msm_init_vram(ddev);
if (ret)
goto err_destroy_mdss;
- ret = msm_init_vram(ddev);
+ /* Bind all our sub-components: */
+ ret = component_bind_all(dev, ddev);
if (ret)
- goto err_msm_uninit;
+ goto err_destroy_mdss;
dma_set_max_seg_size(dev, UINT_MAX);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 82cbaf337b50..9d10739c4eb2 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -96,6 +96,8 @@ static struct page **get_pages(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ WARN_ON(!msm_gem_is_locked(obj));
+
if (!msm_obj->pages) {
struct drm_device *dev = obj->dev;
struct page **p;
@@ -211,10 +213,8 @@ int msm_gem_mmap_obj(struct drm_gem_object *obj,
* address_space (so unmap_mapping_range does what we want,
* in particular in the case of mmap'd dmabufs)
*/
- fput(vma->vm_file);
- get_file(obj->filp);
vma->vm_pgoff = 0;
- vma->vm_file = obj->filp;
+ vma_set_file(vma, obj->filp);
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
}
@@ -990,6 +990,8 @@ void msm_gem_free_object(struct drm_gem_object *obj)
if (msm_obj->pages)
kvfree(msm_obj->pages);
+ put_iova_vmas(obj);
+
/* dma_buf_detach() grabs resv lock, so we need to unlock
* prior to drm_prime_gem_destroy
*/
@@ -999,11 +1001,10 @@ void msm_gem_free_object(struct drm_gem_object *obj)
} else {
msm_gem_vunmap(obj);
put_pages(obj);
+ put_iova_vmas(obj);
msm_gem_unlock(obj);
}
- put_iova_vmas(obj);
-
drm_gem_object_release(obj);
kfree(msm_obj);
@@ -1117,6 +1118,8 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
struct msm_gem_vma *vma;
struct page **pages;
+ drm_gem_private_object_init(dev, obj, size);
+
msm_gem_lock(obj);
vma = add_vma(obj, NULL);
@@ -1128,9 +1131,9 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
to_msm_bo(obj)->vram_node = &vma->node;
- drm_gem_private_object_init(dev, obj, size);
-
+ msm_gem_lock(obj);
pages = get_pages(obj);
+ msm_gem_unlock(obj);
if (IS_ERR(pages)) {
ret = PTR_ERR(pages);
goto fail;
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 22ac7c692a81..50d881794758 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -139,7 +139,6 @@ static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
static const struct iommu_flush_ops null_tlb_ops = {
.tlb_flush_all = msm_iommu_tlb_flush_all,
.tlb_flush_walk = msm_iommu_tlb_flush_walk,
- .tlb_flush_leaf = msm_iommu_tlb_flush_walk,
.tlb_add_page = msm_iommu_tlb_add_page,
};
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index 6faf17b6408d..6da93551e2e5 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -134,11 +134,8 @@ static int mxsfb_attach_bridge(struct mxsfb_drm_private *mxsfb)
return -ENODEV;
ret = drm_bridge_attach(&mxsfb->encoder, bridge, NULL, 0);
- if (ret) {
- DRM_DEV_ERROR(drm->dev,
- "failed to attach bridge: %d\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(drm->dev, ret, "Failed to attach bridge\n");
mxsfb->bridge = bridge;
@@ -212,7 +209,8 @@ static int mxsfb_load(struct drm_device *drm,
ret = mxsfb_attach_bridge(mxsfb);
if (ret) {
- dev_err(drm->dev, "Cannot connect bridge: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(drm->dev, "Cannot connect bridge: %d\n", ret);
goto err_vblank;
}
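
dev_err_probe(), used in the first mxsfb hunk, returns its error argument unchanged, logs for real failures, and stays quiet on -EPROBE_DEFER, recording the deferral reason for debugfs instead; the second hunk open-codes the same policy around a plain dev_err(). The two equivalent forms, side by side:

#include <linux/device.h>
#include <linux/errno.h>

/* open-coded: suppress noise while waiting for a dependency */
static int report_verbose(struct device *dev, int ret)
{
	if (ret != -EPROBE_DEFER)
		dev_err(dev, "cannot attach bridge: %d\n", ret);
	return ret;
}

/* one line: dev_err_probe() applies the -EPROBE_DEFER policy itself */
static int report_concise(struct device *dev, int ret)
{
	return dev_err_probe(dev, ret, "Failed to attach bridge\n");
}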
diff --git a/drivers/gpu/drm/nouveau/dispnv50/Kbuild b/drivers/gpu/drm/nouveau/dispnv50/Kbuild
index 6fdddb266fb1..4488e1c061b3 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/Kbuild
+++ b/drivers/gpu/drm/nouveau/dispnv50/Kbuild
@@ -37,6 +37,7 @@ nouveau-y += dispnv50/wimmc37b.o
nouveau-y += dispnv50/wndw.o
nouveau-y += dispnv50/wndwc37e.o
nouveau-y += dispnv50/wndwc57e.o
+nouveau-y += dispnv50/wndwc67e.o
nouveau-y += dispnv50/base.o
nouveau-y += dispnv50/base507c.o
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base507c.c b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
index 302d4e6fc52f..788db043a342 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base507c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
@@ -88,7 +88,11 @@ base507c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
NVVAL(NV507C, SET_CONVERSION, OFS, 0x64));
} else {
PUSH_MTHD(push, NV507C, SET_PROCESSING,
- NVDEF(NV507C, SET_PROCESSING, USE_GAIN_OFS, DISABLE));
+ NVDEF(NV507C, SET_PROCESSING, USE_GAIN_OFS, DISABLE),
+
+ SET_CONVERSION,
+ NVVAL(NV507C, SET_CONVERSION, GAIN, 0) |
+ NVVAL(NV507C, SET_CONVERSION, OFS, 0));
}
PUSH_MTHD(push, NV507C, SURFACE_SET_OFFSET(0, 0), asyw->image.offset[0] >> 8);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base827c.c b/drivers/gpu/drm/nouveau/dispnv50/base827c.c
index 18d34096f125..093d4ba6910e 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base827c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base827c.c
@@ -49,7 +49,11 @@ base827c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
NVVAL(NV827C, SET_CONVERSION, OFS, 0x64));
} else {
PUSH_MTHD(push, NV827C, SET_PROCESSING,
- NVDEF(NV827C, SET_PROCESSING, USE_GAIN_OFS, DISABLE));
+ NVDEF(NV827C, SET_PROCESSING, USE_GAIN_OFS, DISABLE),
+
+ SET_CONVERSION,
+ NVVAL(NV827C, SET_CONVERSION, GAIN, 0) |
+ NVVAL(NV827C, SET_CONVERSION, OFS, 0));
}
PUSH_MTHD(push, NV827C, SURFACE_SET_OFFSET(0, 0), asyw->image.offset[0] >> 8,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core.c b/drivers/gpu/drm/nouveau/dispnv50/core.c
index 27ea3f34706d..abefc2343443 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/core.c
@@ -42,6 +42,7 @@ nv50_core_new(struct nouveau_drm *drm, struct nv50_core **pcore)
int version;
int (*new)(struct nouveau_drm *, s32, struct nv50_core **);
} cores[] = {
+ { GA102_DISP_CORE_CHANNEL_DMA, 0, corec57d_new },
{ TU102_DISP_CORE_CHANNEL_DMA, 0, corec57d_new },
{ GV100_DISP_CORE_CHANNEL_DMA, 0, corec37d_new },
{ GP102_DISP_CORE_CHANNEL_DMA, 0, core917d_new },
diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs.c b/drivers/gpu/drm/nouveau/dispnv50/curs.c
index 121c24a18f11..31d8b2e4791d 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/curs.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/curs.c
@@ -31,6 +31,7 @@ nv50_curs_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw)
int version;
int (*new)(struct nouveau_drm *, int, s32, struct nv50_wndw **);
} curses[] = {
+ { GA102_DISP_CURSOR, 0, cursc37a_new },
{ TU102_DISP_CURSOR, 0, cursc37a_new },
{ GV100_DISP_CURSOR, 0, cursc37a_new },
{ GK104_DISP_CURSOR, 0, curs907a_new },
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 36d6b6093d16..5f4f09a601d4 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -32,6 +32,7 @@
#include <linux/hdmi.h>
#include <linux/component.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
@@ -221,7 +222,7 @@ nv50_dmac_wait(struct nvif_push *push, u32 size)
int
nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
- const s32 *oclass, u8 head, void *data, u32 size, u64 syncbuf,
+ const s32 *oclass, u8 head, void *data, u32 size, s64 syncbuf,
struct nv50_dmac *dmac)
{
struct nouveau_cli *cli = (void *)device->object.client;
@@ -270,7 +271,7 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
if (ret)
return ret;
- if (!syncbuf)
+ if (syncbuf < 0)
return 0;
ret = nvif_object_ctor(&dmac->base.user, "kmsSyncCtxDma", NV50_DISP_HANDLE_SYNCBUF,
@@ -1161,8 +1162,10 @@ nv50_msto_new(struct drm_device *dev, struct nv50_head *head, int id)
static struct drm_encoder *
nv50_mstc_atomic_best_encoder(struct drm_connector *connector,
- struct drm_connector_state *connector_state)
+ struct drm_atomic_state *state)
{
+ struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state,
+ connector);
struct nv50_mstc *mstc = nv50_mstc(connector);
struct drm_crtc *crtc = connector_state->crtc;
@@ -2660,6 +2663,14 @@ nv50_display_create(struct drm_device *dev)
else
nouveau_display(dev)->format_modifiers = disp50xx_modifiers;
+ if (disp->disp->object.oclass >= GK104_DISP) {
+ dev->mode_config.cursor_width = 256;
+ dev->mode_config.cursor_height = 256;
+ } else {
+ dev->mode_config.cursor_width = 64;
+ dev->mode_config.cursor_height = 64;
+ }
+
/* create crtc objects to represent the hw heads */
if (disp->disp->object.oclass >= GV100_DISP)
crtcs = nvif_rd32(&device->object, 0x610060) & 0xff;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.h b/drivers/gpu/drm/nouveau/dispnv50/disp.h
index 92bddc083617..38dec11e7dda 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.h
@@ -95,7 +95,7 @@ struct nv50_outp_atom {
int nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
const s32 *oclass, u8 head, void *data, u32 size,
- u64 syncbuf, struct nv50_dmac *dmac);
+ s64 syncbuf, struct nv50_dmac *dmac);
void nv50_dmac_destroy(struct nv50_dmac *);
/*
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head917d.c b/drivers/gpu/drm/nouveau/dispnv50/head917d.c
index a5d827403660..ea9f8667305e 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head917d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head917d.c
@@ -22,6 +22,7 @@
#include "head.h"
#include "core.h"
+#include "nvif/push.h"
#include <nvif/push507c.h>
#include <nvhw/class/cl917d.h>
@@ -73,6 +74,31 @@ head917d_base(struct nv50_head *head, struct nv50_head_atom *asyh)
return 0;
}
+static int
+head917d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ ret = PUSH_WAIT(push, 5);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NV917D, HEAD_SET_CONTROL_CURSOR(i),
+ NVDEF(NV917D, HEAD_SET_CONTROL_CURSOR, ENABLE, ENABLE) |
+ NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, FORMAT, asyh->curs.format) |
+ NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, SIZE, asyh->curs.layout) |
+ NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_X, 0) |
+ NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_Y, 0) |
+ NVDEF(NV917D, HEAD_SET_CONTROL_CURSOR, COMPOSITION, ALPHA_BLEND),
+
+ HEAD_SET_OFFSET_CURSOR(i), asyh->curs.offset >> 8);
+
+ PUSH_MTHD(push, NV917D, HEAD_SET_CONTEXT_DMA_CURSOR(i), asyh->curs.handle);
+ return 0;
+}
+
int
head917d_curs_layout(struct nv50_head *head, struct nv50_wndw_atom *asyw,
struct nv50_head_atom *asyh)
@@ -101,7 +127,7 @@ head917d = {
.core_clr = head907d_core_clr,
.curs_layout = head917d_curs_layout,
.curs_format = head507d_curs_format,
- .curs_set = head907d_curs_set,
+ .curs_set = head917d_curs_set,
.curs_clr = head907d_curs_clr,
.base = head917d_base,
.ovly = head907d_ovly,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wimm.c b/drivers/gpu/drm/nouveau/dispnv50/wimm.c
index a1ac153d5e98..566fbddfc8d7 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wimm.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wimm.c
@@ -31,6 +31,7 @@ nv50_wimm_init(struct nouveau_drm *drm, struct nv50_wndw *wndw)
int version;
int (*init)(struct nouveau_drm *, s32, struct nv50_wndw *);
} wimms[] = {
+ { GA102_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
{ TU102_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
{ GV100_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
{}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c b/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
index 685b70871324..b390029c69ec 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
@@ -76,7 +76,7 @@ wimmc37b_init_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
int ret;
ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
- &oclass, 0, &args, sizeof(args), 0,
+ &oclass, 0, &args, sizeof(args), -1,
&wndw->wimm);
if (ret) {
NV_ERROR(drm, "wimm%04x allocation failed: %d\n", oclass, ret);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index 0356474ad6f6..271de3a63f21 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -702,6 +702,11 @@ nv50_wndw_init(struct nv50_wndw *wndw)
nvif_notify_get(&wndw->notify);
}
+static const u64 nv50_cursor_format_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID,
+};
+
int
nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
enum drm_plane_type type, const char *name, int index,
@@ -713,6 +718,7 @@ nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
struct nvif_mmu *mmu = &drm->client.mmu;
struct nv50_disp *disp = nv50_disp(dev);
struct nv50_wndw *wndw;
+ const u64 *format_modifiers;
int nformat;
int ret;
@@ -728,10 +734,13 @@ nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
for (nformat = 0; format[nformat]; nformat++);
- ret = drm_universal_plane_init(dev, &wndw->plane, heads, &nv50_wndw,
- format, nformat,
- nouveau_display(dev)->format_modifiers,
- type, "%s-%d", name, index);
+ if (type == DRM_PLANE_TYPE_CURSOR)
+ format_modifiers = nv50_cursor_format_modifiers;
+ else
+ format_modifiers = nouveau_display(dev)->format_modifiers;
+
+ ret = drm_universal_plane_init(dev, &wndw->plane, heads, &nv50_wndw, format, nformat,
+ format_modifiers, type, "%s-%d", name, index);
if (ret) {
kfree(*pwndw);
*pwndw = NULL;
@@ -784,6 +793,7 @@ nv50_wndw_new(struct nouveau_drm *drm, enum drm_plane_type type, int index,
int (*new)(struct nouveau_drm *, enum drm_plane_type,
int, s32, struct nv50_wndw **);
} wndws[] = {
+ { GA102_DISP_WINDOW_CHANNEL_DMA, 0, wndwc67e_new },
{ TU102_DISP_WINDOW_CHANNEL_DMA, 0, wndwc57e_new },
{ GV100_DISP_WINDOW_CHANNEL_DMA, 0, wndwc37e_new },
{}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.h b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
index 3278e2880034..f4e0c5080034 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
@@ -129,6 +129,14 @@ int wndwc37e_update(struct nv50_wndw *, u32 *);
int wndwc57e_new(struct nouveau_drm *, enum drm_plane_type, int, s32,
struct nv50_wndw **);
+bool wndwc57e_ilut(struct nv50_wndw *, struct nv50_wndw_atom *, int);
+int wndwc57e_ilut_set(struct nv50_wndw *, struct nv50_wndw_atom *);
+int wndwc57e_ilut_clr(struct nv50_wndw *);
+int wndwc57e_csc_set(struct nv50_wndw *, struct nv50_wndw_atom *);
+int wndwc57e_csc_clr(struct nv50_wndw *);
+
+int wndwc67e_new(struct nouveau_drm *, enum drm_plane_type, int, s32,
+ struct nv50_wndw **);
int nv50_wndw_new(struct nouveau_drm *, enum drm_plane_type, int index,
struct nv50_wndw **);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
index 429be0bb0222..abdd3bb658b3 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
@@ -80,7 +80,7 @@ wndwc57e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
return 0;
}
-static int
+int
wndwc57e_csc_clr(struct nv50_wndw *wndw)
{
struct nvif_push *push = wndw->wndw.push;
@@ -98,7 +98,7 @@ wndwc57e_csc_clr(struct nv50_wndw *wndw)
return 0;
}
-static int
+int
wndwc57e_csc_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
struct nvif_push *push = wndw->wndw.push;
@@ -111,7 +111,7 @@ wndwc57e_csc_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
return 0;
}
-static int
+int
wndwc57e_ilut_clr(struct nv50_wndw *wndw)
{
struct nvif_push *push = wndw->wndw.push;
@@ -124,7 +124,7 @@ wndwc57e_ilut_clr(struct nv50_wndw *wndw)
return 0;
}
-static int
+int
wndwc57e_ilut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
struct nvif_push *push = wndw->wndw.push;
@@ -179,7 +179,7 @@ wndwc57e_ilut_load(struct drm_color_lut *in, int size, void __iomem *mem)
writew(readw(mem - 4), mem + 4);
}
-static bool
+bool
wndwc57e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size)
{
if (size = size ? size : 1024, size != 256 && size != 1024)
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc67e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc67e.c
new file mode 100644
index 000000000000..7a370fa1df20
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc67e.c
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "wndw.h"
+#include "atom.h"
+
+#include <nvif/pushc37b.h>
+
+#include <nvhw/class/clc57e.h>
+
+static int
+wndwc67e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 17)))
+ return ret;
+
+ PUSH_MTHD(push, NVC57E, SET_PRESENT_CONTROL,
+ NVVAL(NVC57E, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, asyw->image.interval) |
+ NVVAL(NVC57E, SET_PRESENT_CONTROL, BEGIN_MODE, asyw->image.mode) |
+ NVDEF(NVC57E, SET_PRESENT_CONTROL, TIMESTAMP_MODE, DISABLE));
+
+ PUSH_MTHD(push, NVC57E, SET_SIZE,
+ NVVAL(NVC57E, SET_SIZE, WIDTH, asyw->image.w) |
+ NVVAL(NVC57E, SET_SIZE, HEIGHT, asyw->image.h),
+
+ SET_STORAGE,
+ NVVAL(NVC57E, SET_STORAGE, BLOCK_HEIGHT, asyw->image.blockh),
+
+ SET_PARAMS,
+ NVVAL(NVC57E, SET_PARAMS, FORMAT, asyw->image.format) |
+ NVDEF(NVC57E, SET_PARAMS, CLAMP_BEFORE_BLEND, DISABLE) |
+ NVDEF(NVC57E, SET_PARAMS, SWAP_UV, DISABLE) |
+ NVDEF(NVC57E, SET_PARAMS, FMT_ROUNDING_MODE, ROUND_TO_NEAREST),
+
+ SET_PLANAR_STORAGE(0),
+ NVVAL(NVC57E, SET_PLANAR_STORAGE, PITCH, asyw->image.blocks[0]) |
+ NVVAL(NVC57E, SET_PLANAR_STORAGE, PITCH, asyw->image.pitch[0] >> 6));
+
+ PUSH_MTHD(push, NVC57E, SET_CONTEXT_DMA_ISO(0), asyw->image.handle, 1);
+ PUSH_MTHD(push, NVC57E, SET_OFFSET(0), asyw->image.offset[0] >> 8);
+
+ PUSH_MTHD(push, NVC57E, SET_POINT_IN(0),
+ NVVAL(NVC57E, SET_POINT_IN, X, asyw->state.src_x >> 16) |
+ NVVAL(NVC57E, SET_POINT_IN, Y, asyw->state.src_y >> 16));
+
+ PUSH_MTHD(push, NVC57E, SET_SIZE_IN,
+ NVVAL(NVC57E, SET_SIZE_IN, WIDTH, asyw->state.src_w >> 16) |
+ NVVAL(NVC57E, SET_SIZE_IN, HEIGHT, asyw->state.src_h >> 16));
+
+ PUSH_MTHD(push, NVC57E, SET_SIZE_OUT,
+ NVVAL(NVC57E, SET_SIZE_OUT, WIDTH, asyw->state.crtc_w) |
+ NVVAL(NVC57E, SET_SIZE_OUT, HEIGHT, asyw->state.crtc_h));
+ return 0;
+}
+
+static const struct nv50_wndw_func
+wndwc67e = {
+ .acquire = wndwc37e_acquire,
+ .release = wndwc37e_release,
+ .sema_set = wndwc37e_sema_set,
+ .sema_clr = wndwc37e_sema_clr,
+ .ntfy_set = wndwc37e_ntfy_set,
+ .ntfy_clr = wndwc37e_ntfy_clr,
+ .ntfy_reset = corec37d_ntfy_init,
+ .ntfy_wait_begun = base507c_ntfy_wait_begun,
+ .ilut = wndwc57e_ilut,
+ .ilut_identity = true,
+ .ilut_size = 1024,
+ .xlut_set = wndwc57e_ilut_set,
+ .xlut_clr = wndwc57e_ilut_clr,
+ .csc = base907c_csc,
+ .csc_set = wndwc57e_csc_set,
+ .csc_clr = wndwc57e_csc_clr,
+ .image_set = wndwc67e_image_set,
+ .image_clr = wndwc37e_image_clr,
+ .blend_set = wndwc37e_blend_set,
+ .update = wndwc37e_update,
+};
+
+int
+wndwc67e_new(struct nouveau_drm *drm, enum drm_plane_type type, int index,
+ s32 oclass, struct nv50_wndw **pwndw)
+{
+ return wndwc37e_new_(&wndwc67e, drm, type, index, oclass, BIT(index >> 1), pwndw);
+}
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl917d.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl917d.h
index 2a2612d6e1e0..fb223723a38a 100644
--- a/drivers/gpu/drm/nouveau/include/nvhw/class/cl917d.h
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/cl917d.h
@@ -66,6 +66,10 @@
#define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_ALPHA_BLEND (0x00000000)
#define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_PREMULT_ALPHA_BLEND (0x00000001)
#define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_XOR (0x00000002)
+#define NV917D_HEAD_SET_OFFSET_CURSOR(a) (0x00000484 + (a)*0x00000300)
+#define NV917D_HEAD_SET_OFFSET_CURSOR_ORIGIN 31:0
+#define NV917D_HEAD_SET_CONTEXT_DMA_CURSOR(a) (0x0000048C + (a)*0x00000300)
+#define NV917D_HEAD_SET_CONTEXT_DMA_CURSOR_HANDLE 31:0
#define NV917D_HEAD_SET_DITHER_CONTROL(a) (0x000004A0 + (a)*0x00000300)
#define NV917D_HEAD_SET_DITHER_CONTROL_ENABLE 0:0
#define NV917D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE (0x00000000)
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
index cd9a2e687bb6..57d4f457a7d4 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
@@ -33,6 +33,7 @@ struct nv_device_info_v0 {
#define NV_DEVICE_INFO_V0_PASCAL 0x0a
#define NV_DEVICE_INFO_V0_VOLTA 0x0b
#define NV_DEVICE_INFO_V0_TURING 0x0c
+#define NV_DEVICE_INFO_V0_AMPERE 0x0d
__u8 family;
__u8 pad06[2];
__u64 ram_size;
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index 2c79beb41126..ba2c28ea43d2 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -88,6 +88,7 @@
#define GP102_DISP /* cl5070.h */ 0x00009870
#define GV100_DISP /* cl5070.h */ 0x0000c370
#define TU102_DISP /* cl5070.h */ 0x0000c570
+#define GA102_DISP /* cl5070.h */ 0x0000c670
#define GV100_DISP_CAPS 0x0000c373
@@ -103,6 +104,7 @@
#define GK104_DISP_CURSOR /* cl507a.h */ 0x0000917a
#define GV100_DISP_CURSOR /* cl507a.h */ 0x0000c37a
#define TU102_DISP_CURSOR /* cl507a.h */ 0x0000c57a
+#define GA102_DISP_CURSOR /* cl507a.h */ 0x0000c67a
#define NV50_DISP_OVERLAY /* cl507b.h */ 0x0000507b
#define G82_DISP_OVERLAY /* cl507b.h */ 0x0000827b
@@ -112,6 +114,7 @@
#define GV100_DISP_WINDOW_IMM_CHANNEL_DMA /* clc37b.h */ 0x0000c37b
#define TU102_DISP_WINDOW_IMM_CHANNEL_DMA /* clc37b.h */ 0x0000c57b
+#define GA102_DISP_WINDOW_IMM_CHANNEL_DMA /* clc37b.h */ 0x0000c67b
#define NV50_DISP_BASE_CHANNEL_DMA /* cl507c.h */ 0x0000507c
#define G82_DISP_BASE_CHANNEL_DMA /* cl507c.h */ 0x0000827c
@@ -135,6 +138,7 @@
#define GP102_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000987d
#define GV100_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000c37d
#define TU102_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000c57d
+#define GA102_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000c67d
#define NV50_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000507e
#define G82_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000827e
@@ -145,6 +149,7 @@
#define GV100_DISP_WINDOW_CHANNEL_DMA /* clc37e.h */ 0x0000c37e
#define TU102_DISP_WINDOW_CHANNEL_DMA /* clc37e.h */ 0x0000c57e
+#define GA102_DISP_WINDOW_CHANNEL_DMA /* clc37e.h */ 0x0000c67e
#define NV50_TESLA 0x00005097
#define G82_TESLA 0x00008297
diff --git a/drivers/gpu/drm/nouveau/include/nvif/push.h b/drivers/gpu/drm/nouveau/include/nvif/push.h
index 168d7694ede5..6d3a8a3d2087 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/push.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/push.h
@@ -123,131 +123,131 @@ PUSH_KICK(struct nvif_push *push)
} while(0)
#endif
-#define PUSH_1(X,f,ds,n,c,o,p,s,mA,dA) do { \
- PUSH_##o##_HDR((p), s, mA, (c)+(n)); \
- PUSH_##f(X, (p), X##mA, 1, o, (dA), ds, ""); \
+#define PUSH_1(X,f,ds,n,o,p,s,mA,dA) do { \
+ PUSH_##o##_HDR((p), s, mA, (ds)+(n)); \
+ PUSH_##f(X, (p), X##mA, 1, o, (dA), ds, ""); \
} while(0)
-#define PUSH_2(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
- PUSH_ASSERT((mB) - (mA) == (1?PUSH_##o##_INC), "mthd1"); \
- PUSH_1(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+#define PUSH_2(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (1?PUSH_##o##_INC), "mthd1"); \
+ PUSH_1(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
} while(0)
-#define PUSH_3(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd2"); \
- PUSH_2(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+#define PUSH_3(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd2"); \
+ PUSH_2(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
} while(0)
-#define PUSH_4(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd3"); \
- PUSH_3(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+#define PUSH_4(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd3"); \
+ PUSH_3(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
} while(0)
-#define PUSH_5(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd4"); \
- PUSH_4(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+#define PUSH_5(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd4"); \
+ PUSH_4(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
} while(0)
-#define PUSH_6(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd5"); \
- PUSH_5(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+#define PUSH_6(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd5"); \
+ PUSH_5(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
} while(0)
-#define PUSH_7(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd6"); \
- PUSH_6(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+#define PUSH_7(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd6"); \
+ PUSH_6(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
} while(0)
-#define PUSH_8(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd7"); \
- PUSH_7(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+#define PUSH_8(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd7"); \
+ PUSH_7(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
} while(0)
-#define PUSH_9(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd8"); \
- PUSH_8(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+#define PUSH_9(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd8"); \
+ PUSH_8(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
} while(0)
-#define PUSH_10(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd9"); \
- PUSH_9(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+#define PUSH_10(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd9"); \
+ PUSH_9(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
} while(0)
-#define PUSH_1D(X,o,p,s,mA,dA) \
- PUSH_1(X, DATA_, 1, 1, 0, o, (p), s, X##mA, (dA))
-#define PUSH_2D(X,o,p,s,mA,dA,mB,dB) \
- PUSH_2(X, DATA_, 1, 1, 0, o, (p), s, X##mB, (dB), \
- X##mA, (dA))
-#define PUSH_3D(X,o,p,s,mA,dA,mB,dB,mC,dC) \
- PUSH_3(X, DATA_, 1, 1, 0, o, (p), s, X##mC, (dC), \
- X##mB, (dB), \
- X##mA, (dA))
-#define PUSH_4D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD) \
- PUSH_4(X, DATA_, 1, 1, 0, o, (p), s, X##mD, (dD), \
- X##mC, (dC), \
- X##mB, (dB), \
- X##mA, (dA))
-#define PUSH_5D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE) \
- PUSH_5(X, DATA_, 1, 1, 0, o, (p), s, X##mE, (dE), \
- X##mD, (dD), \
- X##mC, (dC), \
- X##mB, (dB), \
- X##mA, (dA))
+#define PUSH_1D(X,o,p,s,mA,dA) \
+ PUSH_1(X, DATA_, 1, 0, o, (p), s, X##mA, (dA))
+#define PUSH_2D(X,o,p,s,mA,dA,mB,dB) \
+ PUSH_2(X, DATA_, 1, 0, o, (p), s, X##mB, (dB), \
+ X##mA, (dA))
+#define PUSH_3D(X,o,p,s,mA,dA,mB,dB,mC,dC) \
+ PUSH_3(X, DATA_, 1, 0, o, (p), s, X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
+#define PUSH_4D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD) \
+ PUSH_4(X, DATA_, 1, 0, o, (p), s, X##mD, (dD), \
+ X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
+#define PUSH_5D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE) \
+ PUSH_5(X, DATA_, 1, 0, o, (p), s, X##mE, (dE), \
+ X##mD, (dD), \
+ X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
#define PUSH_6D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF) \
- PUSH_6(X, DATA_, 1, 1, 0, o, (p), s, X##mF, (dF), \
- X##mE, (dE), \
- X##mD, (dD), \
- X##mC, (dC), \
- X##mB, (dB), \
- X##mA, (dA))
+ PUSH_6(X, DATA_, 1, 0, o, (p), s, X##mF, (dF), \
+ X##mE, (dE), \
+ X##mD, (dD), \
+ X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
#define PUSH_7D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG) \
- PUSH_7(X, DATA_, 1, 1, 0, o, (p), s, X##mG, (dG), \
- X##mF, (dF), \
- X##mE, (dE), \
- X##mD, (dD), \
- X##mC, (dC), \
- X##mB, (dB), \
- X##mA, (dA))
+ PUSH_7(X, DATA_, 1, 0, o, (p), s, X##mG, (dG), \
+ X##mF, (dF), \
+ X##mE, (dE), \
+ X##mD, (dD), \
+ X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
#define PUSH_8D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG,mH,dH) \
- PUSH_8(X, DATA_, 1, 1, 0, o, (p), s, X##mH, (dH), \
- X##mG, (dG), \
- X##mF, (dF), \
- X##mE, (dE), \
- X##mD, (dD), \
- X##mC, (dC), \
- X##mB, (dB), \
- X##mA, (dA))
+ PUSH_8(X, DATA_, 1, 0, o, (p), s, X##mH, (dH), \
+ X##mG, (dG), \
+ X##mF, (dF), \
+ X##mE, (dE), \
+ X##mD, (dD), \
+ X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
#define PUSH_9D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG,mH,dH,mI,dI) \
- PUSH_9(X, DATA_, 1, 1, 0, o, (p), s, X##mI, (dI), \
- X##mH, (dH), \
- X##mG, (dG), \
- X##mF, (dF), \
- X##mE, (dE), \
- X##mD, (dD), \
- X##mC, (dC), \
- X##mB, (dB), \
- X##mA, (dA))
+ PUSH_9(X, DATA_, 1, 0, o, (p), s, X##mI, (dI), \
+ X##mH, (dH), \
+ X##mG, (dG), \
+ X##mF, (dF), \
+ X##mE, (dE), \
+ X##mD, (dD), \
+ X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
#define PUSH_10D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG,mH,dH,mI,dI,mJ,dJ) \
- PUSH_10(X, DATA_, 1, 1, 0, o, (p), s, X##mJ, (dJ), \
- X##mI, (dI), \
- X##mH, (dH), \
- X##mG, (dG), \
- X##mF, (dF), \
- X##mE, (dE), \
- X##mD, (dD), \
- X##mC, (dC), \
- X##mB, (dB), \
- X##mA, (dA))
+ PUSH_10(X, DATA_, 1, 0, o, (p), s, X##mJ, (dJ), \
+ X##mI, (dI), \
+ X##mH, (dH), \
+ X##mG, (dG), \
+ X##mF, (dF), \
+ X##mE, (dE), \
+ X##mD, (dD), \
+ X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
-#define PUSH_1P(X,o,p,s,mA,dp,ds) \
- PUSH_1(X, DATAp, ds, ds, 0, o, (p), s, X##mA, (dp))
-#define PUSH_2P(X,o,p,s,mA,dA,mB,dp,ds) \
- PUSH_2(X, DATAp, ds, ds, 0, o, (p), s, X##mB, (dp), \
- X##mA, (dA))
-#define PUSH_3P(X,o,p,s,mA,dA,mB,dB,mC,dp,ds) \
- PUSH_3(X, DATAp, ds, ds, 0, o, (p), s, X##mC, (dp), \
- X##mB, (dB), \
- X##mA, (dA))
+#define PUSH_1P(X,o,p,s,mA,dp,ds) \
+ PUSH_1(X, DATAp, ds, 0, o, (p), s, X##mA, (dp))
+#define PUSH_2P(X,o,p,s,mA,dA,mB,dp,ds) \
+ PUSH_2(X, DATAp, ds, 0, o, (p), s, X##mB, (dp), \
+ X##mA, (dA))
+#define PUSH_3P(X,o,p,s,mA,dA,mB,dB,mC,dp,ds) \
+ PUSH_3(X, DATAp, ds, 0, o, (p), s, X##mC, (dp), \
+ X##mB, (dB), \
+ X##mA, (dA))
#define PUSH_(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W,X,IMPL,...) IMPL
#define PUSH(A...) PUSH_(A, PUSH_10P, PUSH_10D, \
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index 5c007ce62fc3..c920939a1467 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -120,6 +120,7 @@ struct nvkm_device {
GP100 = 0x130,
GV100 = 0x140,
TU100 = 0x160,
+ GA100 = 0x170,
} card_type;
u32 chipset;
u8 chiprev;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
index 5a96c942d912..0f6fa6631a19 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
@@ -37,4 +37,5 @@ int gp100_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
int gp102_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
int gv100_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
int tu102_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int ga102_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
index 1a39e52e09e3..50cc7c05eac4 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
@@ -32,4 +32,5 @@ int gm107_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
int gm200_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
int gv100_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
int tu102_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int ga100_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
index 34b56b10218a..2ecd52aec1d1 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -86,6 +86,8 @@ int gp100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gp102_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gp10b_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gv100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int ga100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int ga102_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
#include <subdev/bios.h>
#include <subdev/bios/ramcfg.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h
index eaacf8d80527..cdcce5ece6ff 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h
@@ -37,4 +37,5 @@ int nv50_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
int g94_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
int gf119_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
int gk104_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
+int ga102_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
index 81b977319640..640f649ce497 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
@@ -92,6 +92,7 @@ int g94_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
int gf117_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
int gf119_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
int gk104_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
+int gk110_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
int gm200_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
static inline int
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
index 6641fe4c252c..e45ca4583967 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
@@ -32,4 +32,5 @@ int gk20a_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
int gp100_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
int gp10b_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
int tu102_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
+int ga100_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index c7a94c94dbf3..72f35a2babcb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -256,6 +256,7 @@ nouveau_backlight_init(struct drm_connector *connector)
case NV_DEVICE_INFO_V0_PASCAL:
case NV_DEVICE_INFO_V0_VOLTA:
case NV_DEVICE_INFO_V0_TURING:
+ case NV_DEVICE_INFO_V0_AMPERE: //XXX: not confirmed
ret = nv50_backlight_init(nv_encoder, &props, &ops);
break;
default:
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 1386b0fc1640..7ea367a5444d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -547,7 +547,7 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
- int i;
+ int i, j;
if (!ttm_dma)
return;
@@ -556,10 +556,21 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
if (nvbo->force_coherent)
return;
- for (i = 0; i < ttm_dma->num_pages; i++)
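+	/* Coalesce runs of physically contiguous pages into a single sync
+	 * call per run, so each sync stays within one DMA mapping
+	 * (single-page syncs into larger mappings can trip dma-debug
+	 * warnings).
+	 */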
+ for (i = 0; i < ttm_dma->num_pages; ++i) {
+ struct page *p = ttm_dma->pages[i];
+ size_t num_pages = 1;
+
+ for (j = i + 1; j < ttm_dma->num_pages; ++j) {
+ if (++p != ttm_dma->pages[j])
+ break;
+
+ ++num_pages;
+ }
dma_sync_single_for_device(drm->dev->dev,
ttm_dma->dma_address[i],
- PAGE_SIZE, DMA_TO_DEVICE);
+ num_pages * PAGE_SIZE, DMA_TO_DEVICE);
+	i += num_pages - 1; /* the loop's own ++i steps past the synced run */
+ }
}
void
@@ -567,7 +578,7 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
- int i;
+ int i, j;
if (!ttm_dma)
return;
@@ -576,9 +587,21 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
if (nvbo->force_coherent)
return;
- for (i = 0; i < ttm_dma->num_pages; i++)
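+	/* As in nouveau_bo_sync_for_device(): sync contiguous runs of
+	 * pages with one call per run.
+	 */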
+ for (i = 0; i < ttm_dma->num_pages; ++i) {
+ struct page *p = ttm_dma->pages[i];
+ size_t num_pages = 1;
+
+ for (j = i + 1; j < ttm_dma->num_pages; ++j) {
+ if (++p != ttm_dma->pages[j])
+ break;
+
+ ++num_pages;
+ }
+
dma_sync_single_for_cpu(drm->dev->dev, ttm_dma->dma_address[i],
- PAGE_SIZE, DMA_FROM_DEVICE);
+ num_pages * PAGE_SIZE, DMA_FROM_DEVICE);
+	i += num_pages - 1; /* the loop's own ++i steps past the synced run */
+ }
}
void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo)
@@ -942,16 +965,6 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
struct nouveau_drm_tile *new_tile = NULL;
int ret = 0;
- if ((old_reg->mem_type == TTM_PL_SYSTEM &&
- new_reg->mem_type == TTM_PL_VRAM) ||
- (old_reg->mem_type == TTM_PL_VRAM &&
- new_reg->mem_type == TTM_PL_SYSTEM)) {
- hop->fpfn = 0;
- hop->lpfn = 0;
- hop->mem_type = TTM_PL_TT;
- hop->flags = 0;
- return -EMULTIHOP;
- }
if (new_reg->mem_type == TTM_PL_TT) {
ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, new_reg);
@@ -995,14 +1008,25 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
/* Hardware assisted copy. */
if (drm->ttm.move) {
+ if ((old_reg->mem_type == TTM_PL_SYSTEM &&
+ new_reg->mem_type == TTM_PL_VRAM) ||
+ (old_reg->mem_type == TTM_PL_VRAM &&
+ new_reg->mem_type == TTM_PL_SYSTEM)) {
+ hop->fpfn = 0;
+ hop->lpfn = 0;
+ hop->mem_type = TTM_PL_TT;
+ hop->flags = 0;
+ return -EMULTIHOP;
+ }
ret = nouveau_bo_move_m2mf(bo, evict, ctx,
new_reg);
- if (!ret)
- goto out;
- }
+ } else
+ ret = -ENODEV;
- /* Fallback to software copy. */
- ret = ttm_bo_move_memcpy(bo, ctx, new_reg);
+ if (ret) {
+ /* Fallback to software copy. */
+ ret = ttm_bo_move_memcpy(bo, ctx, new_reg);
+ }
out:
if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index 4f69e4c3dafd..1c3f890377d2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -315,6 +315,10 @@ nouveau_svmm_init(struct drm_device *dev, void *data,
struct drm_nouveau_svm_init *args = data;
int ret;
+ /* We need to fail if svm is disabled */
+ if (!cli->drm->svm)
+ return -ENOSYS;
+
/* Allocate tracking for SVM-enabled VMM. */
if (!(svmm = kzalloc(sizeof(*svmm), GFP_KERNEL)))
return -ENOMEM;
diff --git a/drivers/gpu/drm/nouveau/nvif/disp.c b/drivers/gpu/drm/nouveau/nvif/disp.c
index 8d0d30e08f57..529cb60d5efb 100644
--- a/drivers/gpu/drm/nouveau/nvif/disp.c
+++ b/drivers/gpu/drm/nouveau/nvif/disp.c
@@ -35,6 +35,7 @@ nvif_disp_ctor(struct nvif_device *device, const char *name, s32 oclass,
struct nvif_disp *disp)
{
static const struct nvif_mclass disps[] = {
+ { GA102_DISP, -1 },
{ TU102_DISP, -1 },
{ GV100_DISP, -1 },
{ GP102_DISP, -1 },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 7851bec5f0e5..cdcc851e06f9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -1815,7 +1815,7 @@ nvf0_chipset = {
.fb = gk110_fb_new,
.fuse = gf100_fuse_new,
.gpio = gk104_gpio_new,
- .i2c = gk104_i2c_new,
+ .i2c = gk110_i2c_new,
.ibus = gk104_ibus_new,
.iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
@@ -1853,7 +1853,7 @@ nvf1_chipset = {
.fb = gk110_fb_new,
.fuse = gf100_fuse_new,
.gpio = gk104_gpio_new,
- .i2c = gk104_i2c_new,
+ .i2c = gk110_i2c_new,
.ibus = gk104_ibus_new,
.iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
@@ -1891,7 +1891,7 @@ nv106_chipset = {
.fb = gk110_fb_new,
.fuse = gf100_fuse_new,
.gpio = gk104_gpio_new,
- .i2c = gk104_i2c_new,
+ .i2c = gk110_i2c_new,
.ibus = gk104_ibus_new,
.iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
@@ -1929,7 +1929,7 @@ nv108_chipset = {
.fb = gk110_fb_new,
.fuse = gf100_fuse_new,
.gpio = gk104_gpio_new,
- .i2c = gk104_i2c_new,
+ .i2c = gk110_i2c_new,
.ibus = gk104_ibus_new,
.iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
@@ -1967,7 +1967,7 @@ nv117_chipset = {
.fb = gm107_fb_new,
.fuse = gm107_fuse_new,
.gpio = gk104_gpio_new,
- .i2c = gk104_i2c_new,
+ .i2c = gk110_i2c_new,
.ibus = gk104_ibus_new,
.iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
@@ -2003,7 +2003,7 @@ nv118_chipset = {
.fb = gm107_fb_new,
.fuse = gm107_fuse_new,
.gpio = gk104_gpio_new,
- .i2c = gk104_i2c_new,
+ .i2c = gk110_i2c_new,
.ibus = gk104_ibus_new,
.iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
@@ -2652,6 +2652,61 @@ nv168_chipset = {
.sec2 = tu102_sec2_new,
};
+static const struct nvkm_device_chip
+nv170_chipset = {
+ .name = "GA100",
+ .bar = tu102_bar_new,
+ .bios = nvkm_bios_new,
+ .devinit = ga100_devinit_new,
+ .fb = ga100_fb_new,
+ .gpio = gk104_gpio_new,
+ .i2c = gm200_i2c_new,
+ .ibus = gm200_ibus_new,
+ .imem = nv50_instmem_new,
+ .mc = ga100_mc_new,
+ .mmu = tu102_mmu_new,
+ .pci = gp100_pci_new,
+ .timer = gk20a_timer_new,
+};
+
+static const struct nvkm_device_chip
+nv172_chipset = {
+ .name = "GA102",
+ .bar = tu102_bar_new,
+ .bios = nvkm_bios_new,
+ .devinit = ga100_devinit_new,
+ .fb = ga102_fb_new,
+ .gpio = ga102_gpio_new,
+ .i2c = gm200_i2c_new,
+ .ibus = gm200_ibus_new,
+ .imem = nv50_instmem_new,
+ .mc = ga100_mc_new,
+ .mmu = tu102_mmu_new,
+ .pci = gp100_pci_new,
+ .timer = gk20a_timer_new,
+ .disp = ga102_disp_new,
+ .dma = gv100_dma_new,
+};
+
+static const struct nvkm_device_chip
+nv174_chipset = {
+ .name = "GA104",
+ .bar = tu102_bar_new,
+ .bios = nvkm_bios_new,
+ .devinit = ga100_devinit_new,
+ .fb = ga102_fb_new,
+ .gpio = ga102_gpio_new,
+ .i2c = gm200_i2c_new,
+ .ibus = gm200_ibus_new,
+ .imem = nv50_instmem_new,
+ .mc = ga100_mc_new,
+ .mmu = tu102_mmu_new,
+ .pci = gp100_pci_new,
+ .timer = gk20a_timer_new,
+ .disp = ga102_disp_new,
+ .dma = gv100_dma_new,
+};
+
static int
nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
struct nvkm_notify *notify)
@@ -3063,6 +3118,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x130: device->card_type = GP100; break;
case 0x140: device->card_type = GV100; break;
case 0x160: device->card_type = TU100; break;
+ case 0x170: device->card_type = GA100; break;
default:
break;
}
@@ -3160,10 +3216,23 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x166: device->chip = &nv166_chipset; break;
case 0x167: device->chip = &nv167_chipset; break;
case 0x168: device->chip = &nv168_chipset; break;
+ case 0x172: device->chip = &nv172_chipset; break;
+ case 0x174: device->chip = &nv174_chipset; break;
default:
- nvdev_error(device, "unknown chipset (%08x)\n", boot0);
- ret = -ENODEV;
- goto done;
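+		/* GA100 support is still too incomplete to enable by
+		 * default, so it is only selectable behind an explicit
+		 * config option.
+		 */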
+ if (nvkm_boolopt(device->cfgopt, "NvEnableUnsupportedChipsets", false)) {
+ switch (device->chipset) {
+ case 0x170: device->chip = &nv170_chipset; break;
+ default:
+ break;
+ }
+ }
+
+ if (!device->chip) {
+ nvdev_error(device, "unknown chipset (%08x)\n", boot0);
+ ret = -ENODEV;
+ goto done;
+ }
+ break;
}
nvdev_info(device, "NVIDIA %s (%08x)\n",
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
index 03c6d9aef075..147894798786 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
@@ -176,6 +176,7 @@ nvkm_udevice_info(struct nvkm_udevice *udev, void *data, u32 size)
case GP100: args->v0.family = NV_DEVICE_INFO_V0_PASCAL; break;
case GV100: args->v0.family = NV_DEVICE_INFO_V0_VOLTA; break;
case TU100: args->v0.family = NV_DEVICE_INFO_V0_TURING; break;
+ case GA100: args->v0.family = NV_DEVICE_INFO_V0_AMPERE; break;
default:
args->v0.family = 0;
break;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
index cf075311cdd2..b03f043efe26 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
@@ -17,6 +17,7 @@ nvkm-y += nvkm/engine/disp/gp100.o
nvkm-y += nvkm/engine/disp/gp102.o
nvkm-y += nvkm/engine/disp/gv100.o
nvkm-y += nvkm/engine/disp/tu102.o
+nvkm-y += nvkm/engine/disp/ga102.o
nvkm-y += nvkm/engine/disp/vga.o
nvkm-y += nvkm/engine/disp/head.o
@@ -42,6 +43,7 @@ nvkm-y += nvkm/engine/disp/sorgm200.o
nvkm-y += nvkm/engine/disp/sorgp100.o
nvkm-y += nvkm/engine/disp/sorgv100.o
nvkm-y += nvkm/engine/disp/sortu102.o
+nvkm-y += nvkm/engine/disp/sorga102.o
nvkm-y += nvkm/engine/disp/outp.o
nvkm-y += nvkm/engine/disp/dp.o
@@ -75,6 +77,7 @@ nvkm-y += nvkm/engine/disp/rootgp100.o
nvkm-y += nvkm/engine/disp/rootgp102.o
nvkm-y += nvkm/engine/disp/rootgv100.o
nvkm-y += nvkm/engine/disp/roottu102.o
+nvkm-y += nvkm/engine/disp/rootga102.o
nvkm-y += nvkm/engine/disp/capsgv100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
index 3800aeb507d0..55fbfe28c6dc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
@@ -33,6 +33,12 @@
#include <nvif/event.h>
+/* IED scripts are no longer used by UEFI/RM from Ampere onwards, but they have
+ * been updated for the x86 option ROM. However, the relevant VBIOS table
+ * versions weren't bumped, so there's no clean way to detect this.
+ */
+#define AMPERE_IED_HACK(disp) ((disp)->engine.subdev.device->card_type >= GA100)
+
struct lt_state {
struct nvkm_dp *dp;
u8 stat[6];
@@ -238,6 +244,19 @@ nvkm_dp_train_links(struct nvkm_dp *dp)
dp->dpcd[DPCD_RC02] &= ~DPCD_RC02_TPS3_SUPPORTED;
lt.pc2 = dp->dpcd[DPCD_RC02] & DPCD_RC02_TPS3_SUPPORTED;
+ if (AMPERE_IED_HACK(disp) && (lnkcmp = lt.dp->info.script[0])) {
+ /* Execute BeforeLinkTraining script from DP Info table. */
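+		/* On Ampere, script[0] points at a per-link-rate table of
+		 * (u8 bw, u16 script) entries rather than a single script.
+		 */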
+ while (ior->dp.bw < nvbios_rd08(bios, lnkcmp))
+ lnkcmp += 3;
+ lnkcmp = nvbios_rd16(bios, lnkcmp + 1);
+
+ nvbios_init(&dp->outp.disp->engine.subdev, lnkcmp,
+ init.outp = &dp->outp.info;
+ init.or = ior->id;
+ init.link = ior->asy.link;
+ );
+ }
+
/* Set desired link configuration on the source. */
if ((lnkcmp = lt.dp->info.lnkcmp)) {
if (dp->version < 0x30) {
@@ -316,12 +335,14 @@ nvkm_dp_train_init(struct nvkm_dp *dp)
);
}
- /* Execute BeforeLinkTraining script from DP Info table. */
- nvbios_init(&dp->outp.disp->engine.subdev, dp->info.script[0],
- init.outp = &dp->outp.info;
- init.or = dp->outp.ior->id;
- init.link = dp->outp.ior->asy.link;
- );
+ if (!AMPERE_IED_HACK(dp->outp.disp)) {
+ /* Execute BeforeLinkTraining script from DP Info table. */
+ nvbios_init(&dp->outp.disp->engine.subdev, dp->info.script[0],
+ init.outp = &dp->outp.info;
+ init.or = dp->outp.ior->id;
+ init.link = dp->outp.ior->asy.link;
+ );
+ }
}
static const struct dp_rates {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ga102.c
new file mode 100644
index 000000000000..aa2e5645fe36
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ga102.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "nv50.h"
+#include "head.h"
+#include "ior.h"
+#include "channv50.h"
+#include "rootnv50.h"
+
+static const struct nv50_disp_func
+ga102_disp = {
+ .init = tu102_disp_init,
+ .fini = gv100_disp_fini,
+ .intr = gv100_disp_intr,
+ .uevent = &gv100_disp_chan_uevent,
+ .super = gv100_disp_super,
+ .root = &ga102_disp_root_oclass,
+ .wndw = { .cnt = gv100_disp_wndw_cnt },
+ .head = { .cnt = gv100_head_cnt, .new = gv100_head_new },
+ .sor = { .cnt = gv100_sor_cnt, .new = ga102_sor_new },
+ .ramht_size = 0x2000,
+};
+
+int
+ga102_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+{
+ return nv50_disp_new_(&ga102_disp, device, index, pdisp);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
index 09f3038eff26..9f0bb7c6b010 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
@@ -150,6 +150,8 @@ void gv100_sor_dp_audio(struct nvkm_ior *, int, bool);
void gv100_sor_dp_audio_sym(struct nvkm_ior *, int, u16, u32);
void gv100_sor_dp_watermark(struct nvkm_ior *, int, u8);
+void tu102_sor_dp_vcpi(struct nvkm_ior *, int, u8, u8, u16, u16);
+
void g84_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8);
void gt215_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8);
void gf119_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8);
@@ -207,4 +209,6 @@ int gv100_sor_cnt(struct nvkm_disp *, unsigned long *);
int gv100_sor_new(struct nvkm_disp *, int);
int tu102_sor_new(struct nvkm_disp *, int);
+
+int ga102_sor_new(struct nvkm_disp *, int);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
index a677161c7f3a..db31b37752a2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
@@ -86,6 +86,8 @@ void gv100_disp_intr(struct nv50_disp *);
void gv100_disp_super(struct work_struct *);
int gv100_disp_wndw_cnt(struct nvkm_disp *, unsigned long *);
+int tu102_disp_init(struct nv50_disp *);
+
void nv50_disp_dptmds_war_2(struct nv50_disp *, struct dcb_output *);
void nv50_disp_dptmds_war_3(struct nv50_disp *, struct dcb_output *);
void nv50_disp_update_sppll1(struct nv50_disp *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootga102.c
new file mode 100644
index 000000000000..9af07c3cf9fc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootga102.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "rootnv50.h"
+#include "channv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_root_func
+ga102_disp_root = {
+ .user = {
+ {{-1,-1,GV100_DISP_CAPS }, gv100_disp_caps_new },
+ {{0,0,GA102_DISP_CURSOR }, gv100_disp_curs_new },
+ {{0,0,GA102_DISP_WINDOW_IMM_CHANNEL_DMA}, gv100_disp_wimm_new },
+ {{0,0,GA102_DISP_CORE_CHANNEL_DMA }, gv100_disp_core_new },
+ {{0,0,GA102_DISP_WINDOW_CHANNEL_DMA }, gv100_disp_wndw_new },
+ {}
+ },
+};
+
+static int
+ga102_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
+{
+ return nv50_disp_root_new_(&ga102_disp_root, disp, oclass, data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+ga102_disp_root_oclass = {
+ .base.oclass = GA102_DISP,
+ .base.minver = -1,
+ .base.maxver = -1,
+ .ctor = ga102_disp_root_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
index 7070f5408d92..27bb170d0293 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
@@ -41,4 +41,5 @@ extern const struct nvkm_disp_oclass gp100_disp_root_oclass;
extern const struct nvkm_disp_oclass gp102_disp_root_oclass;
extern const struct nvkm_disp_oclass gv100_disp_root_oclass;
extern const struct nvkm_disp_oclass tu102_disp_root_oclass;
+extern const struct nvkm_disp_oclass ga102_disp_root_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorga102.c
new file mode 100644
index 000000000000..033827de9116
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorga102.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "ior.h"
+
+#include <subdev/timer.h>
+
+static int
+ga102_sor_dp_links(struct nvkm_ior *sor, struct nvkm_i2c_aux *aux)
+{
+ struct nvkm_device *device = sor->disp->engine.subdev.device;
+ const u32 soff = nv50_ior_base(sor);
+ const u32 loff = nv50_sor_link(sor);
+ u32 dpctrl = 0x00000000;
+ u32 clksor = 0x00000000;
+
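+	/* sor->dp.bw holds the DPCD link-bandwidth code: 0x06 = 1.62Gbps,
+	 * 0x0a = 2.7Gbps, 0x14 = 5.4Gbps, 0x1e = 8.1Gbps (HBR3).
+	 */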
+ switch (sor->dp.bw) {
+ case 0x06: clksor |= 0x00000000; break;
+ case 0x0a: clksor |= 0x00040000; break;
+ case 0x14: clksor |= 0x00080000; break;
+ case 0x1e: clksor |= 0x000c0000; break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ dpctrl |= ((1 << sor->dp.nr) - 1) << 16;
+ if (sor->dp.mst)
+ dpctrl |= 0x40000000;
+ if (sor->dp.ef)
+ dpctrl |= 0x00004000;
+
+ nvkm_mask(device, 0x612300 + soff, 0x007c0000, clksor);
+
+ /*XXX*/
+ nvkm_msec(device, 40, NVKM_DELAY);
+ nvkm_mask(device, 0x612300 + soff, 0x00030000, 0x00010000);
+ nvkm_mask(device, 0x61c10c + loff, 0x00000003, 0x00000001);
+
+ nvkm_mask(device, 0x61c10c + loff, 0x401f4000, dpctrl);
+ return 0;
+}
+
+static void
+ga102_sor_clock(struct nvkm_ior *sor)
+{
+ struct nvkm_device *device = sor->disp->engine.subdev.device;
+ u32 div2 = 0;
+ if (sor->asy.proto == TMDS) {
+ if (sor->tmds.high_speed)
+ div2 = 1;
+ }
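+	/* Presumably a clock-divider select for high-speed (>340MHz) TMDS;
+	 * the exact purpose of this register pair on GA102 is unconfirmed.
+	 */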
+ nvkm_wr32(device, 0x00ec08 + (sor->id * 0x10), 0x00000000);
+ nvkm_wr32(device, 0x00ec04 + (sor->id * 0x10), div2);
+}
+
+static const struct nvkm_ior_func
+ga102_sor_hda = {
+ .route = {
+ .get = gm200_sor_route_get,
+ .set = gm200_sor_route_set,
+ },
+ .state = gv100_sor_state,
+ .power = nv50_sor_power,
+ .clock = ga102_sor_clock,
+ .hdmi = {
+ .ctrl = gv100_hdmi_ctrl,
+ .scdc = gm200_hdmi_scdc,
+ },
+ .dp = {
+ .lanes = { 0, 1, 2, 3 },
+ .links = ga102_sor_dp_links,
+ .power = g94_sor_dp_power,
+ .pattern = gm107_sor_dp_pattern,
+ .drive = gm200_sor_dp_drive,
+ .vcpi = tu102_sor_dp_vcpi,
+ .audio = gv100_sor_dp_audio,
+ .audio_sym = gv100_sor_dp_audio_sym,
+ .watermark = gv100_sor_dp_watermark,
+ },
+ .hda = {
+ .hpd = gf119_hda_hpd,
+ .eld = gf119_hda_eld,
+ .device_entry = gv100_hda_device_entry,
+ },
+};
+
+static const struct nvkm_ior_func
+ga102_sor = {
+ .route = {
+ .get = gm200_sor_route_get,
+ .set = gm200_sor_route_set,
+ },
+ .state = gv100_sor_state,
+ .power = nv50_sor_power,
+ .clock = ga102_sor_clock,
+ .hdmi = {
+ .ctrl = gv100_hdmi_ctrl,
+ .scdc = gm200_hdmi_scdc,
+ },
+ .dp = {
+ .lanes = { 0, 1, 2, 3 },
+ .links = ga102_sor_dp_links,
+ .power = g94_sor_dp_power,
+ .pattern = gm107_sor_dp_pattern,
+ .drive = gm200_sor_dp_drive,
+ .vcpi = tu102_sor_dp_vcpi,
+ .audio = gv100_sor_dp_audio,
+ .audio_sym = gv100_sor_dp_audio_sym,
+ .watermark = gv100_sor_dp_watermark,
+ },
+};
+
+int
+ga102_sor_new(struct nvkm_disp *disp, int id)
+{
+ struct nvkm_device *device = disp->engine.subdev.device;
+ u32 hda = nvkm_rd32(device, 0x08a15c);
+ if (hda & BIT(id))
+ return nvkm_ior_new_(&ga102_sor_hda, disp, SOR, id);
+ return nvkm_ior_new_(&ga102_sor, disp, SOR, id);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c
index 59865a934c4b..0cf9e8752d25 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c
@@ -23,7 +23,7 @@
#include <subdev/timer.h>
-static void
+void
tu102_sor_dp_vcpi(struct nvkm_ior *sor, int head,
u8 slot, u8 slot_nr, u16 pbn, u16 aligned)
{
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c
index 883ae4151ff8..4c85d1d4fbd4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c
@@ -28,7 +28,7 @@
#include <core/gpuobj.h>
#include <subdev/timer.h>
-static int
+int
tu102_disp_init(struct nv50_disp *disp)
{
struct nvkm_device *device = disp->base.engine.subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
index 7deb81b6dbac..4b571cc6bc70 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
@@ -75,7 +75,7 @@ shadow_image(struct nvkm_bios *bios, int idx, u32 offset, struct shadow *mthd)
nvkm_debug(subdev, "%08x: type %02x, %d bytes\n",
image.base, image.type, image.size);
- if (!shadow_fetch(bios, mthd, image.size)) {
+ if (!shadow_fetch(bios, mthd, image.base + image.size)) {
nvkm_debug(subdev, "%08x: fetch failed\n", image.base);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c
index 3634cd0630b8..023ddc7c5399 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c
@@ -64,6 +64,9 @@ pramin_init(struct nvkm_bios *bios, const char *name)
return NULL;
/* we can't get the bios image pointer without PDISP */
+ if (device->card_type >= GA100)
+ addr = device->chipset == 0x170; /*XXX: find the fuse reg for this */
+ else
if (device->card_type >= GM100)
addr = nvkm_rd32(device, 0x021c04);
else
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild
index b3429371ed82..d1abb64841da 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild
@@ -15,3 +15,4 @@ nvkm-y += nvkm/subdev/devinit/gm107.o
nvkm-y += nvkm/subdev/devinit/gm200.o
nvkm-y += nvkm/subdev/devinit/gv100.o
nvkm-y += nvkm/subdev/devinit/tu102.o
+nvkm-y += nvkm/subdev/devinit/ga100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/ga100.c
new file mode 100644
index 000000000000..636a92128f6c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/ga100.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "nv50.h"
+
+#include <subdev/bios.h>
+#include <subdev/bios/pll.h>
+#include <subdev/clk/pll.h>
+
+static int
+ga100_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
+{
+ struct nvkm_subdev *subdev = &init->subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvbios_pll info;
+ int head = type - PLL_VPLL0;
+ int N, fN, M, P;
+ int ret;
+
+ ret = nvbios_pll_parse(device->bios, type, &info);
+ if (ret)
+ return ret;
+
+ ret = gt215_pll_calc(subdev, &info, freq, &N, &fN, &M, &P);
+ if (ret < 0)
+ return ret;
+
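+	/* Each head's VPLL has a 0x40 register stride: coefficients are
+	 * written as (N, fN) and (P, M) pairs, then the PLL is enabled
+	 * through the per-head register at 0xe9c0.
+	 */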
+ switch (info.type) {
+ case PLL_VPLL0:
+ case PLL_VPLL1:
+ case PLL_VPLL2:
+ case PLL_VPLL3:
+ nvkm_wr32(device, 0x00ef00 + (head * 0x40), 0x02080004);
+ nvkm_wr32(device, 0x00ef18 + (head * 0x40), (N << 16) | fN);
+ nvkm_wr32(device, 0x00ef04 + (head * 0x40), (P << 16) | M);
+ nvkm_wr32(device, 0x00e9c0 + (head * 0x04), 0x00000001);
+ break;
+ default:
+		nvkm_warn(subdev, "%08x/%dkHz unimplemented\n", type, freq);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static const struct nvkm_devinit_func
+ga100_devinit = {
+ .init = nv50_devinit_init,
+ .post = tu102_devinit_post,
+ .pll_set = ga100_devinit_pll_set,
+};
+
+int
+ga100_devinit_new(struct nvkm_device *device, int index, struct nvkm_devinit **pinit)
+{
+ return nv50_devinit_new_(&ga100_devinit, device, index, pinit);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h
index 94723352137a..05961e624264 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h
@@ -19,4 +19,5 @@ void nvkm_devinit_ctor(const struct nvkm_devinit_func *, struct nvkm_device *,
int index, struct nvkm_devinit *);
int nv04_devinit_post(struct nvkm_devinit *, bool);
+int tu102_devinit_post(struct nvkm_devinit *, bool);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c
index 397670e72fff..9a469bf482f2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c
@@ -65,7 +65,7 @@ tu102_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
return ret;
}
-static int
+int
tu102_devinit_post(struct nvkm_devinit *base, bool post)
{
struct nv50_devinit *init = nv50_devinit(base);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
index 43a42159a3d0..5d0bab8ecb43 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
@@ -32,6 +32,8 @@ nvkm-y += nvkm/subdev/fb/gp100.o
nvkm-y += nvkm/subdev/fb/gp102.o
nvkm-y += nvkm/subdev/fb/gp10b.o
nvkm-y += nvkm/subdev/fb/gv100.o
+nvkm-y += nvkm/subdev/fb/ga100.o
+nvkm-y += nvkm/subdev/fb/ga102.o
nvkm-y += nvkm/subdev/fb/ram.o
nvkm-y += nvkm/subdev/fb/ramnv04.o
@@ -52,6 +54,7 @@ nvkm-y += nvkm/subdev/fb/ramgk104.o
nvkm-y += nvkm/subdev/fb/ramgm107.o
nvkm-y += nvkm/subdev/fb/ramgm200.o
nvkm-y += nvkm/subdev/fb/ramgp100.o
+nvkm-y += nvkm/subdev/fb/ramga102.o
nvkm-y += nvkm/subdev/fb/sddr2.o
nvkm-y += nvkm/subdev/fb/sddr3.o
nvkm-y += nvkm/subdev/fb/gddr3.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c
new file mode 100644
index 000000000000..bf82686851cd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gf100.h"
+#include "ram.h"
+
+static const struct nvkm_fb_func
+ga100_fb = {
+ .dtor = gf100_fb_dtor,
+ .oneinit = gf100_fb_oneinit,
+ .init = gp100_fb_init,
+ .init_page = gv100_fb_init_page,
+ .init_unkn = gp100_fb_init_unkn,
+ .ram_new = gp100_ram_new,
+ .default_bigpage = 16,
+};
+
+int
+ga100_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+ return gp102_fb_new_(&ga100_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c
new file mode 100644
index 000000000000..bcecf84a6e67
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gf100.h"
+#include "ram.h"
+
+static const struct nvkm_fb_func
+ga102_fb = {
+ .dtor = gf100_fb_dtor,
+ .oneinit = gf100_fb_oneinit,
+ .init = gp100_fb_init,
+ .init_page = gv100_fb_init_page,
+ .init_unkn = gp100_fb_init_unkn,
+ .ram_new = ga102_ram_new,
+ .default_bigpage = 16,
+};
+
+int
+ga102_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+ return gp102_fb_new_(&ga102_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
index 10ff5d053f7e..feda86a5fba8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
@@ -22,7 +22,7 @@
#include "gf100.h"
#include "ram.h"
-static int
+int
gv100_fb_init_page(struct nvkm_fb *fb)
{
return (fb->page == 16) ? 0 : -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
index 5be9c563350d..66932ac10d15 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
@@ -82,4 +82,6 @@ int gp102_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *, int,
struct nvkm_fb **);
bool gp102_fb_vpr_scrub_required(struct nvkm_fb *);
int gp102_fb_vpr_scrub(struct nvkm_fb *);
+
+int gv100_fb_init_page(struct nvkm_fb *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
index d723a9b4e3c4..ea7d66f3dd82 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
@@ -70,4 +70,5 @@ int gk104_ram_new(struct nvkm_fb *, struct nvkm_ram **);
int gm107_ram_new(struct nvkm_fb *, struct nvkm_ram **);
int gm200_ram_new(struct nvkm_fb *, struct nvkm_ram **);
int gp100_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int ga102_ram_new(struct nvkm_fb *, struct nvkm_ram **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramga102.c
new file mode 100644
index 000000000000..298c136cefe0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramga102.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "ram.h"
+
+#include <subdev/bios.h>
+#include <subdev/bios/init.h>
+#include <subdev/bios/rammap.h>
+
+static const struct nvkm_ram_func
+ga102_ram = {
+};
+
+int
+ga102_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
+{
+ struct nvkm_device *device = fb->subdev.device;
+ enum nvkm_ram_type type = nvkm_fb_bios_memtype(device->bios);
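+	/* 0x1183a4 reports the VRAM size in MiB, hence the shift below. */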
+ u32 size = nvkm_rd32(device, 0x1183a4);
+
+ return nvkm_ram_new_(&ga102_ram, fb, type, (u64)size << 20, pram);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild
index b2ad5922a1c2..efbbaa080de5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild
@@ -5,3 +5,4 @@ nvkm-y += nvkm/subdev/gpio/nv50.o
nvkm-y += nvkm/subdev/gpio/g94.o
nvkm-y += nvkm/subdev/gpio/gf119.o
nvkm-y += nvkm/subdev/gpio/gk104.o
+nvkm-y += nvkm/subdev/gpio/ga102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/ga102.c
new file mode 100644
index 000000000000..62c791baf400
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/ga102.c
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+static void
+ga102_gpio_reset(struct nvkm_gpio *gpio, u8 match)
+{
+ struct nvkm_device *device = gpio->subdev.device;
+ struct nvkm_bios *bios = device->bios;
+ u8 ver, len;
+ u16 entry;
+ int ent = -1;
+
+ while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver, &len))) {
+ u32 data = nvbios_rd32(bios, entry);
+ u8 line = (data & 0x0000003f);
+ u8 defs = !!(data & 0x00000080);
+ u8 func = (data & 0x0000ff00) >> 8;
+ u8 unk0 = (data & 0x00ff0000) >> 16;
+ u8 unk1 = (data & 0x1f000000) >> 24;
+
+ if ( func == DCB_GPIO_UNUSED ||
+ (match != DCB_GPIO_UNUSED && match != func))
+ continue;
+
+ nvkm_gpio_set(gpio, 0, func, line, defs);
+
+ nvkm_mask(device, 0x021200 + (line * 4), 0xff, unk0);
+ if (unk1--)
+ nvkm_mask(device, 0x00d740 + (unk1 * 4), 0xff, line);
+ }
+}
+
+static int
+ga102_gpio_drive(struct nvkm_gpio *gpio, int line, int dir, int out)
+{
+ struct nvkm_device *device = gpio->subdev.device;
+ u32 data = ((dir ^ 1) << 13) | (out << 12);
+ nvkm_mask(device, 0x021200 + (line * 4), 0x00003000, data);
+ nvkm_mask(device, 0x00d604, 0x00000001, 0x00000001); /* update? */
+ return 0;
+}
+
+static int
+ga102_gpio_sense(struct nvkm_gpio *gpio, int line)
+{
+ struct nvkm_device *device = gpio->subdev.device;
+ return !!(nvkm_rd32(device, 0x021200 + (line * 4)) & 0x00004000);
+}
+
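+/* Interrupt status/enable are split across two register pairs, each
+ * covering 16 GPIO lines with hi-edge bits in the low half and lo-edge
+ * bits in the high half; fold them into the 32-bit hi/lo masks nvkm
+ * expects.
+ */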
+static void
+ga102_gpio_intr_stat(struct nvkm_gpio *gpio, u32 *hi, u32 *lo)
+{
+ struct nvkm_device *device = gpio->subdev.device;
+ u32 intr0 = nvkm_rd32(device, 0x021640);
+ u32 intr1 = nvkm_rd32(device, 0x02164c);
+ u32 stat0 = nvkm_rd32(device, 0x021648) & intr0;
+ u32 stat1 = nvkm_rd32(device, 0x021654) & intr1;
+ *lo = (stat1 & 0xffff0000) | (stat0 >> 16);
+ *hi = (stat1 << 16) | (stat0 & 0x0000ffff);
+ nvkm_wr32(device, 0x021640, intr0);
+ nvkm_wr32(device, 0x02164c, intr1);
+}
+
+static void
+ga102_gpio_intr_mask(struct nvkm_gpio *gpio, u32 type, u32 mask, u32 data)
+{
+ struct nvkm_device *device = gpio->subdev.device;
+ u32 inte0 = nvkm_rd32(device, 0x021648);
+ u32 inte1 = nvkm_rd32(device, 0x021654);
+ if (type & NVKM_GPIO_LO)
+ inte0 = (inte0 & ~(mask << 16)) | (data << 16);
+ if (type & NVKM_GPIO_HI)
+ inte0 = (inte0 & ~(mask & 0xffff)) | (data & 0xffff);
+ mask >>= 16;
+ data >>= 16;
+ if (type & NVKM_GPIO_LO)
+ inte1 = (inte1 & ~(mask << 16)) | (data << 16);
+ if (type & NVKM_GPIO_HI)
+ inte1 = (inte1 & ~mask) | data;
+ nvkm_wr32(device, 0x021648, inte0);
+ nvkm_wr32(device, 0x021654, inte1);
+}
+
+static const struct nvkm_gpio_func
+ga102_gpio = {
+ .lines = 32,
+ .intr_stat = ga102_gpio_intr_stat,
+ .intr_mask = ga102_gpio_intr_mask,
+ .drive = ga102_gpio_drive,
+ .sense = ga102_gpio_sense,
+ .reset = ga102_gpio_reset,
+};
+
+int
+ga102_gpio_new(struct nvkm_device *device, int index, struct nvkm_gpio **pgpio)
+{
+ return nvkm_gpio_new_(&ga102_gpio, device, index, pgpio);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
index 723d0284caef..819703913a00 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
@@ -7,6 +7,7 @@ nvkm-y += nvkm/subdev/i2c/g94.o
nvkm-y += nvkm/subdev/i2c/gf117.o
nvkm-y += nvkm/subdev/i2c/gf119.o
nvkm-y += nvkm/subdev/i2c/gk104.o
+nvkm-y += nvkm/subdev/i2c/gk110.o
nvkm-y += nvkm/subdev/i2c/gm200.o
nvkm-y += nvkm/subdev/i2c/pad.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
index 30b48896965e..f920eabf8628 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
@@ -3,6 +3,13 @@
#define __NVKM_I2C_AUX_H__
#include "pad.h"
+static inline void
+nvkm_i2c_aux_autodpcd(struct nvkm_i2c *i2c, int aux, bool enable)
+{
+ if (i2c->func->aux_autodpcd)
+		i2c->func->aux_autodpcd(i2c, aux, enable);
+}
+
struct nvkm_i2c_aux_func {
bool address_only;
int (*xfer)(struct nvkm_i2c_aux *, bool retry, u8 type,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
index db7769cb33eb..47068f6f9c55 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
@@ -77,7 +77,8 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
u8 type, u32 addr, u8 *data, u8 *size)
{
struct g94_i2c_aux *aux = g94_i2c_aux(obj);
- struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
+ struct nvkm_i2c *i2c = aux->base.pad->i2c;
+ struct nvkm_device *device = i2c->subdev.device;
const u32 base = aux->ch * 0x50;
u32 ctrl, stat, timeout, retries = 0;
u32 xbuf[4] = {};
@@ -96,6 +97,8 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
goto out;
}
+ nvkm_i2c_aux_autodpcd(i2c, aux->ch, false);
+
if (!(type & 1)) {
memcpy(xbuf, data, *size);
for (i = 0; i < 16; i += 4) {
@@ -128,7 +131,7 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
if (!timeout--) {
AUX_ERR(&aux->base, "timeout %08x", ctrl);
ret = -EIO;
- goto out;
+ goto out_err;
}
} while (ctrl & 0x00010000);
ret = 0;
@@ -154,7 +157,8 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
memcpy(data, xbuf, *size);
*size = stat & 0x0000001f;
}
-
+out_err:
+ nvkm_i2c_aux_autodpcd(i2c, aux->ch, true);
out:
g94_i2c_aux_fini(aux);
return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
index edb6148cbca0..8bd1d442e465 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
@@ -33,7 +33,7 @@ static void
gm200_i2c_aux_fini(struct gm200_i2c_aux *aux)
{
struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
- nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00310000, 0x00000000);
+ nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00710000, 0x00000000);
}
static int
@@ -54,10 +54,10 @@ gm200_i2c_aux_init(struct gm200_i2c_aux *aux)
AUX_ERR(&aux->base, "begin idle timeout %08x", ctrl);
return -EBUSY;
}
- } while (ctrl & 0x03010000);
+ } while (ctrl & 0x07010000);
/* set some magic, and wait up to 1ms for it to appear */
- nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00300000, ureq);
+ nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00700000, ureq);
timeout = 1000;
do {
ctrl = nvkm_rd32(device, 0x00d954 + (aux->ch * 0x50));
@@ -67,7 +67,7 @@ gm200_i2c_aux_init(struct gm200_i2c_aux *aux)
gm200_i2c_aux_fini(aux);
return -EBUSY;
}
- } while ((ctrl & 0x03000000) != urep);
+ } while ((ctrl & 0x07000000) != urep);
return 0;
}
@@ -77,7 +77,8 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
u8 type, u32 addr, u8 *data, u8 *size)
{
struct gm200_i2c_aux *aux = gm200_i2c_aux(obj);
- struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
+ struct nvkm_i2c *i2c = aux->base.pad->i2c;
+ struct nvkm_device *device = i2c->subdev.device;
const u32 base = aux->ch * 0x50;
u32 ctrl, stat, timeout, retries = 0;
u32 xbuf[4] = {};
@@ -96,6 +97,8 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
goto out;
}
+ nvkm_i2c_aux_autodpcd(i2c, aux->ch, false);
+
if (!(type & 1)) {
memcpy(xbuf, data, *size);
for (i = 0; i < 16; i += 4) {
@@ -128,7 +131,7 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
if (!timeout--) {
AUX_ERR(&aux->base, "timeout %08x", ctrl);
ret = -EIO;
- goto out;
+ goto out_err;
}
} while (ctrl & 0x00010000);
ret = 0;
@@ -155,6 +158,8 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
*size = stat & 0x0000001f;
}
+out_err:
+ nvkm_i2c_aux_autodpcd(i2c, aux->ch, true);
out:
gm200_i2c_aux_fini(aux);
return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk110.c
new file mode 100644
index 000000000000..8e3bfa1af52a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk110.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+#include "pad.h"
+
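+/* GK110 introduced a control to disable the hardware's automatic DPCD
+ * reads; it gets turned off around manual AUX transfers (see the
+ * aux_xfer implementations) and restored afterwards.
+ */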
+static void
+gk110_aux_autodpcd(struct nvkm_i2c *i2c, int aux, bool enable)
+{
+ nvkm_mask(i2c->subdev.device, 0x00e4f8 + (aux * 0x50), 0x00010000, enable << 16);
+}
+
+static const struct nvkm_i2c_func
+gk110_i2c = {
+ .pad_x_new = gf119_i2c_pad_x_new,
+ .pad_s_new = gf119_i2c_pad_s_new,
+ .aux = 4,
+ .aux_stat = gk104_aux_stat,
+ .aux_mask = gk104_aux_mask,
+ .aux_autodpcd = gk110_aux_autodpcd,
+};
+
+int
+gk110_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
+{
+ return nvkm_i2c_new_(&gk110_i2c, device, index, pi2c);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c
index a23c5f315221..7b2375bff8a9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c
@@ -24,6 +24,12 @@
#include "priv.h"
#include "pad.h"
+static void
+gm200_aux_autodpcd(struct nvkm_i2c *i2c, int aux, bool enable)
+{
+ nvkm_mask(i2c->subdev.device, 0x00d968 + (aux * 0x50), 0x00010000, enable << 16);
+}
+
static const struct nvkm_i2c_func
gm200_i2c = {
.pad_x_new = gf119_i2c_pad_x_new,
@@ -31,6 +37,7 @@ gm200_i2c = {
.aux = 8,
.aux_stat = gk104_aux_stat,
.aux_mask = gk104_aux_mask,
+ .aux_autodpcd = gm200_aux_autodpcd,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h
index 461016814f4f..44b7bb7d4777 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_I2C_PAD_H__
#define __NVKM_I2C_PAD_H__
-#include <subdev/i2c.h>
+#include "priv.h"
struct nvkm_i2c_pad {
const struct nvkm_i2c_pad_func *func;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h
index bd86bc298ebe..e35f6036fcfc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h
@@ -23,6 +23,10 @@ struct nvkm_i2c_func {
/* mask on/off interrupt types for a given set of auxch
*/
void (*aux_mask)(struct nvkm_i2c *, u32, u32, u32);
+
+ /* enable/disable HW-initiated DPCD reads
+ */
+ void (*aux_autodpcd)(struct nvkm_i2c *, int aux, bool enable);
};
void g94_aux_stat(struct nvkm_i2c *, u32 *, u32 *, u32 *, u32 *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
index 2340040942c9..1115376bc85f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
@@ -22,6 +22,7 @@
* Authors: Ben Skeggs
*/
#include "priv.h"
+#include <subdev/timer.h>
static void
gf100_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
@@ -31,7 +32,6 @@ gf100_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
u32 data = nvkm_rd32(device, 0x122124 + (i * 0x0400));
u32 stat = nvkm_rd32(device, 0x122128 + (i * 0x0400));
nvkm_debug(ibus, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
- nvkm_mask(device, 0x122128 + (i * 0x0400), 0x00000200, 0x00000000);
}
static void
@@ -42,7 +42,6 @@ gf100_ibus_intr_rop(struct nvkm_subdev *ibus, int i)
u32 data = nvkm_rd32(device, 0x124124 + (i * 0x0400));
u32 stat = nvkm_rd32(device, 0x124128 + (i * 0x0400));
nvkm_debug(ibus, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
- nvkm_mask(device, 0x124128 + (i * 0x0400), 0x00000200, 0x00000000);
}
static void
@@ -53,7 +52,6 @@ gf100_ibus_intr_gpc(struct nvkm_subdev *ibus, int i)
u32 data = nvkm_rd32(device, 0x128124 + (i * 0x0400));
u32 stat = nvkm_rd32(device, 0x128128 + (i * 0x0400));
nvkm_debug(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
- nvkm_mask(device, 0x128128 + (i * 0x0400), 0x00000200, 0x00000000);
}
void
@@ -90,6 +88,12 @@ gf100_ibus_intr(struct nvkm_subdev *ibus)
intr1 &= ~stat;
}
}
+
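+	/* Acknowledge the interrupt at the privring and wait (up to 2s)
+	 * for the ack to be processed, rather than clearing the per-unit
+	 * status bits above.
+	 */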
+ nvkm_mask(device, 0x121c4c, 0x0000003f, 0x00000002);
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x121c4c) & 0x0000003f))
+ break;
+ );
}
static int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
index f3915f85838e..22e487b493ad 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
@@ -22,6 +22,7 @@
* Authors: Ben Skeggs
*/
#include "priv.h"
+#include <subdev/timer.h>
static void
gk104_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
@@ -31,7 +32,6 @@ gk104_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
u32 data = nvkm_rd32(device, 0x122124 + (i * 0x0800));
u32 stat = nvkm_rd32(device, 0x122128 + (i * 0x0800));
nvkm_debug(ibus, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
- nvkm_mask(device, 0x122128 + (i * 0x0800), 0x00000200, 0x00000000);
}
static void
@@ -42,7 +42,6 @@ gk104_ibus_intr_rop(struct nvkm_subdev *ibus, int i)
u32 data = nvkm_rd32(device, 0x124124 + (i * 0x0800));
u32 stat = nvkm_rd32(device, 0x124128 + (i * 0x0800));
nvkm_debug(ibus, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
- nvkm_mask(device, 0x124128 + (i * 0x0800), 0x00000200, 0x00000000);
}
static void
@@ -53,7 +52,6 @@ gk104_ibus_intr_gpc(struct nvkm_subdev *ibus, int i)
u32 data = nvkm_rd32(device, 0x128124 + (i * 0x0800));
u32 stat = nvkm_rd32(device, 0x128128 + (i * 0x0800));
nvkm_debug(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
- nvkm_mask(device, 0x128128 + (i * 0x0800), 0x00000200, 0x00000000);
}
void
@@ -90,6 +88,12 @@ gk104_ibus_intr(struct nvkm_subdev *ibus)
intr1 &= ~stat;
}
}
+
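+	/* Same privring ack sequence as gf100, at this chipset's register
+	 * offset.
+	 */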
+ nvkm_mask(device, 0x12004c, 0x0000003f, 0x00000002);
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x12004c) & 0x0000003f))
+ break;
+ );
}
static int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
index 2585ef07532a..ac2b34e9ac6a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
@@ -14,3 +14,4 @@ nvkm-y += nvkm/subdev/mc/gk20a.o
nvkm-y += nvkm/subdev/mc/gp100.o
nvkm-y += nvkm/subdev/mc/gp10b.o
nvkm-y += nvkm/subdev/mc/tu102.o
+nvkm-y += nvkm/subdev/mc/ga100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/ga100.c
new file mode 100644
index 000000000000..967eb3af11eb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/ga100.c
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+static void
+ga100_mc_intr_unarm(struct nvkm_mc *mc)
+{
+ nvkm_wr32(mc->subdev.device, 0xb81610, 0x00000004);
+}
+
+static void
+ga100_mc_intr_rearm(struct nvkm_mc *mc)
+{
+ nvkm_wr32(mc->subdev.device, 0xb81608, 0x00000004);
+}
+
+static void
+ga100_mc_intr_mask(struct nvkm_mc *mc, u32 mask, u32 intr)
+{
+ nvkm_wr32(mc->subdev.device, 0xb81210, mask & intr);
+ nvkm_wr32(mc->subdev.device, 0xb81410, mask & ~(mask & intr));
+}
+
+static u32
+ga100_mc_intr_stat(struct nvkm_mc *mc)
+{
+ u32 intr_top = nvkm_rd32(mc->subdev.device, 0xb81600), intr = 0x00000000;
+ if (intr_top & 0x00000004)
+ intr = nvkm_mask(mc->subdev.device, 0xb81010, 0x00000000, 0x00000000);
+ return intr;
+}
+
+static void
+ga100_mc_init(struct nvkm_mc *mc)
+{
+ nv50_mc_init(mc);
+ nvkm_wr32(mc->subdev.device, 0xb81210, 0xffffffff);
+}
+
+static const struct nvkm_mc_func
+ga100_mc = {
+ .init = ga100_mc_init,
+ .intr = gp100_mc_intr,
+ .intr_unarm = ga100_mc_intr_unarm,
+ .intr_rearm = ga100_mc_intr_rearm,
+ .intr_mask = ga100_mc_intr_mask,
+ .intr_stat = ga100_mc_intr_stat,
+ .reset = gk104_mc_reset,
+};
+
+int
+ga100_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+{
+ return nvkm_mc_new_(&ga100_mc, device, index, pmc);
+}
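The new ga100 code arms and disarms its top-level interrupt through what looks like a set/clear register pair: intr_rearm writes bit 2 to 0xb81608 and intr_unarm writes the same bit to 0xb81610, and intr_mask likewise splits the bits to enable and the bits to disable across 0xb81210 and 0xb81410 (semantics inferred from usage; the registers are otherwise undocumented). The benefit over nvkm_mask()-style read-modify-write is that each write is self-contained, so two contexts can flip disjoint bits without racing. In miniature:

#include <stdint.h>
#include <stdio.h>

static uint32_t intr_en;	/* models the hardware enable state */

/* Software read-modify-write: a concurrent writer can be lost. */
static void rmw_enable(uint32_t bits)
{
	uint32_t v = intr_en;	/* read */
	v |= bits;		/* modify */
	intr_en = v;		/* write back the whole register */
}

/* Set/clear pair: one write, no read; models 0xb81608/0xb81610. */
static void set_write(uint32_t bits)   { intr_en |= bits;  }
static void clear_write(uint32_t bits) { intr_en &= ~bits; }

int main(void)
{
	rmw_enable(0x1);
	set_write(0x4);		/* like intr_rearm */
	clear_write(0x4);	/* like intr_unarm */
	printf("enable mask: %#x\n", intr_en);
	return 0;
}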
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
index de91e9a26172..6d5212ae2fd5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
@@ -316,9 +316,9 @@ nvkm_mmu_vram(struct nvkm_mmu *mmu)
{
struct nvkm_device *device = mmu->subdev.device;
struct nvkm_mm *mm = &device->fb->ram->vram;
- const u32 sizeN = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NORMAL);
- const u32 sizeU = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NOMAP);
- const u32 sizeM = nvkm_mm_heap_size(mm, NVKM_RAM_MM_MIXED);
+ const u64 sizeN = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NORMAL);
+ const u64 sizeU = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NOMAP);
+ const u64 sizeM = nvkm_mm_heap_size(mm, NVKM_RAM_MM_MIXED);
u8 type = NVKM_MEM_KIND * !!mmu->func->kind;
u8 heap = NVKM_MEM_VRAM;
int heapM, heapN, heapU;
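The u32 to u64 widening matters because these heap sizes are counts of VRAM allocation units that are shifted into bytes further down; with 32-bit operands that conversion wraps once a heap reaches 4 GiB, which shipping boards now exceed. The truncation in two lines:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t blocks = 2 * 1024 * 1024;		/* 8 GiB in 4 KiB units */
	uint32_t bytes32 = (uint32_t)(blocks << 12);	/* wraps to 0 */
	uint64_t bytes64 = blocks << 12;		/* 8589934592 */

	printf("u32: %" PRIu32 ", u64: %" PRIu64 "\n", bytes32, bytes64);
	return 0;
}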
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 68c271f4250b..30d299ca8795 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -564,9 +564,8 @@ int omap_gem_mmap_obj(struct drm_gem_object *obj,
* address_space (so unmap_mapping_range does what we want,
* in particular in the case of mmap'd dmabufs)
*/
- fput(vma->vm_file);
vma->vm_pgoff = 0;
- vma->vm_file = get_file(obj->filp);
+ vma_set_file(vma, obj->filp);
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
}
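vma_set_file() replaces the open-coded fput()/get_file() pair that drivers used when repointing a mapping at a GEM object's backing file. The point of the helper is ordering: pin the new file before dropping the old one, so the vma never transiently owns zero references. A sketch of its likely shape (see mm/mmap.c for the canonical version):

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>

/* Swap a vma's backing file: take the new ref, then drop the old. */
static void vma_set_file_sketch(struct vm_area_struct *vma, struct file *file)
{
	get_file(file);			/* pin the replacement first */
	swap(vma->vm_file, file);	/* vma now references the new file */
	fput(file);			/* release the displaced file */
}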
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
index 210e70da3a15..6b4e97bfd46e 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
@@ -23,76 +23,254 @@
#include "panel-samsung-s6e63m0.h"
/* Manufacturer Command Set */
-#define MCS_ELVSS_ON 0xb1
-#define MCS_MIECTL1 0xc0
-#define MCS_BCMODE 0xc1
+#define MCS_ELVSS_ON 0xb1
+#define MCS_TEMP_SWIRE 0xb2
+#define MCS_MIECTL1 0xc0
+#define MCS_BCMODE 0xc1
#define MCS_ERROR_CHECK 0xd5
#define MCS_READ_ID1 0xda
#define MCS_READ_ID2 0xdb
#define MCS_READ_ID3 0xdc
#define MCS_LEVEL_2_KEY 0xf0
#define MCS_MTP_KEY 0xf1
-#define MCS_DISCTL 0xf2
-#define MCS_SRCCTL 0xf6
-#define MCS_IFCTL 0xf7
-#define MCS_PANELCTL 0xF8
-#define MCS_PGAMMACTL 0xfa
+#define MCS_DISCTL 0xf2
+#define MCS_SRCCTL 0xf6
+#define MCS_IFCTL 0xf7
+#define MCS_PANELCTL 0xf8
+#define MCS_PGAMMACTL 0xfa
#define S6E63M0_LCD_ID_VALUE_M2 0xA4
#define S6E63M0_LCD_ID_VALUE_SM2 0xB4
#define S6E63M0_LCD_ID_VALUE_SM2_1 0xB6
-#define NUM_GAMMA_LEVELS 11
-#define GAMMA_TABLE_COUNT 23
+#define NUM_GAMMA_LEVELS 28
+#define GAMMA_TABLE_COUNT 23
-#define MAX_BRIGHTNESS (NUM_GAMMA_LEVELS - 1)
+#define MAX_BRIGHTNESS (NUM_GAMMA_LEVELS - 1)
/* array of gamma tables for gamma value 2.2 */
static u8 const s6e63m0_gamma_22[NUM_GAMMA_LEVELS][GAMMA_TABLE_COUNT] = {
- { MCS_PGAMMACTL, 0x00,
- 0x18, 0x08, 0x24, 0x78, 0xEC, 0x3D, 0xC8,
- 0xC2, 0xB6, 0xC4, 0xC7, 0xB6, 0xD5, 0xD7,
- 0xCC, 0x00, 0x39, 0x00, 0x36, 0x00, 0x51 },
- { MCS_PGAMMACTL, 0x00,
- 0x18, 0x08, 0x24, 0x73, 0x4A, 0x3D, 0xC0,
- 0xC2, 0xB1, 0xBB, 0xBE, 0xAC, 0xCE, 0xCF,
- 0xC5, 0x00, 0x5D, 0x00, 0x5E, 0x00, 0x82 },
- { MCS_PGAMMACTL, 0x00,
- 0x18, 0x08, 0x24, 0x70, 0x51, 0x3E, 0xBF,
- 0xC1, 0xAF, 0xB9, 0xBC, 0xAB, 0xCC, 0xCC,
- 0xC2, 0x00, 0x65, 0x00, 0x67, 0x00, 0x8D },
- { MCS_PGAMMACTL, 0x00,
- 0x18, 0x08, 0x24, 0x6C, 0x54, 0x3A, 0xBC,
- 0xBF, 0xAC, 0xB7, 0xBB, 0xA9, 0xC9, 0xC9,
- 0xBE, 0x00, 0x71, 0x00, 0x73, 0x00, 0x9E },
- { MCS_PGAMMACTL, 0x00,
- 0x18, 0x08, 0x24, 0x69, 0x54, 0x37, 0xBB,
- 0xBE, 0xAC, 0xB4, 0xB7, 0xA6, 0xC7, 0xC8,
- 0xBC, 0x00, 0x7B, 0x00, 0x7E, 0x00, 0xAB },
- { MCS_PGAMMACTL, 0x00,
- 0x18, 0x08, 0x24, 0x66, 0x55, 0x34, 0xBA,
- 0xBD, 0xAB, 0xB1, 0xB5, 0xA3, 0xC5, 0xC6,
- 0xB9, 0x00, 0x85, 0x00, 0x88, 0x00, 0xBA },
- { MCS_PGAMMACTL, 0x00,
- 0x18, 0x08, 0x24, 0x63, 0x53, 0x31, 0xB8,
- 0xBC, 0xA9, 0xB0, 0xB5, 0xA2, 0xC4, 0xC4,
- 0xB8, 0x00, 0x8B, 0x00, 0x8E, 0x00, 0xC2 },
- { MCS_PGAMMACTL, 0x00,
- 0x18, 0x08, 0x24, 0x62, 0x54, 0x30, 0xB9,
- 0xBB, 0xA9, 0xB0, 0xB3, 0xA1, 0xC1, 0xC3,
- 0xB7, 0x00, 0x91, 0x00, 0x95, 0x00, 0xDA },
- { MCS_PGAMMACTL, 0x00,
- 0x18, 0x08, 0x24, 0x66, 0x58, 0x34, 0xB6,
- 0xBA, 0xA7, 0xAF, 0xB3, 0xA0, 0xC1, 0xC2,
- 0xB7, 0x00, 0x97, 0x00, 0x9A, 0x00, 0xD1 },
- { MCS_PGAMMACTL, 0x00,
- 0x18, 0x08, 0x24, 0x64, 0x56, 0x33, 0xB6,
- 0xBA, 0xA8, 0xAC, 0xB1, 0x9D, 0xC1, 0xC1,
- 0xB7, 0x00, 0x9C, 0x00, 0x9F, 0x00, 0xD6 },
- { MCS_PGAMMACTL, 0x00,
- 0x18, 0x08, 0x24, 0x5f, 0x50, 0x2d, 0xB6,
- 0xB9, 0xA7, 0xAd, 0xB1, 0x9f, 0xbe, 0xC0,
- 0xB5, 0x00, 0xa0, 0x00, 0xa4, 0x00, 0xdb },
+ /* 30 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0xA1, 0x51, 0x7B, 0xCE,
+ 0xCB, 0xC2, 0xC7, 0xCB, 0xBC, 0xDA, 0xDD,
+ 0xD3, 0x00, 0x53, 0x00, 0x52, 0x00, 0x6F, },
+ /* 40 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0x97, 0x58, 0x71, 0xCC,
+ 0xCB, 0xC0, 0xC5, 0xC9, 0xBA, 0xD9, 0xDC,
+ 0xD1, 0x00, 0x5B, 0x00, 0x5A, 0x00, 0x7A, },
+ /* 50 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0x96, 0x58, 0x72, 0xCB,
+ 0xCA, 0xBF, 0xC6, 0xC9, 0xBA, 0xD6, 0xD9,
+ 0xCD, 0x00, 0x61, 0x00, 0x61, 0x00, 0x83, },
+ /* 60 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0x91, 0x5E, 0x6E, 0xC9,
+ 0xC9, 0xBD, 0xC4, 0xC9, 0xB8, 0xD3, 0xD7,
+ 0xCA, 0x00, 0x69, 0x00, 0x67, 0x00, 0x8D, },
+ /* 70 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0x8E, 0x62, 0x6B, 0xC7,
+ 0xC9, 0xBB, 0xC3, 0xC7, 0xB7, 0xD3, 0xD7,
+ 0xCA, 0x00, 0x6E, 0x00, 0x6C, 0x00, 0x94, },
+ /* 80 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0x89, 0x68, 0x65, 0xC9,
+ 0xC9, 0xBC, 0xC1, 0xC5, 0xB6, 0xD2, 0xD5,
+ 0xC9, 0x00, 0x73, 0x00, 0x72, 0x00, 0x9A, },
+ /* 90 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0x89, 0x69, 0x64, 0xC7,
+ 0xC8, 0xBB, 0xC0, 0xC5, 0xB4, 0xD2, 0xD5,
+ 0xC9, 0x00, 0x77, 0x00, 0x76, 0x00, 0xA0, },
+ /* 100 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0x86, 0x69, 0x60, 0xC6,
+ 0xC8, 0xBA, 0xBF, 0xC4, 0xB4, 0xD0, 0xD4,
+ 0xC6, 0x00, 0x7C, 0x00, 0x7A, 0x00, 0xA7, },
+ /* 110 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0x86, 0x6A, 0x60, 0xC5,
+ 0xC7, 0xBA, 0xBD, 0xC3, 0xB2, 0xD0, 0xD4,
+ 0xC5, 0x00, 0x80, 0x00, 0x7E, 0x00, 0xAD, },
+ /* 120 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0x82, 0x6B, 0x5E, 0xC4,
+ 0xC8, 0xB9, 0xBD, 0xC2, 0xB1, 0xCE, 0xD2,
+ 0xC4, 0x00, 0x85, 0x00, 0x82, 0x00, 0xB3, },
+ /* 130 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0x8C, 0x6C, 0x60, 0xC3,
+ 0xC7, 0xB9, 0xBC, 0xC1, 0xAF, 0xCE, 0xD2,
+ 0xC3, 0x00, 0x88, 0x00, 0x86, 0x00, 0xB8, },
+ /* 140 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0x80, 0x6C, 0x5F, 0xC1,
+ 0xC6, 0xB7, 0xBC, 0xC1, 0xAE, 0xCD, 0xD0,
+ 0xC2, 0x00, 0x8C, 0x00, 0x8A, 0x00, 0xBE, },
+ /* 150 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0x80, 0x6E, 0x5F, 0xC1,
+ 0xC6, 0xB6, 0xBC, 0xC0, 0xAE, 0xCC, 0xD0,
+ 0xC2, 0x00, 0x8F, 0x00, 0x8D, 0x00, 0xC2, },
+ /* 160 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0x7F, 0x6E, 0x5F, 0xC0,
+ 0xC6, 0xB5, 0xBA, 0xBF, 0xAD, 0xCB, 0xCF,
+ 0xC0, 0x00, 0x94, 0x00, 0x91, 0x00, 0xC8, },
+ /* 170 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0x7C, 0x6D, 0x5C, 0xC0,
+ 0xC6, 0xB4, 0xBB, 0xBE, 0xAD, 0xCA, 0xCF,
+ 0xC0, 0x00, 0x96, 0x00, 0x94, 0x00, 0xCC, },
+ /* 180 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0x7B, 0x6D, 0x5B, 0xC0,
+ 0xC5, 0xB3, 0xBA, 0xBE, 0xAD, 0xCA, 0xCE,
+ 0xBF, 0x00, 0x99, 0x00, 0x97, 0x00, 0xD0, },
+ /* 190 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0x7A, 0x6D, 0x59, 0xC1,
+ 0xC5, 0xB4, 0xB8, 0xBD, 0xAC, 0xC9, 0xCE,
+ 0xBE, 0x00, 0x9D, 0x00, 0x9A, 0x00, 0xD5, },
+ /* 200 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0x79, 0x6D, 0x58, 0xC1,
+ 0xC4, 0xB4, 0xB6, 0xBD, 0xAA, 0xCA, 0xCD,
+ 0xBE, 0x00, 0x9F, 0x00, 0x9D, 0x00, 0xD9, },
+ /* 210 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0x79, 0x6D, 0x57, 0xC0,
+ 0xC4, 0xB4, 0xB7, 0xBD, 0xAA, 0xC8, 0xCC,
+ 0xBD, 0x00, 0xA2, 0x00, 0xA0, 0x00, 0xDD, },
+ /* 220 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0x78, 0x6F, 0x58, 0xBF,
+ 0xC4, 0xB3, 0xB5, 0xBB, 0xA9, 0xC8, 0xCC,
+ 0xBC, 0x00, 0xA6, 0x00, 0xA3, 0x00, 0xE2, },
+ /* 230 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0x75, 0x6F, 0x56, 0xBF,
+ 0xC3, 0xB2, 0xB6, 0xBB, 0xA8, 0xC7, 0xCB,
+ 0xBC, 0x00, 0xA8, 0x00, 0xA6, 0x00, 0xE6, },
+ /* 240 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0x76, 0x6F, 0x56, 0xC0,
+ 0xC3, 0xB2, 0xB5, 0xBA, 0xA8, 0xC6, 0xCB,
+ 0xBB, 0x00, 0xAA, 0x00, 0xA8, 0x00, 0xE9, },
+ /* 250 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0x74, 0x6D, 0x54, 0xBF,
+ 0xC3, 0xB2, 0xB4, 0xBA, 0xA7, 0xC6, 0xCA,
+ 0xBA, 0x00, 0xAD, 0x00, 0xAB, 0x00, 0xED, },
+ /* 260 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0x74, 0x6E, 0x54, 0xBD,
+ 0xC2, 0xB0, 0xB5, 0xBA, 0xA7, 0xC5, 0xC9,
+ 0xBA, 0x00, 0xB0, 0x00, 0xAE, 0x00, 0xF1, },
+ /* 270 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0x71, 0x6C, 0x50, 0xBD,
+ 0xC3, 0xB0, 0xB4, 0xB8, 0xA6, 0xC6, 0xC9,
+ 0xBB, 0x00, 0xB2, 0x00, 0xB1, 0x00, 0xF4, },
+ /* 280 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0x6E, 0x6C, 0x4D, 0xBE,
+ 0xC3, 0xB1, 0xB3, 0xB8, 0xA5, 0xC6, 0xC8,
+ 0xBB, 0x00, 0xB4, 0x00, 0xB3, 0x00, 0xF7, },
+ /* 290 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0x71, 0x70, 0x50, 0xBD,
+ 0xC1, 0xB0, 0xB2, 0xB8, 0xA4, 0xC6, 0xC7,
+ 0xBB, 0x00, 0xB6, 0x00, 0xB6, 0x00, 0xFA, },
+ /* 300 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0x70, 0x6E, 0x4E, 0xBC,
+ 0xC0, 0xAF, 0xB3, 0xB8, 0xA5, 0xC5, 0xC7,
+ 0xBB, 0x00, 0xB9, 0x00, 0xB8, 0x00, 0xFC, },
+};
+
+#define NUM_ACL_LEVELS 7
+#define ACL_TABLE_COUNT 28
+
+static u8 const s6e63m0_acl[NUM_ACL_LEVELS][ACL_TABLE_COUNT] = {
+ /* NULL ACL */
+ { MCS_BCMODE,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00 },
+ /* 40P ACL */
+ { MCS_BCMODE,
+ 0x4D, 0x96, 0x1D, 0x00, 0x00, 0x01, 0xDF, 0x00,
+ 0x00, 0x03, 0x1F, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0x06, 0x0C, 0x11, 0x16, 0x1C, 0x21, 0x26,
+ 0x2B, 0x31, 0x36 },
+ /* 43P ACL */
+ { MCS_BCMODE,
+ 0x4D, 0x96, 0x1D, 0x00, 0x00, 0x01, 0xDF, 0x00,
+ 0x00, 0x03, 0x1F, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0x07, 0x0C, 0x12, 0x18, 0x1E, 0x23, 0x29,
+ 0x2F, 0x34, 0x3A },
+ /* 45P ACL */
+ { MCS_BCMODE,
+ 0x4D, 0x96, 0x1D, 0x00, 0x00, 0x01, 0xDF, 0x00,
+ 0x00, 0x03, 0x1F, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0x07, 0x0D, 0x13, 0x19, 0x1F, 0x25, 0x2B,
+ 0x31, 0x37, 0x3D },
+ /* 47P ACL */
+ { MCS_BCMODE,
+ 0x4D, 0x96, 0x1D, 0x00, 0x00, 0x01, 0xDF, 0x00,
+ 0x00, 0x03, 0x1F, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0x07, 0x0E, 0x14, 0x1B, 0x21, 0x27, 0x2E,
+ 0x34, 0x3B, 0x41 },
+ /* 48P ACL */
+ { MCS_BCMODE,
+ 0x4D, 0x96, 0x1D, 0x00, 0x00, 0x01, 0xDF, 0x00,
+ 0x00, 0x03, 0x1F, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0x08, 0x0E, 0x15, 0x1B, 0x22, 0x29, 0x2F,
+ 0x36, 0x3C, 0x43 },
+ /* 50P ACL */
+ { MCS_BCMODE,
+ 0x4D, 0x96, 0x1D, 0x00, 0x00, 0x01, 0xDF, 0x00,
+ 0x00, 0x03, 0x1F, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0x08, 0x0F, 0x16, 0x1D, 0x24, 0x2A, 0x31,
+ 0x38, 0x3F, 0x46 },
+};
+
+/* This tells us which ACL level goes with which gamma */
+static u8 const s6e63m0_acl_per_gamma[NUM_GAMMA_LEVELS] = {
+ /* 30 - 60 cd: ACL off/NULL */
+ 0, 0, 0, 0,
+ /* 70 - 250 cd: 40P ACL */
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ /* 260 - 300 cd: 50P ACL */
+ 6, 6, 6, 6, 6,
+};
+
+/* The ELVSS backlight regulator has 5 levels */
+#define S6E63M0_ELVSS_LEVELS 5
+
+static u8 const s6e63m0_elvss_offsets[S6E63M0_ELVSS_LEVELS] = {
+ 0x00, /* not set */
+ 0x0D, /* 30 cd - 100 cd */
+ 0x09, /* 110 cd - 160 cd */
+ 0x07, /* 170 cd - 200 cd */
+ 0x00, /* 210 cd - 300 cd */
+};
+
+/* This tells us which ELVSS level goes with which gamma */
+static u8 const s6e63m0_elvss_per_gamma[NUM_GAMMA_LEVELS] = {
+ /* 30 - 100 cd */
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ /* 110 - 160 cd */
+ 2, 2, 2, 2, 2, 2,
+ /* 170 - 200 cd */
+ 3, 3, 3, 3,
+ /* 210 - 300 cd */
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
};
struct s6e63m0 {
@@ -102,6 +280,7 @@ struct s6e63m0 {
struct drm_panel panel;
struct backlight_device *bl_dev;
u8 lcd_type;
+ u8 elvss_pulse;
struct regulator_bulk_data supplies[2];
struct gpio_desc *reset_gpio;
@@ -187,17 +366,25 @@ static int s6e63m0_check_lcd_type(struct s6e63m0 *ctx)
dev_info(ctx->dev, "MTP ID: %02x %02x %02x\n", id1, id2, id3);
- /* We attempt to detect what panel is mounted on the controller */
+ /*
+ * We attempt to detect what panel is mounted on the controller.
+ * The third ID byte represents the desired ELVSS pulse for
+ * some displays.
+ */
switch (id2) {
case S6E63M0_LCD_ID_VALUE_M2:
dev_info(ctx->dev, "detected LCD panel AMS397GE MIPI M2\n");
+ ctx->elvss_pulse = id3;
break;
case S6E63M0_LCD_ID_VALUE_SM2:
case S6E63M0_LCD_ID_VALUE_SM2_1:
dev_info(ctx->dev, "detected LCD panel AMS397GE MIPI SM2\n");
+ ctx->elvss_pulse = id3;
break;
default:
dev_info(ctx->dev, "unknown LCD panel type %02x\n", id2);
+ /* Default ELVSS pulse level */
+ ctx->elvss_pulse = 0x16;
break;
}
@@ -210,7 +397,7 @@ static void s6e63m0_init(struct s6e63m0 *ctx)
{
s6e63m0_dcs_write_seq_static(ctx, MCS_PANELCTL,
0x01, 0x27, 0x27, 0x07, 0x07, 0x54, 0x9f,
- 0x63, 0x86, 0x1a, 0x33, 0x0d, 0x00, 0x00);
+ 0x63, 0x8f, 0x1a, 0x33, 0x0d, 0x00, 0x00);
s6e63m0_dcs_write_seq_static(ctx, MCS_DISCTL,
0x02, 0x03, 0x1c, 0x10, 0x10);
@@ -226,9 +413,8 @@ static void s6e63m0_init(struct s6e63m0 *ctx)
0x01);
s6e63m0_dcs_write_seq_static(ctx, MCS_SRCCTL,
- 0x00, 0x8c, 0x07);
- s6e63m0_dcs_write_seq_static(ctx, 0xb3,
- 0xc);
+ 0x00, 0x8e, 0x07);
+ s6e63m0_dcs_write_seq_static(ctx, 0xb3, 0x6c);
s6e63m0_dcs_write_seq_static(ctx, 0xb5,
0x2c, 0x12, 0x0c, 0x0a, 0x10, 0x0e, 0x17,
@@ -247,9 +433,12 @@ static void s6e63m0_init(struct s6e63m0 *ctx)
0x13, 0x1f, 0x1a, 0x2a, 0x24, 0x1f, 0x1b,
0x1a, 0x17, 0x2b, 0x26, 0x22, 0x20, 0x3a,
0x34, 0x30, 0x2c, 0x29, 0x26, 0x25, 0x23,
- 0x21, 0x20, 0x1e, 0x1e, 0x00, 0x00, 0x11,
- 0x22, 0x33, 0x44, 0x44, 0x44, 0x55, 0x55,
- 0x66, 0x66, 0x66, 0x66, 0x66, 0x66);
+ 0x21, 0x20, 0x1e, 0x1e);
+
+ s6e63m0_dcs_write_seq_static(ctx, 0xb8,
+ 0x00, 0x00, 0x11, 0x22, 0x33, 0x44, 0x44,
+ 0x44, 0x55, 0x55, 0x66, 0x66, 0x66, 0x66,
+ 0x66, 0x66);
s6e63m0_dcs_write_seq_static(ctx, 0xb9,
0x2c, 0x12, 0x0c, 0x0a, 0x10, 0x0e, 0x17,
@@ -269,7 +458,7 @@ static void s6e63m0_init(struct s6e63m0 *ctx)
0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x06,
0x09, 0x0d, 0x0f, 0x12, 0x15, 0x18);
- s6e63m0_dcs_write_seq_static(ctx, 0xb2,
+ s6e63m0_dcs_write_seq_static(ctx, MCS_TEMP_SWIRE,
0x10, 0x10, 0x0b, 0x05);
s6e63m0_dcs_write_seq_static(ctx, MCS_MIECTL1,
@@ -447,15 +636,33 @@ static const struct drm_panel_funcs s6e63m0_drm_funcs = {
static int s6e63m0_set_brightness(struct backlight_device *bd)
{
struct s6e63m0 *ctx = bl_get_data(bd);
-
int brightness = bd->props.brightness;
-
- /* disable and set new gamma */
+ u8 elvss_val;
+ u8 elvss_cmd_set[5];
+ int i;
+
+ /* Adjust ELVSS to candela level */
+ i = s6e63m0_elvss_per_gamma[brightness];
+ elvss_val = ctx->elvss_pulse + s6e63m0_elvss_offsets[i];
+ if (elvss_val > 0x1f)
+ elvss_val = 0x1f;
+ elvss_cmd_set[0] = MCS_TEMP_SWIRE;
+ elvss_cmd_set[1] = elvss_val;
+ elvss_cmd_set[2] = elvss_val;
+ elvss_cmd_set[3] = elvss_val;
+ elvss_cmd_set[4] = elvss_val;
+ s6e63m0_dcs_write(ctx, elvss_cmd_set, 5);
+
+ /* Update the ACL per gamma value */
+ i = s6e63m0_acl_per_gamma[brightness];
+ s6e63m0_dcs_write(ctx, s6e63m0_acl[i],
+ ARRAY_SIZE(s6e63m0_acl[i]));
+
+ /* Update gamma table */
s6e63m0_dcs_write(ctx, s6e63m0_gamma_22[brightness],
ARRAY_SIZE(s6e63m0_gamma_22[brightness]));
+ s6e63m0_dcs_write_seq_static(ctx, MCS_PGAMMACTL, 0x03);
- /* update gamma table. */
- s6e63m0_dcs_write_seq_static(ctx, MCS_PGAMMACTL, 0x01);
return s6e63m0_clear_error(ctx);
}
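The reworked brightness path derives the ELVSS value from the panel-reported pulse plus a per-brightness-band offset, clamps it to the 0x1f hardware ceiling, and repeats it four times in the MCS_TEMP_SWIRE payload. Worked example with the 0x16 fallback pulse: at 30 cd the band offset is 0x0D, so 0x16 + 0x0D = 0x23 clamps to 0x1F; at 300 cd the offset is 0x00 and 0x16 goes out unmodified. The clamp in isolation:

#include <stdint.h>
#include <stdio.h>

static uint8_t elvss_value(uint8_t pulse, uint8_t offset)
{
	unsigned int v = pulse + offset;	/* widen so the sum can't wrap */

	return (v > 0x1f) ? 0x1f : (uint8_t)v;	/* hardware ceiling */
}

int main(void)
{
	printf("30 cd: %#x, 300 cd: %#x\n",
	       elvss_value(0x16, 0x0d), elvss_value(0x16, 0x00));
	return 0;
}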
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 597f676a6591..41bbec72b2da 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -2267,6 +2267,31 @@ static const struct panel_desc innolux_n116bge = {
},
};
+static const struct drm_display_mode innolux_n125hce_gn1_mode = {
+ .clock = 162000,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 40,
+ .hsync_end = 1920 + 40 + 40,
+ .htotal = 1920 + 40 + 40 + 80,
+ .vdisplay = 1080,
+ .vsync_start = 1080 + 4,
+ .vsync_end = 1080 + 4 + 4,
+ .vtotal = 1080 + 4 + 4 + 24,
+};
+
+static const struct panel_desc innolux_n125hce_gn1 = {
+ .modes = &innolux_n125hce_gn1_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 276,
+ .height = 155,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DATA_MSB_TO_LSB,
+ .connector_type = DRM_MODE_CONNECTOR_eDP,
+};
+
static const struct drm_display_mode innolux_n156bge_l21_mode = {
.clock = 69300,
.hdisplay = 1366,
@@ -4123,6 +4148,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "innolux,n116bge",
.data = &innolux_n116bge,
}, {
+ .compatible = "innolux,n125hce-gn1",
+ .data = &innolux_n125hce_gn1,
+ }, {
.compatible = "innolux,n156bge-l21",
.data = &innolux_n156bge_l21,
}, {
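For the new innolux,n125hce-gn1 entry the timings pin down the refresh rate: htotal = 1920 + 40 + 40 + 80 = 2080, vtotal = 1080 + 4 + 4 + 24 = 1112, and a 162 MHz pixel clock gives 162,000,000 / (2080 * 1112), roughly 70 Hz. The same arithmetic in code:

#include <stdio.h>

int main(void)
{
	long clock_khz = 162000;
	long htotal = 1920 + 40 + 40 + 80;	/* active + fp + sync + bp */
	long vtotal = 1080 + 4 + 4 + 24;
	double refresh = clock_khz * 1000.0 / (htotal * vtotal);

	printf("%ldx%ld total, %.1f Hz\n", htotal, vtotal, refresh);
	return 0;
}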
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index 57a31dd0ffed..3e0723bc36bd 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -228,7 +228,7 @@ struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t
INIT_LIST_HEAD(&obj->mappings.list);
mutex_init(&obj->mappings.lock);
obj->base.base.funcs = &panfrost_gem_funcs;
- obj->base.map_cached = pfdev->coherent;
+ obj->base.map_wc = !pfdev->coherent;
return &obj->base.base;
}
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index be8d68fb0e11..7c1b3481b785 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -347,16 +347,9 @@ static void mmu_tlb_flush_walk(unsigned long iova, size_t size, size_t granule,
mmu_tlb_sync_context(cookie);
}
-static void mmu_tlb_flush_leaf(unsigned long iova, size_t size, size_t granule,
- void *cookie)
-{
- mmu_tlb_sync_context(cookie);
-}
-
static const struct iommu_flush_ops mmu_tlb_ops = {
.tlb_flush_all = mmu_tlb_inv_context_s1,
.tlb_flush_walk = mmu_tlb_flush_walk,
- .tlb_flush_leaf = mmu_tlb_flush_leaf,
};
int panfrost_mmu_pgtable_alloc(struct panfrost_file_priv *priv)
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 128c38c8a837..7dd0c69baa47 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -115,7 +115,7 @@ static struct ttm_tt *qxl_ttm_tt_create(struct ttm_buffer_object *bo,
ttm = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);
if (ttm == NULL)
return NULL;
- if (ttm_dma_tt_init(ttm, bo, page_flags, ttm_cached)) {
+ if (ttm_tt_init(ttm, bo, page_flags, ttm_cached)) {
kfree(ttm);
return NULL;
}
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index d59ef6e92a40..23195d5d4e91 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -730,9 +730,6 @@ int radeon_ttm_init(struct radeon_device *rdev)
}
rdev->mman.initialized = true;
- ttm_pool_init(&rdev->mman.bdev.pool, rdev->dev, rdev->need_swiotlb,
- dma_addressing_limited(&rdev->pdev->dev));
-
r = radeon_ttm_init_vram(rdev);
if (r) {
DRM_ERROR("Failed initializing VRAM heap.\n");
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 57fb3eb3a4b4..39c1c339be7b 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -155,7 +155,7 @@ int radeon_uvd_init(struct radeon_device *rdev)
family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
- DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
+ DRM_INFO("Found UVD firmware Version: %u.%u Family ID: %u\n",
version_major, version_minor, family_id);
/*
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index 5e8006444704..a450497368b2 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -122,7 +122,7 @@ int radeon_vce_init(struct radeon_device *rdev)
if (sscanf(c, "%2u]", &rdev->vce.fb_version) != 1)
return -EINVAL;
- DRM_INFO("Found VCE firmware/feedback version %hhd.%hhd.%hhd / %d!\n",
+ DRM_INFO("Found VCE firmware/feedback version %d.%d.%d / %d!\n",
start, mid, end, rdev->vce.fb_version);
rdev->vce.fw_version = (start << 24) | (mid << 16) | (end << 8);
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index 55960cbb1019..522e51a404cc 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -805,25 +805,6 @@ static int sun4i_backend_bind(struct device *dev, struct device *master,
ret = of_dma_configure(drm->dev, dev->of_node, true);
if (ret)
return ret;
- } else {
- /*
- * If we don't have the interconnect property, most likely
- * because of an old DT, we need to set the DMA offset by hand
- * on our device since the RAM mapping is at 0 for the DMA bus,
- * unlike the CPU.
- *
- * XXX(hch): this has no business in a driver and needs to move
- * to the device tree.
- *
- * If we have two subsequent calls to dma_direct_set_offset
- * returns -EINVAL. Unfortunately, this happens when we have two
- * backends in the system, and will result in the driver
- * reporting an error while it has been setup properly before.
- * Ignore EINVAL, but it should really be removed eventually.
- */
- ret = dma_direct_set_offset(drm->dev, PHYS_OFFSET, 0, SZ_4G);
- if (ret && ret != -EINVAL)
- return ret;
}
backend->engine.node = dev->of_node;
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index 1b96780b4989..74bf1c84b637 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -63,7 +63,10 @@ static atomic_long_t allocated_pages;
static struct ttm_pool_type global_write_combined[MAX_ORDER];
static struct ttm_pool_type global_uncached[MAX_ORDER];
-static spinlock_t shrinker_lock;
+static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER];
+static struct ttm_pool_type global_dma32_uncached[MAX_ORDER];
+
+static struct mutex shrinker_lock;
static struct list_head shrinker_list;
static struct shrinker mm_shrinker;
@@ -76,12 +79,13 @@ static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
struct page *p;
void *vaddr;
- if (order) {
- gfp_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
+ /* Don't set the __GFP_COMP flag for higher order allocations.
+ * Mapping pages directly into a userspace process and calling
+ * put_page() on a TTM allocated page is illegal.
+ */
+ if (order)
+ gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN |
__GFP_KSWAPD_RECLAIM;
- gfp_flags &= ~__GFP_MOVABLE;
- gfp_flags &= ~__GFP_COMP;
- }
if (!pool->use_dma_alloc) {
p = alloc_pages(gfp_flags, order);
@@ -187,7 +191,7 @@ static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
size_t size = (1ULL << order) * PAGE_SIZE;
addr = dma_map_page(pool->dev, p, 0, size, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(pool->dev, **dma_addr))
+ if (dma_mapping_error(pool->dev, addr))
return -EFAULT;
}
@@ -236,21 +240,6 @@ static struct page *ttm_pool_type_take(struct ttm_pool_type *pt)
return p;
}
-/* Count the number of pages available in a pool_type */
-static unsigned int ttm_pool_type_count(struct ttm_pool_type *pt)
-{
- unsigned int count = 0;
- struct page *p;
-
- spin_lock(&pt->lock);
- /* Only used for debugfs, the overhead doesn't matter */
- list_for_each_entry(p, &pt->pages, lru)
- ++count;
- spin_unlock(&pt->lock);
-
- return count;
-}
-
/* Initialize and add a pool type to the global shrinker list */
static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
enum ttm_caching caching, unsigned int order)
@@ -261,9 +250,9 @@ static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
spin_lock_init(&pt->lock);
INIT_LIST_HEAD(&pt->pages);
- spin_lock(&shrinker_lock);
+ mutex_lock(&shrinker_lock);
list_add_tail(&pt->shrinker_list, &shrinker_list);
- spin_unlock(&shrinker_lock);
+ mutex_unlock(&shrinker_lock);
}
/* Remove a pool_type from the global shrinker list and free all pages */
@@ -271,9 +260,9 @@ static void ttm_pool_type_fini(struct ttm_pool_type *pt)
{
struct page *p, *tmp;
- spin_lock(&shrinker_lock);
+ mutex_lock(&shrinker_lock);
list_del(&pt->shrinker_list);
- spin_unlock(&shrinker_lock);
+ mutex_unlock(&shrinker_lock);
list_for_each_entry_safe(p, tmp, &pt->pages, lru)
ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
@@ -290,8 +279,14 @@ static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
#ifdef CONFIG_X86
switch (caching) {
case ttm_write_combined:
+ if (pool->use_dma32)
+ return &global_dma32_write_combined[order];
+
return &global_write_combined[order];
case ttm_uncached:
+ if (pool->use_dma32)
+ return &global_dma32_uncached[order];
+
return &global_uncached[order];
default:
break;
@@ -308,7 +303,7 @@ static unsigned int ttm_pool_shrink(void)
unsigned int num_freed;
struct page *p;
- spin_lock(&shrinker_lock);
+ mutex_lock(&shrinker_lock);
pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);
p = ttm_pool_type_take(pt);
@@ -320,7 +315,7 @@ static unsigned int ttm_pool_shrink(void)
}
list_move_tail(&pt->shrinker_list, &shrinker_list);
- spin_unlock(&shrinker_lock);
+ mutex_unlock(&shrinker_lock);
return num_freed;
}
@@ -513,7 +508,6 @@ void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
ttm_pool_type_init(&pool->caching[i].orders[j],
pool, i, j);
}
-EXPORT_SYMBOL(ttm_pool_init);
/**
* ttm_pool_fini - Cleanup a pool
@@ -531,9 +525,22 @@ void ttm_pool_fini(struct ttm_pool *pool)
for (j = 0; j < MAX_ORDER; ++j)
ttm_pool_type_fini(&pool->caching[i].orders[j]);
}
-EXPORT_SYMBOL(ttm_pool_fini);
#ifdef CONFIG_DEBUG_FS
+/* Count the number of pages available in a pool_type */
+static unsigned int ttm_pool_type_count(struct ttm_pool_type *pt)
+{
+ unsigned int count = 0;
+ struct page *p;
+
+ spin_lock(&pt->lock);
+ /* Only used for debugfs, the overhead doesn't matter */
+ list_for_each_entry(p, &pt->pages, lru)
+ ++count;
+ spin_unlock(&pt->lock);
+
+ return count;
+}
/* Dump information about the different pool types */
static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
@@ -558,7 +565,7 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
{
unsigned int i;
- spin_lock(&shrinker_lock);
+ mutex_lock(&shrinker_lock);
seq_puts(m, "\t ");
for (i = 0; i < MAX_ORDER; ++i)
@@ -570,6 +577,11 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
seq_puts(m, "uc\t:");
ttm_pool_debugfs_orders(global_uncached, m);
+ seq_puts(m, "wc 32\t:");
+ ttm_pool_debugfs_orders(global_dma32_write_combined, m);
+ seq_puts(m, "uc 32\t:");
+ ttm_pool_debugfs_orders(global_dma32_uncached, m);
+
for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
seq_puts(m, "DMA ");
switch (i) {
@@ -589,7 +601,7 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
seq_printf(m, "\ntotal\t: %8lu of %8lu\n",
atomic_long_read(&allocated_pages), page_pool_size);
- spin_unlock(&shrinker_lock);
+ mutex_unlock(&shrinker_lock);
return 0;
}
@@ -633,13 +645,18 @@ int ttm_pool_mgr_init(unsigned long num_pages)
if (!page_pool_size)
page_pool_size = num_pages;
- spin_lock_init(&shrinker_lock);
+ mutex_init(&shrinker_lock);
INIT_LIST_HEAD(&shrinker_list);
for (i = 0; i < MAX_ORDER; ++i) {
ttm_pool_type_init(&global_write_combined[i], NULL,
ttm_write_combined, i);
ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);
+
+ ttm_pool_type_init(&global_dma32_write_combined[i], NULL,
+ ttm_write_combined, i);
+ ttm_pool_type_init(&global_dma32_uncached[i], NULL,
+ ttm_uncached, i);
}
mm_shrinker.count_objects = ttm_pool_shrinker_count;
@@ -660,6 +677,9 @@ void ttm_pool_mgr_fini(void)
for (i = 0; i < MAX_ORDER; ++i) {
ttm_pool_type_fini(&global_write_combined[i]);
ttm_pool_type_fini(&global_uncached[i]);
+
+ ttm_pool_type_fini(&global_dma32_write_combined[i]);
+ ttm_pool_type_fini(&global_dma32_uncached[i]);
}
unregister_shrinker(&mm_shrinker);
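Three separate fixes ride in this ttm_pool diff: shrinker_lock becomes a mutex, presumably because ttm_pool_free_page() can reach dma_free_attrs() under it and that path may sleep; dedicated global_dma32_* pools keep GFP_DMA32 pages from mixing with the regular global pools; and ttm_pool_map() now error-checks the address dma_map_page() just returned instead of dereferencing the output slot, which has not been written at that point. The corrected mapping idiom on its own:

#include <linux/dma-mapping.h>

static int map_pool_page(struct device *dev, struct page *p,
			 size_t size, dma_addr_t *dma_addr)
{
	dma_addr_t addr;

	addr = dma_map_page(dev, p, 0, size, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, addr))	/* test the fresh value */
		return -EFAULT;

	*dma_addr = addr;	/* publish only a known-good mapping */
	return 0;
}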
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index b5a8dd9fdf02..9269092697d8 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -38,8 +38,6 @@ static const struct drm_driver driver = {
.driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET,
/* GEM hooks */
- .gem_create_object = drm_gem_shmem_create_object_cached,
-
.fops = &udl_driver_fops,
DRM_GEM_SHMEM_DRIVER_OPS,
diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c
index 8b52cb25877c..6a8731ab9d7d 100644
--- a/drivers/gpu/drm/v3d/v3d_bo.c
+++ b/drivers/gpu/drm/v3d/v3d_bo.c
@@ -78,7 +78,7 @@ struct drm_gem_object *v3d_create_object(struct drm_device *dev, size_t size)
obj = &bo->base.base;
obj->funcs = &v3d_gem_funcs;
-
+ bo->base.map_wc = true;
INIT_LIST_HEAD(&bo->unref_head);
return &bo->base.base;
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 555106220578..98cab0bbe92d 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -1267,6 +1267,7 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
card->dai_link = dai_link;
card->num_links = 1;
card->name = vc4_hdmi->variant->card_name;
+ card->driver_name = "vc4-hdmi";
card->dev = dev;
card->owner = THIS_MODULE;
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
index cccd341e5d67..3b722252d1fb 100644
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -620,11 +620,11 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
* for now we just allocate globally.
*/
if (!hvs->hvs5)
- /* 96kB */
- drm_mm_init(&hvs->lbm_mm, 0, 96 * 1024);
+ /* 48k words of 2x12-bit pixels */
+ drm_mm_init(&hvs->lbm_mm, 0, 48 * 1024);
else
- /* 70k words */
- drm_mm_init(&hvs->lbm_mm, 0, 70 * 2 * 1024);
+ /* 60k words of 4x12-bit pixels */
+ drm_mm_init(&hvs->lbm_mm, 0, 60 * 1024);
/* Upload filter kernels. We only have the one for now, so we
* keep it around for the lifetime of the driver.
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 6b39cc2ca18d..5612cab55227 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -437,6 +437,7 @@ static void vc4_write_ppf(struct vc4_plane_state *vc4_state, u32 src, u32 dst)
static u32 vc4_lbm_size(struct drm_plane_state *state)
{
struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+ struct vc4_dev *vc4 = to_vc4_dev(state->plane->dev);
u32 pix_per_line;
u32 lbm;
@@ -472,7 +473,11 @@ static u32 vc4_lbm_size(struct drm_plane_state *state)
lbm = pix_per_line * 16;
}
- lbm = roundup(lbm, 32);
+ /* Align it to 64 or 128 (hvs5) bytes */
+ lbm = roundup(lbm, vc4->hvs->hvs5 ? 128 : 64);
+
+ /* Each "word" of the LBM memory contains 2 or 4 (hvs5) pixels */
+ lbm /= vc4->hvs->hvs5 ? 4 : 2;
return lbm;
}
@@ -912,9 +917,9 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
if (!vc4_state->is_unity) {
vc4_dlist_write(vc4_state,
VC4_SET_FIELD(vc4_state->crtc_w,
- SCALER_POS1_SCL_WIDTH) |
+ SCALER5_POS1_SCL_WIDTH) |
VC4_SET_FIELD(vc4_state->crtc_h,
- SCALER_POS1_SCL_HEIGHT));
+ SCALER5_POS1_SCL_HEIGHT));
}
/* Position Word 2: Source Image Size */
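The vc4_lbm_size() change converts the computed LBM footprint into the units the hardware allocator actually counts: round up to the 64-byte (or 128-byte on hvs5) granule, then divide by the 2 (or 4) pixels packed per LBM word, matching the new drm_mm sizes set up in vc4_hvs_bind(). Mirroring the conversion (unit interpretation taken from the in-line comments):

#include <stdio.h>

static unsigned int roundup_to(unsigned int v, unsigned int m)
{
	return ((v + m - 1) / m) * m;
}

/* LBM size -> LBM words, per the pre-hvs5 and hvs5 rules above. */
static unsigned int lbm_words(unsigned int lbm, int hvs5)
{
	unsigned int aligned = roundup_to(lbm, hvs5 ? 128 : 64);

	return aligned / (hvs5 ? 4 : 2);
}

int main(void)
{
	printf("1000 -> %u words (hvs4), %u words (hvs5)\n",
	       lbm_words(1000, 0), lbm_words(1000, 1));
	return 0;
}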
diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c
index 34612edcabbd..8aa5220885f4 100644
--- a/drivers/gpu/drm/vc4/vc4_txp.c
+++ b/drivers/gpu/drm/vc4/vc4_txp.c
@@ -273,8 +273,10 @@ static int vc4_txp_connector_atomic_check(struct drm_connector *conn,
}
static void vc4_txp_connector_atomic_commit(struct drm_connector *conn,
- struct drm_connector_state *conn_state)
+ struct drm_atomic_state *state)
{
+ struct drm_connector_state *conn_state = drm_atomic_get_new_connector_state(state,
+ conn);
struct vc4_txp *txp = connector_to_vc4_txp(conn);
struct drm_gem_cma_object *gem;
struct drm_display_mode *mode;
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 9a413091abb6..f8635ccaf9a1 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -403,8 +403,7 @@ static int vgem_prime_mmap(struct drm_gem_object *obj,
if (ret)
return ret;
- fput(vma->vm_file);
- vma->vm_file = get_file(obj->filp);
+ vma_set_file(vma, obj->filp);
vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
index 24cc445169e2..a3e0fb5b8671 100644
--- a/drivers/gpu/drm/via/via_irq.c
+++ b/drivers/gpu/drm/via/via_irq.c
@@ -364,6 +364,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
irqwait->request.sequence +=
atomic_read(&cur_irq->irq_received);
irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
+ break;
case VIA_IRQ_ABSOLUTE:
break;
default:
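The via_wait_irq change is a missing-break fix: the VIA_IRQ_RELATIVE case converted the sequence to an absolute target and then fell through into VIA_IRQ_ABSOLUTE, harmless only because that case is empty today. The hazard in miniature:

#include <stdio.h>

enum wait_type { WAIT_RELATIVE, WAIT_ABSOLUTE };

static unsigned int resolve(enum wait_type type, unsigned int seq,
			    unsigned int current_seq)
{
	switch (type) {
	case WAIT_RELATIVE:
		seq += current_seq;	/* convert to an absolute target */
		break;			/* without this, we'd fall through */
	case WAIT_ABSOLUTE:
		break;
	}
	return seq;
}

int main(void)
{
	printf("relative 5 @ 100 -> %u\n", resolve(WAIT_RELATIVE, 5, 100));
	return 0;
}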
diff --git a/drivers/gpu/drm/via/via_verifier.c b/drivers/gpu/drm/via/via_verifier.c
index 8d8135f424ef..3d6e3a70f318 100644
--- a/drivers/gpu/drm/via/via_verifier.c
+++ b/drivers/gpu/drm/via/via_verifier.c
@@ -1001,8 +1001,8 @@ via_verify_command_stream(const uint32_t * buf, unsigned int size,
state = via_check_vheader6(&buf, buf_end);
break;
case state_command:
- if ((HALCYON_HEADER2 == (cmd = *buf)) &&
- supported_3d)
+ cmd = *buf;
+ if ((cmd == HALCYON_HEADER2) && supported_3d)
state = state_header2;
else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
state = state_header1;
@@ -1064,7 +1064,8 @@ via_parse_command_stream(struct drm_device *dev, const uint32_t *buf,
state = via_parse_vheader6(dev_priv, &buf, buf_end);
break;
case state_command:
- if (HALCYON_HEADER2 == (cmd = *buf))
+ cmd = *buf;
+ if (cmd == HALCYON_HEADER2)
state = state_header2;
else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
state = state_header1;
diff --git a/drivers/gpu/drm/virtio/virtgpu_debugfs.c b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
index f336a8fa6666..5fefc88d47e4 100644
--- a/drivers/gpu/drm/virtio/virtgpu_debugfs.c
+++ b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
@@ -67,8 +67,8 @@ virtio_gpu_debugfs_irq_info(struct seq_file *m, void *data)
struct virtio_gpu_device *vgdev = node->minor->dev->dev_private;
seq_printf(m, "fence %llu %lld\n",
- (u64)atomic64_read(&vgdev->fence_drv.last_seq),
- vgdev->fence_drv.sync_seq);
+ (u64)atomic64_read(&vgdev->fence_drv.last_fence_id),
+ vgdev->fence_drv.current_fence_id);
return 0;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 3c0e17212c33..6a232553c99b 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -127,8 +127,8 @@ typedef void (*virtio_gpu_resp_cb)(struct virtio_gpu_device *vgdev,
struct virtio_gpu_vbuffer *vbuf);
struct virtio_gpu_fence_driver {
- atomic64_t last_seq;
- uint64_t sync_seq;
+ atomic64_t last_fence_id;
+ uint64_t current_fence_id;
uint64_t context;
struct list_head fences;
spinlock_t lock;
@@ -257,7 +257,7 @@ struct virtio_gpu_fpriv {
struct mutex context_lock;
};
-/* virtio_ioctl.c */
+/* virtgpu_ioctl.c */
#define DRM_VIRTIO_NUM_IOCTLS 11
extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file);
@@ -420,7 +420,7 @@ void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
struct virtio_gpu_ctrl_hdr *cmd_hdr,
struct virtio_gpu_fence *fence);
void virtio_gpu_fence_event_process(struct virtio_gpu_device *vdev,
- u64 last_seq);
+ u64 fence_id);
/* virtgpu_object.c */
void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo);
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index 5b2a4146c5bd..728ca36f6327 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -48,7 +48,7 @@ static bool virtio_fence_signaled(struct dma_fence *f)
/* leaked fence outside driver before completing
* initialization with virtio_gpu_fence_emit */
return false;
- if (atomic64_read(&fence->drv->last_seq) >= fence->f.seqno)
+ if (atomic64_read(&fence->drv->last_fence_id) >= fence->f.seqno)
return true;
return false;
}
@@ -62,7 +62,8 @@ static void virtio_timeline_value_str(struct dma_fence *f, char *str, int size)
{
struct virtio_gpu_fence *fence = to_virtio_fence(f);
- snprintf(str, size, "%llu", (u64)atomic64_read(&fence->drv->last_seq));
+ snprintf(str, size, "%llu",
+ (u64)atomic64_read(&fence->drv->last_fence_id));
}
static const struct dma_fence_ops virtio_fence_ops = {
@@ -100,7 +101,7 @@ void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
unsigned long irq_flags;
spin_lock_irqsave(&drv->lock, irq_flags);
- fence->f.seqno = ++drv->sync_seq;
+ fence->f.seqno = ++drv->current_fence_id;
dma_fence_get(&fence->f);
list_add_tail(&fence->node, &drv->fences);
spin_unlock_irqrestore(&drv->lock, irq_flags);
@@ -112,16 +113,16 @@ void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
}
void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev,
- u64 last_seq)
+ u64 fence_id)
{
struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
struct virtio_gpu_fence *fence, *tmp;
unsigned long irq_flags;
spin_lock_irqsave(&drv->lock, irq_flags);
- atomic64_set(&vgdev->fence_drv.last_seq, last_seq);
+ atomic64_set(&vgdev->fence_drv.last_fence_id, fence_id);
list_for_each_entry_safe(fence, tmp, &drv->fences, node) {
- if (last_seq < fence->f.seqno)
+ if (fence_id < fence->f.seqno)
continue;
dma_fence_signal_locked(&fence->f);
list_del(&fence->node);
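The last_seq to last_fence_id rename is cosmetic; the protocol stays: fence ids are handed out monotonically at emit time, and when the host completes id N every pending fence with seqno <= N signals and drops off the list in one pass. Stripped to a singly linked list, without the drv->lock and dma_fence machinery of the real version:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fence {
	uint64_t seqno;
	bool signaled;
	struct fence *next;
};

/* Signal every pending fence whose seqno is covered by fence_id. */
static void process_fence_id(struct fence **head, uint64_t fence_id)
{
	struct fence **pp = head;

	while (*pp) {
		struct fence *f = *pp;

		if (f->seqno > fence_id) {	/* not completed yet */
			pp = &f->next;
			continue;
		}
		f->signaled = true;
		*pp = f->next;			/* unlink, like list_del() */
	}
}

int main(void)
{
	struct fence c = { 3, false, NULL };
	struct fence b = { 2, false, &c };
	struct fence a = { 1, false, &b };
	struct fence *head = &a;

	process_fence_id(&head, 2);
	printf("a=%d b=%d c=%d\n", a.signaled, b.signaled, c.signaled);
	return 0;
}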
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 5417f365d1a3..23eb6d772e40 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -591,8 +591,9 @@ static int verify_blob(struct virtio_gpu_device *vgdev,
return 0;
}
-static int virtio_gpu_resource_create_blob(struct drm_device *dev,
- void *data, struct drm_file *file)
+static int virtio_gpu_resource_create_blob_ioctl(struct drm_device *dev,
+ void *data,
+ struct drm_file *file)
{
int ret = 0;
uint32_t handle = 0;
@@ -696,6 +697,6 @@ struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE_BLOB,
- virtio_gpu_resource_create_blob,
+ virtio_gpu_resource_create_blob_ioctl,
DRM_RENDER_ALLOW),
};
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index d9ad27e00905..d69a5b6da553 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -144,7 +144,6 @@ struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
dshmem = &shmem->base.base;
dshmem->base.funcs = &virtio_gpu_shmem_funcs;
- dshmem->map_cached = true;
return &dshmem->base;
}
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index 1a1b5bc8e121..d4d39227f2ed 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -82,7 +82,6 @@ static const struct drm_driver vkms_driver = {
.driver_features = DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_GEM,
.release = vkms_release,
.fops = &vkms_driver_fops,
- .gem_create_object = drm_gem_shmem_create_object_cached,
DRM_GEM_SHMEM_DRIVER_OPS,
.name = DRIVER_NAME,
diff --git a/drivers/gpu/drm/vkms/vkms_writeback.c b/drivers/gpu/drm/vkms/vkms_writeback.c
index 67f80ab1e85f..78fdc1d59186 100644
--- a/drivers/gpu/drm/vkms/vkms_writeback.c
+++ b/drivers/gpu/drm/vkms/vkms_writeback.c
@@ -2,6 +2,7 @@
#include <linux/dma-buf-map.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_writeback.h>
#include <drm/drm_probe_helper.h>
@@ -105,8 +106,10 @@ static void vkms_wb_cleanup_job(struct drm_writeback_connector *connector,
}
static void vkms_wb_atomic_commit(struct drm_connector *conn,
- struct drm_connector_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state,
+ conn);
struct vkms_device *vkmsdev = drm_device_to_vkms_device(conn->dev);
struct vkms_output *output = &vkmsdev->output;
struct drm_writeback_connector *wb_conn = &output->wb_connector;
@@ -122,7 +125,7 @@ static void vkms_wb_atomic_commit(struct drm_connector *conn,
crtc_state->active_writeback = conn_state->writeback_job->priv;
crtc_state->wb_pending = true;
spin_unlock_irq(&output->composer_lock);
- drm_writeback_queue_job(wb_conn, state);
+ drm_writeback_queue_job(wb_conn, connector_state);
}
static const struct drm_connector_helper_funcs vkms_wb_conn_helper_funcs = {
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index 1401fd52f37a..365e6ddbe90f 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -1038,7 +1038,7 @@ static int vga_switcheroo_runtime_resume(struct device *dev)
mutex_lock(&vgasr_mutex);
vga_switcheroo_power_switch(pdev, VGA_SWITCHEROO_ON);
mutex_unlock(&vgasr_mutex);
- pci_wakeup_bus(pdev->bus);
+ pci_resume_bus(pdev->bus);
return dev->bus->pm->runtime_resume(dev);
}
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 612629678c84..09fa75a2b289 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -899,6 +899,7 @@ config HID_SONY
depends on NEW_LEDS
depends on LEDS_CLASS
select POWER_SUPPLY
+ select CRC32
help
Support for
@@ -907,6 +908,7 @@ config HID_SONY
* Buzz controllers
* Sony PS3 Blue-ray Disk Remote Control (Bluetooth)
* Logitech Harmony adapter for Sony Playstation 3 (Bluetooth)
+ * Guitar Hero Live PS3 and Wii U guitar dongles
config SONY_FF
bool "Sony PS2/3/4 accessories force feedback support"
@@ -1183,4 +1185,6 @@ source "drivers/hid/i2c-hid/Kconfig"
source "drivers/hid/intel-ish-hid/Kconfig"
+source "drivers/hid/amd-sfh-hid/Kconfig"
+
endmenu
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 4acb583c92a6..014d21fe7dac 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -142,3 +142,5 @@ obj-$(CONFIG_I2C_HID) += i2c-hid/
obj-$(CONFIG_INTEL_ISH_HID) += intel-ish-hid/
obj-$(INTEL_ISH_FIRMWARE_DOWNLOADER) += intel-ish-hid/
+
+obj-$(CONFIG_AMD_SFH_HID) += amd-sfh-hid/
diff --git a/drivers/hid/amd-sfh-hid/Kconfig b/drivers/hid/amd-sfh-hid/Kconfig
new file mode 100644
index 000000000000..db069a83e9a2
--- /dev/null
+++ b/drivers/hid/amd-sfh-hid/Kconfig
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+menu "AMD SFH HID Support"
+ depends on X86_64 || COMPILE_TEST
+ depends on PCI
+ depends on HID
+
+config AMD_SFH_HID
+ tristate "AMD Sensor Fusion Hub"
+ help
+ If you say yes to this option, support will be included for the
+ AMD Sensor Fusion Hub.
+ This driver enables sensor functionality on AMD platforms,
+ starting with the family 17h Ryzen parts.
+
+ This driver can also be built as a module. If so, the module will
+ be called amd-sfh.
+ Say Y or M here if you want to support AMD SFH. If unsure, say N.
+endmenu
diff --git a/drivers/hid/amd-sfh-hid/Makefile b/drivers/hid/amd-sfh-hid/Makefile
new file mode 100644
index 000000000000..35e704da5612
--- /dev/null
+++ b/drivers/hid/amd-sfh-hid/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Makefile - AMD SFH HID drivers
+# Copyright (c) 2019-2020, Advanced Micro Devices, Inc.
+#
+#
+obj-$(CONFIG_AMD_SFH_HID) += amd_sfh.o
+amd_sfh-objs := amd_sfh_hid.o
+amd_sfh-objs += amd_sfh_client.o
+amd_sfh-objs += amd_sfh_pcie.o
+amd_sfh-objs += hid_descriptor/amd_sfh_hid_desc.o
+
+ccflags-y += -I $(srctree)/$(src)/
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_client.c b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
new file mode 100644
index 000000000000..2ab38b715347
--- /dev/null
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * AMD SFH Client Layer
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ * Authors: Nehal Bakulchandra Shah <Nehal-Bakulchandra.Shah@amd.com>
+ * Sandeep Singh <Sandeep.singh@amd.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/hid.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/errno.h>
+
+#include "hid_descriptor/amd_sfh_hid_desc.h"
+#include "amd_sfh_pcie.h"
+#include "amd_sfh_hid.h"
+
+#define AMD_SFH_IDLE_LOOP 200
+
+struct request_list {
+ struct hid_device *hid;
+ struct list_head list;
+ u8 report_id;
+ u8 sensor_idx;
+ u8 report_type;
+ u8 current_index;
+};
+
+static struct request_list req_list;
+
+void amd_sfh_set_report(struct hid_device *hid, int report_id,
+ int report_type)
+{
+ struct amdtp_hid_data *hid_data = hid->driver_data;
+ struct amdtp_cl_data *cli_data = hid_data->cli_data;
+ int i;
+
+ for (i = 0; i < cli_data->num_hid_devices; i++) {
+ if (cli_data->hid_sensor_hubs[i] == hid) {
+ cli_data->cur_hid_dev = i;
+ break;
+ }
+ }
+ amdtp_hid_wakeup(hid);
+}
+
+int amd_sfh_get_report(struct hid_device *hid, int report_id, int report_type)
+{
+ struct amdtp_hid_data *hid_data = hid->driver_data;
+ struct amdtp_cl_data *cli_data = hid_data->cli_data;
+ int i;
+
+ for (i = 0; i < cli_data->num_hid_devices; i++) {
+ if (cli_data->hid_sensor_hubs[i] == hid) {
+ struct request_list *new = kzalloc(sizeof(*new), GFP_KERNEL);
+
+ if (!new)
+ return -ENOMEM;
+
+ new->current_index = i;
+ new->sensor_idx = cli_data->sensor_idx[i];
+ new->hid = hid;
+ new->report_type = report_type;
+ new->report_id = report_id;
+ cli_data->report_id[i] = report_id;
+ cli_data->request_done[i] = false;
+ list_add(&new->list, &req_list.list);
+ break;
+ }
+ }
+ schedule_delayed_work(&cli_data->work, 0);
+ return 0;
+}
+
+static void amd_sfh_work(struct work_struct *work)
+{
+ struct amdtp_cl_data *cli_data = container_of(work, struct amdtp_cl_data, work.work);
+ struct request_list *req_node;
+ u8 current_index, sensor_index;
+ u8 report_id, node_type;
+ u8 report_size = 0;
+
+ req_node = list_last_entry(&req_list.list, struct request_list, list);
+ list_del(&req_node->list);
+ current_index = req_node->current_index;
+ sensor_index = req_node->sensor_idx;
+ report_id = req_node->report_id;
+ node_type = req_node->report_type;
+
+ if (node_type == HID_FEATURE_REPORT) {
+ report_size = get_feature_report(sensor_index, report_id,
+ cli_data->feature_report[current_index]);
+ if (report_size)
+ hid_input_report(cli_data->hid_sensor_hubs[current_index],
+ cli_data->report_type[current_index],
+ cli_data->feature_report[current_index], report_size, 0);
+ else
+ pr_err("AMDSFH: Invalid report size\n");
+
+ } else if (node_type == HID_INPUT_REPORT) {
+ report_size = get_input_report(sensor_index, report_id,
+ cli_data->input_report[current_index],
+ cli_data->sensor_virt_addr[current_index]);
+ if (report_size)
+ hid_input_report(cli_data->hid_sensor_hubs[current_index],
+ cli_data->report_type[current_index],
+ cli_data->input_report[current_index], report_size, 0);
+ else
+ pr_err("AMDSFH: Invalid report size\n");
+ }
+ cli_data->cur_hid_dev = current_index;
+ cli_data->sensor_requested_cnt[current_index] = 0;
+ amdtp_hid_wakeup(cli_data->hid_sensor_hubs[current_index]);
+}
+
+static void amd_sfh_work_buffer(struct work_struct *work)
+{
+ struct amdtp_cl_data *cli_data = container_of(work, struct amdtp_cl_data, work_buffer.work);
+ u8 report_size;
+ int i;
+
+ for (i = 0; i < cli_data->num_hid_devices; i++) {
+ report_size = get_input_report(cli_data->sensor_idx[i], cli_data->report_id[i],
+ cli_data->input_report[i],
+ cli_data->sensor_virt_addr[i]);
+ hid_input_report(cli_data->hid_sensor_hubs[i], HID_INPUT_REPORT,
+ cli_data->input_report[i], report_size, 0);
+ }
+ schedule_delayed_work(&cli_data->work_buffer, msecs_to_jiffies(AMD_SFH_IDLE_LOOP));
+}
+
+int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
+{
+ struct amdtp_cl_data *cl_data = privdata->cl_data;
+ struct amd_mp2_sensor_info info;
+ struct device *dev;
+ u32 feature_report_size;
+ u32 input_report_size;
+ u8 cl_idx;
+ int rc, i;
+
+ dev = &privdata->pdev->dev;
+ cl_data = kzalloc(sizeof(*cl_data), GFP_KERNEL);
+ if (!cl_data)
+ return -ENOMEM;
+
+ cl_data->num_hid_devices = amd_mp2_get_sensor_num(privdata, &cl_data->sensor_idx[0]);
+
+ INIT_DELAYED_WORK(&cl_data->work, amd_sfh_work);
+ INIT_DELAYED_WORK(&cl_data->work_buffer, amd_sfh_work_buffer);
+ INIT_LIST_HEAD(&req_list.list);
+
+ for (i = 0; i < cl_data->num_hid_devices; i++) {
+ cl_data->sensor_virt_addr[i] = dma_alloc_coherent(dev, sizeof(int) * 8,
+ &cl_data->sensor_dma_addr[i],
+ GFP_KERNEL);
+ cl_data->sensor_sts[i] = 0;
+ cl_data->sensor_requested_cnt[i] = 0;
+ cl_data->cur_hid_dev = i;
+ cl_idx = cl_data->sensor_idx[i];
+ cl_data->report_descr_sz[i] = get_descr_sz(cl_idx, descr_size);
+ if (!cl_data->report_descr_sz[i]) {
+ rc = -EINVAL;
+ goto cleanup;
+ }
+ feature_report_size = get_descr_sz(cl_idx, feature_size);
+ if (!feature_report_size) {
+ rc = -EINVAL;
+ goto cleanup;
+ }
+ input_report_size = get_descr_sz(cl_idx, input_size);
+ if (!input_report_size) {
+ rc = -EINVAL;
+ goto cleanup;
+ }
+ cl_data->feature_report[i] = kzalloc(feature_report_size, GFP_KERNEL);
+ if (!cl_data->feature_report[i]) {
+ rc = -ENOMEM;
+ goto cleanup;
+ }
+ cl_data->input_report[i] = kzalloc(input_report_size, GFP_KERNEL);
+ if (!cl_data->input_report[i]) {
+ rc = -ENOMEM;
+ goto cleanup;
+ }
+ info.period = msecs_to_jiffies(AMD_SFH_IDLE_LOOP);
+ info.sensor_idx = cl_idx;
+ info.dma_address = cl_data->sensor_dma_addr[i];
+
+ cl_data->report_descr[i] = kzalloc(cl_data->report_descr_sz[i], GFP_KERNEL);
+ if (!cl_data->report_descr[i]) {
+ rc = -ENOMEM;
+ goto cleanup;
+ }
+ rc = get_report_descriptor(cl_idx, cl_data->report_descr[i]);
+ if (rc)
+ return rc;
+ rc = amdtp_hid_probe(cl_data->cur_hid_dev, cl_data);
+ if (rc)
+ return rc;
+ amd_start_sensor(privdata, info);
+ cl_data->sensor_sts[i] = 1;
+ }
+ privdata->cl_data = cl_data;
+ schedule_delayed_work(&cl_data->work_buffer, msecs_to_jiffies(AMD_SFH_IDLE_LOOP));
+ return 0;
+
+cleanup:
+ for (i = 0; i < cl_data->num_hid_devices; i++) {
+ if (cl_data->sensor_virt_addr[i]) {
+ dma_free_coherent(&privdata->pdev->dev, 8 * sizeof(int),
+ cl_data->sensor_virt_addr[i],
+ cl_data->sensor_dma_addr[i]);
+ }
+ kfree(cl_data->feature_report[i]);
+ kfree(cl_data->input_report[i]);
+ kfree(cl_data->report_descr[i]);
+ }
+ kfree(cl_data);
+ return rc;
+}
+
+int amd_sfh_hid_client_deinit(struct amd_mp2_dev *privdata)
+{
+ struct amdtp_cl_data *cl_data = privdata->cl_data;
+ int i;
+
+ for (i = 0; i < cl_data->num_hid_devices; i++)
+ amd_stop_sensor(privdata, i);
+
+ cancel_delayed_work_sync(&cl_data->work);
+ cancel_delayed_work_sync(&cl_data->work_buffer);
+ amdtp_hid_remove(cl_data);
+
+ for (i = 0; i < cl_data->num_hid_devices; i++) {
+ if (cl_data->sensor_virt_addr[i]) {
+ dma_free_coherent(&privdata->pdev->dev, 8 * sizeof(int),
+ cl_data->sensor_virt_addr[i],
+ cl_data->sensor_dma_addr[i]);
+ }
+ }
+ kfree(cl_data);
+ return 0;
+}
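amd_sfh_work_buffer() is this driver's heartbeat: every AMD_SFH_IDLE_LOOP (200) ms it pulls each sensor's input report out of the DMA buffer, hands it to HID core, and re-arms itself, so data flows by polling rather than by interrupt. The self-re-arming delayed-work idiom, reduced to a skeleton (names illustrative, not the driver's):

#include <linux/jiffies.h>
#include <linux/workqueue.h>

#define POLL_PERIOD_MS	200

static struct delayed_work poll_work;

static void poll_fn(struct work_struct *work)
{
	/* ... read hardware state and push it upstream ... */

	/* Re-arm: the work keeps itself running until cancelled. */
	schedule_delayed_work(&poll_work, msecs_to_jiffies(POLL_PERIOD_MS));
}

/* Start polling; pair with cancel_delayed_work_sync() on teardown. */
static void poll_start(void)
{
	INIT_DELAYED_WORK(&poll_work, poll_fn);
	schedule_delayed_work(&poll_work, msecs_to_jiffies(POLL_PERIOD_MS));
}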
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_hid.c b/drivers/hid/amd-sfh-hid/amd_sfh_hid.c
new file mode 100644
index 000000000000..4f989483aa03
--- /dev/null
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_hid.c
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * AMD MP2 Sensors transport driver
+ *
+ * Authors: Nehal Bakulchandra Shah <Nehal-bakulchandra.shah@amd.com>
+ * Sandeep Singh <sandeep.singh@amd.com>
+ */
+#include <linux/hid.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+
+#include "amd_sfh_hid.h"
+
+#define AMD_SFH_RESPONSE_TIMEOUT 1500
+
+/**
+ * amdtp_hid_parse() - hid-core .parse() callback
+ * @hid: hid device instance
+ *
+ * This function gets called during the call to hid_add_device().
+ *
+ * Return: 0 on success and non-zero on error
+ */
+static int amdtp_hid_parse(struct hid_device *hid)
+{
+ struct amdtp_hid_data *hid_data = hid->driver_data;
+ struct amdtp_cl_data *cli_data = hid_data->cli_data;
+
+ return hid_parse_report(hid, cli_data->report_descr[hid_data->index],
+ cli_data->report_descr_sz[hid_data->index]);
+}
+
+/* Empty callbacks with success return code */
+static int amdtp_hid_start(struct hid_device *hid)
+{
+ return 0;
+}
+
+static void amdtp_hid_stop(struct hid_device *hid)
+{
+}
+
+static int amdtp_hid_open(struct hid_device *hid)
+{
+ return 0;
+}
+
+static void amdtp_hid_close(struct hid_device *hid)
+{
+}
+
+static int amdtp_raw_request(struct hid_device *hdev, u8 reportnum,
+ u8 *buf, size_t len, u8 rtype, int reqtype)
+{
+ return 0;
+}
+
+static void amdtp_hid_request(struct hid_device *hid, struct hid_report *rep, int reqtype)
+{
+ int rc;
+
+ switch (reqtype) {
+ case HID_REQ_GET_REPORT:
+ rc = amd_sfh_get_report(hid, rep->id, rep->type);
+ if (rc)
+ dev_err(&hid->dev, "AMDSFH get report error\n");
+ break;
+ case HID_REQ_SET_REPORT:
+ amd_sfh_set_report(hid, rep->id, reqtype);
+ break;
+ default:
+ break;
+ }
+}
+
+static int amdtp_wait_for_response(struct hid_device *hid)
+{
+ struct amdtp_hid_data *hid_data = hid->driver_data;
+ struct amdtp_cl_data *cli_data = hid_data->cli_data;
+ int i, ret = 0;
+
+ for (i = 0; i < cli_data->num_hid_devices; i++) {
+ if (cli_data->hid_sensor_hubs[i] == hid)
+ break;
+ }
+
+ if (!cli_data->request_done[i])
+ ret = wait_event_interruptible_timeout(hid_data->hid_wait,
+ cli_data->request_done[i],
+ msecs_to_jiffies(AMD_SFH_RESPONSE_TIMEOUT));
+ if (ret == -ERESTARTSYS)
+ return -ERESTARTSYS;
+ else if (ret < 0)
+ return -ETIMEDOUT;
+ else
+ return 0;
+}
+
+void amdtp_hid_wakeup(struct hid_device *hid)
+{
+ struct amdtp_hid_data *hid_data = hid->driver_data;
+ struct amdtp_cl_data *cli_data = hid_data->cli_data;
+
+ cli_data->request_done[cli_data->cur_hid_dev] = true;
+ wake_up_interruptible(&hid_data->hid_wait);
+}
+
+static struct hid_ll_driver amdtp_hid_ll_driver = {
+ .parse = amdtp_hid_parse,
+ .start = amdtp_hid_start,
+ .stop = amdtp_hid_stop,
+ .open = amdtp_hid_open,
+ .close = amdtp_hid_close,
+ .request = amdtp_hid_request,
+ .wait = amdtp_wait_for_response,
+ .raw_request = amdtp_raw_request,
+};
+
+int amdtp_hid_probe(u32 cur_hid_dev, struct amdtp_cl_data *cli_data)
+{
+ struct hid_device *hid;
+ struct amdtp_hid_data *hid_data;
+ int rc;
+
+ hid = hid_allocate_device();
+ if (IS_ERR(hid))
+ return PTR_ERR(hid);
+
+ hid_data = kzalloc(sizeof(*hid_data), GFP_KERNEL);
+ if (!hid_data) {
+ rc = -ENOMEM;
+ goto err_hid_data;
+ }
+
+ hid->ll_driver = &amdtp_hid_ll_driver;
+ hid_data->index = cur_hid_dev;
+ hid_data->cli_data = cli_data;
+ init_waitqueue_head(&hid_data->hid_wait);
+
+ hid->driver_data = hid_data;
+ cli_data->hid_sensor_hubs[cur_hid_dev] = hid;
+ hid->bus = BUS_AMD_AMDTP;
+ hid->vendor = AMD_SFH_HID_VENDOR;
+ hid->product = AMD_SFH_HID_PRODUCT;
+ snprintf(hid->name, sizeof(hid->name), "%s %04X:%04X", "hid-amdtp",
+ hid->vendor, hid->product);
+
+ rc = hid_add_device(hid);
+ if (rc)
+ goto err_hid_device;
+ return 0;
+
+err_hid_device:
+ kfree(hid_data);
+err_hid_data:
+ hid_destroy_device(hid);
+ return rc;
+}
+
+void amdtp_hid_remove(struct amdtp_cl_data *cli_data)
+{
+ int i;
+
+ for (i = 0; i < cli_data->num_hid_devices; ++i) {
+ kfree(cli_data->feature_report[i]);
+ kfree(cli_data->input_report[i]);
+ kfree(cli_data->report_descr[i]);
+ if (cli_data->hid_sensor_hubs[i]) {
+ kfree(cli_data->hid_sensor_hubs[i]->driver_data);
+ hid_destroy_device(cli_data->hid_sensor_hubs[i]);
+ cli_data->hid_sensor_hubs[i] = NULL;
+ }
+ }
+}
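The .wait/amdtp_hid_wakeup() pair above is a one-shot handshake: HID core blocks in amdtp_wait_for_response() until the client side marks the request complete and wakes the queue. A minimal sketch of the client-side completion path, assuming a delayed-work consumer; the function name and the report-copy step are illustrative, since the real consumer (amd_sfh_client.c) is outside this hunk:

/* Hypothetical client-side completion path (sketch only) */
static void sketch_fetch_report_work(struct work_struct *work)
{
	struct delayed_work *dw = to_delayed_work(work);
	struct amdtp_cl_data *cli_data =
		container_of(dw, struct amdtp_cl_data, work);
	u32 idx = cli_data->cur_hid_dev;

	/* ... copy the requested report out of the sensor DMA buffer ... */

	/*
	 * amdtp_hid_wakeup() sets request_done[cur_hid_dev] and wakes the
	 * thread blocked in amdtp_wait_for_response().
	 */
	amdtp_hid_wakeup(cli_data->hid_sensor_hubs[idx]);
}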
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_hid.h b/drivers/hid/amd-sfh-hid/amd_sfh_hid.h
new file mode 100644
index 000000000000..d7eac1728e31
--- /dev/null
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_hid.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * AMD MP2 Sensors transport driver
+ *
+ * Authors: Nehal Bakulchandra Shah <Nehal-bakulchandra.shah@amd.com>
+ * Sandeep Singh <sandeep.singh@amd.com>
+ */
+
+#ifndef AMDSFH_HID_H
+#define AMDSFH_HID_H
+
+#define MAX_HID_DEVICES 4
+#define BUS_AMD_AMDTP 0x20
+#define AMD_SFH_HID_VENDOR 0x1022
+#define AMD_SFH_HID_PRODUCT 0x0001
+
+struct amdtp_cl_data {
+ u8 init_done;
+ u32 cur_hid_dev;
+ u32 hid_dev_count;
+ u32 num_hid_devices;
+ struct device_info *hid_devices;
+ u8 *report_descr[MAX_HID_DEVICES];
+ int report_descr_sz[MAX_HID_DEVICES];
+ struct hid_device *hid_sensor_hubs[MAX_HID_DEVICES];
+ u8 *hid_descr[MAX_HID_DEVICES];
+ int hid_descr_size[MAX_HID_DEVICES];
+ phys_addr_t phys_addr_base;
+ u32 *sensor_virt_addr[MAX_HID_DEVICES];
+ dma_addr_t sensor_dma_addr[MAX_HID_DEVICES];
+ u32 sensor_sts[MAX_HID_DEVICES];
+ u32 sensor_requested_cnt[MAX_HID_DEVICES];
+ u8 report_type[MAX_HID_DEVICES];
+ u8 report_id[MAX_HID_DEVICES];
+ u8 sensor_idx[MAX_HID_DEVICES];
+ u8 *feature_report[MAX_HID_DEVICES];
+ u8 *input_report[MAX_HID_DEVICES];
+ u8 request_done[MAX_HID_DEVICES];
+ struct delayed_work work;
+ struct delayed_work work_buffer;
+};
+
+/**
+ * struct amdtp_hid_data - Per instance HID data
+ * @index: Device index in the order of enumeration
+ * @request_done: Get Feature/Input report complete flag
+ * used during get/set request from hid core
+ * @cli_data: Link to the client instance
+ * @hid_wait: Completion waitq
+ *
+ * Used to tie hid->driver data to driver client instance
+ */
+struct amdtp_hid_data {
+ int index;
+ struct amdtp_cl_data *cli_data;
+ wait_queue_head_t hid_wait;
+};
+
+/* Interface functions between HID LL driver and AMD SFH client */
+void hid_amdtp_set_feature(struct hid_device *hid, char *buf, u32 len, int report_id);
+void hid_amdtp_get_report(struct hid_device *hid, int report_id, int report_type);
+int amdtp_hid_probe(u32 cur_hid_dev, struct amdtp_cl_data *cli_data);
+void amdtp_hid_remove(struct amdtp_cl_data *cli_data);
+int amd_sfh_get_report(struct hid_device *hid, int report_id, int report_type);
+void amd_sfh_set_report(struct hid_device *hid, int report_id, int report_type);
+void amdtp_hid_wakeup(struct hid_device *hid);
+#endif
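With the per-device arrays above, client bring-up reduces to one amdtp_hid_probe() call per enumerated sensor. A sketch, assuming report_descr[] and report_descr_sz[] were populated beforehand; the function name is illustrative:

/* Sketch: create one HID device per detected sensor */
static int sketch_client_init(struct amdtp_cl_data *cli_data)
{
	u32 i;
	int rc;

	for (i = 0; i < cli_data->num_hid_devices; i++) {
		rc = amdtp_hid_probe(i, cli_data);
		if (rc) {
			/* amdtp_hid_remove() tears down whatever did probe */
			amdtp_hid_remove(cli_data);
			return rc;
		}
	}
	return 0;
}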
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
new file mode 100644
index 000000000000..dbac16641662
--- /dev/null
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * AMD MP2 PCIe communication driver
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Authors: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ * Sandeep Singh <Sandeep.singh@amd.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "amd_sfh_pcie.h"
+
+#define DRIVER_NAME "pcie_mp2_amd"
+#define DRIVER_DESC "AMD(R) PCIe MP2 Communication Driver"
+
+#define ACEL_EN BIT(0)
+#define GYRO_EN BIT(1)
+#define MAGNO_EN BIT(2)
+#define ALS_EN BIT(19)
+
+void amd_start_sensor(struct amd_mp2_dev *privdata, struct amd_mp2_sensor_info info)
+{
+ union sfh_cmd_param cmd_param;
+ union sfh_cmd_base cmd_base;
+
+ /* fill up command register */
+ memset(&cmd_base, 0, sizeof(cmd_base));
+ cmd_base.s.cmd_id = ENABLE_SENSOR;
+ cmd_base.s.period = info.period;
+ cmd_base.s.sensor_id = info.sensor_idx;
+
+ /* fill up command param register */
+ memset(&cmd_param, 0, sizeof(cmd_param));
+ cmd_param.s.buf_layout = 1;
+ cmd_param.s.buf_length = 16;
+
+ writeq(info.dma_address, privdata->mmio + AMD_C2P_MSG2);
+ writel(cmd_param.ul, privdata->mmio + AMD_C2P_MSG1);
+ writel(cmd_base.ul, privdata->mmio + AMD_C2P_MSG0);
+}
+
+void amd_stop_sensor(struct amd_mp2_dev *privdata, u16 sensor_idx)
+{
+ union sfh_cmd_base cmd_base;
+
+ /* fill up command register */
+ memset(&cmd_base, 0, sizeof(cmd_base));
+ cmd_base.s.cmd_id = DISABLE_SENSOR;
+ cmd_base.s.period = 0;
+ cmd_base.s.sensor_id = sensor_idx;
+
+ writeq(0x0, privdata->mmio + AMD_C2P_MSG2);
+ writel(cmd_base.ul, privdata->mmio + AMD_C2P_MSG0);
+}
+
+void amd_stop_all_sensors(struct amd_mp2_dev *privdata)
+{
+ union sfh_cmd_base cmd_base;
+
+ /* fill up command register */
+ memset(&cmd_base, 0, sizeof(cmd_base));
+ cmd_base.s.cmd_id = STOP_ALL_SENSORS;
+ cmd_base.s.period = 0;
+ cmd_base.s.sensor_id = 0;
+
+ writel(cmd_base.ul, privdata->mmio + AMD_C2P_MSG0);
+}
+
+int amd_mp2_get_sensor_num(struct amd_mp2_dev *privdata, u8 *sensor_id)
+{
+ int activestatus, num_of_sensors = 0;
+
+ privdata->activecontrolstatus = readl(privdata->mmio + AMD_P2C_MSG3);
+ activestatus = privdata->activecontrolstatus >> 4;
+ if (ACEL_EN & activestatus)
+ sensor_id[num_of_sensors++] = accel_idx;
+
+ if (GYRO_EN & activestatus)
+ sensor_id[num_of_sensors++] = gyro_idx;
+
+ if (MAGNO_EN & activestatus)
+ sensor_id[num_of_sensors++] = mag_idx;
+
+ if (ALS_EN & activestatus)
+ sensor_id[num_of_sensors++] = als_idx;
+
+ return num_of_sensors;
+}
+
+static void amd_mp2_pci_remove(void *privdata)
+{
+ amd_sfh_hid_client_deinit(privdata);
+ amd_stop_all_sensors(privdata);
+}
+
+static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct amd_mp2_dev *privdata;
+ int rc;
+
+ privdata = devm_kzalloc(&pdev->dev, sizeof(*privdata), GFP_KERNEL);
+ if (!privdata)
+ return -ENOMEM;
+
+ privdata->pdev = pdev;
+ pci_set_drvdata(pdev, privdata);
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ rc = pcim_iomap_regions(pdev, BIT(2), DRIVER_NAME);
+ if (rc)
+ return rc;
+
+ privdata->mmio = pcim_iomap_table(pdev)[2];
+ pci_set_master(pdev);
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (rc) {
+ /* Fall back to 32-bit DMA; fail the probe only if that also fails */
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc)
+ return rc;
+ }
+ rc = devm_add_action_or_reset(&pdev->dev, amd_mp2_pci_remove, privdata);
+ if (rc)
+ return rc;
+
+ return amd_sfh_hid_client_init(privdata);
+}
+
+static const struct pci_device_id amd_mp2_pci_tbl[] = {
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_MP2) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, amd_mp2_pci_tbl);
+
+static struct pci_driver amd_mp2_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = amd_mp2_pci_tbl,
+ .probe = amd_mp2_pci_probe,
+};
+module_pci_driver(amd_mp2_pci_driver);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Shyam Sundar S K <Shyam-sundar.S-k@amd.com>");
+MODULE_AUTHOR("Sandeep Singh <Sandeep.singh@amd.com>");
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
new file mode 100644
index 000000000000..8f8d19b2cfe5
--- /dev/null
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * AMD MP2 PCIe communication driver
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ * Authors: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ * Sandeep Singh <Sandeep.singh@amd.com>
+ */
+
+#ifndef PCIE_MP2_AMD_H
+#define PCIE_MP2_AMD_H
+
+#include <linux/pci.h>
+
+#define PCI_DEVICE_ID_AMD_MP2 0x15E4
+
+#define ENABLE_SENSOR 1
+#define DISABLE_SENSOR 2
+#define STOP_ALL_SENSORS 8
+
+/* MP2 C2P Message Registers */
+#define AMD_C2P_MSG0 0x10500
+#define AMD_C2P_MSG1 0x10504
+#define AMD_C2P_MSG2 0x10508
+
+/* MP2 P2C Message Registers */
+#define AMD_P2C_MSG3 0x1068C /* Supported Sensors info */
+
+/* SFH Command register */
+union sfh_cmd_base {
+ u32 ul;
+ struct {
+ u32 cmd_id : 8;
+ u32 sensor_id : 8;
+ u32 period : 16;
+ } s;
+};
+
+union sfh_cmd_param {
+ u32 ul;
+ struct {
+ u32 buf_layout : 2;
+ u32 buf_length : 6;
+ u32 rsvd : 24;
+ } s;
+};
+
+struct sfh_cmd_reg {
+ union sfh_cmd_base cmd_base;
+ union sfh_cmd_param cmd_param;
+ phys_addr_t phys_addr;
+};
+
+enum sensor_idx {
+ accel_idx = 0,
+ gyro_idx = 1,
+ mag_idx = 2,
+ als_idx = 19
+};
+
+struct amd_mp2_dev {
+ struct pci_dev *pdev;
+ struct amdtp_cl_data *cl_data;
+ void __iomem *mmio;
+ u32 activecontrolstatus;
+};
+
+struct amd_mp2_sensor_info {
+ u8 sensor_idx;
+ u32 period;
+ dma_addr_t dma_address;
+};
+
+void amd_start_sensor(struct amd_mp2_dev *privdata, struct amd_mp2_sensor_info info);
+void amd_stop_sensor(struct amd_mp2_dev *privdata, u16 sensor_idx);
+void amd_stop_all_sensors(struct amd_mp2_dev *privdata);
+int amd_mp2_get_sensor_num(struct amd_mp2_dev *privdata, u8 *sensor_id);
+int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata);
+int amd_sfh_hid_client_deinit(struct amd_mp2_dev *privdata);
+#endif
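To make the presence decoding in amd_mp2_get_sensor_num() concrete, here is the same logic applied by hand to a made-up AMD_P2C_MSG3 readout (ACEL_EN and ALS_EN are the BIT() masks defined in amd_sfh_pcie.c):

u32 reg = 0x800010;      /* hypothetical AMD_P2C_MSG3 value */
u32 active = reg >> 4;   /* 0x80001 == BIT(0) | BIT(19) */
u8 ids[4];               /* MAX_HID_DEVICES from amd_sfh_hid.h */
int n = 0;

if (active & BIT(0))     /* ACEL_EN: accelerometer present */
	ids[n++] = accel_idx;
if (active & BIT(19))    /* ALS_EN: ambient light sensor present */
	ids[n++] = als_idx;
/* n == 2, ids == { accel_idx, als_idx } */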
diff --git a/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c b/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c
new file mode 100644
index 000000000000..6e3ad66e57a4
--- /dev/null
+++ b/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * AMD SFH Report Descriptor generator
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ * Authors: Nehal Bakulchandra Shah <Nehal-Bakulchandra.Shah@amd.com>
+ * Sandeep Singh <sandeep.singh@amd.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include "amd_sfh_pcie.h"
+#include "amd_sfh_hid_desc.h"
+#include "amd_sfh_hid_report_desc.h"
+
+#define AMD_SFH_FW_MULTIPLIER (1000)
+#define HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM 0x41
+#define HID_USAGE_SENSOR_PROP_POWER_STATE_D0_FULL_POWER_ENUM 0x51
+#define HID_DEFAULT_REPORT_INTERVAL 0x50
+#define HID_DEFAULT_MIN_VALUE 0x7F
+#define HID_DEFAULT_MAX_VALUE 0x80
+#define HID_DEFAULT_SENSITIVITY 0x7F
+#define HID_USAGE_SENSOR_PROPERTY_CONNECTION_TYPE_PC_INTEGRATED_ENUM 0x01
+/* state enums */
+#define HID_USAGE_SENSOR_STATE_READY_ENUM 0x02
+#define HID_USAGE_SENSOR_STATE_INITIALIZING_ENUM 0x05
+#define HID_USAGE_SENSOR_EVENT_DATA_UPDATED_ENUM 0x04
+
+int get_report_descriptor(int sensor_idx, u8 *rep_desc)
+{
+ switch (sensor_idx) {
+ case accel_idx: /* accel */
+ memset(rep_desc, 0, sizeof(accel3_report_descriptor));
+ memcpy(rep_desc, accel3_report_descriptor,
+ sizeof(accel3_report_descriptor));
+ break;
+ case gyro_idx: /* gyro */
+ memset(rep_desc, 0, sizeof(gyro3_report_descriptor));
+ memcpy(rep_desc, gyro3_report_descriptor,
+ sizeof(gyro3_report_descriptor));
+ break;
+ case mag_idx: /* Magnetometer */
+ memset(rep_desc, 0, sizeof(comp3_report_descriptor));
+ memcpy(rep_desc, comp3_report_descriptor,
+ sizeof(comp3_report_descriptor));
+ break;
+ case als_idx: /* ambient light sensor */
+ memset(rep_desc, 0, sizeof(als_report_descriptor));
+ memcpy(rep_desc, als_report_descriptor,
+ sizeof(als_report_descriptor));
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+u32 get_descr_sz(int sensor_idx, int descriptor_name)
+{
+ switch (sensor_idx) {
+ case accel_idx:
+ switch (descriptor_name) {
+ case descr_size:
+ return sizeof(accel3_report_descriptor);
+ case input_size:
+ return sizeof(struct accel3_input_report);
+ case feature_size:
+ return sizeof(struct accel3_feature_report);
+ }
+ break;
+ case gyro_idx:
+ switch (descriptor_name) {
+ case descr_size:
+ return sizeof(gyro3_report_descriptor);
+ case input_size:
+ return sizeof(struct gyro_input_report);
+ case feature_size:
+ return sizeof(struct gyro_feature_report);
+ }
+ break;
+ case mag_idx:
+ switch (descriptor_name) {
+ case descr_size:
+ return sizeof(comp3_report_descriptor);
+ case input_size:
+ return sizeof(struct magno_input_report);
+ case feature_size:
+ return sizeof(struct magno_feature_report);
+ }
+ break;
+ case als_idx:
+ switch (descriptor_name) {
+ case descr_size:
+ return sizeof(als_report_descriptor);
+ case input_size:
+ return sizeof(struct als_input_report);
+ case feature_size:
+ return sizeof(struct als_feature_report);
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static void get_common_features(struct common_feature_property *common, int report_id)
+{
+ common->report_id = report_id;
+ common->connection_type = HID_USAGE_SENSOR_PROPERTY_CONNECTION_TYPE_PC_INTEGRATED_ENUM;
+ common->report_state = HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM;
+ common->power_state = HID_USAGE_SENSOR_PROP_POWER_STATE_D0_FULL_POWER_ENUM;
+ common->sensor_state = HID_USAGE_SENSOR_STATE_INITIALIZING_ENUM;
+ common->report_interval = HID_DEFAULT_REPORT_INTERVAL;
+}
+
+u8 get_feature_report(int sensor_idx, int report_id, u8 *feature_report)
+{
+ struct accel3_feature_report acc_feature;
+ struct gyro_feature_report gyro_feature;
+ struct magno_feature_report magno_feature;
+ struct als_feature_report als_feature;
+ u8 report_size = 0;
+
+ if (!feature_report)
+ return report_size;
+
+ switch (sensor_idx) {
+ case accel_idx: /* accel */
+ get_common_features(&acc_feature.common_property, report_id);
+ acc_feature.accel_change_sensitivity = HID_DEFAULT_SENSITIVITY;
+ acc_feature.accel_sensitivity_min = HID_DEFAULT_MIN_VALUE;
+ acc_feature.accel_sensitivity_max = HID_DEFAULT_MAX_VALUE;
+ memcpy(feature_report, &acc_feature, sizeof(acc_feature));
+ report_size = sizeof(acc_feature);
+ break;
+ case gyro_idx: /* gyro */
+ get_common_features(&gyro_feature.common_property, report_id);
+ gyro_feature.gyro_change_sensitivity = HID_DEFAULT_SENSITIVITY;
+ gyro_feature.gyro_sensitivity_min = HID_DEFAULT_MIN_VALUE;
+ gyro_feature.gyro_sensitivity_max = HID_DEFAULT_MAX_VALUE;
+ memcpy(feature_report, &gyro_feature, sizeof(gyro_feature));
+ report_size = sizeof(gyro_feature);
+ break;
+ case mag_idx: /* Magnetometer */
+ get_common_features(&magno_feature.common_property, report_id);
+ magno_feature.magno_headingchange_sensitivity = HID_DEFAULT_SENSITIVITY;
+ magno_feature.heading_min = HID_DEFAULT_MIN_VALUE;
+ magno_feature.heading_max = HID_DEFAULT_MAX_VALUE;
+ magno_feature.flux_change_sensitivity = HID_DEFAULT_MIN_VALUE;
+ magno_feature.flux_min = HID_DEFAULT_MIN_VALUE;
+ magno_feature.flux_max = HID_DEFAULT_MAX_VALUE;
+ memcpy(feature_report, &magno_feature, sizeof(magno_feature));
+ report_size = sizeof(magno_feature);
+ break;
+ case als_idx: /* ambient light sensor */
+ get_common_features(&als_feature.common_property, report_id);
+ als_feature.als_change_sensitivity = HID_DEFAULT_SENSITIVITY;
+ als_feature.als_sensitivity_min = HID_DEFAULT_MIN_VALUE;
+ als_feature.als_sensitivity_max = HID_DEFAULT_MAX_VALUE;
+ memcpy(feature_report, &als_feature, sizeof(als_feature));
+ report_size = sizeof(als_feature);
+ break;
+ default:
+ break;
+ }
+ return report_size;
+}
+
+static void get_common_inputs(struct common_input_property *common, int report_id)
+{
+ common->report_id = report_id;
+ common->sensor_state = HID_USAGE_SENSOR_STATE_READY_ENUM;
+ common->event_type = HID_USAGE_SENSOR_EVENT_DATA_UPDATED_ENUM;
+}
+
+u8 get_input_report(int sensor_idx, int report_id, u8 *input_report, u32 *sensor_virt_addr)
+{
+ struct accel3_input_report acc_input;
+ struct gyro_input_report gyro_input;
+ struct magno_input_report magno_input;
+ struct als_input_report als_input;
+ u8 report_size = 0;
+
+ if (!sensor_virt_addr || !input_report)
+ return report_size;
+
+ switch (sensor_idx) {
+ case accel_idx: /* accel */
+ get_common_inputs(&acc_input.common_property, report_id);
+ acc_input.in_accel_x_value = (int)sensor_virt_addr[0] / AMD_SFH_FW_MULTIPLIER;
+ acc_input.in_accel_y_value = (int)sensor_virt_addr[1] / AMD_SFH_FW_MULTIPLIER;
+ acc_input.in_accel_z_value = (int)sensor_virt_addr[2] / AMD_SFH_FW_MULTIPLIER;
+ memcpy(input_report, &acc_input, sizeof(acc_input));
+ report_size = sizeof(acc_input);
+ break;
+ case gyro_idx: /* gyro */
+ get_common_inputs(&gyro_input.common_property, report_id);
+ gyro_input.in_angular_x_value = (int)sensor_virt_addr[0] / AMD_SFH_FW_MULTIPLIER;
+ gyro_input.in_angular_y_value = (int)sensor_virt_addr[1] / AMD_SFH_FW_MULTIPLIER;
+ gyro_input.in_angular_z_value = (int)sensor_virt_addr[2] / AMD_SFH_FW_MULTIPLIER;
+ memcpy(input_report, &gyro_input, sizeof(gyro_input));
+ report_size = sizeof(gyro_input);
+ break;
+ case mag_idx: /* Magnetometer */
+ get_common_inputs(&magno_input.common_property, report_id);
+ magno_input.in_magno_x = (int)sensor_virt_addr[0] / AMD_SFH_FW_MULTIPLIER;
+ magno_input.in_magno_y = (int)sensor_virt_addr[1] / AMD_SFH_FW_MULTIPLIER;
+ magno_input.in_magno_z = (int)sensor_virt_addr[2] / AMD_SFH_FW_MULTIPLIER;
+ magno_input.in_magno_accuracy = (u16)sensor_virt_addr[3] / AMD_SFH_FW_MULTIPLIER;
+ memcpy(input_report, &magno_input, sizeof(magno_input));
+ report_size = sizeof(magno_input);
+ break;
+ case als_idx: /* Als */
+ get_common_inputs(&als_input.common_property, report_id);
+ als_input.illuminance_value = (int)sensor_virt_addr[0] / AMD_SFH_FW_MULTIPLIER;
+ report_size = sizeof(als_input);
+ memcpy(input_report, &als_input, sizeof(als_input));
+ break;
+ default:
+ break;
+ }
+ return report_size;
+}
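The firmware reports values in fixed point scaled by AMD_SFH_FW_MULTIPLIER (1000), and get_input_report() recovers the integer part by truncating division. A worked example with invented DMA words:

u32 sample[3] = { 9806, 120, 4500 };  /* hypothetical accel DMA words */
u8 report[32];
u8 sz;

/* 9806/1000 -> 9, 120/1000 -> 0, 4500/1000 -> 4 (integer truncation) */
sz = get_input_report(accel_idx, 1, report, sample);
/* sz == sizeof(struct accel3_input_report), report holds the packed data */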
diff --git a/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.h b/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.h
new file mode 100644
index 000000000000..095c471d8fd6
--- /dev/null
+++ b/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * HID report descriptors, structures and routines
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ * Authors: Nehal Bakulchandra Shah <Nehal-bakulchandra.shah@amd.com>
+ * Sandeep Singh <Sandeep.singh@amd.com>
+ */
+
+#ifndef AMD_SFH_HID_DESCRIPTOR_H
+#define AMD_SFH_HID_DESCRIPTOR_H
+
+enum desc_type {
+ /* Selector for which size get_descr_sz() returns */
+ descr_size = 1,
+ input_size,
+ feature_size,
+};
+
+struct common_feature_property {
+ /* common properties */
+ u8 report_id;
+ u8 connection_type;
+ u8 report_state;
+ u8 power_state;
+ u8 sensor_state;
+ u32 report_interval;
+} __packed;
+
+struct common_input_property {
+ /* common properties */
+ u8 report_id;
+ u8 sensor_state;
+ u8 event_type;
+} __packed;
+
+struct accel3_feature_report {
+ struct common_feature_property common_property;
+ /* properties specific to this sensor */
+ u16 accel_change_sensitivity;
+ s16 accel_sensitivity_max;
+ s16 accel_sensitivity_min;
+} __packed;
+
+struct accel3_input_report {
+ struct common_input_property common_property;
+ /* values specific to this sensor */
+ int in_accel_x_value;
+ int in_accel_y_value;
+ int in_accel_z_value;
+ /* include if required to support the "shake" event */
+ u8 in_accel_shake_detection;
+} __packed;
+
+struct gyro_feature_report {
+ struct common_feature_property common_property;
+ /* properties specific to this sensor */
+ u16 gyro_change_sensitivity;
+ s16 gyro_sensitivity_max;
+ s16 gyro_sensitivity_min;
+} __packed;
+
+struct gyro_input_report {
+ struct common_input_property common_property;
+ /* values specific to this sensor */
+ int in_angular_x_value;
+ int in_angular_y_value;
+ int in_angular_z_value;
+} __packed;
+
+struct magno_feature_report {
+ struct common_feature_property common_property;
+ /* properties specific to this sensor */
+ u16 magno_headingchange_sensitivity;
+ s16 heading_min;
+ s16 heading_max;
+ u16 flux_change_sensitivity;
+ s16 flux_min;
+ s16 flux_max;
+} __packed;
+
+struct magno_input_report {
+ struct common_input_property common_property;
+ int in_magno_x;
+ int in_magno_y;
+ int in_magno_z;
+ int in_magno_accuracy;
+} __packed;
+
+struct als_feature_report {
+ struct common_feature_property common_property;
+ /* properties specific to this sensor */
+ u16 als_change_sensitivity;
+ s16 als_sensitivity_max;
+ s16 als_sensitivity_min;
+} __packed;
+
+struct als_input_report {
+ struct common_input_property common_property;
+ /* values specific to this sensor */
+ int illuminance_value;
+} __packed;
+
+int get_report_descriptor(int sensor_idx, u8 rep_desc[]);
+u32 get_descr_sz(int sensor_idx, int descriptor_name);
+u8 get_feature_report(int sensor_idx, int report_id, u8 *feature_report);
+u8 get_input_report(int sensor_idx, int report_id, u8 *input_report, u32 *sensor_virt_addr);
+#endif
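The __packed structures above have to match the report descriptors byte for byte, so a compile-time size check is a cheap guard against the two drifting apart. A sketch, assuming <linux/build_bug.h>; the 16-byte figure is report_id(1) + sensor_state(1) + event_type(1) + three 32-bit axes(12) + shake flag(1):

static inline void sketch_check_report_layout(void)
{
	BUILD_BUG_ON(sizeof(struct common_input_property) != 3);
	BUILD_BUG_ON(sizeof(struct accel3_input_report) != 16);
}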
diff --git a/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_report_desc.h b/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_report_desc.h
new file mode 100644
index 000000000000..44271d39b322
--- /dev/null
+++ b/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_report_desc.h
@@ -0,0 +1,645 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * HID report descriptor structures
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ * Authors: Nehal Bakulchandra Shah <Nehal-bakulchandra.shah@amd.com>
+ * Sandeep Singh <Sandeep.singh@amd.com>
+ */
+
+#ifndef AMD_SFH_HID_REPORT_DESCRIPTOR_H
+#define AMD_SFH_HID_REPORT_DESCRIPTOR_H
+
+// Accelerometer 3D Sensor
+static const u8 accel3_report_descriptor[] = {
+0x05, 0x20, /* Usage page */
+0x09, 0x73, /* Motion type Accel 3D */
+0xA1, 0x00, /* HID Collection (Physical) */
+
+//feature reports(xmit/receive)
+0x85, 1, /* HID Report ID */
+0x05, 0x20, /* HID usage page sensor */
+0x0A, 0x09, 0x03, /* Sensor property and sensor connection type */
+0x15, 0, /* HID logical MIN_8(0) */
+0x25, 2, /* HID logical MAX_8(2) */
+0x75, 8, /* HID report size(8) */
+0x95, 1, /* HID report count(1) */
+0xA1, 0x02, /* HID collection (logical) */
+0x0A, 0x30, 0x08, /* Sensor property connection type integrated sel */
+0x0A, 0x31, 0x08, /* Sensor property connection type attached sel */
+0x0A, 0x32, 0x08, /* Sensor property connection type external sel */
+0xB1, 0x00, /* HID feature (Data_Arr_Abs) */
+0xC0, /* HID end collection */
+0x0A, 0x16, 0x03, /* HID usage sensor property reporting state */
+0x15, 0, /* HID logical Min_8(0) */
+0x25, 5, /* HID logical Max_8(5) */
+0x75, 8, /* HID report size(8) */
+0x95, 1, /* HID report count(1) */
+0xA1, 0x02, /* HID collection(logical) */
+0x0A, 0x40, 0x08, /* Sensor property report state no events sel */
+0x0A, 0x41, 0x08, /* Sensor property report state all events sel */
+0x0A, 0x42, 0x08, /* Sensor property report state threshold events sel */
+0x0A, 0x43, 0x08, /* Sensor property report state no events wake sel */
+0x0A, 0x44, 0x08, /* Sensor property report state all events wake sel */
+0x0A, 0x45, 0x08, /* Sensor property report state threshold events wake sel */
+0xB1, 0x00, /* HID feature (Data_Arr_Abs) */
+0xC0, /* HID end collection */
+0x0A, 0x19, 0x03, /* HID usage sensor property power state */
+0x15, 0, /* HID logical Min_8(0) */
+0x25, 5, /* HID logical Max_8(5) */
+0x75, 8, /* HID report size(8) */
+0x95, 1, /* HID report count(1) */
+0xA1, 0x02, /* HID collection(logical) */
+0x0A, 0x50, 0x08, /* Sensor property power state undefined sel */
+0x0A, 0x51, 0x08, /* Sensor property power state D0 full power sel */
+0x0A, 0x52, 0x08, /* Sensor property power state D1 low power sel */
+0x0A, 0x53, 0x08, /* Sensor property power state D2 standby with wake sel */
+0x0A, 0x54, 0x08, /* Sensor property power state D3 sleep with wake sel */
+0x0A, 0x55, 0x08, /* Sensor property power state D4 power off sel */
+0xB1, 0x00, /* HID feature (Data_Arr_Abs) */
+0xC0, /* HID end collection */
+0x0A, 0x01, 0x02, /* HID usage sensor state */
+0x15, 0, /* HID logical Min_8(0) */
+0x25, 6, /* HID logical Max_8(6) */
+0x75, 8, /* HID report size(8) */
+0x95, 1, /* HID report count(1) */
+0xA1, 0x02, /* HID collection(logical) */
+0x0A, 0x00, 0x08, /* HID usage sensor state unknown sel */
+0x0A, 0x01, 0x08, /* HID usage sensor state ready sel */
+0x0A, 0x02, 0x08, /* HID usage sensor state not available sel */
+0x0A, 0x03, 0x08, /* HID usage sensor state no data sel */
+0x0A, 0x04, 0x08, /* HID usage sensor state initializing sel */
+0x0A, 0x05, 0x08, /* HID usage sensor state access denied sel */
+0x0A, 0x06, 0x08, /* HID usage sensor state error sel */
+0xB1, 0x00, /* HID feature (Data_Arr_Abs) */
+0xC0, /* HID end collection */
+0x0A, 0x0E, 0x03, /* HID usage sensor property report interval */
+0x15, 0, /* HID logical Min_8(0) */
+0x27, 0xFF, 0xFF, 0xFF, 0xFF, /* HID logical Max_32 */
+
+0x75, 32, /* HID report size(32) */
+0x95, 1, /* HID report count(1) */
+0x55, 0, /* HID unit exponent(0) */
+0xB1, 0x02, /* HID feature (Data_Arr_Abs) */
+0x0A, 0x52, 0x14, /* Sensor data motion accel and mod change sensitivity (ABS) */
+
+0x15, 0, /* HID logical Min_8(0) */
+0x26, 0xFF, 0xFF, /* HID logical Max_16(0xFF,0xFF) */
+
+0x75, 16, /* HID report size(16) */
+0x95, 1, /* HID report count(1) */
+0x55, 0x0E, /* HID unit exponent(0x0E) */
+0xB1, 0x02, /* HID feature (Data_Arr_Abs) */
+0x0A, 0x52, 0x24, /* HID usage sensor data (motion accel and mod max) */
+
+0x16, 0x01, 0x80, /* HID logical Min_16(0x01,0x80) */
+
+0x26, 0xFF, 0x7F, /* HID logical Max_16(0xFF,0x7F) */
+
+0x75, 16, /* HID report size(16) */
+0x95, 1, /* HID report count(1) */
+0x55, 0x0E, /* HID unit exponent(0x0E) */
+0xB1, 0x02, /* HID feature (Data_Arr_Abs) */
+0x0A, 0x52, 0x34, /* HID usage sensor data (motion accel and mod min) */
+
+0x16, 0x01, 0x80, /* HID logical Min_16(0x01,0x80) */
+
+0x26, 0xFF, 0x7F, /* HID logical Max_16(0xFF,0x7F) */
+
+0x75, 16, /* HID report size(16) */
+0x95, 1, /* HID report count(1) */
+0x55, 0x0E, /* HID unit exponent(0x0E) */
+0xB1, 0x02, /* HID feature (Data_Arr_Abs) */
+
+//input report (transmit)
+0x05, 0x20, /* HID usage page sensors */
+0x0A, 0x01, 0x02, /* HID usage sensor state */
+0x15, 0, /* HID logical Min_8(0) */
+0x25, 6, /* HID logical Max_8(6) */
+0x75, 8, /* HID report size(8) */
+0x95, 1, /* HID report count (1) */
+0xA1, 0x02, /* HID collection (logical) */
+0x0A, 0x00, 0x08, /* HID usage sensor state unknown sel */
+0x0A, 0x01, 0x08, /* HID usage sensor state ready sel */
+0x0A, 0x02, 0x08, /* HID usage sensor state not available sel */
+0x0A, 0x03, 0x08, /* HID usage sensor state no data sel */
+0x0A, 0x04, 0x08, /* HID usage sensor state initializing sel */
+0x0A, 0x05, 0x08, /* HID usage sensor state access denied sel */
+0x0A, 0x06, 0x08, /* HID usage sensor state error sel */
+0X81, 0x00, /* HID Input (Data_Arr_Abs) */
+0xC0, /* HID end collection */
+0x0A, 0x02, 0x02, /* HID usage sensor event */
+0x15, 0, /* HID logical Min_8(0) */
+0x25, 5, /* HID logical Max_8(5) */
+0x75, 8, /* HID report size(8) */
+0x95, 1, /* HID report count (1) */
+0xA1, 0x02, /* HID collection (logical) */
+0x0A, 0x10, 0x08, /* HID usage sensor event unknown sel */
+0x0A, 0x11, 0x08, /* HID usage sensor event state changed sel */
+0x0A, 0x12, 0x08, /* HID usage sensor event property changed sel */
+0x0A, 0x13, 0x08, /* HID usage sensor event data updated sel */
+0x0A, 0x14, 0x08, /* HID usage sensor event poll response sel */
+0x0A, 0x15, 0x08, /* HID usage sensor event change sensitivity sel */
+0X81, 0x00, /* HID Input (Data_Arr_Abs) */
+0xC0, /* HID end collection */
+0x0A, 0x53, 0x04, /* HID usage sensor data motion Acceleration X axis */
+0x17, 0x00, 0x00, 0x01, 0x80, /* HID logical Min_32 */
+
+0x27, 0xFF, 0xFF, 0xFF, 0xFF, /* HID logical Max_32 */
+
+0x75, 32, /* HID report size(32) */
+0x95, 1, /* HID report count (1) */
+0x55, 0x0E, /* HID unit exponent(0x0E) */
+0X81, 0x02, /* HID Input (Data_Arr_Abs) */
+0x0A, 0x54, 0x04, /* HID usage sensor data motion Acceleration Y axis */
+0x17, 0x00, 0x00, 0x01, 0x80, /* HID logical Min_32 */
+
+0x27, 0xFF, 0xFF, 0xFF, 0xFF, /* HID logical Max_32 */
+
+0x75, 32, /* HID report size(32) */
+0x95, 1, /* HID report count (1) */
+0x55, 0x0E, /* HID unit exponent(0x0E) */
+0X81, 0x02, /* HID Input (Data_Arr_Abs) */
+0x0A, 0x55, 0x04, /* HID usage sensor data motion Acceleration Z axis */
+0x17, 0x00, 0x00, 0x01, 0x80, /* HID logical Min_32 */
+
+0x27, 0xFF, 0xFF, 0xFF, 0x7F, /* HID logical Max_32 */
+
+0x75, 32, /* HID report size(32) */
+0x95, 1, /* HID report count (1) */
+0x55, 0x0E, /* HID unit exponent(0x0E) */
+0X81, 0x02, /* HID Input (Data_Arr_Abs) */
+
+0x0A, 0x51, 0x04, /* HID usage sensor data motion state */
+0x15, 0, /* HID logical Min_8(0) False = Still */
+0x25, 1, /* HID logical Max_8(1) True = In motion */
+0x75, 8, /* HID report size(8) */
+0x95, 1, /* HID report count (1) */
+0X81, 0x02, /* HID Input (Data_Arr_Abs) */
+0xC0 /* HID end collection */
+};
+
+static const u8 gyro3_report_descriptor[] = {
+0x05, 0x20, /* Usage page */
+0x09, 0x76, /* Motion type Gyro3D */
+0xA1, 0x00, /* HID Collection (Physical) */
+
+0x85, 2, /* HID Report ID */
+0x05, 0x20, /* HID usage page sensor */
+0x0A, 0x09, 0x03, /* Sensor property and sensor connection type */
+0x15, 0, /* HID logical MIN_8(0) */
+0x25, 2, /* HID logical MAX_8(2) */
+0x75, 8, /* HID report size(8) */
+0x95, 1, /* HID report count(1) */
+0xA1, 0x02, /* HID collection (logical) */
+0x0A, 0x30, 0x08, /* Sensor property connection type integrated sel */
+0x0A, 0x31, 0x08, /* Sensor property connection type attached sel */
+0x0A, 0x32, 0x08, /* Sensor property connection type external sel */
+0xB1, 0x00, /* HID feature (Data_Arr_Abs) */
+0xC0, /* HID end collection */
+0x0A, 0x16, 0x03, /* HID usage sensor property reporting state */
+0x15, 0, /* HID logical Min_8(0) */
+0x25, 5, /* HID logical Max_8(5) */
+0x75, 8, /* HID report size(8) */
+0x95, 1, /* HID report count(1) */
+0xA1, 0x02, /* HID collection(logical) */
+0x0A, 0x40, 0x08, /* Sensor reporting state no events sel */
+0x0A, 0x41, 0x08, /* Sensor reporting state all events sel */
+0x0A, 0x42, 0x08, /* Sensor reporting state threshold events sel */
+0x0A, 0x43, 0x08, /* Sensor reporting state no events wake sel */
+0x0A, 0x44, 0x08, /* Sensor reporting state all events wake sel */
+0x0A, 0x45, 0x08, /* Sensor reporting state threshold events wake sel */
+0xB1, 0x00, /* HID feature (Data_Arr_Abs) */
+0xC0, /* HID end collection */
+0x0A, 0x19, 0x03, /* HID usage sensor property power state */
+0x15, 0, /* HID logical Min_8(0) */
+0x25, 5, /* HID logical Max_8(5) */
+0x75, 8, /* HID report size(8) */
+0x95, 1, /* HID report count(1) */
+0xA1, 0x02, /* HID collection(logical) */
+0x0A, 0x50, 0x08, /* Sensor power state undefined sel */
+0x0A, 0x51, 0x08, /* Sensor power state D0 full power sel */
+0x0A, 0x52, 0x08, /* Sensor power state D1 low power sel */
+0x0A, 0x53, 0x08, /* Sensor power state D2 standby with wake sel */
+0x0A, 0x54, 0x08, /* Sensor power state D3 sleep with wake sel */
+0x0A, 0x55, 0x08, /* Sensor power state D4 power off sel */
+0xB1, 0x00, /* HID feature (Data_Arr_Abs) */
+0xC0, /* HID end collection */
+0x0A, 0x01, 0x02, /* HID usage sensor state */
+0x15, 0, /* HID logical Min_8(0) */
+0x25, 6, /* HID logical Max_8(6) */
+0x75, 8, /* HID report size(8) */
+0x95, 1, /* HID report count(1) */
+0xA1, 0x02, /* HID collection(logical) */
+0x0A, 0x00, 0x08, /* HID usage sensor state unknown sel */
+0x0A, 0x01, 0x08, /* HID usage sensor state ready sel */
+0x0A, 0x02, 0x08, /* HID usage sensor state not available sel */
+0x0A, 0x03, 0x08, /* HID usage sensor state no data sel */
+0x0A, 0x04, 0x08, /* HID usage sensor state initializing sel */
+0x0A, 0x05, 0x08, /* HID usage sensor state access denied sel */
+0x0A, 0x06, 0x08, /* HID usage sensor state error sel */
+0xB1, 0x00, /* HID feature (Data_Arr_Abs) */
+0xC0, /* HID end collection */
+0x0A, 0x0E, 0x03, /* HID usage sensor property report interval */
+0x15, 0, /* HID logical Min_8(0) */
+0x27, 0xFF, 0xFF, 0xFF, 0xFF, /* HID logical Max_32 */
+
+0x75, 32, /* HID report size(32) */
+0x95, 1, /* HID report count(1) */
+0x55, 0, /* HID unit exponent(0) */
+0xB1, 0x02, /* HID feature (Data_Arr_Abs) */
+0x0A, 0x56, 0x14, /* Angular velocity and mod change sensitivity (ABS) */
+
+0x15, 0, /* HID logical Min_8(0) */
+0x26, 0xFF, 0xFF, /* HID logical Max_16(0xFF,0xFF) */
+
+0x75, 16, /* HID report size(16) */
+0x95, 1, /* HID report count(1) */
+0x55, 0x0E, /* HID unit exponent(0x0E) */
+0xB1, 0x02, /* HID feature (Data_Arr_Abs) */
+0x0A, 0x56, 0x24, /* Sensor data (motion angular velocity and mod max) */
+
+0x16, 0x01, 0x80, /* HID logical Min_16(0x01,0x80) */
+
+0x26, 0xFF, 0x7F, /* HID logical Max_16(0xFF,0x7F) */
+
+0x75, 16, /* HID report size(16) */
+0x95, 1, /* HID report count(1) */
+0x55, 0x0E, /* HID unit exponent(0x0E) */
+0xB1, 0x02, /* HID feature (Data_Arr_Abs) */
+0x0A, 0x56, 0x34, /* HID usage sensor data (motion accel and mod min) */
+
+0x16, 0x01, 0x80, /* HID logical Min_16(0x01,0x80) */
+
+0x26, 0xFF, 0x7F, /* HID logical Max_16(0xFF,0x7F) */
+
+0x75, 16, /* HID report size(16) */
+0x95, 1, /* HID report count(1) */
+0x55, 0x0E, /* HID unit exponent(0x0E) */
+0xB1, 0x02, /* HID feature (Data_Arr_Abs) */
+
+//Input reports(transmit)
+0x05, 0x20, /* HID usage page sensors */
+0x0A, 0x01, 0x02, /* HID usage sensor state */
+0x15, 0, /* HID logical Min_8(0) */
+0x25, 6, /* HID logical Max_8(6) */
+0x75, 8, /* HID report size(8) */
+0x95, 1, /* HID report count (1) */
+0xA1, 0x02, /* HID collection (logical) */
+0x0A, 0x00, 0x08, /* HID usage sensor state unknown sel */
+0x0A, 0x01, 0x08, /* HID usage sensor state ready sel */
+0x0A, 0x02, 0x08, /* HID usage sensor state not available sel */
+0x0A, 0x03, 0x08, /* HID usage sensor state no data sel */
+0x0A, 0x04, 0x08, /* HID usage sensor state initializing sel */
+0x0A, 0x05, 0x08, /* HID usage sensor state access denied sel */
+0x0A, 0x06, 0x08, /* HID usage sensor state error sel */
+0X81, 0x00, /* HID Input (Data_Arr_Abs) */
+0xC0, /* HID end collection */
+0x0A, 0x02, 0x02, /* HID usage sensor event */
+0x15, 0, /* HID logical Min_8(0) */
+0x25, 5, /* HID logical Max_8(5) */
+0x75, 8, /* HID report size(8) */
+0x95, 1, /* HID report count (1) */
+0xA1, 0x02, /* HID collection (logical) */
+0x0A, 0x10, 0x08, /* HID usage sensor event unknown sel */
+0x0A, 0x11, 0x08, /* HID usage sensor event state changed sel */
+0x0A, 0x12, 0x08, /* HID usage sensor event property changed sel */
+0x0A, 0x13, 0x08, /* HID usage sensor event data updated sel */
+0x0A, 0x14, 0x08, /* HID usage sensor event poll response sel */
+0x0A, 0x15, 0x08, /* HID usage sensor event change sensitivity sel */
+0X81, 0x00, /* HID Input (Data_Arr_Abs) */
+0xC0, /* HID end collection */
+0x0A, 0x57, 0x04, /* Sensor data motion Angular velocity X axis */
+0x17, 0x00, 0x00, 0x01, 0x80, /* HID logical Min_32 */
+
+0x27, 0xFF, 0xFF, 0xFF, 0x7F, /* HID logical Max_32 */
+
+0x75, 32, /* HID report size(32) */
+0x95, 1, /* HID report count (1) */
+0x55, 0x0E, /* HID unit exponent(0x0E) */
+0X81, 0x02, /* HID Input (Data_Arr_Abs) */
+0x0A, 0x58, 0x04, /* Sensor data motion Angular velocity Y axis */
+0x17, 0x00, 0x00, 0x01, 0x80, /* HID logical Min_32 */
+
+0x27, 0xFF, 0xFF, 0xFF, 0x7F, /* HID logical Max_32 */
+
+0x75, 32, /* HID report size(32) */
+0x95, 1, /* HID report count (1) */
+0x55, 0x0E, /* HID unit exponent(0x0E) */
+0X81, 0x02, /* HID Input (Data_Arr_Abs) */
+0x0A, 0x59, 0x04, /* Sensor data motion Angular velocity Z axis */
+0x17, 0x00, 0x00, 0x01, 0x80, /* HID logical Min_32 */
+
+0x27, 0xFF, 0xFF, 0xFF, 0x7F, /* HID logical Max_32 */
+
+0x75, 32, /* HID report size(32) */
+0x95, 1, /* HID report count (1) */
+0x55, 0x0E, /* HID unit exponent(0x0E) */
+0X81, 0x02, /* HID Input (Data_Arr_Abs) */
+
+0xC0, /* HID end collection */
+};
+
+static const u8 comp3_report_descriptor[] = {
+0x05, 0x20, /* Usage page */
+0x09, 0x83, /* Motion type Orientation compass 3D */
+0xA1, 0x00, /* HID Collection (Physical) */
+
+0x85, 3, /* HID Report ID */
+0x05, 0x20, /* HID usage page sensor */
+0x0A, 0x09, 0x03, /* Sensor property and sensor connection type */
+0x15, 0, /* HID logical MIN_8(0) */
+0x25, 2, /* HID logical MAX_8(2) */
+0x75, 8, /* HID report size(8) */
+0x95, 1, /* HID report count(1) */
+0xA1, 0x02, /* HID collection (logical) */
+0x0A, 0x30, 0x08, /* Sensor property connection type integrated sel */
+0x0A, 0x31, 0x08, /* Sensor property connection type attached sel */
+0x0A, 0x32, 0x08, /* Sensor property connection type external sel */
+0xB1, 0x00, /* HID feature (Data_Arr_Abs) */
+0xC0, /* HID end collection */
+0x0A, 0x16, 0x03, /* HID usage sensor property reporting state */
+0x15, 0, /* HID logical Min_8(0) */
+0x25, 5, /* HID logical Max_8(5) */
+0x75, 8, /* HID report size(8) */
+0x95, 1, /* HID report count(1) */
+0xA1, 0x02, /* HID collection(logical) */
+0x0A, 0x40, 0x08, /* Sensor reporting state no events sel */
+0x0A, 0x41, 0x08, /* Sensor reporting state all events sel */
+0x0A, 0x42, 0x08, /* Sensor reporting state threshold events sel */
+0x0A, 0x43, 0x08, /* Sensor reporting state no events wake sel */
+0x0A, 0x44, 0x08, /* Sensor reporting state all events wake sel */
+0x0A, 0x45, 0x08, /* Sensor reporting state threshold events wake sel */
+0xB1, 0x00, /* HID feature (Data_Arr_Abs) */
+0xC0, /* HID end collection */
+0x0A, 0x19, 0x03, /* HID usage sensor property power state */
+0x15, 0, /* HID logical Min_8(0) */
+0x25, 5, /* HID logical Max_8(5) */
+0x75, 8, /* HID report size(8) */
+0x95, 1, /* HID report count(1) */
+0xA1, 0x02, /* HID collection(logical) */
+0x0A, 0x50, 0x08, /* Sensor power state undefined sel */
+0x0A, 0x51, 0x08, /* Sensor power state D0 full power sel */
+0x0A, 0x52, 0x08, /* Sensor power state D1 low power sel */
+0x0A, 0x53, 0x08, /* Sensor power state D2 standby with wake sel */
+0x0A, 0x54, 0x08, /* Sensor power state D3 sleep with wake sel */
+0x0A, 0x55, 0x08, /* Sensor power state D4 power off sel */
+0xB1, 0x00, /* HID feature (Data_Arr_Abs) */
+0xC0, /* HID end collection */
+0x0A, 0x01, 0x02, /* HID usage sensor state */
+0x15, 0, /* HID logical Min_8(0) */
+0x25, 6, /* HID logical Max_8(6) */
+0x75, 8, /* HID report size(8) */
+0x95, 1, /* HID report count(1) */
+0xA1, 0x02, /* HID collection(logical) */
+0x0A, 0x00, 0x08, /* HID usage sensor state unknown sel */
+0x0A, 0x01, 0x08, /* HID usage sensor state ready sel */
+0x0A, 0x02, 0x08, /* HID usage sensor state not available sel */
+0x0A, 0x03, 0x08, /* HID usage sensor state no data sel */
+0x0A, 0x04, 0x08, /* HID usage sensor state initializing sel */
+0x0A, 0x05, 0x08, /* HID usage sensor state access denied sel */
+0x0A, 0x06, 0x08, /* HID usage sensor state error sel */
+0xB1, 0x00, /* HID feature (Data_Arr_Abs) */
+0xC0, /* HID end collection */
+0x0A, 0x0E, 0x03, /* HID usage sensor property report interval */
+0x15, 0, /* HID logical Min_8(0) */
+0x27, 0xFF, 0xFF, 0xFF, 0xFF, /* HID logical Max_32 */
+0x75, 32, /* HID report size(32) */
+0x95, 1, /* HID report count(1) */
+0x55, 0, /* HID unit exponent(0) */
+0xB1, 0x02, /* HID feature (Data_Arr_Abs) */
+0x0A, 0x71, 0x14, /* Orientation and mod change sensitivity (ABS) */
+0x15, 0, /* HID logical Min_8(0) */
+0x26, 0xFF, 0xFF, /* HID logical Max_16(0xFF,0xFF) */
+0x75, 16, /* HID report size(16) */
+0x95, 1, /* HID report count(1) */
+0x55, 0x0E, /* HID unit exponent(0x0E) */
+0xB1, 0x02, /* HID feature (Data_Arr_Abs) */
+0x0A, 0x71, 0x24, /* Sensor data (motion orientation and mod max) */
+0x16, 0x01, 0x80, /* HID logical Min_16(0x01,0x80) */
+0x26, 0xFF, 0x7F, /* HID logical Max_16(0xFF,0x7F) */
+0x75, 16, /* HID report size(16) */
+0x95, 1, /* HID report count(1) */
+0x55, 0x0F, /* HID unit exponent(0x0F) */
+0xB1, 0x02, /* HID feature (Data_Arr_Abs) */
+0x0A, 0x71, 0x34, /* Sensor data (motion orientation and mod min) */
+0x16, 0x01, 0x80, /* HID logical Min_16(0x01,0x80) */
+0x26, 0xFF, 0x7F, /* HID logical Max_16(0xFF,0x7F) */
+0x75, 16, /* HID report size(16) */
+0x95, 1, /* HID report count(1) */
+0x55, 0x0F, /* HID unit exponent(0x0F) */
+0xB1, 0x02, /* HID feature (Data_Arr_Abs) */
+0x0A, 0x84, 0x14, /* Magnetic flux and mod change sensitivity (ABS) */
+0x15, 0, /* HID logical Min_8(0) */
+0x26, 0xFF, 0xFF, /* HID logical Max_16(0xFF,0xFF) */
+0x75, 16, /* HID report size(16) */
+0x95, 1, /* HID report count(1) */
+0x55, 0x0E, /* HID unit exponent(0x0E) */
+0xB1, 0x02, /* HID feature (Data_Arr_Abs) */
+0x0A, 0x84, 0x24, /* Magnetic flux and mod change sensitivity Max */
+0x16, 0x01, 0x80, /* HID logical Min_16(0x01,0x80) */
+0x26, 0xFF, 0x7F, /* HID logical Max_16(0xFF,0x7F) */
+0x75, 16, /* HID report size(16) */
+0x95, 1, /* HID report count(1) */
+0x55, 0x0F, /* HID unit exponent(0x0F) */
+0xB1, 0x02, /* HID feature (Data_Arr_Abs) */
+0x0A, 0x84, 0x34, /* Magnetic flux and mod change sensitivity Min */
+0x16, 0x01, 0x80, /* HID logical Min_16(0x01,0x80) */
+0x26, 0xFF, 0x7F, /* HID logical Max_16(0xFF,0x7F) */
+0x75, 16, /* HID report size(16) */
+0x95, 1, /* HID report count(1) */
+0x55, 0x0F, /* HID unit exponent(0x0F) */
+0xB1, 0x02, /* HID feature (Data_Arr_Abs) */
+
+//Input reports(transmit)
+0x05, 0x20, /* HID usage page sensors */
+0x0A, 0x01, 0x02, /* HID usage sensor state */
+0x15, 0, /* HID logical Min_8(0) */
+0x25, 6, /* HID logical Max_8(6) */
+0x75, 8, /* HID report size(8) */
+0x95, 1, /* HID report count (1) */
+0xA1, 0x02, /* HID collection (logical) */
+0x0A, 0x00, 0x08, /* HID usage sensor state unknown sel */
+0x0A, 0x01, 0x08, /* HID usage sensor state ready sel */
+0x0A, 0x02, 0x08, /* HID usage sensor state not available sel */
+0x0A, 0x03, 0x08, /* HID usage sensor state no data sel */
+0x0A, 0x04, 0x08, /* HID usage sensor state initializing sel */
+0x0A, 0x05, 0x08, /* HID usage sensor state access denied sel */
+0x0A, 0x06, 0x08, /* HID usage sensor state error sel */
+0X81, 0x00, /* HID Input (Data_Arr_Abs) */
+0xC0, /* HID end collection */
+0x0A, 0x02, 0x02, /* HID usage sensor event */
+0x15, 0, /* HID logical Min_8(0) */
+0x25, 5, /* HID logical Max_8(5) */
+0x75, 8, /* HID report size(8) */
+0x95, 1, /* HID report count (1) */
+0xA1, 0x02, /* HID collection (logical) */
+0x0A, 0x10, 0x08, /* HID usage sensor event unknown sel */
+0x0A, 0x11, 0x08, /* HID usage sensor event state changed sel */
+0x0A, 0x12, 0x08, /* HID usage sensor event property changed sel */
+0x0A, 0x13, 0x08, /* HID usage sensor event data updated sel */
+0x0A, 0x14, 0x08, /* HID usage sensor event poll response sel */
+0x0A, 0x15, 0x08, /* HID usage sensor event change sensitivity sel */
+0X81, 0x00, /* HID Input (Data_Arr_Abs) */
+0xC0, /* HID end collection */
+0x0A, 0x85, 0x04, /* Sensor data orientation magnetic flux X axis */
+0x17, 0x00, 0x00, 0x01, 0x80, /* HID logical Min_32 */
+0x27, 0xFF, 0xFF, 0xFF, 0x7F, /* HID logical Max_32 */
+0x75, 32, /* HID report size(32) */
+0x95, 1, /* HID report count (1) */
+0x55, 0x0D, /* HID unit exponent(0x0D) */
+0X81, 0x02, /* HID Input (Data_Arr_Abs) */
+0x0A, 0x86, 0x04, /* Sensor data orientation magnetic flux Y axis */
+0x17, 0x00, 0x00, 0x01, 0x80, /* HID logical Min_32 */
+0x27, 0xFF, 0xFF, 0xFF, 0x7F, /* HID logical Max_32 */
+0x75, 32, /* HID report size(32) */
+0x95, 1, /* HID report count (1) */
+0x55, 0x0D, /* HID unit exponent(0x0D) */
+0X81, 0x02, /* HID Input (Data_Arr_Abs) */
+0x0A, 0x87, 0x04, /* Sensor data orientation magnetic flux Z axis */
+0x17, 0x00, 0x00, 0x01, 0x80, /* HID logical Min_32 */
+0x27, 0xFF, 0xFF, 0xFF, 0x7F, /* HID logical Max_32 */
+0x75, 32, /* HID report size(32) */
+0x95, 1, /* HID report count (1) */
+0x55, 0x0D, /* HID unit exponent(0x0D) */
+0X81, 0x02, /* HID Input (Data_Arr_Abs) */
+0x0A, 0x88, 0x04, /* Sensor data orientation magnetometer accuracy */
+0x17, 0x00, 0x00, 0x01, 0x80, /* HID logical Min_32 */
+0x27, 0xFF, 0xFF, 0xFF, 0x7F, /* HID logical Max_32 */
+0x75, 32, /* HID report size(32) */
+0x95, 1, /* HID report count (1) */
+0X81, 0x02, /* HID Input (Data_Arr_Abs) */
+0xC0 /* HID end collection */
+};
+
+static const u8 als_report_descriptor[] = {
+0x05, 0x20, /* HID usage page sensor */
+0x09, 0x41, /* HID usage sensor type Ambient light */
+0xA1, 0x00, /* HID Collection (Physical) */
+
+//feature reports(xmit/receive)//
+0x85, 4, /* HID Report ID */
+0x05, 0x20, /* HID usage page sensor */
+0x0A, 0x09, 0x03, /* Sensor property and sensor connection type */
+0x15, 0, /* HID logical MIN_8(0) */
+0x25, 2, /* HID logical MAX_8(2) */
+0x75, 8, /* HID report size(8) */
+0x95, 1, /* HID report count(1) */
+0xA1, 0x02, /* HID collection (logical) */
+0x0A, 0x30, 0x08, /* Sensor property connection type intergated sel */
+0x0A, 0x31, 0x08, /* Sensor property connection type attached sel */
+0x0A, 0x32, 0x08, /* Sensor property connection type external sel */
+0xB1, 0x00, /* HID feature (Data_Arr_Abs) */
+0xC0, /* HID end collection */
+0x0A, 0x16, 0x03, /* HID usage sensor property reporting state */
+0x15, 0, /* HID logical Min_8(0) */
+0x25, 5, /* HID logical Max_8(5) */
+0x75, 8, /* HID report size(8) */
+0x95, 1, /* HID report count(1) */
+0xA1, 0x02, /* HID collection(logical) */
+0x0A, 0x40, 0x08, /* Sensor reporting state no events sel */
+0x0A, 0x41, 0x08, /* Sensor reporting state all events sel */
+0x0A, 0x42, 0x08, /* Sensor reporting state threshold events sel */
+0x0A, 0x43, 0x08, /* Sensor reporting state no events wake sel */
+0x0A, 0x44, 0x08, /* Sensor reporting state all events wake sel */
+0x0A, 0x45, 0x08, /* Sensor reporting state threshold events wake sel */
+0xB1, 0x00, /* HID feature (Data_Arr_Abs) */
+0xC0, /* HID end collection */
+0x0A, 0x19, 0x03, /* HID usage sensor property power state */
+0x15, 0, /* HID logical Min_8(0) */
+0x25, 5, /* HID logical Max_8(5) */
+0x75, 8, /* HID report size(8) */
+0x95, 1, /* HID report count(1) */
+0xA1, 0x02, /* HID collection(logical) */
+0x0A, 0x50, 0x08, /* Sensor power state undefined sel */
+0x0A, 0x51, 0x08, /* Sensor power state D0 full power sel */
+0x0A, 0x52, 0x08, /* Sensor power state D1 low power sel */
+0x0A, 0x53, 0x08, /* Sensor power state D2 standby with wake sel */
+0x0A, 0x54, 0x08, /* Sensor power state D3 sleep with wake sel */
+0x0A, 0x55, 0x08, /* Sensor power state D4 power off sel */
+0xB1, 0x00, /* HID feature (Data_Arr_Abs) */
+0xC0, /* HID end collection */
+0x0A, 0x01, 0x02, /* HID usage sensor state */
+0x15, 0, /* HID logical Min_8(0) */
+0x25, 6, /* HID logical Max_8(6) */
+0x75, 8, /* HID report size(8) */
+0x95, 1, /* HID report count(1) */
+0xA1, 0x02, /* HID collection(logical) */
+0x0A, 0x00, 0x08, /* HID usage sensor state unknown sel */
+0x0A, 0x01, 0x08, /* HID usage sensor state ready sel */
+0x0A, 0x02, 0x08, /* HID usage sensor state not available sel */
+0x0A, 0x03, 0x08, /* HID usage sensor state no data sel */
+0x0A, 0x04, 0x08, /* HID usage sensor state initializing sel */
+0x0A, 0x05, 0x08, /* HID usage sensor state access denied sel */
+0x0A, 0x06, 0x08, /* HID usage sensor state error sel */
+0xB1, 0x00, /* HID feature (Data_Arr_Abs) */
+0xC0, /* HID end collection */
+0x0A, 0x0E, 0x03, /* HID usage sensor property report interval */
+0x15, 0, /* HID logical Min_8(0) */
+0x27, 0xFF, 0xFF, 0xFF, 0xFF, /* HID logical Max_32 */
+0x75, 32, /* HID report size(32) */
+0x95, 1, /* HID report count(1) */
+0x55, 0, /* HID unit exponent(0) */
+0xB1, 0x02, /* HID feature (Data_Arr_Abs) */
+0x0A, 0xD1, 0xE4, /* Light illuminance and sensitivity (REL PCT) */
+0x15, 0, /* HID logical Min_8(0) */
+0x26, 0x10, 0x27, /* HID logical Max_16(0x10,0x27) */
+0x75, 16, /* HID report size(16) */
+0x95, 1, /* HID report count(1) */
+0x55, 0x0E, /* HID unit exponent(0x0E) */
+0xB1, 0x02, /* HID feature (Data_Arr_Abs) */
+0x0A, 0xD1, 0x24, /* Sensor data (Light illuminance and mod max) */
+0x15, 0, /* HID logical Min_8(0) */
+0x26, 0xFF, 0xFF, /* HID logical Max_16(0xFF,0xFF) */
+0x75, 16, /* HID report size(16) */
+0x95, 1, /* HID report count(1) */
+0x55, 0x0F, /* HID unit exponent(0x0F) */
+0xB1, 0x02, /* HID feature (Data_Arr_Abs) */
+0x0A, 0xD1, 0x34, /* Sensor data (Light illuminance and mod min) */
+0x15, 0, /* HID logical Min_8(0) */
+0x26, 0xFF, 0xFF, /* HID logical Max_16(0xFF,0xFF) */
+0x75, 16, /* HID report size(16) */
+0x95, 1, /* HID report count(1) */
+0x55, 0x0F, /* HID unit exponent(0x0F) */
+0xB1, 0x02, /* HID feature (Data_Arr_Abs) */
+
+//Input reports (transmit)
+0x05, 0x20, /* HID usage page sensors */
+0x0A, 0x01, 0x02, /* HID usage sensor state */
+0x15, 0, /* HID logical Min_8(0) */
+0x25, 6, /* HID logical Max_8(6) */
+0x75, 8, /* HID report size(8) */
+0x95, 1, /* HID report count (1) */
+0xA1, 0x02, /* HID collection (logical) */
+0x0A, 0x00, 0x08, /* HID usage sensor state unknown sel */
+0x0A, 0x01, 0x08, /* HID usage sensor state ready sel */
+0x0A, 0x02, 0x08, /* HID usage sensor state not available sel */
+0x0A, 0x03, 0x08, /* HID usage sensor state no data sel */
+0x0A, 0x04, 0x08, /* HID usage sensor state initializing sel */
+0x0A, 0x05, 0x08, /* HID usage sensor state access denied sel */
+0x0A, 0x06, 0x08, /* HID usage sensor state error sel */
+0X81, 0x00, /* HID Input (Data_Arr_Abs) */
+0xC0, /* HID end collection */
+0x0A, 0x02, 0x02, /* HID usage sensor event */
+0x15, 0, /* HID logical Min_8(0) */
+0x25, 5, /* HID logical Max_8(5) */
+0x75, 8, /* HID report size(8) */
+0x95, 1, /* HID report count (1) */
+0xA1, 0x02, /* HID collection (logical) */
+0x0A, 0x10, 0x08, /* HID usage sensor event unknown sel */
+0x0A, 0x11, 0x08, /* HID usage sensor event state changed sel */
+0x0A, 0x12, 0x08, /* HID usage sensor event property changed sel */
+0x0A, 0x13, 0x08, /* HID usage sensor event data updated sel */
+0x0A, 0x14, 0x08, /* HID usage sensor event poll response sel */
+0x0A, 0x15, 0x08, /* HID usage sensor event change sensitivity sel */
+0X81, 0x00, /* HID Input (Data_Arr_Abs) */
+0xC0, /* HID end collection */
+0x0A, 0xD1, 0x04, /* HID usage sensor data light illuminance */
+0x17, 0x00, 0x00, 0x01, 0x80, /* HID logical Min_32 */
+0x27, 0xFF, 0xFF, 0xFF, 0x7F, /* HID logical Max_32 */
+0x55, 0x0F, /* HID unit exponent(0x0F) */
+0x75, 32, /* HID report size(32) */
+0x95, 1, /* HID report count (1) */
+0X81, 0x02, /* HID Input (Data_Arr_Abs) */
+0xC0 /* HID end collection */
+};
+#endif
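For decoding the tables above by hand: every entry is a HID short item whose prefix byte packs bTag(4) | bType(2) | bSize(2). A small reference sketch:

/*
 *   0x85, 0x01       -> Global, Report ID = 1
 *   0x75, 0x08       -> Global, Report Size = 8 bits
 *   0x95, 0x01       -> Global, Report Count = 1
 *   0x0A, 0x09, 0x03 -> Local, two-byte Usage 0x0309 (little endian)
 */
static inline void sketch_decode_item_prefix(u8 prefix)
{
	u8 bsize = prefix & 0x03;        /* payload bytes: 0, 1, 2; 3 means 4 */
	u8 btype = (prefix >> 2) & 0x03; /* 0 = Main, 1 = Global, 2 = Local */
	u8 btag  = prefix >> 4;          /* tag within that type */

	(void)bsize; (void)btype; (void)btag;
}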
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index c183caf89d49..1dfe184ebf5a 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -48,6 +48,8 @@ MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad");
#define INPUT_REPORT_ID 0x5d
#define FEATURE_KBD_REPORT_ID 0x5a
#define FEATURE_KBD_REPORT_SIZE 16
+#define FEATURE_KBD_LED_REPORT_ID1 0x5d
+#define FEATURE_KBD_LED_REPORT_ID2 0x5e
#define SUPPORT_KBD_BACKLIGHT BIT(0)
@@ -80,6 +82,7 @@ MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad");
#define QUIRK_T101HA_DOCK BIT(9)
#define QUIRK_T90CHI BIT(10)
#define QUIRK_MEDION_E1239T BIT(11)
+#define QUIRK_ROG_NKEY_KEYBOARD BIT(12)
#define I2C_KEYBOARD_QUIRKS (QUIRK_FIX_NOTEBOOK_REPORT | \
QUIRK_NO_INIT_REPORTS | \
@@ -332,6 +335,28 @@ static int asus_raw_event(struct hid_device *hdev,
if (drvdata->quirks & QUIRK_MEDION_E1239T)
return asus_e1239t_event(drvdata, data, size);
+ if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD) {
+ /*
+ * Skip these report IDs: the device emits a continuous stream associated
+ * with the AURA mode it is in, which looks like an 'echo'.
+ */
+ if (report->id == FEATURE_KBD_LED_REPORT_ID1 ||
+ report->id == FEATURE_KBD_LED_REPORT_ID2) {
+ return -1;
+ /* Additional report filtering */
+ } else if (report->id == FEATURE_KBD_REPORT_ID) {
+ /*
+ * G14 and G15 send these codes on some keypresses with no
+ * discernible reason for doing so. We'll filter them out to avoid
+ * unmapped warning messages later.
+ */
+ if (data[1] == 0xea || data[1] == 0xec || data[1] == 0x02 ||
+ data[1] == 0x8a || data[1] == 0x9e) {
+ return -1;
+ }
+ }
+ }
+
return 0;
}
@@ -344,7 +369,11 @@ static int asus_kbd_set_report(struct hid_device *hdev, u8 *buf, size_t buf_size
if (!dmabuf)
return -ENOMEM;
- ret = hid_hw_raw_request(hdev, FEATURE_KBD_REPORT_ID, dmabuf,
+ /*
+ * Take the report ID from the incoming buffer: the LED and key
+ * interfaces use different report pages
+ */
+ ret = hid_hw_raw_request(hdev, buf[0], dmabuf,
buf_size, HID_FEATURE_REPORT,
HID_REQ_SET_REPORT);
kfree(dmabuf);
@@ -397,6 +426,51 @@ static int asus_kbd_get_functions(struct hid_device *hdev,
return ret;
}
+static int rog_nkey_led_init(struct hid_device *hdev)
+{
+ u8 buf_init_start[] = { FEATURE_KBD_LED_REPORT_ID1, 0xB9 };
+ u8 buf_init2[] = { FEATURE_KBD_LED_REPORT_ID1, 0x41, 0x53, 0x55, 0x53, 0x20,
+ 0x54, 0x65, 0x63, 0x68, 0x2e, 0x49, 0x6e, 0x63, 0x2e, 0x00 }; /* "ASUS Tech.Inc." */
+ u8 buf_init3[] = { FEATURE_KBD_LED_REPORT_ID1,
+ 0x05, 0x20, 0x31, 0x00, 0x08 };
+ int ret;
+
+ hid_info(hdev, "Asus initialise N-KEY device\n");
+ /* The first message is an init start */
+ ret = asus_kbd_set_report(hdev, buf_init_start, sizeof(buf_init_start));
+ if (ret < 0) {
+ hid_warn(hdev, "Asus failed to send init start command: %d\n", ret);
+ return ret;
+ }
+ /* Followed by the identification string "ASUS Tech.Inc." */
+ ret = asus_kbd_set_report(hdev, buf_init2, sizeof(buf_init2));
+ if (ret < 0) {
+ hid_warn(hdev, "Asus failed to send init command 1.0: %d\n", ret);
+ return ret;
+ }
+ /* Followed by a second init packet */
+ ret = asus_kbd_set_report(hdev, buf_init3, sizeof(buf_init3));
+ if (ret < 0) {
+ hid_warn(hdev, "Asus failed to send init command 1.1: %d\n", ret);
+ return ret;
+ }
+
+ /* Repeat the same init sequence for the second report ID */
+ buf_init2[0] = FEATURE_KBD_LED_REPORT_ID2;
+ buf_init3[0] = FEATURE_KBD_LED_REPORT_ID2;
+
+ ret = asus_kbd_set_report(hdev, buf_init2, sizeof(buf_init2));
+ if (ret < 0) {
+ hid_warn(hdev, "Asus failed to send init command 2.0: %d\n", ret);
+ return ret;
+ }
+ ret = asus_kbd_set_report(hdev, buf_init3, sizeof(buf_init3));
+ if (ret < 0)
+ hid_warn(hdev, "Asus failed to send init command 2.1: %d\n", ret);
+
+ return ret;
+}
+
static void asus_kbd_backlight_set(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
@@ -460,19 +534,25 @@ static int asus_kbd_register_leds(struct hid_device *hdev)
unsigned char kbd_func;
int ret;
- /* Initialize keyboard */
- ret = asus_kbd_init(hdev);
- if (ret < 0)
- return ret;
+ if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD) {
+ ret = rog_nkey_led_init(hdev);
+ if (ret < 0)
+ return ret;
+ } else {
+ /* Initialize keyboard */
+ ret = asus_kbd_init(hdev);
+ if (ret < 0)
+ return ret;
- /* Get keyboard functions */
- ret = asus_kbd_get_functions(hdev, &kbd_func);
- if (ret < 0)
- return ret;
+ /* Get keyboard functions */
+ ret = asus_kbd_get_functions(hdev, &kbd_func);
+ if (ret < 0)
+ return ret;
- /* Check for backlight support */
- if (!(kbd_func & SUPPORT_KBD_BACKLIGHT))
- return -ENODEV;
+ /* Check for backlight support */
+ if (!(kbd_func & SUPPORT_KBD_BACKLIGHT))
+ return -ENODEV;
+ }
drvdata->kbd_backlight = devm_kzalloc(&hdev->dev,
sizeof(struct asus_kbd_leds),
@@ -751,8 +831,8 @@ static int asus_input_mapping(struct hid_device *hdev,
usage->hid == (HID_UP_GENDEVCTRLS | 0x0026)))
return -1;
- /* ASUS-specific keyboard hotkeys */
- if ((usage->hid & HID_USAGE_PAGE) == 0xff310000) {
+ /* ASUS-specific keyboard hotkeys and led backlight */
+ if ((usage->hid & HID_USAGE_PAGE) == HID_UP_ASUSVENDOR) {
switch (usage->hid & HID_USAGE) {
case 0x10: asus_map_key_clear(KEY_BRIGHTNESSDOWN); break;
case 0x20: asus_map_key_clear(KEY_BRIGHTNESSUP); break;
@@ -780,6 +860,18 @@ static int asus_input_mapping(struct hid_device *hdev,
/* Fn+F5 "fan" symbol on FX503VD */
case 0x99: asus_map_key_clear(KEY_PROG4); break;
+ /* Fn+F5 "fan" symbol on N-Key keyboard */
+ case 0xae: asus_map_key_clear(KEY_PROG4); break;
+
+ /* Fn+Ret "Calc" symbol on N-Key keyboard */
+ case 0x92: asus_map_key_clear(KEY_CALC); break;
+
+ /* Fn+Left Aura mode previous on N-Key keyboard */
+ case 0xb2: asus_map_key_clear(KEY_PROG2); break;
+
+ /* Fn+Right Aura mode next on N-Key keyboard */
+ case 0xb3: asus_map_key_clear(KEY_PROG3); break;
+
default:
/* ASUS lazily declares 256 usages, ignore the rest,
* as some make the keyboard appear as a pointer device. */
@@ -1127,6 +1219,9 @@ static const struct hid_device_id asus_devices[] = {
USB_DEVICE_ID_ASUSTEK_FX503VD_KEYBOARD),
QUIRK_USE_KBD_BACKLIGHT },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
+ USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD),
+ QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
USB_DEVICE_ID_ASUSTEK_T100TA_KEYBOARD),
QUIRK_T100_KEYBOARD | QUIRK_NO_CONSUMER_USAGES },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
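With asus_kbd_set_report() now taking the report ID from buf[0], callers select the target interface by the leading byte alone. A sketch; the payload bytes are placeholders, not the driver's real packets:

static void sketch_send_to_both_interfaces(struct hid_device *hdev)
{
	u8 kbd_msg[] = { FEATURE_KBD_REPORT_ID, 0x00, 0x00 };      /* 0x5a: key interface */
	u8 led_msg[] = { FEATURE_KBD_LED_REPORT_ID1, 0x00, 0x00 }; /* 0x5d: LED interface */

	asus_kbd_set_report(hdev, kbd_msg, sizeof(kbd_msg));
	asus_kbd_set_report(hdev, led_msg, sizeof(led_msg));
}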
diff --git a/drivers/hid/hid-elecom.c b/drivers/hid/hid-elecom.c
index 8c712d4bc075..e59e9911fc37 100644
--- a/drivers/hid/hid-elecom.c
+++ b/drivers/hid/hid-elecom.c
@@ -11,6 +11,7 @@
* Copyright (c) 2017 Diego Elio Pettenò <flameeyes@flameeyes.eu>
* Copyright (c) 2017 Alex Manoussakis <amanou@gnu.org>
* Copyright (c) 2017 Tomasz Kramkowski <tk@the-tk.com>
+ * Copyright (c) 2020 YOSHIOKA Takuma <lo48576@hard-wi.red>
*/
/*
@@ -29,25 +30,26 @@
* report descriptor but it does not appear that these enable software to
* control what the extra buttons map to. The only simple and straightforward
* solution seems to involve fixing up the report descriptor.
- *
- * Report descriptor format:
- * Positions 13, 15, 21 and 31 store the button bit count, button usage minimum,
- * button usage maximum and padding bit count respectively.
*/
#define MOUSE_BUTTONS_MAX 8
static void mouse_button_fixup(struct hid_device *hdev,
__u8 *rdesc, unsigned int rsize,
+ unsigned int button_bit_count,
+ unsigned int padding_bit,
+ unsigned int button_report_size,
+ unsigned int button_usage_maximum,
int nbuttons)
{
- if (rsize < 32 || rdesc[12] != 0x95 ||
- rdesc[14] != 0x75 || rdesc[15] != 0x01 ||
- rdesc[20] != 0x29 || rdesc[30] != 0x75)
+ if (rsize < 32 || rdesc[button_bit_count] != 0x95 ||
+ rdesc[button_report_size] != 0x75 ||
+ rdesc[button_report_size + 1] != 0x01 ||
+ rdesc[button_usage_maximum] != 0x29 || rdesc[padding_bit] != 0x75)
return;
hid_info(hdev, "Fixing up Elecom mouse button count\n");
nbuttons = clamp(nbuttons, 0, MOUSE_BUTTONS_MAX);
- rdesc[13] = nbuttons;
- rdesc[21] = nbuttons;
- rdesc[31] = MOUSE_BUTTONS_MAX - nbuttons;
+ rdesc[button_bit_count + 1] = nbuttons;
+ rdesc[button_usage_maximum + 1] = nbuttons;
+ rdesc[padding_bit + 1] = MOUSE_BUTTONS_MAX - nbuttons;
}
static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
@@ -62,16 +64,40 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
rdesc[47] = 0x00;
}
break;
+ case USB_DEVICE_ID_ELECOM_M_XGL20DLBK:
+ /*
+ * Report descriptor format:
+ * 20: button bit count
+ * 28: padding bit count
+ * 22: button report size
+ * 14: button usage maximum
+ */
+ mouse_button_fixup(hdev, rdesc, *rsize, 20, 28, 22, 14, 8);
+ break;
case USB_DEVICE_ID_ELECOM_M_XT3URBK:
case USB_DEVICE_ID_ELECOM_M_XT3DRBK:
case USB_DEVICE_ID_ELECOM_M_XT4DRBK:
- mouse_button_fixup(hdev, rdesc, *rsize, 6);
+ /*
+ * Report descriptor format:
+ * 12: button bit count
+ * 30: padding bit count
+ * 14: button report size
+ * 20: button usage maximum
+ */
+ mouse_button_fixup(hdev, rdesc, *rsize, 12, 30, 14, 20, 6);
break;
case USB_DEVICE_ID_ELECOM_M_DT1URBK:
case USB_DEVICE_ID_ELECOM_M_DT1DRBK:
case USB_DEVICE_ID_ELECOM_M_HT1URBK:
case USB_DEVICE_ID_ELECOM_M_HT1DRBK:
- mouse_button_fixup(hdev, rdesc, *rsize, 8);
+ /*
+ * Report descriptor format:
+ * 12: button bit count
+ * 30: padding bit count
+ * 14: button report size
+ * 20: button usage maximum
+ */
+ mouse_button_fixup(hdev, rdesc, *rsize, 12, 30, 14, 20, 8);
break;
}
return rdesc;
@@ -79,6 +105,7 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
static const struct hid_device_id elecom_devices[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XGL20DLBK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT3URBK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT3DRBK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT4DRBK) },
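The generalized helper above replaces four hard-coded descriptor offsets with parameters; each offset points at a HID item tag byte, and the value byte follows at offset + 1. A minimal user-space sketch of the same fixup, assuming a made-up 32-byte descriptor skeleton in which only the checked item bytes are populated:

	#include <stdint.h>
	#include <stdio.h>

	#define MOUSE_BUTTONS_MAX 8

	/* Patch nbuttons into a report descriptor, mirroring the kernel
	 * helper: each offset is a HID item tag, its value byte is at +1. */
	static void fixup(uint8_t *rdesc, size_t rsize,
			  unsigned int bit_count, unsigned int padding,
			  unsigned int report_size, unsigned int usage_max,
			  int nbuttons)
	{
		if (rsize < 32 || rdesc[bit_count] != 0x95 ||	/* Report Count */
		    rdesc[report_size] != 0x75 ||		/* Report Size... */
		    rdesc[report_size + 1] != 0x01 ||		/* ...of 1 bit */
		    rdesc[usage_max] != 0x29 ||			/* Usage Maximum */
		    rdesc[padding] != 0x75)			/* padding Report Size */
			return;

		if (nbuttons < 0)
			nbuttons = 0;
		if (nbuttons > MOUSE_BUTTONS_MAX)
			nbuttons = MOUSE_BUTTONS_MAX;

		rdesc[bit_count + 1] = nbuttons;
		rdesc[usage_max + 1] = nbuttons;
		rdesc[padding + 1] = MOUSE_BUTTONS_MAX - nbuttons;
	}

	int main(void)
	{
		uint8_t rdesc[32] = { 0 };		/* made-up skeleton */

		rdesc[12] = 0x95; rdesc[13] = 5;	/* Report Count (5) */
		rdesc[14] = 0x75; rdesc[15] = 0x01;	/* Report Size (1) */
		rdesc[20] = 0x29; rdesc[21] = 5;	/* Usage Maximum (5) */
		rdesc[30] = 0x75; rdesc[31] = 3;	/* padding Report Size (3) */

		fixup(rdesc, sizeof(rdesc), 12, 30, 14, 20, 8);
		printf("buttons=%u padding=%u\n", rdesc[13], rdesc[31]);
		return 0;
	}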
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index f170feaac40b..5ba0aa1d2335 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -190,6 +190,7 @@
#define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1 0x1854
#define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2 0x1837
#define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD3 0x1822
+#define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD 0x1866
#define USB_DEVICE_ID_ASUSTEK_FX503VD_KEYBOARD 0x1869
#define USB_VENDOR_ID_ATEN 0x0557
@@ -359,6 +360,7 @@
#define USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR 0x1803
#define USB_DEVICE_ID_DRAGONRISE_GAMECUBE1 0x1843
#define USB_DEVICE_ID_DRAGONRISE_GAMECUBE2 0x1844
+#define USB_DEVICE_ID_DRAGONRISE_GAMECUBE3 0x1846
#define USB_VENDOR_ID_DWAV 0x0eef
#define USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER 0x0001
@@ -387,9 +389,11 @@
#define USB_DEVICE_ID_TOSHIBA_CLICK_L9W 0x0401
#define USB_DEVICE_ID_HP_X2 0x074d
#define USB_DEVICE_ID_HP_X2_10_COVER 0x0755
+#define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN 0x2706
#define USB_VENDOR_ID_ELECOM 0x056e
#define USB_DEVICE_ID_ELECOM_BM084 0x0061
+#define USB_DEVICE_ID_ELECOM_M_XGL20DLBK 0x00e6
#define USB_DEVICE_ID_ELECOM_M_XT3URBK 0x00fb
#define USB_DEVICE_ID_ELECOM_M_XT3DRBK 0x00fc
#define USB_DEVICE_ID_ELECOM_M_XT4DRBK 0x00fd
@@ -1074,6 +1078,9 @@
#define USB_DEVICE_ID_SONY_BUZZ_CONTROLLER 0x0002
#define USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER 0x1000
+#define USB_VENDOR_ID_SONY_GHLIVE 0x12ba
+#define USB_DEVICE_ID_SONY_PS3WIIU_GHLIVE_DONGLE 0x074b
+
#define USB_VENDOR_ID_SINO_LITE 0x1345
#define USB_DEVICE_ID_SINO_LITE_CONTROLLER 0x3008
@@ -1133,6 +1140,7 @@
#define USB_DEVICE_ID_SYNAPTICS_DELL_K12A 0x2819
#define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012 0x2968
#define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710
+#define USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1002 0x73f4
#define USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1003 0x73f5
#define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5 0x81a7
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 4dca11392459..f23027d2795b 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -322,6 +322,8 @@ static const struct hid_device_id hid_battery_quirks[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_DINOVO_EDGE_KBD),
HID_BATTERY_QUIRK_IGNORE },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN),
+ HID_BATTERY_QUIRK_IGNORE },
{}
};
@@ -537,9 +539,12 @@ static void hidinput_update_battery(struct hid_device *dev, int value)
capacity = hidinput_scale_battery_capacity(dev, value);
if (dev->battery_status != HID_BATTERY_REPORTED ||
- capacity != dev->battery_capacity) {
+ capacity != dev->battery_capacity ||
+ ktime_after(ktime_get_coarse(), dev->battery_ratelimit_time)) {
dev->battery_capacity = capacity;
dev->battery_status = HID_BATTERY_REPORTED;
+ dev->battery_ratelimit_time =
+ ktime_add_ms(ktime_get_coarse(), 30 * 1000);
power_supply_changed(dev->battery);
}
}
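The hunk above forces a power_supply_changed() notification at most once every 30 seconds even when the reported capacity is unchanged, using the cheap coarse clock. A stripped-down sketch of the pattern with hypothetical names (in the driver the state lives in struct hid_device as battery_ratelimit_time):

	#include <linux/ktime.h>

	#define BATTERY_RATELIMIT_MS	(30 * 1000)

	/* Hypothetical per-device state. */
	static ktime_t next_forced_report;

	static bool battery_report_due(bool value_changed)
	{
		ktime_t now = ktime_get_coarse();	/* cheap, jiffies-grade clock */

		if (!value_changed && !ktime_after(now, next_forced_report))
			return false;

		next_forced_report = ktime_add_ms(now, BATTERY_RATELIMIT_MS);
		return true;
	}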
@@ -746,6 +751,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
field->flags |= HID_MAIN_ITEM_RELATIVE;
break;
}
+ goto unknown;
default: goto unknown;
}
diff --git a/drivers/hid/hid-ite.c b/drivers/hid/hid-ite.c
index 742c052b0110..22bfbebceaf4 100644
--- a/drivers/hid/hid-ite.c
+++ b/drivers/hid/hid-ite.c
@@ -18,10 +18,16 @@ static __u8 *ite_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int
unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
if (quirks & QUIRK_TOUCHPAD_ON_OFF_REPORT) {
+ /* For Acer Aspire Switch 10 SW5-012 keyboard-dock */
if (*rsize == 188 && rdesc[162] == 0x81 && rdesc[163] == 0x02) {
- hid_info(hdev, "Fixing up ITE keyboard report descriptor\n");
+ hid_info(hdev, "Fixing up Acer Sw5-012 ITE keyboard report descriptor\n");
rdesc[163] = HID_MAIN_ITEM_RELATIVE;
}
+ /* For Acer One S1002 keyboard-dock */
+ if (*rsize == 188 && rdesc[185] == 0x81 && rdesc[186] == 0x02) {
+ hid_info(hdev, "Fixing up Acer S1002 ITE keyboard report descriptor\n");
+ rdesc[186] = HID_MAIN_ITEM_RELATIVE;
+ }
}
return rdesc;
@@ -103,6 +109,11 @@ static const struct hid_device_id ite_devices[] = {
/* ITE8910 USB kbd ctlr, with Synaptics touchpad connected to it. */
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_SYNAPTICS,
+ USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1002),
+ .driver_data = QUIRK_TOUCHPAD_ON_OFF_REPORT },
+ /* ITE8910 USB kbd ctlr, with Synaptics touchpad connected to it. */
+ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+ USB_VENDOR_ID_SYNAPTICS,
USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1003) },
{ }
};
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 1ffcfc9a1e03..45e7e0bdd382 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -1869,6 +1869,10 @@ static const struct hid_device_id logi_dj_receivers[] = {
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
0xc531),
.driver_data = recvr_type_gaming_hidpp},
+ { /* Logitech G602 receiver (0xc537) */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ 0xc537),
+ .driver_data = recvr_type_gaming_hidpp},
{ /* Logitech lightspeed receiver (0xc539) */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1),
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 0ca723119547..7eb9a6ddb46a 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -4048,9 +4048,13 @@ static const struct hid_device_id hidpp_devices[] = {
{ /* MX5500 keyboard over Bluetooth */
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb30b),
.driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
+ { /* M-RCQ142 V470 Cordless Laser Mouse over Bluetooth */
+ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb008) },
{ /* MX Master mouse over Bluetooth */
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb012),
.driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { /* MX Ergo trackball over Bluetooth */
+ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01d) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01e),
.driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
{ /* MX Master 3 mouse over Bluetooth */
diff --git a/drivers/hid/hid-mf.c b/drivers/hid/hid-mf.c
index fc75f30f537c..92d7ecd41a78 100644
--- a/drivers/hid/hid-mf.c
+++ b/drivers/hid/hid-mf.c
@@ -153,6 +153,8 @@ static const struct hid_device_id mf_devices[] = {
.driver_data = HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE2),
.driver_data = 0 }, /* No quirk required */
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE3),
+ .driver_data = HID_QUIRK_MULTI_INPUT },
{ }
};
MODULE_DEVICE_TABLE(hid, mf_devices);
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index d670bcd57bde..8429ebe7097e 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -758,7 +758,8 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
MT_STORE_FIELD(inrange_state);
return 1;
case HID_DG_CONFIDENCE:
- if (cls->name == MT_CLS_WIN_8 &&
+ if ((cls->name == MT_CLS_WIN_8 ||
+ cls->name == MT_CLS_WIN_8_FORCE_MULTI_INPUT) &&
(field->application == HID_DG_TOUCHPAD ||
field->application == HID_DG_TOUCHSCREEN))
app->quirks |= MT_QUIRK_CONFIDENCE;
@@ -2054,6 +2055,10 @@ static const struct hid_device_id mt_devices[] = {
HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
USB_VENDOR_ID_SYNAPTICS, 0xce08) },
+ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
+ HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
+ USB_VENDOR_ID_SYNAPTICS, 0xce09) },
+
/* TopSeed panels */
{ .driver_data = MT_CLS_TOPSEED,
MT_USB_DEVICE(USB_VENDOR_ID_TOPSEED2,
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index bf7ecab5d9e5..d9ca874dffac 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -72,6 +72,7 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_REDRAGON_SEYMUR2), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE3), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER), HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET },
@@ -366,6 +367,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
#endif
#if IS_ENABLED(CONFIG_HID_ELECOM)
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XGL20DLBK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT3URBK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT3DRBK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT4DRBK) },
@@ -484,6 +486,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE3) },
#endif
#if IS_ENABLED(CONFIG_HID_MICROSOFT)
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500) },
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 2f073f536070..e3a557dc9ffd 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -11,6 +11,7 @@
* Copyright (c) 2013 Colin Leitner <colin.leitner@gmail.com>
* Copyright (c) 2014-2016 Frank Praznik <frank.praznik@gmail.com>
* Copyright (c) 2018 Todd Kelner
+ * Copyright (c) 2020 Pascal Giard <pascal.giard@etsmtl.ca>
*/
/*
@@ -35,6 +36,8 @@
#include <linux/idr.h>
#include <linux/input/mt.h>
#include <linux/crc32.h>
+#include <linux/usb.h>
+#include <linux/timer.h>
#include <asm/unaligned.h>
#include "hid-ids.h"
@@ -56,6 +59,7 @@
#define NSG_MR5U_REMOTE_BT BIT(14)
#define NSG_MR7U_REMOTE_BT BIT(15)
#define SHANWAN_GAMEPAD BIT(16)
+#define GHL_GUITAR_PS3WIIU BIT(17)
#define SIXAXIS_CONTROLLER (SIXAXIS_CONTROLLER_USB | SIXAXIS_CONTROLLER_BT)
#define MOTION_CONTROLLER (MOTION_CONTROLLER_USB | MOTION_CONTROLLER_BT)
@@ -79,6 +83,17 @@
#define NSG_MRXU_MAX_X 1667
#define NSG_MRXU_MAX_Y 1868
+#define GHL_GUITAR_POKE_INTERVAL 10 /* In seconds */
+#define GHL_GUITAR_TILT_USAGE 44
+
+/* Magic value and data taken from GHLtarUtility:
+ * https://github.com/ghlre/GHLtarUtility/blob/master/PS3Guitar.cs
+ * Note: The Wii U and PS3 dongles happen to share the same magic value and data!
+ */
+static const u16 ghl_ps3wiiu_magic_value = 0x201;
+static const char ghl_ps3wiiu_magic_data[] = {
+ 0x02, 0x08, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00
+};
/* PS/3 Motion controller */
static u8 motion_rdesc[] = {
@@ -550,7 +565,9 @@ struct sony_sc {
struct power_supply_desc battery_desc;
int device_id;
unsigned fw_version;
+ bool fw_version_created;
unsigned hw_version;
+ bool hw_version_created;
u8 *output_report_dmabuf;
#ifdef CONFIG_SONY_FF
@@ -562,9 +579,8 @@ struct sony_sc {
u8 hotplug_worker_initialized;
u8 state_worker_initialized;
u8 defer_initialization;
- u8 cable_state;
- u8 battery_charging;
u8 battery_capacity;
+ int battery_status;
u8 led_state[MAX_LEDS];
u8 led_delay_on[MAX_LEDS];
u8 led_delay_off[MAX_LEDS];
@@ -578,6 +594,10 @@ struct sony_sc {
enum ds4_dongle_state ds4_dongle_state;
/* DS4 calibration data */
struct ds4_calibration_data ds4_calib_data[6];
+ /* GH Live */
+ struct timer_list ghl_poke_timer;
+ struct usb_ctrlrequest *ghl_cr;
+ u8 *ghl_databuf;
};
static void sony_set_leds(struct sony_sc *sc);
@@ -601,6 +621,85 @@ static inline void sony_schedule_work(struct sony_sc *sc,
}
}
+static void ghl_magic_poke_cb(struct urb *urb)
+{
+ if (urb) {
+ /* Free sc->ghl_cr and sc->ghl_databuf allocated in
+ * ghl_magic_poke()
+ */
+ kfree(urb->setup_packet);
+ kfree(urb->transfer_buffer);
+ }
+}
+
+static void ghl_magic_poke(struct timer_list *t)
+{
+ struct sony_sc *sc = from_timer(sc, t, ghl_poke_timer);
+
+ int ret;
+ unsigned int pipe;
+ struct urb *urb;
+ struct usb_device *usbdev = to_usb_device(sc->hdev->dev.parent->parent);
+ const u16 poke_size =
+ ARRAY_SIZE(ghl_ps3wiiu_magic_data);
+
+ pipe = usb_sndctrlpipe(usbdev, 0);
+
+ if (!sc->ghl_cr) {
+ sc->ghl_cr = kzalloc(sizeof(*sc->ghl_cr), GFP_ATOMIC);
+ if (!sc->ghl_cr)
+ goto resched;
+ }
+
+ if (!sc->ghl_databuf) {
+ sc->ghl_databuf = kzalloc(poke_size, GFP_ATOMIC);
+ if (!sc->ghl_databuf)
+ goto resched;
+ }
+
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb)
+ goto resched;
+
+ sc->ghl_cr->bRequestType =
+ USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT;
+ sc->ghl_cr->bRequest = USB_REQ_SET_CONFIGURATION;
+ sc->ghl_cr->wValue = cpu_to_le16(ghl_ps3wiiu_magic_value);
+ sc->ghl_cr->wIndex = 0;
+ sc->ghl_cr->wLength = cpu_to_le16(poke_size);
+ memcpy(sc->ghl_databuf, ghl_ps3wiiu_magic_data, poke_size);
+
+ usb_fill_control_urb(
+ urb, usbdev, pipe,
+ (unsigned char *) sc->ghl_cr, sc->ghl_databuf,
+ poke_size, ghl_magic_poke_cb, NULL);
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret < 0) {
+ kfree(sc->ghl_databuf);
+ kfree(sc->ghl_cr);
+ }
+ usb_free_urb(urb);
+
+resched:
+ /* Reschedule for next time */
+ mod_timer(&sc->ghl_poke_timer, jiffies + GHL_GUITAR_POKE_INTERVAL*HZ);
+}
+
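ghl_magic_poke() above follows the standard self-rearming timer_list pattern: timer_setup() binds the callback, from_timer() recovers the containing structure, and the callback reschedules itself with mod_timer(). A stripped-down sketch with hypothetical names:

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	struct poker {
		struct timer_list timer;
		/* ...device state... */
	};

	static void poke_fn(struct timer_list *t)
	{
		struct poker *p = from_timer(p, t, timer);

		/* do the periodic work with p, then re-arm */
		mod_timer(&p->timer, jiffies + 10 * HZ);
	}

	static void poker_start(struct poker *p)
	{
		timer_setup(&p->timer, poke_fn, 0);
		mod_timer(&p->timer, jiffies + 10 * HZ);
	}
	/* Teardown must use del_timer_sync(), as sony_remove() does below. */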
+static int guitar_mapping(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ if ((usage->hid & HID_USAGE_PAGE) == HID_UP_MSVENDOR) {
+ unsigned int abs = usage->hid & HID_USAGE;
+
+ if (abs == GHL_GUITAR_TILT_USAGE) {
+ hid_map_usage_clear(hi, usage, bit, max, EV_ABS, ABS_RY);
+ return 1;
+ }
+ }
+ return 0;
+}
+
static ssize_t ds4_show_poll_interval(struct device *dev,
struct device_attribute
*attr, char *buf)
@@ -892,7 +991,8 @@ static void sixaxis_parse_report(struct sony_sc *sc, u8 *rd, int size)
static const u8 sixaxis_battery_capacity[] = { 0, 1, 25, 50, 75, 100 };
unsigned long flags;
int offset;
- u8 cable_state, battery_capacity, battery_charging;
+ u8 battery_capacity;
+ int battery_status;
/*
* The sixaxis is charging if the battery value is 0xee
@@ -904,19 +1004,16 @@ static void sixaxis_parse_report(struct sony_sc *sc, u8 *rd, int size)
if (rd[offset] >= 0xee) {
battery_capacity = 100;
- battery_charging = !(rd[offset] & 0x01);
- cable_state = 1;
+ battery_status = (rd[offset] & 0x01) ? POWER_SUPPLY_STATUS_FULL : POWER_SUPPLY_STATUS_CHARGING;
} else {
u8 index = rd[offset] <= 5 ? rd[offset] : 5;
battery_capacity = sixaxis_battery_capacity[index];
- battery_charging = 0;
- cable_state = 0;
+ battery_status = POWER_SUPPLY_STATUS_DISCHARGING;
}
spin_lock_irqsave(&sc->lock, flags);
- sc->cable_state = cable_state;
sc->battery_capacity = battery_capacity;
- sc->battery_charging = battery_charging;
+ sc->battery_status = battery_status;
spin_unlock_irqrestore(&sc->lock, flags);
if (sc->quirks & SIXAXIS_CONTROLLER) {
@@ -944,7 +1041,8 @@ static void dualshock4_parse_report(struct sony_sc *sc, u8 *rd, int size)
struct input_dev *input_dev = hidinput->input;
unsigned long flags;
int n, m, offset, num_touch_data, max_touch_data;
- u8 cable_state, battery_capacity, battery_charging;
+ u8 cable_state, battery_capacity;
+ int battery_status;
u16 timestamp;
/* When using Bluetooth the header is 2 bytes longer, so skip these. */
@@ -1049,29 +1147,52 @@ static void dualshock4_parse_report(struct sony_sc *sc, u8 *rd, int size)
*/
offset = data_offset + DS4_INPUT_REPORT_BATTERY_OFFSET;
cable_state = (rd[offset] >> 4) & 0x01;
- battery_capacity = rd[offset] & 0x0F;
/*
- * When a USB power source is connected the battery level ranges from
- * 0 to 10, and when running on battery power it ranges from 0 to 9.
- * A battery level above 10 when plugged in means charge completed.
+ * Interpretation of the battery_capacity data depends on the cable state.
+ * When no cable is connected (bit4 is 0):
+ * - 0-10: percentage in units of 10%.
+ * When a cable is plugged in:
+ * - 0-10: percentage in units of 10%.
+ * - 11: battery is full
+ * - 14: not charging due to voltage or temperature error
+ * - 15: charge error
*/
- if (!cable_state || battery_capacity > 10)
- battery_charging = 0;
- else
- battery_charging = 1;
+ if (cable_state) {
+ u8 battery_data = rd[offset] & 0xf;
+
+ if (battery_data < 10) {
+ /* Take the mid-point for each battery capacity value,
+ * because on the hardware side 0 = 0-9%, 1 = 10-19%, etc.
+ * This matches the official platform behavior.
+ */
+ battery_capacity = battery_data * 10 + 5;
+ battery_status = POWER_SUPPLY_STATUS_CHARGING;
+ } else if (battery_data == 10) {
+ battery_capacity = 100;
+ battery_status = POWER_SUPPLY_STATUS_CHARGING;
+ } else if (battery_data == 11) {
+ battery_capacity = 100;
+ battery_status = POWER_SUPPLY_STATUS_FULL;
+ } else { /* 14, 15 and undefined values */
+ battery_capacity = 0;
+ battery_status = POWER_SUPPLY_STATUS_UNKNOWN;
+ }
+ } else {
+ u8 battery_data = rd[offset] & 0xf;
- if (!cable_state)
- battery_capacity++;
- if (battery_capacity > 10)
- battery_capacity = 10;
+ if (battery_data < 10)
+ battery_capacity = battery_data * 10 + 5;
+ else /* 10 */
+ battery_capacity = 100;
- battery_capacity *= 10;
+ battery_status = POWER_SUPPLY_STATUS_DISCHARGING;
+ }
spin_lock_irqsave(&sc->lock, flags);
- sc->cable_state = cable_state;
sc->battery_capacity = battery_capacity;
- sc->battery_charging = battery_charging;
+ sc->battery_status = battery_status;
spin_unlock_irqrestore(&sc->lock, flags);
/*
@@ -1360,6 +1481,8 @@ static int sony_mapping(struct hid_device *hdev, struct hid_input *hi,
if (sc->quirks & DUALSHOCK4_CONTROLLER)
return ds4_mapping(hdev, hi, field, usage, bit, max);
+ if (sc->quirks & GHL_GUITAR_PS3WIIU)
+ return guitar_mapping(hdev, hi, field, usage, bit, max);
/* Let hid-core decide for the others */
return 0;
@@ -1597,16 +1720,38 @@ static int dualshock4_get_calibration_data(struct sony_sc *sc)
* of the controller, so that it sends input reports of type 0x11.
*/
if (sc->quirks & (DUALSHOCK4_CONTROLLER_USB | DUALSHOCK4_DONGLE)) {
+ int retries;
+
buf = kmalloc(DS4_FEATURE_REPORT_0x02_SIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- ret = hid_hw_raw_request(sc->hdev, 0x02, buf,
- DS4_FEATURE_REPORT_0x02_SIZE,
- HID_FEATURE_REPORT,
- HID_REQ_GET_REPORT);
- if (ret < 0)
- goto err_stop;
+ /* We should normally receive the feature report data we asked
+ * for, but hidraw applications such as Steam can issue feature
+ * reports as well. In particular on dongle reconnects, Steam and
+ * this function compete, which often results in receiving data for
+ * a different HID report, so retry a few times.
+ */
+ for (retries = 0; retries < 3; retries++) {
+ ret = hid_hw_raw_request(sc->hdev, 0x02, buf,
+ DS4_FEATURE_REPORT_0x02_SIZE,
+ HID_FEATURE_REPORT,
+ HID_REQ_GET_REPORT);
+ if (ret < 0)
+ goto err_stop;
+
+ if (buf[0] != 0x02) {
+ if (retries < 2) {
+ hid_warn(sc->hdev, "Retrying DualShock 4 get calibration report (0x02) request\n");
+ continue;
+ } else {
+ ret = -EILSEQ;
+ goto err_stop;
+ }
+ } else {
+ break;
+ }
+ }
} else {
u8 bthdr = 0xA3;
u32 crc;
@@ -2300,12 +2445,12 @@ static int sony_battery_get_property(struct power_supply *psy,
struct sony_sc *sc = power_supply_get_drvdata(psy);
unsigned long flags;
int ret = 0;
- u8 battery_charging, battery_capacity, cable_state;
+ u8 battery_capacity;
+ int battery_status;
spin_lock_irqsave(&sc->lock, flags);
- battery_charging = sc->battery_charging;
battery_capacity = sc->battery_capacity;
- cable_state = sc->cable_state;
+ battery_status = sc->battery_status;
spin_unlock_irqrestore(&sc->lock, flags);
switch (psp) {
@@ -2319,13 +2464,7 @@ static int sony_battery_get_property(struct power_supply *psy,
val->intval = battery_capacity;
break;
case POWER_SUPPLY_PROP_STATUS:
- if (battery_charging)
- val->intval = POWER_SUPPLY_STATUS_CHARGING;
- else
- if (battery_capacity == 100 && cable_state)
- val->intval = POWER_SUPPLY_STATUS_FULL;
- else
- val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ val->intval = battery_status;
break;
default:
ret = -EINVAL;
@@ -2723,19 +2862,17 @@ static int sony_input_configured(struct hid_device *hdev,
ret = device_create_file(&sc->hdev->dev, &dev_attr_firmware_version);
if (ret) {
- /* Make zero for cleanup reasons of sysfs entries. */
- sc->fw_version = 0;
- sc->hw_version = 0;
hid_err(sc->hdev, "can't create sysfs firmware_version attribute err: %d\n", ret);
goto err_stop;
}
+ sc->fw_version_created = true;
ret = device_create_file(&sc->hdev->dev, &dev_attr_hardware_version);
if (ret) {
- sc->hw_version = 0;
hid_err(sc->hdev, "can't create sysfs hardware_version attribute err: %d\n", ret);
goto err_stop;
}
+ sc->hw_version_created = true;
/*
* The Dualshock 4 touchpad supports 2 touches and has a
@@ -2827,9 +2964,9 @@ err_stop:
*/
if (sc->ds4_bt_poll_interval)
device_remove_file(&sc->hdev->dev, &dev_attr_bt_poll_interval);
- if (sc->fw_version)
+ if (sc->fw_version_created)
device_remove_file(&sc->hdev->dev, &dev_attr_firmware_version);
- if (sc->hw_version)
+ if (sc->hw_version_created)
device_remove_file(&sc->hdev->dev, &dev_attr_hardware_version);
sony_cancel_work_sync(sc);
sony_remove_dev_list(sc);
@@ -2902,6 +3039,12 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
return -ENODEV;
}
+ if (sc->quirks & GHL_GUITAR_PS3WIIU) {
+ timer_setup(&sc->ghl_poke_timer, ghl_magic_poke, 0);
+ mod_timer(&sc->ghl_poke_timer,
+ jiffies + GHL_GUITAR_POKE_INTERVAL*HZ);
+ }
+
return ret;
}
@@ -2909,15 +3052,18 @@ static void sony_remove(struct hid_device *hdev)
{
struct sony_sc *sc = hid_get_drvdata(hdev);
+ if (sc->quirks & GHL_GUITAR_PS3WIIU)
+ del_timer_sync(&sc->ghl_poke_timer);
+
hid_hw_close(hdev);
if (sc->quirks & DUALSHOCK4_CONTROLLER_BT)
device_remove_file(&sc->hdev->dev, &dev_attr_bt_poll_interval);
- if (sc->fw_version)
+ if (sc->fw_version_created)
device_remove_file(&sc->hdev->dev, &dev_attr_firmware_version);
- if (sc->hw_version)
+ if (sc->hw_version_created)
device_remove_file(&sc->hdev->dev, &dev_attr_hardware_version);
sony_cancel_work_sync(sc);
@@ -3020,6 +3166,9 @@ static const struct hid_device_id sony_devices[] = {
/* SMK-Link NSG-MR7U Remote Control */
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SMK, USB_DEVICE_ID_SMK_NSG_MR7U_REMOTE),
.driver_data = NSG_MR7U_REMOTE_BT },
+ /* Guitar Hero Live PS3 and Wii U guitar dongles */
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY_GHLIVE, USB_DEVICE_ID_SONY_PS3WIIU_GHLIVE_DONGLE),
+ .driver_data = GHL_GUITAR_PS3WIIU},
{ }
};
MODULE_DEVICE_TABLE(hid, sony_devices);
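The DualShock 4 battery mapping above is easy to exercise in isolation. A small user-space sketch of the same decode, where the ST_* constants stand in for the POWER_SUPPLY_STATUS_* values and the register layout follows the comment in the hunk:

	#include <stdint.h>
	#include <stdio.h>

	enum { ST_CHARGING, ST_FULL, ST_DISCHARGING, ST_UNKNOWN };

	static void decode(uint8_t reg, int *capacity, int *status)
	{
		int cable = (reg >> 4) & 0x01;
		uint8_t data = reg & 0x0f;

		if (cable) {
			if (data < 10) {
				*capacity = data * 10 + 5;	/* mid-point of 10% band */
				*status = ST_CHARGING;
			} else if (data == 10) {
				*capacity = 100;
				*status = ST_CHARGING;
			} else if (data == 11) {
				*capacity = 100;
				*status = ST_FULL;
			} else {			/* 14, 15, undefined */
				*capacity = 0;
				*status = ST_UNKNOWN;
			}
		} else {
			*capacity = data < 10 ? data * 10 + 5 : 100;
			*status = ST_DISCHARGING;
		}
	}

	int main(void)
	{
		int cap, st;

		decode(0x1b, &cap, &st);	/* cable + 11 -> 100%, full */
		printf("capacity=%d status=%d\n", cap, st);
		return 0;
	}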
diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c
index d26d8cd98efc..56406cee401f 100644
--- a/drivers/hid/hid-uclogic-params.c
+++ b/drivers/hid/hid-uclogic-params.c
@@ -90,7 +90,7 @@ static int uclogic_params_get_str_desc(__u8 **pbuf, struct hid_device *hdev,
goto cleanup;
} else if (rc < 0) {
hid_err(hdev,
- "failed retrieving string descriptor #%hhu: %d\n",
+ "failed retrieving string descriptor #%u: %d\n",
idx, rc);
goto cleanup;
}
diff --git a/drivers/hid/hid-wiimote-core.c b/drivers/hid/hid-wiimote-core.c
index 41012681cafd..4399d6c6afef 100644
--- a/drivers/hid/hid-wiimote-core.c
+++ b/drivers/hid/hid-wiimote-core.c
@@ -1482,7 +1482,7 @@ static void handler_return(struct wiimote_data *wdata, const __u8 *payload)
wdata->state.cmd_err = err;
wiimote_cmd_complete(wdata);
} else if (err) {
- hid_warn(wdata->hdev, "Remote error %hhu on req %hhu\n", err,
+ hid_warn(wdata->hdev, "Remote error %u on req %u\n", err,
cmd);
}
}
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 2eee5e31c2b7..79faac87a06f 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -170,7 +170,7 @@ static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t
/*
* This function performs a Get_Report transfer over the control endpoint
* per section 7.2.1 of the HID specification, version 1.1. The first byte
- * of buffer is the report number to request, or 0x0 if the defice does not
+ * of buffer is the report number to request, or 0x0 if the device does not
* use numbered reports. The report_type parameter can be HID_FEATURE_REPORT
* or HID_INPUT_REPORT.
*/
@@ -428,6 +428,28 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd,
break;
}
+ if (_IOC_NR(cmd) == _IOC_NR(HIDIOCSINPUT(0))) {
+ int len = _IOC_SIZE(cmd);
+ ret = hidraw_send_report(file, user_arg, len, HID_INPUT_REPORT);
+ break;
+ }
+ if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGINPUT(0))) {
+ int len = _IOC_SIZE(cmd);
+ ret = hidraw_get_report(file, user_arg, len, HID_INPUT_REPORT);
+ break;
+ }
+
+ if (_IOC_NR(cmd) == _IOC_NR(HIDIOCSOUTPUT(0))) {
+ int len = _IOC_SIZE(cmd);
+ ret = hidraw_send_report(file, user_arg, len, HID_OUTPUT_REPORT);
+ break;
+ }
+ if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGOUTPUT(0))) {
+ int len = _IOC_SIZE(cmd);
+ ret = hidraw_get_report(file, user_arg, len, HID_OUTPUT_REPORT);
+ break;
+ }
+
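The four new ioctls mirror the existing HIDIOCSFEATURE/HIDIOCGFEATURE pair for input and output reports. A hedged user-space sketch, assuming a kernel and <linux/hidraw.h> that provide these ioctls; the device path and report layout are made up:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/hidraw.h>

	int main(void)
	{
		unsigned char buf[5];
		int fd = open("/dev/hidraw0", O_RDWR);	/* hypothetical node */

		if (fd < 0)
			return 1;

		buf[0] = 0x01;			/* report number, 0x0 if unnumbered */
		memset(buf + 1, 0xff, sizeof(buf) - 1);

		if (ioctl(fd, HIDIOCSOUTPUT(sizeof(buf)), buf) < 0)
			perror("HIDIOCSOUTPUT");	/* send output report */

		if (ioctl(fd, HIDIOCGINPUT(sizeof(buf)), buf) < 0)
			perror("HIDIOCGINPUT");		/* fetch input report */

		close(fd);
		return 0;
	}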
/* Begin Read-only ioctls. */
if (_IOC_DIR(cmd) != _IOC_READ) {
ret = -EINVAL;
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index aeff1ffb0c8b..bfe716d7ea44 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -1106,8 +1106,11 @@ static int i2c_hid_probe(struct i2c_client *client,
}
ret = i2c_hid_fetch_hid_descriptor(ihid);
- if (ret < 0)
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "Failed to fetch the HID Descriptor\n");
goto err_regulator;
+ }
ret = i2c_hid_init_irq(client);
if (ret < 0)
diff --git a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
index 35f3bfc3e6f5..8e0f67455c09 100644
--- a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
+++ b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
@@ -405,6 +405,14 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = {
},
.driver_data = (void *)&sipodev_desc
},
+ {
+ .ident = "Vero K147",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "VERO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "K147"),
+ },
+ .driver_data = (void *)&sipodev_desc
+ },
{ } /* Terminate list */
};
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid.c b/drivers/hid/intel-ish-hid/ishtp-hid.c
index b8aae69ad15d..393bed0abee9 100644
--- a/drivers/hid/intel-ish-hid/ishtp-hid.c
+++ b/drivers/hid/intel-ish-hid/ishtp-hid.c
@@ -211,10 +211,8 @@ int ishtp_hid_probe(unsigned int cur_hid_dev,
struct ishtp_hid_data *hid_data;
hid = hid_allocate_device();
- if (IS_ERR(hid)) {
- rv = PTR_ERR(hid);
- return -ENOMEM;
- }
+ if (IS_ERR(hid))
+ return PTR_ERR(hid);
hid_data = kzalloc(sizeof(*hid_data), GFP_KERNEL);
if (!hid_data) {
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 17a29ee0ac6c..86257ce6d619 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -438,6 +438,7 @@ static void hid_irq_out(struct urb *urb)
break;
case -ESHUTDOWN: /* unplug */
unplug = 1;
+ break;
case -EILSEQ: /* protocol error or unplug */
case -EPROTO: /* protocol error or unplug */
case -ECONNRESET: /* unlink */
@@ -489,6 +490,7 @@ static void hid_ctrl(struct urb *urb)
break;
case -ESHUTDOWN: /* unplug */
unplug = 1;
+ break;
case -EILSEQ: /* protocol error or unplug */
case -EPROTO: /* protocol error or unplug */
case -ECONNRESET: /* unlink */
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index cd71e7133944..aa9e48876ced 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -147,9 +147,9 @@ static int wacom_wac_pen_serial_enforce(struct hid_device *hdev,
}
if (flush)
- wacom_wac_queue_flush(hdev, &wacom_wac->pen_fifo);
+ wacom_wac_queue_flush(hdev, wacom_wac->pen_fifo);
else if (insert)
- wacom_wac_queue_insert(hdev, &wacom_wac->pen_fifo,
+ wacom_wac_queue_insert(hdev, wacom_wac->pen_fifo,
raw_data, report_size);
return insert && !flush;
@@ -1173,7 +1173,7 @@ static struct attribute *cintiq_led_attrs[] = {
NULL
};
-static struct attribute_group cintiq_led_attr_group = {
+static const struct attribute_group cintiq_led_attr_group = {
.name = "wacom_led",
.attrs = cintiq_led_attrs,
};
@@ -1194,7 +1194,7 @@ static struct attribute *intuos4_led_attrs[] = {
NULL
};
-static struct attribute_group intuos4_led_attr_group = {
+static const struct attribute_group intuos4_led_attr_group = {
.name = "wacom_led",
.attrs = intuos4_led_attrs,
};
@@ -1205,7 +1205,7 @@ static struct attribute *intuos5_led_attrs[] = {
NULL
};
-static struct attribute_group intuos5_led_attr_group = {
+static const struct attribute_group intuos5_led_attr_group = {
.name = "wacom_led",
.attrs = intuos5_led_attrs,
};
@@ -1216,13 +1216,13 @@ static struct attribute *generic_led_attrs[] = {
NULL
};
-static struct attribute_group generic_led_attr_group = {
+static const struct attribute_group generic_led_attr_group = {
.name = "wacom_led",
.attrs = generic_led_attrs,
};
struct wacom_sysfs_group_devres {
- struct attribute_group *group;
+ const struct attribute_group *group;
struct kobject *root;
};
@@ -1238,7 +1238,7 @@ static void wacom_devm_sysfs_group_release(struct device *dev, void *res)
static int __wacom_devm_sysfs_create_group(struct wacom *wacom,
struct kobject *root,
- struct attribute_group *group)
+ const struct attribute_group *group)
{
struct wacom_sysfs_group_devres *devres;
int error;
@@ -1264,12 +1264,44 @@ static int __wacom_devm_sysfs_create_group(struct wacom *wacom,
}
static int wacom_devm_sysfs_create_group(struct wacom *wacom,
- struct attribute_group *group)
+ const struct attribute_group *group)
{
return __wacom_devm_sysfs_create_group(wacom, &wacom->hdev->dev.kobj,
group);
}
+static void wacom_devm_kfifo_release(struct device *dev, void *res)
+{
+ struct kfifo_rec_ptr_2 *devres = res;
+
+ kfifo_free(devres);
+}
+
+static int wacom_devm_kfifo_alloc(struct wacom *wacom)
+{
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct kfifo_rec_ptr_2 *pen_fifo;
+ int error;
+
+ pen_fifo = devres_alloc(wacom_devm_kfifo_release,
+ sizeof(struct kfifo_rec_ptr_2),
+ GFP_KERNEL);
+
+ if (!pen_fifo)
+ return -ENOMEM;
+
+ error = kfifo_alloc(pen_fifo, WACOM_PKGLEN_MAX, GFP_KERNEL);
+ if (error) {
+ devres_free(pen_fifo);
+ return error;
+ }
+
+ devres_add(&wacom->hdev->dev, pen_fifo);
+ wacom_wac->pen_fifo = pen_fifo;
+
+ return 0;
+}
+
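wacom_devm_kfifo_alloc() above uses the raw devres API so the FIFO is released automatically when the underlying device goes away, which is what lets wacom_remove() drop its manual kfifo_free() below. A generic sketch of that pattern; the resource type here is arbitrary:

	#include <linux/device.h>
	#include <linux/slab.h>

	struct my_res {
		void *buf;
	};

	static void my_res_release(struct device *dev, void *res)
	{
		struct my_res *r = res;

		kfree(r->buf);			/* undo the init step */
	}

	static int my_res_attach(struct device *dev)
	{
		struct my_res *r;

		r = devres_alloc(my_res_release, sizeof(*r), GFP_KERNEL);
		if (!r)
			return -ENOMEM;

		r->buf = kmalloc(64, GFP_KERNEL);
		if (!r->buf) {
			devres_free(r);		/* init failed: no release call */
			return -ENOMEM;
		}

		devres_add(dev, r);		/* release runs on device teardown */
		return 0;
	}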
enum led_brightness wacom_leds_brightness_get(struct wacom_led *led)
{
struct wacom *wacom = led->wacom;
@@ -1847,7 +1879,7 @@ static struct attribute *remote##SET_ID##_serial_attrs[] = { \
&remote##SET_ID##_mode_attr.attr, \
NULL \
}; \
-static struct attribute_group remote##SET_ID##_serial_group = { \
+static const struct attribute_group remote##SET_ID##_serial_group = { \
.name = NULL, \
.attrs = remote##SET_ID##_serial_attrs, \
}
@@ -2724,7 +2756,7 @@ static int wacom_probe(struct hid_device *hdev,
if (features->check_for_hid_type && features->hid_type != hdev->type)
return -ENODEV;
- error = kfifo_alloc(&wacom_wac->pen_fifo, WACOM_PKGLEN_MAX, GFP_KERNEL);
+ error = wacom_devm_kfifo_alloc(wacom);
if (error)
return error;
@@ -2786,8 +2818,6 @@ static void wacom_remove(struct hid_device *hdev)
if (wacom->wacom_wac.features.type != REMOTE)
wacom_release_resources(wacom);
-
- kfifo_free(&wacom_wac->pen_fifo);
}
#ifdef CONFIG_PM
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index da612b6e9c77..195910dd2154 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -342,7 +342,7 @@ struct wacom_wac {
struct input_dev *pen_input;
struct input_dev *touch_input;
struct input_dev *pad_input;
- struct kfifo_rec_ptr_2 pen_fifo;
+ struct kfifo_rec_ptr_2 *pen_fifo;
int pid;
int num_contacts_left;
u8 bt_features;
diff --git a/drivers/hsi/controllers/omap_ssi_core.c b/drivers/hsi/controllers/omap_ssi_core.c
index fa69b94debd9..7596dc164648 100644
--- a/drivers/hsi/controllers/omap_ssi_core.c
+++ b/drivers/hsi/controllers/omap_ssi_core.c
@@ -355,7 +355,7 @@ static int ssi_add_controller(struct hsi_controller *ssi,
err = ida_simple_get(&platform_omap_ssi_ida, 0, 0, GFP_KERNEL);
if (err < 0)
- goto out_err;
+ return err;
ssi->id = err;
ssi->owner = THIS_MODULE;
diff --git a/drivers/hsi/hsi_core.c b/drivers/hsi/hsi_core.c
index 47f0208aa7c3..c3fb5beb846e 100644
--- a/drivers/hsi/hsi_core.c
+++ b/drivers/hsi/hsi_core.c
@@ -352,7 +352,7 @@ static void hsi_port_release(struct device *dev)
}
/**
- * hsi_unregister_port - Unregister an HSI port
+ * hsi_port_unregister_clients - Unregister the clients of an HSI port
* @port: The HSI port to unregister
*/
void hsi_port_unregister_clients(struct hsi_port *port)
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index fbdda9938039..6fb0c76bfbf8 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -503,6 +503,70 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
}
EXPORT_SYMBOL_GPL(vmbus_establish_gpadl);
+/**
+ * request_arr_init - Allocates memory for the requestor array. Each slot
+ * keeps track of the next available slot in the array. Initially, each
+ * slot points to the next one (as in a Linked List). The last slot
+ * does not point to anything, so its value is U64_MAX by default.
+ * @size: The size of the array
+ */
+static u64 *request_arr_init(u32 size)
+{
+ int i;
+ u64 *req_arr;
+
+ req_arr = kcalloc(size, sizeof(u64), GFP_KERNEL);
+ if (!req_arr)
+ return NULL;
+
+ for (i = 0; i < size - 1; i++)
+ req_arr[i] = i + 1;
+
+ /* Last slot (no more available slots) */
+ req_arr[i] = U64_MAX;
+
+ return req_arr;
+}
+
+/*
+ * vmbus_alloc_requestor - Initializes @rqstor's fields.
+ * Index 0 is the first free slot
+ * @size: Size of the requestor array
+ */
+static int vmbus_alloc_requestor(struct vmbus_requestor *rqstor, u32 size)
+{
+ u64 *rqst_arr;
+ unsigned long *bitmap;
+
+ rqst_arr = request_arr_init(size);
+ if (!rqst_arr)
+ return -ENOMEM;
+
+ bitmap = bitmap_zalloc(size, GFP_KERNEL);
+ if (!bitmap) {
+ kfree(rqst_arr);
+ return -ENOMEM;
+ }
+
+ rqstor->req_arr = rqst_arr;
+ rqstor->req_bitmap = bitmap;
+ rqstor->size = size;
+ rqstor->next_request_id = 0;
+ spin_lock_init(&rqstor->req_lock);
+
+ return 0;
+}
+
+/*
+ * vmbus_free_requestor - Frees memory allocated for @rqstor
+ * @rqstor: Pointer to the requestor struct
+ */
+static void vmbus_free_requestor(struct vmbus_requestor *rqstor)
+{
+ kfree(rqstor->req_arr);
+ bitmap_free(rqstor->req_bitmap);
+}
+
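The requestor is a free list embedded in the array itself: each free slot of req_arr stores the index of the next free slot, next_request_id is the head, and U64_MAX terminates the list. A standalone user-space sketch of the same allocator (size is assumed to be at least 1):

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define NO_SLOT UINT64_MAX

	struct requestor {
		uint64_t *arr;		/* free slots link to the next free slot */
		uint64_t head;		/* index of the first free slot */
		uint64_t size;
	};

	static int requestor_init(struct requestor *r, uint64_t size)
	{
		uint64_t i;

		r->arr = calloc(size, sizeof(*r->arr));
		if (!r->arr)
			return -1;
		for (i = 0; i < size - 1; i++)
			r->arr[i] = i + 1;
		r->arr[i] = NO_SLOT;	/* last slot: no more free slots */
		r->head = 0;
		r->size = size;
		return 0;
	}

	/* Store @addr, return the slot used (or NO_SLOT when full). */
	static uint64_t req_alloc(struct requestor *r, uint64_t addr)
	{
		uint64_t id = r->head;

		if (id >= r->size)
			return NO_SLOT;
		r->head = r->arr[id];
		r->arr[id] = addr;
		return id;
	}

	/* Return the stored address, push the slot back on the free list. */
	static uint64_t req_free(struct requestor *r, uint64_t id)
	{
		uint64_t addr = r->arr[id];

		r->arr[id] = r->head;
		r->head = id;
		return addr;
	}

	int main(void)
	{
		struct requestor r;

		if (requestor_init(&r, 4))
			return 1;
		printf("id=%llu\n", (unsigned long long)req_alloc(&r, 0xdead));
		printf("addr=%llx\n", (unsigned long long)req_free(&r, 0));
		free(r.arr);
		return 0;
	}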
static int __vmbus_open(struct vmbus_channel *newchannel,
void *userdata, u32 userdatalen,
void (*onchannelcallback)(void *context), void *context)
@@ -523,6 +587,12 @@ static int __vmbus_open(struct vmbus_channel *newchannel,
if (newchannel->state != CHANNEL_OPEN_STATE)
return -EINVAL;
+ /* Create and init requestor */
+ if (newchannel->rqstor_size) {
+ if (vmbus_alloc_requestor(&newchannel->requestor, newchannel->rqstor_size))
+ return -ENOMEM;
+ }
+
newchannel->state = CHANNEL_OPENING_STATE;
newchannel->onchannel_callback = onchannelcallback;
newchannel->channel_callback_context = context;
@@ -626,6 +696,7 @@ error_free_gpadl:
error_clean_ring:
hv_ringbuffer_cleanup(&newchannel->outbound);
hv_ringbuffer_cleanup(&newchannel->inbound);
+ vmbus_free_requestor(&newchannel->requestor);
newchannel->state = CHANNEL_OPEN_STATE;
return err;
}
@@ -808,6 +879,9 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
channel->ringbuffer_gpadlhandle = 0;
}
+ if (!ret)
+ vmbus_free_requestor(&channel->requestor);
+
return ret;
}
@@ -888,7 +962,7 @@ int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer,
/* in 8-bytes granularity */
desc.offset8 = sizeof(struct vmpacket_descriptor) >> 3;
desc.len8 = (u16)(packetlen_aligned >> 3);
- desc.trans_id = requestid;
+ desc.trans_id = VMBUS_RQST_ERROR; /* will be updated in hv_ringbuffer_write() */
bufferlist[0].iov_base = &desc;
bufferlist[0].iov_len = sizeof(struct vmpacket_descriptor);
@@ -897,7 +971,7 @@ int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer,
bufferlist[2].iov_base = &aligned_data;
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
- return hv_ringbuffer_write(channel, bufferlist, num_vecs);
+ return hv_ringbuffer_write(channel, bufferlist, num_vecs, requestid);
}
EXPORT_SYMBOL(vmbus_sendpacket);
@@ -939,7 +1013,7 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
desc.length8 = (u16)(packetlen_aligned >> 3);
- desc.transactionid = requestid;
+ desc.transactionid = VMBUS_RQST_ERROR; /* will be updated in hv_ringbuffer_write() */
desc.reserved = 0;
desc.rangecount = pagecount;
@@ -956,7 +1030,7 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
bufferlist[2].iov_base = &aligned_data;
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
- return hv_ringbuffer_write(channel, bufferlist, 3);
+ return hv_ringbuffer_write(channel, bufferlist, 3, requestid);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer);
@@ -983,7 +1057,7 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
desc->flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
desc->dataoffset8 = desc_size >> 3; /* in 8-bytes granularity */
desc->length8 = (u16)(packetlen_aligned >> 3);
- desc->transactionid = requestid;
+ desc->transactionid = VMBUS_RQST_ERROR; /* will be updated in hv_ringbuffer_write() */
desc->reserved = 0;
desc->rangecount = 1;
@@ -994,7 +1068,7 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
bufferlist[2].iov_base = &aligned_data;
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
- return hv_ringbuffer_write(channel, bufferlist, 3);
+ return hv_ringbuffer_write(channel, bufferlist, 3, requestid);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_mpb_desc);
@@ -1042,3 +1116,91 @@ int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
buffer_actual_len, requestid, true);
}
EXPORT_SYMBOL_GPL(vmbus_recvpacket_raw);
+
+/*
+ * vmbus_next_request_id - Returns a new request id. The guest memory
+ * address passed in is stored at index (id - 1) of the requestor array.
+ * Uses a spin lock to avoid race conditions.
+ * @rqstor: Pointer to the requestor struct
+ * @rqst_addr: Guest memory address to be stored in the array
+ */
+u64 vmbus_next_request_id(struct vmbus_requestor *rqstor, u64 rqst_addr)
+{
+ unsigned long flags;
+ u64 current_id;
+ const struct vmbus_channel *channel =
+ container_of(rqstor, const struct vmbus_channel, requestor);
+
+ /* Check rqstor has been initialized */
+ if (!channel->rqstor_size)
+ return VMBUS_NO_RQSTOR;
+
+ spin_lock_irqsave(&rqstor->req_lock, flags);
+ current_id = rqstor->next_request_id;
+
+ /* Requestor array is full */
+ if (current_id >= rqstor->size) {
+ spin_unlock_irqrestore(&rqstor->req_lock, flags);
+ return VMBUS_RQST_ERROR;
+ }
+
+ rqstor->next_request_id = rqstor->req_arr[current_id];
+ rqstor->req_arr[current_id] = rqst_addr;
+
+ /* The already held spin lock provides atomicity */
+ bitmap_set(rqstor->req_bitmap, current_id, 1);
+
+ spin_unlock_irqrestore(&rqstor->req_lock, flags);
+
+ /*
+ * Cannot return an ID of 0, which is reserved for an unsolicited
+ * message from Hyper-V.
+ */
+ return current_id + 1;
+}
+EXPORT_SYMBOL_GPL(vmbus_next_request_id);
+
+/*
+ * vmbus_request_addr - Returns the memory address stored at @trans_id
+ * in @rqstor. Uses a spin lock to avoid race conditions.
+ * @rqstor: Pointer to the requestor struct
+ * @trans_id: Request id sent back from Hyper-V. Becomes the requestor's
+ * next request id.
+ */
+u64 vmbus_request_addr(struct vmbus_requestor *rqstor, u64 trans_id)
+{
+ unsigned long flags;
+ u64 req_addr;
+ const struct vmbus_channel *channel =
+ container_of(rqstor, const struct vmbus_channel, requestor);
+
+ /* Check rqstor has been initialized */
+ if (!channel->rqstor_size)
+ return VMBUS_NO_RQSTOR;
+
+ /* Hyper-V can send an unsolicited message with ID of 0 */
+ if (!trans_id)
+ return trans_id;
+
+ spin_lock_irqsave(&rqstor->req_lock, flags);
+
+ /* Data corresponding to trans_id is stored at trans_id - 1 */
+ trans_id--;
+
+ /* Invalid trans_id */
+ if (trans_id >= rqstor->size || !test_bit(trans_id, rqstor->req_bitmap)) {
+ spin_unlock_irqrestore(&rqstor->req_lock, flags);
+ return VMBUS_RQST_ERROR;
+ }
+
+ req_addr = rqstor->req_arr[trans_id];
+ rqstor->req_arr[trans_id] = rqstor->next_request_id;
+ rqstor->next_request_id = trans_id;
+
+ /* The already held spin lock provides atomicity */
+ bitmap_clear(rqstor->req_bitmap, trans_id, 1);
+
+ spin_unlock_irqrestore(&rqstor->req_lock, flags);
+ return req_addr;
+}
+EXPORT_SYMBOL_GPL(vmbus_request_addr);
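Note the off-by-one encoding: vmbus_next_request_id() returns index + 1 because an ID of 0 is reserved for unsolicited Hyper-V messages, and vmbus_request_addr() undoes it before indexing. A hedged usage sketch; my_request and complete_my_request() are hypothetical stand-ins for a driver's own bookkeeping:

	/* Send path, for a channel that set rqstor_size before open. */
	static int send_path(struct vmbus_channel *channel,
			     struct vmpacket_descriptor *desc, void *my_request)
	{
		u64 id = vmbus_next_request_id(&channel->requestor,
					       (u64)my_request);

		if (id == VMBUS_RQST_ERROR)
			return -EAGAIN;		/* all rqstor_size slots in use */
		desc->trans_id = id;		/* Hyper-V echoes this back */
		return 0;
	}

	/* Completion path: translate the echoed id back into the cookie. */
	static void completion_path(struct vmbus_channel *channel, u64 trans_id)
	{
		/* trans_id of 0 passes through: reserved for unsolicited msgs */
		u64 addr = vmbus_request_addr(&channel->requestor, trans_id);

		if (addr != VMBUS_RQST_ERROR && addr != VMBUS_NO_RQSTOR)
			complete_my_request((void *)addr);	/* hypothetical */
	}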
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index eb56e09ae15f..8c471823a5af 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -1198,6 +1198,7 @@ static void free_balloon_pages(struct hv_dynmem_device *dm,
__ClearPageOffline(pg);
__free_page(pg);
dm->num_pages_ballooned--;
+ adjust_managed_page_count(pg, 1);
}
}
@@ -1238,8 +1239,10 @@ static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
split_page(pg, get_order(alloc_unit << PAGE_SHIFT));
/* mark all pages offline */
- for (j = 0; j < (1 << get_order(alloc_unit << PAGE_SHIFT)); j++)
+ for (j = 0; j < alloc_unit; j++) {
__SetPageOffline(pg + j);
+ adjust_managed_page_count(pg + j, -1);
+ }
bl_resp->range_count++;
bl_resp->range_array[i].finfo.start_page =
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 40e2b9f91163..9416e09ebd58 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -179,21 +179,21 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);
int hv_ringbuffer_write(struct vmbus_channel *channel,
- const struct kvec *kv_list, u32 kv_count);
+ const struct kvec *kv_list, u32 kv_count,
+ u64 requestid);
int hv_ringbuffer_read(struct vmbus_channel *channel,
void *buffer, u32 buflen, u32 *buffer_actual_len,
u64 *requestid, bool raw);
/*
- * The Maximum number of channels (16348) is determined by the size of the
+ * The Maximum number of channels (16384) is determined by the size of the
* interrupt page, which is HV_HYP_PAGE_SIZE. 1/2 of HV_HYP_PAGE_SIZE is to
* send endpoint interrupts, and the other is to receive endpoint interrupts.
*/
#define MAX_NUM_CHANNELS ((HV_HYP_PAGE_SIZE >> 1) << 3)
/* The value here must be in multiple of 32 */
-/* TODO: Need to make this configurable */
#define MAX_NUM_CHANNELS_SUPPORTED 256
#define MAX_CHANNEL_RELIDS \
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 356e22159e83..35833d4d1a1d 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -248,7 +248,8 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
/* Write to the ring buffer. */
int hv_ringbuffer_write(struct vmbus_channel *channel,
- const struct kvec *kv_list, u32 kv_count)
+ const struct kvec *kv_list, u32 kv_count,
+ u64 requestid)
{
int i;
u32 bytes_avail_towrite;
@@ -258,6 +259,8 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
u64 prev_indices;
unsigned long flags;
struct hv_ring_buffer_info *outring_info = &channel->outbound;
+ struct vmpacket_descriptor *desc = kv_list[0].iov_base;
+ u64 rqst_id = VMBUS_NO_RQSTOR;
if (channel->rescind)
return -ENODEV;
@@ -300,6 +303,23 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
kv_list[i].iov_len);
}
+ /*
+ * Allocate the request ID after the data has been copied into the
+ * ring buffer. Once this request ID is allocated, the completion
+ * path could find the data and free it.
+ */
+
+ if (desc->flags == VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED) {
+ rqst_id = vmbus_next_request_id(&channel->requestor, requestid);
+ if (rqst_id == VMBUS_RQST_ERROR) {
+ spin_unlock_irqrestore(&outring_info->ring_lock, flags);
+ pr_err("No request id available\n");
+ return -EAGAIN;
+ }
+ }
+ desc = hv_get_ring_buffer(outring_info) + old_write;
+ desc->trans_id = (rqst_id == VMBUS_NO_RQSTOR) ? requestid : rqst_id;
+
/* Set previous packet start */
prev_indices = hv_get_ring_bufferindices(outring_info);
@@ -319,8 +339,13 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
hv_signal_on_write(old_write, channel);
- if (channel->rescind)
+ if (channel->rescind) {
+ if (rqst_id != VMBUS_NO_RQSTOR) {
+ /* Reclaim request ID to avoid leak of IDs */
+ vmbus_request_addr(&channel->requestor, rqst_id);
+ }
return -ENODEV;
+ }
return 0;
}
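The ordering in this hunk is deliberate: the payload is copied into the ring before the request ID is allocated, because as soon as the ID exists a racing completion could look it up and free the request. A condensed sketch of the flow above, not a drop-in replacement:

	/* Sketch only; locking details and error paths trimmed. */
	static int ring_write_flow(struct vmbus_channel *channel,
				   const struct kvec *kv_list, u32 kv_count,
				   u64 requestid)
	{
		u64 rqst_id = VMBUS_NO_RQSTOR;

		/* lock ring, check space, copy kv_list into the ring buffer */

		/* Only now allocate the ID: from this point a completion
		 * can legitimately find and free the request. */
		rqst_id = vmbus_next_request_id(&channel->requestor, requestid);
		if (rqst_id == VMBUS_RQST_ERROR)
			return -EAGAIN;		/* after dropping the ring lock */

		/* patch trans_id in the ring copy, commit write index, unlock */

		if (channel->rescind) {
			/* The host will never complete this; reclaim the ID. */
			vmbus_request_addr(&channel->requestor, rqst_id);
			return -ENODEV;
		}
		return 0;
	}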
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 4fad3e6745e5..d491fdcee61f 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -55,7 +55,7 @@ int vmbus_interrupt;
/*
* Boolean to control whether to report panic messages over Hyper-V.
*
- * It can be set via /proc/sys/kernel/hyperv/record_panic_msg
+ * It can be set via /proc/sys/kernel/hyperv_record_panic_msg
*/
static int sysctl_record_panic_msg = 1;
@@ -156,6 +156,7 @@ static u32 channel_conn_id(struct vmbus_channel *channel,
{
u8 monitor_group = channel_monitor_group(channel);
u8 monitor_offset = channel_monitor_offset(channel);
+
return monitor_page->parameter[monitor_group][monitor_offset].connectionid.u.id;
}
@@ -550,6 +551,7 @@ static ssize_t vendor_show(struct device *dev,
char *buf)
{
struct hv_device *hv_dev = device_to_hv_device(dev);
+
return sprintf(buf, "0x%x\n", hv_dev->vendor_id);
}
static DEVICE_ATTR_RO(vendor);
@@ -559,6 +561,7 @@ static ssize_t device_show(struct device *dev,
char *buf)
{
struct hv_device *hv_dev = device_to_hv_device(dev);
+
return sprintf(buf, "0x%x\n", hv_dev->device_id);
}
static DEVICE_ATTR_RO(device);
@@ -1384,6 +1387,24 @@ static struct kmsg_dumper hv_kmsg_dumper = {
.dump = hv_kmsg_dump,
};
+static void hv_kmsg_dump_register(void)
+{
+ int ret;
+
+ hv_panic_page = hv_alloc_hyperv_zeroed_page();
+ if (!hv_panic_page) {
+ pr_err("Hyper-V: panic message page memory allocation failed\n");
+ return;
+ }
+
+ ret = kmsg_dump_register(&hv_kmsg_dumper);
+ if (ret) {
+ pr_err("Hyper-V: kmsg dump register error 0x%x\n", ret);
+ hv_free_hyperv_page((unsigned long)hv_panic_page);
+ hv_panic_page = NULL;
+ }
+}
+
static struct ctl_table_header *hv_ctl_table_hdr;
/*
@@ -1474,21 +1495,8 @@ static int vmbus_bus_init(void)
* capability is supported by the hypervisor.
*/
hv_get_crash_ctl(hyperv_crash_ctl);
- if (hyperv_crash_ctl & HV_CRASH_CTL_CRASH_NOTIFY_MSG) {
- hv_panic_page = (void *)hv_alloc_hyperv_zeroed_page();
- if (hv_panic_page) {
- ret = kmsg_dump_register(&hv_kmsg_dumper);
- if (ret) {
- pr_err("Hyper-V: kmsg dump register "
- "error 0x%x\n", ret);
- hv_free_hyperv_page(
- (unsigned long)hv_panic_page);
- hv_panic_page = NULL;
- }
- } else
- pr_err("Hyper-V: panic message page memory "
- "allocation failed");
- }
+ if (hyperv_crash_ctl & HV_CRASH_CTL_CRASH_NOTIFY_MSG)
+ hv_kmsg_dump_register();
register_die_notifier(&hyperv_die_block);
}
@@ -1812,7 +1820,7 @@ static ssize_t channel_pending_show(struct vmbus_channel *channel,
channel_pending(channel,
vmbus_connection.monitor_pages[1]));
}
-static VMBUS_CHAN_ATTR(pending, S_IRUGO, channel_pending_show, NULL);
+static VMBUS_CHAN_ATTR(pending, 0444, channel_pending_show, NULL);
static ssize_t channel_latency_show(struct vmbus_channel *channel,
char *buf)
@@ -1821,19 +1829,19 @@ static ssize_t channel_latency_show(struct vmbus_channel *channel,
channel_latency(channel,
vmbus_connection.monitor_pages[1]));
}
-static VMBUS_CHAN_ATTR(latency, S_IRUGO, channel_latency_show, NULL);
+static VMBUS_CHAN_ATTR(latency, 0444, channel_latency_show, NULL);
static ssize_t channel_interrupts_show(struct vmbus_channel *channel, char *buf)
{
return sprintf(buf, "%llu\n", channel->interrupts);
}
-static VMBUS_CHAN_ATTR(interrupts, S_IRUGO, channel_interrupts_show, NULL);
+static VMBUS_CHAN_ATTR(interrupts, 0444, channel_interrupts_show, NULL);
static ssize_t channel_events_show(struct vmbus_channel *channel, char *buf)
{
return sprintf(buf, "%llu\n", channel->sig_events);
}
-static VMBUS_CHAN_ATTR(events, S_IRUGO, channel_events_show, NULL);
+static VMBUS_CHAN_ATTR(events, 0444, channel_events_show, NULL);
static ssize_t channel_intr_in_full_show(struct vmbus_channel *channel,
char *buf)
@@ -1872,7 +1880,7 @@ static ssize_t subchannel_monitor_id_show(struct vmbus_channel *channel,
{
return sprintf(buf, "%u\n", channel->offermsg.monitorid);
}
-static VMBUS_CHAN_ATTR(monitor_id, S_IRUGO, subchannel_monitor_id_show, NULL);
+static VMBUS_CHAN_ATTR(monitor_id, 0444, subchannel_monitor_id_show, NULL);
static ssize_t subchannel_id_show(struct vmbus_channel *channel,
char *buf)
@@ -2377,7 +2385,7 @@ static int vmbus_bus_suspend(struct device *dev)
* We wait here until the completion of any channel
* offers that are currently in progress.
*/
- msleep(1);
+ usleep_range(1000, 2000);
}
mutex_lock(&vmbus_connection.channel_mutex);
@@ -2542,7 +2550,6 @@ static void hv_kexec_handler(void)
/* Make sure conn_state is set as hv_synic_cleanup checks for it */
mb();
cpuhp_remove_state(hyperv_cpuhp_online);
- hyperv_cleanup();
};
static void hv_crash_handler(struct pt_regs *regs)
@@ -2558,7 +2565,6 @@ static void hv_crash_handler(struct pt_regs *regs)
cpu = smp_processor_id();
hv_stimer_cleanup(cpu);
hv_synic_disable_regs(cpu);
- hyperv_cleanup();
};
static int hv_synic_suspend(void)
diff --git a/drivers/hwmon/amd_energy.c b/drivers/hwmon/amd_energy.c
index 9b306448b7a0..822c2e74b98d 100644
--- a/drivers/hwmon/amd_energy.c
+++ b/drivers/hwmon/amd_energy.c
@@ -222,7 +222,7 @@ static int amd_create_sensor(struct device *dev,
*/
cpus = num_present_cpus() / num_siblings;
- s_config = devm_kcalloc(dev, cpus + sockets,
+ s_config = devm_kcalloc(dev, cpus + sockets + 1,
sizeof(u32), GFP_KERNEL);
if (!s_config)
return -ENOMEM;
@@ -254,6 +254,7 @@ static int amd_create_sensor(struct device *dev,
scnprintf(label_l[i], 10, "Esocket%u", (i - cpus));
}
+ s_config[i] = 0;
return 0;
}
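hwmon channel config arrays are zero-terminated, so the sensor table needs one slot beyond cpus + sockets; the hunk adds both the extra allocation and the explicit terminator. For comparison, a static table encodes the same convention like this (the channel count here is illustrative):

	#include <linux/hwmon.h>

	/* Two energy channels; the config array must end with a 0 entry. */
	static const u32 energy_config[] = {
		HWMON_E_INPUT, HWMON_E_INPUT,
		0,	/* terminator the hunk above now accounts for */
	};

	static const struct hwmon_channel_info energy_info = {
		.type = hwmon_energy,
		.config = energy_config,
	};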
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index a250481b5a97..3bc2551577a3 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -11,13 +11,6 @@
* convert raw register values is from https://github.com/ocerman/zenpower.
* The information is not confirmed from chip datasheets, but experiments
* suggest that it provides reasonable temperature values.
- * - Register addresses to read chip voltage and current are also from
- * https://github.com/ocerman/zenpower, and not confirmed from chip
- * datasheets. Current calibration is board specific and not typically
- * shared by board vendors. For this reason, current values are
- * normalized to report 1A/LSB for core current and and 0.25A/LSB for SoC
- * current. Reported values can be adjusted using the sensors configuration
- * file.
*/
#include <linux/bitops.h>
@@ -109,10 +102,7 @@ struct k10temp_data {
int temp_offset;
u32 temp_adjust_mask;
u32 show_temp;
- u32 svi_addr[2];
bool is_zen;
- bool show_current;
- int cfactor[2];
};
#define TCTL_BIT 0
@@ -137,16 +127,6 @@ static const struct tctl_offset tctl_offset_table[] = {
{ 0x17, "AMD Ryzen Threadripper 29", 27000 }, /* 29{20,50,70,90}[W]X */
};
-static bool is_threadripper(void)
-{
- return strstr(boot_cpu_data.x86_model_id, "Threadripper");
-}
-
-static bool is_epyc(void)
-{
- return strstr(boot_cpu_data.x86_model_id, "EPYC");
-}
-
static void read_htcreg_pci(struct pci_dev *pdev, u32 *regval)
{
pci_read_config_dword(pdev, REG_HARDWARE_THERMAL_CONTROL, regval);
@@ -211,16 +191,6 @@ static const char *k10temp_temp_label[] = {
"Tccd8",
};
-static const char *k10temp_in_label[] = {
- "Vcore",
- "Vsoc",
-};
-
-static const char *k10temp_curr_label[] = {
- "Icore",
- "Isoc",
-};
-
static int k10temp_read_labels(struct device *dev,
enum hwmon_sensor_types type,
u32 attr, int channel, const char **str)
@@ -229,50 +199,6 @@ static int k10temp_read_labels(struct device *dev,
case hwmon_temp:
*str = k10temp_temp_label[channel];
break;
- case hwmon_in:
- *str = k10temp_in_label[channel];
- break;
- case hwmon_curr:
- *str = k10temp_curr_label[channel];
- break;
- default:
- return -EOPNOTSUPP;
- }
- return 0;
-}
-
-static int k10temp_read_curr(struct device *dev, u32 attr, int channel,
- long *val)
-{
- struct k10temp_data *data = dev_get_drvdata(dev);
- u32 regval;
-
- switch (attr) {
- case hwmon_curr_input:
- amd_smn_read(amd_pci_dev_to_node_id(data->pdev),
- data->svi_addr[channel], &regval);
- *val = DIV_ROUND_CLOSEST(data->cfactor[channel] *
- (regval & 0xff),
- 1000);
- break;
- default:
- return -EOPNOTSUPP;
- }
- return 0;
-}
-
-static int k10temp_read_in(struct device *dev, u32 attr, int channel, long *val)
-{
- struct k10temp_data *data = dev_get_drvdata(dev);
- u32 regval;
-
- switch (attr) {
- case hwmon_in_input:
- amd_smn_read(amd_pci_dev_to_node_id(data->pdev),
- data->svi_addr[channel], &regval);
- regval = (regval >> 16) & 0xff;
- *val = DIV_ROUND_CLOSEST(155000 - regval * 625, 100);
- break;
default:
return -EOPNOTSUPP;
}
@@ -331,10 +257,6 @@ static int k10temp_read(struct device *dev, enum hwmon_sensor_types type,
switch (type) {
case hwmon_temp:
return k10temp_read_temp(dev, attr, channel, val);
- case hwmon_in:
- return k10temp_read_in(dev, attr, channel, val);
- case hwmon_curr:
- return k10temp_read_curr(dev, attr, channel, val);
default:
return -EOPNOTSUPP;
}
@@ -383,11 +305,6 @@ static umode_t k10temp_is_visible(const void *_data,
return 0;
}
break;
- case hwmon_in:
- case hwmon_curr:
- if (!data->show_current)
- return 0;
- break;
default:
return 0;
}
@@ -517,20 +434,10 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
case 0x8: /* Zen+ */
case 0x11: /* Zen APU */
case 0x18: /* Zen+ APU */
- data->show_current = !is_threadripper() && !is_epyc();
- data->svi_addr[0] = F17H_M01H_SVI_TEL_PLANE0;
- data->svi_addr[1] = F17H_M01H_SVI_TEL_PLANE1;
- data->cfactor[0] = F17H_M01H_CFACTOR_ICORE;
- data->cfactor[1] = F17H_M01H_CFACTOR_ISOC;
k10temp_get_ccd_support(pdev, data, 4);
break;
case 0x31: /* Zen2 Threadripper */
case 0x71: /* Zen2 */
- data->show_current = !is_threadripper() && !is_epyc();
- data->cfactor[0] = F17H_M31H_CFACTOR_ICORE;
- data->cfactor[1] = F17H_M31H_CFACTOR_ISOC;
- data->svi_addr[0] = F17H_M31H_SVI_TEL_PLANE0;
- data->svi_addr[1] = F17H_M31H_SVI_TEL_PLANE1;
k10temp_get_ccd_support(pdev, data, 8);
break;
}
@@ -542,11 +449,6 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
switch (boot_cpu_data.x86_model) {
case 0x0 ... 0x1: /* Zen3 */
- data->show_current = true;
- data->svi_addr[0] = F19H_M01_SVI_TEL_PLANE0;
- data->svi_addr[1] = F19H_M01_SVI_TEL_PLANE1;
- data->cfactor[0] = F19H_M01H_CFACTOR_ICORE;
- data->cfactor[1] = F19H_M01H_CFACTOR_ISOC;
k10temp_get_ccd_support(pdev, data, 8);
break;
}
diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
index 777439f48c14..111a91dc6b79 100644
--- a/drivers/hwmon/pwm-fan.c
+++ b/drivers/hwmon/pwm-fan.c
@@ -334,8 +334,18 @@ static int pwm_fan_probe(struct platform_device *pdev)
ctx->pwm_value = MAX_PWM;
- /* Set duty cycle to maximum allowed and enable PWM output */
pwm_init_state(ctx->pwm, &state);
+ /*
+ * __set_pwm assumes that MAX_PWM * (period - 1) fits into an unsigned
+	 * long. Check this here to prevent the fan from running at too
+	 * low a frequency.
+ */
+ if (state.period > ULONG_MAX / MAX_PWM + 1) {
+ dev_err(dev, "Configured period too big\n");
+ return -EINVAL;
+ }
+
+ /* Set duty cycle to maximum allowed and enable PWM output */
state.duty_cycle = ctx->pwm->args.period - 1;
state.enabled = true;
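/*
 * A minimal standalone sketch of the guard above, assuming MAX_PWM is
 * 255 as in this driver: any period at or below ULONG_MAX / MAX_PWM + 1
 * keeps MAX_PWM * (period - 1) within unsigned long, which is exactly
 * what __set_pwm relies on.
 */
#include <limits.h>
#include <stdio.h>

#define MAX_PWM 255UL

/* True when MAX_PWM * (period - 1) would overflow unsigned long. */
static int period_too_big(unsigned long period)
{
	return period > ULONG_MAX / MAX_PWM + 1;
}

int main(void)
{
	unsigned long limit = ULONG_MAX / MAX_PWM + 1;

	printf("largest safe period: %lu\n", limit);
	printf("limit ok: %d, limit + 1 ok: %d\n",
	       !period_too_big(limit), !period_too_big(limit + 1));
	return 0;
}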
diff --git a/drivers/hwmon/scmi-hwmon.c b/drivers/hwmon/scmi-hwmon.c
index 09ce30cba54b..17d064e58938 100644
--- a/drivers/hwmon/scmi-hwmon.c
+++ b/drivers/hwmon/scmi-hwmon.c
@@ -30,7 +30,7 @@ static inline u64 __pow10(u8 x)
static int scmi_hwmon_scale(const struct scmi_sensor_info *sensor, u64 *value)
{
- s8 scale = sensor->scale;
+ int scale = sensor->scale;
u64 f;
switch (sensor->type) {
diff --git a/drivers/hwspinlock/sirf_hwspinlock.c b/drivers/hwspinlock/sirf_hwspinlock.c
index 823d3c4f621e..a3f77120bad7 100644
--- a/drivers/hwspinlock/sirf_hwspinlock.c
+++ b/drivers/hwspinlock/sirf_hwspinlock.c
@@ -94,7 +94,7 @@ static struct platform_driver sirf_hwspinlock_driver = {
.probe = sirf_hwspinlock_probe,
.driver = {
.name = "atlas7_hwspinlock",
- .of_match_table = of_match_ptr(sirf_hwpinlock_ids),
+ .of_match_table = sirf_hwpinlock_ids,
},
};
diff --git a/drivers/hwspinlock/sprd_hwspinlock.c b/drivers/hwspinlock/sprd_hwspinlock.c
index 36dc8038bbb4..d221fc9d756d 100644
--- a/drivers/hwspinlock/sprd_hwspinlock.c
+++ b/drivers/hwspinlock/sprd_hwspinlock.c
@@ -4,7 +4,6 @@
* Copyright (C) 2017 Spreadtrum - http://www.spreadtrum.com
*/
-#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
@@ -15,7 +14,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
-#include <linux/slab.h>
#include "hwspinlock_internal.h"
@@ -148,21 +146,10 @@ static struct platform_driver sprd_hwspinlock_driver = {
.probe = sprd_hwspinlock_probe,
.driver = {
.name = "sprd_hwspinlock",
- .of_match_table = of_match_ptr(sprd_hwspinlock_of_match),
+ .of_match_table = sprd_hwspinlock_of_match,
},
};
-
-static int __init sprd_hwspinlock_init(void)
-{
- return platform_driver_register(&sprd_hwspinlock_driver);
-}
-postcore_initcall(sprd_hwspinlock_init);
-
-static void __exit sprd_hwspinlock_exit(void)
-{
- platform_driver_unregister(&sprd_hwspinlock_driver);
-}
-module_exit(sprd_hwspinlock_exit);
+module_platform_driver(sprd_hwspinlock_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Hardware spinlock driver for Spreadtrum");
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index 52acd77438ed..251e75c9ba9d 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -269,6 +269,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
{
+ /* Alder Lake-P */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x51a6),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
/* Alder Lake CPU */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x466f),
.driver_data = (kernel_ulong_t)&intel_th_2x,
diff --git a/drivers/hwtracing/stm/heartbeat.c b/drivers/hwtracing/stm/heartbeat.c
index 3e7df1c0477f..81d7b21d31ec 100644
--- a/drivers/hwtracing/stm/heartbeat.c
+++ b/drivers/hwtracing/stm/heartbeat.c
@@ -64,7 +64,7 @@ static void stm_heartbeat_unlink(struct stm_source_data *data)
static int stm_heartbeat_init(void)
{
- int i, ret = -ENOMEM;
+ int i, ret;
if (nr_devs < 0 || nr_devs > STM_HEARTBEAT_MAX)
return -EINVAL;
@@ -72,8 +72,10 @@ static int stm_heartbeat_init(void)
for (i = 0; i < nr_devs; i++) {
stm_heartbeat[i].data.name =
kasprintf(GFP_KERNEL, "heartbeat.%d", i);
- if (!stm_heartbeat[i].data.name)
+ if (!stm_heartbeat[i].data.name) {
+ ret = -ENOMEM;
goto fail_unregister;
+ }
stm_heartbeat[i].data.nr_chans = 1;
stm_heartbeat[i].data.link = stm_heartbeat_link;
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index d4d60ad0eda0..ab1f39ac39f4 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -1013,6 +1013,7 @@ config I2C_SIRF
config I2C_SPRD
tristate "Spreadtrum I2C interface"
depends on I2C=y && (ARCH_SPRD || COMPILE_TEST)
+ depends on COMMON_CLK
help
If you say yes to this option, support will be included for the
Spreadtrum I2C interface.
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index ae90713443fa..877fe3733a42 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -1449,7 +1449,7 @@ static int i801_add_mux(struct i801_priv *priv)
/* Register GPIO descriptor lookup table */
lookup = devm_kzalloc(dev,
- struct_size(lookup, table, mux_config->n_gpios),
+ struct_size(lookup, table, mux_config->n_gpios + 1),
GFP_KERNEL);
if (!lookup)
return -ENOMEM;
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index b444fbf1a262..a8e8af57e33f 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -241,6 +241,19 @@ static struct imx_i2c_hwdata vf610_i2c_hwdata = {
};
+static const struct platform_device_id imx_i2c_devtype[] = {
+ {
+ .name = "imx1-i2c",
+ .driver_data = (kernel_ulong_t)&imx1_i2c_hwdata,
+ }, {
+ .name = "imx21-i2c",
+ .driver_data = (kernel_ulong_t)&imx21_i2c_hwdata,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(platform, imx_i2c_devtype);
+
static const struct of_device_id i2c_imx_dt_ids[] = {
{ .compatible = "fsl,imx1-i2c", .data = &imx1_i2c_hwdata, },
{ .compatible = "fsl,imx21-i2c", .data = &imx21_i2c_hwdata, },
@@ -1330,7 +1343,11 @@ static int i2c_imx_probe(struct platform_device *pdev)
return -ENOMEM;
match = device_get_match_data(&pdev->dev);
- i2c_imx->hwdata = match;
+ if (match)
+ i2c_imx->hwdata = match;
+ else
+ i2c_imx->hwdata = (struct imx_i2c_hwdata *)
+ platform_get_device_id(pdev)->driver_data;
/* Setup i2c_imx driver structure */
strlcpy(i2c_imx->adapter.name, pdev->name, sizeof(i2c_imx->adapter.name));
@@ -1498,6 +1515,7 @@ static struct platform_driver i2c_imx_driver = {
.of_match_table = i2c_imx_dt_ids,
.acpi_match_table = i2c_imx_acpi_ids,
},
+ .id_table = imx_i2c_devtype,
};
static int __init i2c_adap_imx_init(void)
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index 33de99b7bc20..2ffd2f354d0a 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -38,6 +38,7 @@
#define I2C_IO_CONFIG_OPEN_DRAIN 0x0003
#define I2C_IO_CONFIG_PUSH_PULL 0x0000
#define I2C_SOFT_RST 0x0001
+#define I2C_HANDSHAKE_RST 0x0020
#define I2C_FIFO_ADDR_CLR 0x0001
#define I2C_DELAY_LEN 0x0002
#define I2C_TIME_CLR_VALUE 0x0000
@@ -45,6 +46,7 @@
#define I2C_WRRD_TRANAC_VALUE 0x0002
#define I2C_RD_TRANAC_VALUE 0x0001
#define I2C_SCL_MIS_COMP_VALUE 0x0000
+#define I2C_CHN_CLR_FLAG 0x0000
#define I2C_DMA_CON_TX 0x0000
#define I2C_DMA_CON_RX 0x0001
@@ -54,7 +56,9 @@
#define I2C_DMA_START_EN 0x0001
#define I2C_DMA_INT_FLAG_NONE 0x0000
#define I2C_DMA_CLR_FLAG 0x0000
+#define I2C_DMA_WARM_RST 0x0001
#define I2C_DMA_HARD_RST 0x0002
+#define I2C_DMA_HANDSHAKE_RST 0x0004
#define MAX_SAMPLE_CNT_DIV 8
#define MAX_STEP_CNT_DIV 64
@@ -475,11 +479,24 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
{
u16 control_reg;
- writel(I2C_DMA_HARD_RST, i2c->pdmabase + OFFSET_RST);
- udelay(50);
- writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
-
- mtk_i2c_writew(i2c, I2C_SOFT_RST, OFFSET_SOFTRESET);
+ if (i2c->dev_comp->dma_sync) {
+ writel(I2C_DMA_WARM_RST, i2c->pdmabase + OFFSET_RST);
+ udelay(10);
+ writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
+ udelay(10);
+ writel(I2C_DMA_HANDSHAKE_RST | I2C_DMA_HARD_RST,
+ i2c->pdmabase + OFFSET_RST);
+ mtk_i2c_writew(i2c, I2C_HANDSHAKE_RST | I2C_SOFT_RST,
+ OFFSET_SOFTRESET);
+ udelay(10);
+ writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
+ mtk_i2c_writew(i2c, I2C_CHN_CLR_FLAG, OFFSET_SOFTRESET);
+ } else {
+ writel(I2C_DMA_HARD_RST, i2c->pdmabase + OFFSET_RST);
+ udelay(50);
+ writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
+ mtk_i2c_writew(i2c, I2C_SOFT_RST, OFFSET_SOFTRESET);
+ }
/* Set ioconfig */
if (i2c->use_push_pull)
@@ -1258,7 +1275,8 @@ static int mtk_i2c_probe(struct platform_device *pdev)
mtk_i2c_clock_disable(i2c);
ret = devm_request_irq(&pdev->dev, irq, mtk_i2c_irq,
- IRQF_TRIGGER_NONE, I2C_DRV_NAME, i2c);
+ IRQF_NO_SUSPEND | IRQF_TRIGGER_NONE,
+ I2C_DRV_NAME, i2c);
if (ret < 0) {
dev_err(&pdev->dev,
"Request I2C IRQ %d fail\n", irq);
@@ -1285,7 +1303,16 @@ static int mtk_i2c_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM_SLEEP
-static int mtk_i2c_resume(struct device *dev)
+static int mtk_i2c_suspend_noirq(struct device *dev)
+{
+ struct mtk_i2c *i2c = dev_get_drvdata(dev);
+
+ i2c_mark_adapter_suspended(&i2c->adap);
+
+ return 0;
+}
+
+static int mtk_i2c_resume_noirq(struct device *dev)
{
int ret;
struct mtk_i2c *i2c = dev_get_drvdata(dev);
@@ -1300,12 +1327,15 @@ static int mtk_i2c_resume(struct device *dev)
mtk_i2c_clock_disable(i2c);
+ i2c_mark_adapter_resumed(&i2c->adap);
+
return 0;
}
#endif
static const struct dev_pm_ops mtk_i2c_pm = {
- SET_SYSTEM_SLEEP_PM_OPS(NULL, mtk_i2c_resume)
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_i2c_suspend_noirq,
+ mtk_i2c_resume_noirq)
};
static struct platform_driver mtk_i2c_driver = {
diff --git a/drivers/i2c/busses/i2c-octeon-core.c b/drivers/i2c/busses/i2c-octeon-core.c
index d9607905dc2f..845eda70b8ca 100644
--- a/drivers/i2c/busses/i2c-octeon-core.c
+++ b/drivers/i2c/busses/i2c-octeon-core.c
@@ -347,7 +347,7 @@ static int octeon_i2c_read(struct octeon_i2c *i2c, int target,
if (result)
return result;
if (recv_len && i == 0) {
- if (data[i] > I2C_SMBUS_BLOCK_MAX + 1)
+ if (data[i] > I2C_SMBUS_BLOCK_MAX)
return -EPROTO;
length += data[i];
}
diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c
index 19cda6742423..2917fecf6c80 100644
--- a/drivers/i2c/busses/i2c-sprd.c
+++ b/drivers/i2c/busses/i2c-sprd.c
@@ -72,6 +72,8 @@
/* timeout (ms) for pm runtime autosuspend */
#define SPRD_I2C_PM_TIMEOUT 1000
+/* timeout (ms) for a message transfer */
+#define I2C_XFER_TIMEOUT 1000
/* SPRD i2c data structure */
struct sprd_i2c {
@@ -244,6 +246,7 @@ static int sprd_i2c_handle_msg(struct i2c_adapter *i2c_adap,
struct i2c_msg *msg, bool is_last_msg)
{
struct sprd_i2c *i2c_dev = i2c_adap->algo_data;
+ unsigned long time_left;
i2c_dev->msg = msg;
i2c_dev->buf = msg->buf;
@@ -273,7 +276,10 @@ static int sprd_i2c_handle_msg(struct i2c_adapter *i2c_adap,
sprd_i2c_opt_start(i2c_dev);
- wait_for_completion(&i2c_dev->complete);
+ time_left = wait_for_completion_timeout(&i2c_dev->complete,
+ msecs_to_jiffies(I2C_XFER_TIMEOUT));
+ if (!time_left)
+ return -ETIMEDOUT;
return i2c_dev->err;
}
diff --git a/drivers/i2c/busses/i2c-tegra-bpmp.c b/drivers/i2c/busses/i2c-tegra-bpmp.c
index ec7a7e917edd..c0c7d01473f2 100644
--- a/drivers/i2c/busses/i2c-tegra-bpmp.c
+++ b/drivers/i2c/busses/i2c-tegra-bpmp.c
@@ -80,7 +80,7 @@ static int tegra_bpmp_xlate_flags(u16 flags, u16 *out)
flags &= ~I2C_M_RECV_LEN;
}
- return (flags != 0) ? -EINVAL : 0;
+ return 0;
}
/**
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 6f08c0c3238d..8b113ae32dc7 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -326,6 +326,8 @@ static void i2c_writel(struct tegra_i2c_dev *i2c_dev, u32 val, unsigned int reg)
/* read back register to make sure that register writes completed */
if (reg != I2C_TX_FIFO)
readl_relaxed(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg));
+ else if (i2c_dev->is_vi)
+ readl_relaxed(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, I2C_INT_STATUS));
}
static u32 i2c_readl(struct tegra_i2c_dev *i2c_dev, unsigned int reg)
@@ -339,6 +341,21 @@ static void i2c_writesl(struct tegra_i2c_dev *i2c_dev, void *data,
writesl(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg), data, len);
}
+static void i2c_writesl_vi(struct tegra_i2c_dev *i2c_dev, void *data,
+ unsigned int reg, unsigned int len)
+{
+ u32 *data32 = data;
+
+ /*
+ * VI I2C controller has known hardware bug where writes get stuck
+	 * The VI I2C controller has a known hardware bug where writes get
+	 * stuck when multiple back-to-back writes hit the TX_FIFO register.
+	 * The recommended software workaround is to read an I2C register
+	 * after each TX_FIFO write to flush out the data.
+ while (len--)
+ i2c_writel(i2c_dev, *data32++, reg);
+}
+
static void i2c_readsl(struct tegra_i2c_dev *i2c_dev, void *data,
unsigned int reg, unsigned int len)
{
@@ -533,7 +550,7 @@ static int tegra_i2c_poll_register(struct tegra_i2c_dev *i2c_dev,
void __iomem *addr = i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg);
u32 val;
- if (!i2c_dev->atomic_mode)
+ if (!i2c_dev->atomic_mode && !in_irq())
return readl_relaxed_poll_timeout(addr, val, !(val & mask),
delay_us, timeout_us);
@@ -811,7 +828,10 @@ static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev)
i2c_dev->msg_buf_remaining = buf_remaining;
i2c_dev->msg_buf = buf + words_to_transfer * BYTES_PER_FIFO_WORD;
- i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
+ if (i2c_dev->is_vi)
+ i2c_writesl_vi(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
+ else
+ i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
buf += words_to_transfer * BYTES_PER_FIFO_WORD;
}
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
index 1c6b78ad5ade..b61bf53ec07a 100644
--- a/drivers/i3c/master.c
+++ b/drivers/i3c/master.c
@@ -2537,7 +2537,7 @@ int i3c_master_register(struct i3c_master_controller *master,
ret = i3c_master_bus_init(master);
if (ret)
- goto err_put_dev;
+ goto err_destroy_wq;
ret = device_add(&master->dev);
if (ret)
@@ -2568,6 +2568,9 @@ err_del_dev:
err_cleanup_bus:
i3c_master_bus_cleanup(master);
+err_destroy_wq:
+ destroy_workqueue(master->wq);
+
err_put_dev:
put_device(&master->dev);
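/*
 * The new err_destroy_wq rung keeps the unwind ladder in reverse order
 * of acquisition: the workqueue already exists when i3c_master_bus_init()
 * can fail, so its teardown sits between the bus cleanup and the final
 * put_device(). A generic sketch of the pattern; alloc_a, alloc_b and
 * free_a are placeholders, not kernel APIs.
 */
#include <stdio.h>

static int alloc_a(void) { return 0; }		/* stands in for the wq allocation */
static int alloc_b(void) { return -1; }		/* stands in for the failing bus init */
static void free_a(void) { puts("undo alloc_a"); }

static int setup(void)
{
	int ret;

	ret = alloc_a();
	if (ret)
		goto err;
	ret = alloc_b();
	if (ret)
		goto err_free_a;	/* unwind in reverse order */
	return 0;

err_free_a:
	free_a();
err:
	return ret;
}

int main(void)
{
	printf("setup() = %d\n", setup());
	return 0;
}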
diff --git a/drivers/i3c/master/Kconfig b/drivers/i3c/master/Kconfig
index 4e80a1fcbf91..e68f15f4b4d0 100644
--- a/drivers/i3c/master/Kconfig
+++ b/drivers/i3c/master/Kconfig
@@ -21,3 +21,16 @@ config DW_I3C_MASTER
This driver can also be built as a module. If so, the module
will be called dw-i3c-master.
+
+config MIPI_I3C_HCI
+ tristate "MIPI I3C Host Controller Interface driver (EXPERIMENTAL)"
+ depends on I3C
+ help
+	  Support for hardware following the MIPI Alliance's I3C Host Controller
+ Interface specification.
+
+ For details please see:
+ https://www.mipi.org/specifications/i3c-hci
+
+ This driver can also be built as a module. If so, the module will be
+ called mipi-i3c-hci.
diff --git a/drivers/i3c/master/Makefile b/drivers/i3c/master/Makefile
index 7eea9e086144..b892fd4cafad 100644
--- a/drivers/i3c/master/Makefile
+++ b/drivers/i3c/master/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_CDNS_I3C_MASTER) += i3c-master-cdns.o
obj-$(CONFIG_DW_I3C_MASTER) += dw-i3c-master.o
+obj-$(CONFIG_MIPI_I3C_HCI) += mipi-i3c-hci/
diff --git a/drivers/i3c/master/mipi-i3c-hci/Makefile b/drivers/i3c/master/mipi-i3c-hci/Makefile
new file mode 100644
index 000000000000..a658e7b8262c
--- /dev/null
+++ b/drivers/i3c/master/mipi-i3c-hci/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: BSD-3-Clause
+
+obj-$(CONFIG_MIPI_I3C_HCI) += mipi-i3c-hci.o
+mipi-i3c-hci-y := core.o ext_caps.o pio.o dma.o \
+ cmd_v1.o cmd_v2.o \
+ dat_v1.o dct_v1.o
diff --git a/drivers/i3c/master/mipi-i3c-hci/cmd.h b/drivers/i3c/master/mipi-i3c-hci/cmd.h
new file mode 100644
index 000000000000..1d6dd2c5d01a
--- /dev/null
+++ b/drivers/i3c/master/mipi-i3c-hci/cmd.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/*
+ * Copyright (c) 2020, MIPI Alliance, Inc.
+ *
+ * Author: Nicolas Pitre <npitre@baylibre.com>
+ *
+ * Common command/response related stuff
+ */
+
+#ifndef CMD_H
+#define CMD_H
+
+/*
+ * Those bits are common to all descriptor formats and
+ * may be manipulated by the core code.
+ */
+#define CMD_0_TOC W0_BIT_(31)
+#define CMD_0_ROC W0_BIT_(30)
+#define CMD_0_ATTR W0_MASK(2, 0)
+
+/*
+ * Response Descriptor Structure
+ */
+#define RESP_STATUS(resp) FIELD_GET(GENMASK(31, 28), resp)
+#define RESP_TID(resp) FIELD_GET(GENMASK(27, 24), resp)
+#define RESP_DATA_LENGTH(resp) FIELD_GET(GENMASK(21, 0), resp)
+
+#define RESP_ERR_FIELD GENMASK(31, 28)
+
+enum hci_resp_err {
+ RESP_SUCCESS = 0x0,
+ RESP_ERR_CRC = 0x1,
+ RESP_ERR_PARITY = 0x2,
+ RESP_ERR_FRAME = 0x3,
+ RESP_ERR_ADDR_HEADER = 0x4,
+ RESP_ERR_BCAST_NACK_7E = 0x4,
+ RESP_ERR_NACK = 0x5,
+ RESP_ERR_OVL = 0x6,
+ RESP_ERR_I3C_SHORT_READ = 0x7,
+ RESP_ERR_HC_TERMINATED = 0x8,
+ RESP_ERR_I2C_WR_DATA_NACK = 0x9,
+ RESP_ERR_BUS_XFER_ABORTED = 0x9,
+ RESP_ERR_NOT_SUPPORTED = 0xa,
+ RESP_ERR_ABORTED_WITH_CRC = 0xb,
+	/* 0xc to 0xf are reserved for transfer-specific errors */
+};
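/*
 * For illustration, a standalone sketch decoding one response word with
 * the layout above; the FIELD_GET() masks are re-expressed as plain
 * shifts and the sample word is made up.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned int resp_status(uint32_t resp) { return resp >> 28; }		/* bits 31:28 */
static unsigned int resp_tid(uint32_t resp)    { return (resp >> 24) & 0xf; }	/* bits 27:24 */
static unsigned int resp_len(uint32_t resp)    { return resp & 0x3fffff; }	/* bits 21:0 */

int main(void)
{
	uint32_t resp = 0x51000010;	/* status RESP_ERR_NACK, TID 1, length 16 */

	printf("status=%#x tid=%u len=%u\n",
	       resp_status(resp), resp_tid(resp), resp_len(resp));
	return 0;
}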
+
+/* TID generation (4 bits wide in all cases) */
+#define hci_get_tid(bits) \
+ (atomic_inc_return_relaxed(&hci->next_cmd_tid) % (1U << 4))
+
+/* This abstracts operations with our command descriptor formats */
+struct hci_cmd_ops {
+ int (*prep_ccc)(struct i3c_hci *hci, struct hci_xfer *xfer,
+ u8 ccc_addr, u8 ccc_cmd, bool raw);
+ void (*prep_i3c_xfer)(struct i3c_hci *hci, struct i3c_dev_desc *dev,
+ struct hci_xfer *xfer);
+ void (*prep_i2c_xfer)(struct i3c_hci *hci, struct i2c_dev_desc *dev,
+ struct hci_xfer *xfer);
+ int (*perform_daa)(struct i3c_hci *hci);
+};
+
+/* Our various instances */
+extern const struct hci_cmd_ops mipi_i3c_hci_cmd_v1;
+extern const struct hci_cmd_ops mipi_i3c_hci_cmd_v2;
+
+#endif
diff --git a/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c b/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c
new file mode 100644
index 000000000000..d97c3175e0e2
--- /dev/null
+++ b/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c
@@ -0,0 +1,378 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2020, MIPI Alliance, Inc.
+ *
+ * Author: Nicolas Pitre <npitre@baylibre.com>
+ *
+ * I3C HCI v1.0/v1.1 Command Descriptor Handling
+ */
+
+#include <linux/bitfield.h>
+#include <linux/i3c/master.h>
+
+#include "hci.h"
+#include "cmd.h"
+#include "dat.h"
+#include "dct.h"
+
+
+/*
+ * Address Assignment Command
+ */
+
+#define CMD_0_ATTR_A FIELD_PREP(CMD_0_ATTR, 0x2)
+
+#define CMD_A0_TOC W0_BIT_(31)
+#define CMD_A0_ROC W0_BIT_(30)
+#define CMD_A0_DEV_COUNT(v) FIELD_PREP(W0_MASK(29, 26), v)
+#define CMD_A0_DEV_INDEX(v) FIELD_PREP(W0_MASK(20, 16), v)
+#define CMD_A0_CMD(v) FIELD_PREP(W0_MASK(14, 7), v)
+#define CMD_A0_TID(v) FIELD_PREP(W0_MASK( 6, 3), v)
+
+/*
+ * Immediate Data Transfer Command
+ */
+
+#define CMD_0_ATTR_I FIELD_PREP(CMD_0_ATTR, 0x1)
+
+#define CMD_I1_DATA_BYTE_4(v) FIELD_PREP(W1_MASK(63, 56), v)
+#define CMD_I1_DATA_BYTE_3(v) FIELD_PREP(W1_MASK(55, 48), v)
+#define CMD_I1_DATA_BYTE_2(v) FIELD_PREP(W1_MASK(47, 40), v)
+#define CMD_I1_DATA_BYTE_1(v) FIELD_PREP(W1_MASK(39, 32), v)
+#define CMD_I1_DEF_BYTE(v) FIELD_PREP(W1_MASK(39, 32), v)
+#define CMD_I0_TOC W0_BIT_(31)
+#define CMD_I0_ROC W0_BIT_(30)
+#define CMD_I0_RNW W0_BIT_(29)
+#define CMD_I0_MODE(v) FIELD_PREP(W0_MASK(28, 26), v)
+#define CMD_I0_DTT(v) FIELD_PREP(W0_MASK(25, 23), v)
+#define CMD_I0_DEV_INDEX(v) FIELD_PREP(W0_MASK(20, 16), v)
+#define CMD_I0_CP W0_BIT_(15)
+#define CMD_I0_CMD(v) FIELD_PREP(W0_MASK(14, 7), v)
+#define CMD_I0_TID(v) FIELD_PREP(W0_MASK( 6, 3), v)
+
+/*
+ * Regular Data Transfer Command
+ */
+
+#define CMD_0_ATTR_R FIELD_PREP(CMD_0_ATTR, 0x0)
+
+#define CMD_R1_DATA_LENGTH(v) FIELD_PREP(W1_MASK(63, 48), v)
+#define CMD_R1_DEF_BYTE(v) FIELD_PREP(W1_MASK(39, 32), v)
+#define CMD_R0_TOC W0_BIT_(31)
+#define CMD_R0_ROC W0_BIT_(30)
+#define CMD_R0_RNW W0_BIT_(29)
+#define CMD_R0_MODE(v) FIELD_PREP(W0_MASK(28, 26), v)
+#define CMD_R0_DBP W0_BIT_(25)
+#define CMD_R0_DEV_INDEX(v) FIELD_PREP(W0_MASK(20, 16), v)
+#define CMD_R0_CP W0_BIT_(15)
+#define CMD_R0_CMD(v) FIELD_PREP(W0_MASK(14, 7), v)
+#define CMD_R0_TID(v) FIELD_PREP(W0_MASK( 6, 3), v)
+
+/*
+ * Combo Transfer (Write + Write/Read) Command
+ */
+
+#define CMD_0_ATTR_C FIELD_PREP(CMD_0_ATTR, 0x3)
+
+#define CMD_C1_DATA_LENGTH(v) FIELD_PREP(W1_MASK(63, 48), v)
+#define CMD_C1_OFFSET(v) FIELD_PREP(W1_MASK(47, 32), v)
+#define CMD_C0_TOC W0_BIT_(31)
+#define CMD_C0_ROC W0_BIT_(30)
+#define CMD_C0_RNW W0_BIT_(29)
+#define CMD_C0_MODE(v) FIELD_PREP(W0_MASK(28, 26), v)
+#define CMD_C0_16_BIT_SUBOFFSET W0_BIT_(25)
+#define CMD_C0_FIRST_PHASE_MODE W0_BIT_(24)
+#define CMD_C0_DATA_LENGTH_POSITION(v) FIELD_PREP(W0_MASK(23, 22), v)
+#define CMD_C0_DEV_INDEX(v) FIELD_PREP(W0_MASK(20, 16), v)
+#define CMD_C0_CP W0_BIT_(15)
+#define CMD_C0_CMD(v) FIELD_PREP(W0_MASK(14, 7), v)
+#define CMD_C0_TID(v) FIELD_PREP(W0_MASK( 6, 3), v)
+
+/*
+ * Internal Control Command
+ */
+
+#define CMD_0_ATTR_M FIELD_PREP(CMD_0_ATTR, 0x7)
+
+#define CMD_M1_VENDOR_SPECIFIC W1_MASK(63, 32)
+#define CMD_M0_MIPI_RESERVED W0_MASK(31, 12)
+#define CMD_M0_MIPI_CMD W0_MASK(11, 8)
+#define CMD_M0_VENDOR_INFO_PRESENT W0_BIT_( 7)
+#define CMD_M0_TID(v) FIELD_PREP(W0_MASK( 6, 3), v)
+
+
+/* Data Transfer Speed and Mode */
+enum hci_cmd_mode {
+ MODE_I3C_SDR0 = 0x0,
+ MODE_I3C_SDR1 = 0x1,
+ MODE_I3C_SDR2 = 0x2,
+ MODE_I3C_SDR3 = 0x3,
+ MODE_I3C_SDR4 = 0x4,
+ MODE_I3C_HDR_TSx = 0x5,
+ MODE_I3C_HDR_DDR = 0x6,
+ MODE_I3C_HDR_BT = 0x7,
+ MODE_I3C_Fm_FmP = 0x8,
+ MODE_I2C_Fm = 0x0,
+ MODE_I2C_FmP = 0x1,
+ MODE_I2C_UD1 = 0x2,
+ MODE_I2C_UD2 = 0x3,
+ MODE_I2C_UD3 = 0x4,
+};
+
+static enum hci_cmd_mode get_i3c_mode(struct i3c_hci *hci)
+{
+ struct i3c_bus *bus = i3c_master_get_bus(&hci->master);
+
+ if (bus->scl_rate.i3c >= 12500000)
+ return MODE_I3C_SDR0;
+ if (bus->scl_rate.i3c > 8000000)
+ return MODE_I3C_SDR1;
+ if (bus->scl_rate.i3c > 6000000)
+ return MODE_I3C_SDR2;
+ if (bus->scl_rate.i3c > 4000000)
+ return MODE_I3C_SDR3;
+ if (bus->scl_rate.i3c > 2000000)
+ return MODE_I3C_SDR4;
+ return MODE_I3C_Fm_FmP;
+}
+
+static enum hci_cmd_mode get_i2c_mode(struct i3c_hci *hci)
+{
+ struct i3c_bus *bus = i3c_master_get_bus(&hci->master);
+
+ if (bus->scl_rate.i2c >= 1000000)
+ return MODE_I2C_FmP;
+ return MODE_I2C_Fm;
+}
+
+static void fill_data_bytes(struct hci_xfer *xfer, u8 *data,
+ unsigned int data_len)
+{
+ xfer->cmd_desc[1] = 0;
+ switch (data_len) {
+ case 4:
+ xfer->cmd_desc[1] |= CMD_I1_DATA_BYTE_4(data[3]);
+ fallthrough;
+ case 3:
+ xfer->cmd_desc[1] |= CMD_I1_DATA_BYTE_3(data[2]);
+ fallthrough;
+ case 2:
+ xfer->cmd_desc[1] |= CMD_I1_DATA_BYTE_2(data[1]);
+ fallthrough;
+ case 1:
+ xfer->cmd_desc[1] |= CMD_I1_DATA_BYTE_1(data[0]);
+ fallthrough;
+ case 0:
+ break;
+ }
+ /* we consumed all the data with the cmd descriptor */
+ xfer->data = NULL;
+}
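/*
 * A concrete illustration of the fallthrough packing above: the second
 * descriptor word covers bits 63:32, so CMD_I1_DATA_BYTE_1 (bits 39:32)
 * is its low byte and data[i] lands at bit offset 8 * i. This sketch
 * reproduces that arithmetic in plain C.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t pack_idb(const uint8_t *data, unsigned int len)
{
	uint32_t w = 0;
	unsigned int i;

	for (i = 0; i < len && i < 4; i++)
		w |= (uint32_t)data[i] << (8 * i);
	return w;
}

int main(void)
{
	const uint8_t data[] = { 0x11, 0x22, 0x33, 0x44 };

	printf("cmd_desc[1] = %#x\n", pack_idb(data, 4));	/* 0x44332211 */
	return 0;
}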
+
+static int hci_cmd_v1_prep_ccc(struct i3c_hci *hci,
+ struct hci_xfer *xfer,
+ u8 ccc_addr, u8 ccc_cmd, bool raw)
+{
+ unsigned int dat_idx = 0;
+ enum hci_cmd_mode mode = get_i3c_mode(hci);
+ u8 *data = xfer->data;
+ unsigned int data_len = xfer->data_len;
+ bool rnw = xfer->rnw;
+ int ret;
+
+ /* this should never happen */
+ if (WARN_ON(raw))
+ return -EINVAL;
+
+ if (ccc_addr != I3C_BROADCAST_ADDR) {
+ ret = mipi_i3c_hci_dat_v1.get_index(hci, ccc_addr);
+ if (ret < 0)
+ return ret;
+ dat_idx = ret;
+ }
+
+ xfer->cmd_tid = hci_get_tid();
+
+ if (!rnw && data_len <= 4) {
+ /* we use an Immediate Data Transfer Command */
+ xfer->cmd_desc[0] =
+ CMD_0_ATTR_I |
+ CMD_I0_TID(xfer->cmd_tid) |
+ CMD_I0_CMD(ccc_cmd) | CMD_I0_CP |
+ CMD_I0_DEV_INDEX(dat_idx) |
+ CMD_I0_DTT(data_len) |
+ CMD_I0_MODE(mode);
+ fill_data_bytes(xfer, data, data_len);
+ } else {
+ /* we use a Regular Data Transfer Command */
+ xfer->cmd_desc[0] =
+ CMD_0_ATTR_R |
+ CMD_R0_TID(xfer->cmd_tid) |
+ CMD_R0_CMD(ccc_cmd) | CMD_R0_CP |
+ CMD_R0_DEV_INDEX(dat_idx) |
+ CMD_R0_MODE(mode) |
+ (rnw ? CMD_R0_RNW : 0);
+ xfer->cmd_desc[1] =
+ CMD_R1_DATA_LENGTH(data_len);
+ }
+
+ return 0;
+}
+
+static void hci_cmd_v1_prep_i3c_xfer(struct i3c_hci *hci,
+ struct i3c_dev_desc *dev,
+ struct hci_xfer *xfer)
+{
+ struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
+ unsigned int dat_idx = dev_data->dat_idx;
+ enum hci_cmd_mode mode = get_i3c_mode(hci);
+ u8 *data = xfer->data;
+ unsigned int data_len = xfer->data_len;
+ bool rnw = xfer->rnw;
+
+ xfer->cmd_tid = hci_get_tid();
+
+ if (!rnw && data_len <= 4) {
+ /* we use an Immediate Data Transfer Command */
+ xfer->cmd_desc[0] =
+ CMD_0_ATTR_I |
+ CMD_I0_TID(xfer->cmd_tid) |
+ CMD_I0_DEV_INDEX(dat_idx) |
+ CMD_I0_DTT(data_len) |
+ CMD_I0_MODE(mode);
+ fill_data_bytes(xfer, data, data_len);
+ } else {
+ /* we use a Regular Data Transfer Command */
+ xfer->cmd_desc[0] =
+ CMD_0_ATTR_R |
+ CMD_R0_TID(xfer->cmd_tid) |
+ CMD_R0_DEV_INDEX(dat_idx) |
+ CMD_R0_MODE(mode) |
+ (rnw ? CMD_R0_RNW : 0);
+ xfer->cmd_desc[1] =
+ CMD_R1_DATA_LENGTH(data_len);
+ }
+}
+
+static void hci_cmd_v1_prep_i2c_xfer(struct i3c_hci *hci,
+ struct i2c_dev_desc *dev,
+ struct hci_xfer *xfer)
+{
+ struct i3c_hci_dev_data *dev_data = i2c_dev_get_master_data(dev);
+ unsigned int dat_idx = dev_data->dat_idx;
+ enum hci_cmd_mode mode = get_i2c_mode(hci);
+ u8 *data = xfer->data;
+ unsigned int data_len = xfer->data_len;
+ bool rnw = xfer->rnw;
+
+ xfer->cmd_tid = hci_get_tid();
+
+ if (!rnw && data_len <= 4) {
+ /* we use an Immediate Data Transfer Command */
+ xfer->cmd_desc[0] =
+ CMD_0_ATTR_I |
+ CMD_I0_TID(xfer->cmd_tid) |
+ CMD_I0_DEV_INDEX(dat_idx) |
+ CMD_I0_DTT(data_len) |
+ CMD_I0_MODE(mode);
+ fill_data_bytes(xfer, data, data_len);
+ } else {
+ /* we use a Regular Data Transfer Command */
+ xfer->cmd_desc[0] =
+ CMD_0_ATTR_R |
+ CMD_R0_TID(xfer->cmd_tid) |
+ CMD_R0_DEV_INDEX(dat_idx) |
+ CMD_R0_MODE(mode) |
+ (rnw ? CMD_R0_RNW : 0);
+ xfer->cmd_desc[1] =
+ CMD_R1_DATA_LENGTH(data_len);
+ }
+}
+
+static int hci_cmd_v1_daa(struct i3c_hci *hci)
+{
+ struct hci_xfer *xfer;
+ int ret, dat_idx = -1;
+ u8 next_addr = 0;
+ u64 pid;
+ unsigned int dcr, bcr;
+ DECLARE_COMPLETION_ONSTACK(done);
+
+ xfer = hci_alloc_xfer(2);
+ if (!xfer)
+ return -ENOMEM;
+
+ /*
+ * Simple for now: we allocate a temporary DAT entry, do a single
+	 * DAA, register the device (which allocates its own DAT entry
+	 * via the core callback), then free the temporary DAT entry.
+	 * Loop until there are no more devices to assign an address to.
+	 * Yes, there is room for improvement.
+ */
+ for (;;) {
+ ret = mipi_i3c_hci_dat_v1.alloc_entry(hci);
+ if (ret < 0)
+ break;
+ dat_idx = ret;
+ ret = i3c_master_get_free_addr(&hci->master, next_addr);
+ if (ret < 0)
+ break;
+ next_addr = ret;
+
+ DBG("next_addr = 0x%02x, DAA using DAT %d", next_addr, dat_idx);
+ mipi_i3c_hci_dat_v1.set_dynamic_addr(hci, dat_idx, next_addr);
+ mipi_i3c_hci_dct_index_reset(hci);
+
+ xfer->cmd_tid = hci_get_tid();
+ xfer->cmd_desc[0] =
+ CMD_0_ATTR_A |
+ CMD_A0_TID(xfer->cmd_tid) |
+ CMD_A0_CMD(I3C_CCC_ENTDAA) |
+ CMD_A0_DEV_INDEX(dat_idx) |
+ CMD_A0_DEV_COUNT(1) |
+ CMD_A0_ROC | CMD_A0_TOC;
+ xfer->cmd_desc[1] = 0;
+ hci->io->queue_xfer(hci, xfer, 1);
+ if (!wait_for_completion_timeout(&done, HZ) &&
+ hci->io->dequeue_xfer(hci, xfer, 1)) {
+ ret = -ETIME;
+ break;
+ }
+		if (RESP_STATUS(xfer[0].response) == RESP_ERR_NACK &&
+		    RESP_DATA_LENGTH(xfer[0].response) == 1) {
+ ret = 0; /* no more devices to be assigned */
+ break;
+ }
+ if (RESP_STATUS(xfer[0].response) != RESP_SUCCESS) {
+ ret = -EIO;
+ break;
+ }
+
+ i3c_hci_dct_get_val(hci, 0, &pid, &dcr, &bcr);
+ DBG("assigned address %#x to device PID=0x%llx DCR=%#x BCR=%#x",
+ next_addr, pid, dcr, bcr);
+
+ mipi_i3c_hci_dat_v1.free_entry(hci, dat_idx);
+ dat_idx = -1;
+
+ /*
+		 * TODO: Extend the subsystem layer to allow for registering a
+		 * new device and providing BCR/DCR/PID at the same time.
+ */
+ ret = i3c_master_add_i3c_dev_locked(&hci->master, next_addr);
+ if (ret)
+ break;
+ }
+
+ if (dat_idx >= 0)
+ mipi_i3c_hci_dat_v1.free_entry(hci, dat_idx);
+ hci_free_xfer(xfer, 1);
+ return ret;
+}
+
+const struct hci_cmd_ops mipi_i3c_hci_cmd_v1 = {
+ .prep_ccc = hci_cmd_v1_prep_ccc,
+ .prep_i3c_xfer = hci_cmd_v1_prep_i3c_xfer,
+ .prep_i2c_xfer = hci_cmd_v1_prep_i2c_xfer,
+ .perform_daa = hci_cmd_v1_daa,
+};
diff --git a/drivers/i3c/master/mipi-i3c-hci/cmd_v2.c b/drivers/i3c/master/mipi-i3c-hci/cmd_v2.c
new file mode 100644
index 000000000000..4493b2b067cb
--- /dev/null
+++ b/drivers/i3c/master/mipi-i3c-hci/cmd_v2.c
@@ -0,0 +1,316 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2020, MIPI Alliance, Inc.
+ *
+ * Author: Nicolas Pitre <npitre@baylibre.com>
+ *
+ * I3C HCI v2.0 Command Descriptor Handling
+ *
+ * Note: The I3C HCI v2.0 spec is still in flux. The code here will change.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/i3c/master.h>
+
+#include "hci.h"
+#include "cmd.h"
+#include "xfer_mode_rate.h"
+
+
+/*
+ * Unified Data Transfer Command
+ */
+
+#define CMD_0_ATTR_U FIELD_PREP(CMD_0_ATTR, 0x4)
+
+#define CMD_U3_HDR_TSP_ML_CTRL(v) FIELD_PREP(W3_MASK(107, 104), v)
+#define CMD_U3_IDB4(v) FIELD_PREP(W3_MASK(103, 96), v)
+#define CMD_U3_HDR_CMD(v) FIELD_PREP(W3_MASK(103, 96), v)
+#define CMD_U2_IDB3(v) FIELD_PREP(W2_MASK( 95, 88), v)
+#define CMD_U2_HDR_BT(v) FIELD_PREP(W2_MASK( 95, 88), v)
+#define CMD_U2_IDB2(v) FIELD_PREP(W2_MASK( 87, 80), v)
+#define CMD_U2_BT_CMD2(v) FIELD_PREP(W2_MASK( 87, 80), v)
+#define CMD_U2_IDB1(v) FIELD_PREP(W2_MASK( 79, 72), v)
+#define CMD_U2_BT_CMD1(v) FIELD_PREP(W2_MASK( 79, 72), v)
+#define CMD_U2_IDB0(v) FIELD_PREP(W2_MASK( 71, 64), v)
+#define CMD_U2_BT_CMD0(v) FIELD_PREP(W2_MASK( 71, 64), v)
+#define CMD_U1_ERR_HANDLING(v) FIELD_PREP(W1_MASK( 63, 62), v)
+#define CMD_U1_ADD_FUNC(v) FIELD_PREP(W1_MASK( 61, 56), v)
+#define CMD_U1_COMBO_XFER W1_BIT_( 55)
+#define CMD_U1_DATA_LENGTH(v) FIELD_PREP(W1_MASK( 53, 32), v)
+#define CMD_U0_TOC W0_BIT_( 31)
+#define CMD_U0_ROC W0_BIT_( 30)
+#define CMD_U0_MAY_YIELD W0_BIT_( 29)
+#define CMD_U0_NACK_RCNT(v) FIELD_PREP(W0_MASK( 28, 27), v)
+#define CMD_U0_IDB_COUNT(v) FIELD_PREP(W0_MASK( 26, 24), v)
+#define CMD_U0_MODE_INDEX(v) FIELD_PREP(W0_MASK( 22, 18), v)
+#define CMD_U0_XFER_RATE(v) FIELD_PREP(W0_MASK( 17, 15), v)
+#define CMD_U0_DEV_ADDRESS(v) FIELD_PREP(W0_MASK( 14, 8), v)
+#define CMD_U0_RnW W0_BIT_( 7)
+#define CMD_U0_TID(v) FIELD_PREP(W0_MASK( 6, 3), v)
+
+/*
+ * Address Assignment Command
+ */
+
+#define CMD_0_ATTR_A FIELD_PREP(CMD_0_ATTR, 0x2)
+
+#define CMD_A1_DATA_LENGTH(v) FIELD_PREP(W1_MASK( 53, 32), v)
+#define CMD_A0_TOC W0_BIT_( 31)
+#define CMD_A0_ROC W0_BIT_( 30)
+#define CMD_A0_XFER_RATE(v) FIELD_PREP(W0_MASK( 17, 15), v)
+#define CMD_A0_ASSIGN_ADDRESS(v) FIELD_PREP(W0_MASK( 14, 8), v)
+#define CMD_A0_TID(v) FIELD_PREP(W0_MASK( 6, 3), v)
+
+
+static unsigned int get_i3c_rate_idx(struct i3c_hci *hci)
+{
+ struct i3c_bus *bus = i3c_master_get_bus(&hci->master);
+
+ if (bus->scl_rate.i3c >= 12000000)
+ return XFERRATE_I3C_SDR0;
+ if (bus->scl_rate.i3c > 8000000)
+ return XFERRATE_I3C_SDR1;
+ if (bus->scl_rate.i3c > 6000000)
+ return XFERRATE_I3C_SDR2;
+ if (bus->scl_rate.i3c > 4000000)
+ return XFERRATE_I3C_SDR3;
+ if (bus->scl_rate.i3c > 2000000)
+ return XFERRATE_I3C_SDR4;
+ return XFERRATE_I3C_SDR_FM_FMP;
+}
+
+static unsigned int get_i2c_rate_idx(struct i3c_hci *hci)
+{
+ struct i3c_bus *bus = i3c_master_get_bus(&hci->master);
+
+ if (bus->scl_rate.i2c >= 1000000)
+ return XFERRATE_I2C_FMP;
+ return XFERRATE_I2C_FM;
+}
+
+static void hci_cmd_v2_prep_private_xfer(struct i3c_hci *hci,
+ struct hci_xfer *xfer,
+ u8 addr, unsigned int mode,
+ unsigned int rate)
+{
+ u8 *data = xfer->data;
+ unsigned int data_len = xfer->data_len;
+ bool rnw = xfer->rnw;
+
+ xfer->cmd_tid = hci_get_tid();
+
+ if (!rnw && data_len <= 5) {
+ xfer->cmd_desc[0] =
+ CMD_0_ATTR_U |
+ CMD_U0_TID(xfer->cmd_tid) |
+ CMD_U0_DEV_ADDRESS(addr) |
+ CMD_U0_XFER_RATE(rate) |
+ CMD_U0_MODE_INDEX(mode) |
+ CMD_U0_IDB_COUNT(data_len);
+ xfer->cmd_desc[1] =
+ CMD_U1_DATA_LENGTH(0);
+ xfer->cmd_desc[2] = 0;
+ xfer->cmd_desc[3] = 0;
+ switch (data_len) {
+ case 5:
+ xfer->cmd_desc[3] |= CMD_U3_IDB4(data[4]);
+ fallthrough;
+ case 4:
+ xfer->cmd_desc[2] |= CMD_U2_IDB3(data[3]);
+ fallthrough;
+ case 3:
+ xfer->cmd_desc[2] |= CMD_U2_IDB2(data[2]);
+ fallthrough;
+ case 2:
+ xfer->cmd_desc[2] |= CMD_U2_IDB1(data[1]);
+ fallthrough;
+ case 1:
+ xfer->cmd_desc[2] |= CMD_U2_IDB0(data[0]);
+ fallthrough;
+ case 0:
+ break;
+ }
+ /* we consumed all the data with the cmd descriptor */
+ xfer->data = NULL;
+ } else {
+ xfer->cmd_desc[0] =
+ CMD_0_ATTR_U |
+ CMD_U0_TID(xfer->cmd_tid) |
+ (rnw ? CMD_U0_RnW : 0) |
+ CMD_U0_DEV_ADDRESS(addr) |
+ CMD_U0_XFER_RATE(rate) |
+ CMD_U0_MODE_INDEX(mode);
+ xfer->cmd_desc[1] =
+ CMD_U1_DATA_LENGTH(data_len);
+ xfer->cmd_desc[2] = 0;
+ xfer->cmd_desc[3] = 0;
+ }
+}
+
+static int hci_cmd_v2_prep_ccc(struct i3c_hci *hci, struct hci_xfer *xfer,
+ u8 ccc_addr, u8 ccc_cmd, bool raw)
+{
+ unsigned int mode = XFERMODE_IDX_I3C_SDR;
+ unsigned int rate = get_i3c_rate_idx(hci);
+ u8 *data = xfer->data;
+ unsigned int data_len = xfer->data_len;
+ bool rnw = xfer->rnw;
+
+ if (raw && ccc_addr != I3C_BROADCAST_ADDR) {
+ hci_cmd_v2_prep_private_xfer(hci, xfer, ccc_addr, mode, rate);
+ return 0;
+ }
+
+ xfer->cmd_tid = hci_get_tid();
+
+ if (!rnw && data_len <= 4) {
+ xfer->cmd_desc[0] =
+ CMD_0_ATTR_U |
+ CMD_U0_TID(xfer->cmd_tid) |
+ CMD_U0_DEV_ADDRESS(ccc_addr) |
+ CMD_U0_XFER_RATE(rate) |
+ CMD_U0_MODE_INDEX(mode) |
+ CMD_U0_IDB_COUNT(data_len + (!raw ? 0 : 1));
+ xfer->cmd_desc[1] =
+ CMD_U1_DATA_LENGTH(0);
+ xfer->cmd_desc[2] =
+ CMD_U2_IDB0(ccc_cmd);
+ xfer->cmd_desc[3] = 0;
+ switch (data_len) {
+ case 4:
+ xfer->cmd_desc[3] |= CMD_U3_IDB4(data[3]);
+ fallthrough;
+ case 3:
+ xfer->cmd_desc[2] |= CMD_U2_IDB3(data[2]);
+ fallthrough;
+ case 2:
+ xfer->cmd_desc[2] |= CMD_U2_IDB2(data[1]);
+ fallthrough;
+ case 1:
+ xfer->cmd_desc[2] |= CMD_U2_IDB1(data[0]);
+ fallthrough;
+ case 0:
+ break;
+ }
+ /* we consumed all the data with the cmd descriptor */
+ xfer->data = NULL;
+ } else {
+ xfer->cmd_desc[0] =
+ CMD_0_ATTR_U |
+ CMD_U0_TID(xfer->cmd_tid) |
+ (rnw ? CMD_U0_RnW : 0) |
+ CMD_U0_DEV_ADDRESS(ccc_addr) |
+ CMD_U0_XFER_RATE(rate) |
+ CMD_U0_MODE_INDEX(mode) |
+ CMD_U0_IDB_COUNT(!raw ? 0 : 1);
+ xfer->cmd_desc[1] =
+ CMD_U1_DATA_LENGTH(data_len);
+ xfer->cmd_desc[2] =
+ CMD_U2_IDB0(ccc_cmd);
+ xfer->cmd_desc[3] = 0;
+ }
+
+ return 0;
+}
+
+static void hci_cmd_v2_prep_i3c_xfer(struct i3c_hci *hci,
+ struct i3c_dev_desc *dev,
+ struct hci_xfer *xfer)
+{
+ unsigned int mode = XFERMODE_IDX_I3C_SDR;
+ unsigned int rate = get_i3c_rate_idx(hci);
+ u8 addr = dev->info.dyn_addr;
+
+ hci_cmd_v2_prep_private_xfer(hci, xfer, addr, mode, rate);
+}
+
+static void hci_cmd_v2_prep_i2c_xfer(struct i3c_hci *hci,
+ struct i2c_dev_desc *dev,
+ struct hci_xfer *xfer)
+{
+ unsigned int mode = XFERMODE_IDX_I2C;
+ unsigned int rate = get_i2c_rate_idx(hci);
+ u8 addr = dev->addr;
+
+ hci_cmd_v2_prep_private_xfer(hci, xfer, addr, mode, rate);
+}
+
+static int hci_cmd_v2_daa(struct i3c_hci *hci)
+{
+ struct hci_xfer *xfer;
+ int ret;
+ u8 next_addr = 0;
+ u32 device_id[2];
+ u64 pid;
+ unsigned int dcr, bcr;
+ DECLARE_COMPLETION_ONSTACK(done);
+
+ xfer = hci_alloc_xfer(2);
+ if (!xfer)
+ return -ENOMEM;
+
+ xfer[0].data = &device_id;
+ xfer[0].data_len = 8;
+ xfer[0].rnw = true;
+ xfer[0].cmd_desc[1] = CMD_A1_DATA_LENGTH(8);
+ xfer[1].completion = &done;
+
+ for (;;) {
+ ret = i3c_master_get_free_addr(&hci->master, next_addr);
+ if (ret < 0)
+ break;
+ next_addr = ret;
+ DBG("next_addr = 0x%02x", next_addr);
+ xfer[0].cmd_tid = hci_get_tid();
+ xfer[0].cmd_desc[0] =
+ CMD_0_ATTR_A |
+ CMD_A0_TID(xfer[0].cmd_tid) |
+ CMD_A0_ROC;
+ xfer[1].cmd_tid = hci_get_tid();
+ xfer[1].cmd_desc[0] =
+ CMD_0_ATTR_A |
+ CMD_A0_TID(xfer[1].cmd_tid) |
+ CMD_A0_ASSIGN_ADDRESS(next_addr) |
+ CMD_A0_ROC |
+ CMD_A0_TOC;
+ hci->io->queue_xfer(hci, xfer, 2);
+ if (!wait_for_completion_timeout(&done, HZ) &&
+ hci->io->dequeue_xfer(hci, xfer, 2)) {
+ ret = -ETIME;
+ break;
+ }
+ if (RESP_STATUS(xfer[0].response) != RESP_SUCCESS) {
+ ret = 0; /* no more devices to be assigned */
+ break;
+ }
+ if (RESP_STATUS(xfer[1].response) != RESP_SUCCESS) {
+ ret = -EIO;
+ break;
+ }
+
+ pid = FIELD_GET(W1_MASK(47, 32), device_id[1]);
+ pid = (pid << 32) | device_id[0];
+ bcr = FIELD_GET(W1_MASK(55, 48), device_id[1]);
+ dcr = FIELD_GET(W1_MASK(63, 56), device_id[1]);
+ DBG("assigned address %#x to device PID=0x%llx DCR=%#x BCR=%#x",
+ next_addr, pid, dcr, bcr);
+ /*
+		 * TODO: Extend the subsystem layer to allow for registering a
+		 * new device and providing BCR/DCR/PID at the same time.
+ */
+ ret = i3c_master_add_i3c_dev_locked(&hci->master, next_addr);
+ if (ret)
+ break;
+ }
+
+ hci_free_xfer(xfer, 2);
+ return ret;
+}
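/*
 * The PID/BCR/DCR unpacking above works because the Address Assignment
 * response lands in two 32-bit words, with device_id[1] covering
 * descriptor bits 63:32, so W1_MASK(47, 32) selects its low 16 bits.
 * A standalone sketch with a made-up sample value:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t device_id[2] = { 0xdeadbeef, 0x07061234 };
	uint64_t pid;
	unsigned int bcr, dcr;

	pid = device_id[1] & 0xffff;		/* W1_MASK(47, 32) */
	pid = (pid << 32) | device_id[0];
	bcr = (device_id[1] >> 16) & 0xff;	/* W1_MASK(55, 48) */
	dcr = device_id[1] >> 24;		/* W1_MASK(63, 56) */

	printf("PID=%#llx BCR=%#x DCR=%#x\n",
	       (unsigned long long)pid, bcr, dcr);
	return 0;
}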
+
+const struct hci_cmd_ops mipi_i3c_hci_cmd_v2 = {
+ .prep_ccc = hci_cmd_v2_prep_ccc,
+ .prep_i3c_xfer = hci_cmd_v2_prep_i3c_xfer,
+ .prep_i2c_xfer = hci_cmd_v2_prep_i2c_xfer,
+ .perform_daa = hci_cmd_v2_daa,
+};
diff --git a/drivers/i3c/master/mipi-i3c-hci/core.c b/drivers/i3c/master/mipi-i3c-hci/core.c
new file mode 100644
index 000000000000..500abd27fb22
--- /dev/null
+++ b/drivers/i3c/master/mipi-i3c-hci/core.c
@@ -0,0 +1,798 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2020, MIPI Alliance, Inc.
+ *
+ * Author: Nicolas Pitre <npitre@baylibre.com>
+ *
+ * Core driver code with main interface to the I3C subsystem.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/i3c/master.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "hci.h"
+#include "ext_caps.h"
+#include "cmd.h"
+#include "dat.h"
+
+
+/*
+ * Host Controller Capabilities and Operation Registers
+ */
+
+#define reg_read(r) readl(hci->base_regs + (r))
+#define reg_write(r, v) writel(v, hci->base_regs + (r))
+#define reg_set(r, v) reg_write(r, reg_read(r) | (v))
+#define reg_clear(r, v) reg_write(r, reg_read(r) & ~(v))
+
+#define HCI_VERSION 0x00 /* HCI Version (in BCD) */
+
+#define HC_CONTROL 0x04
+#define HC_CONTROL_BUS_ENABLE BIT(31)
+#define HC_CONTROL_RESUME BIT(30)
+#define HC_CONTROL_ABORT BIT(29)
+#define HC_CONTROL_HALT_ON_CMD_TIMEOUT BIT(12)
+#define HC_CONTROL_HOT_JOIN_CTRL BIT(8) /* Hot-Join ACK/NACK Control */
+#define HC_CONTROL_I2C_TARGET_PRESENT BIT(7)
+#define HC_CONTROL_PIO_MODE BIT(6) /* DMA/PIO Mode Selector */
+#define HC_CONTROL_DATA_BIG_ENDIAN BIT(4)
+#define HC_CONTROL_IBA_INCLUDE BIT(0) /* Include I3C Broadcast Address */
+
+#define MASTER_DEVICE_ADDR 0x08 /* Master Device Address */
+#define MASTER_DYNAMIC_ADDR_VALID BIT(31) /* Dynamic Address is Valid */
+#define MASTER_DYNAMIC_ADDR(v) FIELD_PREP(GENMASK(22, 16), v)
+
+#define HC_CAPABILITIES 0x0c
+#define HC_CAP_SG_DC_EN BIT(30)
+#define HC_CAP_SG_IBI_EN BIT(29)
+#define HC_CAP_SG_CR_EN BIT(28)
+#define HC_CAP_MAX_DATA_LENGTH GENMASK(24, 22)
+#define HC_CAP_CMD_SIZE GENMASK(21, 20)
+#define HC_CAP_DIRECT_COMMANDS_EN BIT(18)
+#define HC_CAP_MULTI_LANE_EN BIT(15)
+#define HC_CAP_CMD_CCC_DEFBYTE BIT(10)
+#define HC_CAP_HDR_BT_EN BIT(8)
+#define HC_CAP_HDR_TS_EN BIT(7)
+#define HC_CAP_HDR_DDR_EN BIT(6)
+#define HC_CAP_NON_CURRENT_MASTER_CAP BIT(5) /* master handoff capable */
+#define HC_CAP_DATA_BYTE_CFG_EN BIT(4) /* endian selection possible */
+#define HC_CAP_AUTO_COMMAND BIT(3)
+#define HC_CAP_COMBO_COMMAND BIT(2)
+
+#define RESET_CONTROL 0x10
+#define BUS_RESET BIT(31)
+#define BUS_RESET_TYPE GENMASK(30, 29)
+#define IBI_QUEUE_RST BIT(5)
+#define RX_FIFO_RST BIT(4)
+#define TX_FIFO_RST BIT(3)
+#define RESP_QUEUE_RST BIT(2)
+#define CMD_QUEUE_RST BIT(1)
+#define SOFT_RST BIT(0) /* Core Reset */
+
+#define PRESENT_STATE 0x14
+#define STATE_CURRENT_MASTER BIT(2)
+
+#define INTR_STATUS 0x20
+#define INTR_STATUS_ENABLE 0x24
+#define INTR_SIGNAL_ENABLE 0x28
+#define INTR_FORCE 0x2c
+#define INTR_HC_CMD_SEQ_UFLOW_STAT BIT(12) /* Cmd Sequence Underflow */
+#define INTR_HC_RESET_CANCEL BIT(11) /* HC Cancelled Reset */
+#define INTR_HC_INTERNAL_ERR BIT(10) /* HC Internal Error */
+#define INTR_HC_PIO BIT(8) /* cascaded PIO interrupt */
+#define INTR_HC_RINGS GENMASK(7, 0)
+
+#define DAT_SECTION 0x30 /* Device Address Table */
+#define DAT_ENTRY_SIZE GENMASK(31, 28)
+#define DAT_TABLE_SIZE GENMASK(18, 12)
+#define DAT_TABLE_OFFSET GENMASK(11, 0)
+
+#define DCT_SECTION 0x34 /* Device Characteristics Table */
+#define DCT_ENTRY_SIZE GENMASK(31, 28)
+#define DCT_TABLE_INDEX GENMASK(23, 19)
+#define DCT_TABLE_SIZE GENMASK(18, 12)
+#define DCT_TABLE_OFFSET GENMASK(11, 0)
+
+#define RING_HEADERS_SECTION 0x38
+#define RING_HEADERS_OFFSET GENMASK(15, 0)
+
+#define PIO_SECTION 0x3c
+#define PIO_REGS_OFFSET GENMASK(15, 0) /* PIO Offset */
+
+#define EXT_CAPS_SECTION 0x40
+#define EXT_CAPS_OFFSET GENMASK(15, 0)
+
+#define IBI_NOTIFY_CTRL 0x58 /* IBI Notify Control */
+#define IBI_NOTIFY_SIR_REJECTED BIT(3) /* Rejected Target Interrupt Request */
+#define IBI_NOTIFY_MR_REJECTED BIT(1) /* Rejected Master Request Control */
+#define IBI_NOTIFY_HJ_REJECTED BIT(0) /* Rejected Hot-Join Control */
+
+#define DEV_CTX_BASE_LO 0x60
+#define DEV_CTX_BASE_HI 0x64
+
+
+static inline struct i3c_hci *to_i3c_hci(struct i3c_master_controller *m)
+{
+ return container_of(m, struct i3c_hci, master);
+}
+
+static int i3c_hci_bus_init(struct i3c_master_controller *m)
+{
+ struct i3c_hci *hci = to_i3c_hci(m);
+ struct i3c_device_info info;
+ int ret;
+
+ DBG("");
+
+ if (hci->cmd == &mipi_i3c_hci_cmd_v1) {
+ ret = mipi_i3c_hci_dat_v1.init(hci);
+ if (ret)
+ return ret;
+ }
+
+ ret = i3c_master_get_free_addr(m, 0);
+ if (ret < 0)
+ return ret;
+ reg_write(MASTER_DEVICE_ADDR,
+ MASTER_DYNAMIC_ADDR(ret) | MASTER_DYNAMIC_ADDR_VALID);
+ memset(&info, 0, sizeof(info));
+ info.dyn_addr = ret;
+ ret = i3c_master_set_info(m, &info);
+ if (ret)
+ return ret;
+
+ ret = hci->io->init(hci);
+ if (ret)
+ return ret;
+
+ reg_set(HC_CONTROL, HC_CONTROL_BUS_ENABLE);
+ DBG("HC_CONTROL = %#x", reg_read(HC_CONTROL));
+
+ return 0;
+}
+
+static void i3c_hci_bus_cleanup(struct i3c_master_controller *m)
+{
+ struct i3c_hci *hci = to_i3c_hci(m);
+
+ DBG("");
+
+ reg_clear(HC_CONTROL, HC_CONTROL_BUS_ENABLE);
+ hci->io->cleanup(hci);
+ if (hci->cmd == &mipi_i3c_hci_cmd_v1)
+ mipi_i3c_hci_dat_v1.cleanup(hci);
+}
+
+void mipi_i3c_hci_resume(struct i3c_hci *hci)
+{
+ /* the HC_CONTROL_RESUME bit is R/W1C so just read and write back */
+ reg_write(HC_CONTROL, reg_read(HC_CONTROL));
+}
+
+/* located here rather than pio.c because needed bits are in core reg space */
+void mipi_i3c_hci_pio_reset(struct i3c_hci *hci)
+{
+ reg_write(RESET_CONTROL, RX_FIFO_RST | TX_FIFO_RST | RESP_QUEUE_RST);
+}
+
+/* located here rather than dct.c because needed bits are in core reg space */
+void mipi_i3c_hci_dct_index_reset(struct i3c_hci *hci)
+{
+ reg_write(DCT_SECTION, FIELD_PREP(DCT_TABLE_INDEX, 0));
+}
+
+static int i3c_hci_send_ccc_cmd(struct i3c_master_controller *m,
+ struct i3c_ccc_cmd *ccc)
+{
+ struct i3c_hci *hci = to_i3c_hci(m);
+ struct hci_xfer *xfer;
+ bool raw = !!(hci->quirks & HCI_QUIRK_RAW_CCC);
+ bool prefixed = raw && !!(ccc->id & I3C_CCC_DIRECT);
+ unsigned int nxfers = ccc->ndests + prefixed;
+ DECLARE_COMPLETION_ONSTACK(done);
+ int i, last, ret = 0;
+
+ DBG("cmd=%#x rnw=%d ndests=%d data[0].len=%d",
+ ccc->id, ccc->rnw, ccc->ndests, ccc->dests[0].payload.len);
+
+ xfer = hci_alloc_xfer(nxfers);
+ if (!xfer)
+ return -ENOMEM;
+
+ if (prefixed) {
+ xfer->data = NULL;
+ xfer->data_len = 0;
+ xfer->rnw = false;
+ hci->cmd->prep_ccc(hci, xfer, I3C_BROADCAST_ADDR,
+ ccc->id, true);
+ xfer++;
+ }
+
+ for (i = 0; i < nxfers - prefixed; i++) {
+ xfer[i].data = ccc->dests[i].payload.data;
+ xfer[i].data_len = ccc->dests[i].payload.len;
+ xfer[i].rnw = ccc->rnw;
+ ret = hci->cmd->prep_ccc(hci, &xfer[i], ccc->dests[i].addr,
+ ccc->id, raw);
+ if (ret)
+ goto out;
+ xfer[i].cmd_desc[0] |= CMD_0_ROC;
+ }
+ last = i - 1;
+ xfer[last].cmd_desc[0] |= CMD_0_TOC;
+ xfer[last].completion = &done;
+
+ if (prefixed)
+ xfer--;
+
+ ret = hci->io->queue_xfer(hci, xfer, nxfers);
+ if (ret)
+ goto out;
+ if (!wait_for_completion_timeout(&done, HZ) &&
+ hci->io->dequeue_xfer(hci, xfer, nxfers)) {
+ ret = -ETIME;
+ goto out;
+ }
+ for (i = prefixed; i < nxfers; i++) {
+ if (ccc->rnw)
+ ccc->dests[i - prefixed].payload.len =
+ RESP_DATA_LENGTH(xfer[i].response);
+ if (RESP_STATUS(xfer[i].response) != RESP_SUCCESS) {
+ ret = -EIO;
+ goto out;
+ }
+ }
+
+ if (ccc->rnw)
+ DBG("got: %*ph",
+ ccc->dests[0].payload.len, ccc->dests[0].payload.data);
+
+out:
+ hci_free_xfer(xfer, nxfers);
+ return ret;
+}
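/*
 * To make the xfer++/xfer-- dance above easier to follow, this toy
 * sketch prints the slot layout a raw direct CCC with two destinations
 * produces (prefixed == 1, nxfers == 3): slot 0 is the broadcast write
 * of the CCC code, and the loop indexes destinations from zero while
 * the pointer is temporarily advanced past the prefix.
 */
#include <stdio.h>

int main(void)
{
	int prefixed = 1, ndests = 2;
	int nxfers = ndests + prefixed;
	int i;

	if (prefixed)
		printf("xfer[0]: broadcast write of the CCC code\n");
	for (i = 0; i < nxfers - prefixed; i++)
		printf("xfer[%d]: payload for dests[%d]\n", i + prefixed, i);
	return 0;
}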
+
+static int i3c_hci_daa(struct i3c_master_controller *m)
+{
+ struct i3c_hci *hci = to_i3c_hci(m);
+
+ DBG("");
+
+ return hci->cmd->perform_daa(hci);
+}
+
+static int i3c_hci_priv_xfers(struct i3c_dev_desc *dev,
+ struct i3c_priv_xfer *i3c_xfers,
+ int nxfers)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct i3c_hci *hci = to_i3c_hci(m);
+ struct hci_xfer *xfer;
+ DECLARE_COMPLETION_ONSTACK(done);
+ unsigned int size_limit;
+ int i, last, ret = 0;
+
+ DBG("nxfers = %d", nxfers);
+
+ xfer = hci_alloc_xfer(nxfers);
+ if (!xfer)
+ return -ENOMEM;
+
+ size_limit = 1U << (16 + FIELD_GET(HC_CAP_MAX_DATA_LENGTH, hci->caps));
+
+ for (i = 0; i < nxfers; i++) {
+ xfer[i].data_len = i3c_xfers[i].len;
+ ret = -EFBIG;
+ if (xfer[i].data_len >= size_limit)
+ goto out;
+ xfer[i].rnw = i3c_xfers[i].rnw;
+ if (i3c_xfers[i].rnw) {
+ xfer[i].data = i3c_xfers[i].data.in;
+ } else {
+ /* silence the const qualifier warning with a cast */
+ xfer[i].data = (void *) i3c_xfers[i].data.out;
+ }
+ hci->cmd->prep_i3c_xfer(hci, dev, &xfer[i]);
+ xfer[i].cmd_desc[0] |= CMD_0_ROC;
+ }
+ last = i - 1;
+ xfer[last].cmd_desc[0] |= CMD_0_TOC;
+ xfer[last].completion = &done;
+
+ ret = hci->io->queue_xfer(hci, xfer, nxfers);
+ if (ret)
+ goto out;
+ if (!wait_for_completion_timeout(&done, HZ) &&
+ hci->io->dequeue_xfer(hci, xfer, nxfers)) {
+ ret = -ETIME;
+ goto out;
+ }
+ for (i = 0; i < nxfers; i++) {
+ if (i3c_xfers[i].rnw)
+ i3c_xfers[i].len = RESP_DATA_LENGTH(xfer[i].response);
+ if (RESP_STATUS(xfer[i].response) != RESP_SUCCESS) {
+ ret = -EIO;
+ goto out;
+ }
+ }
+
+out:
+ hci_free_xfer(xfer, nxfers);
+ return ret;
+}
+
+static int i3c_hci_i2c_xfers(struct i2c_dev_desc *dev,
+ const struct i2c_msg *i2c_xfers, int nxfers)
+{
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct i3c_hci *hci = to_i3c_hci(m);
+ struct hci_xfer *xfer;
+ DECLARE_COMPLETION_ONSTACK(done);
+ int i, last, ret = 0;
+
+ DBG("nxfers = %d", nxfers);
+
+ xfer = hci_alloc_xfer(nxfers);
+ if (!xfer)
+ return -ENOMEM;
+
+ for (i = 0; i < nxfers; i++) {
+ xfer[i].data = i2c_xfers[i].buf;
+ xfer[i].data_len = i2c_xfers[i].len;
+ xfer[i].rnw = i2c_xfers[i].flags & I2C_M_RD;
+ hci->cmd->prep_i2c_xfer(hci, dev, &xfer[i]);
+ xfer[i].cmd_desc[0] |= CMD_0_ROC;
+ }
+ last = i - 1;
+ xfer[last].cmd_desc[0] |= CMD_0_TOC;
+ xfer[last].completion = &done;
+
+ ret = hci->io->queue_xfer(hci, xfer, nxfers);
+ if (ret)
+ goto out;
+ if (!wait_for_completion_timeout(&done, HZ) &&
+ hci->io->dequeue_xfer(hci, xfer, nxfers)) {
+ ret = -ETIME;
+ goto out;
+ }
+ for (i = 0; i < nxfers; i++) {
+ if (RESP_STATUS(xfer[i].response) != RESP_SUCCESS) {
+ ret = -EIO;
+ goto out;
+ }
+ }
+
+out:
+ hci_free_xfer(xfer, nxfers);
+ return ret;
+}
+
+static int i3c_hci_attach_i3c_dev(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct i3c_hci *hci = to_i3c_hci(m);
+ struct i3c_hci_dev_data *dev_data;
+ int ret;
+
+ DBG("");
+
+ dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
+ if (!dev_data)
+ return -ENOMEM;
+ if (hci->cmd == &mipi_i3c_hci_cmd_v1) {
+ ret = mipi_i3c_hci_dat_v1.alloc_entry(hci);
+ if (ret < 0) {
+ kfree(dev_data);
+ return ret;
+ }
+ mipi_i3c_hci_dat_v1.set_dynamic_addr(hci, ret, dev->info.dyn_addr);
+ dev_data->dat_idx = ret;
+ }
+ i3c_dev_set_master_data(dev, dev_data);
+ return 0;
+}
+
+static int i3c_hci_reattach_i3c_dev(struct i3c_dev_desc *dev, u8 old_dyn_addr)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct i3c_hci *hci = to_i3c_hci(m);
+ struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
+
+ DBG("");
+
+ if (hci->cmd == &mipi_i3c_hci_cmd_v1)
+ mipi_i3c_hci_dat_v1.set_dynamic_addr(hci, dev_data->dat_idx,
+ dev->info.dyn_addr);
+ return 0;
+}
+
+static void i3c_hci_detach_i3c_dev(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct i3c_hci *hci = to_i3c_hci(m);
+ struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
+
+ DBG("");
+
+ i3c_dev_set_master_data(dev, NULL);
+ if (hci->cmd == &mipi_i3c_hci_cmd_v1)
+ mipi_i3c_hci_dat_v1.free_entry(hci, dev_data->dat_idx);
+ kfree(dev_data);
+}
+
+static int i3c_hci_attach_i2c_dev(struct i2c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct i3c_hci *hci = to_i3c_hci(m);
+ struct i3c_hci_dev_data *dev_data;
+ int ret;
+
+ DBG("");
+
+ if (hci->cmd != &mipi_i3c_hci_cmd_v1)
+ return 0;
+ dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
+ if (!dev_data)
+ return -ENOMEM;
+ ret = mipi_i3c_hci_dat_v1.alloc_entry(hci);
+ if (ret < 0) {
+ kfree(dev_data);
+ return ret;
+ }
+ mipi_i3c_hci_dat_v1.set_static_addr(hci, ret, dev->addr);
+ mipi_i3c_hci_dat_v1.set_flags(hci, ret, DAT_0_I2C_DEVICE, 0);
+ dev_data->dat_idx = ret;
+ i2c_dev_set_master_data(dev, dev_data);
+ return 0;
+}
+
+static void i3c_hci_detach_i2c_dev(struct i2c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct i3c_hci *hci = to_i3c_hci(m);
+ struct i3c_hci_dev_data *dev_data = i2c_dev_get_master_data(dev);
+
+ DBG("");
+
+ if (dev_data) {
+ i2c_dev_set_master_data(dev, NULL);
+ if (hci->cmd == &mipi_i3c_hci_cmd_v1)
+ mipi_i3c_hci_dat_v1.free_entry(hci, dev_data->dat_idx);
+ kfree(dev_data);
+ }
+}
+
+static int i3c_hci_request_ibi(struct i3c_dev_desc *dev,
+ const struct i3c_ibi_setup *req)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct i3c_hci *hci = to_i3c_hci(m);
+ struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
+ unsigned int dat_idx = dev_data->dat_idx;
+
+ if (req->max_payload_len != 0)
+ mipi_i3c_hci_dat_v1.set_flags(hci, dat_idx, DAT_0_IBI_PAYLOAD, 0);
+ else
+ mipi_i3c_hci_dat_v1.clear_flags(hci, dat_idx, DAT_0_IBI_PAYLOAD, 0);
+ return hci->io->request_ibi(hci, dev, req);
+}
+
+static void i3c_hci_free_ibi(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct i3c_hci *hci = to_i3c_hci(m);
+
+ hci->io->free_ibi(hci, dev);
+}
+
+static int i3c_hci_enable_ibi(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct i3c_hci *hci = to_i3c_hci(m);
+ struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
+
+ mipi_i3c_hci_dat_v1.clear_flags(hci, dev_data->dat_idx, DAT_0_SIR_REJECT, 0);
+ return i3c_master_enec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
+}
+
+static int i3c_hci_disable_ibi(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct i3c_hci *hci = to_i3c_hci(m);
+ struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
+
+ mipi_i3c_hci_dat_v1.set_flags(hci, dev_data->dat_idx, DAT_0_SIR_REJECT, 0);
+ return i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
+}
+
+static void i3c_hci_recycle_ibi_slot(struct i3c_dev_desc *dev,
+ struct i3c_ibi_slot *slot)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct i3c_hci *hci = to_i3c_hci(m);
+
+ hci->io->recycle_ibi_slot(hci, dev, slot);
+}
+
+static const struct i3c_master_controller_ops i3c_hci_ops = {
+ .bus_init = i3c_hci_bus_init,
+ .bus_cleanup = i3c_hci_bus_cleanup,
+ .do_daa = i3c_hci_daa,
+ .send_ccc_cmd = i3c_hci_send_ccc_cmd,
+ .priv_xfers = i3c_hci_priv_xfers,
+ .i2c_xfers = i3c_hci_i2c_xfers,
+ .attach_i3c_dev = i3c_hci_attach_i3c_dev,
+ .reattach_i3c_dev = i3c_hci_reattach_i3c_dev,
+ .detach_i3c_dev = i3c_hci_detach_i3c_dev,
+ .attach_i2c_dev = i3c_hci_attach_i2c_dev,
+ .detach_i2c_dev = i3c_hci_detach_i2c_dev,
+ .request_ibi = i3c_hci_request_ibi,
+ .free_ibi = i3c_hci_free_ibi,
+ .enable_ibi = i3c_hci_enable_ibi,
+ .disable_ibi = i3c_hci_disable_ibi,
+ .recycle_ibi_slot = i3c_hci_recycle_ibi_slot,
+};
+
+static irqreturn_t i3c_hci_irq_handler(int irq, void *dev_id)
+{
+ struct i3c_hci *hci = dev_id;
+ irqreturn_t result = IRQ_NONE;
+ u32 val;
+
+ val = reg_read(INTR_STATUS);
+ DBG("INTR_STATUS = %#x", val);
+
+ if (val) {
+ reg_write(INTR_STATUS, val);
+ } else {
+ /* v1.0 does not have PIO cascaded notification bits */
+ val |= INTR_HC_PIO;
+ }
+
+ if (val & INTR_HC_RESET_CANCEL) {
+ DBG("cancelled reset");
+ val &= ~INTR_HC_RESET_CANCEL;
+ }
+ if (val & INTR_HC_INTERNAL_ERR) {
+ dev_err(&hci->master.dev, "Host Controller Internal Error\n");
+ val &= ~INTR_HC_INTERNAL_ERR;
+ }
+ if (val & INTR_HC_PIO) {
+ hci->io->irq_handler(hci, 0);
+ val &= ~INTR_HC_PIO;
+ }
+ if (val & INTR_HC_RINGS) {
+ hci->io->irq_handler(hci, val & INTR_HC_RINGS);
+ val &= ~INTR_HC_RINGS;
+ }
+ if (val)
+ dev_err(&hci->master.dev, "unexpected INTR_STATUS %#x\n", val);
+ else
+ result = IRQ_HANDLED;
+
+ return result;
+}
+
+static int i3c_hci_init(struct i3c_hci *hci)
+{
+ u32 regval, offset;
+ int ret;
+
+ /* Validate HCI hardware version */
+ regval = reg_read(HCI_VERSION);
+ hci->version_major = (regval >> 8) & 0xf;
+ hci->version_minor = (regval >> 4) & 0xf;
+ hci->revision = regval & 0xf;
+ dev_notice(&hci->master.dev, "MIPI I3C HCI v%u.%u r%02u\n",
+ hci->version_major, hci->version_minor, hci->revision);
+ /* known versions */
+ switch (regval & ~0xf) {
+ case 0x100: /* version 1.0 */
+ case 0x110: /* version 1.1 */
+ case 0x200: /* version 2.0 */
+ break;
+ default:
+ dev_err(&hci->master.dev, "unsupported HCI version\n");
+ return -EPROTONOSUPPORT;
+ }
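/*
 * The version register packs BCD nibbles, and masking off the low
 * revision nibble is what lets any revision of a supported version
 * match the switch above. A tiny standalone check with a hypothetical
 * register value:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t regval = 0x113;	/* hypothetical: HCI v1.1 r03 */

	printf("v%u.%u r%02u, switch key %#x\n",
	       (regval >> 8) & 0xf, (regval >> 4) & 0xf, regval & 0xf,
	       regval & ~0xfu);	/* 0x110 -> the v1.1 case */
	return 0;
}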
+
+ hci->caps = reg_read(HC_CAPABILITIES);
+ DBG("caps = %#x", hci->caps);
+
+ regval = reg_read(DAT_SECTION);
+ offset = FIELD_GET(DAT_TABLE_OFFSET, regval);
+ hci->DAT_regs = offset ? hci->base_regs + offset : NULL;
+ hci->DAT_entries = FIELD_GET(DAT_TABLE_SIZE, regval);
+ hci->DAT_entry_size = FIELD_GET(DAT_ENTRY_SIZE, regval);
+	dev_info(&hci->master.dev, "DAT: %u %u-byte entries at offset %#x\n",
+ hci->DAT_entries, hci->DAT_entry_size * 4, offset);
+
+ regval = reg_read(DCT_SECTION);
+ offset = FIELD_GET(DCT_TABLE_OFFSET, regval);
+ hci->DCT_regs = offset ? hci->base_regs + offset : NULL;
+ hci->DCT_entries = FIELD_GET(DCT_TABLE_SIZE, regval);
+ hci->DCT_entry_size = FIELD_GET(DCT_ENTRY_SIZE, regval);
+	dev_info(&hci->master.dev, "DCT: %u %u-byte entries at offset %#x\n",
+ hci->DCT_entries, hci->DCT_entry_size * 4, offset);
+
+ regval = reg_read(RING_HEADERS_SECTION);
+ offset = FIELD_GET(RING_HEADERS_OFFSET, regval);
+ hci->RHS_regs = offset ? hci->base_regs + offset : NULL;
+ dev_info(&hci->master.dev, "Ring Headers at offset %#x\n", offset);
+
+ regval = reg_read(PIO_SECTION);
+ offset = FIELD_GET(PIO_REGS_OFFSET, regval);
+ hci->PIO_regs = offset ? hci->base_regs + offset : NULL;
+ dev_info(&hci->master.dev, "PIO section at offset %#x\n", offset);
+
+ regval = reg_read(EXT_CAPS_SECTION);
+ offset = FIELD_GET(EXT_CAPS_OFFSET, regval);
+ hci->EXTCAPS_regs = offset ? hci->base_regs + offset : NULL;
+ dev_info(&hci->master.dev, "Extended Caps at offset %#x\n", offset);
+
+ ret = i3c_hci_parse_ext_caps(hci);
+ if (ret)
+ return ret;
+
+ /*
+ * Now let's reset the hardware.
+ * SOFT_RST must be clear before we write to it.
+ * Then we must wait until it clears again.
+ */
+ ret = readx_poll_timeout(reg_read, RESET_CONTROL, regval,
+ !(regval & SOFT_RST), 1, 10000);
+ if (ret)
+ return -ENXIO;
+ reg_write(RESET_CONTROL, SOFT_RST);
+ ret = readx_poll_timeout(reg_read, RESET_CONTROL, regval,
+ !(regval & SOFT_RST), 1, 10000);
+ if (ret)
+ return -ENXIO;
+
+ /* Disable all interrupts and allow all signal updates */
+ reg_write(INTR_SIGNAL_ENABLE, 0x0);
+ reg_write(INTR_STATUS_ENABLE, 0xffffffff);
+
+ /* Make sure our data ordering fits the host's */
+ regval = reg_read(HC_CONTROL);
+ if (IS_ENABLED(CONFIG_BIG_ENDIAN)) {
+ if (!(regval & HC_CONTROL_DATA_BIG_ENDIAN)) {
+ regval |= HC_CONTROL_DATA_BIG_ENDIAN;
+ reg_write(HC_CONTROL, regval);
+ regval = reg_read(HC_CONTROL);
+ if (!(regval & HC_CONTROL_DATA_BIG_ENDIAN)) {
+ dev_err(&hci->master.dev, "cannot set BE mode\n");
+ return -EOPNOTSUPP;
+ }
+ }
+ } else {
+ if (regval & HC_CONTROL_DATA_BIG_ENDIAN) {
+ regval &= ~HC_CONTROL_DATA_BIG_ENDIAN;
+ reg_write(HC_CONTROL, regval);
+ regval = reg_read(HC_CONTROL);
+ if (regval & HC_CONTROL_DATA_BIG_ENDIAN) {
+ dev_err(&hci->master.dev, "cannot clear BE mode\n");
+ return -EOPNOTSUPP;
+ }
+ }
+ }
+
+ /* Select our command descriptor model */
+ switch (FIELD_GET(HC_CAP_CMD_SIZE, hci->caps)) {
+ case 0:
+ hci->cmd = &mipi_i3c_hci_cmd_v1;
+ break;
+ case 1:
+ hci->cmd = &mipi_i3c_hci_cmd_v2;
+ break;
+ default:
+ dev_err(&hci->master.dev, "wrong CMD_SIZE capability value\n");
+ return -EINVAL;
+ }
+
+ /* Try activating DMA operations first */
+ if (hci->RHS_regs) {
+ reg_clear(HC_CONTROL, HC_CONTROL_PIO_MODE);
+ if (reg_read(HC_CONTROL) & HC_CONTROL_PIO_MODE) {
+ dev_err(&hci->master.dev, "PIO mode is stuck\n");
+ ret = -EIO;
+ } else {
+ hci->io = &mipi_i3c_hci_dma;
+ dev_info(&hci->master.dev, "Using DMA\n");
+ }
+ }
+
+ /* If no DMA, try PIO */
+ if (!hci->io && hci->PIO_regs) {
+ reg_set(HC_CONTROL, HC_CONTROL_PIO_MODE);
+ if (!(reg_read(HC_CONTROL) & HC_CONTROL_PIO_MODE)) {
+ dev_err(&hci->master.dev, "DMA mode is stuck\n");
+ ret = -EIO;
+ } else {
+ hci->io = &mipi_i3c_hci_pio;
+ dev_info(&hci->master.dev, "Using PIO\n");
+ }
+ }
+
+ if (!hci->io) {
+ dev_err(&hci->master.dev, "neither DMA nor PIO can be used\n");
+ if (!ret)
+ ret = -EINVAL;
+ return ret;
+ }
+
+ return 0;
+}
+
+static int i3c_hci_probe(struct platform_device *pdev)
+{
+ struct i3c_hci *hci;
+ int irq, ret;
+
+ hci = devm_kzalloc(&pdev->dev, sizeof(*hci), GFP_KERNEL);
+ if (!hci)
+ return -ENOMEM;
+ hci->base_regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(hci->base_regs))
+ return PTR_ERR(hci->base_regs);
+
+ platform_set_drvdata(pdev, hci);
+ /* temporary for dev_printk's, to be replaced in i3c_master_register */
+ hci->master.dev.init_name = dev_name(&pdev->dev);
+
+ ret = i3c_hci_init(hci);
+ if (ret)
+ return ret;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
+		return irq;
+ ret = devm_request_irq(&pdev->dev, irq, i3c_hci_irq_handler,
+ 0, NULL, hci);
+ if (ret)
+ return ret;
+
+ ret = i3c_master_register(&hci->master, &pdev->dev,
+ &i3c_hci_ops, false);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int i3c_hci_remove(struct platform_device *pdev)
+{
+ struct i3c_hci *hci = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = i3c_master_unregister(&hci->master);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct of_device_id i3c_hci_of_match[] __maybe_unused = {
+ { .compatible = "mipi-i3c-hci", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, i3c_hci_of_match);
+
+static struct platform_driver i3c_hci_driver = {
+ .probe = i3c_hci_probe,
+ .remove = i3c_hci_remove,
+ .driver = {
+ .name = "mipi-i3c-hci",
+ .of_match_table = of_match_ptr(i3c_hci_of_match),
+ },
+};
+module_platform_driver(i3c_hci_driver);
+
+MODULE_AUTHOR("Nicolas Pitre <npitre@baylibre.com>");
+MODULE_DESCRIPTION("MIPI I3C HCI driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/i3c/master/mipi-i3c-hci/dat.h b/drivers/i3c/master/mipi-i3c-hci/dat.h
new file mode 100644
index 000000000000..1f0f345c3daf
--- /dev/null
+++ b/drivers/i3c/master/mipi-i3c-hci/dat.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/*
+ * Copyright (c) 2020, MIPI Alliance, Inc.
+ *
+ * Author: Nicolas Pitre <npitre@baylibre.com>
+ *
+ * Common DAT related stuff
+ */
+
+#ifndef DAT_H
+#define DAT_H
+
+/* Global DAT flags */
+#define DAT_0_I2C_DEVICE W0_BIT_(31)
+#define DAT_0_SIR_REJECT W0_BIT_(13)
+#define DAT_0_IBI_PAYLOAD W0_BIT_(12)
+
+struct hci_dat_ops {
+ int (*init)(struct i3c_hci *hci);
+ void (*cleanup)(struct i3c_hci *hci);
+ int (*alloc_entry)(struct i3c_hci *hci);
+ void (*free_entry)(struct i3c_hci *hci, unsigned int dat_idx);
+ void (*set_dynamic_addr)(struct i3c_hci *hci, unsigned int dat_idx, u8 addr);
+ void (*set_static_addr)(struct i3c_hci *hci, unsigned int dat_idx, u8 addr);
+ void (*set_flags)(struct i3c_hci *hci, unsigned int dat_idx, u32 w0, u32 w1);
+ void (*clear_flags)(struct i3c_hci *hci, unsigned int dat_idx, u32 w0, u32 w1);
+ int (*get_index)(struct i3c_hci *hci, u8 address);
+};
+
+extern const struct hci_dat_ops mipi_i3c_hci_dat_v1;
+
+#endif
diff --git a/drivers/i3c/master/mipi-i3c-hci/dat_v1.c b/drivers/i3c/master/mipi-i3c-hci/dat_v1.c
new file mode 100644
index 000000000000..783e551a2c85
--- /dev/null
+++ b/drivers/i3c/master/mipi-i3c-hci/dat_v1.c
@@ -0,0 +1,184 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2020, MIPI Alliance, Inc.
+ *
+ * Author: Nicolas Pitre <npitre@baylibre.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/i3c/master.h>
+#include <linux/io.h>
+
+#include "hci.h"
+#include "dat.h"
+
+
+/*
+ * Device Address Table Structure
+ */
+
+#define DAT_1_AUTOCMD_HDR_CODE W1_MASK(58, 51)
+#define DAT_1_AUTOCMD_MODE W1_MASK(50, 48)
+#define DAT_1_AUTOCMD_VALUE W1_MASK(47, 40)
+#define DAT_1_AUTOCMD_MASK W1_MASK(39, 32)
+/* DAT_0_I2C_DEVICE W0_BIT_(31) */
+#define DAT_0_DEV_NACK_RETRY_CNT W0_MASK(30, 29)
+#define DAT_0_RING_ID W0_MASK(28, 26)
+#define DAT_0_DYNADDR_PARITY W0_BIT_(23)
+#define DAT_0_DYNAMIC_ADDRESS W0_MASK(22, 16)
+#define DAT_0_TS W0_BIT_(15)
+#define DAT_0_MR_REJECT W0_BIT_(14)
+/* DAT_0_SIR_REJECT W0_BIT_(13) */
+/* DAT_0_IBI_PAYLOAD W0_BIT_(12) */
+#define DAT_0_STATIC_ADDRESS W0_MASK(6, 0)
+
+#define dat_w0_read(i) readl(hci->DAT_regs + (i) * 8)
+#define dat_w1_read(i) readl(hci->DAT_regs + (i) * 8 + 4)
+#define dat_w0_write(i, v) writel(v, hci->DAT_regs + (i) * 8)
+#define dat_w1_write(i, v) writel(v, hci->DAT_regs + (i) * 8 + 4)
+
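+/*
+ * DAT entries carry an odd parity bit next to the dynamic address.
+ * Forcing bit 7 to 1 before folding the byte down to bit 0 yields the
+ * XOR of all eight bits, which is 1 exactly when the 7-bit address
+ * contains an even number of 1s, i.e. the odd parity bit.
+ */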
+static inline bool dynaddr_parity(unsigned int addr)
+{
+ addr |= 1 << 7;
+ addr += addr >> 4;
+ addr += addr >> 2;
+ addr += addr >> 1;
+ return (addr & 1);
+}
+
+static int hci_dat_v1_init(struct i3c_hci *hci)
+{
+ unsigned int dat_idx;
+
+ if (!hci->DAT_regs) {
+ dev_err(&hci->master.dev,
+ "only DAT in register space is supported at the moment\n");
+ return -EOPNOTSUPP;
+ }
+ if (hci->DAT_entry_size != 8) {
+ dev_err(&hci->master.dev,
+			"only 8-byte DAT entries are supported at the moment\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* use a bitmap for faster free slot search */
+ hci->DAT_data = bitmap_zalloc(hci->DAT_entries, GFP_KERNEL);
+ if (!hci->DAT_data)
+ return -ENOMEM;
+
+ /* clear them */
+ for (dat_idx = 0; dat_idx < hci->DAT_entries; dat_idx++) {
+ dat_w0_write(dat_idx, 0);
+ dat_w1_write(dat_idx, 0);
+ }
+
+ return 0;
+}
+
+static void hci_dat_v1_cleanup(struct i3c_hci *hci)
+{
+ bitmap_free(hci->DAT_data);
+ hci->DAT_data = NULL;
+}
+
+static int hci_dat_v1_alloc_entry(struct i3c_hci *hci)
+{
+ unsigned int dat_idx;
+
+ dat_idx = find_first_zero_bit(hci->DAT_data, hci->DAT_entries);
+ if (dat_idx >= hci->DAT_entries)
+ return -ENOENT;
+ __set_bit(dat_idx, hci->DAT_data);
+
+ /* default flags */
+ dat_w0_write(dat_idx, DAT_0_SIR_REJECT | DAT_0_MR_REJECT);
+
+ return dat_idx;
+}
+
+static void hci_dat_v1_free_entry(struct i3c_hci *hci, unsigned int dat_idx)
+{
+ dat_w0_write(dat_idx, 0);
+ dat_w1_write(dat_idx, 0);
+ __clear_bit(dat_idx, hci->DAT_data);
+}
+
+static void hci_dat_v1_set_dynamic_addr(struct i3c_hci *hci,
+ unsigned int dat_idx, u8 address)
+{
+ u32 dat_w0;
+
+ dat_w0 = dat_w0_read(dat_idx);
+ dat_w0 &= ~(DAT_0_DYNAMIC_ADDRESS | DAT_0_DYNADDR_PARITY);
+ dat_w0 |= FIELD_PREP(DAT_0_DYNAMIC_ADDRESS, address) |
+ (dynaddr_parity(address) ? DAT_0_DYNADDR_PARITY : 0);
+ dat_w0_write(dat_idx, dat_w0);
+}
+
+static void hci_dat_v1_set_static_addr(struct i3c_hci *hci,
+ unsigned int dat_idx, u8 address)
+{
+ u32 dat_w0;
+
+ dat_w0 = dat_w0_read(dat_idx);
+ dat_w0 &= ~DAT_0_STATIC_ADDRESS;
+ dat_w0 |= FIELD_PREP(DAT_0_STATIC_ADDRESS, address);
+ dat_w0_write(dat_idx, dat_w0);
+}
+
+static void hci_dat_v1_set_flags(struct i3c_hci *hci, unsigned int dat_idx,
+ u32 w0_flags, u32 w1_flags)
+{
+ u32 dat_w0, dat_w1;
+
+ dat_w0 = dat_w0_read(dat_idx);
+ dat_w1 = dat_w1_read(dat_idx);
+ dat_w0 |= w0_flags;
+ dat_w1 |= w1_flags;
+ dat_w0_write(dat_idx, dat_w0);
+ dat_w1_write(dat_idx, dat_w1);
+}
+
+static void hci_dat_v1_clear_flags(struct i3c_hci *hci, unsigned int dat_idx,
+ u32 w0_flags, u32 w1_flags)
+{
+ u32 dat_w0, dat_w1;
+
+ dat_w0 = dat_w0_read(dat_idx);
+ dat_w1 = dat_w1_read(dat_idx);
+ dat_w0 &= ~w0_flags;
+ dat_w1 &= ~w1_flags;
+ dat_w0_write(dat_idx, dat_w0);
+ dat_w1_write(dat_idx, dat_w1);
+}
+
+static int hci_dat_v1_get_index(struct i3c_hci *hci, u8 dev_addr)
+{
+ unsigned int dat_idx;
+ u32 dat_w0;
+
+	for (dat_idx = find_first_bit(hci->DAT_data, hci->DAT_entries);
+	     dat_idx < hci->DAT_entries;
+	     dat_idx = find_next_bit(hci->DAT_data, hci->DAT_entries,
+				     dat_idx + 1)) {
+ dat_w0 = dat_w0_read(dat_idx);
+ if (FIELD_GET(DAT_0_DYNAMIC_ADDRESS, dat_w0) == dev_addr)
+ return dat_idx;
+ }
+
+ return -ENODEV;
+}
+
+const struct hci_dat_ops mipi_i3c_hci_dat_v1 = {
+ .init = hci_dat_v1_init,
+ .cleanup = hci_dat_v1_cleanup,
+ .alloc_entry = hci_dat_v1_alloc_entry,
+ .free_entry = hci_dat_v1_free_entry,
+ .set_dynamic_addr = hci_dat_v1_set_dynamic_addr,
+ .set_static_addr = hci_dat_v1_set_static_addr,
+ .set_flags = hci_dat_v1_set_flags,
+ .clear_flags = hci_dat_v1_clear_flags,
+ .get_index = hci_dat_v1_get_index,
+};
diff --git a/drivers/i3c/master/mipi-i3c-hci/dct.h b/drivers/i3c/master/mipi-i3c-hci/dct.h
new file mode 100644
index 000000000000..1028e0b40d89
--- /dev/null
+++ b/drivers/i3c/master/mipi-i3c-hci/dct.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/*
+ * Copyright (c) 2020, MIPI Alliance, Inc.
+ *
+ * Author: Nicolas Pitre <npitre@baylibre.com>
+ *
+ * Common DCT related stuff
+ */
+
+#ifndef DCT_H
+#define DCT_H
+
+void i3c_hci_dct_get_val(struct i3c_hci *hci, unsigned int dct_idx,
+ u64 *pid, unsigned int *dcr, unsigned int *bcr);
+
+#endif
diff --git a/drivers/i3c/master/mipi-i3c-hci/dct_v1.c b/drivers/i3c/master/mipi-i3c-hci/dct_v1.c
new file mode 100644
index 000000000000..acfd4d60f7b0
--- /dev/null
+++ b/drivers/i3c/master/mipi-i3c-hci/dct_v1.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2020, MIPI Alliance, Inc.
+ *
+ * Author: Nicolas Pitre <npitre@baylibre.com>
+ */
+
+#include <linux/device.h>
+#include <linux/bitfield.h>
+#include <linux/i3c/master.h>
+#include <linux/io.h>
+
+#include "hci.h"
+#include "dct.h"
+
+/*
+ * Device Characteristic Table
+ */
+
+void i3c_hci_dct_get_val(struct i3c_hci *hci, unsigned int dct_idx,
+ u64 *pid, unsigned int *dcr, unsigned int *bcr)
+{
+ void __iomem *reg = hci->DCT_regs + dct_idx * 4 * 4;
+ u32 dct_entry_data[4];
+ unsigned int i;
+
+ for (i = 0; i < 4; i++) {
+ dct_entry_data[i] = readl(reg);
+ reg += 4;
+ }
+
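+	/* PID[47:16] is in DCT word 0, PID[15:0] in the low half of word 1 */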
+ *pid = ((u64)dct_entry_data[0]) << (47 - 32 + 1) |
+ FIELD_GET(W1_MASK(47, 32), dct_entry_data[1]);
+ *dcr = FIELD_GET(W2_MASK(71, 64), dct_entry_data[2]);
+ *bcr = FIELD_GET(W2_MASK(79, 72), dct_entry_data[2]);
+}
diff --git a/drivers/i3c/master/mipi-i3c-hci/dma.c b/drivers/i3c/master/mipi-i3c-hci/dma.c
new file mode 100644
index 000000000000..af873a9be050
--- /dev/null
+++ b/drivers/i3c/master/mipi-i3c-hci/dma.c
@@ -0,0 +1,784 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2020, MIPI Alliance, Inc.
+ *
+ * Author: Nicolas Pitre <npitre@baylibre.com>
+ *
+ * Note: The I3C HCI v2.0 spec is still in flux. The IBI support is based on
+ * v1.x of the spec and v2.0 will likely be split out.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/i3c/master.h>
+#include <linux/io.h>
+
+#include "hci.h"
+#include "cmd.h"
+#include "ibi.h"
+
+
+/*
+ * Software Parameter Values (somewhat arbitrary for now).
+ * Some of them could be determined at run time eventually.
+ */
+
+#define XFER_RINGS 1 /* max: 8 */
+#define XFER_RING_ENTRIES 16 /* max: 255 */
+
+#define IBI_RINGS 1 /* max: 8 */
+#define IBI_STATUS_RING_ENTRIES 32 /* max: 255 */
+#define IBI_CHUNK_CACHELINES 1 /* max: 256 bytes equivalent */
+#define IBI_CHUNK_POOL_SIZE 128 /* max: 1023 */
+
+/*
+ * Ring Header Preamble
+ */
+
+#define rhs_reg_read(r) readl(hci->RHS_regs + (RHS_##r))
+#define rhs_reg_write(r, v) writel(v, hci->RHS_regs + (RHS_##r))
+
+#define RHS_CONTROL 0x00
+#define PREAMBLE_SIZE GENMASK(31, 24) /* Preamble Section Size */
+#define HEADER_SIZE GENMASK(23, 16) /* Ring Header Size */
+#define MAX_HEADER_COUNT_CAP GENMASK(7, 4) /* HC Max Header Count */
+#define MAX_HEADER_COUNT GENMASK(3, 0) /* Driver Max Header Count */
+
+#define RHS_RHn_OFFSET(n) (0x04 + (n)*4)
+
+/*
+ * Ring Header (Per-Ring Bundle)
+ */
+
+#define rh_reg_read(r) readl(rh->regs + (RH_##r))
+#define rh_reg_write(r, v) writel(v, rh->regs + (RH_##r))
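+/* e.g. rh_reg_read(RING_STATUS) expands to readl(rh->regs + RH_RING_STATUS) */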
+
+#define RH_CR_SETUP 0x00 /* Command/Response Ring */
+#define CR_XFER_STRUCT_SIZE GENMASK(31, 24)
+#define CR_RESP_STRUCT_SIZE GENMASK(23, 16)
+#define CR_RING_SIZE GENMASK(8, 0)
+
+#define RH_IBI_SETUP 0x04
+#define IBI_STATUS_STRUCT_SIZE GENMASK(31, 24)
+#define IBI_STATUS_RING_SIZE GENMASK(23, 16)
+#define IBI_DATA_CHUNK_SIZE GENMASK(12, 10)
+#define IBI_DATA_CHUNK_COUNT GENMASK(9, 0)
+
+#define RH_CHUNK_CONTROL 0x08
+
+#define RH_INTR_STATUS 0x10
+#define RH_INTR_STATUS_ENABLE 0x14
+#define RH_INTR_SIGNAL_ENABLE 0x18
+#define RH_INTR_FORCE 0x1c
+#define INTR_IBI_READY BIT(12)
+#define INTR_TRANSFER_COMPLETION BIT(11)
+#define INTR_RING_OP BIT(10)
+#define INTR_TRANSFER_ERR BIT(9)
+#define INTR_WARN_INS_STOP_MODE BIT(7)
+#define INTR_IBI_RING_FULL BIT(6)
+#define INTR_TRANSFER_ABORT BIT(5)
+
+#define RH_RING_STATUS 0x20
+#define RING_STATUS_LOCKED BIT(3)
+#define RING_STATUS_ABORTED BIT(2)
+#define RING_STATUS_RUNNING BIT(1)
+#define RING_STATUS_ENABLED BIT(0)
+
+#define RH_RING_CONTROL 0x24
+#define RING_CTRL_ABORT BIT(2)
+#define RING_CTRL_RUN_STOP BIT(1)
+#define RING_CTRL_ENABLE BIT(0)
+
+#define RH_RING_OPERATION1 0x28
+#define RING_OP1_IBI_DEQ_PTR GENMASK(23, 16)
+#define RING_OP1_CR_SW_DEQ_PTR GENMASK(15, 8)
+#define RING_OP1_CR_ENQ_PTR GENMASK(7, 0)
+
+#define RH_RING_OPERATION2 0x2c
+#define RING_OP2_IBI_ENQ_PTR GENMASK(23, 16)
+#define RING_OP2_CR_DEQ_PTR GENMASK(7, 0)
+
+#define RH_CMD_RING_BASE_LO 0x30
+#define RH_CMD_RING_BASE_HI 0x34
+#define RH_RESP_RING_BASE_LO 0x38
+#define RH_RESP_RING_BASE_HI 0x3c
+#define RH_IBI_STATUS_RING_BASE_LO 0x40
+#define RH_IBI_STATUS_RING_BASE_HI 0x44
+#define RH_IBI_DATA_RING_BASE_LO 0x48
+#define RH_IBI_DATA_RING_BASE_HI 0x4c
+
+#define RH_CMD_RING_SG 0x50 /* Ring Scatter Gather Support */
+#define RH_RESP_RING_SG 0x54
+#define RH_IBI_STATUS_RING_SG 0x58
+#define RH_IBI_DATA_RING_SG 0x5c
+#define RING_SG_BLP BIT(31) /* Buffer Vs. List Pointer */
+#define RING_SG_LIST_SIZE GENMASK(15, 0)
+
+/*
+ * Data Buffer Descriptor (in memory)
+ */
+
+#define DATA_BUF_BLP BIT(31) /* Buffer Vs. List Pointer */
+#define DATA_BUF_IOC BIT(30) /* Interrupt on Completion */
+#define DATA_BUF_BLOCK_SIZE GENMASK(15, 0)
+
+
+struct hci_rh_data {
+ void __iomem *regs;
+ void *xfer, *resp, *ibi_status, *ibi_data;
+ dma_addr_t xfer_dma, resp_dma, ibi_status_dma, ibi_data_dma;
+ unsigned int xfer_entries, ibi_status_entries, ibi_chunks_total;
+ unsigned int xfer_struct_sz, resp_struct_sz, ibi_status_sz, ibi_chunk_sz;
+ unsigned int done_ptr, ibi_chunk_ptr;
+ struct hci_xfer **src_xfers;
+ spinlock_t lock;
+ struct completion op_done;
+};
+
+struct hci_rings_data {
+ unsigned int total;
+ struct hci_rh_data headers[];
+};
+
+struct hci_dma_dev_ibi_data {
+ struct i3c_generic_ibi_pool *pool;
+ unsigned int max_len;
+};
+
+static inline u32 lo32(dma_addr_t physaddr)
+{
+ return physaddr;
+}
+
+static inline u32 hi32(dma_addr_t physaddr)
+{
+ /* trickery to avoid compiler warnings on 32-bit build targets */
+ if (sizeof(dma_addr_t) > 4) {
+ u64 hi = physaddr;
+ return hi >> 32;
+ }
+ return 0;
+}
+
+static void hci_dma_cleanup(struct i3c_hci *hci)
+{
+ struct hci_rings_data *rings = hci->io_data;
+ struct hci_rh_data *rh;
+ unsigned int i;
+
+ if (!rings)
+ return;
+
+ for (i = 0; i < rings->total; i++) {
+ rh = &rings->headers[i];
+
+ rh_reg_write(RING_CONTROL, 0);
+ rh_reg_write(CR_SETUP, 0);
+ rh_reg_write(IBI_SETUP, 0);
+ rh_reg_write(INTR_SIGNAL_ENABLE, 0);
+
+ if (rh->xfer)
+ dma_free_coherent(&hci->master.dev,
+ rh->xfer_struct_sz * rh->xfer_entries,
+ rh->xfer, rh->xfer_dma);
+ if (rh->resp)
+ dma_free_coherent(&hci->master.dev,
+ rh->resp_struct_sz * rh->xfer_entries,
+ rh->resp, rh->resp_dma);
+ kfree(rh->src_xfers);
+ if (rh->ibi_status)
+ dma_free_coherent(&hci->master.dev,
+ rh->ibi_status_sz * rh->ibi_status_entries,
+ rh->ibi_status, rh->ibi_status_dma);
+ if (rh->ibi_data_dma)
+ dma_unmap_single(&hci->master.dev, rh->ibi_data_dma,
+ rh->ibi_chunk_sz * rh->ibi_chunks_total,
+ DMA_FROM_DEVICE);
+ kfree(rh->ibi_data);
+ }
+
+ rhs_reg_write(CONTROL, 0);
+
+ kfree(rings);
+ hci->io_data = NULL;
+}
+
+static int hci_dma_init(struct i3c_hci *hci)
+{
+ struct hci_rings_data *rings;
+ struct hci_rh_data *rh;
+ u32 regval;
+ unsigned int i, nr_rings, xfers_sz, resps_sz;
+ unsigned int ibi_status_ring_sz, ibi_data_ring_sz;
+ int ret;
+
+ regval = rhs_reg_read(CONTROL);
+ nr_rings = FIELD_GET(MAX_HEADER_COUNT_CAP, regval);
+ dev_info(&hci->master.dev, "%d DMA rings available\n", nr_rings);
+ if (unlikely(nr_rings > 8)) {
+ dev_err(&hci->master.dev, "number of rings should be <= 8\n");
+ nr_rings = 8;
+ }
+ if (nr_rings > XFER_RINGS)
+ nr_rings = XFER_RINGS;
+	rings = kzalloc(struct_size(rings, headers, nr_rings), GFP_KERNEL);
+ if (!rings)
+ return -ENOMEM;
+ hci->io_data = rings;
+ rings->total = nr_rings;
+
+ for (i = 0; i < rings->total; i++) {
+ u32 offset = rhs_reg_read(RHn_OFFSET(i));
+
+ dev_info(&hci->master.dev, "Ring %d at offset %#x\n", i, offset);
+ ret = -EINVAL;
+ if (!offset)
+ goto err_out;
+ rh = &rings->headers[i];
+ rh->regs = hci->base_regs + offset;
+ spin_lock_init(&rh->lock);
+ init_completion(&rh->op_done);
+
+ rh->xfer_entries = XFER_RING_ENTRIES;
+
+ regval = rh_reg_read(CR_SETUP);
+ rh->xfer_struct_sz = FIELD_GET(CR_XFER_STRUCT_SIZE, regval);
+ rh->resp_struct_sz = FIELD_GET(CR_RESP_STRUCT_SIZE, regval);
+ DBG("xfer_struct_sz = %d, resp_struct_sz = %d",
+ rh->xfer_struct_sz, rh->resp_struct_sz);
+ xfers_sz = rh->xfer_struct_sz * rh->xfer_entries;
+ resps_sz = rh->resp_struct_sz * rh->xfer_entries;
+
+ rh->xfer = dma_alloc_coherent(&hci->master.dev, xfers_sz,
+ &rh->xfer_dma, GFP_KERNEL);
+ rh->resp = dma_alloc_coherent(&hci->master.dev, resps_sz,
+ &rh->resp_dma, GFP_KERNEL);
+ rh->src_xfers =
+ kmalloc_array(rh->xfer_entries, sizeof(*rh->src_xfers),
+ GFP_KERNEL);
+ ret = -ENOMEM;
+ if (!rh->xfer || !rh->resp || !rh->src_xfers)
+ goto err_out;
+
+ rh_reg_write(CMD_RING_BASE_LO, lo32(rh->xfer_dma));
+ rh_reg_write(CMD_RING_BASE_HI, hi32(rh->xfer_dma));
+ rh_reg_write(RESP_RING_BASE_LO, lo32(rh->resp_dma));
+ rh_reg_write(RESP_RING_BASE_HI, hi32(rh->resp_dma));
+
+ regval = FIELD_PREP(CR_RING_SIZE, rh->xfer_entries);
+ rh_reg_write(CR_SETUP, regval);
+
+ rh_reg_write(INTR_STATUS_ENABLE, 0xffffffff);
+ rh_reg_write(INTR_SIGNAL_ENABLE, INTR_IBI_READY |
+ INTR_TRANSFER_COMPLETION |
+ INTR_RING_OP |
+ INTR_TRANSFER_ERR |
+ INTR_WARN_INS_STOP_MODE |
+ INTR_IBI_RING_FULL |
+ INTR_TRANSFER_ABORT);
+
+ /* IBIs */
+
+ if (i >= IBI_RINGS)
+ goto ring_ready;
+
+ regval = rh_reg_read(IBI_SETUP);
+ rh->ibi_status_sz = FIELD_GET(IBI_STATUS_STRUCT_SIZE, regval);
+ rh->ibi_status_entries = IBI_STATUS_RING_ENTRIES;
+ rh->ibi_chunks_total = IBI_CHUNK_POOL_SIZE;
+
+ rh->ibi_chunk_sz = dma_get_cache_alignment();
+ rh->ibi_chunk_sz *= IBI_CHUNK_CACHELINES;
+ BUG_ON(rh->ibi_chunk_sz > 256);
+
+ ibi_status_ring_sz = rh->ibi_status_sz * rh->ibi_status_entries;
+ ibi_data_ring_sz = rh->ibi_chunk_sz * rh->ibi_chunks_total;
+
+ rh->ibi_status =
+ dma_alloc_coherent(&hci->master.dev, ibi_status_ring_sz,
+ &rh->ibi_status_dma, GFP_KERNEL);
+ rh->ibi_data = kmalloc(ibi_data_ring_sz, GFP_KERNEL);
+ ret = -ENOMEM;
+ if (!rh->ibi_status || !rh->ibi_data)
+ goto err_out;
+ rh->ibi_data_dma =
+ dma_map_single(&hci->master.dev, rh->ibi_data,
+ ibi_data_ring_sz, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&hci->master.dev, rh->ibi_data_dma)) {
+ rh->ibi_data_dma = 0;
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ regval = FIELD_PREP(IBI_STATUS_RING_SIZE,
+ rh->ibi_status_entries) |
+ FIELD_PREP(IBI_DATA_CHUNK_SIZE,
+ ilog2(rh->ibi_chunk_sz) - 2) |
+ FIELD_PREP(IBI_DATA_CHUNK_COUNT,
+ rh->ibi_chunks_total);
+ rh_reg_write(IBI_SETUP, regval);
+
+ regval = rh_reg_read(INTR_SIGNAL_ENABLE);
+ regval |= INTR_IBI_READY;
+ rh_reg_write(INTR_SIGNAL_ENABLE, regval);
+
+ring_ready:
+ rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE);
+ }
+
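+	/* tell the hardware how many ring headers the driver will use */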
+ regval = FIELD_PREP(MAX_HEADER_COUNT, rings->total);
+ rhs_reg_write(CONTROL, regval);
+ return 0;
+
+err_out:
+ hci_dma_cleanup(hci);
+ return ret;
+}
+
+static void hci_dma_unmap_xfer(struct i3c_hci *hci,
+ struct hci_xfer *xfer_list, unsigned int n)
+{
+ struct hci_xfer *xfer;
+ unsigned int i;
+
+ for (i = 0; i < n; i++) {
+ xfer = xfer_list + i;
+ dma_unmap_single(&hci->master.dev,
+ xfer->data_dma, xfer->data_len,
+ xfer->rnw ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ }
+}
+
+static int hci_dma_queue_xfer(struct i3c_hci *hci,
+ struct hci_xfer *xfer_list, int n)
+{
+ struct hci_rings_data *rings = hci->io_data;
+ struct hci_rh_data *rh;
+ unsigned int i, ring, enqueue_ptr;
+ u32 op1_val, op2_val;
+
+ /* For now we only use ring 0 */
+ ring = 0;
+ rh = &rings->headers[ring];
+
+ op1_val = rh_reg_read(RING_OPERATION1);
+ enqueue_ptr = FIELD_GET(RING_OP1_CR_ENQ_PTR, op1_val);
+ for (i = 0; i < n; i++) {
+ struct hci_xfer *xfer = xfer_list + i;
+ u32 *ring_data = rh->xfer + rh->xfer_struct_sz * enqueue_ptr;
+
+ /* store cmd descriptor */
+ *ring_data++ = xfer->cmd_desc[0];
+ *ring_data++ = xfer->cmd_desc[1];
+ if (hci->cmd == &mipi_i3c_hci_cmd_v2) {
+ *ring_data++ = xfer->cmd_desc[2];
+ *ring_data++ = xfer->cmd_desc[3];
+ }
+
+ /* first word of Data Buffer Descriptor Structure */
+ if (!xfer->data)
+ xfer->data_len = 0;
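+		/* request an interrupt upon completion of the last xfer only */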
+ *ring_data++ =
+ FIELD_PREP(DATA_BUF_BLOCK_SIZE, xfer->data_len) |
+ ((i == n - 1) ? DATA_BUF_IOC : 0);
+
+ /* 2nd and 3rd words of Data Buffer Descriptor Structure */
+ if (xfer->data) {
+ xfer->data_dma =
+ dma_map_single(&hci->master.dev,
+ xfer->data,
+ xfer->data_len,
+ xfer->rnw ?
+ DMA_FROM_DEVICE :
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&hci->master.dev,
+ xfer->data_dma)) {
+ hci_dma_unmap_xfer(hci, xfer_list, i);
+ return -ENOMEM;
+ }
+ *ring_data++ = lo32(xfer->data_dma);
+ *ring_data++ = hi32(xfer->data_dma);
+ } else {
+ *ring_data++ = 0;
+ *ring_data++ = 0;
+ }
+
+ /* remember corresponding xfer struct */
+ rh->src_xfers[enqueue_ptr] = xfer;
+ /* remember corresponding ring/entry for this xfer structure */
+ xfer->ring_number = ring;
+ xfer->ring_entry = enqueue_ptr;
+
+ enqueue_ptr = (enqueue_ptr + 1) % rh->xfer_entries;
+
+		/*
+		 * We may update the hardware view of the enqueue pointer
+		 * only if we didn't reach its dequeue pointer: one slot
+		 * always stays empty so a full ring (enqueue == dequeue)
+		 * can be told apart from an empty one.
+		 */
+ op2_val = rh_reg_read(RING_OPERATION2);
+ if (enqueue_ptr == FIELD_GET(RING_OP2_CR_DEQ_PTR, op2_val)) {
+ /* the ring is full */
+ hci_dma_unmap_xfer(hci, xfer_list, i + 1);
+ return -EBUSY;
+ }
+ }
+
+ /* take care to update the hardware enqueue pointer atomically */
+ spin_lock_irq(&rh->lock);
+ op1_val = rh_reg_read(RING_OPERATION1);
+ op1_val &= ~RING_OP1_CR_ENQ_PTR;
+ op1_val |= FIELD_PREP(RING_OP1_CR_ENQ_PTR, enqueue_ptr);
+ rh_reg_write(RING_OPERATION1, op1_val);
+ spin_unlock_irq(&rh->lock);
+
+ return 0;
+}
+
+static bool hci_dma_dequeue_xfer(struct i3c_hci *hci,
+ struct hci_xfer *xfer_list, int n)
+{
+ struct hci_rings_data *rings = hci->io_data;
+ struct hci_rh_data *rh = &rings->headers[xfer_list[0].ring_number];
+ unsigned int i;
+ bool did_unqueue = false;
+
+ /* stop the ring */
+ rh_reg_write(RING_CONTROL, RING_CTRL_ABORT);
+ if (wait_for_completion_timeout(&rh->op_done, HZ) == 0) {
+ /*
+		 * We're in deep trouble if this condition is ever met.
+ * Hardware might still be writing to memory, etc.
+ * Better suspend the world than risking silent corruption.
+ */
+ dev_crit(&hci->master.dev, "unable to abort the ring\n");
+ BUG();
+ }
+
+ for (i = 0; i < n; i++) {
+ struct hci_xfer *xfer = xfer_list + i;
+ int idx = xfer->ring_entry;
+
+ /*
+ * At the time the abort happened, the xfer might have
+ * completed already. If not then replace corresponding
+ * descriptor entries with a no-op.
+ */
+ if (idx >= 0) {
+ u32 *ring_data = rh->xfer + rh->xfer_struct_sz * idx;
+
+ /* store no-op cmd descriptor */
+ *ring_data++ = FIELD_PREP(CMD_0_ATTR, 0x7);
+ *ring_data++ = 0;
+ if (hci->cmd == &mipi_i3c_hci_cmd_v2) {
+ *ring_data++ = 0;
+ *ring_data++ = 0;
+ }
+
+ /* disassociate this xfer struct */
+ rh->src_xfers[idx] = NULL;
+
+ /* and unmap it */
+ hci_dma_unmap_xfer(hci, xfer, 1);
+
+ did_unqueue = true;
+ }
+ }
+
+ /* restart the ring */
+ rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE);
+
+ return did_unqueue;
+}
+
+static void hci_dma_xfer_done(struct i3c_hci *hci, struct hci_rh_data *rh)
+{
+ u32 op1_val, op2_val, resp, *ring_resp;
+ unsigned int tid, done_ptr = rh->done_ptr;
+ struct hci_xfer *xfer;
+
+ for (;;) {
+ op2_val = rh_reg_read(RING_OPERATION2);
+ if (done_ptr == FIELD_GET(RING_OP2_CR_DEQ_PTR, op2_val))
+ break;
+
+ ring_resp = rh->resp + rh->resp_struct_sz * done_ptr;
+ resp = *ring_resp;
+ tid = RESP_TID(resp);
+ DBG("resp = 0x%08x", resp);
+
+ xfer = rh->src_xfers[done_ptr];
+ if (!xfer) {
+ DBG("orphaned ring entry");
+ } else {
+ hci_dma_unmap_xfer(hci, xfer, 1);
+ xfer->ring_entry = -1;
+ xfer->response = resp;
+ if (tid != xfer->cmd_tid) {
+ dev_err(&hci->master.dev,
+ "response tid=%d when expecting %d\n",
+ tid, xfer->cmd_tid);
+ /* TODO: do something about it? */
+ }
+ if (xfer->completion)
+ complete(xfer->completion);
+ }
+
+ done_ptr = (done_ptr + 1) % rh->xfer_entries;
+ rh->done_ptr = done_ptr;
+ }
+
+ /* take care to update the software dequeue pointer atomically */
+ spin_lock(&rh->lock);
+ op1_val = rh_reg_read(RING_OPERATION1);
+ op1_val &= ~RING_OP1_CR_SW_DEQ_PTR;
+ op1_val |= FIELD_PREP(RING_OP1_CR_SW_DEQ_PTR, done_ptr);
+ rh_reg_write(RING_OPERATION1, op1_val);
+ spin_unlock(&rh->lock);
+}
+
+static int hci_dma_request_ibi(struct i3c_hci *hci, struct i3c_dev_desc *dev,
+ const struct i3c_ibi_setup *req)
+{
+ struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
+ struct i3c_generic_ibi_pool *pool;
+ struct hci_dma_dev_ibi_data *dev_ibi;
+
+ dev_ibi = kmalloc(sizeof(*dev_ibi), GFP_KERNEL);
+ if (!dev_ibi)
+ return -ENOMEM;
+ pool = i3c_generic_ibi_alloc_pool(dev, req);
+ if (IS_ERR(pool)) {
+ kfree(dev_ibi);
+ return PTR_ERR(pool);
+ }
+ dev_ibi->pool = pool;
+ dev_ibi->max_len = req->max_payload_len;
+ dev_data->ibi_data = dev_ibi;
+ return 0;
+}
+
+static void hci_dma_free_ibi(struct i3c_hci *hci, struct i3c_dev_desc *dev)
+{
+ struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
+ struct hci_dma_dev_ibi_data *dev_ibi = dev_data->ibi_data;
+
+ dev_data->ibi_data = NULL;
+ i3c_generic_ibi_free_pool(dev_ibi->pool);
+ kfree(dev_ibi);
+}
+
+static void hci_dma_recycle_ibi_slot(struct i3c_hci *hci,
+ struct i3c_dev_desc *dev,
+ struct i3c_ibi_slot *slot)
+{
+ struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
+ struct hci_dma_dev_ibi_data *dev_ibi = dev_data->ibi_data;
+
+ i3c_generic_ibi_recycle_slot(dev_ibi->pool, slot);
+}
+
+static void hci_dma_process_ibi(struct i3c_hci *hci, struct hci_rh_data *rh)
+{
+ struct i3c_dev_desc *dev;
+ struct i3c_hci_dev_data *dev_data;
+ struct hci_dma_dev_ibi_data *dev_ibi;
+ struct i3c_ibi_slot *slot;
+ u32 op1_val, op2_val, ibi_status_error;
+ unsigned int ptr, enq_ptr, deq_ptr;
+ unsigned int ibi_size, ibi_chunks, ibi_data_offset, first_part;
+ int ibi_addr, last_ptr;
+ void *ring_ibi_data;
+ dma_addr_t ring_ibi_data_dma;
+
+ op1_val = rh_reg_read(RING_OPERATION1);
+ deq_ptr = FIELD_GET(RING_OP1_IBI_DEQ_PTR, op1_val);
+
+ op2_val = rh_reg_read(RING_OPERATION2);
+ enq_ptr = FIELD_GET(RING_OP2_IBI_ENQ_PTR, op2_val);
+
+ ibi_status_error = 0;
+ ibi_addr = -1;
+ ibi_chunks = 0;
+ ibi_size = 0;
+ last_ptr = -1;
+
+	/* let's find out all we can about this IBI */
+ for (ptr = deq_ptr; ptr != enq_ptr;
+ ptr = (ptr + 1) % rh->ibi_status_entries) {
+ u32 ibi_status, *ring_ibi_status;
+ unsigned int chunks;
+
+ ring_ibi_status = rh->ibi_status + rh->ibi_status_sz * ptr;
+ ibi_status = *ring_ibi_status;
+ DBG("status = %#x", ibi_status);
+
+ if (ibi_status_error) {
+ /* we no longer care */
+ } else if (ibi_status & IBI_ERROR) {
+ ibi_status_error = ibi_status;
+ } else if (ibi_addr == -1) {
+ ibi_addr = FIELD_GET(IBI_TARGET_ADDR, ibi_status);
+ } else if (ibi_addr != FIELD_GET(IBI_TARGET_ADDR, ibi_status)) {
+ /* the address changed unexpectedly */
+ ibi_status_error = ibi_status;
+ }
+
+ chunks = FIELD_GET(IBI_CHUNKS, ibi_status);
+ ibi_chunks += chunks;
+ if (!(ibi_status & IBI_LAST_STATUS)) {
+ ibi_size += chunks * rh->ibi_chunk_sz;
+ } else {
+ ibi_size += FIELD_GET(IBI_DATA_LENGTH, ibi_status);
+ last_ptr = ptr;
+ break;
+ }
+ }
+
+ /* validate what we've got */
+
+ if (last_ptr == -1) {
+ /* this IBI sequence is not yet complete */
+ DBG("no LAST_STATUS available (e=%d d=%d)", enq_ptr, deq_ptr);
+ return;
+ }
+ deq_ptr = last_ptr + 1;
+ deq_ptr %= rh->ibi_status_entries;
+
+ if (ibi_status_error) {
+ dev_err(&hci->master.dev, "IBI error from %#x\n", ibi_addr);
+ goto done;
+ }
+
+ /* determine who this is for */
+ dev = i3c_hci_addr_to_dev(hci, ibi_addr);
+ if (!dev) {
+ dev_err(&hci->master.dev,
+ "IBI for unknown device %#x\n", ibi_addr);
+ goto done;
+ }
+
+ dev_data = i3c_dev_get_master_data(dev);
+ dev_ibi = dev_data->ibi_data;
+ if (ibi_size > dev_ibi->max_len) {
+ dev_err(&hci->master.dev, "IBI payload too big (%d > %d)\n",
+ ibi_size, dev_ibi->max_len);
+ goto done;
+ }
+
+ /*
+ * This ring model is not suitable for zero-copy processing of IBIs.
+ * We have the data chunk ring wrap-around to deal with, meaning
+	 * that the payload might span multiple chunks, beginning at the
+	 * end of the ring and wrapping to the start of the ring. Furthermore,
+ * there is no guarantee that those chunks will be released in order
+ * and in a timely manner by the upper driver. So let's just copy
+ * them to a discrete buffer. In practice they're supposed to be
+ * small anyway.
+ */
+ slot = i3c_generic_ibi_get_free_slot(dev_ibi->pool);
+ if (!slot) {
+ dev_err(&hci->master.dev, "no free slot for IBI\n");
+ goto done;
+ }
+
+ /* copy first part of the payload */
+ ibi_data_offset = rh->ibi_chunk_sz * rh->ibi_chunk_ptr;
+ ring_ibi_data = rh->ibi_data + ibi_data_offset;
+ ring_ibi_data_dma = rh->ibi_data_dma + ibi_data_offset;
+ first_part = (rh->ibi_chunks_total - rh->ibi_chunk_ptr)
+ * rh->ibi_chunk_sz;
+ if (first_part > ibi_size)
+ first_part = ibi_size;
+ dma_sync_single_for_cpu(&hci->master.dev, ring_ibi_data_dma,
+ first_part, DMA_FROM_DEVICE);
+ memcpy(slot->data, ring_ibi_data, first_part);
+
+ /* copy second part if any */
+ if (ibi_size > first_part) {
+ /* we wrap back to the start and copy remaining data */
+ ring_ibi_data = rh->ibi_data;
+ ring_ibi_data_dma = rh->ibi_data_dma;
+ dma_sync_single_for_cpu(&hci->master.dev, ring_ibi_data_dma,
+ ibi_size - first_part, DMA_FROM_DEVICE);
+ memcpy(slot->data + first_part, ring_ibi_data,
+ ibi_size - first_part);
+ }
+
+ /* submit it */
+ slot->dev = dev;
+ slot->len = ibi_size;
+ i3c_master_queue_ibi(dev, slot);
+
+done:
+ /* take care to update the ibi dequeue pointer atomically */
+ spin_lock(&rh->lock);
+ op1_val = rh_reg_read(RING_OPERATION1);
+ op1_val &= ~RING_OP1_IBI_DEQ_PTR;
+ op1_val |= FIELD_PREP(RING_OP1_IBI_DEQ_PTR, deq_ptr);
+ rh_reg_write(RING_OPERATION1, op1_val);
+ spin_unlock(&rh->lock);
+
+ /* update the chunk pointer */
+ rh->ibi_chunk_ptr += ibi_chunks;
+ rh->ibi_chunk_ptr %= rh->ibi_chunks_total;
+
+ /* and tell the hardware about freed chunks */
+ rh_reg_write(CHUNK_CONTROL, rh_reg_read(CHUNK_CONTROL) + ibi_chunks);
+}
+
+static bool hci_dma_irq_handler(struct i3c_hci *hci, unsigned int mask)
+{
+ struct hci_rings_data *rings = hci->io_data;
+ unsigned int i;
+ bool handled = false;
+
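+	/*
+	 * Each bit in the mask (INTR_HC_RINGS bits passed down by the
+	 * top-level interrupt handler) designates one ring to service.
+	 */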
+ for (i = 0; mask && i < 8; i++) {
+ struct hci_rh_data *rh;
+ u32 status;
+
+ if (!(mask & BIT(i)))
+ continue;
+ mask &= ~BIT(i);
+
+ rh = &rings->headers[i];
+ status = rh_reg_read(INTR_STATUS);
+ DBG("rh%d status: %#x", i, status);
+ if (!status)
+ continue;
+ rh_reg_write(INTR_STATUS, status);
+
+ if (status & INTR_IBI_READY)
+ hci_dma_process_ibi(hci, rh);
+ if (status & (INTR_TRANSFER_COMPLETION | INTR_TRANSFER_ERR))
+ hci_dma_xfer_done(hci, rh);
+ if (status & INTR_RING_OP)
+ complete(&rh->op_done);
+
+ if (status & INTR_TRANSFER_ABORT)
+ dev_notice_ratelimited(&hci->master.dev,
+ "ring %d: Transfer Aborted\n", i);
+ if (status & INTR_WARN_INS_STOP_MODE)
+ dev_warn_ratelimited(&hci->master.dev,
+ "ring %d: Inserted Stop on Mode Change\n", i);
+ if (status & INTR_IBI_RING_FULL)
+ dev_err_ratelimited(&hci->master.dev,
+ "ring %d: IBI Ring Full Condition\n", i);
+
+ handled = true;
+ }
+
+ return handled;
+}
+
+const struct hci_io_ops mipi_i3c_hci_dma = {
+ .init = hci_dma_init,
+ .cleanup = hci_dma_cleanup,
+ .queue_xfer = hci_dma_queue_xfer,
+ .dequeue_xfer = hci_dma_dequeue_xfer,
+ .irq_handler = hci_dma_irq_handler,
+ .request_ibi = hci_dma_request_ibi,
+ .free_ibi = hci_dma_free_ibi,
+ .recycle_ibi_slot = hci_dma_recycle_ibi_slot,
+};
diff --git a/drivers/i3c/master/mipi-i3c-hci/ext_caps.c b/drivers/i3c/master/mipi-i3c-hci/ext_caps.c
new file mode 100644
index 000000000000..2e9b23efdc45
--- /dev/null
+++ b/drivers/i3c/master/mipi-i3c-hci/ext_caps.c
@@ -0,0 +1,308 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2020, MIPI Alliance, Inc.
+ *
+ * Author: Nicolas Pitre <npitre@baylibre.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/i3c/master.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+
+#include "hci.h"
+#include "ext_caps.h"
+#include "xfer_mode_rate.h"
+
+
+/* Extended Capability Header */
+#define CAP_HEADER_LENGTH GENMASK(23, 8)
+#define CAP_HEADER_ID GENMASK(7, 0)
+
+static int hci_extcap_hardware_id(struct i3c_hci *hci, void __iomem *base)
+{
+ hci->vendor_mipi_id = readl(base + 0x04);
+ hci->vendor_version_id = readl(base + 0x08);
+ hci->vendor_product_id = readl(base + 0x0c);
+
+ dev_info(&hci->master.dev, "vendor MIPI ID: %#x\n", hci->vendor_mipi_id);
+ dev_info(&hci->master.dev, "vendor version ID: %#x\n", hci->vendor_version_id);
+ dev_info(&hci->master.dev, "vendor product ID: %#x\n", hci->vendor_product_id);
+
+ /* ought to go in a table if this grows too much */
+ switch (hci->vendor_mipi_id) {
+ case MIPI_VENDOR_NXP:
+ hci->quirks |= HCI_QUIRK_RAW_CCC;
+ DBG("raw CCC quirks set");
+ break;
+ }
+
+ return 0;
+}
+
+static int hci_extcap_master_config(struct i3c_hci *hci, void __iomem *base)
+{
+ u32 master_config = readl(base + 0x04);
+ unsigned int operation_mode = FIELD_GET(GENMASK(5, 4), master_config);
+ static const char * const functionality[] = {
+ "(unknown)", "master only", "target only",
+ "primary/secondary master" };
+ dev_info(&hci->master.dev, "operation mode: %s\n", functionality[operation_mode]);
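+	/* odd operation modes (1 and 3) include master capability */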
+ if (operation_mode & 0x1)
+ return 0;
+ dev_err(&hci->master.dev, "only master mode is currently supported\n");
+ return -EOPNOTSUPP;
+}
+
+static int hci_extcap_multi_bus(struct i3c_hci *hci, void __iomem *base)
+{
+ u32 bus_instance = readl(base + 0x04);
+ unsigned int count = FIELD_GET(GENMASK(3, 0), bus_instance);
+
+ dev_info(&hci->master.dev, "%d bus instances\n", count);
+ return 0;
+}
+
+static int hci_extcap_xfer_modes(struct i3c_hci *hci, void __iomem *base)
+{
+ u32 header = readl(base);
+ u32 entries = FIELD_GET(CAP_HEADER_LENGTH, header) - 1;
+ unsigned int index;
+
+ dev_info(&hci->master.dev, "transfer mode table has %d entries\n",
+ entries);
+ base += 4; /* skip header */
+ for (index = 0; index < entries; index++) {
+ u32 mode_entry = readl(base);
+
+ DBG("mode %d: 0x%08x", index, mode_entry);
+ /* TODO: will be needed when I3C core does more than SDR */
+ base += 4;
+ }
+
+ return 0;
+}
+
+static int hci_extcap_xfer_rates(struct i3c_hci *hci, void __iomem *base)
+{
+ u32 header = readl(base);
+ u32 entries = FIELD_GET(CAP_HEADER_LENGTH, header) - 1;
+ u32 rate_entry;
+ unsigned int index, rate, rate_id, mode_id;
+
+ base += 4; /* skip header */
+
+ dev_info(&hci->master.dev, "available data rates:\n");
+ for (index = 0; index < entries; index++) {
+ rate_entry = readl(base);
+ DBG("entry %d: 0x%08x", index, rate_entry);
+ rate = FIELD_GET(XFERRATE_ACTUAL_RATE_KHZ, rate_entry);
+ rate_id = FIELD_GET(XFERRATE_RATE_ID, rate_entry);
+ mode_id = FIELD_GET(XFERRATE_MODE_ID, rate_entry);
+ dev_info(&hci->master.dev, "rate %d for %s = %d kHz\n",
+ rate_id,
+ mode_id == XFERRATE_MODE_I3C ? "I3C" :
+ mode_id == XFERRATE_MODE_I2C ? "I2C" :
+ "unknown mode",
+ rate);
+ base += 4;
+ }
+
+ return 0;
+}
+
+static int hci_extcap_auto_command(struct i3c_hci *hci, void __iomem *base)
+{
+ u32 autocmd_ext_caps = readl(base + 0x04);
+ unsigned int max_count = FIELD_GET(GENMASK(3, 0), autocmd_ext_caps);
+ u32 autocmd_ext_config = readl(base + 0x08);
+ unsigned int count = FIELD_GET(GENMASK(3, 0), autocmd_ext_config);
+
+ dev_info(&hci->master.dev, "%d/%d active auto-command entries\n",
+ count, max_count);
+ /* remember auto-command register location for later use */
+ hci->AUTOCMD_regs = base;
+ return 0;
+}
+
+static int hci_extcap_debug(struct i3c_hci *hci, void __iomem *base)
+{
+ dev_info(&hci->master.dev, "debug registers present\n");
+ hci->DEBUG_regs = base;
+ return 0;
+}
+
+static int hci_extcap_scheduled_cmd(struct i3c_hci *hci, void __iomem *base)
+{
+ dev_info(&hci->master.dev, "scheduled commands available\n");
+ /* hci->schedcmd_regs = base; */
+ return 0;
+}
+
+static int hci_extcap_non_curr_master(struct i3c_hci *hci, void __iomem *base)
+{
+ dev_info(&hci->master.dev, "Non-Current Master support available\n");
+ /* hci->NCM_regs = base; */
+ return 0;
+}
+
+static int hci_extcap_ccc_resp_conf(struct i3c_hci *hci, void __iomem *base)
+{
+ dev_info(&hci->master.dev, "CCC Response Configuration available\n");
+ return 0;
+}
+
+static int hci_extcap_global_DAT(struct i3c_hci *hci, void __iomem *base)
+{
+ dev_info(&hci->master.dev, "Global DAT available\n");
+ return 0;
+}
+
+static int hci_extcap_multilane(struct i3c_hci *hci, void __iomem *base)
+{
+ dev_info(&hci->master.dev, "Master Multi-Lane support available\n");
+ return 0;
+}
+
+static int hci_extcap_ncm_multilane(struct i3c_hci *hci, void __iomem *base)
+{
+ dev_info(&hci->master.dev, "NCM Multi-Lane support available\n");
+ return 0;
+}
+
+struct hci_ext_caps {
+ u8 id;
+ u16 min_length;
+ int (*parser)(struct i3c_hci *hci, void __iomem *base);
+};
+
+#define EXT_CAP(_id, _highest_mandatory_reg_offset, _parser) \
+ { .id = (_id), .parser = (_parser), \
+ .min_length = (_highest_mandatory_reg_offset)/4 + 1 }
+
+static const struct hci_ext_caps ext_capabilities[] = {
+ EXT_CAP(0x01, 0x0c, hci_extcap_hardware_id),
+ EXT_CAP(0x02, 0x04, hci_extcap_master_config),
+ EXT_CAP(0x03, 0x04, hci_extcap_multi_bus),
+ EXT_CAP(0x04, 0x24, hci_extcap_xfer_modes),
+ EXT_CAP(0x05, 0x08, hci_extcap_auto_command),
+ EXT_CAP(0x08, 0x40, hci_extcap_xfer_rates),
+ EXT_CAP(0x0c, 0x10, hci_extcap_debug),
+ EXT_CAP(0x0d, 0x0c, hci_extcap_scheduled_cmd),
+ EXT_CAP(0x0e, 0x80, hci_extcap_non_curr_master), /* TODO confirm size */
+ EXT_CAP(0x0f, 0x04, hci_extcap_ccc_resp_conf),
+ EXT_CAP(0x10, 0x08, hci_extcap_global_DAT),
+ EXT_CAP(0x9d, 0x04, hci_extcap_multilane),
+ EXT_CAP(0x9e, 0x04, hci_extcap_ncm_multilane),
+};
+
+static int hci_extcap_vendor_NXP(struct i3c_hci *hci, void __iomem *base)
+{
+ hci->vendor_data = (__force void *)base;
+ dev_info(&hci->master.dev, "Build Date Info = %#x\n", readl(base + 1*4));
+ /* reset the FPGA */
+ writel(0xdeadbeef, base + 1*4);
+ return 0;
+}
+
+struct hci_ext_cap_vendor_specific {
+ u32 vendor;
+ u8 cap;
+ u16 min_length;
+ int (*parser)(struct i3c_hci *hci, void __iomem *base);
+};
+
+#define EXT_CAP_VENDOR(_vendor, _cap, _highest_mandatory_reg_offset) \
+ { .vendor = (MIPI_VENDOR_##_vendor), .cap = (_cap), \
+ .parser = (hci_extcap_vendor_##_vendor), \
+ .min_length = (_highest_mandatory_reg_offset)/4 + 1 }
+
+static const struct hci_ext_cap_vendor_specific vendor_ext_caps[] = {
+ EXT_CAP_VENDOR(NXP, 0xc0, 0x20),
+};
+
+static int hci_extcap_vendor_specific(struct i3c_hci *hci, void __iomem *base,
+ u32 cap_id, u32 cap_length)
+{
+ const struct hci_ext_cap_vendor_specific *vendor_cap_entry;
+ int i;
+
+ vendor_cap_entry = NULL;
+ for (i = 0; i < ARRAY_SIZE(vendor_ext_caps); i++) {
+ if (vendor_ext_caps[i].vendor == hci->vendor_mipi_id &&
+ vendor_ext_caps[i].cap == cap_id) {
+ vendor_cap_entry = &vendor_ext_caps[i];
+ break;
+ }
+ }
+
+ if (!vendor_cap_entry) {
+ dev_notice(&hci->master.dev,
+ "unknown ext_cap 0x%02x for vendor 0x%02x\n",
+ cap_id, hci->vendor_mipi_id);
+ return 0;
+ }
+ if (cap_length < vendor_cap_entry->min_length) {
+ dev_err(&hci->master.dev,
+ "ext_cap 0x%02x has size %d (expecting >= %d)\n",
+ cap_id, cap_length, vendor_cap_entry->min_length);
+ return -EINVAL;
+ }
+ return vendor_cap_entry->parser(hci, base);
+}
+
+int i3c_hci_parse_ext_caps(struct i3c_hci *hci)
+{
+ void __iomem *curr_cap = hci->EXTCAPS_regs;
+ void __iomem *end = curr_cap + 0x1000; /* some arbitrary limit */
+ u32 cap_header, cap_id, cap_length;
+ const struct hci_ext_caps *cap_entry;
+ int i, err = 0;
+
+ if (!curr_cap)
+ return 0;
+
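+	/* walk the capability list; a zero-length header terminates it */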
+ for (; !err && curr_cap < end; curr_cap += cap_length * 4) {
+ cap_header = readl(curr_cap);
+ cap_id = FIELD_GET(CAP_HEADER_ID, cap_header);
+ cap_length = FIELD_GET(CAP_HEADER_LENGTH, cap_header);
+ DBG("id=0x%02x length=%d", cap_id, cap_length);
+ if (!cap_length)
+ break;
+ if (curr_cap + cap_length * 4 >= end) {
+ dev_err(&hci->master.dev,
+ "ext_cap 0x%02x has size %d (too big)\n",
+ cap_id, cap_length);
+ err = -EINVAL;
+ break;
+ }
+
+ if (cap_id >= 0xc0 && cap_id <= 0xcf) {
+ err = hci_extcap_vendor_specific(hci, curr_cap,
+ cap_id, cap_length);
+ continue;
+ }
+
+ cap_entry = NULL;
+ for (i = 0; i < ARRAY_SIZE(ext_capabilities); i++) {
+ if (ext_capabilities[i].id == cap_id) {
+ cap_entry = &ext_capabilities[i];
+ break;
+ }
+ }
+ if (!cap_entry) {
+ dev_notice(&hci->master.dev,
+ "unknown ext_cap 0x%02x\n", cap_id);
+ } else if (cap_length < cap_entry->min_length) {
+ dev_err(&hci->master.dev,
+ "ext_cap 0x%02x has size %d (expecting >= %d)\n",
+ cap_id, cap_length, cap_entry->min_length);
+ err = -EINVAL;
+ } else {
+ err = cap_entry->parser(hci, curr_cap);
+ }
+ }
+ return err;
+}
diff --git a/drivers/i3c/master/mipi-i3c-hci/ext_caps.h b/drivers/i3c/master/mipi-i3c-hci/ext_caps.h
new file mode 100644
index 000000000000..9df17822fdb4
--- /dev/null
+++ b/drivers/i3c/master/mipi-i3c-hci/ext_caps.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/*
+ * Copyright (c) 2020, MIPI Alliance, Inc.
+ *
+ * Author: Nicolas Pitre <npitre@baylibre.com>
+ *
+ * Extended Capability Definitions
+ */
+
+#ifndef EXTCAPS_H
+#define EXTCAPS_H
+
+/* MIPI vendor IDs */
+#define MIPI_VENDOR_NXP 0x11b
+
+
+int i3c_hci_parse_ext_caps(struct i3c_hci *hci);
+
+#endif
diff --git a/drivers/i3c/master/mipi-i3c-hci/hci.h b/drivers/i3c/master/mipi-i3c-hci/hci.h
new file mode 100644
index 000000000000..80beb1d5be8f
--- /dev/null
+++ b/drivers/i3c/master/mipi-i3c-hci/hci.h
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/*
+ * Copyright (c) 2020, MIPI Alliance, Inc.
+ *
+ * Author: Nicolas Pitre <npitre@baylibre.com>
+ *
+ * Common HCI stuff
+ */
+
+#ifndef HCI_H
+#define HCI_H
+
+
+/* Handy logging macro to save on line length */
+#define DBG(x, ...) pr_devel("%s: " x "\n", __func__, ##__VA_ARGS__)
+
+/* 32-bit word aware bit and mask macros */
+#define W0_MASK(h, l) GENMASK((h) - 0, (l) - 0)
+#define W1_MASK(h, l) GENMASK((h) - 32, (l) - 32)
+#define W2_MASK(h, l) GENMASK((h) - 64, (l) - 64)
+#define W3_MASK(h, l) GENMASK((h) - 96, (l) - 96)
+
+/* Same for single bit macros (trailing _ to align with W*_MASK width) */
+#define W0_BIT_(x) BIT((x) - 0)
+#define W1_BIT_(x) BIT((x) - 32)
+#define W2_BIT_(x) BIT((x) - 64)
+#define W3_BIT_(x) BIT((x) - 96)
+
+
+struct hci_cmd_ops;
+
+/* Our main structure */
+struct i3c_hci {
+ struct i3c_master_controller master;
+ void __iomem *base_regs;
+ void __iomem *DAT_regs;
+ void __iomem *DCT_regs;
+ void __iomem *RHS_regs;
+ void __iomem *PIO_regs;
+ void __iomem *EXTCAPS_regs;
+ void __iomem *AUTOCMD_regs;
+ void __iomem *DEBUG_regs;
+ const struct hci_io_ops *io;
+ void *io_data;
+ const struct hci_cmd_ops *cmd;
+ atomic_t next_cmd_tid;
+ u32 caps;
+ unsigned int quirks;
+ unsigned int DAT_entries;
+ unsigned int DAT_entry_size;
+ void *DAT_data;
+ unsigned int DCT_entries;
+ unsigned int DCT_entry_size;
+ u8 version_major;
+ u8 version_minor;
+ u8 revision;
+ u32 vendor_mipi_id;
+ u32 vendor_version_id;
+ u32 vendor_product_id;
+ void *vendor_data;
+};
+
+
+/*
+ * Structure to represent a master initiated transfer.
+ * The rnw, data and data_len fields must be initialized before calling any
+ * hci->cmd->*() method. The cmd method will initialize cmd_desc[] and
+ * possibly modify (clear) the data field. Then xfer->cmd_desc[0] can
+ * be augmented with CMD_0_ROC and/or CMD_0_TOC.
+ * The completion field needs to be initialized before queueing with
+ * hci->io->queue_xfer(), and requires CMD_0_ROC to be set.
+ */
+struct hci_xfer {
+ u32 cmd_desc[4];
+ u32 response;
+ bool rnw;
+ void *data;
+ unsigned int data_len;
+ unsigned int cmd_tid;
+ struct completion *completion;
+ union {
+ struct {
+ /* PIO specific */
+ struct hci_xfer *next_xfer;
+ struct hci_xfer *next_data;
+ struct hci_xfer *next_resp;
+ unsigned int data_left;
+ u32 data_word_before_partial;
+ };
+ struct {
+ /* DMA specific */
+ dma_addr_t data_dma;
+ int ring_number;
+ int ring_entry;
+ };
+ };
+};
+
+static inline struct hci_xfer *hci_alloc_xfer(unsigned int n)
+{
+	return kcalloc(n, sizeof(struct hci_xfer), GFP_KERNEL);
+}
+
+static inline void hci_free_xfer(struct hci_xfer *xfer, unsigned int n)
+{
+ kfree(xfer);
+}
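+
+/*
+ * Sketch of the intended life cycle per the rules above (the hci->cmd
+ * method that fills cmd_desc[] is implementation specific and elided):
+ *
+ *	DECLARE_COMPLETION_ONSTACK(done);
+ *	struct hci_xfer *xfer = hci_alloc_xfer(1);
+ *
+ *	xfer->rnw = true;
+ *	xfer->data = buf;
+ *	xfer->data_len = len;
+ *	(build xfer->cmd_desc[] via the hci->cmd ops here)
+ *	xfer->cmd_desc[0] |= CMD_0_ROC | CMD_0_TOC;
+ *	xfer->completion = &done;
+ *	if (!hci->io->queue_xfer(hci, xfer, 1))
+ *		wait_for_completion(&done);
+ *	hci_free_xfer(xfer, 1);
+ */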
+
+
+/* This abstracts PIO vs DMA operations */
+struct hci_io_ops {
+ bool (*irq_handler)(struct i3c_hci *hci, unsigned int mask);
+ int (*queue_xfer)(struct i3c_hci *hci, struct hci_xfer *xfer, int n);
+ bool (*dequeue_xfer)(struct i3c_hci *hci, struct hci_xfer *xfer, int n);
+ int (*request_ibi)(struct i3c_hci *hci, struct i3c_dev_desc *dev,
+ const struct i3c_ibi_setup *req);
+ void (*free_ibi)(struct i3c_hci *hci, struct i3c_dev_desc *dev);
+ void (*recycle_ibi_slot)(struct i3c_hci *hci, struct i3c_dev_desc *dev,
+ struct i3c_ibi_slot *slot);
+ int (*init)(struct i3c_hci *hci);
+ void (*cleanup)(struct i3c_hci *hci);
+};
+
+extern const struct hci_io_ops mipi_i3c_hci_pio;
+extern const struct hci_io_ops mipi_i3c_hci_dma;
+
+
+/* Our per device master private data */
+struct i3c_hci_dev_data {
+ int dat_idx;
+ void *ibi_data;
+};
+
+
+/* list of quirks */
+#define HCI_QUIRK_RAW_CCC BIT(1) /* CCC framing must be explicit */
+
+
+/* global functions */
+void mipi_i3c_hci_resume(struct i3c_hci *hci);
+void mipi_i3c_hci_pio_reset(struct i3c_hci *hci);
+void mipi_i3c_hci_dct_index_reset(struct i3c_hci *hci);
+
+#endif
diff --git a/drivers/i3c/master/mipi-i3c-hci/ibi.h b/drivers/i3c/master/mipi-i3c-hci/ibi.h
new file mode 100644
index 000000000000..e1f98e264da0
--- /dev/null
+++ b/drivers/i3c/master/mipi-i3c-hci/ibi.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/*
+ * Copyright (c) 2020, MIPI Alliance, Inc.
+ *
+ * Author: Nicolas Pitre <npitre@baylibre.com>
+ *
+ * Common IBI related stuff
+ */
+
+#ifndef IBI_H
+#define IBI_H
+
+/*
+ * IBI Status Descriptor bits
+ */
+#define IBI_STS BIT(31)
+#define IBI_ERROR BIT(30)
+#define IBI_STATUS_TYPE BIT(29)
+#define IBI_HW_CONTEXT GENMASK(28, 26)
+#define IBI_TS BIT(25)
+#define IBI_LAST_STATUS BIT(24)
+#define IBI_CHUNKS GENMASK(23, 16)
+#define IBI_ID GENMASK(15, 8)
+#define IBI_TARGET_ADDR GENMASK(15, 9)
+#define IBI_TARGET_RNW BIT(8)
+#define IBI_DATA_LENGTH GENMASK(7, 0)
+
+/* handy helpers */
+static inline struct i3c_dev_desc *
+i3c_hci_addr_to_dev(struct i3c_hci *hci, unsigned int addr)
+{
+ struct i3c_bus *bus = i3c_master_get_bus(&hci->master);
+ struct i3c_dev_desc *dev;
+
+ i3c_bus_for_each_i3cdev(bus, dev) {
+ if (dev->info.dyn_addr == addr)
+ return dev;
+ }
+ return NULL;
+}
+
+#endif
diff --git a/drivers/i3c/master/mipi-i3c-hci/pio.c b/drivers/i3c/master/mipi-i3c-hci/pio.c
new file mode 100644
index 000000000000..d0272aa93599
--- /dev/null
+++ b/drivers/i3c/master/mipi-i3c-hci/pio.c
@@ -0,0 +1,1041 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2020, MIPI Alliance, Inc.
+ *
+ * Author: Nicolas Pitre <npitre@baylibre.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/i3c/master.h>
+#include <linux/io.h>
+
+#include "hci.h"
+#include "cmd.h"
+#include "ibi.h"
+
+
+/*
+ * PIO Access Area
+ */
+
+#define pio_reg_read(r) readl(hci->PIO_regs + (PIO_##r))
+#define pio_reg_write(r, v) writel(v, hci->PIO_regs + (PIO_##r))
+
+#define PIO_COMMAND_QUEUE_PORT 0x00
+#define PIO_RESPONSE_QUEUE_PORT 0x04
+#define PIO_XFER_DATA_PORT 0x08
+#define PIO_IBI_PORT 0x0c
+
+#define PIO_QUEUE_THLD_CTRL 0x10
+#define QUEUE_IBI_STATUS_THLD GENMASK(31, 24)
+#define QUEUE_IBI_DATA_THLD GENMASK(23, 16)
+#define QUEUE_RESP_BUF_THLD GENMASK(15, 8)
+#define QUEUE_CMD_EMPTY_BUF_THLD GENMASK(7, 0)
+
+#define PIO_DATA_BUFFER_THLD_CTRL 0x14
+#define DATA_RX_START_THLD GENMASK(26, 24)
+#define DATA_TX_START_THLD GENMASK(18, 16)
+#define DATA_RX_BUF_THLD GENMASK(10, 8)
+#define DATA_TX_BUF_THLD GENMASK(2, 0)
+
+#define PIO_QUEUE_SIZE 0x18
+#define TX_DATA_BUFFER_SIZE GENMASK(31, 24)
+#define RX_DATA_BUFFER_SIZE GENMASK(23, 16)
+#define IBI_STATUS_SIZE GENMASK(15, 8)
+#define CR_QUEUE_SIZE GENMASK(7, 0)
+
+#define PIO_INTR_STATUS 0x20
+#define PIO_INTR_STATUS_ENABLE 0x24
+#define PIO_INTR_SIGNAL_ENABLE 0x28
+#define PIO_INTR_FORCE 0x2c
+#define STAT_TRANSFER_BLOCKED BIT(25)
+#define STAT_PERR_RESP_UFLOW BIT(24)
+#define STAT_PERR_CMD_OFLOW BIT(23)
+#define STAT_PERR_IBI_UFLOW BIT(22)
+#define STAT_PERR_RX_UFLOW BIT(21)
+#define STAT_PERR_TX_OFLOW BIT(20)
+#define STAT_ERR_RESP_QUEUE_FULL BIT(19)
+#define STAT_WARN_RESP_QUEUE_FULL BIT(18)
+#define STAT_ERR_IBI_QUEUE_FULL BIT(17)
+#define STAT_WARN_IBI_QUEUE_FULL BIT(16)
+#define STAT_ERR_RX_DATA_FULL BIT(15)
+#define STAT_WARN_RX_DATA_FULL BIT(14)
+#define STAT_ERR_TX_DATA_EMPTY BIT(13)
+#define STAT_WARN_TX_DATA_EMPTY BIT(12)
+#define STAT_TRANSFER_ERR BIT(9)
+#define STAT_WARN_INS_STOP_MODE BIT(7)
+#define STAT_TRANSFER_ABORT BIT(5)
+#define STAT_RESP_READY BIT(4)
+#define STAT_CMD_QUEUE_READY BIT(3)
+#define STAT_IBI_STATUS_THLD BIT(2)
+#define STAT_RX_THLD BIT(1)
+#define STAT_TX_THLD BIT(0)
+
+#define PIO_QUEUE_CUR_STATUS 0x38
+#define CUR_IBI_Q_LEVEL GENMASK(28, 20)
+#define CUR_RESP_Q_LEVEL GENMASK(18, 10)
+#define CUR_CMD_Q_EMPTY_LEVEL GENMASK(8, 0)
+
+#define PIO_DATA_BUFFER_CUR_STATUS 0x3c
+#define CUR_RX_BUF_LVL GENMASK(26, 16)
+#define CUR_TX_BUF_LVL GENMASK(10, 0)
+
+/*
+ * Handy status bit combinations
+ */
+
+#define STAT_LATENCY_WARNINGS (STAT_WARN_RESP_QUEUE_FULL | \
+ STAT_WARN_IBI_QUEUE_FULL | \
+ STAT_WARN_RX_DATA_FULL | \
+ STAT_WARN_TX_DATA_EMPTY | \
+ STAT_WARN_INS_STOP_MODE)
+
+#define STAT_LATENCY_ERRORS (STAT_ERR_RESP_QUEUE_FULL | \
+ STAT_ERR_IBI_QUEUE_FULL | \
+ STAT_ERR_RX_DATA_FULL | \
+ STAT_ERR_TX_DATA_EMPTY)
+
+#define STAT_PROG_ERRORS (STAT_TRANSFER_BLOCKED | \
+ STAT_PERR_RESP_UFLOW | \
+ STAT_PERR_CMD_OFLOW | \
+ STAT_PERR_IBI_UFLOW | \
+ STAT_PERR_RX_UFLOW | \
+ STAT_PERR_TX_OFLOW)
+
+#define STAT_ALL_ERRORS (STAT_TRANSFER_ABORT | \
+ STAT_TRANSFER_ERR | \
+ STAT_LATENCY_ERRORS | \
+ STAT_PROG_ERRORS)
+
+struct hci_pio_dev_ibi_data {
+ struct i3c_generic_ibi_pool *pool;
+ unsigned int max_len;
+};
+
+struct hci_pio_ibi_data {
+ struct i3c_ibi_slot *slot;
+ void *data_ptr;
+ unsigned int addr;
+ unsigned int seg_len, seg_cnt;
+ unsigned int max_len;
+ bool last_seg;
+};
+
+struct hci_pio_data {
+ spinlock_t lock;
+ struct hci_xfer *curr_xfer, *xfer_queue;
+ struct hci_xfer *curr_rx, *rx_queue;
+ struct hci_xfer *curr_tx, *tx_queue;
+ struct hci_xfer *curr_resp, *resp_queue;
+ struct hci_pio_ibi_data ibi;
+ unsigned int rx_thresh_size, tx_thresh_size;
+ unsigned int max_ibi_thresh;
+ u32 reg_queue_thresh;
+ u32 enabled_irqs;
+};
+
+static int hci_pio_init(struct i3c_hci *hci)
+{
+ struct hci_pio_data *pio;
+ u32 val, size_val, rx_thresh, tx_thresh, ibi_val;
+
+ pio = kzalloc(sizeof(*pio), GFP_KERNEL);
+ if (!pio)
+ return -ENOMEM;
+
+ hci->io_data = pio;
+ spin_lock_init(&pio->lock);
+
+ size_val = pio_reg_read(QUEUE_SIZE);
+ dev_info(&hci->master.dev, "CMD/RESP FIFO = %ld entries\n",
+ FIELD_GET(CR_QUEUE_SIZE, size_val));
+ dev_info(&hci->master.dev, "IBI FIFO = %ld bytes\n",
+ 4 * FIELD_GET(IBI_STATUS_SIZE, size_val));
+ dev_info(&hci->master.dev, "RX data FIFO = %d bytes\n",
+ 4 * (2 << FIELD_GET(RX_DATA_BUFFER_SIZE, size_val)));
+ dev_info(&hci->master.dev, "TX data FIFO = %d bytes\n",
+ 4 * (2 << FIELD_GET(TX_DATA_BUFFER_SIZE, size_val)));
+
+ /*
+ * Let's initialize data thresholds to half of the actual FIFO size.
+ * The start thresholds aren't used (set to 0) as the FIFO is always
+ * serviced before the corresponding command is queued.
+ */
+ rx_thresh = FIELD_GET(RX_DATA_BUFFER_SIZE, size_val);
+ tx_thresh = FIELD_GET(TX_DATA_BUFFER_SIZE, size_val);
+ if (hci->version_major == 1) {
+		/* sizes are expressed as 2^(n+1), so just subtract 1 if not 0 */
+ if (rx_thresh)
+ rx_thresh -= 1;
+ if (tx_thresh)
+ tx_thresh -= 1;
+ pio->rx_thresh_size = 2 << rx_thresh;
+ pio->tx_thresh_size = 2 << tx_thresh;
+ } else {
+ /* size is 2^(n+1) and threshold is 2^n i.e. already halved */
+ pio->rx_thresh_size = 1 << rx_thresh;
+ pio->tx_thresh_size = 1 << tx_thresh;
+ }
+ val = FIELD_PREP(DATA_RX_BUF_THLD, rx_thresh) |
+ FIELD_PREP(DATA_TX_BUF_THLD, tx_thresh);
+ pio_reg_write(DATA_BUFFER_THLD_CTRL, val);
+
+ /*
+ * Let's raise an interrupt as soon as there is one free cmd slot
+ * or one available response or IBI. For IBI data let's use half the
+ * IBI queue size within allowed bounds.
+ */
+ ibi_val = FIELD_GET(IBI_STATUS_SIZE, size_val);
+ pio->max_ibi_thresh = clamp_val(ibi_val/2, 1, 63);
+ val = FIELD_PREP(QUEUE_IBI_STATUS_THLD, 1) |
+ FIELD_PREP(QUEUE_IBI_DATA_THLD, pio->max_ibi_thresh) |
+ FIELD_PREP(QUEUE_RESP_BUF_THLD, 1) |
+ FIELD_PREP(QUEUE_CMD_EMPTY_BUF_THLD, 1);
+ pio_reg_write(QUEUE_THLD_CTRL, val);
+ pio->reg_queue_thresh = val;
+
+ /* Disable all IRQs but allow all status bits */
+ pio_reg_write(INTR_SIGNAL_ENABLE, 0x0);
+ pio_reg_write(INTR_STATUS_ENABLE, 0xffffffff);
+
+ /* Always accept error interrupts (will be activated on first xfer) */
+ pio->enabled_irqs = STAT_ALL_ERRORS;
+
+ return 0;
+}
+
+static void hci_pio_cleanup(struct i3c_hci *hci)
+{
+ struct hci_pio_data *pio = hci->io_data;
+
+ pio_reg_write(INTR_SIGNAL_ENABLE, 0x0);
+
+ if (pio) {
+ DBG("status = %#x/%#x",
+ pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
+ BUG_ON(pio->curr_xfer);
+ BUG_ON(pio->curr_rx);
+ BUG_ON(pio->curr_tx);
+ BUG_ON(pio->curr_resp);
+ kfree(pio);
+ hci->io_data = NULL;
+ }
+}
+
+static void hci_pio_write_cmd(struct i3c_hci *hci, struct hci_xfer *xfer)
+{
+ DBG("cmd_desc[%d] = 0x%08x", 0, xfer->cmd_desc[0]);
+ DBG("cmd_desc[%d] = 0x%08x", 1, xfer->cmd_desc[1]);
+ pio_reg_write(COMMAND_QUEUE_PORT, xfer->cmd_desc[0]);
+ pio_reg_write(COMMAND_QUEUE_PORT, xfer->cmd_desc[1]);
+ if (hci->cmd == &mipi_i3c_hci_cmd_v2) {
+ DBG("cmd_desc[%d] = 0x%08x", 2, xfer->cmd_desc[2]);
+ DBG("cmd_desc[%d] = 0x%08x", 3, xfer->cmd_desc[3]);
+ pio_reg_write(COMMAND_QUEUE_PORT, xfer->cmd_desc[2]);
+ pio_reg_write(COMMAND_QUEUE_PORT, xfer->cmd_desc[3]);
+ }
+}
+
+static bool hci_pio_do_rx(struct i3c_hci *hci, struct hci_pio_data *pio)
+{
+ struct hci_xfer *xfer = pio->curr_rx;
+ unsigned int nr_words;
+ u32 *p;
+
+ p = xfer->data;
+ p += (xfer->data_len - xfer->data_left) / 4;
+
+ while (xfer->data_left >= 4) {
+ /* bail out if FIFO hasn't reached the threshold value yet */
+ if (!(pio_reg_read(INTR_STATUS) & STAT_RX_THLD))
+ return false;
+ nr_words = min(xfer->data_left / 4, pio->rx_thresh_size);
+ /* extract data from FIFO */
+ xfer->data_left -= nr_words * 4;
+ DBG("now %d left %d", nr_words * 4, xfer->data_left);
+ while (nr_words--)
+ *p++ = pio_reg_read(XFER_DATA_PORT);
+ }
+
+ /* trailing data is retrieved upon response reception */
+ return !xfer->data_left;
+}
+
+static void hci_pio_do_trailing_rx(struct i3c_hci *hci,
+ struct hci_pio_data *pio, unsigned int count)
+{
+ struct hci_xfer *xfer = pio->curr_rx;
+ u32 *p;
+
+ DBG("%d remaining", count);
+
+ p = xfer->data;
+ p += (xfer->data_len - xfer->data_left) / 4;
+
+ if (count >= 4) {
+ unsigned int nr_words = count / 4;
+ /* extract data from FIFO */
+ xfer->data_left -= nr_words * 4;
+ DBG("now %d left %d", nr_words * 4, xfer->data_left);
+ while (nr_words--)
+ *p++ = pio_reg_read(XFER_DATA_PORT);
+ }
+
+ count &= 3;
+ if (count) {
+ /*
+ * There are trailing bytes in the last word.
+ * Fetch it and extract bytes in an endian independent way.
+ * Unlike the TX case, we must not write memory past the
+ * end of the destination buffer.
+ */
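+ /*
+ * Illustrative example: on a little-endian CPU, count == 2 with a
+ * FIFO word of 0xDDCCBBAA stores 0xAA then 0xBB and discards the
+ * rest of the word; the cpu_to_le32() cast makes the same bytes
+ * come out first on big-endian CPUs as well.
+ */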
+ u8 *p_byte = (u8 *)p;
+ u32 data = pio_reg_read(XFER_DATA_PORT);
+
+ xfer->data_word_before_partial = data;
+ xfer->data_left -= count;
+ data = (__force u32) cpu_to_le32(data);
+ while (count--) {
+ *p_byte++ = data;
+ data >>= 8;
+ }
+ }
+}
+
+static bool hci_pio_do_tx(struct i3c_hci *hci, struct hci_pio_data *pio)
+{
+ struct hci_xfer *xfer = pio->curr_tx;
+ unsigned int nr_words;
+ u32 *p;
+
+ p = xfer->data;
+ p += (xfer->data_len - xfer->data_left) / 4;
+
+ while (xfer->data_left >= 4) {
+ /* bail out if FIFO free space is below set threshold */
+ if (!(pio_reg_read(INTR_STATUS) & STAT_TX_THLD))
+ return false;
+ /* we can fill up to that TX threshold */
+ nr_words = min(xfer->data_left / 4, pio->tx_thresh_size);
+ /* push data into the FIFO */
+ xfer->data_left -= nr_words * 4;
+ DBG("now %d left %d", nr_words * 4, xfer->data_left);
+ while (nr_words--)
+ pio_reg_write(XFER_DATA_PORT, *p++);
+ }
+
+ if (xfer->data_left) {
+ /*
+ * There are trailing bytes to send. We can simply load
+ * them from memory as a word which will keep those bytes
+ * in their proper place even on a BE system. This will
+ * also fetch some bytes past the actual buffer, but no one
+ * should care as they won't be sent out.
+ */
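+ /*
+ * Illustrative: with data_left == 3 and buffer bytes AA BB CC,
+ * the whole word at *p is pushed but only the three valid bytes
+ * are transmitted; the byte read past the buffer is dropped.
+ */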
+ if (!(pio_reg_read(INTR_STATUS) & STAT_TX_THLD))
+ return false;
+ DBG("trailing %d", xfer->data_left);
+ pio_reg_write(XFER_DATA_PORT, *p);
+ xfer->data_left = 0;
+ }
+
+ return true;
+}
+
+static bool hci_pio_process_rx(struct i3c_hci *hci, struct hci_pio_data *pio)
+{
+ while (pio->curr_rx && hci_pio_do_rx(hci, pio))
+ pio->curr_rx = pio->curr_rx->next_data;
+ return !pio->curr_rx;
+}
+
+static bool hci_pio_process_tx(struct i3c_hci *hci, struct hci_pio_data *pio)
+{
+ while (pio->curr_tx && hci_pio_do_tx(hci, pio))
+ pio->curr_tx = pio->curr_tx->next_data;
+ return !pio->curr_tx;
+}
+
+static void hci_pio_queue_data(struct i3c_hci *hci, struct hci_pio_data *pio)
+{
+ struct hci_xfer *xfer = pio->curr_xfer;
+ struct hci_xfer *prev_queue_tail;
+
+ if (!xfer->data) {
+ xfer->data_len = xfer->data_left = 0;
+ return;
+ }
+
+ if (xfer->rnw) {
+ prev_queue_tail = pio->rx_queue;
+ pio->rx_queue = xfer;
+ if (pio->curr_rx) {
+ prev_queue_tail->next_data = xfer;
+ } else {
+ pio->curr_rx = xfer;
+ if (!hci_pio_process_rx(hci, pio))
+ pio->enabled_irqs |= STAT_RX_THLD;
+ }
+ } else {
+ prev_queue_tail = pio->tx_queue;
+ pio->tx_queue = xfer;
+ if (pio->curr_tx) {
+ prev_queue_tail->next_data = xfer;
+ } else {
+ pio->curr_tx = xfer;
+ if (!hci_pio_process_tx(hci, pio))
+ pio->enabled_irqs |= STAT_TX_THLD;
+ }
+ }
+}
+
+static void hci_pio_push_to_next_rx(struct i3c_hci *hci, struct hci_xfer *xfer,
+ unsigned int words_to_keep)
+{
+ u32 *from = xfer->data;
+ u32 from_last;
+ unsigned int received, count;
+
+ received = (xfer->data_len - xfer->data_left) / 4;
+ if ((xfer->data_len - xfer->data_left) & 3) {
+ from_last = xfer->data_word_before_partial;
+ received += 1;
+ } else {
+ from_last = from[received];
+ }
+ from += words_to_keep;
+ count = received - words_to_keep;
+
+ while (count) {
+ unsigned int room, left, chunk, bytes_to_move;
+ u32 last_word;
+
+ xfer = xfer->next_data;
+ if (!xfer) {
+ dev_err(&hci->master.dev, "pushing RX data to nonexistent xfer\n");
+ return;
+ }
+
+ room = DIV_ROUND_UP(xfer->data_len, 4);
+ left = DIV_ROUND_UP(xfer->data_left, 4);
+ chunk = min(count, room);
+ if (chunk > left) {
+ hci_pio_push_to_next_rx(hci, xfer, chunk - left);
+ left = chunk;
+ xfer->data_left = left * 4;
+ }
+
+ bytes_to_move = xfer->data_len - xfer->data_left;
+ if (bytes_to_move & 3) {
+ /* preserve word to become partial */
+ u32 *p = xfer->data;
+
+ xfer->data_word_before_partial = p[bytes_to_move / 4];
+ }
+ memmove(xfer->data + chunk, xfer->data, bytes_to_move);
+
+ /* treat last word specially because of partial word issues */
+ chunk -= 1;
+
+ memcpy(xfer->data, from, chunk * 4);
+ xfer->data_left -= chunk * 4;
+ from += chunk;
+ count -= chunk;
+
+ last_word = (count == 1) ? from_last : *from++;
+ if (xfer->data_left < 4) {
+ /*
+ * Like in hci_pio_do_trailing_rx(), preserve the original
+ * word to be stored partially, then store its bytes
+ * in an endian independent way.
+ */
+ u8 *p_byte = xfer->data;
+
+ p_byte += chunk * 4;
+ xfer->data_word_before_partial = last_word;
+ last_word = (__force u32) cpu_to_le32(last_word);
+ while (xfer->data_left--) {
+ *p_byte++ = last_word;
+ last_word >>= 8;
+ }
+ } else {
+ u32 *p = xfer->data;
+
+ p[chunk] = last_word;
+ xfer->data_left -= 4;
+ }
+ count--;
+ }
+}
+
+static void hci_pio_err(struct i3c_hci *hci, struct hci_pio_data *pio,
+ u32 status);
+
+static bool hci_pio_process_resp(struct i3c_hci *hci, struct hci_pio_data *pio)
+{
+ while (pio->curr_resp &&
+ (pio_reg_read(INTR_STATUS) & STAT_RESP_READY)) {
+ struct hci_xfer *xfer = pio->curr_resp;
+ u32 resp = pio_reg_read(RESPONSE_QUEUE_PORT);
+ unsigned int tid = RESP_TID(resp);
+
+ DBG("resp = 0x%08x", resp);
+ if (tid != xfer->cmd_tid) {
+ dev_err(&hci->master.dev,
+ "response tid=%d when expecting %d\n",
+ tid, xfer->cmd_tid);
+ /* let's pretend it is a prog error... any of them */
+ hci_pio_err(hci, pio, STAT_PROG_ERRORS);
+ return false;
+ }
+ xfer->response = resp;
+
+ if (pio->curr_rx == xfer) {
+ /*
+ * Response availability implies RX completion.
+ * Retrieve trailing RX data if any.
+ * Note that short reads are possible.
+ */
+ unsigned int received, expected, to_keep;
+
+ received = xfer->data_len - xfer->data_left;
+ expected = RESP_DATA_LENGTH(xfer->response);
+ if (expected > received) {
+ hci_pio_do_trailing_rx(hci, pio,
+ expected - received);
+ } else if (received > expected) {
+ /* we consumed data meant for next xfer */
+ to_keep = DIV_ROUND_UP(expected, 4);
+ hci_pio_push_to_next_rx(hci, xfer, to_keep);
+ }
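+
+ /*
+ * Illustrative: expected = 5 and received = 12 keeps
+ * DIV_ROUND_UP(5, 4) = 2 words here and pushes the
+ * surplus word back to the next queued RX transfer.
+ */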
+
+ /* then process the RX list pointer */
+ if (hci_pio_process_rx(hci, pio))
+ pio->enabled_irqs &= ~STAT_RX_THLD;
+ }
+
+ /*
+ * We're about to give back ownership of the xfer structure
+ * to the waiting instance. Make sure no reference to it
+ * still exists.
+ */
+ if (pio->curr_rx == xfer) {
+ DBG("short RX ?");
+ pio->curr_rx = pio->curr_rx->next_data;
+ } else if (pio->curr_tx == xfer) {
+ DBG("short TX ?");
+ pio->curr_tx = pio->curr_tx->next_data;
+ } else if (xfer->data_left) {
+ DBG("PIO xfer count = %d after response",
+ xfer->data_left);
+ }
+
+ pio->curr_resp = xfer->next_resp;
+ if (xfer->completion)
+ complete(xfer->completion);
+ }
+ return !pio->curr_resp;
+}
+
+static void hci_pio_queue_resp(struct i3c_hci *hci, struct hci_pio_data *pio)
+{
+ struct hci_xfer *xfer = pio->curr_xfer;
+ struct hci_xfer *prev_queue_tail;
+
+ if (!(xfer->cmd_desc[0] & CMD_0_ROC))
+ return;
+
+ prev_queue_tail = pio->resp_queue;
+ pio->resp_queue = xfer;
+ if (pio->curr_resp) {
+ prev_queue_tail->next_resp = xfer;
+ } else {
+ pio->curr_resp = xfer;
+ if (!hci_pio_process_resp(hci, pio))
+ pio->enabled_irqs |= STAT_RESP_READY;
+ }
+}
+
+static bool hci_pio_process_cmd(struct i3c_hci *hci, struct hci_pio_data *pio)
+{
+ while (pio->curr_xfer &&
+ (pio_reg_read(INTR_STATUS) & STAT_CMD_QUEUE_READY)) {
+ /*
+ * Always process the data FIFO before sending the command
+ * so needed TX data or RX space is available upfront.
+ */
+ hci_pio_queue_data(hci, pio);
+ /*
+ * Then queue our response request. This will also process
+ * the response FIFO in case it got suddenly filled up
+ * with results from previous commands.
+ */
+ hci_pio_queue_resp(hci, pio);
+ /*
+ * Finally send the command.
+ */
+ hci_pio_write_cmd(hci, pio->curr_xfer);
+ /*
+ * And move on.
+ */
+ pio->curr_xfer = pio->curr_xfer->next_xfer;
+ }
+ return !pio->curr_xfer;
+}
+
+static int hci_pio_queue_xfer(struct i3c_hci *hci, struct hci_xfer *xfer, int n)
+{
+ struct hci_pio_data *pio = hci->io_data;
+ struct hci_xfer *prev_queue_tail;
+ int i;
+
+ DBG("n = %d", n);
+
+ /* link xfer instances together and initialize data count */
+ for (i = 0; i < n; i++) {
+ xfer[i].next_xfer = (i + 1 < n) ? &xfer[i + 1] : NULL;
+ xfer[i].next_data = NULL;
+ xfer[i].next_resp = NULL;
+ xfer[i].data_left = xfer[i].data_len;
+ }
+
+ spin_lock_irq(&pio->lock);
+ prev_queue_tail = pio->xfer_queue;
+ pio->xfer_queue = &xfer[n - 1];
+ if (pio->curr_xfer) {
+ prev_queue_tail->next_xfer = xfer;
+ } else {
+ pio->curr_xfer = xfer;
+ if (!hci_pio_process_cmd(hci, pio))
+ pio->enabled_irqs |= STAT_CMD_QUEUE_READY;
+ pio_reg_write(INTR_SIGNAL_ENABLE, pio->enabled_irqs);
+ DBG("status = %#x/%#x",
+ pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
+ }
+ spin_unlock_irq(&pio->lock);
+ return 0;
+}
+
+static bool hci_pio_dequeue_xfer_common(struct i3c_hci *hci,
+ struct hci_pio_data *pio,
+ struct hci_xfer *xfer, int n)
+{
+ struct hci_xfer *p, **p_prev_next;
+ int i;
+
+ /*
+ * To safely dequeue a transfer request, it must be either entirely
+ * processed, or not yet processed at all. If our request tail is
+ * reachable from either the data or resp list, that means the command
+ * was submitted and not yet completed.
+ */
+ for (p = pio->curr_resp; p; p = p->next_resp)
+ for (i = 0; i < n; i++)
+ if (p == &xfer[i])
+ goto pio_screwed;
+ for (p = pio->curr_rx; p; p = p->next_data)
+ for (i = 0; i < n; i++)
+ if (p == &xfer[i])
+ goto pio_screwed;
+ for (p = pio->curr_tx; p; p = p->next_data)
+ for (i = 0; i < n; i++)
+ if (p == &xfer[i])
+ goto pio_screwed;
+
+ /*
+ * The command was completed, or wasn't yet submitted.
+ * Unlink it from the queue if the latter.
+ */
+ p_prev_next = &pio->curr_xfer;
+ for (p = pio->curr_xfer; p; p = p->next_xfer) {
+ if (p == &xfer[0]) {
+ *p_prev_next = xfer[n - 1].next_xfer;
+ break;
+ }
+ p_prev_next = &p->next_xfer;
+ }
+
+ /* return true if we actually unqueued something */
+ return !!p;
+
+pio_screwed:
+ /*
+ * Life is tough. We must invalidate the hardware state and
+ * discard everything that is still queued.
+ */
+ for (p = pio->curr_resp; p; p = p->next_resp) {
+ p->response = FIELD_PREP(RESP_ERR_FIELD, RESP_ERR_HC_TERMINATED);
+ if (p->completion)
+ complete(p->completion);
+ }
+ for (p = pio->curr_xfer; p; p = p->next_xfer) {
+ p->response = FIELD_PREP(RESP_ERR_FIELD, RESP_ERR_HC_TERMINATED);
+ if (p->completion)
+ complete(p->completion);
+ }
+ pio->curr_xfer = pio->curr_rx = pio->curr_tx = pio->curr_resp = NULL;
+
+ return true;
+}
+
+static bool hci_pio_dequeue_xfer(struct i3c_hci *hci, struct hci_xfer *xfer, int n)
+{
+ struct hci_pio_data *pio = hci->io_data;
+ int ret;
+
+ spin_lock_irq(&pio->lock);
+ DBG("n=%d status=%#x/%#x", n,
+ pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
+ DBG("main_status = %#x/%#x",
+ readl(hci->base_regs + 0x20), readl(hci->base_regs + 0x28));
+
+ ret = hci_pio_dequeue_xfer_common(hci, pio, xfer, n);
+ spin_unlock_irq(&pio->lock);
+ return ret;
+}
+
+static void hci_pio_err(struct i3c_hci *hci, struct hci_pio_data *pio,
+ u32 status)
+{
+ /* TODO: this ought to be more sophisticated eventually */
+
+ if (pio_reg_read(INTR_STATUS) & STAT_RESP_READY) {
+ /* this may happen when an error is signaled with ROC unset */
+ u32 resp = pio_reg_read(RESPONSE_QUEUE_PORT);
+
+ dev_err(&hci->master.dev,
+ "orphan response (%#x) on error\n", resp);
+ }
+
+ /* dump states on programming errors */
+ if (status & STAT_PROG_ERRORS) {
+ u32 queue = pio_reg_read(QUEUE_CUR_STATUS);
+ u32 data = pio_reg_read(DATA_BUFFER_CUR_STATUS);
+
+ dev_err(&hci->master.dev,
+ "prog error %#lx (C/R/I = %ld/%ld/%ld, TX/RX = %ld/%ld)\n",
+ status & STAT_PROG_ERRORS,
+ FIELD_GET(CUR_CMD_Q_EMPTY_LEVEL, queue),
+ FIELD_GET(CUR_RESP_Q_LEVEL, queue),
+ FIELD_GET(CUR_IBI_Q_LEVEL, queue),
+ FIELD_GET(CUR_TX_BUF_LVL, data),
+ FIELD_GET(CUR_RX_BUF_LVL, data));
+ }
+
+ /* just bust out everything with pending responses for now */
+ hci_pio_dequeue_xfer_common(hci, pio, pio->curr_resp, 1);
+ /* ... and half-way TX transfers if any */
+ if (pio->curr_tx && pio->curr_tx->data_left != pio->curr_tx->data_len)
+ hci_pio_dequeue_xfer_common(hci, pio, pio->curr_tx, 1);
+ /* then reset the hardware */
+ mipi_i3c_hci_pio_reset(hci);
+ mipi_i3c_hci_resume(hci);
+
+ DBG("status=%#x/%#x",
+ pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
+}
+
+static void hci_pio_set_ibi_thresh(struct i3c_hci *hci,
+ struct hci_pio_data *pio,
+ unsigned int thresh_val)
+{
+ u32 regval = pio->reg_queue_thresh;
+
+ regval &= ~QUEUE_IBI_STATUS_THLD;
+ regval |= FIELD_PREP(QUEUE_IBI_STATUS_THLD, thresh_val);
+ /* write the threshold reg only if it changes */
+ if (regval != pio->reg_queue_thresh) {
+ pio_reg_write(QUEUE_THLD_CTRL, regval);
+ pio->reg_queue_thresh = regval;
+ DBG("%d", thresh_val);
+ }
+}
+
+static bool hci_pio_get_ibi_segment(struct i3c_hci *hci,
+ struct hci_pio_data *pio)
+{
+ struct hci_pio_ibi_data *ibi = &pio->ibi;
+ unsigned int nr_words, thresh_val;
+ u32 *p;
+
+ p = ibi->data_ptr;
+ p += (ibi->seg_len - ibi->seg_cnt) / 4;
+
+ while ((nr_words = ibi->seg_cnt / 4)) {
+ /* determine our IBI queue threshold value */
+ thresh_val = min(nr_words, pio->max_ibi_thresh);
+ hci_pio_set_ibi_thresh(hci, pio, thresh_val);
+ /* bail out if we don't have that amount of data ready */
+ if (!(pio_reg_read(INTR_STATUS) & STAT_IBI_STATUS_THLD))
+ return false;
+ /* extract the data from the IBI port */
+ nr_words = thresh_val;
+ ibi->seg_cnt -= nr_words * 4;
+ DBG("now %d left %d", nr_words * 4, ibi->seg_cnt);
+ while (nr_words--)
+ *p++ = pio_reg_read(IBI_PORT);
+ }
+
+ if (ibi->seg_cnt) {
+ /*
+ * There are trailing bytes in the last word.
+ * Fetch it and extract bytes in an endian independent way.
+ * Unlike the TX case, we must not write past the end of
+ * the destination buffer.
+ */
+ u32 data;
+ u8 *p_byte = (u8 *)p;
+
+ hci_pio_set_ibi_thresh(hci, pio, 1);
+ if (!(pio_reg_read(INTR_STATUS) & STAT_IBI_STATUS_THLD))
+ return false;
+ DBG("trailing %d", ibi->seg_cnt);
+ data = pio_reg_read(IBI_PORT);
+ data = (__force u32) cpu_to_le32(data);
+ while (ibi->seg_cnt--) {
+ *p_byte++ = data;
+ data >>= 8;
+ }
+ }
+
+ return true;
+}
+
+static bool hci_pio_prep_new_ibi(struct i3c_hci *hci, struct hci_pio_data *pio)
+{
+ struct hci_pio_ibi_data *ibi = &pio->ibi;
+ struct i3c_dev_desc *dev;
+ struct i3c_hci_dev_data *dev_data;
+ struct hci_pio_dev_ibi_data *dev_ibi;
+ u32 ibi_status;
+
+ /*
+ * We have a new IBI. Try to set up its payload retrieval.
+ * When returning true, the IBI data has to be consumed whether
+ * or not we are set up to capture it. If we return true with
+ * ibi->slot == NULL that means the data payload has to be
+ * drained out of the IBI port and dropped.
+ */
+
+ ibi_status = pio_reg_read(IBI_PORT);
+ DBG("status = %#x", ibi_status);
+ ibi->addr = FIELD_GET(IBI_TARGET_ADDR, ibi_status);
+ if (ibi_status & IBI_ERROR) {
+ dev_err(&hci->master.dev, "IBI error from %#x\n", ibi->addr);
+ return false;
+ }
+
+ ibi->last_seg = ibi_status & IBI_LAST_STATUS;
+ ibi->seg_len = FIELD_GET(IBI_DATA_LENGTH, ibi_status);
+ ibi->seg_cnt = ibi->seg_len;
+
+ dev = i3c_hci_addr_to_dev(hci, ibi->addr);
+ if (!dev) {
+ dev_err(&hci->master.dev,
+ "IBI for unknown device %#x\n", ibi->addr);
+ return true;
+ }
+
+ dev_data = i3c_dev_get_master_data(dev);
+ dev_ibi = dev_data->ibi_data;
+ ibi->max_len = dev_ibi->max_len;
+
+ if (ibi->seg_len > ibi->max_len) {
+ dev_err(&hci->master.dev, "IBI payload too big (%d > %d)\n",
+ ibi->seg_len, ibi->max_len);
+ return true;
+ }
+
+ ibi->slot = i3c_generic_ibi_get_free_slot(dev_ibi->pool);
+ if (!ibi->slot) {
+ dev_err(&hci->master.dev, "no free slot for IBI\n");
+ } else {
+ ibi->slot->len = 0;
+ ibi->data_ptr = ibi->slot->data;
+ }
+ return true;
+}
+
+static void hci_pio_free_ibi_slot(struct i3c_hci *hci, struct hci_pio_data *pio)
+{
+ struct hci_pio_ibi_data *ibi = &pio->ibi;
+ struct hci_pio_dev_ibi_data *dev_ibi;
+
+ if (ibi->slot) {
+ dev_ibi = ibi->slot->dev->common.master_priv;
+ i3c_generic_ibi_recycle_slot(dev_ibi->pool, ibi->slot);
+ ibi->slot = NULL;
+ }
+}
+
+static bool hci_pio_process_ibi(struct i3c_hci *hci, struct hci_pio_data *pio)
+{
+ struct hci_pio_ibi_data *ibi = &pio->ibi;
+
+ if (!ibi->slot && !ibi->seg_cnt && ibi->last_seg)
+ if (!hci_pio_prep_new_ibi(hci, pio))
+ return false;
+
+ for (;;) {
+ u32 ibi_status;
+ unsigned int ibi_addr;
+
+ if (ibi->slot) {
+ if (!hci_pio_get_ibi_segment(hci, pio))
+ return false;
+ ibi->slot->len += ibi->seg_len;
+ ibi->data_ptr += ibi->seg_len;
+ if (ibi->last_seg) {
+ /* was the last segment: submit it and leave */
+ i3c_master_queue_ibi(ibi->slot->dev, ibi->slot);
+ ibi->slot = NULL;
+ hci_pio_set_ibi_thresh(hci, pio, 1);
+ return true;
+ }
+ } else if (ibi->seg_cnt) {
+ /*
+ * No slot but a non-zero count. This is the result
+ * of some error and the payload must be drained.
+ * This normally does not happen, therefore there is no
+ * need to be extra optimized here.
+ */
+ hci_pio_set_ibi_thresh(hci, pio, 1);
+ do {
+ if (!(pio_reg_read(INTR_STATUS) & STAT_IBI_STATUS_THLD))
+ return false;
+ pio_reg_read(IBI_PORT);
+ } while (--ibi->seg_cnt);
+ if (ibi->last_seg)
+ return true;
+ }
+
+ /* try to move to the next segment right away */
+ hci_pio_set_ibi_thresh(hci, pio, 1);
+ if (!(pio_reg_read(INTR_STATUS) & STAT_IBI_STATUS_THLD))
+ return false;
+ ibi_status = pio_reg_read(IBI_PORT);
+ ibi_addr = FIELD_GET(IBI_TARGET_ADDR, ibi_status);
+ if (ibi->addr != ibi_addr) {
+ /* target address changed before last segment */
+ dev_err(&hci->master.dev,
+ "unexp IBI address changed from %d to %d\n",
+ ibi->addr, ibi_addr);
+ hci_pio_free_ibi_slot(hci, pio);
+ }
+ ibi->last_seg = ibi_status & IBI_LAST_STATUS;
+ ibi->seg_len = FIELD_GET(IBI_DATA_LENGTH, ibi_status);
+ ibi->seg_cnt = ibi->seg_len;
+ if (ibi->slot && ibi->slot->len + ibi->seg_len > ibi->max_len) {
+ dev_err(&hci->master.dev,
+ "IBI payload too big (%d > %d)\n",
+ ibi->slot->len + ibi->seg_len, ibi->max_len);
+ hci_pio_free_ibi_slot(hci, pio);
+ }
+ }
+
+ return false;
+}
+
+static int hci_pio_request_ibi(struct i3c_hci *hci, struct i3c_dev_desc *dev,
+ const struct i3c_ibi_setup *req)
+{
+ struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
+ struct i3c_generic_ibi_pool *pool;
+ struct hci_pio_dev_ibi_data *dev_ibi;
+
+ dev_ibi = kmalloc(sizeof(*dev_ibi), GFP_KERNEL);
+ if (!dev_ibi)
+ return -ENOMEM;
+ pool = i3c_generic_ibi_alloc_pool(dev, req);
+ if (IS_ERR(pool)) {
+ kfree(dev_ibi);
+ return PTR_ERR(pool);
+ }
+ dev_ibi->pool = pool;
+ dev_ibi->max_len = req->max_payload_len;
+ dev_data->ibi_data = dev_ibi;
+ return 0;
+}
+
+static void hci_pio_free_ibi(struct i3c_hci *hci, struct i3c_dev_desc *dev)
+{
+ struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
+ struct hci_pio_dev_ibi_data *dev_ibi = dev_data->ibi_data;
+
+ dev_data->ibi_data = NULL;
+ i3c_generic_ibi_free_pool(dev_ibi->pool);
+ kfree(dev_ibi);
+}
+
+static void hci_pio_recycle_ibi_slot(struct i3c_hci *hci,
+ struct i3c_dev_desc *dev,
+ struct i3c_ibi_slot *slot)
+{
+ struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
+ struct hci_pio_dev_ibi_data *dev_ibi = dev_data->ibi_data;
+
+ i3c_generic_ibi_recycle_slot(dev_ibi->pool, slot);
+}
+
+static bool hci_pio_irq_handler(struct i3c_hci *hci, unsigned int unused)
+{
+ struct hci_pio_data *pio = hci->io_data;
+ u32 status;
+
+ spin_lock(&pio->lock);
+ status = pio_reg_read(INTR_STATUS);
+ DBG("(in) status: %#x/%#x", status, pio->enabled_irqs);
+ status &= pio->enabled_irqs | STAT_LATENCY_WARNINGS;
+ if (!status) {
+ spin_unlock(&pio->lock);
+ return false;
+ }
+
+ if (status & STAT_IBI_STATUS_THLD)
+ hci_pio_process_ibi(hci, pio);
+
+ if (status & STAT_RX_THLD)
+ if (hci_pio_process_rx(hci, pio))
+ pio->enabled_irqs &= ~STAT_RX_THLD;
+ if (status & STAT_TX_THLD)
+ if (hci_pio_process_tx(hci, pio))
+ pio->enabled_irqs &= ~STAT_TX_THLD;
+ if (status & STAT_RESP_READY)
+ if (hci_pio_process_resp(hci, pio))
+ pio->enabled_irqs &= ~STAT_RESP_READY;
+
+ if (unlikely(status & STAT_LATENCY_WARNINGS)) {
+ pio_reg_write(INTR_STATUS, status & STAT_LATENCY_WARNINGS);
+ dev_warn_ratelimited(&hci->master.dev,
+ "encountered warning condition %#lx\n",
+ status & STAT_LATENCY_WARNINGS);
+ }
+
+ if (unlikely(status & STAT_ALL_ERRORS)) {
+ pio_reg_write(INTR_STATUS, status & STAT_ALL_ERRORS);
+ hci_pio_err(hci, pio, status & STAT_ALL_ERRORS);
+ }
+
+ if (status & STAT_CMD_QUEUE_READY)
+ if (hci_pio_process_cmd(hci, pio))
+ pio->enabled_irqs &= ~STAT_CMD_QUEUE_READY;
+
+ pio_reg_write(INTR_SIGNAL_ENABLE, pio->enabled_irqs);
+ DBG("(out) status: %#x/%#x",
+ pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
+ spin_unlock(&pio->lock);
+ return true;
+}
+
+const struct hci_io_ops mipi_i3c_hci_pio = {
+ .init = hci_pio_init,
+ .cleanup = hci_pio_cleanup,
+ .queue_xfer = hci_pio_queue_xfer,
+ .dequeue_xfer = hci_pio_dequeue_xfer,
+ .irq_handler = hci_pio_irq_handler,
+ .request_ibi = hci_pio_request_ibi,
+ .free_ibi = hci_pio_free_ibi,
+ .recycle_ibi_slot = hci_pio_recycle_ibi_slot,
+};
diff --git a/drivers/i3c/master/mipi-i3c-hci/xfer_mode_rate.h b/drivers/i3c/master/mipi-i3c-hci/xfer_mode_rate.h
new file mode 100644
index 000000000000..1e36b75afb16
--- /dev/null
+++ b/drivers/i3c/master/mipi-i3c-hci/xfer_mode_rate.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/*
+ * Copyright (c) 2020, MIPI Alliance, Inc.
+ *
+ * Author: Nicolas Pitre <npitre@baylibre.com>
+ *
+ * Transfer Mode/Rate Table definitions as found in extended capability
+ * sections 0x04 and 0x08.
+ * This applies starting from I3C HCI v2.0.
+ */
+
+#ifndef XFER_MODE_RATE_H
+#define XFER_MODE_RATE_H
+
+/*
+ * Master Transfer Mode Table Fixed Indexes.
+ *
+ * Indexes 0x0 and 0x8 are mandatory. Availability for the rest must be
+ * obtained from the mode table in the extended capability area.
+ * Presence and definitions for indexes beyond these may vary.
+ */
+#define XFERMODE_IDX_I3C_SDR 0x00 /* I3C SDR Mode */
+#define XFERMODE_IDX_I3C_HDR_DDR 0x01 /* I3C HDR-DDR Mode */
+#define XFERMODE_IDX_I3C_HDR_T 0x02 /* I3C HDR-Ternary Mode */
+#define XFERMODE_IDX_I3C_HDR_BT 0x03 /* I3C HDR-BT Mode */
+#define XFERMODE_IDX_I2C 0x08 /* Legacy I2C Mode */
+
+/*
+ * Transfer Mode Table Entry Bits Definitions
+ */
+#define XFERMODE_VALID_XFER_ADD_FUNC GENMASK(21, 16)
+#define XFERMODE_ML_DATA_XFER_CODING GENMASK(15, 11)
+#define XFERMODE_ML_ADDL_LANES GENMASK(10, 8)
+#define XFERMODE_SUPPORTED BIT(7)
+#define XFERMODE_MODE GENMASK(3, 0)
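+
+/*
+ * Illustrative decode of a mode table entry (a sketch, not part of this
+ * header; "entry" is a hypothetical u32 read from the mode table):
+ *
+ *	if (entry & XFERMODE_SUPPORTED)
+ *		mode = FIELD_GET(XFERMODE_MODE, entry);
+ */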
+
+/*
+ * Master Data Transfer Rate Selector Values.
+ *
+ * These are the values to be used in the command descriptor XFER_RATE field
+ * and found in the RATE_ID field below.
+ * The I3C_SDR0, I3C_SDR1, I3C_SDR2, I3C_SDR3, I3C_SDR4 and I2C_FM rates
+ * are required; everything else is optional and discoverable in the
+ * Data Transfer Rate Table. The rates listed here are typical. The actual
+ * rates may vary slightly and are also specified in the Data Transfer
+ * Rate Table.
+ */
+#define XFERRATE_I3C_SDR0 0x00 /* 12.5 MHz */
+#define XFERRATE_I3C_SDR1 0x01 /* 8 MHz */
+#define XFERRATE_I3C_SDR2 0x02 /* 6 MHz */
+#define XFERRATE_I3C_SDR3 0x03 /* 4 MHz */
+#define XFERRATE_I3C_SDR4 0x04 /* 2 MHz */
+#define XFERRATE_I3C_SDR_FM_FMP 0x05 /* 400 KHz / 1 MHz */
+#define XFERRATE_I3C_SDR_USER6 0x06 /* User Defined */
+#define XFERRATE_I3C_SDR_USER7 0x07 /* User Defined */
+
+#define XFERRATE_I2C_FM 0x00 /* 400 KHz */
+#define XFERRATE_I2C_FMP 0x01 /* 1 MHz */
+#define XFERRATE_I2C_USER2 0x02 /* User Defined */
+#define XFERRATE_I2C_USER3 0x03 /* User Defined */
+#define XFERRATE_I2C_USER4 0x04 /* User Defined */
+#define XFERRATE_I2C_USER5 0x05 /* User Defined */
+#define XFERRATE_I2C_USER6 0x06 /* User Defined */
+#define XFERRATE_I2C_USER7 0x07 /* User Defined */
+
+/*
+ * Master Data Transfer Rate Table Mode ID values.
+ */
+#define XFERRATE_MODE_I3C 0x00
+#define XFERRATE_MODE_I2C 0x08
+
+/*
+ * Master Data Transfer Rate Table Entry Bits Definitions
+ */
+#define XFERRATE_MODE_ID GENMASK(31, 28)
+#define XFERRATE_RATE_ID GENMASK(22, 20)
+#define XFERRATE_ACTUAL_RATE_KHZ GENMASK(19, 0)
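+
+/*
+ * Illustrative decode of a rate table entry (a sketch; "entry" is a
+ * hypothetical u32 taken from the Data Transfer Rate Table):
+ *
+ *	if (FIELD_GET(XFERRATE_MODE_ID, entry) == XFERRATE_MODE_I3C)
+ *		rate_khz = FIELD_GET(XFERRATE_ACTUAL_RATE_KHZ, entry);
+ */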
+
+#endif
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index 2162bc80f09e..013ad33fbbc8 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -223,7 +223,6 @@ void ide_prep_sense(ide_drive_t *drive, struct request *rq)
sense_rq->rq_disk = rq->rq_disk;
sense_rq->cmd_flags = REQ_OP_DRV_IN;
ide_req(sense_rq)->type = ATA_PRIV_SENSE;
- sense_rq->rq_flags |= RQF_PREEMPT;
req->cmd[0] = GPCMD_REQUEST_SENSE;
req->cmd[4] = cmd_len;
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 1a53c7a75224..4867b67b60d6 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -515,15 +515,10 @@ repeat:
* above to return us whatever is in the queue. Since we call
* ide_do_request() ourselves, we end up taking requests while
* the queue is blocked...
- *
- * We let requests forced at head of queue with ide-preempt
- * though. I hope that doesn't happen too much, hopefully not
- * unless the subdriver triggers such a thing in its own PM
- * state machine.
*/
if ((drive->dev_flags & IDE_DFLAG_BLOCKED) &&
ata_pm_request(rq) == 0 &&
- (rq->rq_flags & RQF_PREEMPT) == 0) {
+ (rq->rq_flags & RQF_PM) == 0) {
/* there should be no pending command at this point */
ide_unlock_port(hwif);
goto plug_device;
diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c
index 192e6c65d34e..82ab308f1aaf 100644
--- a/drivers/ide/ide-pm.c
+++ b/drivers/ide/ide-pm.c
@@ -77,7 +77,7 @@ int generic_ide_resume(struct device *dev)
}
memset(&rqpm, 0, sizeof(rqpm));
- rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_PREEMPT);
+ rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_PM);
ide_req(rq)->type = ATA_PRIV_PM_RESUME;
ide_req(rq)->special = &rqpm;
rqpm.pm_step = IDE_PM_START_RESUME;
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 430b29e0abdb..aefd74c0d862 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -902,65 +902,14 @@ out_up:
return 1;
}
-static int ata_lock(dev_t dev, void *data)
+static void ata_probe(dev_t dev)
{
- /* FIXME: we want to pin hwif down */
- return 0;
+ request_module("ide-disk");
+ request_module("ide-cd");
+ request_module("ide-tape");
+ request_module("ide-floppy");
}
-static struct kobject *ata_probe(dev_t dev, int *part, void *data)
-{
- ide_hwif_t *hwif = data;
- int unit = *part >> PARTN_BITS;
- ide_drive_t *drive = hwif->devices[unit];
-
- if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0)
- return NULL;
-
- if (drive->media == ide_disk)
- request_module("ide-disk");
- if (drive->media == ide_cdrom || drive->media == ide_optical)
- request_module("ide-cd");
- if (drive->media == ide_tape)
- request_module("ide-tape");
- if (drive->media == ide_floppy)
- request_module("ide-floppy");
-
- return NULL;
-}
-
-static struct kobject *exact_match(dev_t dev, int *part, void *data)
-{
- struct gendisk *p = data;
- *part &= (1 << PARTN_BITS) - 1;
- return &disk_to_dev(p)->kobj;
-}
-
-static int exact_lock(dev_t dev, void *data)
-{
- struct gendisk *p = data;
-
- if (!get_disk_and_module(p))
- return -1;
- return 0;
-}
-
-void ide_register_region(struct gendisk *disk)
-{
- blk_register_region(MKDEV(disk->major, disk->first_minor),
- disk->minors, NULL, exact_match, exact_lock, disk);
-}
-
-EXPORT_SYMBOL_GPL(ide_register_region);
-
-void ide_unregister_region(struct gendisk *disk)
-{
- blk_unregister_region(MKDEV(disk->major, disk->first_minor),
- disk->minors);
-}
-
-EXPORT_SYMBOL_GPL(ide_unregister_region);
-
void ide_init_disk(struct gendisk *disk, ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
@@ -999,7 +948,7 @@ static int hwif_init(ide_hwif_t *hwif)
return 0;
}
- if (register_blkdev(hwif->major, hwif->name))
+ if (__register_blkdev(hwif->major, hwif->name, ata_probe))
return 0;
if (!hwif->sg_max_nents)
@@ -1021,8 +970,6 @@ static int hwif_init(ide_hwif_t *hwif)
goto out;
}
- blk_register_region(MKDEV(hwif->major, 0), MAX_DRIVES << PARTN_BITS,
- THIS_MODULE, ata_probe, ata_lock, hwif);
return 1;
out:
@@ -1611,7 +1558,6 @@ static void ide_unregister(ide_hwif_t *hwif)
/*
* Remove us from the kernel's knowledge
*/
- blk_unregister_region(MKDEV(hwif->major, 0), MAX_DRIVES<<PARTN_BITS);
kfree(hwif->sg_table);
unregister_blkdev(hwif->major, hwif->name);
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 6f26634b22bb..88b96437b22e 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -1822,7 +1822,6 @@ static void ide_tape_remove(ide_drive_t *drive)
ide_proc_unregister_driver(drive, tape->driver);
device_del(&tape->dev);
- ide_unregister_region(tape->disk);
mutex_lock(&idetape_ref_mutex);
put_device(&tape->dev);
@@ -2026,7 +2025,6 @@ static int ide_tape_probe(ide_drive_t *drive)
"n%s", tape->name);
g->fops = &idetape_block_ops;
- ide_register_region(g);
return 0;
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index d79335506ecd..3273360f30f7 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -37,7 +37,7 @@
*/
/* un-comment DEBUG to enable pr_debug() statements */
-#define DEBUG
+/* #define DEBUG */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -963,6 +963,39 @@ static struct cpuidle_state dnv_cstates[] __initdata = {
.enter = NULL }
};
+/*
+ * Note: depending on HW and FW revision, the SnowRidge SoC may or may not
+ * support C6; this is indicated in the CPUID mwait leaf.
+ */
+static struct cpuidle_state snr_cstates[] __initdata = {
+ {
+ .name = "C1",
+ .desc = "MWAIT 0x00",
+ .flags = MWAIT2flg(0x00),
+ .exit_latency = 2,
+ .target_residency = 2,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .name = "C1E",
+ .desc = "MWAIT 0x01",
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
+ .exit_latency = 15,
+ .target_residency = 25,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .name = "C6",
+ .desc = "MWAIT 0x20",
+ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 130,
+ .target_residency = 500,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .enter = NULL }
+};
+
static const struct idle_cpu idle_cpu_nehalem __initconst = {
.state_table = nehalem_cstates,
.auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE,
@@ -1084,6 +1117,12 @@ static const struct idle_cpu idle_cpu_dnv __initconst = {
.use_acpi = true,
};
+static const struct idle_cpu idle_cpu_snr __initconst = {
+ .state_table = snr_cstates,
+ .disable_promotion_to_c1e = true,
+ .use_acpi = true,
+};
+
static const struct x86_cpu_id intel_idle_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EP, &idle_cpu_nhx),
X86_MATCH_INTEL_FAM6_MODEL(NEHALEM, &idle_cpu_nehalem),
@@ -1122,7 +1161,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &idle_cpu_bxt),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS, &idle_cpu_bxt),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D, &idle_cpu_dnv),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &idle_cpu_dnv),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &idle_cpu_snr),
{}
};
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index b11c8c47ba2a..e946903b0993 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -397,16 +397,12 @@ static int tiadc_iio_buffered_hardware_setup(struct device *dev,
ret = devm_request_threaded_irq(dev, irq, pollfunc_th, pollfunc_bh,
flags, indio_dev->name, indio_dev);
if (ret)
- goto error_kfifo_free;
+ return ret;
indio_dev->setup_ops = setup_ops;
indio_dev->modes |= INDIO_BUFFER_SOFTWARE;
return 0;
-
-error_kfifo_free:
- iio_kfifo_free(indio_dev->buffer);
- return ret;
}
static const char * const chan_name_ain[] = {
diff --git a/drivers/iio/common/st_sensors/st_sensors_trigger.c b/drivers/iio/common/st_sensors/st_sensors_trigger.c
index 0507283bd4c1..2dbd2646e44e 100644
--- a/drivers/iio/common/st_sensors/st_sensors_trigger.c
+++ b/drivers/iio/common/st_sensors/st_sensors_trigger.c
@@ -23,35 +23,31 @@
* @sdata: Sensor data.
*
* returns:
- * 0 - no new samples available
- * 1 - new samples available
- * negative - error or unknown
+ * false - no new samples available or read error
+ * true - new samples available
*/
-static int st_sensors_new_samples_available(struct iio_dev *indio_dev,
- struct st_sensor_data *sdata)
+static bool st_sensors_new_samples_available(struct iio_dev *indio_dev,
+ struct st_sensor_data *sdata)
{
int ret, status;
/* How would I know if I can't check it? */
if (!sdata->sensor_settings->drdy_irq.stat_drdy.addr)
- return -EINVAL;
+ return true;
/* No scan mask, no interrupt */
if (!indio_dev->active_scan_mask)
- return 0;
+ return false;
ret = regmap_read(sdata->regmap,
sdata->sensor_settings->drdy_irq.stat_drdy.addr,
&status);
if (ret < 0) {
dev_err(sdata->dev, "error checking samples available\n");
- return ret;
+ return false;
}
- if (status & sdata->sensor_settings->drdy_irq.stat_drdy.mask)
- return 1;
-
- return 0;
+ return !!(status & sdata->sensor_settings->drdy_irq.stat_drdy.mask);
}
/**
@@ -180,9 +176,15 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
/* Tell the interrupt handler that we're dealing with edges */
if (irq_trig == IRQF_TRIGGER_FALLING ||
- irq_trig == IRQF_TRIGGER_RISING)
+ irq_trig == IRQF_TRIGGER_RISING) {
+ if (!sdata->sensor_settings->drdy_irq.stat_drdy.addr) {
+ dev_err(&indio_dev->dev,
+ "edge IRQ not supported w/o stat register.\n");
+ err = -EOPNOTSUPP;
+ goto iio_trigger_free;
+ }
sdata->edge_irq = true;
- else
+ } else {
/*
* If we're not using edges (i.e. level interrupts) we
* just mask off the IRQ, handle one interrupt, then
@@ -190,6 +192,7 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
* interrupt handler top half again and start over.
*/
irq_trig |= IRQF_ONESHOT;
+ }
/*
* If the interrupt pin is Open Drain, by definition this
diff --git a/drivers/iio/dac/ad5504.c b/drivers/iio/dac/ad5504.c
index 28921b62e642..e9297c25d4ef 100644
--- a/drivers/iio/dac/ad5504.c
+++ b/drivers/iio/dac/ad5504.c
@@ -187,9 +187,9 @@ static ssize_t ad5504_write_dac_powerdown(struct iio_dev *indio_dev,
return ret;
if (pwr_down)
- st->pwr_down_mask |= (1 << chan->channel);
- else
st->pwr_down_mask &= ~(1 << chan->channel);
+ else
+ st->pwr_down_mask |= (1 << chan->channel);
ret = ad5504_spi_write(st, AD5504_ADDR_CTRL,
AD5504_DAC_PWRDWN_MODE(st->pwr_down_mode) |
diff --git a/drivers/iio/proximity/sx9310.c b/drivers/iio/proximity/sx9310.c
index a2f820997afc..37fd0b65a014 100644
--- a/drivers/iio/proximity/sx9310.c
+++ b/drivers/iio/proximity/sx9310.c
@@ -601,7 +601,7 @@ static int sx9310_read_thresh(struct sx9310_data *data,
return ret;
regval = FIELD_GET(SX9310_REG_PROX_CTRL8_9_PTHRESH_MASK, regval);
- if (regval > ARRAY_SIZE(sx9310_pthresh_codes))
+ if (regval >= ARRAY_SIZE(sx9310_pthresh_codes))
return -EINVAL;
*val = sx9310_pthresh_codes[regval];
@@ -1305,7 +1305,8 @@ sx9310_get_default_reg(struct sx9310_data *data, int i,
if (ret)
break;
- pos = min(max(ilog2(pos), 3), 10) - 3;
+ /* Powers of 2, except for a gap between 16 and 64 */
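+ /* Illustrative mapping: 8 -> 0, 16 -> 1, 64 -> 2, 128 -> 3, ..., 2048 -> 7 */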
+ pos = clamp(ilog2(pos), 3, 11) - (pos >= 32 ? 4 : 3);
reg_def->def &= ~SX9310_REG_PROX_CTRL7_AVGPOSFILT_MASK;
reg_def->def |= FIELD_PREP(SX9310_REG_PROX_CTRL7_AVGPOSFILT_MASK,
pos);
diff --git a/drivers/iio/temperature/mlx90632.c b/drivers/iio/temperature/mlx90632.c
index 503fe54a0bb9..608ccb1d8bc8 100644
--- a/drivers/iio/temperature/mlx90632.c
+++ b/drivers/iio/temperature/mlx90632.c
@@ -248,6 +248,12 @@ static int mlx90632_set_meas_type(struct regmap *regmap, u8 type)
if (ret < 0)
return ret;
+ /*
+ * Give the mlx90632 some time to reset properly before sending a new I2C command;
+ * if this is not done, the following I2C command(s) will not be accepted.
+ */
+ usleep_range(150, 200);
+
ret = regmap_write_bits(regmap, MLX90632_REG_CONTROL,
(MLX90632_CFG_MTYP_MASK | MLX90632_CFG_PWR_MASK),
(MLX90632_MTYP_STATUS(type) | MLX90632_PWR_STATUS_HALT));
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 5afd142fe8c7..98165589c8ab 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -1251,7 +1251,8 @@ out:
EXPORT_SYMBOL(ib_cm_listen);
/**
- * Create a new listening ib_cm_id and listen on the given service ID.
+ * ib_cm_insert_listen - Create a new listening ib_cm_id and listen on
+ * the given service ID.
*
* If there's an existing ID listening on that same device and service ID,
* return it.
@@ -1765,7 +1766,7 @@ static u16 cm_get_bth_pkey(struct cm_work *work)
}
/**
- * Convert OPA SGID to IB SGID
+ * cm_opa_to_ib_sgid - Convert OPA SGID to IB SGID
* ULPs (such as IPoIB) do not understand OPA GIDs and will
* reject them as the local_gid will not match the sgid. Therefore,
* change the pathrec's SGID to an IB SGID.
@@ -4273,8 +4274,8 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
group = container_of(obj, struct cm_counter_group, obj);
cm_attr = container_of(attr, struct cm_counter_attribute, attr);
- return sprintf(buf, "%ld\n",
- atomic_long_read(&group->counter[cm_attr->index]));
+ return sysfs_emit(buf, "%ld\n",
+ atomic_long_read(&group->counter[cm_attr->index]));
}
static const struct sysfs_ops cm_counter_ops = {
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index a77750b8954d..c51b84b2d2f3 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -477,6 +477,10 @@ static void cma_release_dev(struct rdma_id_private *id_priv)
list_del(&id_priv->list);
cma_dev_put(id_priv->cma_dev);
id_priv->cma_dev = NULL;
+ if (id_priv->id.route.addr.dev_addr.sgid_attr) {
+ rdma_put_gid_attr(id_priv->id.route.addr.dev_addr.sgid_attr);
+ id_priv->id.route.addr.dev_addr.sgid_attr = NULL;
+ }
mutex_unlock(&lock);
}
@@ -1861,9 +1865,6 @@ static void _destroy_id(struct rdma_id_private *id_priv,
kfree(id_priv->id.route.path_rec);
- if (id_priv->id.route.addr.dev_addr.sgid_attr)
- rdma_put_gid_attr(id_priv->id.route.addr.dev_addr.sgid_attr);
-
put_net(id_priv->id.route.addr.dev_addr.net);
rdma_restrack_del(&id_priv->res);
kfree(id_priv);
@@ -2495,8 +2496,9 @@ static int cma_listen_handler(struct rdma_cm_id *id,
return id_priv->id.event_handler(id, event);
}
-static void cma_listen_on_dev(struct rdma_id_private *id_priv,
- struct cma_device *cma_dev)
+static int cma_listen_on_dev(struct rdma_id_private *id_priv,
+ struct cma_device *cma_dev,
+ struct rdma_id_private **to_destroy)
{
struct rdma_id_private *dev_id_priv;
struct net *net = id_priv->id.route.addr.dev_addr.net;
@@ -2504,21 +2506,21 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
lockdep_assert_held(&lock);
+ *to_destroy = NULL;
if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1))
- return;
+ return 0;
dev_id_priv =
__rdma_create_id(net, cma_listen_handler, id_priv,
id_priv->id.ps, id_priv->id.qp_type, id_priv);
if (IS_ERR(dev_id_priv))
- return;
+ return PTR_ERR(dev_id_priv);
dev_id_priv->state = RDMA_CM_ADDR_BOUND;
memcpy(cma_src_addr(dev_id_priv), cma_src_addr(id_priv),
rdma_addr_size(cma_src_addr(id_priv)));
_cma_attach_to_dev(dev_id_priv, cma_dev);
- list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
cma_id_get(id_priv);
dev_id_priv->internal_id = 1;
dev_id_priv->afonly = id_priv->afonly;
@@ -2527,19 +2529,42 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
ret = rdma_listen(&dev_id_priv->id, id_priv->backlog);
if (ret)
- dev_warn(&cma_dev->device->dev,
- "RDMA CMA: cma_listen_on_dev, error %d\n", ret);
+ goto err_listen;
+ list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
+ return 0;
+err_listen:
+ /* Caller must destroy this after releasing lock */
+ *to_destroy = dev_id_priv;
+ dev_warn(&cma_dev->device->dev, "RDMA CMA: %s, error %d\n", __func__, ret);
+ return ret;
}
-static void cma_listen_on_all(struct rdma_id_private *id_priv)
+static int cma_listen_on_all(struct rdma_id_private *id_priv)
{
+ struct rdma_id_private *to_destroy;
struct cma_device *cma_dev;
+ int ret;
mutex_lock(&lock);
list_add_tail(&id_priv->list, &listen_any_list);
- list_for_each_entry(cma_dev, &dev_list, list)
- cma_listen_on_dev(id_priv, cma_dev);
+ list_for_each_entry(cma_dev, &dev_list, list) {
+ ret = cma_listen_on_dev(id_priv, cma_dev, &to_destroy);
+ if (ret) {
+ /* Prevent racing with cma_process_remove() */
+ if (to_destroy)
+ list_del_init(&to_destroy->list);
+ goto err_listen;
+ }
+ }
mutex_unlock(&lock);
+ return 0;
+
+err_listen:
+ list_del(&id_priv->list);
+ mutex_unlock(&lock);
+ if (to_destroy)
+ rdma_destroy_id(&to_destroy->id);
+ return ret;
}
void rdma_set_service_type(struct rdma_cm_id *id, int tos)
@@ -3692,8 +3717,11 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
ret = -ENOSYS;
goto err;
}
- } else
- cma_listen_on_all(id_priv);
+ } else {
+ ret = cma_listen_on_all(id_priv);
+ if (ret)
+ goto err;
+ }
return 0;
err:
@@ -4773,69 +4801,6 @@ static struct notifier_block cma_nb = {
.notifier_call = cma_netdev_callback
};
-static int cma_add_one(struct ib_device *device)
-{
- struct cma_device *cma_dev;
- struct rdma_id_private *id_priv;
- unsigned int i;
- unsigned long supported_gids = 0;
- int ret;
-
- cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
- if (!cma_dev)
- return -ENOMEM;
-
- cma_dev->device = device;
- cma_dev->default_gid_type = kcalloc(device->phys_port_cnt,
- sizeof(*cma_dev->default_gid_type),
- GFP_KERNEL);
- if (!cma_dev->default_gid_type) {
- ret = -ENOMEM;
- goto free_cma_dev;
- }
-
- cma_dev->default_roce_tos = kcalloc(device->phys_port_cnt,
- sizeof(*cma_dev->default_roce_tos),
- GFP_KERNEL);
- if (!cma_dev->default_roce_tos) {
- ret = -ENOMEM;
- goto free_gid_type;
- }
-
- rdma_for_each_port (device, i) {
- supported_gids = roce_gid_type_mask_support(device, i);
- WARN_ON(!supported_gids);
- if (supported_gids & (1 << CMA_PREFERRED_ROCE_GID_TYPE))
- cma_dev->default_gid_type[i - rdma_start_port(device)] =
- CMA_PREFERRED_ROCE_GID_TYPE;
- else
- cma_dev->default_gid_type[i - rdma_start_port(device)] =
- find_first_bit(&supported_gids, BITS_PER_LONG);
- cma_dev->default_roce_tos[i - rdma_start_port(device)] = 0;
- }
-
- init_completion(&cma_dev->comp);
- refcount_set(&cma_dev->refcount, 1);
- INIT_LIST_HEAD(&cma_dev->id_list);
- ib_set_client_data(device, &cma_client, cma_dev);
-
- mutex_lock(&lock);
- list_add_tail(&cma_dev->list, &dev_list);
- list_for_each_entry(id_priv, &listen_any_list, list)
- cma_listen_on_dev(id_priv, cma_dev);
- mutex_unlock(&lock);
-
- trace_cm_add_one(device);
- return 0;
-
-free_gid_type:
- kfree(cma_dev->default_gid_type);
-
-free_cma_dev:
- kfree(cma_dev);
- return ret;
-}
-
static void cma_send_device_removal_put(struct rdma_id_private *id_priv)
{
struct rdma_cm_event event = { .event = RDMA_CM_EVENT_DEVICE_REMOVAL };
@@ -4898,6 +4863,80 @@ static void cma_process_remove(struct cma_device *cma_dev)
wait_for_completion(&cma_dev->comp);
}
+static int cma_add_one(struct ib_device *device)
+{
+ struct rdma_id_private *to_destroy;
+ struct cma_device *cma_dev;
+ struct rdma_id_private *id_priv;
+ unsigned int i;
+ unsigned long supported_gids = 0;
+ int ret;
+
+ cma_dev = kmalloc(sizeof(*cma_dev), GFP_KERNEL);
+ if (!cma_dev)
+ return -ENOMEM;
+
+ cma_dev->device = device;
+ cma_dev->default_gid_type = kcalloc(device->phys_port_cnt,
+ sizeof(*cma_dev->default_gid_type),
+ GFP_KERNEL);
+ if (!cma_dev->default_gid_type) {
+ ret = -ENOMEM;
+ goto free_cma_dev;
+ }
+
+ cma_dev->default_roce_tos = kcalloc(device->phys_port_cnt,
+ sizeof(*cma_dev->default_roce_tos),
+ GFP_KERNEL);
+ if (!cma_dev->default_roce_tos) {
+ ret = -ENOMEM;
+ goto free_gid_type;
+ }
+
+ rdma_for_each_port (device, i) {
+ supported_gids = roce_gid_type_mask_support(device, i);
+ WARN_ON(!supported_gids);
+ if (supported_gids & (1 << CMA_PREFERRED_ROCE_GID_TYPE))
+ cma_dev->default_gid_type[i - rdma_start_port(device)] =
+ CMA_PREFERRED_ROCE_GID_TYPE;
+ else
+ cma_dev->default_gid_type[i - rdma_start_port(device)] =
+ find_first_bit(&supported_gids, BITS_PER_LONG);
+ cma_dev->default_roce_tos[i - rdma_start_port(device)] = 0;
+ }
+
+ init_completion(&cma_dev->comp);
+ refcount_set(&cma_dev->refcount, 1);
+ INIT_LIST_HEAD(&cma_dev->id_list);
+ ib_set_client_data(device, &cma_client, cma_dev);
+
+ mutex_lock(&lock);
+ list_add_tail(&cma_dev->list, &dev_list);
+ list_for_each_entry(id_priv, &listen_any_list, list) {
+ ret = cma_listen_on_dev(id_priv, cma_dev, &to_destroy);
+ if (ret)
+ goto free_listen;
+ }
+ mutex_unlock(&lock);
+
+ trace_cm_add_one(device);
+ return 0;
+
+free_listen:
+ list_del(&cma_dev->list);
+ mutex_unlock(&lock);
+
+ /* cma_process_remove() will delete to_destroy */
+ cma_process_remove(cma_dev);
+ kfree(cma_dev->default_roce_tos);
+free_gid_type:
+ kfree(cma_dev->default_gid_type);
+
+free_cma_dev:
+ kfree(cma_dev);
+ return ret;
+}
+
static void cma_remove_one(struct ib_device *device, void *client_data)
{
struct cma_device *cma_dev = client_data;
diff --git a/drivers/infiniband/core/cma_configfs.c b/drivers/infiniband/core/cma_configfs.c
index 7ec4af2ed87a..97a77ea8d3c9 100644
--- a/drivers/infiniband/core/cma_configfs.c
+++ b/drivers/infiniband/core/cma_configfs.c
@@ -115,7 +115,7 @@ static ssize_t default_roce_mode_show(struct config_item *item,
if (gid_type < 0)
return gid_type;
- return sprintf(buf, "%s\n", ib_cache_gid_type_str(gid_type));
+ return sysfs_emit(buf, "%s\n", ib_cache_gid_type_str(gid_type));
}
static ssize_t default_roce_mode_store(struct config_item *item,
@@ -131,8 +131,10 @@ static ssize_t default_roce_mode_store(struct config_item *item,
return ret;
gid_type = ib_cache_gid_parse_type_str(buf);
- if (gid_type < 0)
+ if (gid_type < 0) {
+ cma_configfs_params_put(cma_dev);
return -EINVAL;
+ }
ret = cma_set_default_gid_type(cma_dev, group->port_num, gid_type);
@@ -157,7 +159,7 @@ static ssize_t default_roce_tos_show(struct config_item *item, char *buf)
tos = cma_get_default_roce_tos(cma_dev, group->port_num);
cma_configfs_params_put(cma_dev);
- return sprintf(buf, "%u\n", tos);
+ return sysfs_emit(buf, "%u\n", tos);
}
static ssize_t default_roce_tos_store(struct config_item *item,
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index e84b0fedaacb..315f7a297eee 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -318,15 +318,12 @@ struct ib_device *ib_device_get_by_index(const struct net *net, u32 index);
void nldev_init(void);
void nldev_exit(void);
-static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,
- struct ib_pd *pd,
- struct ib_qp_init_attr *attr,
- struct ib_udata *udata,
- struct ib_uqp_object *uobj)
+static inline struct ib_qp *
+_ib_create_qp(struct ib_device *dev, struct ib_pd *pd,
+ struct ib_qp_init_attr *attr, struct ib_udata *udata,
+ struct ib_uqp_object *uobj, const char *caller)
{
- enum ib_qp_type qp_type = attr->qp_type;
struct ib_qp *qp;
- bool is_xrc;
if (!dev->ops.create_qp)
return ERR_PTR(-EOPNOTSUPP);
@@ -347,6 +344,7 @@ static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,
qp->srq = attr->srq;
qp->rwq_ind_tbl = attr->rwq_ind_tbl;
qp->event_handler = attr->event_handler;
+ qp->port = attr->port_num;
atomic_set(&qp->usecnt, 0);
spin_lock_init(&qp->mr_lock);
@@ -354,16 +352,9 @@ static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,
INIT_LIST_HEAD(&qp->sig_mrs);
rdma_restrack_new(&qp->res, RDMA_RESTRACK_QP);
- /*
- * We don't track XRC QPs for now, because they don't have PD
- * and more importantly they are created internaly by driver,
- * see mlx5 create_dev_resources() as an example.
- */
- is_xrc = qp_type == IB_QPT_XRC_INI || qp_type == IB_QPT_XRC_TGT;
- if ((qp_type < IB_QPT_MAX && !is_xrc) || qp_type == IB_QPT_DRIVER) {
- rdma_restrack_parent_name(&qp->res, &pd->res);
- rdma_restrack_add(&qp->res);
- }
+ WARN_ONCE(!udata && !caller, "Missing kernel QP owner");
+ rdma_restrack_set_name(&qp->res, udata ? NULL : caller);
+ rdma_restrack_add(&qp->res);
return qp;
}
@@ -411,7 +402,6 @@ void rdma_umap_priv_init(struct rdma_umap_priv *priv,
struct vm_area_struct *vma,
struct rdma_user_mmap_entry *entry);
-void ib_cq_pool_init(struct ib_device *dev);
-void ib_cq_pool_destroy(struct ib_device *dev);
+void ib_cq_pool_cleanup(struct ib_device *dev);
#endif /* _CORE_PRIV_H */
diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c
index e4ff0d3328b6..92745522250e 100644
--- a/drivers/infiniband/core/counters.c
+++ b/drivers/infiniband/core/counters.c
@@ -64,8 +64,40 @@ out:
return ret;
}
-static struct rdma_counter *rdma_counter_alloc(struct ib_device *dev, u8 port,
- enum rdma_nl_counter_mode mode)
+static void auto_mode_init_counter(struct rdma_counter *counter,
+ const struct ib_qp *qp,
+ enum rdma_nl_counter_mask new_mask)
+{
+ struct auto_mode_param *param = &counter->mode.param;
+
+ counter->mode.mode = RDMA_COUNTER_MODE_AUTO;
+ counter->mode.mask = new_mask;
+
+ if (new_mask & RDMA_COUNTER_MASK_QP_TYPE)
+ param->qp_type = qp->qp_type;
+}
+
+static int __rdma_counter_bind_qp(struct rdma_counter *counter,
+ struct ib_qp *qp)
+{
+ int ret;
+
+ if (qp->counter)
+ return -EINVAL;
+
+ if (!qp->device->ops.counter_bind_qp)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&counter->lock);
+ ret = qp->device->ops.counter_bind_qp(counter, qp);
+ mutex_unlock(&counter->lock);
+
+ return ret;
+}
+
+static struct rdma_counter *alloc_and_bind(struct ib_device *dev, u8 port,
+ struct ib_qp *qp,
+ enum rdma_nl_counter_mode mode)
{
struct rdma_port_counter *port_counter;
struct rdma_counter *counter;
@@ -88,11 +120,22 @@ static struct rdma_counter *rdma_counter_alloc(struct ib_device *dev, u8 port,
port_counter = &dev->port_data[port].port_counter;
mutex_lock(&port_counter->lock);
- if (mode == RDMA_COUNTER_MODE_MANUAL) {
+ switch (mode) {
+ case RDMA_COUNTER_MODE_MANUAL:
ret = __counter_set_mode(&port_counter->mode,
RDMA_COUNTER_MODE_MANUAL, 0);
- if (ret)
+ if (ret) {
+ mutex_unlock(&port_counter->lock);
goto err_mode;
+ }
+ break;
+ case RDMA_COUNTER_MODE_AUTO:
+ auto_mode_init_counter(counter, qp, port_counter->mode.mask);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ mutex_unlock(&port_counter->lock);
+ goto err_mode;
}
port_counter->num_counters++;
@@ -102,10 +145,15 @@ static struct rdma_counter *rdma_counter_alloc(struct ib_device *dev, u8 port,
kref_init(&counter->kref);
mutex_init(&counter->lock);
+ ret = __rdma_counter_bind_qp(counter, qp);
+ if (ret)
+ goto err_mode;
+
+ rdma_restrack_parent_name(&counter->res, &qp->res);
+ rdma_restrack_add(&counter->res);
return counter;
err_mode:
- mutex_unlock(&port_counter->lock);
kfree(counter->stats);
err_stats:
rdma_restrack_put(&counter->res);
@@ -132,19 +180,6 @@ static void rdma_counter_free(struct rdma_counter *counter)
kfree(counter);
}
-static void auto_mode_init_counter(struct rdma_counter *counter,
- const struct ib_qp *qp,
- enum rdma_nl_counter_mask new_mask)
-{
- struct auto_mode_param *param = &counter->mode.param;
-
- counter->mode.mode = RDMA_COUNTER_MODE_AUTO;
- counter->mode.mask = new_mask;
-
- if (new_mask & RDMA_COUNTER_MASK_QP_TYPE)
- param->qp_type = qp->qp_type;
-}
-
static bool auto_mode_match(struct ib_qp *qp, struct rdma_counter *counter,
enum rdma_nl_counter_mask auto_mask)
{
@@ -161,24 +196,6 @@ static bool auto_mode_match(struct ib_qp *qp, struct rdma_counter *counter,
return match;
}
-static int __rdma_counter_bind_qp(struct rdma_counter *counter,
- struct ib_qp *qp)
-{
- int ret;
-
- if (qp->counter)
- return -EINVAL;
-
- if (!qp->device->ops.counter_bind_qp)
- return -EOPNOTSUPP;
-
- mutex_lock(&counter->lock);
- ret = qp->device->ops.counter_bind_qp(counter, qp);
- mutex_unlock(&counter->lock);
-
- return ret;
-}
-
static int __rdma_counter_unbind_qp(struct ib_qp *qp)
{
struct rdma_counter *counter = qp->counter;
@@ -247,13 +264,6 @@ next:
return counter;
}
-static void rdma_counter_res_add(struct rdma_counter *counter,
- struct ib_qp *qp)
-{
- rdma_restrack_parent_name(&counter->res, &qp->res);
- rdma_restrack_add(&counter->res);
-}
-
static void counter_release(struct kref *kref)
{
struct rdma_counter *counter;
@@ -275,7 +285,7 @@ int rdma_counter_bind_qp_auto(struct ib_qp *qp, u8 port)
struct rdma_counter *counter;
int ret;
- if (!qp->res.valid || rdma_is_kernel_res(&qp->res))
+ if (!rdma_restrack_is_tracked(&qp->res) || rdma_is_kernel_res(&qp->res))
return 0;
if (!rdma_is_port_valid(dev, port))
@@ -293,19 +303,9 @@ int rdma_counter_bind_qp_auto(struct ib_qp *qp, u8 port)
return ret;
}
} else {
- counter = rdma_counter_alloc(dev, port, RDMA_COUNTER_MODE_AUTO);
+ counter = alloc_and_bind(dev, port, qp, RDMA_COUNTER_MODE_AUTO);
if (!counter)
return -ENOMEM;
-
- auto_mode_init_counter(counter, qp, port_counter->mode.mask);
-
- ret = __rdma_counter_bind_qp(counter, qp);
- if (ret) {
- rdma_counter_free(counter);
- return ret;
- }
-
- rdma_counter_res_add(counter, qp);
}
return 0;
@@ -419,15 +419,6 @@ err:
return NULL;
}
-static int rdma_counter_bind_qp_manual(struct rdma_counter *counter,
- struct ib_qp *qp)
-{
- if ((counter->device != qp->device) || (counter->port != qp->port))
- return -EINVAL;
-
- return __rdma_counter_bind_qp(counter, qp);
-}
-
static struct rdma_counter *rdma_get_counter_by_id(struct ib_device *dev,
u32 counter_id)
{
@@ -475,7 +466,12 @@ int rdma_counter_bind_qpn(struct ib_device *dev, u8 port,
goto err_task;
}
- ret = rdma_counter_bind_qp_manual(counter, qp);
+ if ((counter->device != qp->device) || (counter->port != qp->port)) {
+ ret = -EINVAL;
+ goto err_task;
+ }
+
+ ret = __rdma_counter_bind_qp(counter, qp);
if (ret)
goto err_task;
@@ -520,26 +516,18 @@ int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u8 port,
goto err;
}
- counter = rdma_counter_alloc(dev, port, RDMA_COUNTER_MODE_MANUAL);
+ counter = alloc_and_bind(dev, port, qp, RDMA_COUNTER_MODE_MANUAL);
if (!counter) {
ret = -ENOMEM;
goto err;
}
- ret = rdma_counter_bind_qp_manual(counter, qp);
- if (ret)
- goto err_bind;
-
if (counter_id)
*counter_id = counter->id;
- rdma_counter_res_add(counter, qp);
-
rdma_restrack_put(&qp->res);
- return ret;
+ return 0;
-err_bind:
- rdma_counter_free(counter);
err:
rdma_restrack_put(&qp->res);
return ret;
diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
index 12ebacf52958..433b426729d4 100644
--- a/drivers/infiniband/core/cq.c
+++ b/drivers/infiniband/core/cq.c
@@ -123,7 +123,7 @@ static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *wcs,
}
/**
- * ib_process_direct_cq - process a CQ in caller context
+ * ib_process_cq_direct - process a CQ in caller context
* @cq: CQ to process
* @budget: number of CQEs to poll for
*
@@ -197,7 +197,7 @@ static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private)
}
/**
- * __ib_alloc_cq allocate a completion queue
+ * __ib_alloc_cq - allocate a completion queue
* @dev: device to allocate the CQ for
* @private: driver private data, accessible from cq->cq_context
* @nr_cqe: number of CQEs to allocate
@@ -349,16 +349,7 @@ void ib_free_cq(struct ib_cq *cq)
}
EXPORT_SYMBOL(ib_free_cq);
-void ib_cq_pool_init(struct ib_device *dev)
-{
- unsigned int i;
-
- spin_lock_init(&dev->cq_pools_lock);
- for (i = 0; i < ARRAY_SIZE(dev->cq_pools); i++)
- INIT_LIST_HEAD(&dev->cq_pools[i]);
-}
-
-void ib_cq_pool_destroy(struct ib_device *dev)
+void ib_cq_pool_cleanup(struct ib_device *dev)
{
struct ib_cq *cq, *n;
unsigned int i;
@@ -367,6 +358,7 @@ void ib_cq_pool_destroy(struct ib_device *dev)
list_for_each_entry_safe(cq, n, &dev->cq_pools[i],
pool_entry) {
WARN_ON(cq->cqe_used);
+ list_del(&cq->pool_entry);
cq->shared = false;
ib_free_cq(cq);
}
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index a3b1fc84cdca..e96f979e6d52 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -284,6 +284,7 @@ static void ib_device_check_mandatory(struct ib_device *device)
IB_MANDATORY_FUNC(poll_cq),
IB_MANDATORY_FUNC(req_notify_cq),
IB_MANDATORY_FUNC(get_dma_mr),
+ IB_MANDATORY_FUNC(reg_user_mr),
IB_MANDATORY_FUNC(dereg_mr),
IB_MANDATORY_FUNC(get_port_immutable)
};
@@ -569,6 +570,7 @@ static void rdma_init_coredev(struct ib_core_device *coredev,
struct ib_device *_ib_alloc_device(size_t size)
{
struct ib_device *device;
+ unsigned int i;
if (WARN_ON(size < sizeof(struct ib_device)))
return NULL;
@@ -600,6 +602,41 @@ struct ib_device *_ib_alloc_device(size_t size)
init_completion(&device->unreg_completion);
INIT_WORK(&device->unregistration_work, ib_unregister_work);
+ spin_lock_init(&device->cq_pools_lock);
+ for (i = 0; i < ARRAY_SIZE(device->cq_pools); i++)
+ INIT_LIST_HEAD(&device->cq_pools[i]);
+
+ device->uverbs_cmd_mask =
+ BIT_ULL(IB_USER_VERBS_CMD_ALLOC_MW) |
+ BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD) |
+ BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST) |
+ BIT_ULL(IB_USER_VERBS_CMD_CLOSE_XRCD) |
+ BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH) |
+ BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
+ BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ) |
+ BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP) |
+ BIT_ULL(IB_USER_VERBS_CMD_CREATE_SRQ) |
+ BIT_ULL(IB_USER_VERBS_CMD_CREATE_XSRQ) |
+ BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_MW) |
+ BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD) |
+ BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR) |
+ BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH) |
+ BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ) |
+ BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP) |
+ BIT_ULL(IB_USER_VERBS_CMD_DESTROY_SRQ) |
+ BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST) |
+ BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT) |
+ BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP) |
+ BIT_ULL(IB_USER_VERBS_CMD_MODIFY_SRQ) |
+ BIT_ULL(IB_USER_VERBS_CMD_OPEN_QP) |
+ BIT_ULL(IB_USER_VERBS_CMD_OPEN_XRCD) |
+ BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE) |
+ BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT) |
+ BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP) |
+ BIT_ULL(IB_USER_VERBS_CMD_QUERY_SRQ) |
+ BIT_ULL(IB_USER_VERBS_CMD_REG_MR) |
+ BIT_ULL(IB_USER_VERBS_CMD_REREG_MR) |
+ BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ);
return device;
}
EXPORT_SYMBOL(_ib_alloc_device);
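Centralizing uverbs_cmd_mask in _ib_alloc_device() gives every device the standard command set by default, so drivers only clear bits for commands they cannot support instead of each re-declaring the whole mask. A compilable sketch of the BIT_ULL mask arithmetic (the command numbers here are illustrative, not the real ABI values):

	#include <stdint.h>
	#include <stdio.h>

	#define BIT_ULL(n) (1ULL << (n))

	enum { CMD_CREATE_QP = 24, CMD_ALLOC_MW = 32 };	/* illustrative */

	int main(void)
	{
		uint64_t mask = BIT_ULL(CMD_CREATE_QP) | BIT_ULL(CMD_ALLOC_MW);

		mask &= ~BIT_ULL(CMD_ALLOC_MW);	/* driver opts out of MWs */
		printf("create_qp: %d\n", !!(mask & BIT_ULL(CMD_CREATE_QP)));
		printf("alloc_mw:  %d\n", !!(mask & BIT_ULL(CMD_ALLOC_MW)));
		return 0;
	}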
@@ -1177,25 +1214,6 @@ out:
return ret;
}
-static void setup_dma_device(struct ib_device *device,
- struct device *dma_device)
-{
- /*
- * If the caller does not provide a DMA capable device then the IB
- * device will be used. In this case the caller should fully setup the
- * ibdev for DMA. This usually means using dma_virt_ops.
- */
-#ifdef CONFIG_DMA_VIRT_OPS
- if (!dma_device) {
- device->dev.dma_ops = &dma_virt_ops;
- dma_device = &device->dev;
- }
-#endif
- WARN_ON(!dma_device);
- device->dma_device = dma_device;
- WARN_ON(!device->dma_device->dma_parms);
-}
-
/*
* setup_device() allocates memory and sets up data that requires calling the
* device ops, this is the only reason these actions are not done during
@@ -1249,7 +1267,7 @@ static void disable_device(struct ib_device *device)
remove_client_context(device, cid);
}
- ib_cq_pool_destroy(device);
+ ib_cq_pool_cleanup(device);
/* Pairs with refcount_set in enable_device */
ib_device_put(device);
@@ -1294,8 +1312,6 @@ static int enable_device_and_get(struct ib_device *device)
goto out;
}
- ib_cq_pool_init(device);
-
down_read(&clients_rwsem);
xa_for_each_marked (&clients, index, client, CLIENT_REGISTERED) {
ret = add_client_context(device, client);
@@ -1341,7 +1357,14 @@ int ib_register_device(struct ib_device *device, const char *name,
if (ret)
return ret;
- setup_dma_device(device, dma_device);
+ /*
+ * If the caller does not provide a DMA capable device then the IB core
+ * will set up ib_sge and scatterlist structures that stash the kernel
+ * virtual address into the address field.
+ */
+ WARN_ON(dma_device && !dma_device->dma_parms);
+ device->dma_device = dma_device;
+
ret = setup_device(device);
if (ret)
return ret;
@@ -1374,9 +1397,6 @@ int ib_register_device(struct ib_device *device, const char *name,
}
ret = enable_device_and_get(device);
- dev_set_uevent_suppress(&device->dev, false);
- /* Mark for userspace that device is ready */
- kobject_uevent(&device->dev.kobj, KOBJ_ADD);
if (ret) {
void (*dealloc_fn)(struct ib_device *);
@@ -1396,8 +1416,12 @@ int ib_register_device(struct ib_device *device, const char *name,
ib_device_put(device);
__ib_unregister_device(device);
device->ops.dealloc_driver = dealloc_fn;
+ dev_set_uevent_suppress(&device->dev, false);
return ret;
}
+ dev_set_uevent_suppress(&device->dev, false);
+ /* Mark for userspace that device is ready */
+ kobject_uevent(&device->dev.kobj, KOBJ_ADD);
ib_device_put(device);
return 0;
@@ -2576,6 +2600,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, create_qp);
SET_DEVICE_OP(dev_ops, create_rwq_ind_table);
SET_DEVICE_OP(dev_ops, create_srq);
+ SET_DEVICE_OP(dev_ops, create_user_ah);
SET_DEVICE_OP(dev_ops, create_wq);
SET_DEVICE_OP(dev_ops, dealloc_dm);
SET_DEVICE_OP(dev_ops, dealloc_driver);
@@ -2675,6 +2700,21 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
}
EXPORT_SYMBOL(ib_set_device_ops);
+#ifdef CONFIG_INFINIBAND_VIRT_DMA
+int ib_dma_virt_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents)
+{
+ struct scatterlist *s;
+ int i;
+
+ for_each_sg(sg, s, nents, i) {
+ sg_dma_address(s) = (uintptr_t)sg_virt(s);
+ sg_dma_len(s) = s->length;
+ }
+ return nents;
+}
+EXPORT_SYMBOL(ib_dma_virt_map_sg);
+#endif /* CONFIG_INFINIBAND_VIRT_DMA */
+
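ib_dma_virt_map_sg() is what lets CONFIG_INFINIBAND_VIRT_DMA devices (software transports such as rxe and siw) drop dma_virt_ops: the "DMA address" is just the kernel virtual address, and callers detect the mode by the device having no dma_device. A standalone model of the same stashing trick on plain memory, purely illustrative:

	#include <stdint.h>
	#include <stdio.h>

	struct fake_sg {
		void     *virt;	/* what sg_virt() would return */
		uint64_t  dma;	/* what sg_dma_address() would hold */
		unsigned  len;
	};

	int main(void)
	{
		char payload[64];
		struct fake_sg sg = { .virt = payload, .len = sizeof(payload) };

		/* "map": no IOMMU involved, stash the pointer as the handle */
		sg.dma = (uintptr_t)sg.virt;

		/* the consumer casts it straight back to a pointer */
		printf("round trip ok: %d\n",
		       (void *)(uintptr_t)sg.dma == (void *)payload);
		return 0;
	}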
static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = {
[RDMA_NL_LS_OP_RESOLVE] = {
.doit = ib_nl_handle_resolve_resp,
diff --git a/drivers/infiniband/core/iwpm_util.h b/drivers/infiniband/core/iwpm_util.h
index 1bf87d9fd0bd..eeb8e6010907 100644
--- a/drivers/infiniband/core/iwpm_util.h
+++ b/drivers/infiniband/core/iwpm_util.h
@@ -141,7 +141,7 @@ int iwpm_wait_complete_req(struct iwpm_nlmsg_request *nlmsg_request);
int iwpm_get_nlmsg_seq(void);
/**
- * iwpm_add_reminfo - Add remote address info of the connecting peer
+ * iwpm_add_remote_info - Add remote address info of the connecting peer
* to the remote info hash table
* @reminfo: The remote info to be added
*/
diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
index ffe11b03724c..75eafd9208aa 100644
--- a/drivers/infiniband/core/rdma_core.c
+++ b/drivers/infiniband/core/rdma_core.c
@@ -137,15 +137,9 @@ static int uverbs_destroy_uobject(struct ib_uobject *uobj,
} else if (uobj->object) {
ret = uobj->uapi_object->type_class->destroy_hw(uobj, reason,
attrs);
- if (ret) {
- if (ib_is_destroy_retryable(ret, reason, uobj))
- return ret;
-
- /* Nothing to be done, dangle the memory and move on */
- WARN(true,
- "ib_uverbs: failed to remove uobject id %d, driver err=%d",
- uobj->id, ret);
- }
+ if (ret)
+ /* Nothing to be done, wait till ucontext cleans it up */
+ return ret;
uobj->object = NULL;
}
@@ -543,12 +537,7 @@ static int __must_check destroy_hw_idr_uobject(struct ib_uobject *uobj,
struct uverbs_obj_idr_type, type);
int ret = idr_type->destroy_object(uobj, why, attrs);
- /*
- * We can only fail gracefully if the user requested to destroy the
- * object or when a retry may be called upon an error.
- * In the rest of the cases, just remove whatever you can.
- */
- if (ib_is_destroy_retryable(ret, why, uobj))
+ if (ret)
return ret;
if (why == RDMA_REMOVE_ABORT)
@@ -581,11 +570,8 @@ static int __must_check destroy_hw_fd_uobject(struct ib_uobject *uobj,
{
const struct uverbs_obj_fd_type *fd_type = container_of(
uobj->uapi_object->type_attrs, struct uverbs_obj_fd_type, type);
- int ret = fd_type->destroy_object(uobj, why);
-
- if (ib_is_destroy_retryable(ret, why, uobj))
- return ret;
+ fd_type->destroy_object(uobj, why);
return 0;
}
@@ -609,6 +595,27 @@ static void alloc_commit_idr_uobject(struct ib_uobject *uobj)
WARN_ON(old != NULL);
}
+static void swap_idr_uobjects(struct ib_uobject *obj_old,
+ struct ib_uobject *obj_new)
+{
+ struct ib_uverbs_file *ufile = obj_old->ufile;
+ void *old;
+
+ /*
+ * New must be an object that has been allocated but not yet committed; this
+ * moves the pre-committed state to obj_old, and new must still be committed.
+ */
+ old = xa_cmpxchg(&ufile->idr, obj_old->id, obj_old, XA_ZERO_ENTRY,
+ GFP_KERNEL);
+ if (WARN_ON(old != obj_old))
+ return;
+
+ swap(obj_old->id, obj_new->id);
+
+ old = xa_cmpxchg(&ufile->idr, obj_old->id, NULL, obj_old, GFP_KERNEL);
+ WARN_ON(old != NULL);
+}
+
static void alloc_commit_fd_uobject(struct ib_uobject *uobj)
{
int fd = uobj->id;
@@ -655,6 +662,35 @@ void rdma_alloc_commit_uobject(struct ib_uobject *uobj,
}
/*
+ * new_uobj will be assigned to the handle currently used by to_uobj, and
+ * to_uobj will be destroyed.
+ *
+ * Upon return the caller must do:
+ * rdma_alloc_commit_uobject(new_uobj)
+ * uobj_put_destroy(to_uobj)
+ *
+ * to_uobj must have a write get but the put mode switches to destroy once
+ * this is called.
+ */
+void rdma_assign_uobject(struct ib_uobject *to_uobj, struct ib_uobject *new_uobj,
+ struct uverbs_attr_bundle *attrs)
+{
+ assert_uverbs_usecnt(new_uobj, UVERBS_LOOKUP_WRITE);
+
+ if (WARN_ON(to_uobj->uapi_object != new_uobj->uapi_object ||
+ !to_uobj->uapi_object->type_class->swap_uobjects))
+ return;
+
+ to_uobj->uapi_object->type_class->swap_uobjects(to_uobj, new_uobj);
+
+ /*
+ * If this fails then the uobject is still completely valid (though with
+ * a new ID) and we leak it until context close.
+ */
+ uverbs_destroy_uobject(to_uobj, RDMA_REMOVE_DESTROY, attrs);
+}
+
+/*
* This consumes the kref for uobj. It is up to the caller to unwind the HW
* object and anything else connected to uobj before calling this.
*/
@@ -761,6 +797,7 @@ const struct uverbs_obj_type_class uverbs_idr_class = {
.lookup_put = lookup_put_idr_uobject,
.destroy_hw = destroy_hw_idr_uobject,
.remove_handle = remove_handle_idr_uobject,
+ .swap_uobjects = swap_idr_uobjects,
};
EXPORT_SYMBOL(uverbs_idr_class);
@@ -863,11 +900,18 @@ static int __uverbs_cleanup_ufile(struct ib_uverbs_file *ufile,
* racing with a lookup_get.
*/
WARN_ON(uverbs_try_lock_object(obj, UVERBS_LOOKUP_WRITE));
+ if (reason == RDMA_REMOVE_DRIVER_FAILURE)
+ obj->object = NULL;
if (!uverbs_destroy_uobject(obj, reason, &attrs))
ret = 0;
else
atomic_set(&obj->usecnt, 0);
}
+
+ if (reason == RDMA_REMOVE_DRIVER_FAILURE) {
+ WARN_ON(!list_empty(&ufile->uobjects));
+ return 0;
+ }
return ret;
}
@@ -889,21 +933,12 @@ void uverbs_destroy_ufile_hw(struct ib_uverbs_file *ufile,
if (!ufile->ucontext)
goto done;
- ufile->ucontext->cleanup_retryable = true;
- while (!list_empty(&ufile->uobjects))
- if (__uverbs_cleanup_ufile(ufile, reason)) {
- /*
- * No entry was cleaned-up successfully during this
- * iteration. It is a driver bug to fail destruction.
- */
- WARN_ON(!list_empty(&ufile->uobjects));
- break;
- }
-
- ufile->ucontext->cleanup_retryable = false;
- if (!list_empty(&ufile->uobjects))
- __uverbs_cleanup_ufile(ufile, reason);
+ while (!list_empty(&ufile->uobjects) &&
+ !__uverbs_cleanup_ufile(ufile, reason)) {
+ }
+ if (WARN_ON(!list_empty(&ufile->uobjects)))
+ __uverbs_cleanup_ufile(ufile, RDMA_REMOVE_DRIVER_FAILURE);
ufile_destroy_ucontext(ufile, reason);
done:
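swap_idr_uobjects() relies on a subtlety of the xarray API: replacing the live pointer with XA_ZERO_ENTRY keeps the index allocated but makes lookups return NULL, which is why the second xa_cmpxchg() compares against NULL rather than against the zero entry. A minimal sketch of the reserve-then-republish pattern, assuming kernel context (function name hypothetical):

	#include <linux/xarray.h>

	static DEFINE_XARRAY_ALLOC(table);

	static void hide_and_move(void *obj, unsigned long id,
				  unsigned long new_id)
	{
		void *old;

		/* hide: slot stays allocated, lookups now see NULL */
		old = xa_cmpxchg(&table, id, obj, XA_ZERO_ENTRY, GFP_KERNEL);
		if (WARN_ON(old != obj))
			return;	/* another thread already owns the slot */

		/* republish under the new index; a zero entry reads back
		 * as NULL, hence the NULL comparison */
		old = xa_cmpxchg(&table, new_id, NULL, obj, GFP_KERNEL);
		WARN_ON(old != NULL);
	}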
diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c
index 4aeeaaed0f17..ff1551b3cf61 100644
--- a/drivers/infiniband/core/restrack.c
+++ b/drivers/infiniband/core/restrack.c
@@ -221,19 +221,29 @@ void rdma_restrack_add(struct rdma_restrack_entry *res)
{
struct ib_device *dev = res_to_dev(res);
struct rdma_restrack_root *rt;
- int ret;
+ int ret = 0;
if (!dev)
return;
+ if (res->no_track)
+ goto out;
+
rt = &dev->res[res->type];
if (res->type == RDMA_RESTRACK_QP) {
/* Special case to ensure that LQPN points to right QP */
struct ib_qp *qp = container_of(res, struct ib_qp, res);
- ret = xa_insert(&rt->xa, qp->qp_num, res, GFP_KERNEL);
- res->id = ret ? 0 : qp->qp_num;
+ WARN_ONCE(qp->qp_num >> 24 || qp->port >> 8,
+ "QP number 0x%0X and port 0x%0X", qp->qp_num,
+ qp->port);
+ res->id = qp->qp_num;
+ if (qp->qp_type == IB_QPT_SMI || qp->qp_type == IB_QPT_GSI)
+ res->id |= qp->port << 24;
+ ret = xa_insert(&rt->xa, res->id, res, GFP_KERNEL);
+ if (ret)
+ res->id = 0;
} else if (res->type == RDMA_RESTRACK_COUNTER) {
/* Special case to ensure that cntn points to right counter */
struct rdma_counter *counter;
@@ -244,8 +254,10 @@ void rdma_restrack_add(struct rdma_restrack_entry *res)
} else {
ret = xa_alloc_cyclic(&rt->xa, &res->id, res, xa_limit_32b,
&rt->next_id, GFP_KERNEL);
+ ret = (ret < 0) ? ret : 0;
}
+out:
if (!ret)
res->valid = true;
}
@@ -318,6 +330,9 @@ void rdma_restrack_del(struct rdma_restrack_entry *res)
return;
}
+ if (res->no_track)
+ goto out;
+
dev = res_to_dev(res);
if (WARN_ON(!dev))
return;
@@ -328,8 +343,9 @@ void rdma_restrack_del(struct rdma_restrack_entry *res)
if (res->type == RDMA_RESTRACK_MR || res->type == RDMA_RESTRACK_QP)
return;
WARN_ON(old != res);
- res->valid = false;
+out:
+ res->valid = false;
rdma_restrack_put(res);
wait_for_completion(&res->comp);
}
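The new key layout exists because SMI/GSI QPs share the same low QP numbers (QP0/QP1) on every port, so qp_num alone cannot be a unique xarray index; for those QP types the port is folded into the top byte, and the WARN_ONCE checks the 24-bit/8-bit assumptions that make the packing safe. A compilable encode/decode sketch:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t restrack_qp_id(uint32_t qp_num, uint32_t port)
	{
		/* valid only while qp_num fits in 24 bits and port in 8 */
		return qp_num | (port << 24);
	}

	int main(void)
	{
		uint32_t id = restrack_qp_id(1, 3);	/* GSI (QP1), port 3 */

		printf("id=0x%08x qpn=%u port=%u\n",
		       id, id & 0xffffff, id >> 24);
		return 0;
	}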
diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
index 13f43ab7220b..a96030b784eb 100644
--- a/drivers/infiniband/core/rw.c
+++ b/drivers/infiniband/core/rw.c
@@ -285,8 +285,11 @@ static void rdma_rw_unmap_sg(struct ib_device *dev, struct scatterlist *sg,
static int rdma_rw_map_sg(struct ib_device *dev, struct scatterlist *sg,
u32 sg_cnt, enum dma_data_direction dir)
{
- if (is_pci_p2pdma_page(sg_page(sg)))
+ if (is_pci_p2pdma_page(sg_page(sg))) {
+ if (WARN_ON_ONCE(ib_uses_virt_dma(dev)))
+ return 0;
return pci_p2pdma_map_sg(dev->dma_device, sg, sg_cnt, dir);
+ }
return ib_dma_map_sg(dev, sg, sg_cnt, dir);
}
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 8c930bf1df89..89a831fa1885 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -1435,7 +1435,8 @@ enum opa_pr_supported {
};
/**
- * Check if current PR query can be an OPA query.
+ * opa_pr_query_possible - Check if current PR query can be an OPA query.
+ *
* Returns PR_NOT_SUPPORTED if a path record query is not
* possible, PR_OPA_SUPPORTED if an OPA path record query
* is possible and PR_IB_SUPPORTED if an IB path record
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 914cddea525d..b8abb30f80df 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -165,9 +165,11 @@ static ssize_t state_show(struct ib_port *p, struct port_attribute *unused,
if (ret)
return ret;
- return sprintf(buf, "%d: %s\n", attr.state,
- attr.state >= 0 && attr.state < ARRAY_SIZE(state_name) ?
- state_name[attr.state] : "UNKNOWN");
+ return sysfs_emit(buf, "%d: %s\n", attr.state,
+ attr.state >= 0 &&
+ attr.state < ARRAY_SIZE(state_name) ?
+ state_name[attr.state] :
+ "UNKNOWN");
}
static ssize_t lid_show(struct ib_port *p, struct port_attribute *unused,
@@ -180,7 +182,7 @@ static ssize_t lid_show(struct ib_port *p, struct port_attribute *unused,
if (ret)
return ret;
- return sprintf(buf, "0x%x\n", attr.lid);
+ return sysfs_emit(buf, "0x%x\n", attr.lid);
}
static ssize_t lid_mask_count_show(struct ib_port *p,
@@ -194,7 +196,7 @@ static ssize_t lid_mask_count_show(struct ib_port *p,
if (ret)
return ret;
- return sprintf(buf, "%d\n", attr.lmc);
+ return sysfs_emit(buf, "%d\n", attr.lmc);
}
static ssize_t sm_lid_show(struct ib_port *p, struct port_attribute *unused,
@@ -207,7 +209,7 @@ static ssize_t sm_lid_show(struct ib_port *p, struct port_attribute *unused,
if (ret)
return ret;
- return sprintf(buf, "0x%x\n", attr.sm_lid);
+ return sysfs_emit(buf, "0x%x\n", attr.sm_lid);
}
static ssize_t sm_sl_show(struct ib_port *p, struct port_attribute *unused,
@@ -220,7 +222,7 @@ static ssize_t sm_sl_show(struct ib_port *p, struct port_attribute *unused,
if (ret)
return ret;
- return sprintf(buf, "%d\n", attr.sm_sl);
+ return sysfs_emit(buf, "%d\n", attr.sm_sl);
}
static ssize_t cap_mask_show(struct ib_port *p, struct port_attribute *unused,
@@ -233,7 +235,7 @@ static ssize_t cap_mask_show(struct ib_port *p, struct port_attribute *unused,
if (ret)
return ret;
- return sprintf(buf, "0x%08x\n", attr.port_cap_flags);
+ return sysfs_emit(buf, "0x%08x\n", attr.port_cap_flags);
}
static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused,
@@ -273,6 +275,10 @@ static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused,
speed = " HDR";
rate = 500;
break;
+ case IB_SPEED_NDR:
+ speed = " NDR";
+ rate = 1000;
+ break;
case IB_SPEED_SDR:
default: /* default to SDR for invalid rates */
speed = " SDR";
@@ -284,9 +290,9 @@ static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused,
if (rate < 0)
return -EINVAL;
- return sprintf(buf, "%d%s Gb/sec (%dX%s)\n",
- rate / 10, rate % 10 ? ".5" : "",
- ib_width_enum_to_int(attr.active_width), speed);
+ return sysfs_emit(buf, "%d%s Gb/sec (%dX%s)\n", rate / 10,
+ rate % 10 ? ".5" : "",
+ ib_width_enum_to_int(attr.active_width), speed);
}
static const char *phys_state_to_str(enum ib_port_phys_state phys_state)
@@ -318,21 +324,28 @@ static ssize_t phys_state_show(struct ib_port *p, struct port_attribute *unused,
if (ret)
return ret;
- return sprintf(buf, "%d: %s\n", attr.phys_state,
- phys_state_to_str(attr.phys_state));
+ return sysfs_emit(buf, "%d: %s\n", attr.phys_state,
+ phys_state_to_str(attr.phys_state));
}
static ssize_t link_layer_show(struct ib_port *p, struct port_attribute *unused,
char *buf)
{
+ const char *output;
+
switch (rdma_port_get_link_layer(p->ibdev, p->port_num)) {
case IB_LINK_LAYER_INFINIBAND:
- return sprintf(buf, "%s\n", "InfiniBand");
+ output = "InfiniBand";
+ break;
case IB_LINK_LAYER_ETHERNET:
- return sprintf(buf, "%s\n", "Ethernet");
+ output = "Ethernet";
+ break;
default:
- return sprintf(buf, "%s\n", "Unknown");
+ output = "Unknown";
+ break;
}
+
+ return sysfs_emit(buf, "%s\n", output);
}
static PORT_ATTR_RO(state);
@@ -358,27 +371,28 @@ static struct attribute *port_default_attrs[] = {
NULL
};
-static size_t print_ndev(const struct ib_gid_attr *gid_attr, char *buf)
+static ssize_t print_ndev(const struct ib_gid_attr *gid_attr, char *buf)
{
struct net_device *ndev;
- size_t ret = -EINVAL;
+ int ret = -EINVAL;
rcu_read_lock();
ndev = rcu_dereference(gid_attr->ndev);
if (ndev)
- ret = sprintf(buf, "%s\n", ndev->name);
+ ret = sysfs_emit(buf, "%s\n", ndev->name);
rcu_read_unlock();
return ret;
}
-static size_t print_gid_type(const struct ib_gid_attr *gid_attr, char *buf)
+static ssize_t print_gid_type(const struct ib_gid_attr *gid_attr, char *buf)
{
- return sprintf(buf, "%s\n", ib_cache_gid_type_str(gid_attr->gid_type));
+ return sysfs_emit(buf, "%s\n",
+ ib_cache_gid_type_str(gid_attr->gid_type));
}
static ssize_t _show_port_gid_attr(
struct ib_port *p, struct port_attribute *attr, char *buf,
- size_t (*print)(const struct ib_gid_attr *gid_attr, char *buf))
+ ssize_t (*print)(const struct ib_gid_attr *gid_attr, char *buf))
{
struct port_table_attribute *tab_attr =
container_of(attr, struct port_table_attribute, attr);
@@ -401,7 +415,7 @@ static ssize_t show_port_gid(struct ib_port *p, struct port_attribute *attr,
struct port_table_attribute *tab_attr =
container_of(attr, struct port_table_attribute, attr);
const struct ib_gid_attr *gid_attr;
- ssize_t ret;
+ int len;
gid_attr = rdma_get_gid_attr(p->ibdev, p->port_num, tab_attr->index);
if (IS_ERR(gid_attr)) {
@@ -416,12 +430,12 @@ static ssize_t show_port_gid(struct ib_port *p, struct port_attribute *attr,
* space throwing such error on fail to read gid, return zero
* GID as before. This maintains backward compatibility.
*/
- return sprintf(buf, "%pI6\n", zgid.raw);
+ return sysfs_emit(buf, "%pI6\n", zgid.raw);
}
- ret = sprintf(buf, "%pI6\n", gid_attr->gid.raw);
+ len = sysfs_emit(buf, "%pI6\n", gid_attr->gid.raw);
rdma_put_gid_attr(gid_attr);
- return ret;
+ return len;
}
static ssize_t show_port_gid_attr_ndev(struct ib_port *p,
@@ -443,13 +457,13 @@ static ssize_t show_port_pkey(struct ib_port *p, struct port_attribute *attr,
struct port_table_attribute *tab_attr =
container_of(attr, struct port_table_attribute, attr);
u16 pkey;
- ssize_t ret;
+ int ret;
ret = ib_query_pkey(p->ibdev, p->port_num, tab_attr->index, &pkey);
if (ret)
return ret;
- return sprintf(buf, "0x%04x\n", pkey);
+ return sysfs_emit(buf, "0x%04x\n", pkey);
}
#define PORT_PMA_ATTR(_name, _counter, _width, _offset) \
@@ -521,8 +535,9 @@ static ssize_t show_pma_counter(struct ib_port *p, struct port_attribute *attr,
container_of(attr, struct port_table_attribute, attr);
int offset = tab_attr->index & 0xffff;
int width = (tab_attr->index >> 16) & 0xff;
- ssize_t ret;
+ int ret;
u8 data[8];
+ int len;
ret = get_perf_mad(p->ibdev, p->port_num, tab_attr->attr_id, &data,
40 + offset / 8, sizeof(data));
@@ -531,30 +546,27 @@ static ssize_t show_pma_counter(struct ib_port *p, struct port_attribute *attr,
switch (width) {
case 4:
- ret = sprintf(buf, "%u\n", (*data >>
- (4 - (offset % 8))) & 0xf);
+ len = sysfs_emit(buf, "%u\n",
+ (*data >> (4 - (offset % 8))) & 0xf);
break;
case 8:
- ret = sprintf(buf, "%u\n", *data);
+ len = sysfs_emit(buf, "%u\n", *data);
break;
case 16:
- ret = sprintf(buf, "%u\n",
- be16_to_cpup((__be16 *)data));
+ len = sysfs_emit(buf, "%u\n", be16_to_cpup((__be16 *)data));
break;
case 32:
- ret = sprintf(buf, "%u\n",
- be32_to_cpup((__be32 *)data));
+ len = sysfs_emit(buf, "%u\n", be32_to_cpup((__be32 *)data));
break;
case 64:
- ret = sprintf(buf, "%llu\n",
- be64_to_cpup((__be64 *)data));
+ len = sysfs_emit(buf, "%llu\n", be64_to_cpup((__be64 *)data));
break;
-
default:
- ret = 0;
+ len = 0;
+ break;
}
- return ret;
+ return len;
}
static PORT_PMA_ATTR(symbol_error , 0, 16, 32);
@@ -815,12 +827,12 @@ static int update_hw_stats(struct ib_device *dev, struct rdma_hw_stats *stats,
return 0;
}
-static ssize_t print_hw_stat(struct ib_device *dev, int port_num,
- struct rdma_hw_stats *stats, int index, char *buf)
+static int print_hw_stat(struct ib_device *dev, int port_num,
+ struct rdma_hw_stats *stats, int index, char *buf)
{
u64 v = rdma_counter_get_hwstat_value(dev, port_num, index);
- return sprintf(buf, "%llu\n", stats->value[index] + v);
+ return sysfs_emit(buf, "%llu\n", stats->value[index] + v);
}
static ssize_t show_hw_stats(struct kobject *kobj, struct attribute *attr,
@@ -877,7 +889,7 @@ static ssize_t show_stats_lifespan(struct kobject *kobj,
msecs = jiffies_to_msecs(stats->lifespan);
mutex_unlock(&stats->lock);
- return sprintf(buf, "%d\n", msecs);
+ return sysfs_emit(buf, "%d\n", msecs);
}
static ssize_t set_stats_lifespan(struct kobject *kobj,
@@ -1224,21 +1236,34 @@ err_put:
return ret;
}
+static const char *node_type_string(int node_type)
+{
+ switch (node_type) {
+ case RDMA_NODE_IB_CA:
+ return "CA";
+ case RDMA_NODE_IB_SWITCH:
+ return "switch";
+ case RDMA_NODE_IB_ROUTER:
+ return "router";
+ case RDMA_NODE_RNIC:
+ return "RNIC";
+ case RDMA_NODE_USNIC:
+ return "usNIC";
+ case RDMA_NODE_USNIC_UDP:
+ return "usNIC UDP";
+ case RDMA_NODE_UNSPECIFIED:
+ return "unspecified";
+ }
+ return "<unknown>";
+}
+
static ssize_t node_type_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct ib_device *dev = rdma_device_to_ibdev(device);
- switch (dev->node_type) {
- case RDMA_NODE_IB_CA: return sprintf(buf, "%d: CA\n", dev->node_type);
- case RDMA_NODE_RNIC: return sprintf(buf, "%d: RNIC\n", dev->node_type);
- case RDMA_NODE_USNIC: return sprintf(buf, "%d: usNIC\n", dev->node_type);
- case RDMA_NODE_USNIC_UDP: return sprintf(buf, "%d: usNIC UDP\n", dev->node_type);
- case RDMA_NODE_UNSPECIFIED: return sprintf(buf, "%d: unspecified\n", dev->node_type);
- case RDMA_NODE_IB_SWITCH: return sprintf(buf, "%d: switch\n", dev->node_type);
- case RDMA_NODE_IB_ROUTER: return sprintf(buf, "%d: router\n", dev->node_type);
- default: return sprintf(buf, "%d: <unknown>\n", dev->node_type);
- }
+ return sysfs_emit(buf, "%d: %s\n", dev->node_type,
+ node_type_string(dev->node_type));
}
static DEVICE_ATTR_RO(node_type);
@@ -1246,12 +1271,13 @@ static ssize_t sys_image_guid_show(struct device *device,
struct device_attribute *dev_attr, char *buf)
{
struct ib_device *dev = rdma_device_to_ibdev(device);
+ __be16 *guid = (__be16 *)&dev->attrs.sys_image_guid;
- return sprintf(buf, "%04x:%04x:%04x:%04x\n",
- be16_to_cpu(((__be16 *) &dev->attrs.sys_image_guid)[0]),
- be16_to_cpu(((__be16 *) &dev->attrs.sys_image_guid)[1]),
- be16_to_cpu(((__be16 *) &dev->attrs.sys_image_guid)[2]),
- be16_to_cpu(((__be16 *) &dev->attrs.sys_image_guid)[3]));
+ return sysfs_emit(buf, "%04x:%04x:%04x:%04x\n",
+ be16_to_cpu(guid[0]),
+ be16_to_cpu(guid[1]),
+ be16_to_cpu(guid[2]),
+ be16_to_cpu(guid[3]));
}
static DEVICE_ATTR_RO(sys_image_guid);
@@ -1259,12 +1285,13 @@ static ssize_t node_guid_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct ib_device *dev = rdma_device_to_ibdev(device);
+ __be16 *node_guid = (__be16 *)&dev->node_guid;
- return sprintf(buf, "%04x:%04x:%04x:%04x\n",
- be16_to_cpu(((__be16 *) &dev->node_guid)[0]),
- be16_to_cpu(((__be16 *) &dev->node_guid)[1]),
- be16_to_cpu(((__be16 *) &dev->node_guid)[2]),
- be16_to_cpu(((__be16 *) &dev->node_guid)[3]));
+ return sysfs_emit(buf, "%04x:%04x:%04x:%04x\n",
+ be16_to_cpu(node_guid[0]),
+ be16_to_cpu(node_guid[1]),
+ be16_to_cpu(node_guid[2]),
+ be16_to_cpu(node_guid[3]));
}
static DEVICE_ATTR_RO(node_guid);
@@ -1273,7 +1300,7 @@ static ssize_t node_desc_show(struct device *device,
{
struct ib_device *dev = rdma_device_to_ibdev(device);
- return sprintf(buf, "%.64s\n", dev->node_desc);
+ return sysfs_emit(buf, "%.64s\n", dev->node_desc);
}
static ssize_t node_desc_store(struct device *device,
@@ -1300,10 +1327,11 @@ static ssize_t fw_ver_show(struct device *device, struct device_attribute *attr,
char *buf)
{
struct ib_device *dev = rdma_device_to_ibdev(device);
+ char version[IB_FW_VERSION_NAME_MAX] = {};
+
+ ib_get_device_fw_str(dev, version);
- ib_get_device_fw_str(dev, buf);
- strlcat(buf, "\n", IB_FW_VERSION_NAME_MAX);
- return strlen(buf);
+ return sysfs_emit(buf, "%s\n", version);
}
static DEVICE_ATTR_RO(fw_ver);
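The sprintf()-to-sysfs_emit() conversion running through this file is not just cosmetic: sysfs_emit() verifies that buf is the page-aligned PAGE_SIZE buffer sysfs hands to show() callbacks and clamps output to that page, so a malformed or oversized value cannot write past it. A minimal show() sketch, assuming kernel context:

	#include <linux/device.h>
	#include <linux/sysfs.h>

	static ssize_t example_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
	{
		/* bounded to PAGE_SIZE and checked for page alignment,
		 * unlike a bare sprintf(buf, ...) */
		return sysfs_emit(buf, "%d\n", 42);
	}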
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index ffe2563ad345..da2512c30ffd 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -95,8 +95,6 @@ struct ucma_context {
u64 uid;
struct list_head list;
- /* sync between removal event and id destroy, protected by file mut */
- int destroying;
struct work_struct close_work;
};
@@ -122,7 +120,7 @@ static DEFINE_XARRAY_ALLOC(ctx_table);
static DEFINE_XARRAY_ALLOC(multicast_table);
static const struct file_operations ucma_fops;
-static int __destroy_id(struct ucma_context *ctx);
+static int ucma_destroy_private_ctx(struct ucma_context *ctx);
static inline struct ucma_context *_ucma_find_context(int id,
struct ucma_file *file)
@@ -179,19 +177,14 @@ static void ucma_close_id(struct work_struct *work)
/* once all inflight tasks are finished, we close all underlying
* resources. The context is still alive till its explicit destroying
- * by its creator.
+ * by its creator. This puts back the xarray's reference.
*/
ucma_put_ctx(ctx);
wait_for_completion(&ctx->comp);
/* No new events will be generated after destroying the id. */
rdma_destroy_id(ctx->cm_id);
- /*
- * At this point ctx->ref is zero so the only place the ctx can be is in
- * a uevent or in __destroy_id(). Since the former doesn't touch
- * ctx->cm_id and the latter sync cancels this, there is no races with
- * this store.
- */
+ /* Reading the cm_id without holding a positive ref is not allowed */
ctx->cm_id = NULL;
}
@@ -204,7 +197,6 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
return NULL;
INIT_WORK(&ctx->close_work, ucma_close_id);
- refcount_set(&ctx->ref, 1);
init_completion(&ctx->comp);
/* So list_del() will work if we don't do ucma_finish_ctx() */
INIT_LIST_HEAD(&ctx->list);
@@ -218,6 +210,13 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
return ctx;
}
+static void ucma_set_ctx_cm_id(struct ucma_context *ctx,
+ struct rdma_cm_id *cm_id)
+{
+ refcount_set(&ctx->ref, 1);
+ ctx->cm_id = cm_id;
+}
+
static void ucma_finish_ctx(struct ucma_context *ctx)
{
lockdep_assert_held(&ctx->file->mut);
@@ -303,7 +302,7 @@ static int ucma_connect_event_handler(struct rdma_cm_id *cm_id,
ctx = ucma_alloc_ctx(listen_ctx->file);
if (!ctx)
goto err_backlog;
- ctx->cm_id = cm_id;
+ ucma_set_ctx_cm_id(ctx, cm_id);
uevent = ucma_create_uevent(listen_ctx, event);
if (!uevent)
@@ -321,8 +320,7 @@ static int ucma_connect_event_handler(struct rdma_cm_id *cm_id,
return 0;
err_alloc:
- xa_erase(&ctx_table, ctx->id);
- kfree(ctx);
+ ucma_destroy_private_ctx(ctx);
err_backlog:
atomic_inc(&listen_ctx->backlog);
/* Returning error causes the new ID to be destroyed */
@@ -356,8 +354,12 @@ static int ucma_event_handler(struct rdma_cm_id *cm_id,
wake_up_interruptible(&ctx->file->poll_wait);
}
- if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL && !ctx->destroying)
- queue_work(system_unbound_wq, &ctx->close_work);
+ if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL) {
+ xa_lock(&ctx_table);
+ if (xa_load(&ctx_table, ctx->id) == ctx)
+ queue_work(system_unbound_wq, &ctx->close_work);
+ xa_unlock(&ctx_table);
+ }
return 0;
}
@@ -461,13 +463,12 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
ret = PTR_ERR(cm_id);
goto err1;
}
- ctx->cm_id = cm_id;
+ ucma_set_ctx_cm_id(ctx, cm_id);
resp.id = ctx->id;
if (copy_to_user(u64_to_user_ptr(cmd.response),
&resp, sizeof(resp))) {
- xa_erase(&ctx_table, ctx->id);
- __destroy_id(ctx);
+ ucma_destroy_private_ctx(ctx);
return -EFAULT;
}
@@ -477,8 +478,7 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
return 0;
err1:
- xa_erase(&ctx_table, ctx->id);
- kfree(ctx);
+ ucma_destroy_private_ctx(ctx);
return ret;
}
@@ -516,68 +516,73 @@ static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
rdma_unlock_handler(mc->ctx->cm_id);
}
-/*
- * ucma_free_ctx is called after the underlying rdma CM-ID is destroyed. At
- * this point, no new events will be reported from the hardware. However, we
- * still need to cleanup the UCMA context for this ID. Specifically, there
- * might be events that have not yet been consumed by the user space software.
- * mutex. After that we release them as needed.
- */
-static int ucma_free_ctx(struct ucma_context *ctx)
+static int ucma_cleanup_ctx_events(struct ucma_context *ctx)
{
int events_reported;
struct ucma_event *uevent, *tmp;
LIST_HEAD(list);
- ucma_cleanup_multicast(ctx);
-
- /* Cleanup events not yet reported to the user. */
+ /* Cleanup events not yet reported to the user. */
mutex_lock(&ctx->file->mut);
list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
- if (uevent->ctx == ctx || uevent->conn_req_ctx == ctx)
+ if (uevent->ctx != ctx)
+ continue;
+
+ if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST &&
+ xa_cmpxchg(&ctx_table, uevent->conn_req_ctx->id,
+ uevent->conn_req_ctx, XA_ZERO_ENTRY,
+ GFP_KERNEL) == uevent->conn_req_ctx) {
list_move_tail(&uevent->list, &list);
+ continue;
+ }
+ list_del(&uevent->list);
+ kfree(uevent);
}
list_del(&ctx->list);
events_reported = ctx->events_reported;
mutex_unlock(&ctx->file->mut);
/*
- * If this was a listening ID then any connections spawned from it
- * that have not been delivered to userspace are cleaned up too.
- * Must be done outside any locks.
+ * If this was a listening ID then any connections spawned from it that
+ * have not been delivered to userspace are cleaned up too. Must be done
+ * outside any locks.
*/
list_for_each_entry_safe(uevent, tmp, &list, list) {
- list_del(&uevent->list);
- if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST &&
- uevent->conn_req_ctx != ctx)
- __destroy_id(uevent->conn_req_ctx);
+ ucma_destroy_private_ctx(uevent->conn_req_ctx);
kfree(uevent);
}
-
- mutex_destroy(&ctx->mutex);
- kfree(ctx);
return events_reported;
}
-static int __destroy_id(struct ucma_context *ctx)
+/*
+ * When this is called the xarray must have a XA_ZERO_ENTRY in the ctx->id (i.e.
+ * the ctx is not public to the user). This is either because:
+ * - ucma_finish_ctx() hasn't been called
+ * - xa_cmpxchg() succeeded in removing the entry (only one thread can succeed)
+ */
+static int ucma_destroy_private_ctx(struct ucma_context *ctx)
{
+ int events_reported;
+
/*
- * If the refcount is already 0 then ucma_close_id() has already
- * destroyed the cm_id, otherwise holding the refcount keeps cm_id
- * valid. Prevent queue_work() from being called.
+ * Destroy the underlying cm_id. New work queuing is prevented now by
+ * the removal from the xarray. Once the work is canceled, the ref will either
+ * be 0 because the work ran to completion and consumed the ref from the
+ * xarray, or it will be positive because we still have the ref from the
+ * xarray. This can also be 0 in cases where cm_id was never set.
*/
- if (refcount_inc_not_zero(&ctx->ref)) {
- rdma_lock_handler(ctx->cm_id);
- ctx->destroying = 1;
- rdma_unlock_handler(ctx->cm_id);
- ucma_put_ctx(ctx);
- }
-
cancel_work_sync(&ctx->close_work);
- /* At this point it's guaranteed that there is no inflight closing task */
- if (ctx->cm_id)
+ if (refcount_read(&ctx->ref))
ucma_close_id(&ctx->close_work);
- return ucma_free_ctx(ctx);
+
+ events_reported = ucma_cleanup_ctx_events(ctx);
+ ucma_cleanup_multicast(ctx);
+
+ WARN_ON(xa_cmpxchg(&ctx_table, ctx->id, XA_ZERO_ENTRY, NULL,
+ GFP_KERNEL) != NULL);
+ mutex_destroy(&ctx->mutex);
+ kfree(ctx);
+ return events_reported;
}
static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
@@ -596,14 +601,17 @@ static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
xa_lock(&ctx_table);
ctx = _ucma_find_context(cmd.id, file);
- if (!IS_ERR(ctx))
- __xa_erase(&ctx_table, ctx->id);
+ if (!IS_ERR(ctx)) {
+ if (__xa_cmpxchg(&ctx_table, ctx->id, ctx, XA_ZERO_ENTRY,
+ GFP_KERNEL) != ctx)
+ ctx = ERR_PTR(-ENOENT);
+ }
xa_unlock(&ctx_table);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- resp.events_reported = __destroy_id(ctx);
+ resp.events_reported = ucma_destroy_private_ctx(ctx);
if (copy_to_user(u64_to_user_ptr(cmd.response),
&resp, sizeof(resp)))
ret = -EFAULT;
@@ -1777,15 +1785,16 @@ static int ucma_close(struct inode *inode, struct file *filp)
* prevented by this being a FD release function. The list_add_tail() in
* ucma_connect_event_handler() can run concurrently, however it only
* adds to the list *after* a listening ID. By only reading the first of
- * the list, and relying on __destroy_id() to block
+ * the list, and relying on ucma_destroy_private_ctx() to block
* ucma_connect_event_handler(), no additional locking is needed.
*/
while (!list_empty(&file->ctx_list)) {
struct ucma_context *ctx = list_first_entry(
&file->ctx_list, struct ucma_context, list);
- xa_erase(&ctx_table, ctx->id);
- __destroy_id(ctx);
+ WARN_ON(xa_cmpxchg(&ctx_table, ctx->id, ctx, XA_ZERO_ENTRY,
+ GFP_KERNEL) != ctx);
+ ucma_destroy_private_ctx(ctx);
}
kfree(file);
return 0;
@@ -1825,7 +1834,7 @@ static ssize_t show_abi_version(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
+ return sysfs_emit(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
}
static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);
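The ucma rework replaces the old `destroying` flag with xarray state: exactly one thread can swing ctx->id from the live pointer to XA_ZERO_ENTRY, and that thread owns destruction, while the event handler's xa_load() check keeps close work from being queued afterwards. A sketch of the claim step, assuming kernel context (function name hypothetical):

	/* Claim destruction of @ctx; succeeds for exactly one caller.
	 * The slot is left as XA_ZERO_ENTRY so the id stays reserved
	 * until the final xa_cmpxchg(..., XA_ZERO_ENTRY, NULL, ...). */
	static bool claim_destroy(struct xarray *xa, unsigned long id,
				  void *ctx)
	{
		bool won;

		xa_lock(xa);
		won = __xa_cmpxchg(xa, id, ctx, XA_ZERO_ENTRY,
				   GFP_KERNEL) == ctx;
		xa_unlock(xa);
		return won;
	}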
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index e9fecbdf391b..917338db7ac1 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -84,6 +84,15 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
dma_addr_t mask;
int i;
+ if (umem->is_odp) {
+ unsigned int page_size = BIT(to_ib_umem_odp(umem)->page_shift);
+
+ /* ODP must always be self consistent. */
+ if (!(pgsz_bitmap & page_size))
+ return 0;
+ return page_size;
+ }
+
/* rdma_for_each_block() has a bug if the page size is smaller than the
* page size used to build the umem. For now prevent smaller page sizes
* from being returned.
@@ -126,7 +135,7 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
*/
if (mask)
pgsz_bitmap &= GENMASK(count_trailing_zeros(mask), 0);
- return rounddown_pow_of_two(pgsz_bitmap);
+ return pgsz_bitmap ? rounddown_pow_of_two(pgsz_bitmap) : 0;
}
EXPORT_SYMBOL(ib_umem_find_best_pgsz);
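The `pgsz_bitmap ? ... : 0` guard matters because the preceding GENMASK(count_trailing_zeros(mask), 0) filter can clear every candidate bit when the buffer's alignment supports none of the requested page sizes, and rounddown_pow_of_two(0) is undefined. A compilable userspace model of the selection math (compiler built-ins stand in for the kernel helpers):

	#include <stdint.h>
	#include <stdio.h>

	/* mask accumulates the address bits that differ between pages,
	 * as in the kernel routine */
	static uint64_t best_pgsz(uint64_t bitmap, uint64_t mask)
	{
		if (mask)	/* GENMASK(count_trailing_zeros(mask), 0) */
			bitmap &= (2ULL << __builtin_ctzll(mask)) - 1;
		if (!bitmap)	/* the new guard: nothing left to pick */
			return 0;
		/* rounddown_pow_of_two(): keep the highest set bit */
		return 1ULL << (63 - __builtin_clzll(bitmap));
	}

	int main(void)
	{
		/* 4K+2M supported, alignment allows <=64K: picks 4K */
		printf("0x%llx\n",
		       (unsigned long long)best_pgsz(0x201000, 0x10000));
		/* only 2M supported, alignment allows <=4K: picks nothing */
		printf("0x%llx\n",
		       (unsigned long long)best_pgsz(0x200000, 0x1000));
		return 0;
	}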
@@ -220,10 +229,10 @@ struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
cur_base += ret * PAGE_SIZE;
npages -= ret;
- sg = __sg_alloc_table_from_pages(
- &umem->sg_head, page_list, ret, 0, ret << PAGE_SHIFT,
- dma_get_max_seg_size(device->dma_device), sg, npages,
- GFP_KERNEL);
+ sg = __sg_alloc_table_from_pages(&umem->sg_head, page_list, ret,
+ 0, ret << PAGE_SHIFT,
+ ib_dma_max_seg_size(device), sg, npages,
+ GFP_KERNEL);
umem->sg_nents = umem->sg_head.nents;
if (IS_ERR(sg)) {
unpin_user_pages_dirty_lock(page_list, ret, 0);
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index b0d0b522cc76..19104a675691 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -1191,7 +1191,7 @@ static ssize_t ibdev_show(struct device *dev, struct device_attribute *attr,
if (!port)
return -ENODEV;
- return sprintf(buf, "%s\n", dev_name(&port->ib_dev->dev));
+ return sysfs_emit(buf, "%s\n", dev_name(&port->ib_dev->dev));
}
static DEVICE_ATTR_RO(ibdev);
@@ -1203,7 +1203,7 @@ static ssize_t port_show(struct device *dev, struct device_attribute *attr,
if (!port)
return -ENODEV;
- return sprintf(buf, "%d\n", port->port_num);
+ return sysfs_emit(buf, "%d\n", port->port_num);
}
static DEVICE_ATTR_RO(port);
@@ -1222,7 +1222,7 @@ static char *umad_devnode(struct device *dev, umode_t *mode)
static ssize_t abi_version_show(struct class *class,
struct class_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", IB_USER_MAD_ABI_VERSION);
+ return sysfs_emit(buf, "%d\n", IB_USER_MAD_ABI_VERSION);
}
static CLASS_ATTR_RO(abi_version);
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 418d133a8fb0..98a5d36813ff 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -681,8 +681,7 @@ int ib_uverbs_dealloc_xrcd(struct ib_uobject *uobject, struct ib_xrcd *xrcd,
return 0;
ret = ib_dealloc_xrcd_user(xrcd, &attrs->driver_udata);
-
- if (ib_is_destroy_retryable(ret, why, uobject)) {
+ if (ret) {
atomic_inc(&xrcd->usecnt);
return ret;
}
@@ -690,7 +689,7 @@ int ib_uverbs_dealloc_xrcd(struct ib_uobject *uobject, struct ib_xrcd *xrcd,
if (inode)
xrcd_table_delete(dev, inode);
- return ret;
+ return 0;
}
static int ib_uverbs_reg_mr(struct uverbs_attr_bundle *attrs)
@@ -710,29 +709,20 @@ static int ib_uverbs_reg_mr(struct uverbs_attr_bundle *attrs)
if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
return -EINVAL;
- ret = ib_check_mr_access(cmd.access_flags);
- if (ret)
- return ret;
-
uobj = uobj_alloc(UVERBS_OBJECT_MR, attrs, &ib_dev);
if (IS_ERR(uobj))
return PTR_ERR(uobj);
+ ret = ib_check_mr_access(ib_dev, cmd.access_flags);
+ if (ret)
+ goto err_free;
+
pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
if (!pd) {
ret = -EINVAL;
goto err_free;
}
- if (cmd.access_flags & IB_ACCESS_ON_DEMAND) {
- if (!(pd->device->attrs.device_cap_flags &
- IB_DEVICE_ON_DEMAND_PAGING)) {
- pr_debug("ODP support not available\n");
- ret = -EINVAL;
- goto err_put;
- }
- }
-
mr = pd->device->ops.reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
cmd.access_flags,
&attrs->driver_udata);
@@ -774,23 +764,28 @@ static int ib_uverbs_rereg_mr(struct uverbs_attr_bundle *attrs)
{
struct ib_uverbs_rereg_mr cmd;
struct ib_uverbs_rereg_mr_resp resp;
- struct ib_pd *pd = NULL;
struct ib_mr *mr;
- struct ib_pd *old_pd;
int ret;
struct ib_uobject *uobj;
+ struct ib_uobject *new_uobj;
+ struct ib_device *ib_dev;
+ struct ib_pd *orig_pd;
+ struct ib_pd *new_pd;
+ struct ib_mr *new_mr;
ret = uverbs_request(attrs, &cmd, sizeof(cmd));
if (ret)
return ret;
- if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags)
+ if (!cmd.flags)
return -EINVAL;
+ if (cmd.flags & ~IB_MR_REREG_SUPPORTED)
+ return -EOPNOTSUPP;
+
if ((cmd.flags & IB_MR_REREG_TRANS) &&
- (!cmd.start || !cmd.hca_va || 0 >= cmd.length ||
- (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)))
- return -EINVAL;
+ (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
+ return -EINVAL;
uobj = uobj_get_write(UVERBS_OBJECT_MR, cmd.mr_handle, attrs);
if (IS_ERR(uobj))
@@ -804,36 +799,74 @@ static int ib_uverbs_rereg_mr(struct uverbs_attr_bundle *attrs)
}
if (cmd.flags & IB_MR_REREG_ACCESS) {
- ret = ib_check_mr_access(cmd.access_flags);
+ ret = ib_check_mr_access(mr->device, cmd.access_flags);
if (ret)
goto put_uobjs;
}
+ orig_pd = mr->pd;
if (cmd.flags & IB_MR_REREG_PD) {
- pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle,
- attrs);
- if (!pd) {
+ new_pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle,
+ attrs);
+ if (!new_pd) {
ret = -EINVAL;
goto put_uobjs;
}
+ } else {
+ new_pd = mr->pd;
}
- old_pd = mr->pd;
- ret = mr->device->ops.rereg_user_mr(mr, cmd.flags, cmd.start,
- cmd.length, cmd.hca_va,
- cmd.access_flags, pd,
- &attrs->driver_udata);
- if (ret)
+ /*
+ * The driver might create a new HW object as part of the rereg; we need
+ * to have a uobject ready to hold it.
+ */
+ new_uobj = uobj_alloc(UVERBS_OBJECT_MR, attrs, &ib_dev);
+ if (IS_ERR(new_uobj)) {
+ ret = PTR_ERR(new_uobj);
goto put_uobj_pd;
-
- if (cmd.flags & IB_MR_REREG_PD) {
- atomic_inc(&pd->usecnt);
- mr->pd = pd;
- atomic_dec(&old_pd->usecnt);
}
- if (cmd.flags & IB_MR_REREG_TRANS)
- mr->iova = cmd.hca_va;
+ new_mr = ib_dev->ops.rereg_user_mr(mr, cmd.flags, cmd.start, cmd.length,
+ cmd.hca_va, cmd.access_flags, new_pd,
+ &attrs->driver_udata);
+ if (IS_ERR(new_mr)) {
+ ret = PTR_ERR(new_mr);
+ goto put_new_uobj;
+ }
+ if (new_mr) {
+ new_mr->device = new_pd->device;
+ new_mr->pd = new_pd;
+ new_mr->type = IB_MR_TYPE_USER;
+ new_mr->dm = NULL;
+ new_mr->sig_attrs = NULL;
+ new_mr->uobject = uobj;
+ atomic_inc(&new_pd->usecnt);
+ new_mr->iova = cmd.hca_va;
+ new_uobj->object = new_mr;
+
+ rdma_restrack_new(&new_mr->res, RDMA_RESTRACK_MR);
+ rdma_restrack_set_name(&new_mr->res, NULL);
+ rdma_restrack_add(&new_mr->res);
+
+ /*
+ * The new uobj for the new HW object is put into the same spot
+ * in the IDR and the old uobj & HW object are deleted.
+ */
+ rdma_assign_uobject(uobj, new_uobj, attrs);
+ rdma_alloc_commit_uobject(new_uobj, attrs);
+ uobj_put_destroy(uobj);
+ new_uobj = NULL;
+ uobj = NULL;
+ mr = new_mr;
+ } else {
+ if (cmd.flags & IB_MR_REREG_PD) {
+ atomic_dec(&orig_pd->usecnt);
+ mr->pd = new_pd;
+ atomic_inc(&new_pd->usecnt);
+ }
+ if (cmd.flags & IB_MR_REREG_TRANS)
+ mr->iova = cmd.hca_va;
+ }
memset(&resp, 0, sizeof(resp));
resp.lkey = mr->lkey;
@@ -841,12 +874,16 @@ static int ib_uverbs_rereg_mr(struct uverbs_attr_bundle *attrs)
ret = uverbs_response(attrs, &resp, sizeof(resp));
+put_new_uobj:
+ if (new_uobj)
+ uobj_alloc_abort(new_uobj, attrs);
put_uobj_pd:
if (cmd.flags & IB_MR_REREG_PD)
- uobj_put_obj_read(pd);
+ uobj_put_obj_read(new_pd);
put_uobjs:
- uobj_put_write(uobj);
+ if (uobj)
+ uobj_put_write(uobj);
return ret;
}
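The rereg_user_mr contract changes here: the driver may update the MR in place and return NULL, or allocate a whole new HW object and return the new ib_mr, in which case the core splices the new uobject into the old handle via rdma_assign_uobject() so userspace keeps the same MR handle. A condensed sketch of the two-outcome handling (error paths omitted):

	new_mr = ib_dev->ops.rereg_user_mr(mr, flags, start, length,
					   hca_va, access, new_pd, udata);
	if (IS_ERR(new_mr))
		return PTR_ERR(new_mr);

	if (new_mr)	/* driver built a replacement HW object */
		rdma_assign_uobject(uobj, new_uobj, attrs);
	else		/* driver updated the existing MR in place */
		mr->iova = hca_va;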
@@ -1401,8 +1438,8 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
if (cmd->qp_type == IB_QPT_XRC_TGT)
qp = ib_create_qp(pd, &attr);
else
- qp = _ib_create_qp(device, pd, &attr, &attrs->driver_udata,
- obj);
+ qp = _ib_create_qp(device, pd, &attr, &attrs->driver_udata, obj,
+ NULL);
if (IS_ERR(qp)) {
ret = PTR_ERR(qp);
@@ -1906,8 +1943,7 @@ static int ib_uverbs_modify_qp(struct uverbs_attr_bundle *attrs)
if (ret)
return ret;
- if (cmd.base.attr_mask &
- ~((IB_USER_LEGACY_LAST_QP_ATTR_MASK << 1) - 1))
+ if (cmd.base.attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
return -EOPNOTSUPP;
return modify_qp(attrs, &cmd);
@@ -1929,10 +1965,7 @@ static int ib_uverbs_ex_modify_qp(struct uverbs_attr_bundle *attrs)
* Last bit is reserved for extending the attr_mask by
* using another field.
*/
- BUILD_BUG_ON(IB_USER_LAST_QP_ATTR_MASK == (1ULL << 31));
-
- if (cmd.base.attr_mask &
- ~((IB_USER_LAST_QP_ATTR_MASK << 1) - 1))
+ if (cmd.base.attr_mask & ~(IB_QP_ATTR_STANDARD_BITS | IB_QP_RATE_LIMIT))
return -EOPNOTSUPP;
ret = modify_qp(attrs, &cmd);
@@ -3693,13 +3726,13 @@ const struct uapi_definition uverbs_def_write_intf[] = {
ib_uverbs_create_ah,
UAPI_DEF_WRITE_UDATA_IO(
struct ib_uverbs_create_ah,
- struct ib_uverbs_create_ah_resp),
- UAPI_DEF_METHOD_NEEDS_FN(create_ah)),
+ struct ib_uverbs_create_ah_resp)),
DECLARE_UVERBS_WRITE(
IB_USER_VERBS_CMD_DESTROY_AH,
ib_uverbs_destroy_ah,
- UAPI_DEF_WRITE_I(struct ib_uverbs_destroy_ah),
- UAPI_DEF_METHOD_NEEDS_FN(destroy_ah))),
+ UAPI_DEF_WRITE_I(struct ib_uverbs_destroy_ah)),
+ UAPI_DEF_OBJ_NEEDS_FN(create_user_ah),
+ UAPI_DEF_OBJ_NEEDS_FN(destroy_ah)),
DECLARE_UVERBS_OBJECT(
UVERBS_OBJECT_COMP_CHANNEL,
@@ -3753,7 +3786,7 @@ const struct uapi_definition uverbs_def_write_intf[] = {
IB_USER_VERBS_EX_CMD_MODIFY_CQ,
ib_uverbs_ex_modify_cq,
UAPI_DEF_WRITE_I(struct ib_uverbs_ex_modify_cq),
- UAPI_DEF_METHOD_NEEDS_FN(create_cq))),
+ UAPI_DEF_METHOD_NEEDS_FN(modify_cq))),
DECLARE_UVERBS_OBJECT(
UVERBS_OBJECT_DEVICE,
@@ -3999,8 +4032,7 @@ const struct uapi_definition uverbs_def_write_intf[] = {
DECLARE_UVERBS_WRITE(
IB_USER_VERBS_CMD_CLOSE_XRCD,
ib_uverbs_close_xrcd,
- UAPI_DEF_WRITE_I(struct ib_uverbs_close_xrcd),
- UAPI_DEF_METHOD_NEEDS_FN(dealloc_xrcd)),
+ UAPI_DEF_WRITE_I(struct ib_uverbs_close_xrcd)),
DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_OPEN_QP,
ib_uverbs_open_qp,
UAPI_DEF_WRITE_UDATA_IO(
@@ -4010,8 +4042,9 @@ const struct uapi_definition uverbs_def_write_intf[] = {
ib_uverbs_open_xrcd,
UAPI_DEF_WRITE_UDATA_IO(
struct ib_uverbs_open_xrcd,
- struct ib_uverbs_open_xrcd_resp),
- UAPI_DEF_METHOD_NEEDS_FN(alloc_xrcd))),
+ struct ib_uverbs_open_xrcd_resp)),
+ UAPI_DEF_OBJ_NEEDS_FN(alloc_xrcd),
+ UAPI_DEF_OBJ_NEEDS_FN(dealloc_xrcd)),
{},
};
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 4bb7c642f80c..f173ecd102dc 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -1046,7 +1046,7 @@ static ssize_t ibdev_show(struct device *device, struct device_attribute *attr,
srcu_key = srcu_read_lock(&dev->disassociate_srcu);
ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
if (ib_dev)
- ret = sprintf(buf, "%s\n", dev_name(&ib_dev->dev));
+ ret = sysfs_emit(buf, "%s\n", dev_name(&ib_dev->dev));
srcu_read_unlock(&dev->disassociate_srcu, srcu_key);
return ret;
@@ -1065,7 +1065,7 @@ static ssize_t abi_version_show(struct device *device,
srcu_key = srcu_read_lock(&dev->disassociate_srcu);
ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
if (ib_dev)
- ret = sprintf(buf, "%u\n", ib_dev->ops.uverbs_abi_ver);
+ ret = sysfs_emit(buf, "%u\n", ib_dev->ops.uverbs_abi_ver);
srcu_read_unlock(&dev->disassociate_srcu, srcu_key);
return ret;
diff --git a/drivers/infiniband/core/uverbs_std_types.c b/drivers/infiniband/core/uverbs_std_types.c
index 0658101fca00..13776a66e2e4 100644
--- a/drivers/infiniband/core/uverbs_std_types.c
+++ b/drivers/infiniband/core/uverbs_std_types.c
@@ -88,7 +88,7 @@ static int uverbs_free_rwq_ind_tbl(struct ib_uobject *uobject,
return -EBUSY;
ret = rwq_ind_tbl->device->ops.destroy_rwq_ind_table(rwq_ind_tbl);
- if (ib_is_destroy_retryable(ret, why, uobject))
+ if (ret)
return ret;
for (i = 0; i < table_size; i++)
@@ -96,7 +96,7 @@ static int uverbs_free_rwq_ind_tbl(struct ib_uobject *uobject,
kfree(rwq_ind_tbl);
kfree(ind_tbl);
- return ret;
+ return 0;
}
static int uverbs_free_xrcd(struct ib_uobject *uobject,
@@ -108,9 +108,8 @@ static int uverbs_free_xrcd(struct ib_uobject *uobject,
container_of(uobject, struct ib_uxrcd_object, uobject);
int ret;
- ret = ib_destroy_usecnt(&uxrcd->refcnt, why, uobject);
- if (ret)
- return ret;
+ if (atomic_read(&uxrcd->refcnt))
+ return -EBUSY;
mutex_lock(&attrs->ufile->device->xrcd_tree_mutex);
ret = ib_uverbs_dealloc_xrcd(uobject, xrcd, why, attrs);
@@ -124,11 +123,9 @@ static int uverbs_free_pd(struct ib_uobject *uobject,
struct uverbs_attr_bundle *attrs)
{
struct ib_pd *pd = uobject->object;
- int ret;
- ret = ib_destroy_usecnt(&pd->usecnt, why, uobject);
- if (ret)
- return ret;
+ if (atomic_read(&pd->usecnt))
+ return -EBUSY;
return ib_dealloc_pd_user(pd, &attrs->driver_udata);
}
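This pattern repeats through the uverbs_std_types files because the removed ib_destroy_usecnt() helper tied the busy check to ib_is_destroy_retryable(): when the removal reason was not retryable (e.g. driver removal) it returned 0 and let a still-referenced object be torn down. With destroy never retryable, the check becomes an unconditional -EBUSY. A sketch of the old helper's approximate shape, reconstructed for contrast (treat the details as approximate):

	/* approximate pre-patch helper: -EBUSY was swallowed unless the
	 * removal reason allowed a retry */
	static inline int ib_destroy_usecnt(atomic_t *usecnt,
					    enum rdma_remove_reason why,
					    struct ib_uobject *uobj)
	{
		if (atomic_read(usecnt) &&
		    ib_is_destroy_retryable(-EBUSY, why, uobj))
			return -EBUSY;
		return 0;
	}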
@@ -157,7 +154,7 @@ void ib_uverbs_free_event_queue(struct ib_uverbs_event_queue *event_queue)
spin_unlock_irq(&event_queue->lock);
}
-static int
+static void
uverbs_completion_event_file_destroy_uobj(struct ib_uobject *uobj,
enum rdma_remove_reason why)
{
@@ -166,7 +163,6 @@ uverbs_completion_event_file_destroy_uobj(struct ib_uobject *uobj,
uobj);
ib_uverbs_free_event_queue(&file->ev_queue);
- return 0;
}
int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs)
diff --git a/drivers/infiniband/core/uverbs_std_types_async_fd.c b/drivers/infiniband/core/uverbs_std_types_async_fd.c
index 61899eaf1f91..cc24cfdf7aee 100644
--- a/drivers/infiniband/core/uverbs_std_types_async_fd.c
+++ b/drivers/infiniband/core/uverbs_std_types_async_fd.c
@@ -19,8 +19,8 @@ static int UVERBS_HANDLER(UVERBS_METHOD_ASYNC_EVENT_ALLOC)(
return 0;
}
-static int uverbs_async_event_destroy_uobj(struct ib_uobject *uobj,
- enum rdma_remove_reason why)
+static void uverbs_async_event_destroy_uobj(struct ib_uobject *uobj,
+ enum rdma_remove_reason why)
{
struct ib_uverbs_async_event_file *event_file =
container_of(uobj, struct ib_uverbs_async_event_file, uobj);
@@ -30,7 +30,6 @@ static int uverbs_async_event_destroy_uobj(struct ib_uobject *uobj,
if (why == RDMA_REMOVE_DRIVER_REMOVE)
ib_uverbs_async_handler(event_file, 0, IB_EVENT_DEVICE_FATAL,
NULL, NULL);
- return 0;
}
int uverbs_async_event_release(struct inode *inode, struct file *filp)
diff --git a/drivers/infiniband/core/uverbs_std_types_counters.c b/drivers/infiniband/core/uverbs_std_types_counters.c
index b3c6c066b601..999da9c79866 100644
--- a/drivers/infiniband/core/uverbs_std_types_counters.c
+++ b/drivers/infiniband/core/uverbs_std_types_counters.c
@@ -42,9 +42,8 @@ static int uverbs_free_counters(struct ib_uobject *uobject,
struct ib_counters *counters = uobject->object;
int ret;
- ret = ib_destroy_usecnt(&counters->usecnt, why, uobject);
- if (ret)
- return ret;
+ if (atomic_read(&counters->usecnt))
+ return -EBUSY;
ret = counters->device->ops.destroy_counters(counters);
if (ret)
diff --git a/drivers/infiniband/core/uverbs_std_types_cq.c b/drivers/infiniband/core/uverbs_std_types_cq.c
index 8dabd05988b2..370ad7c83f88 100644
--- a/drivers/infiniband/core/uverbs_std_types_cq.c
+++ b/drivers/infiniband/core/uverbs_std_types_cq.c
@@ -46,7 +46,7 @@ static int uverbs_free_cq(struct ib_uobject *uobject,
int ret;
ret = ib_destroy_cq_user(cq, &attrs->driver_udata);
- if (ib_is_destroy_retryable(ret, why, uobject))
+ if (ret)
return ret;
ib_uverbs_release_ucq(
@@ -55,7 +55,7 @@ static int uverbs_free_cq(struct ib_uobject *uobject,
ev_queue) :
NULL,
ucq);
- return ret;
+ return 0;
}
static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(
diff --git a/drivers/infiniband/core/uverbs_std_types_device.c b/drivers/infiniband/core/uverbs_std_types_device.c
index 302f898c5833..9ec6971056fa 100644
--- a/drivers/infiniband/core/uverbs_std_types_device.c
+++ b/drivers/infiniband/core/uverbs_std_types_device.c
@@ -317,8 +317,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_GID_TABLE)(
struct ib_device *ib_dev;
size_t user_entry_size;
ssize_t num_entries;
- size_t max_entries;
- size_t num_bytes;
+ int max_entries;
u32 flags;
int ret;
@@ -336,19 +335,16 @@ static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_GID_TABLE)(
attrs, UVERBS_ATTR_QUERY_GID_TABLE_RESP_ENTRIES,
user_entry_size);
if (max_entries <= 0)
- return -EINVAL;
+ return max_entries ?: -EINVAL;
ucontext = ib_uverbs_get_ucontext(attrs);
if (IS_ERR(ucontext))
return PTR_ERR(ucontext);
ib_dev = ucontext->device;
- if (check_mul_overflow(max_entries, sizeof(*entries), &num_bytes))
- return -EINVAL;
-
- entries = uverbs_zalloc(attrs, num_bytes);
- if (!entries)
- return -ENOMEM;
+ entries = uverbs_kcalloc(attrs, max_entries, sizeof(*entries));
+ if (IS_ERR(entries))
+ return PTR_ERR(entries);
num_entries = rdma_query_gid_table(ib_dev, entries, max_entries);
if (num_entries < 0)
diff --git a/drivers/infiniband/core/uverbs_std_types_dm.c b/drivers/infiniband/core/uverbs_std_types_dm.c
index d5a1de33c2c9..98c522cf86d6 100644
--- a/drivers/infiniband/core/uverbs_std_types_dm.c
+++ b/drivers/infiniband/core/uverbs_std_types_dm.c
@@ -39,11 +39,9 @@ static int uverbs_free_dm(struct ib_uobject *uobject,
struct uverbs_attr_bundle *attrs)
{
struct ib_dm *dm = uobject->object;
- int ret;
- ret = ib_destroy_usecnt(&dm->usecnt, why, uobject);
- if (ret)
- return ret;
+ if (atomic_read(&dm->usecnt))
+ return -EBUSY;
return dm->device->ops.dealloc_dm(dm, attrs);
}
diff --git a/drivers/infiniband/core/uverbs_std_types_flow_action.c b/drivers/infiniband/core/uverbs_std_types_flow_action.c
index 459cf165b231..d42ed7ff223e 100644
--- a/drivers/infiniband/core/uverbs_std_types_flow_action.c
+++ b/drivers/infiniband/core/uverbs_std_types_flow_action.c
@@ -39,11 +39,9 @@ static int uverbs_free_flow_action(struct ib_uobject *uobject,
struct uverbs_attr_bundle *attrs)
{
struct ib_flow_action *action = uobject->object;
- int ret;
- ret = ib_destroy_usecnt(&action->usecnt, why, uobject);
- if (ret)
- return ret;
+ if (atomic_read(&action->usecnt))
+ return -EBUSY;
return action->device->ops.destroy_flow_action(action);
}
diff --git a/drivers/infiniband/core/uverbs_std_types_mr.c b/drivers/infiniband/core/uverbs_std_types_mr.c
index 9b22bb553e8b..dd4e76b26c74 100644
--- a/drivers/infiniband/core/uverbs_std_types_mr.c
+++ b/drivers/infiniband/core/uverbs_std_types_mr.c
@@ -33,6 +33,7 @@
#include "rdma_core.h"
#include "uverbs.h"
#include <rdma/uverbs_std_types.h>
+#include "restrack.h"
static int uverbs_free_mr(struct ib_uobject *uobject,
enum rdma_remove_reason why,
@@ -114,7 +115,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_DM_MR_REG)(
if (!(attr.access_flags & IB_ZERO_BASED))
return -EINVAL;
- ret = ib_check_mr_access(attr.access_flags);
+ ret = ib_check_mr_access(ib_dev, attr.access_flags);
if (ret)
return ret;
@@ -134,6 +135,9 @@ static int UVERBS_HANDLER(UVERBS_METHOD_DM_MR_REG)(
atomic_inc(&pd->usecnt);
atomic_inc(&dm->usecnt);
+ rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR);
+ rdma_restrack_set_name(&mr->res, NULL);
+ rdma_restrack_add(&mr->res);
uobj->object = mr;
uverbs_finalize_uobj_create(attrs, UVERBS_ATTR_REG_DM_MR_HANDLE);
diff --git a/drivers/infiniband/core/uverbs_std_types_qp.c b/drivers/infiniband/core/uverbs_std_types_qp.c
index 3bf8dcdfe7eb..c00cfb5ed387 100644
--- a/drivers/infiniband/core/uverbs_std_types_qp.c
+++ b/drivers/infiniband/core/uverbs_std_types_qp.c
@@ -32,14 +32,14 @@ static int uverbs_free_qp(struct ib_uobject *uobject,
}
ret = ib_destroy_qp_user(qp, &attrs->driver_udata);
- if (ib_is_destroy_retryable(ret, why, uobject))
+ if (ret)
return ret;
if (uqp->uxrcd)
atomic_dec(&uqp->uxrcd->refcnt);
ib_uverbs_release_uevent(&uqp->uevent);
- return ret;
+ return 0;
}
static int check_creation_flags(enum ib_qp_type qp_type,
@@ -251,8 +251,8 @@ static int UVERBS_HANDLER(UVERBS_METHOD_QP_CREATE)(
if (attr.qp_type == IB_QPT_XRC_TGT)
qp = ib_create_qp(pd, &attr);
else
- qp = _ib_create_qp(device, pd, &attr, &attrs->driver_udata,
- obj);
+ qp = _ib_create_qp(device, pd, &attr, &attrs->driver_udata, obj,
+ NULL);
if (IS_ERR(qp)) {
ret = PTR_ERR(qp);
diff --git a/drivers/infiniband/core/uverbs_std_types_srq.c b/drivers/infiniband/core/uverbs_std_types_srq.c
index c0ecbba26bf4..e5513f828bdc 100644
--- a/drivers/infiniband/core/uverbs_std_types_srq.c
+++ b/drivers/infiniband/core/uverbs_std_types_srq.c
@@ -18,7 +18,7 @@ static int uverbs_free_srq(struct ib_uobject *uobject,
int ret;
ret = ib_destroy_srq_user(srq, &attrs->driver_udata);
- if (ib_is_destroy_retryable(ret, why, uobject))
+ if (ret)
return ret;
if (srq_type == IB_SRQT_XRC) {
@@ -30,7 +30,7 @@ static int uverbs_free_srq(struct ib_uobject *uobject,
}
ib_uverbs_release_uevent(uevent);
- return ret;
+ return 0;
}
static int UVERBS_HANDLER(UVERBS_METHOD_SRQ_CREATE)(
diff --git a/drivers/infiniband/core/uverbs_std_types_wq.c b/drivers/infiniband/core/uverbs_std_types_wq.c
index f2e6a625724a..7ded8339346f 100644
--- a/drivers/infiniband/core/uverbs_std_types_wq.c
+++ b/drivers/infiniband/core/uverbs_std_types_wq.c
@@ -17,11 +17,11 @@ static int uverbs_free_wq(struct ib_uobject *uobject,
int ret;
ret = ib_destroy_wq_user(wq, &attrs->driver_udata);
- if (ib_is_destroy_retryable(ret, why, uobject))
+ if (ret)
return ret;
ib_uverbs_release_uevent(&uwq->uevent);
- return ret;
+ return 0;
}
static int UVERBS_HANDLER(UVERBS_METHOD_WQ_CREATE)(
diff --git a/drivers/infiniband/core/uverbs_uapi.c b/drivers/infiniband/core/uverbs_uapi.c
index 5addc8fae3f3..62f5bcb712cf 100644
--- a/drivers/infiniband/core/uverbs_uapi.c
+++ b/drivers/infiniband/core/uverbs_uapi.c
@@ -79,10 +79,7 @@ static int uapi_create_write(struct uverbs_api *uapi,
method_elm->is_ex = def->write.is_ex;
method_elm->handler = def->func_write;
- if (def->write.is_ex)
- method_elm->disabled = !(ibdev->uverbs_ex_cmd_mask &
- BIT_ULL(def->write.command_num));
- else
+ if (!def->write.is_ex)
method_elm->disabled = !(ibdev->uverbs_cmd_mask &
BIT_ULL(def->write.command_num));
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 740f8454b6b4..9137a25bb521 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -244,7 +244,7 @@ EXPORT_SYMBOL(rdma_port_get_link_layer);
/* Protection domains */
/**
- * ib_alloc_pd - Allocates an unused protection domain.
+ * __ib_alloc_pd - Allocates an unused protection domain.
* @device: The device on which to allocate the protection domain.
* @flags: protection domain flags
* @caller: caller's build-time module name
@@ -516,7 +516,7 @@ static struct ib_ah *_rdma_create_ah(struct ib_pd *pd,
might_sleep_if(flags & RDMA_CREATE_AH_SLEEPABLE);
- if (!device->ops.create_ah)
+ if (!udata && !device->ops.create_ah)
return ERR_PTR(-EOPNOTSUPP);
ah = rdma_zalloc_drv_obj_gfp(
@@ -533,7 +533,10 @@ static struct ib_ah *_rdma_create_ah(struct ib_pd *pd,
init_attr.flags = flags;
init_attr.xmit_slave = xmit_slave;
- ret = device->ops.create_ah(ah, &init_attr, udata);
+ if (udata)
+ ret = device->ops.create_user_ah(ah, &init_attr, udata);
+ else
+ ret = device->ops.create_ah(ah, &init_attr, NULL);
if (ret) {
kfree(ah);
return ERR_PTR(ret);
@@ -1188,17 +1191,19 @@ static struct ib_qp *create_xrc_qp_user(struct ib_qp *qp,
}
/**
- * ib_create_qp - Creates a kernel QP associated with the specified protection
+ * ib_create_named_qp - Creates a kernel QP associated with the specified protection
* domain.
* @pd: The protection domain associated with the QP.
* @qp_init_attr: A list of initial attributes required to create the
* QP. If QP creation succeeds, then the attributes are updated to
* the actual capabilities of the created QP.
+ * @caller: caller's build-time module name
*
* NOTE: for user qp use ib_create_qp_user with valid udata!
*/
-struct ib_qp *ib_create_qp(struct ib_pd *pd,
- struct ib_qp_init_attr *qp_init_attr)
+struct ib_qp *ib_create_named_qp(struct ib_pd *pd,
+ struct ib_qp_init_attr *qp_init_attr,
+ const char *caller)
{
struct ib_device *device = pd ? pd->device : qp_init_attr->xrcd->device;
struct ib_qp *qp;
@@ -1223,7 +1228,7 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
if (qp_init_attr->cap.max_rdma_ctxs)
rdma_rw_init_qp(device, qp_init_attr);
- qp = _ib_create_qp(device, pd, qp_init_attr, NULL, NULL);
+ qp = _ib_create_qp(device, pd, qp_init_attr, NULL, NULL, caller);
if (IS_ERR(qp))
return qp;
@@ -1289,7 +1294,7 @@ err:
return ERR_PTR(ret);
}
-EXPORT_SYMBOL(ib_create_qp);
+EXPORT_SYMBOL(ib_create_named_qp);
static const struct {
int valid;
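
Renaming the export from ib_create_qp() to ib_create_named_qp() with a caller string implies the old name survives as a wrapper that stamps the calling module's name at each call site. A sketch of the likely header-side companion (the macro form and the KBUILD_MODNAME plumbing are an assumption, mirroring how ib_alloc_pd() wraps __ib_alloc_pd()):

struct ib_qp *ib_create_named_qp(struct ib_pd *pd,
				 struct ib_qp_init_attr *qp_init_attr,
				 const char *caller);

/* existing callers keep writing ib_create_qp(pd, attr) and get the
 * module name recorded for free */
#define ib_create_qp(pd, qp_init_attr) \
	ib_create_named_qp(pd, qp_init_attr, KBUILD_MODNAME)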
@@ -1662,7 +1667,7 @@ static bool is_qp_type_connected(const struct ib_qp *qp)
qp->qp_type == IB_QPT_XRC_TGT);
}
-/**
+/*
* IB core internal function to perform QP attributes modification.
*/
static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
@@ -1698,8 +1703,10 @@ static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
slave = rdma_lag_get_ah_roce_slave(qp->device,
&attr->ah_attr,
GFP_KERNEL);
- if (IS_ERR(slave))
+ if (IS_ERR(slave)) {
+ ret = PTR_ERR(slave);
goto out_av;
+ }
attr->xmit_slave = slave;
}
}
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index cf3db9628397..401bdc9e931e 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -1271,10 +1271,12 @@ static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
}
qplqp->mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
qplqp->dpi = &rdev->dpi_privileged; /* Doorbell page */
- if (init_attr->create_flags)
+ if (init_attr->create_flags) {
ibdev_dbg(&rdev->ibdev,
"QP create flags 0x%x not supported",
init_attr->create_flags);
+ return -EOPNOTSUPP;
+ }
/* Setup CQs */
if (init_attr->send_cq) {
@@ -1657,8 +1659,8 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
srq->qplib_srq.max_wqe = entries;
srq->qplib_srq.max_sge = srq_init_attr->attr.max_sge;
- srq->qplib_srq.wqe_size =
- bnxt_re_get_rwqe_size(srq->qplib_srq.max_sge);
+ /* 128 byte wqe size for SRQ. So use max sges */
+ srq->qplib_srq.wqe_size = bnxt_re_get_rwqe_size(dev_attr->max_srq_sges);
srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit;
srq->srq_limit = srq_init_attr->attr.srq_limit;
srq->qplib_srq.eventq_hw_ring_id = rdev->nq[0].ring_id;
@@ -1829,6 +1831,9 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
unsigned int flags;
u8 nw_type;
+ if (qp_attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
+ return -EOPNOTSUPP;
+
qp->qplib_qp.modify_flags = 0;
if (qp_attr_mask & IB_QP_STATE) {
curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state);
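
The new IB_QP_ATTR_STANDARD_BITS guard (repeated in the cxgb4 and efa hunks below) is a complement-mask test: clear every attribute bit the driver understands and fail if anything is left. Schematically:

	/* any attribute bit outside the standard set is an extension
	 * this driver does not implement */
	if (qp_attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
		return -EOPNOTSUPP;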
@@ -2078,6 +2083,7 @@ int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
goto out;
}
qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
+ qp_attr->cur_qp_state = __to_ib_qp_state(qplib_qp->cur_qp_state);
qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0;
qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access);
qp_attr->pkey_index = qplib_qp->pkey_index;
@@ -2827,6 +2833,9 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct bnxt_qplib_nq *nq = NULL;
unsigned int nq_alloc_cnt;
+ if (attr->flags)
+ return -EOPNOTSUPP;
+
/* Validate CQ fields */
if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
ibdev_err(&rdev->ibdev, "Failed to create CQ -max exceeded");
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 04621ba8fa76..fdb8c2478258 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -608,7 +608,7 @@ static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
struct bnxt_re_dev *rdev =
rdma_device_to_drv_device(device, struct bnxt_re_dev, ibdev);
- return scnprintf(buf, PAGE_SIZE, "0x%x\n", rdev->en_dev->pdev->vendor);
+ return sysfs_emit(buf, "0x%x\n", rdev->en_dev->pdev->vendor);
}
static DEVICE_ATTR_RO(hw_rev);
@@ -618,7 +618,7 @@ static ssize_t hca_type_show(struct device *device,
struct bnxt_re_dev *rdev =
rdma_device_to_drv_device(device, struct bnxt_re_dev, ibdev);
- return scnprintf(buf, PAGE_SIZE, "%s\n", rdev->ibdev.node_desc);
+ return sysfs_emit(buf, "%s\n", rdev->ibdev.node_desc);
}
static DEVICE_ATTR_RO(hca_type);
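
These scnprintf()-to-sysfs_emit() conversions (and the hfi1 batch further down) rely on the sysfs contract that a ->show() callback always receives a page-aligned, PAGE_SIZE buffer; sysfs_emit() asserts that instead of trusting every caller to pass PAGE_SIZE correctly. A sketch with a hypothetical attribute:

/* hypothetical device attribute, for illustration only */
static ssize_t widget_count_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned int count = 42;	/* placeholder value */

	/* before: return scnprintf(buf, PAGE_SIZE, "%u\n", count); */
	return sysfs_emit(buf, "%u\n", count);
}
static DEVICE_ATTR_RO(widget_count);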
@@ -646,6 +646,7 @@ static const struct ib_device_ops bnxt_re_dev_ops = {
.create_cq = bnxt_re_create_cq,
.create_qp = bnxt_re_create_qp,
.create_srq = bnxt_re_create_srq,
+ .create_user_ah = bnxt_re_create_ah,
.dealloc_driver = bnxt_re_dealloc_driver,
.dealloc_pd = bnxt_re_dealloc_pd,
.dealloc_ucontext = bnxt_re_dealloc_ucontext,
@@ -701,35 +702,6 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
ibdev->dev.parent = &rdev->en_dev->pdev->dev;
ibdev->local_dma_lkey = BNXT_QPLIB_RSVD_LKEY;
- /* User space */
- ibdev->uverbs_cmd_mask =
- (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
- (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
- (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
- (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_REG_MR) |
- (1ull << IB_USER_VERBS_CMD_REREG_MR) |
- (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
- (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
- (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
- (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
- (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
- (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
- (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
- (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
- (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_AH) |
- (1ull << IB_USER_VERBS_CMD_QUERY_AH) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_AH);
- /* POLL_CQ and REQ_NOTIFY_CQ is directly handled in libbnxt_re */
-
-
rdma_set_device_sysfs_group(ibdev, &bnxt_re_dev_attr_group);
ib_set_device_ops(ibdev, &bnxt_re_dev_ops);
ret = ib_device_set_netdev(&rdev->ibdev, rdev->netdev, 1);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 64d44f51db4b..6316179583a6 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -118,7 +118,7 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
* 128 WQEs need to be reserved for the HW (8916). Prevent
* reporting the max number
*/
- attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS;
+ attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS + 1;
attr->max_qp_sges = bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx) ?
6 : sb->max_sge;
attr->max_cq = le32_to_cpu(sb->max_cq);
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 28349ed50885..44c2416588d4 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -1006,6 +1006,9 @@ int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
pr_debug("ib_dev %p entries %d\n", ibdev, entries);
if (attr->flags)
+ return -EOPNOTSUPP;
+
+ if (entries < 1 || entries > ibdev->attrs.max_cqe)
return -EINVAL;
if (vector >= rhp->rdev.lldi.nciq)
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index a27899402f59..f85477f3b037 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -983,9 +983,7 @@ struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
u32 max_num_sg);
int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
unsigned int *sg_offset);
-int c4iw_dealloc_mw(struct ib_mw *mw);
void c4iw_dealloc(struct uld_ctx *ctx);
-int c4iw_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
u64 length, u64 virt, int acc,
struct ib_udata *udata);
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 42234df896fb..a2c71a1d93d5 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -365,22 +365,6 @@ static int dereg_mem(struct c4iw_rdev *rdev, u32 stag, u32 pbl_size,
pbl_size, pbl_addr, skb, wr_waitp);
}
-static int allocate_window(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
- struct c4iw_wr_wait *wr_waitp)
-{
- *stag = T4_STAG_UNSET;
- return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_MW, 0, 0, 0,
- 0UL, 0, 0, 0, 0, NULL, wr_waitp);
-}
-
-static int deallocate_window(struct c4iw_rdev *rdev, u32 stag,
- struct sk_buff *skb,
- struct c4iw_wr_wait *wr_waitp)
-{
- return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0, 0,
- 0, skb, wr_waitp);
-}
-
static int allocate_stag(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
u32 pbl_size, u32 pbl_addr,
struct c4iw_wr_wait *wr_waitp)
@@ -611,74 +595,6 @@ err_free_mhp:
return ERR_PTR(err);
}
-int c4iw_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
-{
- struct c4iw_mw *mhp = to_c4iw_mw(ibmw);
- struct c4iw_dev *rhp;
- struct c4iw_pd *php;
- u32 mmid;
- u32 stag = 0;
- int ret;
-
- if (ibmw->type != IB_MW_TYPE_1)
- return -EINVAL;
-
- php = to_c4iw_pd(ibmw->pd);
- rhp = php->rhp;
- mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
- if (!mhp->wr_waitp)
- return -ENOMEM;
-
- mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
- if (!mhp->dereg_skb) {
- ret = -ENOMEM;
- goto free_wr_wait;
- }
-
- ret = allocate_window(&rhp->rdev, &stag, php->pdid, mhp->wr_waitp);
- if (ret)
- goto free_skb;
-
- mhp->rhp = rhp;
- mhp->attr.pdid = php->pdid;
- mhp->attr.type = FW_RI_STAG_MW;
- mhp->attr.stag = stag;
- mmid = (stag) >> 8;
- ibmw->rkey = stag;
- if (xa_insert_irq(&rhp->mrs, mmid, mhp, GFP_KERNEL)) {
- ret = -ENOMEM;
- goto dealloc_win;
- }
- pr_debug("mmid 0x%x mhp %p stag 0x%x\n", mmid, mhp, stag);
- return 0;
-
-dealloc_win:
- deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb,
- mhp->wr_waitp);
-free_skb:
- kfree_skb(mhp->dereg_skb);
-free_wr_wait:
- c4iw_put_wr_wait(mhp->wr_waitp);
- return ret;
-}
-
-int c4iw_dealloc_mw(struct ib_mw *mw)
-{
- struct c4iw_dev *rhp;
- struct c4iw_mw *mhp;
- u32 mmid;
-
- mhp = to_c4iw_mw(mw);
- rhp = mhp->rhp;
- mmid = (mw->rkey) >> 8;
- xa_erase_irq(&rhp->mrs, mmid);
- deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb,
- mhp->wr_waitp);
- kfree_skb(mhp->dereg_skb);
- c4iw_put_wr_wait(mhp->wr_waitp);
- return 0;
-}
-
struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
u32 max_num_sg)
{
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 8138c57a1e43..1f1f856f8715 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -322,8 +322,9 @@ static ssize_t hw_rev_show(struct device *dev,
rdma_device_to_drv_device(dev, struct c4iw_dev, ibdev);
pr_debug("dev 0x%p\n", dev);
- return sprintf(buf, "%d\n",
- CHELSIO_CHIP_RELEASE(c4iw_dev->rdev.lldi.adapter_type));
+ return sysfs_emit(
+ buf, "%d\n",
+ CHELSIO_CHIP_RELEASE(c4iw_dev->rdev.lldi.adapter_type));
}
static DEVICE_ATTR_RO(hw_rev);
@@ -337,7 +338,7 @@ static ssize_t hca_type_show(struct device *dev,
pr_debug("dev 0x%p\n", dev);
lldev->ethtool_ops->get_drvinfo(lldev, &info);
- return sprintf(buf, "%s\n", info.driver);
+ return sysfs_emit(buf, "%s\n", info.driver);
}
static DEVICE_ATTR_RO(hca_type);
@@ -348,8 +349,8 @@ static ssize_t board_id_show(struct device *dev, struct device_attribute *attr,
rdma_device_to_drv_device(dev, struct c4iw_dev, ibdev);
pr_debug("dev 0x%p\n", dev);
- return sprintf(buf, "%x.%x\n", c4iw_dev->rdev.lldi.pdev->vendor,
- c4iw_dev->rdev.lldi.pdev->device);
+ return sysfs_emit(buf, "%x.%x\n", c4iw_dev->rdev.lldi.pdev->vendor,
+ c4iw_dev->rdev.lldi.pdev->device);
}
static DEVICE_ATTR_RO(board_id);
@@ -456,13 +457,11 @@ static const struct ib_device_ops c4iw_dev_ops = {
.alloc_hw_stats = c4iw_alloc_stats,
.alloc_mr = c4iw_alloc_mr,
- .alloc_mw = c4iw_alloc_mw,
.alloc_pd = c4iw_allocate_pd,
.alloc_ucontext = c4iw_alloc_ucontext,
.create_cq = c4iw_create_cq,
.create_qp = c4iw_create_qp,
.create_srq = c4iw_create_srq,
- .dealloc_mw = c4iw_dealloc_mw,
.dealloc_pd = c4iw_deallocate_pd,
.dealloc_ucontext = c4iw_dealloc_ucontext,
.dereg_mr = c4iw_dereg_mr,
@@ -533,28 +532,6 @@ void c4iw_register_device(struct work_struct *work)
if (fastreg_support)
dev->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
dev->ibdev.local_dma_lkey = 0;
- dev->ibdev.uverbs_cmd_mask =
- (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
- (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
- (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
- (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_REG_MR) |
- (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
- (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
- (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
- (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
- (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
- (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
- (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
- (1ull << IB_USER_VERBS_CMD_POST_SEND) |
- (1ull << IB_USER_VERBS_CMD_POST_RECV) |
- (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);
dev->ibdev.node_type = RDMA_NODE_RNIC;
BUILD_BUG_ON(sizeof(C4IW_NODE_DESC) > IB_DEVICE_NODE_DESC_MAX);
memcpy(dev->ibdev.node_desc, C4IW_NODE_DESC, sizeof(C4IW_NODE_DESC));
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index f20379e4e2ec..d109bb3822a5 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -2126,7 +2126,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
pr_debug("ib_pd %p\n", pd);
- if (attrs->qp_type != IB_QPT_RC)
+ if (attrs->qp_type != IB_QPT_RC || attrs->create_flags)
return ERR_PTR(-EOPNOTSUPP);
php = to_c4iw_pd(pd);
@@ -2374,6 +2374,9 @@ int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
pr_debug("ib_qp %p\n", ibqp);
+ if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
+ return -EOPNOTSUPP;
+
/* iwarp does not support the RTR state */
if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
attr_mask &= ~IB_QP_STATE;
@@ -2471,7 +2474,7 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
init_attr->cap.max_send_wr = qhp->attr.sq_num_entries;
init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries;
init_attr->cap.max_send_sge = qhp->attr.sq_max_sges;
- init_attr->cap.max_recv_sge = qhp->attr.sq_max_sges;
+ init_attr->cap.max_recv_sge = qhp->attr.rq_max_sges;
init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE;
init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
return 0;
@@ -2680,6 +2683,9 @@ int c4iw_create_srq(struct ib_srq *ib_srq, struct ib_srq_init_attr *attrs,
int ret;
int wr_len;
+ if (attrs->srq_type != IB_SRQT_BASIC)
+ return -EOPNOTSUPP;
+
pr_debug("%s ib_pd %p\n", __func__, pd);
php = to_c4iw_pd(pd);
diff --git a/drivers/infiniband/hw/efa/efa_main.c b/drivers/infiniband/hw/efa/efa_main.c
index 6faed3a81e08..0f578734bddb 100644
--- a/drivers/infiniband/hw/efa/efa_main.c
+++ b/drivers/infiniband/hw/efa/efa_main.c
@@ -245,9 +245,9 @@ static const struct ib_device_ops efa_dev_ops = {
.alloc_hw_stats = efa_alloc_hw_stats,
.alloc_pd = efa_alloc_pd,
.alloc_ucontext = efa_alloc_ucontext,
- .create_ah = efa_create_ah,
.create_cq = efa_create_cq,
.create_qp = efa_create_qp,
+ .create_user_ah = efa_create_ah,
.dealloc_pd = efa_dealloc_pd,
.dealloc_ucontext = efa_dealloc_ucontext,
.dereg_mr = efa_dereg_mr,
@@ -308,27 +308,6 @@ static int efa_ib_device_add(struct efa_dev *dev)
dev->ibdev.num_comp_vectors = 1;
dev->ibdev.dev.parent = &pdev->dev;
- dev->ibdev.uverbs_cmd_mask =
- (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
- (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
- (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
- (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_REG_MR) |
- (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
- (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
- (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
- (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
- (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
- (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_AH);
-
- dev->ibdev.uverbs_ex_cmd_mask =
- (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE);
-
ib_set_device_ops(&dev->ibdev, &efa_dev_ops);
err = ib_register_device(&dev->ibdev, "efa_%d", &pdev->dev);
@@ -405,19 +384,12 @@ static int efa_device_init(struct efa_com_dev *edev, struct pci_dev *pdev)
return err;
}
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_width));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(dma_width));
if (err) {
- dev_err(&pdev->dev, "pci_set_dma_mask failed %d\n", err);
+ dev_err(&pdev->dev, "dma_set_mask_and_coherent failed %d\n", err);
return err;
}
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(dma_width));
- if (err) {
- dev_err(&pdev->dev,
- "err_pci_set_consistent_dma_mask failed %d\n",
- err);
- return err;
- }
dma_set_max_seg_size(&pdev->dev, UINT_MAX);
return 0;
}
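
dma_set_mask_and_coherent() sets the streaming and coherent DMA masks in one call, replacing the deprecated pci_set_dma_mask()/pci_set_consistent_dma_mask() pair. A common probe-time idiom (a sketch only; efa itself uses its device-reported dma_width rather than a fixed 64/32 fallback):

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		/* platform cannot address 64 bits: retry with 32 */
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			return err;
	}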
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index 4e940fc50bba..479b604e533a 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -917,6 +917,9 @@ int efa_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
enum ib_qp_state new_state;
int err;
+ if (qp_attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
+ return -EOPNOTSUPP;
+
if (udata->inlen &&
!ib_is_udata_cleared(udata, 0, udata->inlen)) {
ibdev_dbg(&dev->ibdev,
@@ -1029,6 +1032,9 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
ibdev_dbg(ibdev, "create_cq entries %d\n", entries);
+ if (attr->flags)
+ return -EOPNOTSUPP;
+
if (entries < 1 || entries > dev->dev_attr.max_cq_depth) {
ibdev_dbg(ibdev,
"cq: requested entries[%u] non-positive or greater than max[%u]\n",
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
index 356518e17fa6..681bb4e918c9 100644
--- a/drivers/infiniband/hw/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -339,6 +339,7 @@ int hfi1_setup_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe, bool *call_send)
return -EINVAL;
if (ibp->sl_to_sc[rdma_ah_get_sl(&ah->attr)] == 0xf)
return -EINVAL;
+ break;
default:
break;
}
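
This hfi1 change, like the tid_rdma.c hunks below, adds a break where a case previously fell through into default:. The fix is behavior-preserving here (the default arm is empty) but silences -Wimplicit-fallthrough. A reduced model of the bug, with hypothetical names:

	switch (opcode) {
	case IB_WR_RDMA_READ:
		if (bad_sl)
			return -EINVAL;
		break;	/* previously missing; fell into default */
	default:
		break;
	}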
diff --git a/drivers/infiniband/hw/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c
index 074ec71772d2..5650130e68d4 100644
--- a/drivers/infiniband/hw/hfi1/sysfs.c
+++ b/drivers/infiniband/hw/hfi1/sysfs.c
@@ -151,7 +151,7 @@ struct hfi1_port_attr {
static ssize_t cc_prescan_show(struct hfi1_pportdata *ppd, char *buf)
{
- return sprintf(buf, "%s\n", ppd->cc_prescan ? "on" : "off");
+ return sysfs_emit(buf, "%s\n", ppd->cc_prescan ? "on" : "off");
}
static ssize_t cc_prescan_store(struct hfi1_pportdata *ppd, const char *buf,
@@ -296,7 +296,7 @@ static ssize_t sc2vl_attr_show(struct kobject *kobj, struct attribute *attr,
container_of(kobj, struct hfi1_pportdata, sc2vl_kobj);
struct hfi1_devdata *dd = ppd->dd;
- return sprintf(buf, "%u\n", *((u8 *)dd->sc2vl + sattr->sc));
+ return sysfs_emit(buf, "%u\n", *((u8 *)dd->sc2vl + sattr->sc));
}
static const struct sysfs_ops hfi1_sc2vl_ops = {
@@ -401,7 +401,7 @@ static ssize_t sl2sc_attr_show(struct kobject *kobj, struct attribute *attr,
container_of(kobj, struct hfi1_pportdata, sl2sc_kobj);
struct hfi1_ibport *ibp = &ppd->ibport_data;
- return sprintf(buf, "%u\n", ibp->sl_to_sc[sattr->sl]);
+ return sysfs_emit(buf, "%u\n", ibp->sl_to_sc[sattr->sl]);
}
static const struct sysfs_ops hfi1_sl2sc_ops = {
@@ -475,7 +475,7 @@ static ssize_t vl2mtu_attr_show(struct kobject *kobj, struct attribute *attr,
container_of(kobj, struct hfi1_pportdata, vl2mtu_kobj);
struct hfi1_devdata *dd = ppd->dd;
- return sprintf(buf, "%u\n", dd->vld[vlattr->vl].mtu);
+ return sysfs_emit(buf, "%u\n", dd->vld[vlattr->vl].mtu);
}
static const struct sysfs_ops hfi1_vl2mtu_ops = {
@@ -500,7 +500,7 @@ static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
struct hfi1_ibdev *dev =
rdma_device_to_drv_device(device, struct hfi1_ibdev, rdi.ibdev);
- return sprintf(buf, "%x\n", dd_from_dev(dev)->minrev);
+ return sysfs_emit(buf, "%x\n", dd_from_dev(dev)->minrev);
}
static DEVICE_ATTR_RO(hw_rev);
@@ -510,13 +510,11 @@ static ssize_t board_id_show(struct device *device,
struct hfi1_ibdev *dev =
rdma_device_to_drv_device(device, struct hfi1_ibdev, rdi.ibdev);
struct hfi1_devdata *dd = dd_from_dev(dev);
- int ret;
if (!dd->boardname)
- ret = -EINVAL;
- else
- ret = scnprintf(buf, PAGE_SIZE, "%s\n", dd->boardname);
- return ret;
+ return -EINVAL;
+
+ return sysfs_emit(buf, "%s\n", dd->boardname);
}
static DEVICE_ATTR_RO(board_id);
@@ -528,7 +526,7 @@ static ssize_t boardversion_show(struct device *device,
struct hfi1_devdata *dd = dd_from_dev(dev);
/* The string printed here is already newline-terminated. */
- return scnprintf(buf, PAGE_SIZE, "%s", dd->boardversion);
+ return sysfs_emit(buf, "%s", dd->boardversion);
}
static DEVICE_ATTR_RO(boardversion);
@@ -545,9 +543,9 @@ static ssize_t nctxts_show(struct device *device,
* and a receive context, so returning the smaller of the two counts
* gives a more accurate picture of total contexts available.
*/
- return scnprintf(buf, PAGE_SIZE, "%u\n",
- min(dd->num_user_contexts,
- (u32)dd->sc_sizes[SC_USER].count));
+ return sysfs_emit(buf, "%u\n",
+ min(dd->num_user_contexts,
+ (u32)dd->sc_sizes[SC_USER].count));
}
static DEVICE_ATTR_RO(nctxts);
@@ -559,7 +557,7 @@ static ssize_t nfreectxts_show(struct device *device,
struct hfi1_devdata *dd = dd_from_dev(dev);
/* Return the number of free user ports (contexts) available. */
- return scnprintf(buf, PAGE_SIZE, "%u\n", dd->freectxts);
+ return sysfs_emit(buf, "%u\n", dd->freectxts);
}
static DEVICE_ATTR_RO(nfreectxts);
@@ -570,7 +568,8 @@ static ssize_t serial_show(struct device *device,
rdma_device_to_drv_device(device, struct hfi1_ibdev, rdi.ibdev);
struct hfi1_devdata *dd = dd_from_dev(dev);
- return scnprintf(buf, PAGE_SIZE, "%s", dd->serial);
+ /* dd->serial is already newline terminated in chip.c */
+ return sysfs_emit(buf, "%s", dd->serial);
}
static DEVICE_ATTR_RO(serial);
@@ -598,9 +597,8 @@ static DEVICE_ATTR_WO(chip_reset);
* Convert the reported temperature from an integer (reported in
* units of 0.25C) to a floating point number.
*/
-#define temp2str(temp, buf, size, idx) \
- scnprintf((buf) + (idx), (size) - (idx), "%u.%02u ", \
- ((temp) >> 2), ((temp) & 0x3) * 25)
+#define temp_d(t) ((t) >> 2)
+#define temp_f(t) (((t)&0x3) * 25u)
/*
* Dump tempsense values, in decimal, to ease shell-scripts.
@@ -615,19 +613,17 @@ static ssize_t tempsense_show(struct device *device,
int ret;
ret = hfi1_tempsense_rd(dd, &temp);
- if (!ret) {
- int idx = 0;
-
- idx += temp2str(temp.curr, buf, PAGE_SIZE, idx);
- idx += temp2str(temp.lo_lim, buf, PAGE_SIZE, idx);
- idx += temp2str(temp.hi_lim, buf, PAGE_SIZE, idx);
- idx += temp2str(temp.crit_lim, buf, PAGE_SIZE, idx);
- idx += scnprintf(buf + idx, PAGE_SIZE - idx,
- "%u %u %u\n", temp.triggers & 0x1,
- temp.triggers & 0x2, temp.triggers & 0x4);
- ret = idx;
- }
- return ret;
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%u.%02u %u.%02u %u.%02u %u.%02u %u %u %u\n",
+ temp_d(temp.curr), temp_f(temp.curr),
+ temp_d(temp.lo_lim), temp_f(temp.lo_lim),
+ temp_d(temp.hi_lim), temp_f(temp.hi_lim),
+ temp_d(temp.crit_lim), temp_f(temp.crit_lim),
+ temp.triggers & 0x1,
+ temp.triggers & 0x2,
+ temp.triggers & 0x4);
}
static DEVICE_ATTR_RO(tempsense);
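
temp_d()/temp_f() split a raw reading in quarter-degree units into whole degrees (top bits) and hundredths (low two bits times 25), which is what the single sysfs_emit() format string above prints. The arithmetic checks out in plain C:

#include <stdio.h>

#define temp_d(t) ((t) >> 2)
#define temp_f(t) (((t) & 0x3) * 25u)

int main(void)
{
	unsigned int raw = 101;	/* 101 quarter-degrees = 25.25 C */

	printf("%u.%02u\n", temp_d(raw), temp_f(raw));	/* prints 25.25 */
	return 0;
}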
@@ -817,7 +813,7 @@ static ssize_t sde_show_vl(struct sdma_engine *sde, char *buf)
if (vl < 0)
return vl;
- return snprintf(buf, PAGE_SIZE, "%d\n", vl);
+ return sysfs_emit(buf, "%d\n", vl);
}
static SDE_ATTR(cpu_list, S_IWUSR | S_IRUGO,
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
index 73d197e21730..92aa2a9b3b5a 100644
--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
+++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
@@ -2826,6 +2826,7 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
default:
break;
}
+ break;
default:
break;
}
@@ -3005,6 +3006,7 @@ bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
default:
break;
}
+ break;
default:
break;
}
@@ -3221,6 +3223,7 @@ bool hfi1_tid_rdma_wqe_interlock(struct rvt_qp *qp, struct rvt_swqe *wqe)
req = wqe_to_tid_req(prev);
if (req->ack_seg != req->total_segs)
goto interlock;
+ break;
default:
break;
}
@@ -3239,9 +3242,11 @@ bool hfi1_tid_rdma_wqe_interlock(struct rvt_qp *qp, struct rvt_swqe *wqe)
req = wqe_to_tid_req(prev);
if (req->ack_seg != req->total_segs)
goto interlock;
+ break;
default:
break;
}
+ break;
default:
break;
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
index 75b06db60f7c..cc258edec331 100644
--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
+++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
@@ -31,14 +31,11 @@
*/
#include <linux/platform_device.h>
+#include <linux/pci.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include "hns_roce_device.h"
-#define HNS_ROCE_PORT_NUM_SHIFT 24
-#define HNS_ROCE_VLAN_SL_BIT_MASK 7
-#define HNS_ROCE_VLAN_SL_SHIFT 13
-
static inline u16 get_ah_udp_sport(const struct rdma_ah_attr *ah_attr)
{
u32 fl = ah_attr->grh.flow_label;
@@ -58,47 +55,41 @@ static inline u16 get_ah_udp_sport(const struct rdma_ah_attr *ah_attr)
int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
struct ib_udata *udata)
{
- struct hns_roce_dev *hr_dev = to_hr_dev(ibah->device);
- const struct ib_gid_attr *gid_attr;
- struct device *dev = hr_dev->dev;
- struct hns_roce_ah *ah = to_hr_ah(ibah);
struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
- u16 vlan_id = 0xffff;
- bool vlan_en = false;
- int ret;
-
- gid_attr = ah_attr->grh.sgid_attr;
- ret = rdma_read_gid_l2_fields(gid_attr, &vlan_id, NULL);
- if (ret)
- return ret;
-
- /* Get mac address */
- memcpy(ah->av.mac, ah_attr->roce.dmac, ETH_ALEN);
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibah->device);
+ struct hns_roce_ah *ah = to_hr_ah(ibah);
+ int ret = 0;
- if (vlan_id < VLAN_N_VID) {
- vlan_en = true;
- vlan_id |= (rdma_ah_get_sl(ah_attr) &
- HNS_ROCE_VLAN_SL_BIT_MASK) <<
- HNS_ROCE_VLAN_SL_SHIFT;
- }
+ if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08 && udata)
+ return -EOPNOTSUPP;
ah->av.port = rdma_ah_get_port_num(ah_attr);
ah->av.gid_index = grh->sgid_index;
- ah->av.vlan_id = vlan_id;
- ah->av.vlan_en = vlan_en;
- dev_dbg(dev, "gid_index = 0x%x,vlan_id = 0x%x\n", ah->av.gid_index,
- ah->av.vlan_id);
if (rdma_ah_get_static_rate(ah_attr))
ah->av.stat_rate = IB_RATE_10_GBPS;
- memcpy(ah->av.dgid, grh->dgid.raw, HNS_ROCE_GID_SIZE);
- ah->av.sl = rdma_ah_get_sl(ah_attr);
+ ah->av.hop_limit = grh->hop_limit;
ah->av.flowlabel = grh->flow_label;
ah->av.udp_sport = get_ah_udp_sport(ah_attr);
+ ah->av.sl = rdma_ah_get_sl(ah_attr);
+ ah->av.tclass = get_tclass(grh);
- return 0;
+ memcpy(ah->av.dgid, grh->dgid.raw, HNS_ROCE_GID_SIZE);
+ memcpy(ah->av.mac, ah_attr->roce.dmac, ETH_ALEN);
+
+ /* HIP08 needs to record vlan info in Address Vector */
+ if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08) {
+ ret = rdma_read_gid_l2_fields(ah_attr->grh.sgid_attr,
+ &ah->av.vlan_id, NULL);
+ if (ret)
+ return ret;
+
+ ah->av.vlan_en = ah->av.vlan_id < VLAN_N_VID;
+ }
+
+ return ret;
}
int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c
index a6b23dec1adc..4bcaaa0524b1 100644
--- a/drivers/infiniband/hw/hns/hns_roce_alloc.c
+++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c
@@ -159,76 +159,96 @@ void hns_roce_bitmap_cleanup(struct hns_roce_bitmap *bitmap)
void hns_roce_buf_free(struct hns_roce_dev *hr_dev, struct hns_roce_buf *buf)
{
- struct device *dev = hr_dev->dev;
- u32 size = buf->size;
- int i;
+ struct hns_roce_buf_list *trunks;
+ u32 i;
- if (size == 0)
+ if (!buf)
return;
- buf->size = 0;
+ trunks = buf->trunk_list;
+ if (trunks) {
+ buf->trunk_list = NULL;
+ for (i = 0; i < buf->ntrunks; i++)
+ dma_free_coherent(hr_dev->dev, 1 << buf->trunk_shift,
+ trunks[i].buf, trunks[i].map);
- if (hns_roce_buf_is_direct(buf)) {
- dma_free_coherent(dev, size, buf->direct.buf, buf->direct.map);
- } else {
- for (i = 0; i < buf->npages; ++i)
- if (buf->page_list[i].buf)
- dma_free_coherent(dev, 1 << buf->page_shift,
- buf->page_list[i].buf,
- buf->page_list[i].map);
- kfree(buf->page_list);
- buf->page_list = NULL;
+ kfree(trunks);
}
+
+ kfree(buf);
}
-int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
- struct hns_roce_buf *buf, u32 page_shift)
+/*
+ * Allocate the DMA buffer for storing ROCEE table entries
+ *
+ * @size: required size
+ * @page_shift: the unit size in a continuous DMA address range
+ * @flags: HNS_ROCE_BUF_ flags to control the allocation flow
+ */
+struct hns_roce_buf *hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size,
+ u32 page_shift, u32 flags)
{
- struct hns_roce_buf_list *buf_list;
- struct device *dev = hr_dev->dev;
- u32 page_size;
- int i;
+ u32 trunk_size, page_size, alloced_size;
+ struct hns_roce_buf_list *trunks;
+ struct hns_roce_buf *buf;
+ gfp_t gfp_flags;
+ u32 ntrunk, i;
/* The minimum shift of the page accessed by hw is HNS_HW_PAGE_SHIFT */
- buf->page_shift = max_t(int, HNS_HW_PAGE_SHIFT, page_shift);
+ if (WARN_ON(page_shift < HNS_HW_PAGE_SHIFT))
+ return ERR_PTR(-EINVAL);
+
+ gfp_flags = (flags & HNS_ROCE_BUF_NOSLEEP) ? GFP_ATOMIC : GFP_KERNEL;
+ buf = kzalloc(sizeof(*buf), gfp_flags);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+ buf->page_shift = page_shift;
page_size = 1 << buf->page_shift;
- buf->npages = DIV_ROUND_UP(size, page_size);
-
- /* required size is not bigger than one trunk size */
- if (size <= max_direct) {
- buf->page_list = NULL;
- buf->direct.buf = dma_alloc_coherent(dev, size,
- &buf->direct.map,
- GFP_KERNEL);
- if (!buf->direct.buf)
- return -ENOMEM;
+
+ /* Calculate the trunk size and count from the required size and page_shift */
+ if (flags & HNS_ROCE_BUF_DIRECT) {
+ buf->trunk_shift = ilog2(ALIGN(size, PAGE_SIZE));
+ ntrunk = 1;
} else {
- buf_list = kcalloc(buf->npages, sizeof(*buf_list), GFP_KERNEL);
- if (!buf_list)
- return -ENOMEM;
-
- for (i = 0; i < buf->npages; i++) {
- buf_list[i].buf = dma_alloc_coherent(dev, page_size,
- &buf_list[i].map,
- GFP_KERNEL);
- if (!buf_list[i].buf)
- break;
- }
+ buf->trunk_shift = ilog2(ALIGN(page_size, PAGE_SIZE));
+ ntrunk = DIV_ROUND_UP(size, 1 << buf->trunk_shift);
+ }
- if (i != buf->npages && i > 0) {
- while (i-- > 0)
- dma_free_coherent(dev, page_size,
- buf_list[i].buf,
- buf_list[i].map);
- kfree(buf_list);
- return -ENOMEM;
- }
- buf->page_list = buf_list;
+ trunks = kcalloc(ntrunk, sizeof(*trunks), gfp_flags);
+ if (!trunks) {
+ kfree(buf);
+ return ERR_PTR(-ENOMEM);
}
- buf->size = size;
- return 0;
+ trunk_size = 1 << buf->trunk_shift;
+ alloced_size = 0;
+ for (i = 0; i < ntrunk; i++) {
+ trunks[i].buf = dma_alloc_coherent(hr_dev->dev, trunk_size,
+ &trunks[i].map, gfp_flags);
+ if (!trunks[i].buf)
+ break;
+
+ alloced_size += trunk_size;
+ }
+
+ buf->ntrunks = i;
+
+ /* In nofail mode, allocation only fails when the allocated size is 0 */
+ if ((flags & HNS_ROCE_BUF_NOFAIL) ? i == 0 : i != ntrunk) {
+ for (i = 0; i < buf->ntrunks; i++)
+ dma_free_coherent(hr_dev->dev, trunk_size,
+ trunks[i].buf, trunks[i].map);
+
+ kfree(trunks);
+ kfree(buf);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ buf->npages = DIV_ROUND_UP(alloced_size, page_size);
+ buf->trunk_list = trunks;
+
+ return buf;
}
int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
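
The rewritten allocator carves the buffer into equal power-of-two trunks, each obtained by one dma_alloc_coherent() call; HNS_ROCE_BUF_DIRECT collapses that to a single trunk spanning the whole page-aligned size. The sizing arithmetic can be sanity-checked in user space (assuming a 4 KiB host page):

#include <stdio.h>

#define MY_PAGE_SIZE 4096u	/* assumed host page size */

int main(void)
{
	unsigned int size = 3 * MY_PAGE_SIZE + 100;	/* requested bytes */
	unsigned int trunk_shift = 12;	/* ilog2(ALIGN(page_size, PAGE_SIZE)) */
	unsigned int trunk_size = 1u << trunk_shift;
	unsigned int ntrunk = (size + trunk_size - 1) / trunk_size;

	/* DIV_ROUND_UP(size, trunk_size) -> 4 trunks for this request */
	printf("%u trunks of %u bytes\n", ntrunk, trunk_size);
	return 0;
}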
@@ -240,7 +260,7 @@ int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
end = start + buf_cnt;
if (end > buf->npages) {
dev_err(hr_dev->dev,
- "Failed to check kmem bufs, end %d + %d total %d!\n",
+ "failed to check kmem bufs, end %d + %d total %u!\n",
start, buf_cnt, buf->npages);
return -EINVAL;
}
@@ -262,7 +282,7 @@ int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
u64 addr;
if (page_shift < HNS_HW_PAGE_SHIFT) {
- dev_err(hr_dev->dev, "Failed to check umem page shift %d!\n",
+ dev_err(hr_dev->dev, "failed to check umem page shift %u!\n",
page_shift);
return -EINVAL;
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.c b/drivers/infiniband/hw/hns/hns_roce_cmd.c
index 455d533dd7c4..339e3fd98b0b 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cmd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cmd.c
@@ -36,9 +36,9 @@
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
-#define CMD_POLL_TOKEN 0xffff
-#define CMD_MAX_NUM 32
-#define CMD_TOKEN_MASK 0x1f
+#define CMD_POLL_TOKEN 0xffff
+#define CMD_MAX_NUM 32
+#define CMD_TOKEN_MASK 0x1f
static int hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev, u64 in_param,
u64 out_param, u32 in_modifier,
@@ -60,7 +60,7 @@ static int hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev, u64 in_param,
static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
u64 out_param, unsigned long in_modifier,
u8 op_modifier, u16 op,
- unsigned long timeout)
+ unsigned int timeout)
{
struct device *dev = hr_dev->dev;
int ret;
@@ -78,7 +78,7 @@ static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
static int hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
u64 out_param, unsigned long in_modifier,
- u8 op_modifier, u16 op, unsigned long timeout)
+ u8 op_modifier, u16 op, unsigned int timeout)
{
int ret;
@@ -93,8 +93,8 @@ static int hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status,
u64 out_param)
{
- struct hns_roce_cmd_context
- *context = &hr_dev->cmd.context[token & hr_dev->cmd.token_mask];
+ struct hns_roce_cmd_context *context =
+ &hr_dev->cmd.context[token % hr_dev->cmd.max_cmds];
if (token != context->token)
return;
@@ -108,7 +108,7 @@ void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status,
static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
u64 out_param, unsigned long in_modifier,
u8 op_modifier, u16 op,
- unsigned long timeout)
+ unsigned int timeout)
{
struct hns_roce_cmdq *cmd = &hr_dev->cmd;
struct hns_roce_cmd_context *context;
@@ -159,13 +159,13 @@ out:
static int hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
u64 out_param, unsigned long in_modifier,
- u8 op_modifier, u16 op, unsigned long timeout)
+ u8 op_modifier, u16 op, unsigned int timeout)
{
int ret;
down(&hr_dev->cmd.event_sem);
- ret = __hns_roce_cmd_mbox_wait(hr_dev, in_param, out_param,
- in_modifier, op_modifier, op, timeout);
+ ret = __hns_roce_cmd_mbox_wait(hr_dev, in_param, out_param, in_modifier,
+ op_modifier, op, timeout);
up(&hr_dev->cmd.event_sem);
return ret;
@@ -173,7 +173,7 @@ static int hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param,
unsigned long in_modifier, u8 op_modifier, u16 op,
- unsigned long timeout)
+ unsigned int timeout)
{
int ret;
@@ -231,9 +231,8 @@ int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev)
struct hns_roce_cmdq *hr_cmd = &hr_dev->cmd;
int i;
- hr_cmd->context = kmalloc_array(hr_cmd->max_cmds,
- sizeof(*hr_cmd->context),
- GFP_KERNEL);
+ hr_cmd->context =
+ kcalloc(hr_cmd->max_cmds, sizeof(*hr_cmd->context), GFP_KERNEL);
if (!hr_cmd->context)
return -ENOMEM;
@@ -262,8 +261,8 @@ void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev)
hr_cmd->use_events = 0;
}
-struct hns_roce_cmd_mailbox
- *hns_roce_alloc_cmd_mailbox(struct hns_roce_dev *hr_dev)
+struct hns_roce_cmd_mailbox *
+hns_roce_alloc_cmd_mailbox(struct hns_roce_dev *hr_dev)
{
struct hns_roce_cmd_mailbox *mailbox;
@@ -271,8 +270,8 @@ struct hns_roce_cmd_mailbox
if (!mailbox)
return ERR_PTR(-ENOMEM);
- mailbox->buf = dma_pool_alloc(hr_dev->cmd.pool, GFP_KERNEL,
- &mailbox->dma);
+ mailbox->buf =
+ dma_pool_alloc(hr_dev->cmd.pool, GFP_KERNEL, &mailbox->dma);
if (!mailbox->buf) {
kfree(mailbox);
return ERR_PTR(-ENOMEM);
diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.h b/drivers/infiniband/hw/hns/hns_roce_cmd.h
index 1915bacaded0..8025e7f657fa 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cmd.h
+++ b/drivers/infiniband/hw/hns/hns_roce_cmd.h
@@ -141,10 +141,10 @@ enum {
int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param,
unsigned long in_modifier, u8 op_modifier, u16 op,
- unsigned long timeout);
+ unsigned int timeout);
-struct hns_roce_cmd_mailbox
- *hns_roce_alloc_cmd_mailbox(struct hns_roce_dev *hr_dev);
+struct hns_roce_cmd_mailbox *
+hns_roce_alloc_cmd_mailbox(struct hns_roce_dev *hr_dev);
void hns_roce_free_cmd_mailbox(struct hns_roce_dev *hr_dev,
struct hns_roce_cmd_mailbox *mailbox);
diff --git a/drivers/infiniband/hw/hns/hns_roce_common.h b/drivers/infiniband/hw/hns/hns_roce_common.h
index f5669ff8cfeb..5afee04fb02c 100644
--- a/drivers/infiniband/hw/hns/hns_roce_common.h
+++ b/drivers/infiniband/hw/hns/hns_roce_common.h
@@ -38,21 +38,33 @@
#define roce_raw_write(value, addr) \
__raw_writel((__force u32)cpu_to_le32(value), (addr))
-#define roce_get_field(origin, mask, shift) \
- (((le32_to_cpu(origin)) & (mask)) >> (shift))
+#define roce_get_field(origin, mask, shift) \
+ ((le32_to_cpu(origin) & (mask)) >> (u32)(shift))
#define roce_get_bit(origin, shift) \
roce_get_field((origin), (1ul << (shift)), (shift))
-#define roce_set_field(origin, mask, shift, val) \
- do { \
- (origin) &= ~cpu_to_le32(mask); \
- (origin) |= cpu_to_le32(((u32)(val) << (shift)) & (mask)); \
+#define roce_set_field(origin, mask, shift, val) \
+ do { \
+ (origin) &= ~cpu_to_le32(mask); \
+ (origin) |= cpu_to_le32(((u32)(val) << (u32)(shift)) & (mask)); \
} while (0)
-#define roce_set_bit(origin, shift, val) \
+#define roce_set_bit(origin, shift, val) \
roce_set_field((origin), (1ul << (shift)), (shift), (val))
+#define FIELD_LOC(field_type, field_h, field_l) field_type, field_h, field_l
+
+#define _hr_reg_enable(ptr, field_type, field_h, field_l) \
+ ({ \
+ const field_type *_ptr = ptr; \
+ *((__le32 *)_ptr + (field_h) / 32) |= \
+ cpu_to_le32(BIT((field_l) % 32)) + \
+ BUILD_BUG_ON_ZERO((field_h) != (field_l)); \
+ })
+
+#define hr_reg_enable(ptr, field) _hr_reg_enable(ptr, field)
+
#define ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S 3
#define ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S 4
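
hr_reg_enable() starts a table-driven style for single-bit fields in little-endian hardware contexts: the field is declared once as FIELD_LOC(type, high_bit, low_bit), the macro derives which __le32 word and bit to set, and BUILD_BUG_ON_ZERO() rejects any declaration wider than one bit at compile time. A hypothetical field showing the intended use:

/* hypothetical context and field, for illustration only */
struct hns_roce_foo_context {
	__le32 data[4];
};

/* bit 35 of the structure, i.e. bit 3 of data[1] */
#define FOO_FIELD_EN FIELD_LOC(struct hns_roce_foo_context, 35, 35)

	/* expands to: ctx->data[1] |= cpu_to_le32(BIT(3)) */
	hr_reg_enable(ctx, FOO_FIELD_EN);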
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 809b22aa5056..8533fc2d8df2 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -36,43 +36,42 @@
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
-#include <rdma/hns-abi.h>
#include "hns_roce_common.h"
static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_cmd_mailbox *mailbox;
struct hns_roce_cq_table *cq_table;
- struct ib_device *ibdev = &hr_dev->ib_dev;
u64 mtts[MTT_MIN_COUNT] = { 0 };
dma_addr_t dma_handle;
int ret;
ret = hns_roce_mtr_find(hr_dev, &hr_cq->mtr, 0, mtts, ARRAY_SIZE(mtts),
&dma_handle);
- if (ret < 1) {
- ibdev_err(ibdev, "Failed to find CQ mtr\n");
+ if (!ret) {
+ ibdev_err(ibdev, "failed to find CQ mtr, ret = %d.\n", ret);
return -EINVAL;
}
cq_table = &hr_dev->cq_table;
ret = hns_roce_bitmap_alloc(&cq_table->bitmap, &hr_cq->cqn);
if (ret) {
- ibdev_err(ibdev, "Failed to alloc CQ bitmap, err %d\n", ret);
+ ibdev_err(ibdev, "failed to alloc CQ bitmap, ret = %d.\n", ret);
return ret;
}
/* Get CQC memory HEM(Hardware Entry Memory) table */
ret = hns_roce_table_get(hr_dev, &cq_table->table, hr_cq->cqn);
if (ret) {
- ibdev_err(ibdev, "Failed to get CQ(0x%lx) context, err %d\n",
+ ibdev_err(ibdev, "failed to get CQ(0x%lx) context, ret = %d.\n",
hr_cq->cqn, ret);
goto err_out;
}
ret = xa_err(xa_store(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL));
if (ret) {
- ibdev_err(ibdev, "Failed to xa_store CQ\n");
+ ibdev_err(ibdev, "failed to xa_store CQ, ret = %d.\n", ret);
goto err_put;
}
@@ -91,7 +90,7 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
if (ret) {
ibdev_err(ibdev,
- "Failed to send create cmd for CQ(0x%lx), err %d\n",
+ "failed to send create cmd for CQ(0x%lx), ret = %d.\n",
hr_cq->cqn, ret);
goto err_xa;
}
@@ -147,7 +146,7 @@ static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
{
struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_buf_attr buf_attr = {};
- int err;
+ int ret;
buf_attr.page_shift = hr_dev->caps.cqe_buf_pg_sz + HNS_HW_PAGE_SHIFT;
buf_attr.region[0].size = hr_cq->cq_depth * hr_cq->cqe_size;
@@ -155,13 +154,13 @@ static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
buf_attr.region_count = 1;
buf_attr.fixed_page = true;
- err = hns_roce_mtr_create(hr_dev, &hr_cq->mtr, &buf_attr,
+ ret = hns_roce_mtr_create(hr_dev, &hr_cq->mtr, &buf_attr,
hr_dev->caps.cqe_ba_pg_sz + HNS_HW_PAGE_SHIFT,
udata, addr);
- if (err)
- ibdev_err(ibdev, "Failed to alloc CQ mtr, err %d\n", err);
+ if (ret)
+ ibdev_err(ibdev, "failed to alloc CQ mtr, ret = %d.\n", ret);
- return err;
+ return ret;
}
static void free_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
@@ -251,14 +250,17 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
u32 cq_entries = attr->cqe;
int ret;
+ if (attr->flags)
+ return -EOPNOTSUPP;
+
if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) {
- ibdev_err(ibdev, "Failed to check CQ count %d max=%d\n",
+ ibdev_err(ibdev, "failed to check CQ count %u, max = %u.\n",
cq_entries, hr_dev->caps.max_cqes);
return -EINVAL;
}
if (vector >= hr_dev->caps.num_comp_vectors) {
- ibdev_err(ibdev, "Failed to check CQ vector=%d max=%d\n",
+ ibdev_err(ibdev, "failed to check CQ vector = %d, max = %d.\n",
vector, hr_dev->caps.num_comp_vectors);
return -EINVAL;
}
@@ -274,9 +276,9 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
if (udata) {
ret = ib_copy_from_udata(&ucmd, udata,
- min(sizeof(ucmd), udata->inlen));
+ min(udata->inlen, sizeof(ucmd)));
if (ret) {
- ibdev_err(ibdev, "Failed to copy CQ udata, err %d\n",
+ ibdev_err(ibdev, "failed to copy CQ udata, ret = %d.\n",
ret);
return ret;
}
@@ -286,19 +288,20 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
ret = alloc_cq_buf(hr_dev, hr_cq, udata, ucmd.buf_addr);
if (ret) {
- ibdev_err(ibdev, "Failed to alloc CQ buf, err %d\n", ret);
+ ibdev_err(ibdev, "failed to alloc CQ buf, ret = %d.\n", ret);
return ret;
}
ret = alloc_cq_db(hr_dev, hr_cq, udata, ucmd.db_addr, &resp);
if (ret) {
- ibdev_err(ibdev, "Failed to alloc CQ db, err %d\n", ret);
+ ibdev_err(ibdev, "failed to alloc CQ db, ret = %d.\n", ret);
goto err_cq_buf;
}
ret = alloc_cqc(hr_dev, hr_cq);
if (ret) {
- ibdev_err(ibdev, "Failed to alloc CQ context, err %d\n", ret);
+ ibdev_err(ibdev,
+ "failed to alloc CQ context, ret = %d.\n", ret);
goto err_cq_db;
}
@@ -313,7 +316,8 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
if (udata) {
resp.cqn = hr_cq->cqn;
- ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
+ ret = ib_copy_to_udata(udata, &resp,
+ min(udata->outlen, sizeof(resp)));
if (ret)
goto err_cqc;
}
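
Clamping the copies to min(udata->inlen, sizeof(ucmd)) and min(udata->outlen, sizeof(resp)) keeps old and new user-space ABIs compatible: a shorter request leaves the zero-initialized tail of the kernel struct untouched, and a shorter user buffer simply receives a truncated response. Schematically (assuming ucmd was zero-initialized before the copy):

	ret = ib_copy_from_udata(&ucmd, udata,
				 min(udata->inlen, sizeof(ucmd)));
	...
	ret = ib_copy_to_udata(udata, &resp,
			       min(udata->outlen, sizeof(resp)));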
diff --git a/drivers/infiniband/hw/hns/hns_roce_db.c b/drivers/infiniband/hw/hns/hns_roce_db.c
index bff6abdccfb0..5cb7376ce978 100644
--- a/drivers/infiniband/hw/hns/hns_roce_db.c
+++ b/drivers/infiniband/hw/hns/hns_roce_db.c
@@ -95,8 +95,8 @@ static struct hns_roce_db_pgdir *hns_roce_alloc_db_pgdir(
static int hns_roce_alloc_db_from_pgdir(struct hns_roce_db_pgdir *pgdir,
struct hns_roce_db *db, int order)
{
- int o;
- int i;
+ unsigned long o;
+ unsigned long i;
for (o = order; o <= 1; ++o) {
i = find_first_bit(pgdir->bits[o], HNS_ROCE_DB_PER_PAGE >> o);
@@ -154,8 +154,8 @@ out:
void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db)
{
- int o;
- int i;
+ unsigned long o;
+ unsigned long i;
mutex_lock(&hr_dev->pgdir_mutex);
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 6d2acff69f98..ad8253245a85 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -34,6 +34,7 @@
#define _HNS_ROCE_DEVICE_H
#include <rdma/ib_verbs.h>
+#include <rdma/hns-abi.h>
#define DRV_NAME "hns_roce"
@@ -117,6 +118,8 @@
#define HNS_ROCE_IDX_QUE_ENTRY_SZ 4
#define SRQ_DB_REG 0x230
+#define HNS_ROCE_QP_BANK_NUM 8
+
/* The chip implementation of the consumer index is calculated
* according to twice the actual EQ depth
*/
@@ -129,15 +132,6 @@ enum {
SERV_TYPE_UD,
};
-enum {
- HNS_ROCE_QP_CAP_RQ_RECORD_DB = BIT(0),
- HNS_ROCE_QP_CAP_SQ_RECORD_DB = BIT(1),
-};
-
-enum hns_roce_cq_flags {
- HNS_ROCE_CQ_FLAG_RECORD_DB = BIT(0),
-};
-
enum hns_roce_qp_state {
HNS_ROCE_QP_STATE_RST,
HNS_ROCE_QP_STATE_INIT,
@@ -166,7 +160,6 @@ enum hns_roce_event {
/* 0x10 and 0x11 is unused in currently application case */
HNS_ROCE_EVENT_TYPE_DB_OVERFLOW = 0x12,
HNS_ROCE_EVENT_TYPE_MB = 0x13,
- HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW = 0x14,
HNS_ROCE_EVENT_TYPE_FLR = 0x15,
};
@@ -221,6 +214,8 @@ enum {
HNS_ROCE_CAP_FLAG_FRMR = BIT(8),
HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL = BIT(9),
HNS_ROCE_CAP_FLAG_ATOMIC = BIT(10),
+ HNS_ROCE_CAP_FLAG_SDI_MODE = BIT(14),
+ HNS_ROCE_CAP_FLAG_STASH = BIT(17),
};
#define HNS_ROCE_DB_TYPE_COUNT 2
@@ -265,9 +260,6 @@ enum {
#define HNS_HW_PAGE_SHIFT 12
#define HNS_HW_PAGE_SIZE (1 << HNS_HW_PAGE_SHIFT)
-/* The minimum page count for hardware access page directly. */
-#define HNS_HW_DIRECT_PAGE_COUNT 2
-
struct hns_roce_uar {
u64 pfn;
unsigned long index;
@@ -318,7 +310,7 @@ struct hns_roce_hem_table {
};
struct hns_roce_buf_region {
- int offset; /* page offset */
+ u32 offset; /* page offset */
u32 count; /* page count */
int hopnum; /* addressing hop num */
};
@@ -338,10 +330,10 @@ struct hns_roce_buf_attr {
size_t size; /* region size */
int hopnum; /* multi-hop addressing hop num */
} region[HNS_ROCE_MAX_BT_REGION];
- int region_count; /* valid region count */
+ unsigned int region_count; /* valid region count */
unsigned int page_shift; /* buffer page shift */
bool fixed_page; /* decide page shift is fixed-size or maximum size */
- int user_access; /* umem access flag */
+ unsigned int user_access; /* umem access flag */
bool mtt_only; /* only alloc buffer-required MTT memory */
};
@@ -352,7 +344,7 @@ struct hns_roce_hem_cfg {
unsigned int buf_pg_shift; /* buffer page shift */
unsigned int buf_pg_count; /* buffer page count */
struct hns_roce_buf_region region[HNS_ROCE_MAX_BT_REGION];
- int region_count;
+ unsigned int region_count;
};
/* memory translate region */
@@ -400,7 +392,7 @@ struct hns_roce_wq {
u64 *wrid; /* Work request ID */
spinlock_t lock;
u32 wqe_cnt; /* WQE num */
- int max_gs;
+ u32 max_gs;
int offset;
int wqe_shift; /* WQE size */
u32 head;
@@ -419,11 +411,26 @@ struct hns_roce_buf_list {
dma_addr_t map;
};
+/*
+ * %HNS_ROCE_BUF_DIRECT indicates that all memory must be in a continuous
+ * DMA address range.
+ *
+ * %HNS_ROCE_BUF_NOSLEEP indicates that the caller cannot sleep.
+ *
+ * %HNS_ROCE_BUF_NOFAIL indicates that the allocation only fails when the
+ * allocated size is zero, even if the allocated size is smaller than the
+ * required size.
+ */
+enum {
+ HNS_ROCE_BUF_DIRECT = BIT(0),
+ HNS_ROCE_BUF_NOSLEEP = BIT(1),
+ HNS_ROCE_BUF_NOFAIL = BIT(2),
+};
+
struct hns_roce_buf {
- struct hns_roce_buf_list direct;
- struct hns_roce_buf_list *page_list;
+ struct hns_roce_buf_list *trunk_list;
+ u32 ntrunks;
u32 npages;
- u32 size;
+ unsigned int trunk_shift;
unsigned int page_shift;
};
@@ -451,8 +458,8 @@ struct hns_roce_db {
} u;
dma_addr_t dma;
void *virt_addr;
- int index;
- int order;
+ unsigned long index;
+ unsigned long order;
};
struct hns_roce_cq {
@@ -500,8 +507,8 @@ struct hns_roce_srq {
u64 *wrid;
struct hns_roce_idx_que idx_que;
spinlock_t lock;
- int head;
- int tail;
+ u16 head;
+ u16 tail;
struct mutex mutex;
void (*event)(struct hns_roce_srq *srq, enum hns_roce_event event);
};
@@ -510,13 +517,22 @@ struct hns_roce_uar_table {
struct hns_roce_bitmap bitmap;
};
+struct hns_roce_bank {
+ struct ida ida;
+ u32 inuse; /* Number of IDs allocated */
+ u32 min; /* Lowest ID to allocate. */
+ u32 max; /* Highest ID to allocate. */
+ u32 next; /* Next ID to allocate. */
+};
+
struct hns_roce_qp_table {
- struct hns_roce_bitmap bitmap;
struct hns_roce_hem_table qp_table;
struct hns_roce_hem_table irrl_table;
struct hns_roce_hem_table trrl_table;
struct hns_roce_hem_table sccc_table;
struct mutex scc_mutex;
+ struct hns_roce_bank bank[HNS_ROCE_QP_BANK_NUM];
+ struct mutex bank_mutex;
};
struct hns_roce_cq_table {
@@ -547,7 +563,7 @@ struct hns_roce_av {
u8 dgid[HNS_ROCE_GID_SIZE];
u8 mac[ETH_ALEN];
u16 vlan_id;
- bool vlan_en;
+ u8 vlan_en;
};
struct hns_roce_ah {
@@ -619,10 +635,9 @@ enum {
struct hns_roce_work {
struct hns_roce_dev *hr_dev;
struct work_struct work;
- u32 qpn;
- u32 cqn;
int event_type;
int sub_type;
+ u32 queue_num;
};
struct hns_roce_qp {
@@ -690,28 +705,10 @@ struct hns_roce_aeqe {
__le32 asyn;
union {
struct {
- __le32 qp;
- u32 rsv0;
- u32 rsv1;
- } qp_event;
-
- struct {
- __le32 srq;
- u32 rsv0;
- u32 rsv1;
- } srq_event;
-
- struct {
- __le32 cq;
- u32 rsv0;
- u32 rsv1;
- } cq_event;
-
- struct {
- __le32 ceqe;
+ __le32 num;
u32 rsv0;
u32 rsv1;
- } ce_event;
+ } queue_event;
struct {
__le64 out_param;
@@ -730,11 +727,11 @@ struct hns_roce_eq {
int type_flag; /* Aeq:1 ceq:0 */
int eqn;
u32 entries;
- int log_entries;
+ u32 log_entries;
int eqe_size;
int irq;
int log_page_size;
- int cons_index;
+ u32 cons_index;
struct hns_roce_buf_list *buf_list;
int over_ignore;
int coalesce;
@@ -742,7 +739,7 @@ struct hns_roce_eq {
int hop_num;
struct hns_roce_mtr mtr;
u16 eq_max_cnt;
- int eq_period;
+ u32 eq_period;
int shift;
int event_type;
int sub_type;
@@ -765,8 +762,8 @@ struct hns_roce_caps {
u32 max_sq_inline;
u32 max_rq_sg;
u32 max_extend_sg;
- int num_qps;
- int reserved_qps;
+ u32 num_qps;
+ u32 reserved_qps;
int num_qpc_timer;
int num_cqc_timer;
int num_srqs;
@@ -778,7 +775,7 @@ struct hns_roce_caps {
u32 max_srq_desc_sz;
int max_qp_init_rdma;
int max_qp_dest_rdma;
- int num_cqs;
+ u32 num_cqs;
u32 max_cqes;
u32 min_cqes;
u32 min_wqes;
@@ -787,7 +784,7 @@ struct hns_roce_caps {
int num_aeq_vectors;
int num_comp_vectors;
int num_other_vectors;
- int num_mtpts;
+ u32 num_mtpts;
u32 num_mtt_segs;
u32 num_cqe_segs;
u32 num_srqwqe_segs;
@@ -825,6 +822,7 @@ struct hns_roce_caps {
u32 cqc_timer_bt_num;
u32 mpt_bt_num;
u32 sccc_bt_num;
+ u32 gmv_bt_num;
u32 qpc_ba_pg_sz;
u32 qpc_buf_pg_sz;
u32 qpc_hop_num;
@@ -864,6 +862,11 @@ struct hns_roce_caps {
u32 eqe_ba_pg_sz;
u32 eqe_buf_pg_sz;
u32 eqe_hop_num;
+ u32 gmv_entry_num;
+ u32 gmv_entry_sz;
+ u32 gmv_ba_pg_sz;
+ u32 gmv_buf_pg_sz;
+ u32 gmv_hop_num;
u32 sl_num;
u32 tsq_buf_pg_sz;
u32 tpq_buf_pg_sz;
@@ -898,7 +901,7 @@ struct hns_roce_hw {
int (*post_mbox)(struct hns_roce_dev *hr_dev, u64 in_param,
u64 out_param, u32 in_modifier, u8 op_modifier, u16 op,
u16 token, int event);
- int (*chk_mbox)(struct hns_roce_dev *hr_dev, unsigned long timeout);
+ int (*chk_mbox)(struct hns_roce_dev *hr_dev, unsigned int timeout);
int (*rst_prc_mbox)(struct hns_roce_dev *hr_dev);
int (*set_gid)(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
const union ib_gid *gid, const struct ib_gid_attr *attr);
@@ -999,6 +1002,10 @@ struct hns_roce_dev {
struct hns_roce_eq_table eq_table;
struct hns_roce_hem_table qpc_timer_table;
struct hns_roce_hem_table cqc_timer_table;
+ /* GMV is the memory area that the driver allocates for the hardware
+ * to store SGID, SMAC and VLAN information.
+ */
+ struct hns_roce_hem_table gmv_table;
int cmd_mod;
int loop_idc;
@@ -1069,29 +1076,19 @@ static inline struct hns_roce_qp
return xa_load(&hr_dev->qp_table_xa, qpn & (hr_dev->caps.num_qps - 1));
}
-static inline bool hns_roce_buf_is_direct(struct hns_roce_buf *buf)
+static inline void *hns_roce_buf_offset(struct hns_roce_buf *buf,
+ unsigned int offset)
{
- if (buf->page_list)
- return false;
-
- return true;
+ return (char *)(buf->trunk_list[offset >> buf->trunk_shift].buf) +
+ (offset & ((1 << buf->trunk_shift) - 1));
}
-static inline void *hns_roce_buf_offset(struct hns_roce_buf *buf, int offset)
+static inline dma_addr_t hns_roce_buf_page(struct hns_roce_buf *buf, u32 idx)
{
- if (hns_roce_buf_is_direct(buf))
- return (char *)(buf->direct.buf) + (offset & (buf->size - 1));
-
- return (char *)(buf->page_list[offset >> buf->page_shift].buf) +
- (offset & ((1 << buf->page_shift) - 1));
-}
+ unsigned int offset = idx << buf->page_shift;
-static inline dma_addr_t hns_roce_buf_page(struct hns_roce_buf *buf, int idx)
-{
- if (hns_roce_buf_is_direct(buf))
- return buf->direct.map + ((dma_addr_t)idx << buf->page_shift);
- else
- return buf->page_list[idx].map;
+ return buf->trunk_list[offset >> buf->trunk_shift].map +
+ (offset & ((1 << buf->trunk_shift) - 1));
}
#define hr_hw_page_align(x) ALIGN(x, 1 << HNS_HW_PAGE_SHIFT)
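
With trunks in place, both helpers above become the same two-step lookup: the high bits of the byte offset select a trunk, the low bits index inside it. A quick user-space check of the indexing:

#include <stdio.h>

int main(void)
{
	unsigned int trunk_shift = 16;	/* e.g. 64 KiB trunks */
	unsigned int offset = (3u << trunk_shift) + 0x123;

	printf("trunk %u, offset 0x%x\n",
	       offset >> trunk_shift,			/* 3 */
	       offset & ((1u << trunk_shift) - 1));	/* 0x123 */
	return 0;
}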
@@ -1132,6 +1129,14 @@ static inline u32 to_hr_hem_entries_shift(u32 count, u32 buf_shift)
return ilog2(to_hr_hem_entries_count(count, buf_shift));
}
+#define DSCP_SHIFT 2
+
+static inline u8 get_tclass(const struct ib_global_route *grh)
+{
+ return grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP ?
+ grh->traffic_class >> DSCP_SHIFT : grh->traffic_class;
+}
+
int hns_roce_init_uar_table(struct hns_roce_dev *dev);
int hns_roce_uar_alloc(struct hns_roce_dev *dev, struct hns_roce_uar *uar);
void hns_roce_uar_free(struct hns_roce_dev *dev, struct hns_roce_uar *uar);
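
get_tclass() accounts for the two RoCE encapsulations: RoCEv2 (IB_GID_TYPE_ROCE_UDP_ENCAP) carries the class in the IP header's DSCP field, which occupies the upper six bits of the traffic-class octet, hence the shift; RoCEv1 keeps the full octet. Checking the shift:

#include <stdio.h>

#define DSCP_SHIFT 2

int main(void)
{
	unsigned int tclass = 0xb8;	/* EF (46) in the top six bits */

	printf("dscp = %u\n", tclass >> DSCP_SHIFT);	/* prints 46 */
	return 0;
}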
@@ -1155,7 +1160,7 @@ int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
void hns_roce_mtr_destroy(struct hns_roce_dev *hr_dev,
struct hns_roce_mtr *mtr);
int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
- dma_addr_t *pages, int page_cnt);
+ dma_addr_t *pages, unsigned int page_cnt);
int hns_roce_init_pd_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev);
@@ -1198,9 +1203,10 @@ struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
struct ib_udata *udata);
-int hns_roce_rereg_user_mr(struct ib_mr *mr, int flags, u64 start, u64 length,
- u64 virt_addr, int mr_access_flags, struct ib_pd *pd,
- struct ib_udata *udata);
+struct ib_mr *hns_roce_rereg_user_mr(struct ib_mr *mr, int flags, u64 start,
+ u64 length, u64 virt_addr,
+ int mr_access_flags, struct ib_pd *pd,
+ struct ib_udata *udata);
struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
u32 max_num_sg);
int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
@@ -1215,8 +1221,8 @@ int hns_roce_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
int hns_roce_dealloc_mw(struct ib_mw *ibmw);
void hns_roce_buf_free(struct hns_roce_dev *hr_dev, struct hns_roce_buf *buf);
-int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
- struct hns_roce_buf *buf, u32 page_shift);
+struct hns_roce_buf *hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size,
+ u32 page_shift, u32 flags);
int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
int buf_cnt, int start, struct hns_roce_buf *buf);
@@ -1238,10 +1244,10 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *ib_pd,
int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata);
void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
-void *hns_roce_get_recv_wqe(struct hns_roce_qp *hr_qp, int n);
-void *hns_roce_get_send_wqe(struct hns_roce_qp *hr_qp, int n);
-void *hns_roce_get_extend_sge(struct hns_roce_qp *hr_qp, int n);
-bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
+void *hns_roce_get_recv_wqe(struct hns_roce_qp *hr_qp, unsigned int n);
+void *hns_roce_get_send_wqe(struct hns_roce_qp *hr_qp, unsigned int n);
+void *hns_roce_get_extend_sge(struct hns_roce_qp *hr_qp, unsigned int n);
+bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, u32 nreq,
struct ib_cq *ib_cq);
enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state);
void hns_roce_lock_cqs(struct hns_roce_cq *send_cq,
@@ -1271,7 +1277,7 @@ void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn);
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type);
void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type);
-int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index);
+u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index);
void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev);
int hns_roce_init(struct hns_roce_dev *hr_dev);
void hns_roce_exit(struct hns_roce_dev *hr_dev);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index 7487cf3d2c37..edc9d6b98d95 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -75,6 +75,9 @@ bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type)
case HEM_TYPE_CQC_TIMER:
hop_num = hr_dev->caps.cqc_timer_hop_num;
break;
+ case HEM_TYPE_GMV:
+ hop_num = hr_dev->caps.gmv_hop_num;
+ break;
default:
return false;
}
@@ -183,8 +186,16 @@ static int get_hem_table_config(struct hns_roce_dev *hr_dev,
mhop->ba_l0_num = hr_dev->caps.srqc_bt_num;
mhop->hop_num = hr_dev->caps.srqc_hop_num;
break;
+ case HEM_TYPE_GMV:
+ mhop->buf_chunk_size = 1 << (hr_dev->caps.gmv_buf_pg_sz +
+ PAGE_SHIFT);
+ mhop->bt_chunk_size = 1 << (hr_dev->caps.gmv_ba_pg_sz +
+ PAGE_SHIFT);
+ mhop->ba_l0_num = hr_dev->caps.gmv_bt_num;
+ mhop->hop_num = hr_dev->caps.gmv_hop_num;
+ break;
default:
- dev_err(dev, "Table %d not support multi-hop addressing!\n",
+		dev_err(dev, "table %u does not support multi-hop addressing!\n",
type);
return -EINVAL;
}
@@ -198,9 +209,9 @@ int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
{
struct device *dev = hr_dev->dev;
u32 chunk_ba_num;
+ u32 chunk_size;
u32 table_idx;
u32 bt_num;
- u32 chunk_size;
if (get_hem_table_config(hr_dev, mhop, table->type))
return -EINVAL;
@@ -232,8 +243,8 @@ int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
mhop->l0_idx = table_idx;
break;
default:
- dev_err(dev, "Table %d not support hop_num = %d!\n",
- table->type, mhop->hop_num);
+		dev_err(dev, "table %u does not support hop_num = %u!\n",
+ table->type, mhop->hop_num);
return -EINVAL;
}
if (mhop->l0_idx >= mhop->ba_l0_num)
@@ -332,15 +343,15 @@ static int hns_roce_set_hem(struct hns_roce_dev *hr_dev,
{
spinlock_t *lock = &hr_dev->bt_cmd_lock;
struct device *dev = hr_dev->dev;
- long end;
- unsigned long flags;
struct hns_roce_hem_iter iter;
void __iomem *bt_cmd;
__le32 bt_cmd_val[2];
__le32 bt_cmd_h = 0;
+ unsigned long flags;
__le32 bt_cmd_l;
- u64 bt_ba;
int ret = 0;
+ u64 bt_ba;
+ long end;
/* Find the HEM(Hardware Entry Memory) entry */
unsigned long i = (obj & (table->num_obj - 1)) /
@@ -438,13 +449,13 @@ static int calc_hem_config(struct hns_roce_dev *hr_dev,
index->buf = l0_idx;
break;
default:
- ibdev_err(ibdev, "Table %d not support mhop.hop_num = %d!\n",
+		ibdev_err(ibdev, "table %u does not support mhop.hop_num = %u!\n",
table->type, mhop->hop_num);
return -EINVAL;
}
if (unlikely(index->buf >= table->num_hem)) {
- ibdev_err(ibdev, "Table %d exceed hem limt idx %llu,max %lu!\n",
+		ibdev_err(ibdev, "table %u exceeds HEM limit, idx = %llu, max = %lu!\n",
table->type, index->buf, table->num_hem);
return -EINVAL;
}
@@ -640,8 +651,8 @@ int hns_roce_table_get(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, unsigned long obj)
{
struct device *dev = hr_dev->dev;
- int ret = 0;
unsigned long i;
+ int ret = 0;
if (hns_roce_check_whether_mhop(hr_dev, table->type))
return hns_roce_table_mhop_get(hr_dev, table, obj);
@@ -714,15 +725,15 @@ static void clear_mhop_hem(struct hns_roce_dev *hr_dev,
step_idx = hop_num;
if (hr_dev->hw->clear_hem(hr_dev, table, obj, step_idx))
- ibdev_warn(ibdev, "Clear hop%d HEM failed.\n", hop_num);
+ ibdev_warn(ibdev, "failed to clear hop%u HEM.\n", hop_num);
if (index->inited & HEM_INDEX_L1)
if (hr_dev->hw->clear_hem(hr_dev, table, obj, 1))
- ibdev_warn(ibdev, "Clear HEM step 1 failed.\n");
+ ibdev_warn(ibdev, "failed to clear HEM step 1.\n");
if (index->inited & HEM_INDEX_L0)
if (hr_dev->hw->clear_hem(hr_dev, table, obj, 0))
- ibdev_warn(ibdev, "Clear HEM step 0 failed.\n");
+ ibdev_warn(ibdev, "failed to clear HEM step 0.\n");
}
}
@@ -789,14 +800,14 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_chunk *chunk;
struct hns_roce_hem_mhop mhop;
struct hns_roce_hem *hem;
- void *addr = NULL;
unsigned long mhop_obj = obj;
unsigned long obj_per_chunk;
unsigned long idx_offset;
int offset, dma_offset;
+ void *addr = NULL;
+ u32 hem_idx = 0;
int length;
int i, j;
- u32 hem_idx = 0;
if (!table->lowmem)
return NULL;
@@ -876,7 +887,7 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
unsigned long buf_chunk_size;
unsigned long bt_chunk_size;
unsigned long bt_chunk_num;
- unsigned long num_bt_l0 = 0;
+ unsigned long num_bt_l0;
u32 hop_num;
if (get_hem_table_config(hr_dev, &mhop, type))
@@ -966,8 +977,8 @@ static void hns_roce_cleanup_mhop_hem_table(struct hns_roce_dev *hr_dev,
{
struct hns_roce_hem_mhop mhop;
u32 buf_chunk_size;
- int i;
u64 obj;
+ int i;
if (hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop))
return;
@@ -1017,7 +1028,7 @@ void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev)
{
- if (hr_dev->caps.srqc_entry_sz)
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
hns_roce_cleanup_hem_table(hr_dev,
&hr_dev->srq_table.table);
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);
@@ -1027,12 +1038,16 @@ void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev)
if (hr_dev->caps.cqc_timer_entry_sz)
hns_roce_cleanup_hem_table(hr_dev,
&hr_dev->cqc_timer_table);
- if (hr_dev->caps.sccc_sz)
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL)
hns_roce_cleanup_hem_table(hr_dev,
&hr_dev->qp_table.sccc_table);
if (hr_dev->caps.trrl_entry_sz)
hns_roce_cleanup_hem_table(hr_dev,
&hr_dev->qp_table.trrl_table);
+
+ if (hr_dev->caps.gmv_entry_sz)
+ hns_roce_cleanup_hem_table(hr_dev, &hr_dev->gmv_table);
+
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table);
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table);
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);
@@ -1234,7 +1249,7 @@ static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev,
}
if (offset < r->offset) {
- dev_err(hr_dev->dev, "invalid offset %d,min %d!\n",
+ dev_err(hr_dev->dev, "invalid offset %d, min %u!\n",
offset, r->offset);
return -EINVAL;
}
@@ -1298,8 +1313,8 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
const struct hns_roce_buf_region *regions,
int region_cnt)
{
- struct roce_hem_item *hem, *temp_hem, *root_hem;
struct list_head temp_list[HNS_ROCE_MAX_BT_REGION];
+ struct roce_hem_item *hem, *temp_hem, *root_hem;
const struct hns_roce_buf_region *r;
struct list_head temp_root;
struct list_head temp_btm;
@@ -1404,8 +1419,8 @@ int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
{
const struct hns_roce_buf_region *r;
int ofs, end;
- int ret;
int unit;
+ int ret;
int i;
if (region_cnt > HNS_ROCE_MAX_BT_REGION) {
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.h b/drivers/infiniband/hw/hns/hns_roce_hem.h
index b34c940077bb..13fdeb3274e7 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.h
@@ -47,6 +47,7 @@ enum {
HEM_TYPE_SCCC,
HEM_TYPE_QPC_TIMER,
HEM_TYPE_CQC_TIMER,
+ HEM_TYPE_GMV,
/* UNMAP HEM */
HEM_TYPE_MTT,
@@ -174,4 +175,4 @@ static inline dma_addr_t hns_roce_hem_addr(struct hns_roce_hem_iter *iter)
return sg_dma_address(&iter->chunk->mem[iter->page_idx]);
}
-#endif /*_HNS_ROCE_HEM_H*/
+#endif /* _HNS_ROCE_HEM_H */
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 5f4d8a32ed6d..f68585ff8e8a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -239,7 +239,7 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
break;
}
- /*Ctrl field, ctrl set type: sig, solic, imm, fence */
+ /* Ctrl field, ctrl set type: sig, solic, imm, fence */
/* SO wait for conforming application scenarios */
ctrl->flag |= (wr->send_flags & IB_SEND_SIGNALED ?
cpu_to_le32(HNS_ROCE_WQE_CQ_NOTIFY) : 0) |
@@ -288,7 +288,7 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
ret = -EINVAL;
*bad_wr = wr;
dev_err(dev, "inline len(1-%d)=%d, illegal",
- ctrl->msg_length,
+ le32_to_cpu(ctrl->msg_length),
hr_dev->caps.max_sq_inline);
goto out;
}
@@ -300,7 +300,7 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
}
ctrl->flag |= cpu_to_le32(HNS_ROCE_WQE_INLINE);
} else {
- /*sqe num is two */
+ /* sqe num is two */
for (i = 0; i < wr->num_sge; i++)
set_data_seg(dseg + i, wr->sg_list + i);
@@ -353,8 +353,8 @@ static int hns_roce_v1_post_recv(struct ib_qp *ibqp,
unsigned long flags = 0;
unsigned int wqe_idx;
int ret = 0;
- int nreq = 0;
- int i = 0;
+ int nreq;
+ int i;
u32 reg_val;
spin_lock_irqsave(&hr_qp->rq.lock, flags);
@@ -1165,7 +1165,7 @@ static int hns_roce_raq_init(struct hns_roce_dev *hr_dev)
}
raq->e_raq_buf->map = addr;
- /* Configure raq extended address. 48bit 4K align*/
+ /* Configure raq extended address. 48bit 4K align */
roce_write(hr_dev, ROCEE_EXT_RAQ_REG, raq->e_raq_buf->map >> 12);
/* Configure raq_shift */
@@ -1639,7 +1639,7 @@ static int hns_roce_v1_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
}
static int hns_roce_v1_chk_mbox(struct hns_roce_dev *hr_dev,
- unsigned long timeout)
+ unsigned int timeout)
{
u8 __iomem *hcr = hr_dev->reg_base + ROCEE_MB1_REG;
unsigned long end;
@@ -2062,11 +2062,6 @@ static void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S, 0);
}
-static int hns_roce_v1_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
-{
- return -EOPNOTSUPP;
-}
-
static int hns_roce_v1_req_notify_cq(struct ib_cq *ibcq,
enum ib_cq_notify_flags flags)
{
@@ -2305,7 +2300,7 @@ int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
struct hns_roce_qp *cur_qp = NULL;
unsigned long flags;
int npolled;
- int ret = 0;
+ int ret;
spin_lock_irqsave(&hr_cq->lock, flags);
@@ -2765,7 +2760,6 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
roce_set_field(context->qpc_bytes_16,
QP_CONTEXT_QPC_BYTES_16_QP_NUM_M,
QP_CONTEXT_QPC_BYTES_16_QP_NUM_S, hr_qp->qpn);
-
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
roce_set_field(context->qpc_bytes_4,
QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M,
@@ -3261,6 +3255,8 @@ static int hns_roce_v1_modify_qp(struct ib_qp *ibqp,
enum ib_qp_state cur_state,
enum ib_qp_state new_state)
{
+ if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
+ return -EOPNOTSUPP;
if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
return hns_roce_v1_m_sqp(ibqp, attr, attr_mask, cur_state,
@@ -3604,10 +3600,10 @@ static int hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
return 0;
}
-static void set_eq_cons_index_v1(struct hns_roce_eq *eq, int req_not)
+static void set_eq_cons_index_v1(struct hns_roce_eq *eq, u32 req_not)
{
roce_raw_write((eq->cons_index & HNS_ROCE_V1_CONS_IDX_M) |
- (req_not << eq->log_entries), eq->doorbell);
+ (req_not << eq->log_entries), eq->doorbell);
}
static void hns_roce_v1_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
@@ -3687,10 +3683,10 @@ static void hns_roce_v1_qp_err_handle(struct hns_roce_dev *hr_dev,
int phy_port;
int qpn;
- qpn = roce_get_field(aeqe->event.qp_event.qp,
+ qpn = roce_get_field(aeqe->event.queue_event.num,
HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
- phy_port = roce_get_field(aeqe->event.qp_event.qp,
+ phy_port = roce_get_field(aeqe->event.queue_event.num,
HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M,
HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S);
if (qpn <= 1)
@@ -3721,9 +3717,9 @@ static void hns_roce_v1_cq_err_handle(struct hns_roce_dev *hr_dev,
struct device *dev = &hr_dev->pdev->dev;
u32 cqn;
- cqn = roce_get_field(aeqe->event.cq_event.cq,
- HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
- HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S);
+ cqn = roce_get_field(aeqe->event.queue_event.num,
+ HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
+ HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S);
switch (event_type) {
case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
@@ -3798,7 +3794,6 @@ static int hns_roce_v1_aeq_int(struct hns_roce_dev *hr_dev,
int event_type;
while ((aeqe = next_aeqe_sw_v1(eq))) {
-
/* Make sure we read the AEQ entry after we have checked the
* ownership bit
*/
@@ -3853,12 +3848,6 @@ static int hns_roce_v1_aeq_int(struct hns_roce_dev *hr_dev,
case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
hns_roce_v1_db_overflow_handle(hr_dev, aeqe);
break;
- case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
- dev_warn(dev, "CEQ 0x%lx overflow.\n",
- roce_get_field(aeqe->event.ce_event.ceqe,
- HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_M,
- HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S));
- break;
default:
dev_warn(dev, "Unhandled event %d on EQ %d at idx %u.\n",
event_type, eq->eqn, eq->cons_index);
@@ -3903,7 +3892,6 @@ static int hns_roce_v1_ceq_int(struct hns_roce_dev *hr_dev,
u32 cqn;
while ((ceqe = next_ceqe_sw_v1(eq))) {
-
/* Make sure we read CEQ entry after we have checked the
* ownership bit
*/
@@ -4129,7 +4117,7 @@ static int hns_roce_v1_create_eq(struct hns_roce_dev *hr_dev,
void __iomem *eqc = hr_dev->eq_table.eqc_base[eq->eqn];
struct device *dev = &hr_dev->pdev->dev;
dma_addr_t tmp_dma_addr;
- u32 eqcuridx_val = 0;
+ u32 eqcuridx_val;
u32 eqconsindx_val;
u32 eqshift_val;
__le32 tmp2 = 0;
@@ -4347,7 +4335,6 @@ static void hns_roce_v1_cleanup_eq_table(struct hns_roce_dev *hr_dev)
static const struct ib_device_ops hns_roce_v1_dev_ops = {
.destroy_qp = hns_roce_v1_destroy_qp,
- .modify_cq = hns_roce_v1_modify_cq,
.poll_cq = hns_roce_v1_poll_cq,
.post_recv = hns_roce_v1_post_recv,
.post_send = hns_roce_v1_post_send,
@@ -4367,7 +4354,6 @@ static const struct hns_roce_hw hns_roce_hw_v1 = {
.set_mtu = hns_roce_v1_set_mtu,
.write_mtpt = hns_roce_v1_write_mtpt,
.write_cqc = hns_roce_v1_write_cqc,
- .modify_cq = hns_roce_v1_modify_cq,
.clear_hem = hns_roce_v1_clear_hem,
.modify_qp = hns_roce_v1_modify_qp,
.query_qp = hns_roce_v1_query_qp,
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
index ffd0156080f5..46ab0a321d21 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
@@ -419,7 +419,7 @@ struct hns_roce_wqe_data_seg {
struct hns_roce_wqe_raddr_seg {
__le32 rkey;
- __le32 len;/* reserved */
+ __le32 len; /* reserved */
__le64 raddr;
};
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 0468028ffe39..833e1f259936 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -214,25 +214,20 @@ static int fill_ext_sge_inl_data(struct hns_roce_qp *qp,
return 0;
}
-static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
- unsigned int *sge_ind, unsigned int valid_num_sge)
+static void set_extend_sge(struct hns_roce_qp *qp, struct ib_sge *sge,
+ unsigned int *sge_ind, unsigned int cnt)
{
struct hns_roce_v2_wqe_data_seg *dseg;
- unsigned int cnt = valid_num_sge;
- struct ib_sge *sge = wr->sg_list;
unsigned int idx = *sge_ind;
- if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
- cnt -= HNS_ROCE_SGE_IN_WQE;
- sge += HNS_ROCE_SGE_IN_WQE;
- }
-
while (cnt > 0) {
dseg = hns_roce_get_extend_sge(qp, idx & (qp->sge.sge_cnt - 1));
- set_data_seg_v2(dseg, sge);
- idx++;
+ if (likely(sge->length)) {
+ set_data_seg_v2(dseg, sge);
+ idx++;
+ cnt--;
+ }
sge++;
- cnt--;
}
*sge_ind = idx;
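With the rework above, set_extend_sge() takes an explicit SGE pointer plus a count of valid entries, and the loop now skips zero-length entries instead of copying them. A standalone sketch of that skip pattern with invented sge/copy_valid names; as in the driver, the caller must guarantee at least cnt nonzero-length entries exist:

#include <stdio.h>

struct sge {
	unsigned int length;
};

/* Copy cnt valid (nonzero-length) entries from src, skipping holes. */
static void copy_valid(struct sge *dst, const struct sge *src,
		       unsigned int cnt)
{
	while (cnt > 0) {
		if (src->length) {
			*dst++ = *src;
			cnt--;
		}
		src++;
	}
}

int main(void)
{
	struct sge in[4] = { { 16 }, { 0 }, { 32 }, { 64 } };	/* one hole */
	struct sge out[3];

	copy_valid(out, in, 3);		/* cnt counts valid entries only */
	printf("%u %u %u\n", out[0].length, out[1].length, out[2].length);
	return 0;
}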
@@ -340,7 +335,8 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
}
}
- set_extend_sge(qp, wr, sge_ind, valid_num_sge);
+ set_extend_sge(qp, wr->sg_list + i, sge_ind,
+ valid_num_sge - HNS_ROCE_SGE_IN_WQE);
}
roce_set_field(rc_sq_wqe->byte_16,
@@ -365,7 +361,7 @@ static int check_send_valid(struct hns_roce_dev *hr_dev,
} else if (unlikely(hr_qp->state == IB_QPS_RESET ||
hr_qp->state == IB_QPS_INIT ||
hr_qp->state == IB_QPS_RTR)) {
- ibdev_err(ibdev, "failed to post WQE, QP state %d!\n",
+ ibdev_err(ibdev, "failed to post WQE, QP state %hhu!\n",
hr_qp->state);
return -EINVAL;
} else if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN)) {
@@ -422,19 +418,54 @@ static int set_ud_opcode(struct hns_roce_v2_ud_send_wqe *ud_sq_wqe,
return 0;
}
+static int fill_ud_av(struct hns_roce_v2_ud_send_wqe *ud_sq_wqe,
+ struct hns_roce_ah *ah)
+{
+ struct ib_device *ib_dev = ah->ibah.device;
+ struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
+
+ roce_set_field(ud_sq_wqe->byte_24, V2_UD_SEND_WQE_BYTE_24_UDPSPN_M,
+ V2_UD_SEND_WQE_BYTE_24_UDPSPN_S, ah->av.udp_sport);
+
+ roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M,
+ V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S, ah->av.hop_limit);
+ roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_TCLASS_M,
+ V2_UD_SEND_WQE_BYTE_36_TCLASS_S, ah->av.tclass);
+ roce_set_field(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M,
+ V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S, ah->av.flowlabel);
+
+ if (WARN_ON(ah->av.sl > MAX_SERVICE_LEVEL))
+ return -EINVAL;
+
+ roce_set_field(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_SL_M,
+ V2_UD_SEND_WQE_BYTE_40_SL_S, ah->av.sl);
+
+ ud_sq_wqe->sgid_index = ah->av.gid_index;
+
+ memcpy(ud_sq_wqe->dmac, ah->av.mac, ETH_ALEN);
+ memcpy(ud_sq_wqe->dgid, ah->av.dgid, GID_LEN_V2);
+
+ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
+ return 0;
+
+ roce_set_bit(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_UD_VLAN_EN_S,
+ ah->av.vlan_en);
+ roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_VLAN_M,
+ V2_UD_SEND_WQE_BYTE_36_VLAN_S, ah->av.vlan_id);
+
+ return 0;
+}
+
static inline int set_ud_wqe(struct hns_roce_qp *qp,
const struct ib_send_wr *wr,
void *wqe, unsigned int *sge_idx,
unsigned int owner_bit)
{
- struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
struct hns_roce_v2_ud_send_wqe *ud_sq_wqe = wqe;
unsigned int curr_idx = *sge_idx;
- int valid_num_sge;
+ unsigned int valid_num_sge;
u32 msg_len = 0;
- bool loopback;
- u8 *smac;
int ret;
valid_num_sge = calc_wr_sge_num(wr, &msg_len);
@@ -444,38 +475,13 @@ static inline int set_ud_wqe(struct hns_roce_qp *qp,
if (WARN_ON(ret))
return ret;
- roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_0_M,
- V2_UD_SEND_WQE_DMAC_0_S, ah->av.mac[0]);
- roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_1_M,
- V2_UD_SEND_WQE_DMAC_1_S, ah->av.mac[1]);
- roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_2_M,
- V2_UD_SEND_WQE_DMAC_2_S, ah->av.mac[2]);
- roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_3_M,
- V2_UD_SEND_WQE_DMAC_3_S, ah->av.mac[3]);
- roce_set_field(ud_sq_wqe->byte_48, V2_UD_SEND_WQE_BYTE_48_DMAC_4_M,
- V2_UD_SEND_WQE_BYTE_48_DMAC_4_S, ah->av.mac[4]);
- roce_set_field(ud_sq_wqe->byte_48, V2_UD_SEND_WQE_BYTE_48_DMAC_5_M,
- V2_UD_SEND_WQE_BYTE_48_DMAC_5_S, ah->av.mac[5]);
-
- /* MAC loopback */
- smac = (u8 *)hr_dev->dev_addr[qp->port];
- loopback = ether_addr_equal_unaligned(ah->av.mac, smac) ? 1 : 0;
-
- roce_set_bit(ud_sq_wqe->byte_40,
- V2_UD_SEND_WQE_BYTE_40_LBI_S, loopback);
-
ud_sq_wqe->msg_len = cpu_to_le32(msg_len);
- /* Set sig attr */
roce_set_bit(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_CQE_S,
- (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
+ !!(wr->send_flags & IB_SEND_SIGNALED));
- /* Set se attr */
roce_set_bit(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_SE_S,
- (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);
-
- roce_set_bit(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_OWNER_S,
- owner_bit);
+ !!(wr->send_flags & IB_SEND_SOLICITED));
roce_set_field(ud_sq_wqe->byte_16, V2_UD_SEND_WQE_BYTE_16_PD_M,
V2_UD_SEND_WQE_BYTE_16_PD_S, to_hr_pd(qp->ibqp.pd)->pdn);
@@ -488,36 +494,29 @@ static inline int set_ud_wqe(struct hns_roce_qp *qp,
V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
curr_idx & (qp->sge.sge_cnt - 1));
- roce_set_field(ud_sq_wqe->byte_24, V2_UD_SEND_WQE_BYTE_24_UDPSPN_M,
- V2_UD_SEND_WQE_BYTE_24_UDPSPN_S, ah->av.udp_sport);
ud_sq_wqe->qkey = cpu_to_le32(ud_wr(wr)->remote_qkey & 0x80000000 ?
qp->qkey : ud_wr(wr)->remote_qkey);
roce_set_field(ud_sq_wqe->byte_32, V2_UD_SEND_WQE_BYTE_32_DQPN_M,
V2_UD_SEND_WQE_BYTE_32_DQPN_S, ud_wr(wr)->remote_qpn);
- roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_VLAN_M,
- V2_UD_SEND_WQE_BYTE_36_VLAN_S, ah->av.vlan_id);
- roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M,
- V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S, ah->av.hop_limit);
- roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_TCLASS_M,
- V2_UD_SEND_WQE_BYTE_36_TCLASS_S, ah->av.tclass);
- roce_set_field(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M,
- V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S, ah->av.flowlabel);
- roce_set_field(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_SL_M,
- V2_UD_SEND_WQE_BYTE_40_SL_S, ah->av.sl);
- roce_set_field(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_PORTN_M,
- V2_UD_SEND_WQE_BYTE_40_PORTN_S, qp->port);
-
- roce_set_bit(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_UD_VLAN_EN_S,
- ah->av.vlan_en ? 1 : 0);
- roce_set_field(ud_sq_wqe->byte_48, V2_UD_SEND_WQE_BYTE_48_SGID_INDX_M,
- V2_UD_SEND_WQE_BYTE_48_SGID_INDX_S, ah->av.gid_index);
+ ret = fill_ud_av(ud_sq_wqe, ah);
+ if (ret)
+ return ret;
- memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0], GID_LEN_V2);
+ set_extend_sge(qp, wr->sg_list, &curr_idx, valid_num_sge);
- set_extend_sge(qp, wr, &curr_idx, valid_num_sge);
+ /*
+	 * The hardware pipeline may fetch all valid WQEs from the WQ buffer
+	 * in sequence, including new WQEs still waiting for a doorbell to
+	 * update the PI. Therefore, the owner bit of a WQE MUST be updated
+	 * only after all its other fields and extSGEs have been written out
+	 * to DDR rather than merely to cache.
+ */
+ if (qp->en_flags & HNS_ROCE_QP_CAP_OWNER_DB)
+ dma_wmb();
*sge_idx = curr_idx;
+ roce_set_bit(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_OWNER_S,
+ owner_bit);
return 0;
}
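The new ordering in set_ud_wqe() (and in set_rc_wqe() below) writes the owner bit last, behind a dma_wmb(), so the device can never observe a claimed WQE with stale payload. A userspace sketch of the same producer-side handoff, with C11 release semantics standing in for dma_wmb(); fake_wqe and post_wqe are invented names, not driver structures:

#include <stdatomic.h>
#include <stddef.h>

struct fake_wqe {
	unsigned int payload[15];	/* every non-owner field */
	_Atomic unsigned int owner;	/* polled by the consumer */
};

static void post_wqe(struct fake_wqe *wqe, unsigned int owner_bit)
{
	for (size_t i = 0; i < 15; i++)
		wqe->payload[i] = (unsigned int)i;	/* payload first */

	/*
	 * Release ordering stands in for dma_wmb(): no payload store
	 * above may be reordered past the owner-bit store below.
	 */
	atomic_store_explicit(&wqe->owner, owner_bit, memory_order_release);
}

int main(void)
{
	static struct fake_wqe wqe;

	post_wqe(&wqe, 1);
	return 0;
}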
@@ -591,9 +590,6 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp,
roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_CQE_S,
(wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
- roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OWNER_S,
- owner_bit);
-
if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
set_atomic_seg(wr, rc_sq_wqe, valid_num_sge);
@@ -601,7 +597,18 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp,
ret = set_rwqe_data_seg(&qp->ibqp, wr, rc_sq_wqe,
&curr_idx, valid_num_sge);
+ /*
+	 * The hardware pipeline may fetch all valid WQEs from the WQ buffer
+	 * in sequence, including new WQEs still waiting for a doorbell to
+	 * update the PI. Therefore, the owner bit of a WQE MUST be updated
+	 * only after all its other fields and extSGEs have been written out
+	 * to DDR rather than merely to cache.
+ */
+ if (qp->en_flags & HNS_ROCE_QP_CAP_OWNER_DB)
+ dma_wmb();
+
*sge_idx = curr_idx;
+ roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OWNER_S,
+ owner_bit);
return ret;
}
@@ -649,7 +656,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
unsigned int sge_idx;
unsigned int wqe_idx;
void *wqe = NULL;
- int nreq;
+ u32 nreq;
int ret;
spin_lock_irqsave(&qp->sq.lock, flags);
@@ -673,7 +680,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);
if (unlikely(wr->num_sge > qp->sq.max_gs)) {
- ibdev_err(ibdev, "num_sge=%d > qp->sq.max_gs=%d\n",
+ ibdev_err(ibdev, "num_sge = %d > qp->sq.max_gs = %u.\n",
wr->num_sge, qp->sq.max_gs);
ret = -EINVAL;
*bad_wr = wr;
@@ -686,7 +693,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);
/* Corresponding to the QP type, wqe process separately */
- if (ibqp->qp_type == IB_QPT_GSI)
+ if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
ret = set_ud_wqe(qp, wr, wqe, &sge_idx, owner_bit);
else if (ibqp->qp_type == IB_QPT_RC)
ret = set_rc_wqe(qp, wr, wqe, &sge_idx, owner_bit);
@@ -758,7 +765,7 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
- ibdev_err(ibdev, "rq:num_sge=%d >= qp->sq.max_gs=%d\n",
+ ibdev_err(ibdev, "num_sge = %d >= max_sge = %u.\n",
wr->num_sge, hr_qp->rq.max_gs);
ret = -EINVAL;
*bad_wr = wr;
@@ -827,7 +834,7 @@ static void *get_srq_wqe(struct hns_roce_srq *srq, int n)
return hns_roce_buf_offset(srq->buf_mtr.kmem, n << srq->wqe_shift);
}
-static void *get_idx_buf(struct hns_roce_idx_que *idx_que, int n)
+static void *get_idx_buf(struct hns_roce_idx_que *idx_que, unsigned int n)
{
return hns_roce_buf_offset(idx_que->mtr.kmem,
n << idx_que->entry_shift);
@@ -868,12 +875,12 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
struct hns_roce_v2_wqe_data_seg *dseg;
struct hns_roce_v2_db srq_db;
unsigned long flags;
+ unsigned int ind;
__le32 *srq_idx;
int ret = 0;
int wqe_idx;
void *wqe;
int nreq;
- int ind;
int i;
spin_lock_irqsave(&srq->lock, flags);
@@ -1018,8 +1025,8 @@ static int hns_roce_v2_rst_process_cmd(struct hns_roce_dev *hr_dev)
struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hnae3_handle *handle = priv->handle;
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
- unsigned long instance_stage; /* the current instance stage */
- unsigned long reset_stage; /* the current reset stage */
+ unsigned long instance_stage; /* the current instance stage */
+ unsigned long reset_stage; /* the current reset stage */
unsigned long reset_cnt;
bool sw_resetting;
bool hw_resetting;
@@ -1118,7 +1125,7 @@ static void hns_roce_cmq_init_regs(struct hns_roce_dev *hr_dev, bool ring_type)
roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_H_REG,
upper_32_bits(dma));
roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG,
- ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S);
+ (u32)ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S);
roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, 0);
roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, 0);
} else {
@@ -1126,7 +1133,7 @@ static void hns_roce_cmq_init_regs(struct hns_roce_dev *hr_dev, bool ring_type)
roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_H_REG,
upper_32_bits(dma));
roce_write(hr_dev, ROCEE_RX_CMQ_DEPTH_REG,
- ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S);
+ (u32)ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S);
roce_write(hr_dev, ROCEE_RX_CMQ_HEAD_REG, 0);
roce_write(hr_dev, ROCEE_RX_CMQ_TAIL_REG, 0);
}
@@ -1573,6 +1580,10 @@ static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
PF_RES_DATA_4_PF_SCCC_BT_NUM_M,
PF_RES_DATA_4_PF_SCCC_BT_NUM_S);
+ hr_dev->caps.gmv_bt_num = roce_get_field(req_b->gmv_idx_num,
+ PF_RES_DATA_5_PF_GMV_BT_NUM_M,
+ PF_RES_DATA_5_PF_GMV_BT_NUM_S);
+
return 0;
}
@@ -1896,11 +1907,20 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
caps->ceqe_size = HNS_ROCE_V3_EQE_SIZE;
caps->cqe_sz = HNS_ROCE_V3_CQE_SIZE;
caps->qpc_sz = HNS_ROCE_V3_QPC_SZ;
+ caps->sccc_sz = HNS_ROCE_V3_SCCC_SZ;
+ caps->gmv_entry_sz = HNS_ROCE_V3_GMV_ENTRY_SZ;
+ caps->gmv_entry_num = caps->gmv_bt_num * (PAGE_SIZE /
+ caps->gmv_entry_sz);
+ caps->gmv_hop_num = HNS_ROCE_HOP_NUM_0;
+ caps->gmv_ba_pg_sz = 0;
+ caps->gmv_buf_pg_sz = 0;
+ caps->gid_table_len[0] = caps->gmv_bt_num * (HNS_HW_PAGE_SIZE /
+ caps->gmv_entry_sz);
}
}
-static void calc_pg_sz(int obj_num, int obj_size, int hop_num, int ctx_bt_num,
- int *buf_page_size, int *bt_page_size, u32 hem_type)
+static void calc_pg_sz(u32 obj_num, u32 obj_size, u32 hop_num, u32 ctx_bt_num,
+ u32 *buf_page_size, u32 *bt_page_size, u32 hem_type)
{
u64 obj_per_chunk;
u64 bt_chunk_size = PAGE_SIZE;
@@ -1930,8 +1950,8 @@ static void calc_pg_sz(int obj_num, int obj_size, int hop_num, int ctx_bt_num,
obj_per_chunk = ctx_bt_num * obj_per_chunk_default;
break;
default:
- pr_err("Table %d not support hop_num = %d!\n", hem_type,
- hop_num);
+		pr_err("table %u does not support hop_num = %u!\n", hem_type,
+ hop_num);
return;
}
@@ -2122,6 +2142,14 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
caps->cqe_sz = HNS_ROCE_V3_CQE_SIZE;
caps->qpc_sz = HNS_ROCE_V3_QPC_SZ;
caps->sccc_sz = HNS_ROCE_V3_SCCC_SZ;
+ caps->gmv_entry_sz = HNS_ROCE_V3_GMV_ENTRY_SZ;
+ caps->gmv_entry_num = caps->gmv_bt_num * (PAGE_SIZE /
+ caps->gmv_entry_sz);
+ caps->gmv_hop_num = HNS_ROCE_HOP_NUM_0;
+ caps->gmv_ba_pg_sz = 0;
+ caps->gmv_buf_pg_sz = 0;
+ caps->gid_table_len[0] = caps->gmv_bt_num *
+ (HNS_HW_PAGE_SIZE / caps->gmv_entry_sz);
}
calc_pg_sz(caps->num_qps, caps->qpc_sz, caps->qpc_hop_num,
@@ -2371,10 +2399,10 @@ static int hns_roce_init_link_table(struct hns_roce_dev *hr_dev,
u32 buf_chk_sz;
dma_addr_t t;
int func_num = 1;
- int pg_num_a;
- int pg_num_b;
- int pg_num;
- int size;
+ u32 pg_num_a;
+ u32 pg_num_b;
+ u32 pg_num;
+ u32 size;
int i;
switch (type) {
@@ -2423,7 +2451,6 @@ static int hns_roce_init_link_table(struct hns_roce_dev *hr_dev,
if (i < (pg_num - 1))
entry[i].blk_ba1_nxt_ptr |=
(i + 1) << HNS_ROCE_LINK_TABLE_NXT_PTR_S;
-
}
link_tbl->npages = pg_num;
link_tbl->pg_sz = buf_chk_sz;
@@ -2465,24 +2492,13 @@ static void hns_roce_free_link_table(struct hns_roce_dev *hr_dev,
link_tbl->table.map);
}
-static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
+static int get_hem_table(struct hns_roce_dev *hr_dev)
{
- struct hns_roce_v2_priv *priv = hr_dev->priv;
- int qpc_count, cqc_count;
- int ret, i;
-
- /* TSQ includes SQ doorbell and ack doorbell */
- ret = hns_roce_init_link_table(hr_dev, TSQ_LINK_TABLE);
- if (ret) {
- dev_err(hr_dev->dev, "TSQ init failed, ret = %d.\n", ret);
- return ret;
- }
-
- ret = hns_roce_init_link_table(hr_dev, TPQ_LINK_TABLE);
- if (ret) {
- dev_err(hr_dev->dev, "TPQ init failed, ret = %d.\n", ret);
- goto err_tpq_init_failed;
- }
+ unsigned int qpc_count;
+ unsigned int cqc_count;
+ unsigned int gmv_count;
+ int ret;
+ int i;
/* Alloc memory for QPC Timer buffer space chunk */
for (qpc_count = 0; qpc_count < hr_dev->caps.qpc_timer_bt_num;
@@ -2506,8 +2522,23 @@ static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
}
}
+	/* Alloc memory for GMV (GID/MAC/VLAN) table buffer space chunk */
+ for (gmv_count = 0; gmv_count < hr_dev->caps.gmv_entry_num;
+ gmv_count++) {
+ ret = hns_roce_table_get(hr_dev, &hr_dev->gmv_table, gmv_count);
+ if (ret) {
+ dev_err(hr_dev->dev,
+ "failed to get gmv table, ret = %d.\n", ret);
+ goto err_gmv_failed;
+ }
+ }
+
return 0;
+err_gmv_failed:
+ for (i = 0; i < gmv_count; i++)
+ hns_roce_table_put(hr_dev, &hr_dev->gmv_table, i);
+
err_cqc_timer_failed:
for (i = 0; i < cqc_count; i++)
hns_roce_table_put(hr_dev, &hr_dev->cqc_timer_table, i);
@@ -2516,6 +2547,34 @@ err_qpc_timer_failed:
for (i = 0; i < qpc_count; i++)
hns_roce_table_put(hr_dev, &hr_dev->qpc_timer_table, i);
+ return ret;
+}
+
+static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ int ret;
+
+ /* TSQ includes SQ doorbell and ack doorbell */
+ ret = hns_roce_init_link_table(hr_dev, TSQ_LINK_TABLE);
+ if (ret) {
+ dev_err(hr_dev->dev, "failed to init TSQ, ret = %d.\n", ret);
+ return ret;
+ }
+
+ ret = hns_roce_init_link_table(hr_dev, TPQ_LINK_TABLE);
+ if (ret) {
+ dev_err(hr_dev->dev, "failed to init TPQ, ret = %d.\n", ret);
+ goto err_tpq_init_failed;
+ }
+
+ ret = get_hem_table(hr_dev);
+ if (ret)
+ goto err_get_hem_table_failed;
+
+ return 0;
+
+err_get_hem_table_failed:
hns_roce_free_link_table(hr_dev, &priv->tpq);
err_tpq_init_failed:
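get_hem_table() above acquires gmv_entry_num table entries in a loop and, on failure, releases exactly the entries already acquired before falling through to the earlier error labels. A minimal sketch of that acquire/unwind pattern; table_get, table_put, and the failing index are hypothetical:

#include <stdio.h>

/* Hypothetical acquire/release pair; entry 5 fails for demonstration. */
static int table_get(unsigned int i) { return i == 5 ? -1 : 0; }
static void table_put(unsigned int i) { printf("put %u\n", i); }

static int get_all(unsigned int n)
{
	unsigned int i, got;
	int ret = 0;

	for (got = 0; got < n; got++) {
		ret = table_get(got);
		if (ret)
			goto err_unwind;
	}
	return 0;

err_unwind:
	for (i = 0; i < got; i++)	/* release only what was acquired */
		table_put(i);
	return ret;
}

int main(void)
{
	return get_all(8) ? 1 : 0;	/* puts entries 0..4, returns error */
}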
@@ -2539,7 +2598,7 @@ static int hns_roce_query_mbox_status(struct hns_roce_dev *hr_dev)
struct hns_roce_cmq_desc desc;
struct hns_roce_mbox_status *mb_st =
(struct hns_roce_mbox_status *)desc.data;
- enum hns_roce_cmd_return_status status;
+ int status;
hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_MB_ST, true);
@@ -2610,7 +2669,7 @@ static int hns_roce_v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
}
static int hns_roce_v2_chk_mbox(struct hns_roce_dev *hr_dev,
- unsigned long timeout)
+ unsigned int timeout)
{
struct device *dev = hr_dev->dev;
unsigned long end;
@@ -2637,14 +2696,27 @@ static int hns_roce_v2_chk_mbox(struct hns_roce_dev *hr_dev,
return 0;
}
-static int hns_roce_config_sgid_table(struct hns_roce_dev *hr_dev,
- int gid_index, const union ib_gid *gid,
- enum hns_roce_sgid_type sgid_type)
+static void copy_gid(void *dest, const union ib_gid *gid)
+{
+#define GID_SIZE 4
+ const union ib_gid *src = gid;
+ __le32 (*p)[GID_SIZE] = dest;
+ int i;
+
+ if (!gid)
+ src = &zgid;
+
+ for (i = 0; i < GID_SIZE; i++)
+ (*p)[i] = cpu_to_le32(*(u32 *)&src->raw[i * sizeof(u32)]);
+}
+
+static int config_sgid_table(struct hns_roce_dev *hr_dev,
+ int gid_index, const union ib_gid *gid,
+ enum hns_roce_sgid_type sgid_type)
{
struct hns_roce_cmq_desc desc;
struct hns_roce_cfg_sgid_tb *sgid_tb =
(struct hns_roce_cfg_sgid_tb *)desc.data;
- u32 *p;
hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SGID_TB, false);
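copy_gid() above folds the four per-word copies into one loop and substitutes zgid when the caller passes a NULL gid (GID deletion). A standalone sketch of the same 16-byte-to-four-words packing, using memcpy instead of the kernel's pointer cast; pack_gid is an invented name, and cpu_to_le32() is a no-op on little-endian CPUs:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void pack_gid(uint32_t dst[4], const uint8_t raw[16])
{
	for (int i = 0; i < 4; i++) {
		uint32_t w;

		/* memcpy sidesteps the aliasing cast the kernel tolerates */
		memcpy(&w, &raw[i * 4], sizeof(w));
		dst[i] = w;	/* cpu_to_le32(w) would be applied here */
	}
}

int main(void)
{
	uint8_t raw[16] = { 0xfe, 0x80, 0, 0, 0, 0, 0, 0,
			    0, 0, 0, 0, 0, 0, 0, 1 };	/* fe80::1 */
	uint32_t words[4];

	pack_gid(words, raw);
	printf("%08" PRIx32 " %08" PRIx32 " %08" PRIx32 " %08" PRIx32 "\n",
	       words[0], words[1], words[2], words[3]);
	return 0;
}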
@@ -2653,19 +2725,54 @@ static int hns_roce_config_sgid_table(struct hns_roce_dev *hr_dev,
roce_set_field(sgid_tb->vf_sgid_type_rsv, CFG_SGID_TB_VF_SGID_TYPE_M,
CFG_SGID_TB_VF_SGID_TYPE_S, sgid_type);
- p = (u32 *)&gid->raw[0];
- sgid_tb->vf_sgid_l = cpu_to_le32(*p);
+ copy_gid(&sgid_tb->vf_sgid_l, gid);
- p = (u32 *)&gid->raw[4];
- sgid_tb->vf_sgid_ml = cpu_to_le32(*p);
+ return hns_roce_cmq_send(hr_dev, &desc, 1);
+}
- p = (u32 *)&gid->raw[8];
- sgid_tb->vf_sgid_mh = cpu_to_le32(*p);
+static int config_gmv_table(struct hns_roce_dev *hr_dev,
+ int gid_index, const union ib_gid *gid,
+ enum hns_roce_sgid_type sgid_type,
+ const struct ib_gid_attr *attr)
+{
+ struct hns_roce_cmq_desc desc[2];
+ struct hns_roce_cfg_gmv_tb_a *tb_a =
+ (struct hns_roce_cfg_gmv_tb_a *)desc[0].data;
+ struct hns_roce_cfg_gmv_tb_b *tb_b =
+ (struct hns_roce_cfg_gmv_tb_b *)desc[1].data;
- p = (u32 *)&gid->raw[0xc];
- sgid_tb->vf_sgid_h = cpu_to_le32(*p);
+ u16 vlan_id = VLAN_CFI_MASK;
+ u8 mac[ETH_ALEN] = {};
+ int ret;
- return hns_roce_cmq_send(hr_dev, &desc, 1);
+ if (gid) {
+ ret = rdma_read_gid_l2_fields(attr, &vlan_id, mac);
+ if (ret)
+ return ret;
+ }
+
+ hns_roce_cmq_setup_basic_desc(&desc[0], HNS_ROCE_OPC_CFG_GMV_TBL, false);
+ desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
+
+ hns_roce_cmq_setup_basic_desc(&desc[1], HNS_ROCE_OPC_CFG_GMV_TBL, false);
+
+ copy_gid(&tb_a->vf_sgid_l, gid);
+
+ roce_set_field(tb_a->vf_sgid_type_vlan, CFG_GMV_TB_VF_SGID_TYPE_M,
+ CFG_GMV_TB_VF_SGID_TYPE_S, sgid_type);
+ roce_set_bit(tb_a->vf_sgid_type_vlan, CFG_GMV_TB_VF_VLAN_EN_S,
+ vlan_id < VLAN_CFI_MASK);
+ roce_set_field(tb_a->vf_sgid_type_vlan, CFG_GMV_TB_VF_VLAN_ID_M,
+ CFG_GMV_TB_VF_VLAN_ID_S, vlan_id);
+
+ tb_b->vf_smac_l = cpu_to_le32(*(u32 *)mac);
+ roce_set_field(tb_b->vf_smac_h, CFG_GMV_TB_SMAC_H_M,
+ CFG_GMV_TB_SMAC_H_S, *(u16 *)&mac[4]);
+
+ roce_set_field(tb_b->table_idx_rsv, CFG_GMV_TB_SGID_IDX_M,
+ CFG_GMV_TB_SGID_IDX_S, gid_index);
+
+ return hns_roce_cmq_send(hr_dev, desc, 2);
}
static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u8 port,
@@ -2675,23 +2782,24 @@ static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u8 port,
enum hns_roce_sgid_type sgid_type = GID_TYPE_FLAG_ROCE_V1;
int ret;
- if (!gid || !attr)
- return -EINVAL;
-
- if (attr->gid_type == IB_GID_TYPE_ROCE)
- sgid_type = GID_TYPE_FLAG_ROCE_V1;
-
- if (attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
- if (ipv6_addr_v4mapped((void *)gid))
- sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV4;
- else
- sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV6;
+ if (gid) {
+ if (attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
+ if (ipv6_addr_v4mapped((void *)gid))
+ sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV4;
+ else
+ sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV6;
+ } else if (attr->gid_type == IB_GID_TYPE_ROCE) {
+ sgid_type = GID_TYPE_FLAG_ROCE_V1;
+ }
}
- ret = hns_roce_config_sgid_table(hr_dev, gid_index, gid, sgid_type);
+ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
+ ret = config_gmv_table(hr_dev, gid_index, gid, sgid_type, attr);
+ else
+ ret = config_sgid_table(hr_dev, gid_index, gid, sgid_type);
+
if (ret)
- ibdev_err(&hr_dev->ib_dev,
- "failed to configure sgid table, ret = %d!\n",
+ ibdev_err(&hr_dev->ib_dev, "failed to set gid, ret = %d!\n",
ret);
return ret;
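The reworked hns_roce_v2_set_gid() above only inspects attr when a gid is present, and classifies RoCEv2 GIDs by whether they are v4-mapped IPv6 addresses (::ffff:a.b.c.d). A minimal sketch of that classification check; gid_is_v4_mapped is an invented stand-in for ipv6_addr_v4mapped():

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* A v4-mapped IPv6 address is 80 zero bits, then 0xffff, then IPv4. */
static bool gid_is_v4_mapped(const uint8_t gid[16])
{
	static const uint8_t prefix[12] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff,
	};

	return memcmp(gid, prefix, sizeof(prefix)) == 0;
}

int main(void)
{
	uint8_t gid[16] = { [10] = 0xff, [11] = 0xff,
			    [12] = 192, [13] = 0, [14] = 2, [15] = 1 };

	assert(gid_is_v4_mapped(gid));	/* -> GID_TYPE_FLAG_ROCE_V2_IPV4 */
	return 0;
}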
@@ -2959,7 +3067,7 @@ static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
return hns_roce_buf_offset(hr_cq->mtr.kmem, n * hr_cq->cqe_size);
}
-static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, int n)
+static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, unsigned int n)
{
struct hns_roce_v2_cqe *cqe = get_cqe_v2(hr_cq, n & hr_cq->ib_cq.cqe);
@@ -3060,6 +3168,9 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
V2_CQC_BYTE_8_CQE_SIZE_S, hr_cq->cqe_size ==
HNS_ROCE_V3_CQE_SIZE ? 1 : 0);
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_STASH)
+ hr_reg_enable(cq_context, CQC_STASH);
+
cq_context->cqe_cur_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[0]));
roce_set_field(cq_context->byte_16_hop_addr,
@@ -3303,7 +3414,7 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
int is_send;
u16 wqe_ctr;
u32 opcode;
- int qpn;
+ u32 qpn;
int ret;
/* Find cqe according to consumer index */
@@ -3572,7 +3683,7 @@ static int get_op_for_set_hem(struct hns_roce_dev *hr_dev, u32 type,
break;
default:
dev_warn(hr_dev->dev,
- "Table %d not to be written by mailbox!\n", type);
+			"table %u cannot be written through the mailbox!\n", type);
return -EINVAL;
}
@@ -3583,9 +3694,25 @@ static int set_hem_to_hw(struct hns_roce_dev *hr_dev, int obj, u64 bt_ba,
u32 hem_type, int step_idx)
{
struct hns_roce_cmd_mailbox *mailbox;
+ struct hns_roce_cmq_desc desc;
+ struct hns_roce_cfg_gmv_bt *gmv_bt =
+ (struct hns_roce_cfg_gmv_bt *)desc.data;
int ret;
int op;
+ if (hem_type == HEM_TYPE_GMV) {
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GMV_BT,
+ false);
+
+ gmv_bt->gmv_ba_l = cpu_to_le32(bt_ba >> HNS_HW_PAGE_SHIFT);
+ gmv_bt->gmv_ba_h = cpu_to_le32(bt_ba >> (HNS_HW_PAGE_SHIFT +
+ 32));
+ gmv_bt->gmv_bt_idx = cpu_to_le32(obj /
+ (HNS_HW_PAGE_SIZE / hr_dev->caps.gmv_entry_sz));
+
+ return hns_roce_cmq_send(hr_dev, &desc, 1);
+ }
+
op = get_op_for_set_hem(hr_dev, hem_type, step_idx);
if (op < 0)
return 0;
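For HEM_TYPE_GMV the hunk above bypasses the mailbox entirely and issues a CMQ command instead, with gmv_bt_idx derived from how many GMV entries fit in one hardware page. A worked check of that division, assuming 4 KB hardware pages (HNS_HW_PAGE_SHIFT = 12) and the 32-byte HNS_ROCE_V3_GMV_ENTRY_SZ defined later in this patch:

#include <assert.h>

int main(void)
{
	unsigned int page_sz = 4096;	/* HNS_HW_PAGE_SIZE (assumed) */
	unsigned int entry_sz = 32;	/* HNS_ROCE_V3_GMV_ENTRY_SZ */
	unsigned int obj = 300;		/* GMV table entry index */

	/* 128 entries per page, so entry 300 lives in BT page 2 */
	assert(obj / (page_sz / entry_sz) == 2);
	return 0;
}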
@@ -3683,24 +3810,20 @@ static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev,
case HEM_TYPE_CQC:
op = HNS_ROCE_CMD_DESTROY_CQC_BT0;
break;
- case HEM_TYPE_SCCC:
- case HEM_TYPE_QPC_TIMER:
- case HEM_TYPE_CQC_TIMER:
- break;
case HEM_TYPE_SRQC:
op = HNS_ROCE_CMD_DESTROY_SRQC_BT0;
break;
+ case HEM_TYPE_SCCC:
+ case HEM_TYPE_QPC_TIMER:
+ case HEM_TYPE_CQC_TIMER:
+ case HEM_TYPE_GMV:
+ return 0;
default:
- dev_warn(dev, "Table %d not to be destroyed by mailbox!\n",
+		dev_warn(dev, "table %u cannot be destroyed through the mailbox!\n",
table->type);
return 0;
}
- if (table->type == HEM_TYPE_SCCC ||
- table->type == HEM_TYPE_QPC_TIMER ||
- table->type == HEM_TYPE_CQC_TIMER)
- return 0;
-
op += step_idx;
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
@@ -3851,9 +3974,14 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
roce_set_bit(context->byte_172_sq_psn, V2_QPC_BYTE_172_FRE_S, 1);
- hr_qp->access_flags = attr->qp_access_flags;
roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
+
+ if (hr_dev->caps.qpc_sz < HNS_ROCE_V3_QPC_SZ)
+ return;
+
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_STASH)
+ hr_reg_enable(&context->ext, QPCEX_STASH);
}
static void modify_qp_init_to_init(struct ib_qp *ibqp,
@@ -3874,51 +4002,6 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
V2_QPC_BYTE_4_TST_S, 0);
- if (attr_mask & IB_QP_ACCESS_FLAGS) {
- roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
- !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ));
- roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
- 0);
-
- roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
- !!(attr->qp_access_flags &
- IB_ACCESS_REMOTE_WRITE));
- roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
- 0);
-
- roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
- !!(attr->qp_access_flags &
- IB_ACCESS_REMOTE_ATOMIC));
- roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
- 0);
- roce_set_bit(context->byte_76_srqn_op_en,
- V2_QPC_BYTE_76_EXT_ATE_S,
- !!(attr->qp_access_flags &
- IB_ACCESS_REMOTE_ATOMIC));
- roce_set_bit(qpc_mask->byte_76_srqn_op_en,
- V2_QPC_BYTE_76_EXT_ATE_S, 0);
- } else {
- roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
- !!(hr_qp->access_flags & IB_ACCESS_REMOTE_READ));
- roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
- 0);
-
- roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
- !!(hr_qp->access_flags & IB_ACCESS_REMOTE_WRITE));
- roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
- 0);
-
- roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
- !!(hr_qp->access_flags & IB_ACCESS_REMOTE_ATOMIC));
- roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
- 0);
- roce_set_bit(context->byte_76_srqn_op_en,
- V2_QPC_BYTE_76_EXT_ATE_S,
- !!(hr_qp->access_flags & IB_ACCESS_REMOTE_ATOMIC));
- roce_set_bit(qpc_mask->byte_76_srqn_op_en,
- V2_QPC_BYTE_76_EXT_ATE_S, 0);
- }
-
roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
@@ -4328,7 +4411,7 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
ret = config_qp_sq_buf(hr_dev, hr_qp, context, qpc_mask);
if (ret) {
- ibdev_err(ibdev, "failed to config sq buf, ret %d\n", ret);
+ ibdev_err(ibdev, "failed to config sq buf, ret = %d.\n", ret);
return ret;
}
@@ -4421,7 +4504,9 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
IB_GID_TYPE_ROCE_UDP_ENCAP);
}
- if (vlan_id < VLAN_N_VID) {
+ /* Only HIP08 needs to set the vlan_en bits in QPC */
+ if (vlan_id < VLAN_N_VID &&
+ hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
roce_set_bit(context->byte_76_srqn_op_en,
V2_QPC_BYTE_76_RQ_VLAN_EN_S, 1);
roce_set_bit(qpc_mask->byte_76_srqn_op_en,
@@ -4468,15 +4553,11 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
V2_QPC_BYTE_24_HOP_LIMIT_S, 0);
- if (is_udp)
- roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
- V2_QPC_BYTE_24_TC_S, grh->traffic_class >> 2);
- else
- roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
- V2_QPC_BYTE_24_TC_S, grh->traffic_class);
-
+ roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
+ V2_QPC_BYTE_24_TC_S, get_tclass(&attr->ah_attr.grh));
roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
V2_QPC_BYTE_24_TC_S, 0);
+
roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
V2_QPC_BYTE_28_FL_S, grh->flow_label);
roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
@@ -4758,6 +4839,9 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
unsigned long rq_flag = 0;
int ret;
+ if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
+ return -EOPNOTSUPP;
+
/*
* In v2 engine, software pass context and context mask to hardware
* when modifying qp. If software need modify some fields in context,
@@ -4818,7 +4902,7 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
/* SW pass context to HW */
ret = hns_roce_v2_qp_modify(hr_dev, context, qpc_mask, hr_qp);
if (ret) {
- ibdev_err(ibdev, "failed to modify QP, ret = %d\n", ret);
+ ibdev_err(ibdev, "failed to modify QP, ret = %d.\n", ret);
goto out;
}
@@ -4911,7 +4995,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, &context);
if (ret) {
- ibdev_err(ibdev, "failed to query QPC, ret = %d\n", ret);
+ ibdev_err(ibdev, "failed to query QPC, ret = %d.\n", ret);
ret = -EINVAL;
goto out;
}
@@ -5026,13 +5110,15 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
unsigned long flags;
int ret = 0;
- if (hr_qp->ibqp.qp_type == IB_QPT_RC && hr_qp->state != IB_QPS_RESET) {
+ if ((hr_qp->ibqp.qp_type == IB_QPT_RC ||
+ hr_qp->ibqp.qp_type == IB_QPT_UD) &&
+ hr_qp->state != IB_QPS_RESET) {
/* Modify qp to reset before destroying qp */
ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0,
hr_qp->state, IB_QPS_RESET);
if (ret)
ibdev_err(ibdev,
- "failed to modify QP to RST, ret = %d\n",
+ "failed to modify QP to RST, ret = %d.\n",
ret);
}
@@ -5071,7 +5157,7 @@ static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, udata);
if (ret)
ibdev_err(&hr_dev->ib_dev,
- "failed to destroy QP 0x%06lx, ret = %d\n",
+ "failed to destroy QP, QPN = 0x%06lx, ret = %d.\n",
hr_qp->qpn, ret);
hns_roce_qp_destroy(hr_dev, hr_qp, udata);
@@ -5094,7 +5180,7 @@ static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_RESET_SCCC, false);
ret = hns_roce_cmq_send(hr_dev, &desc, 1);
if (ret) {
- ibdev_err(ibdev, "failed to reset SCC ctx, ret = %d\n", ret);
+ ibdev_err(ibdev, "failed to reset SCC ctx, ret = %d.\n", ret);
goto out;
}
@@ -5104,7 +5190,7 @@ static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
clr->qpn = cpu_to_le32(hr_qp->qpn);
ret = hns_roce_cmq_send(hr_dev, &desc, 1);
if (ret) {
- ibdev_err(ibdev, "failed to clear SCC ctx, ret = %d\n", ret);
+ ibdev_err(ibdev, "failed to clear SCC ctx, ret = %d.\n", ret);
goto out;
}
@@ -5353,7 +5439,7 @@ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
if (ret)
ibdev_err(&hr_dev->ib_dev,
- "failed to process cmd when modifying CQ, ret = %d\n",
+ "failed to process cmd when modifying CQ, ret = %d.\n",
ret);
return ret;
@@ -5364,8 +5450,6 @@ static void hns_roce_irq_work_handle(struct work_struct *work)
struct hns_roce_work *irq_work =
container_of(work, struct hns_roce_work, work);
struct ib_device *ibdev = &irq_work->hr_dev->ib_dev;
- u32 qpn = irq_work->qpn;
- u32 cqn = irq_work->cqn;
switch (irq_work->event_type) {
case HNS_ROCE_EVENT_TYPE_PATH_MIG:
@@ -5381,15 +5465,15 @@ static void hns_roce_irq_work_handle(struct work_struct *work)
break;
case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
ibdev_err(ibdev, "Local work queue 0x%x catast error, sub_event type is: %d\n",
- qpn, irq_work->sub_type);
+ irq_work->queue_num, irq_work->sub_type);
break;
case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
ibdev_err(ibdev, "Invalid request local work queue 0x%x error.\n",
- qpn);
+ irq_work->queue_num);
break;
case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
ibdev_err(ibdev, "Local access violation work queue 0x%x error, sub_event type is: %d\n",
- qpn, irq_work->sub_type);
+ irq_work->queue_num, irq_work->sub_type);
break;
case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
ibdev_warn(ibdev, "SRQ limit reach.\n");
@@ -5401,10 +5485,10 @@ static void hns_roce_irq_work_handle(struct work_struct *work)
ibdev_err(ibdev, "SRQ catas error.\n");
break;
case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
- ibdev_err(ibdev, "CQ 0x%x access err.\n", cqn);
+ ibdev_err(ibdev, "CQ 0x%x access err.\n", irq_work->queue_num);
break;
case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
- ibdev_warn(ibdev, "CQ 0x%x overflow\n", cqn);
+ ibdev_warn(ibdev, "CQ 0x%x overflow\n", irq_work->queue_num);
break;
case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
ibdev_warn(ibdev, "DB overflow.\n");
@@ -5420,8 +5504,7 @@ static void hns_roce_irq_work_handle(struct work_struct *work)
}
static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev,
- struct hns_roce_eq *eq,
- u32 qpn, u32 cqn)
+ struct hns_roce_eq *eq, u32 queue_num)
{
struct hns_roce_work *irq_work;
@@ -5431,10 +5514,9 @@ static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev,
INIT_WORK(&(irq_work->work), hns_roce_irq_work_handle);
irq_work->hr_dev = hr_dev;
- irq_work->qpn = qpn;
- irq_work->cqn = cqn;
irq_work->event_type = eq->event_type;
irq_work->sub_type = eq->sub_type;
+ irq_work->queue_num = queue_num;
queue_work(hr_dev->irq_workq, &(irq_work->work));
}
@@ -5486,10 +5568,8 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
struct hns_roce_aeqe *aeqe = next_aeqe_sw_v2(eq);
int aeqe_found = 0;
int event_type;
+ u32 queue_num;
int sub_type;
- u32 srqn;
- u32 qpn;
- u32 cqn;
while (aeqe) {
/* Make sure we read AEQ entry after we have checked the
@@ -5503,15 +5583,9 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
sub_type = roce_get_field(aeqe->asyn,
HNS_ROCE_V2_AEQE_SUB_TYPE_M,
HNS_ROCE_V2_AEQE_SUB_TYPE_S);
- qpn = roce_get_field(aeqe->event.qp_event.qp,
- HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
- HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
- cqn = roce_get_field(aeqe->event.cq_event.cq,
- HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
- HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
- srqn = roce_get_field(aeqe->event.srq_event.srq,
- HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
- HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
+ queue_num = roce_get_field(aeqe->event.queue_event.num,
+ HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
+ HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
switch (event_type) {
case HNS_ROCE_EVENT_TYPE_PATH_MIG:
@@ -5522,17 +5596,15 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
- hns_roce_qp_event(hr_dev, qpn, event_type);
+ hns_roce_qp_event(hr_dev, queue_num, event_type);
break;
case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
- hns_roce_srq_event(hr_dev, srqn, event_type);
+ hns_roce_srq_event(hr_dev, queue_num, event_type);
break;
case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
- hns_roce_cq_event(hr_dev, cqn, event_type);
- break;
- case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
+ hns_roce_cq_event(hr_dev, queue_num, event_type);
break;
case HNS_ROCE_EVENT_TYPE_MB:
hns_roce_cmd_event(hr_dev,
@@ -5540,8 +5612,7 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
aeqe->event.cmd.status,
le64_to_cpu(aeqe->event.cmd.out_param));
break;
- case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
- break;
+ case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
case HNS_ROCE_EVENT_TYPE_FLR:
break;
default:
@@ -5558,7 +5629,7 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
if (eq->cons_index > (2 * eq->entries - 1))
eq->cons_index = 0;
- hns_roce_v2_init_irq_work(hr_dev, eq, qpn, cqn);
+ hns_roce_v2_init_irq_work(hr_dev, eq, queue_num);
aeqe = next_aeqe_sw_v2(eq);
}
@@ -6193,6 +6264,7 @@ static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
/* required last entry */
{0, }
};
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index be7f2fe1e883..bdaccf86460d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -44,6 +44,7 @@
#define HNS_ROCE_VF_SMAC_NUM 32
#define HNS_ROCE_VF_SGID_NUM 32
#define HNS_ROCE_VF_SL_NUM 8
+#define HNS_ROCE_VF_GMV_BT_NUM 256
#define HNS_ROCE_V2_MAX_QP_NUM 0x100000
#define HNS_ROCE_V2_MAX_QPC_TIMER_NUM 0x200
@@ -89,6 +90,7 @@
#define HNS_ROCE_V2_SCCC_SZ 32
#define HNS_ROCE_V3_SCCC_SZ 64
+#define HNS_ROCE_V3_GMV_ENTRY_SZ 32
#define HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ PAGE_SIZE
#define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ PAGE_SIZE
@@ -241,6 +243,8 @@ enum hns_roce_opcode_type {
HNS_ROCE_OPC_CLR_SCCC = 0x8509,
HNS_ROCE_OPC_QUERY_SCCC = 0x850a,
HNS_ROCE_OPC_RESET_SCCC = 0x850b,
+ HNS_ROCE_OPC_CFG_GMV_TBL = 0x850f,
+ HNS_ROCE_OPC_CFG_GMV_BT = 0x8510,
HNS_SWITCH_PARAMETER_CFG = 0x1033,
};
@@ -263,23 +267,24 @@ enum hns_roce_sgid_type {
};
struct hns_roce_v2_cq_context {
- __le32 byte_4_pg_ceqn;
- __le32 byte_8_cqn;
- __le32 cqe_cur_blk_addr;
- __le32 byte_16_hop_addr;
- __le32 cqe_nxt_blk_addr;
- __le32 byte_24_pgsz_addr;
- __le32 byte_28_cq_pi;
- __le32 byte_32_cq_ci;
- __le32 cqe_ba;
- __le32 byte_40_cqe_ba;
- __le32 byte_44_db_record;
- __le32 db_record_addr;
- __le32 byte_52_cqe_cnt;
- __le32 byte_56_cqe_period_maxcnt;
- __le32 cqe_report_timer;
- __le32 byte_64_se_cqe_idx;
+ __le32 byte_4_pg_ceqn;
+ __le32 byte_8_cqn;
+ __le32 cqe_cur_blk_addr;
+ __le32 byte_16_hop_addr;
+ __le32 cqe_nxt_blk_addr;
+ __le32 byte_24_pgsz_addr;
+ __le32 byte_28_cq_pi;
+ __le32 byte_32_cq_ci;
+ __le32 cqe_ba;
+ __le32 byte_40_cqe_ba;
+ __le32 byte_44_db_record;
+ __le32 db_record_addr;
+ __le32 byte_52_cqe_cnt;
+ __le32 byte_56_cqe_period_maxcnt;
+ __le32 cqe_report_timer;
+ __le32 byte_64_se_cqe_idx;
};
+
#define HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM 0x0
#define HNS_ROCE_V2_CQ_DEFAULT_INTERVAL 0x0
@@ -356,6 +361,10 @@ struct hns_roce_v2_cq_context {
#define V2_CQC_BYTE_64_SE_CQE_IDX_S 0
#define V2_CQC_BYTE_64_SE_CQE_IDX_M GENMASK(23, 0)
+#define CQC_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_v2_cq_context, h, l)
+
+#define CQC_STASH CQC_FIELD_LOC(63, 63)
+
struct hns_roce_srq_context {
__le32 byte_4_srqn_srqst;
__le32 byte_8_limit_wl;
@@ -440,7 +449,7 @@ struct hns_roce_srq_context {
#define SRQC_BYTE_60_SRQ_DB_RECORD_ADDR_S 1
#define SRQC_BYTE_60_SRQ_DB_RECORD_ADDR_M GENMASK(31, 1)
-enum{
+enum {
V2_MPT_ST_VALID = 0x1,
V2_MPT_ST_FREE = 0x2,
};
@@ -457,68 +466,72 @@ enum hns_roce_v2_qp_state {
HNS_ROCE_QP_NUM_ST
};
+struct hns_roce_v2_qp_context_ex {
+ __le32 data[64];
+};
struct hns_roce_v2_qp_context {
- __le32 byte_4_sqpn_tst;
- __le32 wqe_sge_ba;
- __le32 byte_12_sq_hop;
- __le32 byte_16_buf_ba_pg_sz;
- __le32 byte_20_smac_sgid_idx;
- __le32 byte_24_mtu_tc;
- __le32 byte_28_at_fl;
- u8 dgid[GID_LEN_V2];
- __le32 dmac;
- __le32 byte_52_udpspn_dmac;
- __le32 byte_56_dqpn_err;
- __le32 byte_60_qpst_tempid;
- __le32 qkey_xrcd;
- __le32 byte_68_rq_db;
- __le32 rq_db_record_addr;
- __le32 byte_76_srqn_op_en;
- __le32 byte_80_rnr_rx_cqn;
- __le32 byte_84_rq_ci_pi;
- __le32 rq_cur_blk_addr;
- __le32 byte_92_srq_info;
- __le32 byte_96_rx_reqmsn;
- __le32 rq_nxt_blk_addr;
- __le32 byte_104_rq_sge;
- __le32 byte_108_rx_reqepsn;
- __le32 rq_rnr_timer;
- __le32 rx_msg_len;
- __le32 rx_rkey_pkt_info;
- __le64 rx_va;
- __le32 byte_132_trrl;
- __le32 trrl_ba;
- __le32 byte_140_raq;
- __le32 byte_144_raq;
- __le32 byte_148_raq;
- __le32 byte_152_raq;
- __le32 byte_156_raq;
- __le32 byte_160_sq_ci_pi;
- __le32 sq_cur_blk_addr;
- __le32 byte_168_irrl_idx;
- __le32 byte_172_sq_psn;
- __le32 byte_176_msg_pktn;
- __le32 sq_cur_sge_blk_addr;
- __le32 byte_184_irrl_idx;
- __le32 cur_sge_offset;
- __le32 byte_192_ext_sge;
- __le32 byte_196_sq_psn;
- __le32 byte_200_sq_max;
- __le32 irrl_ba;
- __le32 byte_208_irrl;
- __le32 byte_212_lsn;
- __le32 sq_timer;
- __le32 byte_220_retry_psn_msn;
- __le32 byte_224_retry_msg;
- __le32 rx_sq_cur_blk_addr;
- __le32 byte_232_irrl_sge;
- __le32 irrl_cur_sge_offset;
- __le32 byte_240_irrl_tail;
- __le32 byte_244_rnr_rxack;
- __le32 byte_248_ack_psn;
- __le32 byte_252_err_txcqn;
- __le32 byte_256_sqflush_rqcqe;
- __le32 ext[64];
+ __le32 byte_4_sqpn_tst;
+ __le32 wqe_sge_ba;
+ __le32 byte_12_sq_hop;
+ __le32 byte_16_buf_ba_pg_sz;
+ __le32 byte_20_smac_sgid_idx;
+ __le32 byte_24_mtu_tc;
+ __le32 byte_28_at_fl;
+ u8 dgid[GID_LEN_V2];
+ __le32 dmac;
+ __le32 byte_52_udpspn_dmac;
+ __le32 byte_56_dqpn_err;
+ __le32 byte_60_qpst_tempid;
+ __le32 qkey_xrcd;
+ __le32 byte_68_rq_db;
+ __le32 rq_db_record_addr;
+ __le32 byte_76_srqn_op_en;
+ __le32 byte_80_rnr_rx_cqn;
+ __le32 byte_84_rq_ci_pi;
+ __le32 rq_cur_blk_addr;
+ __le32 byte_92_srq_info;
+ __le32 byte_96_rx_reqmsn;
+ __le32 rq_nxt_blk_addr;
+ __le32 byte_104_rq_sge;
+ __le32 byte_108_rx_reqepsn;
+ __le32 rq_rnr_timer;
+ __le32 rx_msg_len;
+ __le32 rx_rkey_pkt_info;
+ __le64 rx_va;
+ __le32 byte_132_trrl;
+ __le32 trrl_ba;
+ __le32 byte_140_raq;
+ __le32 byte_144_raq;
+ __le32 byte_148_raq;
+ __le32 byte_152_raq;
+ __le32 byte_156_raq;
+ __le32 byte_160_sq_ci_pi;
+ __le32 sq_cur_blk_addr;
+ __le32 byte_168_irrl_idx;
+ __le32 byte_172_sq_psn;
+ __le32 byte_176_msg_pktn;
+ __le32 sq_cur_sge_blk_addr;
+ __le32 byte_184_irrl_idx;
+ __le32 cur_sge_offset;
+ __le32 byte_192_ext_sge;
+ __le32 byte_196_sq_psn;
+ __le32 byte_200_sq_max;
+ __le32 irrl_ba;
+ __le32 byte_208_irrl;
+ __le32 byte_212_lsn;
+ __le32 sq_timer;
+ __le32 byte_220_retry_psn_msn;
+ __le32 byte_224_retry_msg;
+ __le32 rx_sq_cur_blk_addr;
+ __le32 byte_232_irrl_sge;
+ __le32 irrl_cur_sge_offset;
+ __le32 byte_240_irrl_tail;
+ __le32 byte_244_rnr_rxack;
+ __le32 byte_248_ack_psn;
+ __le32 byte_252_err_txcqn;
+ __le32 byte_256_sqflush_rqcqe;
+
+ struct hns_roce_v2_qp_context_ex ext;
};
#define V2_QPC_BYTE_4_TST_S 0
@@ -887,6 +900,10 @@ struct hns_roce_v2_qp_context {
#define V2_QPC_BYTE_256_SQ_FLUSH_IDX_S 16
#define V2_QPC_BYTE_256_SQ_FLUSH_IDX_M GENMASK(31, 16)
+#define QPCEX_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_v2_qp_context_ex, h, l)
+
+#define QPCEX_STASH QPCEX_FIELD_LOC(82, 82)
+
#define V2_QP_RWE_S 1 /* rdma write enable */
#define V2_QP_RRE_S 2 /* rdma read enable */
#define V2_QP_ATE_S 3 /* rdma atomic enable */
@@ -1073,12 +1090,13 @@ struct hns_roce_v2_ud_send_wqe {
__le32 byte_32;
__le32 byte_36;
__le32 byte_40;
- __le32 dmac;
- __le32 byte_48;
+ u8 dmac[ETH_ALEN];
+ u8 sgid_index;
+ u8 smac_index;
u8 dgid[GID_LEN_V2];
-
};
-#define V2_UD_SEND_WQE_BYTE_4_OPCODE_S 0
+
+#define V2_UD_SEND_WQE_BYTE_4_OPCODE_S 0
#define V2_UD_SEND_WQE_BYTE_4_OPCODE_M GENMASK(4, 0)
#define V2_UD_SEND_WQE_BYTE_4_OWNER_S 7
@@ -1117,37 +1135,10 @@ struct hns_roce_v2_ud_send_wqe {
#define V2_UD_SEND_WQE_BYTE_40_SL_S 20
#define V2_UD_SEND_WQE_BYTE_40_SL_M GENMASK(23, 20)
-#define V2_UD_SEND_WQE_BYTE_40_PORTN_S 24
-#define V2_UD_SEND_WQE_BYTE_40_PORTN_M GENMASK(26, 24)
-
#define V2_UD_SEND_WQE_BYTE_40_UD_VLAN_EN_S 30
#define V2_UD_SEND_WQE_BYTE_40_LBI_S 31
-#define V2_UD_SEND_WQE_DMAC_0_S 0
-#define V2_UD_SEND_WQE_DMAC_0_M GENMASK(7, 0)
-
-#define V2_UD_SEND_WQE_DMAC_1_S 8
-#define V2_UD_SEND_WQE_DMAC_1_M GENMASK(15, 8)
-
-#define V2_UD_SEND_WQE_DMAC_2_S 16
-#define V2_UD_SEND_WQE_DMAC_2_M GENMASK(23, 16)
-
-#define V2_UD_SEND_WQE_DMAC_3_S 24
-#define V2_UD_SEND_WQE_DMAC_3_M GENMASK(31, 24)
-
-#define V2_UD_SEND_WQE_BYTE_48_DMAC_4_S 0
-#define V2_UD_SEND_WQE_BYTE_48_DMAC_4_M GENMASK(7, 0)
-
-#define V2_UD_SEND_WQE_BYTE_48_DMAC_5_S 8
-#define V2_UD_SEND_WQE_BYTE_48_DMAC_5_M GENMASK(15, 8)
-
-#define V2_UD_SEND_WQE_BYTE_48_SGID_INDX_S 16
-#define V2_UD_SEND_WQE_BYTE_48_SGID_INDX_M GENMASK(23, 16)
-
-#define V2_UD_SEND_WQE_BYTE_48_SMAC_INDX_S 24
-#define V2_UD_SEND_WQE_BYTE_48_SMAC_INDX_M GENMASK(31, 24)
-
struct hns_roce_v2_rc_send_wqe {
__le32 byte_4;
__le32 msg_len;
@@ -1334,7 +1325,7 @@ struct hns_roce_pf_res_b {
__le32 sgid_idx_num;
__le32 qid_idx_sl_num;
__le32 sccc_bt_idx_num;
- __le32 rsv;
+ __le32 gmv_idx_num;
};
#define PF_RES_DATA_1_PF_SMAC_IDX_S 0
@@ -1361,6 +1352,12 @@ struct hns_roce_pf_res_b {
#define PF_RES_DATA_4_PF_SCCC_BT_NUM_S 9
#define PF_RES_DATA_4_PF_SCCC_BT_NUM_M GENMASK(17, 9)
+#define PF_RES_DATA_5_PF_GMV_BT_IDX_S 0
+#define PF_RES_DATA_5_PF_GMV_BT_IDX_M GENMASK(7, 0)
+
+#define PF_RES_DATA_5_PF_GMV_BT_NUM_S 8
+#define PF_RES_DATA_5_PF_GMV_BT_NUM_M GENMASK(16, 8)
+
struct hns_roce_pf_timer_res_a {
__le32 rsv0;
__le32 qpc_timer_bt_idx_num;
@@ -1425,7 +1422,7 @@ struct hns_roce_vf_res_b {
__le32 vf_sgid_idx_num;
__le32 vf_qid_idx_sl_num;
__le32 vf_sccc_idx_num;
- __le32 rsv1;
+ __le32 vf_gmv_idx_num;
};
#define VF_RES_B_DATA_0_VF_ID_S 0
@@ -1455,6 +1452,12 @@ struct hns_roce_vf_res_b {
#define VF_RES_B_DATA_4_VF_SCCC_BT_NUM_S 9
#define VF_RES_B_DATA_4_VF_SCCC_BT_NUM_M GENMASK(17, 9)
+#define VF_RES_B_DATA_5_VF_GMV_BT_IDX_S 0
+#define VF_RES_B_DATA_5_VF_GMV_BT_IDX_M GENMASK(7, 0)
+
+#define VF_RES_B_DATA_5_VF_GMV_BT_NUM_S 16
+#define VF_RES_B_DATA_5_VF_GMV_BT_NUM_M GENMASK(24, 16)
+
struct hns_roce_vf_switch {
__le32 rocee_sel;
__le32 fun_id;
@@ -1577,6 +1580,46 @@ struct hns_roce_cfg_smac_tb {
#define CFG_SMAC_TB_VF_SMAC_H_S 0
#define CFG_SMAC_TB_VF_SMAC_H_M GENMASK(15, 0)
+struct hns_roce_cfg_gmv_bt {
+ __le32 gmv_ba_l;
+ __le32 gmv_ba_h;
+ __le32 gmv_bt_idx;
+ __le32 rsv[3];
+};
+
+#define CFG_GMV_BA_H_S 0
+#define CFG_GMV_BA_H_M GENMASK(19, 0)
+
+struct hns_roce_cfg_gmv_tb_a {
+ __le32 vf_sgid_l;
+ __le32 vf_sgid_ml;
+ __le32 vf_sgid_mh;
+ __le32 vf_sgid_h;
+ __le32 vf_sgid_type_vlan;
+ __le32 resv;
+};
+
+#define CFG_GMV_TB_SGID_IDX_S 0
+#define CFG_GMV_TB_SGID_IDX_M GENMASK(7, 0)
+
+#define CFG_GMV_TB_VF_SGID_TYPE_S 0
+#define CFG_GMV_TB_VF_SGID_TYPE_M GENMASK(1, 0)
+
+#define CFG_GMV_TB_VF_VLAN_EN_S 2
+
+#define CFG_GMV_TB_VF_VLAN_ID_S 16
+#define CFG_GMV_TB_VF_VLAN_ID_M GENMASK(27, 16)
+
+struct hns_roce_cfg_gmv_tb_b {
+ __le32 vf_smac_l;
+ __le32 vf_smac_h;
+ __le32 table_idx_rsv;
+ __le32 resv[3];
+};
+
+#define CFG_GMV_TB_SMAC_H_S 0
+#define CFG_GMV_TB_SMAC_H_M GENMASK(15, 0)
+
#define HNS_ROCE_QUERY_PF_CAPS_CMD_NUM 5
struct hns_roce_query_pf_caps_a {
u8 number_ports;
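
The CQC_STASH and QPCEX_STASH definitions above use the driver's FIELD_LOC scheme: a field is named by its (high, low) bit positions counted across the whole context, which is laid out as consecutive little-endian 32-bit words. Below is a minimal standalone sketch of that addressing, assuming the span never straddles a word boundary (true for single-bit fields such as bit 82, i.e. bit 18 of word 2); the helper name is illustrative, not the driver's actual accessor:

#include <linux/bits.h>
#include <linux/types.h>
#include <asm/byteorder.h>

/* Write 'val' into the bit span [l, h] of a context of __le32 words. */
static void field_loc_write(__le32 *ctx, unsigned int h, unsigned int l,
			    u32 val)
{
	unsigned int word = l / 32;	/* which __le32 holds the field */
	unsigned int shift = l % 32;	/* low-bit offset inside it */
	u32 mask = GENMASK(h - l, 0);	/* field width */
	u32 tmp = le32_to_cpu(ctx[word]);

	tmp &= ~(mask << shift);
	tmp |= (val & mask) << shift;
	ctx[word] = cpu_to_le32(tmp);
}

With QPCEX_FIELD_LOC(82, 82), enabling the stash bit in this sketch is field_loc_write(ctx->data, 82, 82, 1).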
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index afeffafc59f9..d9179bae4989 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -33,13 +33,13 @@
#include <linux/acpi.h>
#include <linux/of_platform.h>
#include <linux/module.h>
+#include <linux/pci.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_cache.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
-#include <rdma/hns-abi.h>
#include "hns_roce_hem.h"
/**
@@ -53,7 +53,7 @@
* GID[0][0], GID[1][0],.....GID[N - 1][0],
* And so on
*/
-int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index)
+u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index)
{
return gid_index * hr_dev->caps.num_ports + port;
}
@@ -61,7 +61,10 @@ int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index)
static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr)
{
u8 phy_port;
- u32 i = 0;
+ u32 i;
+
+ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
+ return 0;
if (!memcmp(hr_dev->dev_addr[port], addr, ETH_ALEN))
return 0;
@@ -90,14 +93,13 @@ static int hns_roce_add_gid(const struct ib_gid_attr *attr, void **context)
static int hns_roce_del_gid(const struct ib_gid_attr *attr, void **context)
{
struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);
- struct ib_gid_attr zattr = {};
u8 port = attr->port_num - 1;
int ret;
if (port >= hr_dev->caps.num_ports)
return -EINVAL;
- ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, &zgid, &zattr);
+ ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, NULL, NULL);
return ret;
}
@@ -325,7 +327,8 @@ static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
resp.cqe_size = hr_dev->caps.cqe_sz;
- ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
+ ret = ib_copy_to_udata(udata, &resp,
+ min(udata->outlen, sizeof(resp)));
if (ret)
goto error_fail_copy_to_udata;
@@ -421,6 +424,7 @@ static const struct ib_device_ops hns_roce_dev_ops = {
.alloc_pd = hns_roce_alloc_pd,
.alloc_ucontext = hns_roce_alloc_ucontext,
.create_ah = hns_roce_create_ah,
+ .create_user_ah = hns_roce_create_ah,
.create_cq = hns_roce_create_cq,
.create_qp = hns_roce_create_qp,
.dealloc_pd = hns_roce_dealloc_pd,
@@ -491,36 +495,13 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
ib_dev->phys_port_cnt = hr_dev->caps.num_ports;
ib_dev->local_dma_lkey = hr_dev->caps.reserved_lkey;
ib_dev->num_comp_vectors = hr_dev->caps.num_comp_vectors;
- ib_dev->uverbs_cmd_mask =
- (1ULL << IB_USER_VERBS_CMD_GET_CONTEXT) |
- (1ULL << IB_USER_VERBS_CMD_QUERY_DEVICE) |
- (1ULL << IB_USER_VERBS_CMD_QUERY_PORT) |
- (1ULL << IB_USER_VERBS_CMD_ALLOC_PD) |
- (1ULL << IB_USER_VERBS_CMD_DEALLOC_PD) |
- (1ULL << IB_USER_VERBS_CMD_REG_MR) |
- (1ULL << IB_USER_VERBS_CMD_DEREG_MR) |
- (1ULL << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
- (1ULL << IB_USER_VERBS_CMD_CREATE_CQ) |
- (1ULL << IB_USER_VERBS_CMD_DESTROY_CQ) |
- (1ULL << IB_USER_VERBS_CMD_CREATE_QP) |
- (1ULL << IB_USER_VERBS_CMD_MODIFY_QP) |
- (1ULL << IB_USER_VERBS_CMD_QUERY_QP) |
- (1ULL << IB_USER_VERBS_CMD_DESTROY_QP);
-
- ib_dev->uverbs_ex_cmd_mask |= (1ULL << IB_USER_VERBS_EX_CMD_MODIFY_CQ);
-
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_REREG_MR) {
- ib_dev->uverbs_cmd_mask |= (1ULL << IB_USER_VERBS_CMD_REREG_MR);
+
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_REREG_MR)
ib_set_device_ops(ib_dev, &hns_roce_dev_mr_ops);
- }
/* MW */
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_MW) {
- ib_dev->uverbs_cmd_mask |=
- (1ULL << IB_USER_VERBS_CMD_ALLOC_MW) |
- (1ULL << IB_USER_VERBS_CMD_DEALLOC_MW);
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_MW)
ib_set_device_ops(ib_dev, &hns_roce_dev_mw_ops);
- }
/* FRMR */
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR)
@@ -528,12 +509,6 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
/* SRQ */
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
- ib_dev->uverbs_cmd_mask |=
- (1ULL << IB_USER_VERBS_CMD_CREATE_SRQ) |
- (1ULL << IB_USER_VERBS_CMD_MODIFY_SRQ) |
- (1ULL << IB_USER_VERBS_CMD_QUERY_SRQ) |
- (1ULL << IB_USER_VERBS_CMD_DESTROY_SRQ) |
- (1ULL << IB_USER_VERBS_CMD_POST_SRQ_RECV);
ib_set_device_ops(ib_dev, &hns_roce_dev_srq_ops);
ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_srq_ops);
}
@@ -580,8 +555,8 @@ error_failed_setup_mtu_mac:
static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
{
- int ret;
struct device *dev = hr_dev->dev;
+ int ret;
ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table,
HEM_TYPE_MTPT, hr_dev->caps.mtpt_entry_sz,
@@ -631,7 +606,7 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
goto err_unmap_trrl;
}
- if (hr_dev->caps.srqc_entry_sz) {
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
ret = hns_roce_init_hem_table(hr_dev, &hr_dev->srq_table.table,
HEM_TYPE_SRQC,
hr_dev->caps.srqc_entry_sz,
@@ -643,7 +618,7 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
}
}
- if (hr_dev->caps.sccc_sz) {
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
ret = hns_roce_init_hem_table(hr_dev,
&hr_dev->qp_table.sccc_table,
HEM_TYPE_SCCC,
@@ -680,18 +655,35 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
}
}
+ if (hr_dev->caps.gmv_entry_sz) {
+ ret = hns_roce_init_hem_table(hr_dev, &hr_dev->gmv_table,
+ HEM_TYPE_GMV,
+ hr_dev->caps.gmv_entry_sz,
+ hr_dev->caps.gmv_entry_num, 1);
+ if (ret) {
+ dev_err(dev,
+ "failed to init gmv table memory, ret = %d\n",
+ ret);
+ goto err_unmap_cqc_timer;
+ }
+ }
+
return 0;
+err_unmap_cqc_timer:
+ if (hr_dev->caps.cqc_timer_entry_sz)
+ hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cqc_timer_table);
+
err_unmap_qpc_timer:
if (hr_dev->caps.qpc_timer_entry_sz)
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qpc_timer_table);
err_unmap_ctx:
- if (hr_dev->caps.sccc_sz)
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL)
hns_roce_cleanup_hem_table(hr_dev,
&hr_dev->qp_table.sccc_table);
err_unmap_srq:
- if (hr_dev->caps.srqc_entry_sz)
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->srq_table.table);
err_unmap_cq:
@@ -721,8 +713,8 @@ err_unmap_dmpt:
*/
static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
{
- int ret;
struct device *dev = hr_dev->dev;
+ int ret;
spin_lock_init(&hr_dev->sm_lock);
spin_lock_init(&hr_dev->bt_cmd_lock);
@@ -846,8 +838,8 @@ void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev)
int hns_roce_init(struct hns_roce_dev *hr_dev)
{
- int ret;
struct device *dev = hr_dev->dev;
+ int ret;
if (hr_dev->hw->reset) {
ret = hr_dev->hw->reset(hr_dev, true);
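
The ib_copy_to_udata() changes above (and the matching ones in pd.c and srq.c below) cap the copy length at udata->outlen. The point is ABI tolerance: a response struct can grow across kernel versions, and an older userspace provider hands in a buffer sized for the old layout, so copying a full sizeof(resp) would overrun it. A hedged sketch of the idiom, with an illustrative response struct:

#include <rdma/ib_verbs.h>

/* Illustrative only: stands in for a response struct that has grown,
 * such as hns_roce_ib_alloc_ucontext_resp.
 */
struct example_resp {
	__u32 qp_tab_size;
	__u32 cqe_size;		/* newer field unknown to old userspace */
};

static int example_copy_resp(struct ib_udata *udata,
			     struct example_resp *resp)
{
	/* Old provider, small buffer: it receives only the fields it
	 * knows about; a new provider receives everything.
	 */
	return ib_copy_to_udata(udata, resp,
				min(udata->outlen, sizeof(*resp)));
}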
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 7f81a695e9af..1bcffd93ff3e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -167,10 +167,10 @@ static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
struct hns_roce_mr *mr)
{
- int ret;
unsigned long mtpt_idx = key_to_hw_index(mr->key);
- struct device *dev = hr_dev->dev;
struct hns_roce_cmd_mailbox *mailbox;
+ struct device *dev = hr_dev->dev;
+ int ret;
/* Allocate mailbox memory */
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
@@ -185,14 +185,14 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
else
ret = hr_dev->hw->frmr_write_mtpt(hr_dev, mailbox->buf, mr);
if (ret) {
- dev_err(dev, "Write mtpt fail!\n");
+ dev_err(dev, "failed to write mtpt, ret = %d.\n", ret);
goto err_page;
}
ret = hns_roce_hw_create_mpt(hr_dev, mailbox,
mtpt_idx & (hr_dev->caps.num_mtpts - 1));
if (ret) {
- dev_err(dev, "CREATE_MPT failed (%d)\n", ret);
+ dev_err(dev, "failed to create mpt, ret = %d.\n", ret);
goto err_page;
}
@@ -328,9 +328,10 @@ static int rereg_mr_trans(struct ib_mr *ibmr, int flags,
return ret;
}
-int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
- u64 virt_addr, int mr_access_flags, struct ib_pd *pd,
- struct ib_udata *udata)
+struct ib_mr *hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start,
+ u64 length, u64 virt_addr,
+ int mr_access_flags, struct ib_pd *pd,
+ struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
struct ib_device *ib_dev = &hr_dev->ib_dev;
@@ -341,11 +342,11 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
int ret;
if (!mr->enabled)
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR(mailbox))
- return PTR_ERR(mailbox);
+ return ERR_CAST(mailbox);
mtpt_idx = key_to_hw_index(mr->key) & (hr_dev->caps.num_mtpts - 1);
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, mtpt_idx, 0,
@@ -390,12 +391,12 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
- return 0;
+ return NULL;
free_cmd_mbox:
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
- return ret;
+ return ERR_PTR(ret);
}
int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
@@ -495,7 +496,7 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
if (ret < 1) {
- ibdev_err(ibdev, "failed to store sg pages %d %d, cnt = %d.\n",
+ ibdev_err(ibdev, "failed to store sg pages %u %u, cnt = %d.\n",
mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, ret);
goto err_page_list;
}
@@ -509,7 +510,7 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
ibdev_err(ibdev, "failed to map sg mtr, ret = %d.\n", ret);
ret = 0;
} else {
- mr->pbl_mtr.hem_cfg.buf_pg_shift = ilog2(ibmr->page_size);
+ mr->pbl_mtr.hem_cfg.buf_pg_shift = (u32)ilog2(ibmr->page_size);
ret = mr->npages;
}
@@ -695,15 +696,6 @@ static inline size_t mtr_bufs_size(struct hns_roce_buf_attr *attr)
return size;
}
-static inline size_t mtr_kmem_direct_size(bool is_direct, size_t alloc_size,
- unsigned int page_shift)
-{
- if (is_direct)
- return ALIGN(alloc_size, 1 << page_shift);
- else
- return HNS_HW_DIRECT_PAGE_COUNT << page_shift;
-}
-
/*
* check the given pages in continuous address space
* Returns 0 on success, or the error page num.
@@ -732,7 +724,6 @@ static void mtr_free_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
/* release kernel buffers */
if (mtr->kmem) {
hns_roce_buf_free(hr_dev, mtr->kmem);
- kfree(mtr->kmem);
mtr->kmem = NULL;
}
}
@@ -744,13 +735,12 @@ static int mtr_alloc_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
struct ib_device *ibdev = &hr_dev->ib_dev;
unsigned int best_pg_shift;
int all_pg_count = 0;
- size_t direct_size;
size_t total_size;
int ret;
total_size = mtr_bufs_size(buf_attr);
if (total_size < 1) {
- ibdev_err(ibdev, "Failed to check mtr size\n");
+ ibdev_err(ibdev, "failed to check mtr size\n.");
return -EINVAL;
}
@@ -762,7 +752,7 @@ static int mtr_alloc_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
mtr->umem = ib_umem_get(ibdev, user_addr, total_size,
buf_attr->user_access);
if (IS_ERR_OR_NULL(mtr->umem)) {
- ibdev_err(ibdev, "Failed to get umem, ret %ld\n",
+ ibdev_err(ibdev, "failed to get umem, ret = %ld.\n",
PTR_ERR(mtr->umem));
return -ENOMEM;
}
@@ -780,19 +770,16 @@ static int mtr_alloc_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
ret = 0;
} else {
mtr->umem = NULL;
- mtr->kmem = kzalloc(sizeof(*mtr->kmem), GFP_KERNEL);
- if (!mtr->kmem) {
- ibdev_err(ibdev, "Failed to alloc kmem\n");
- return -ENOMEM;
- }
- direct_size = mtr_kmem_direct_size(is_direct, total_size,
- buf_attr->page_shift);
- ret = hns_roce_buf_alloc(hr_dev, total_size, direct_size,
- mtr->kmem, buf_attr->page_shift);
- if (ret) {
- ibdev_err(ibdev, "Failed to alloc kmem, ret %d\n", ret);
- goto err_alloc_mem;
+ mtr->kmem =
+ hns_roce_buf_alloc(hr_dev, total_size,
+ buf_attr->page_shift,
+ is_direct ? HNS_ROCE_BUF_DIRECT : 0);
+ if (IS_ERR(mtr->kmem)) {
+ ibdev_err(ibdev, "failed to alloc kmem, ret = %ld.\n",
+ PTR_ERR(mtr->kmem));
+ return PTR_ERR(mtr->kmem);
}
+
best_pg_shift = buf_attr->page_shift;
all_pg_count = mtr->kmem->npages;
}
@@ -800,7 +787,8 @@ static int mtr_alloc_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
/* must bigger than minimum hardware page shift */
if (best_pg_shift < HNS_HW_PAGE_SHIFT || all_pg_count < 1) {
ret = -EINVAL;
- ibdev_err(ibdev, "Failed to check mtr page shift %d count %d\n",
+ ibdev_err(ibdev,
+ "failed to check mtr, page shift = %u count = %d.\n",
best_pg_shift, all_pg_count);
goto err_alloc_mem;
}
@@ -841,12 +829,12 @@ static int mtr_get_pages(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
}
int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
- dma_addr_t *pages, int page_cnt)
+ dma_addr_t *pages, unsigned int page_cnt)
{
struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_buf_region *r;
+ unsigned int i;
int err;
- int i;
/*
* Only use the first page address as root ba when hopnum is 0, this
@@ -862,7 +850,7 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
if (r->offset + r->count > page_cnt) {
err = -EINVAL;
ibdev_err(ibdev,
- "Failed to check mtr%d end %d + %d, max %d\n",
+ "failed to check mtr%u end %u + %u, max %u.\n",
i, r->offset, r->count, page_cnt);
return err;
}
@@ -870,7 +858,7 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
err = mtr_map_region(hr_dev, mtr, &pages[r->offset], r);
if (err) {
ibdev_err(ibdev,
- "Failed to map mtr%d offset %d, err %d\n",
+ "failed to map mtr%u offset %u, ret = %d.\n",
i, r->offset, err);
return err;
}
@@ -883,13 +871,12 @@ int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
int offset, u64 *mtt_buf, int mtt_max, u64 *base_addr)
{
struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg;
+ int mtt_count, left;
int start_index;
- int mtt_count;
int total = 0;
__le64 *mtts;
- int npage;
+ u32 npage;
u64 addr;
- int left;
if (!mtt_buf || mtt_max < 1)
goto done;
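
mtr_alloc_bufs() above now gets its kernel buffer straight from hns_roce_buf_alloc(), which encodes failure in the returned pointer instead of filling a caller-allocated struct and returning a status, so the separate kzalloc()/kfree() of mtr->kmem disappears. A minimal sketch of the ERR_PTR convention this relies on (the allocator here is hypothetical):

#include <linux/err.h>
#include <linux/slab.h>

struct example_buf {
	unsigned int npages;
	/* ... page list ... */
};

/* Hypothetical allocator: the error code travels inside the pointer. */
static struct example_buf *example_buf_alloc(size_t size)
{
	struct example_buf *buf = kzalloc(sizeof(*buf), GFP_KERNEL);

	if (!buf)
		return ERR_PTR(-ENOMEM);
	/* ... allocate pages covering 'size' bytes ... */
	return buf;
}

static int example_caller(size_t size)
{
	struct example_buf *buf = example_buf_alloc(size);

	if (IS_ERR(buf))
		return PTR_ERR(buf);	/* propagates -ENOMEM etc. */
	/* ... use buf ... */
	return 0;
}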
diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c
index 98f69496adb4..cca818d05a8f 100644
--- a/drivers/infiniband/hw/hns/hns_roce_pd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_pd.c
@@ -32,7 +32,6 @@
#include <linux/platform_device.h>
#include <linux/pci.h>
-#include <uapi/rdma/hns-abi.h>
#include "hns_roce_device.h"
static int hns_roce_pd_alloc(struct hns_roce_dev *hr_dev, unsigned long *pdn)
@@ -65,21 +64,22 @@ int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
ret = hns_roce_pd_alloc(to_hr_dev(ib_dev), &pd->pdn);
if (ret) {
- ibdev_err(ib_dev, "failed to alloc pd, ret = %d\n", ret);
+ ibdev_err(ib_dev, "failed to alloc pd, ret = %d.\n", ret);
return ret;
}
if (udata) {
- struct hns_roce_ib_alloc_pd_resp uresp = {.pdn = pd->pdn};
+ struct hns_roce_ib_alloc_pd_resp resp = {.pdn = pd->pdn};
- if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
+ ret = ib_copy_to_udata(udata, &resp,
+ min(udata->outlen, sizeof(resp)));
+ if (ret) {
hns_roce_pd_free(to_hr_dev(ib_dev), pd->pdn);
- ibdev_err(ib_dev, "failed to copy to udata\n");
- return -EFAULT;
+ ibdev_err(ib_dev, "failed to copy to udata, ret = %d\n", ret);
}
}
- return 0;
+ return ret;
}
int hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 6c081dd985fc..1116371adf74 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -39,7 +39,6 @@
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
-#include <rdma/hns-abi.h>
static void flush_work_handle(struct work_struct *work)
{
@@ -114,8 +113,8 @@ void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
enum hns_roce_event type)
{
- struct ib_event event;
struct ib_qp *ibqp = &hr_qp->ibqp;
+ struct ib_event event;
if (ibqp->event_handler) {
event.device = ibqp->device;
@@ -154,9 +153,50 @@ static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
}
}
+static u8 get_least_load_bankid_for_qp(struct hns_roce_bank *bank)
+{
+ u32 least_load = bank[0].inuse;
+ u8 bankid = 0;
+ u32 bankcnt;
+ u8 i;
+
+ for (i = 1; i < HNS_ROCE_QP_BANK_NUM; i++) {
+ bankcnt = bank[i].inuse;
+ if (bankcnt < least_load) {
+ least_load = bankcnt;
+ bankid = i;
+ }
+ }
+
+ return bankid;
+}
+
+static int alloc_qpn_with_bankid(struct hns_roce_bank *bank, u8 bankid,
+ unsigned long *qpn)
+{
+ int id;
+
+ id = ida_alloc_range(&bank->ida, bank->next, bank->max, GFP_KERNEL);
+ if (id < 0) {
+ id = ida_alloc_range(&bank->ida, bank->min, bank->max,
+ GFP_KERNEL);
+ if (id < 0)
+ return id;
+ }
+
+ /* the QPN should keep increasing until the max value is reached. */
+ bank->next = (id + 1) > bank->max ? bank->min : id + 1;
+
+ /* the lower 3 bits are the bankid */
+ *qpn = (id << 3) | bankid;
+
+ return 0;
+}
static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
+ struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
unsigned long num = 0;
+ u8 bankid;
int ret;
if (hr_qp->ibqp.qp_type == IB_QPT_GSI) {
@@ -169,13 +209,21 @@ static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
hr_qp->doorbell_qpn = 1;
} else {
- ret = hns_roce_bitmap_alloc_range(&hr_dev->qp_table.bitmap,
- 1, 1, &num);
+ mutex_lock(&qp_table->bank_mutex);
+ bankid = get_least_load_bankid_for_qp(qp_table->bank);
+
+ ret = alloc_qpn_with_bankid(&qp_table->bank[bankid], bankid,
+ &num);
if (ret) {
- ibdev_err(&hr_dev->ib_dev, "Failed to alloc bitmap\n");
- return -ENOMEM;
+ ibdev_err(&hr_dev->ib_dev,
+ "failed to alloc QPN, ret = %d\n", ret);
+ mutex_unlock(&qp_table->bank_mutex);
+ return ret;
}
+ qp_table->bank[bankid].inuse++;
+ mutex_unlock(&qp_table->bank_mutex);
+
hr_qp->doorbell_qpn = (u32)num;
}
@@ -286,7 +334,7 @@ static int alloc_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
}
}
- if (hr_dev->caps.sccc_sz) {
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
/* Alloc memory for SCC CTX */
ret = hns_roce_table_get(hr_dev, &qp_table->sccc_table,
hr_qp->qpn);
@@ -340,9 +388,15 @@ static void free_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
}
+static inline u8 get_qp_bankid(unsigned long qpn)
+{
+ /* The lower 3 bits of QPN are used to hash to different banks */
+ return (u8)(qpn & GENMASK(2, 0));
+}
+
static void free_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
- struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
+ u8 bankid;
if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
return;
@@ -350,7 +404,13 @@ static void free_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
if (hr_qp->qpn < hr_dev->caps.reserved_qps)
return;
- hns_roce_bitmap_free_range(&qp_table->bitmap, hr_qp->qpn, 1, BITMAP_RR);
+ bankid = get_qp_bankid(hr_qp->qpn);
+
+ ida_free(&hr_dev->qp_table.bank[bankid].ida, hr_qp->qpn >> 3);
+
+ mutex_lock(&hr_dev->qp_table.bank_mutex);
+ hr_dev->qp_table.bank[bankid].inuse--;
+ mutex_unlock(&hr_dev->qp_table.bank_mutex);
}
static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
@@ -404,37 +464,43 @@ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
return 0;
}
-static int set_extend_sge_param(struct hns_roce_dev *hr_dev, u32 sq_wqe_cnt,
- struct hns_roce_qp *hr_qp,
- struct ib_qp_cap *cap)
+static u32 get_wqe_ext_sge_cnt(struct hns_roce_qp *qp)
{
- u32 cnt;
+ /* GSI/UD QP only has extended sge */
+ if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_UD)
+ return qp->sq.max_gs;
- cnt = max(1U, cap->max_send_sge);
- if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
- hr_qp->sq.max_gs = roundup_pow_of_two(cnt);
- hr_qp->sge.sge_cnt = 0;
+ if (qp->sq.max_gs > HNS_ROCE_SGE_IN_WQE)
+ return qp->sq.max_gs - HNS_ROCE_SGE_IN_WQE;
- return 0;
- }
+ return 0;
+}
- hr_qp->sq.max_gs = cnt;
+static void set_ext_sge_param(struct hns_roce_dev *hr_dev, u32 sq_wqe_cnt,
+ struct hns_roce_qp *hr_qp, struct ib_qp_cap *cap)
+{
+ u32 total_sge_cnt;
+ u32 wqe_sge_cnt;
- /* UD sqwqe's sge use extend sge */
- if (hr_qp->ibqp.qp_type == IB_QPT_GSI ||
- hr_qp->ibqp.qp_type == IB_QPT_UD) {
- cnt = roundup_pow_of_two(sq_wqe_cnt * hr_qp->sq.max_gs);
- } else if (hr_qp->sq.max_gs > HNS_ROCE_SGE_IN_WQE) {
- cnt = roundup_pow_of_two(sq_wqe_cnt *
- (hr_qp->sq.max_gs - HNS_ROCE_SGE_IN_WQE));
- } else {
- cnt = 0;
+ hr_qp->sge.sge_shift = HNS_ROCE_SGE_SHIFT;
+
+ if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
+ hr_qp->sq.max_gs = HNS_ROCE_SGE_IN_WQE;
+ return;
}
- hr_qp->sge.sge_shift = HNS_ROCE_SGE_SHIFT;
- hr_qp->sge.sge_cnt = cnt;
+ hr_qp->sq.max_gs = max(1U, cap->max_send_sge);
- return 0;
+ wqe_sge_cnt = get_wqe_ext_sge_cnt(hr_qp);
+
+ /* If the number of extended sges is not zero, they MUST occupy
+ * at least HNS_HW_PAGE_SIZE worth of buffer space.
+ */
+ if (wqe_sge_cnt) {
+ total_sge_cnt = roundup_pow_of_two(sq_wqe_cnt * wqe_sge_cnt);
+ hr_qp->sge.sge_cnt = max(total_sge_cnt,
+ (u32)HNS_HW_PAGE_SIZE / HNS_ROCE_SGE_SIZE);
+ }
}
static int check_sq_size_with_integrity(struct hns_roce_dev *hr_dev,
@@ -447,12 +513,12 @@ static int check_sq_size_with_integrity(struct hns_roce_dev *hr_dev,
/* Sanity check SQ size before proceeding */
if (ucmd->log_sq_stride > max_sq_stride ||
ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
- ibdev_err(&hr_dev->ib_dev, "Failed to check SQ stride size\n");
+ ibdev_err(&hr_dev->ib_dev, "failed to check SQ stride size.\n");
return -EINVAL;
}
if (cap->max_send_sge > hr_dev->caps.max_sq_sg) {
- ibdev_err(&hr_dev->ib_dev, "Failed to check SQ SGE size %d\n",
+ ibdev_err(&hr_dev->ib_dev, "failed to check SQ SGE size %u.\n",
cap->max_send_sge);
return -EINVAL;
}
@@ -479,9 +545,7 @@ static int set_user_sq_size(struct hns_roce_dev *hr_dev,
return ret;
}
- ret = set_extend_sge_param(hr_dev, cnt, hr_qp, cap);
- if (ret)
- return ret;
+ set_ext_sge_param(hr_dev, cnt, hr_qp, cap);
hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
hr_qp->sq.wqe_cnt = cnt;
@@ -546,7 +610,6 @@ static int set_kernel_sq_size(struct hns_roce_dev *hr_dev,
{
struct ib_device *ibdev = &hr_dev->ib_dev;
u32 cnt;
- int ret;
if (!cap->max_send_wr || cap->max_send_wr > hr_dev->caps.max_wqes ||
cap->max_send_sge > hr_dev->caps.max_sq_sg) {
@@ -558,7 +621,7 @@ static int set_kernel_sq_size(struct hns_roce_dev *hr_dev,
cnt = roundup_pow_of_two(max(cap->max_send_wr, hr_dev->caps.min_wqes));
if (cnt > hr_dev->caps.max_wqes) {
- ibdev_err(ibdev, "failed to check WQE num, WQE num = %d.\n",
+ ibdev_err(ibdev, "failed to check WQE num, WQE num = %u.\n",
cnt);
return -EINVAL;
}
@@ -566,9 +629,7 @@ static int set_kernel_sq_size(struct hns_roce_dev *hr_dev,
hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
hr_qp->sq.wqe_cnt = cnt;
- ret = set_extend_sge_param(hr_dev, cnt, hr_qp, cap);
- if (ret)
- return ret;
+ set_ext_sge_param(hr_dev, cnt, hr_qp, cap);
/* sync the parameters of kernel QP to user's configuration */
cap->max_send_wr = cnt;
@@ -725,13 +786,17 @@ static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
struct ib_device *ibdev = &hr_dev->ib_dev;
int ret;
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SDI_MODE)
+ hr_qp->en_flags |= HNS_ROCE_QP_CAP_OWNER_DB;
+
if (udata) {
if (user_qp_has_sdb(hr_dev, init_attr, udata, resp, ucmd)) {
ret = hns_roce_db_map_user(uctx, udata, ucmd->sdb_addr,
&hr_qp->sdb);
if (ret) {
ibdev_err(ibdev,
- "Failed to map user SQ doorbell\n");
+ "failed to map user SQ doorbell, ret = %d.\n",
+ ret);
goto err_out;
}
hr_qp->en_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB;
@@ -743,7 +808,8 @@ static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
&hr_qp->rdb);
if (ret) {
ibdev_err(ibdev,
- "Failed to map user RQ doorbell\n");
+ "failed to map user RQ doorbell, ret = %d.\n",
+ ret);
goto err_sdb;
}
hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
@@ -760,7 +826,8 @@ static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0);
if (ret) {
ibdev_err(ibdev,
- "Failed to alloc kernel RQ doorbell\n");
+ "failed to alloc kernel RQ doorbell, ret = %d.\n",
+ ret);
goto err_out;
}
*hr_qp->rdb.db_record = 0;
@@ -803,14 +870,14 @@ static int alloc_kernel_wrid(struct hns_roce_dev *hr_dev,
sq_wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64), GFP_KERNEL);
if (ZERO_OR_NULL_PTR(sq_wrid)) {
- ibdev_err(ibdev, "Failed to alloc SQ wrid\n");
+ ibdev_err(ibdev, "failed to alloc SQ wrid.\n");
return -ENOMEM;
}
if (hr_qp->rq.wqe_cnt) {
rq_wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64), GFP_KERNEL);
if (ZERO_OR_NULL_PTR(rq_wrid)) {
- ibdev_err(ibdev, "Failed to alloc RQ wrid\n");
+ ibdev_err(ibdev, "failed to alloc RQ wrid.\n");
ret = -ENOMEM;
goto err_sq;
}
@@ -860,29 +927,25 @@ static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
}
if (udata) {
- if (ib_copy_from_udata(ucmd, udata, sizeof(*ucmd))) {
- ibdev_err(ibdev, "Failed to copy QP ucmd\n");
- return -EFAULT;
+ ret = ib_copy_from_udata(ucmd, udata,
+ min(udata->inlen, sizeof(*ucmd)));
+ if (ret) {
+ ibdev_err(ibdev,
+ "failed to copy QP ucmd, ret = %d\n", ret);
+ return ret;
}
ret = set_user_sq_size(hr_dev, &init_attr->cap, hr_qp, ucmd);
if (ret)
- ibdev_err(ibdev, "Failed to set user SQ size\n");
+ ibdev_err(ibdev,
+ "failed to set user SQ size, ret = %d.\n",
+ ret);
} else {
- if (init_attr->create_flags &
- IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
- ibdev_err(ibdev, "Failed to check multicast loopback\n");
- return -EINVAL;
- }
-
- if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) {
- ibdev_err(ibdev, "Failed to check ipoib ud lso\n");
- return -EINVAL;
- }
-
ret = set_kernel_sq_size(hr_dev, &init_attr->cap, hr_qp);
if (ret)
- ibdev_err(ibdev, "Failed to set kernel SQ size\n");
+ ibdev_err(ibdev,
+ "failed to set kernel SQ size, ret = %d.\n",
+ ret);
}
return ret;
@@ -906,47 +969,53 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
hr_qp->state = IB_QPS_RESET;
hr_qp->flush_flag = 0;
+ if (init_attr->create_flags)
+ return -EOPNOTSUPP;
+
ret = set_qp_param(hr_dev, hr_qp, init_attr, udata, &ucmd);
if (ret) {
- ibdev_err(ibdev, "Failed to set QP param\n");
+ ibdev_err(ibdev, "failed to set QP param, ret = %d.\n", ret);
return ret;
}
if (!udata) {
ret = alloc_kernel_wrid(hr_dev, hr_qp);
if (ret) {
- ibdev_err(ibdev, "Failed to alloc wrid\n");
+ ibdev_err(ibdev, "failed to alloc wrid, ret = %d.\n",
+ ret);
return ret;
}
}
ret = alloc_qp_db(hr_dev, hr_qp, init_attr, udata, &ucmd, &resp);
if (ret) {
- ibdev_err(ibdev, "Failed to alloc QP doorbell\n");
+ ibdev_err(ibdev, "failed to alloc QP doorbell, ret = %d.\n",
+ ret);
goto err_wrid;
}
ret = alloc_qp_buf(hr_dev, hr_qp, init_attr, udata, ucmd.buf_addr);
if (ret) {
- ibdev_err(ibdev, "Failed to alloc QP buffer\n");
+ ibdev_err(ibdev, "failed to alloc QP buffer, ret = %d.\n", ret);
goto err_db;
}
ret = alloc_qpn(hr_dev, hr_qp);
if (ret) {
- ibdev_err(ibdev, "Failed to alloc QPN\n");
+ ibdev_err(ibdev, "failed to alloc QPN, ret = %d.\n", ret);
goto err_buf;
}
ret = alloc_qpc(hr_dev, hr_qp);
if (ret) {
- ibdev_err(ibdev, "Failed to alloc QP context\n");
+ ibdev_err(ibdev, "failed to alloc QP context, ret = %d.\n",
+ ret);
goto err_qpn;
}
ret = hns_roce_qp_store(hr_dev, hr_qp, init_attr);
if (ret) {
- ibdev_err(ibdev, "Failed to store QP\n");
+ ibdev_err(ibdev, "failed to store QP, ret = %d.\n", ret);
goto err_qpc;
}
@@ -1003,6 +1072,30 @@ void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
kfree(hr_qp);
}
+static int check_qp_type(struct hns_roce_dev *hr_dev, enum ib_qp_type type,
+ bool is_user)
+{
+ switch (type) {
+ case IB_QPT_UD:
+ if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08 &&
+ is_user)
+ goto out;
+ fallthrough;
+ case IB_QPT_RC:
+ case IB_QPT_GSI:
+ break;
+ default:
+ goto out;
+ }
+
+ return 0;
+
+out:
+ ibdev_err(&hr_dev->ib_dev, "not support QP type %d\n", type);
+
+ return -EOPNOTSUPP;
+}
+
struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata)
@@ -1012,15 +1105,9 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
struct hns_roce_qp *hr_qp;
int ret;
- switch (init_attr->qp_type) {
- case IB_QPT_RC:
- case IB_QPT_GSI:
- break;
- default:
- ibdev_err(ibdev, "not support QP type %d\n",
- init_attr->qp_type);
- return ERR_PTR(-EOPNOTSUPP);
- }
+ ret = check_qp_type(hr_dev, init_attr->qp_type, !!udata);
+ if (ret)
+ return ERR_PTR(ret);
hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
if (!hr_qp)
@@ -1035,10 +1122,11 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
if (ret) {
ibdev_err(ibdev, "Create QP type 0x%x failed(%d)\n",
init_attr->qp_type, ret);
- ibdev_err(ibdev, "Create GSI QP failed!\n");
+
kfree(hr_qp);
return ERR_PTR(ret);
}
+
return &hr_qp->ibqp;
}
@@ -1091,9 +1179,8 @@ static int hns_roce_check_qp_attr(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if ((attr_mask & IB_QP_PORT) &&
(attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) {
- ibdev_err(&hr_dev->ib_dev,
- "attr port_num invalid.attr->port_num=%d\n",
- attr->port_num);
+ ibdev_err(&hr_dev->ib_dev, "invalid attr, port_num = %u.\n",
+ attr->port_num);
return -EINVAL;
}
@@ -1101,8 +1188,8 @@ static int hns_roce_check_qp_attr(struct ib_qp *ibqp, struct ib_qp_attr *attr,
p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) {
ibdev_err(&hr_dev->ib_dev,
- "attr pkey_index invalid.attr->pkey_index=%d\n",
- attr->pkey_index);
+ "invalid attr, pkey_index = %u.\n",
+ attr->pkey_index);
return -EINVAL;
}
}
@@ -1110,16 +1197,16 @@ static int hns_roce_check_qp_attr(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
ibdev_err(&hr_dev->ib_dev,
- "attr max_rd_atomic invalid.attr->max_rd_atomic=%d\n",
- attr->max_rd_atomic);
+ "invalid attr, max_rd_atomic = %u.\n",
+ attr->max_rd_atomic);
return -EINVAL;
}
if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) {
ibdev_err(&hr_dev->ib_dev,
- "attr max_dest_rd_atomic invalid.attr->max_dest_rd_atomic=%d\n",
- attr->max_dest_rd_atomic);
+ "invalid attr, max_dest_rd_atomic = %u.\n",
+ attr->max_dest_rd_atomic);
return -EINVAL;
}
@@ -1244,22 +1331,22 @@ static inline void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
return hns_roce_buf_offset(hr_qp->mtr.kmem, offset);
}
-void *hns_roce_get_recv_wqe(struct hns_roce_qp *hr_qp, int n)
+void *hns_roce_get_recv_wqe(struct hns_roce_qp *hr_qp, unsigned int n)
{
return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift));
}
-void *hns_roce_get_send_wqe(struct hns_roce_qp *hr_qp, int n)
+void *hns_roce_get_send_wqe(struct hns_roce_qp *hr_qp, unsigned int n)
{
return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift));
}
-void *hns_roce_get_extend_sge(struct hns_roce_qp *hr_qp, int n)
+void *hns_roce_get_extend_sge(struct hns_roce_qp *hr_qp, unsigned int n)
{
return get_wqe(hr_qp, hr_qp->sge.offset + (n << hr_qp->sge.sge_shift));
}
-bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
+bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, u32 nreq,
struct ib_cq *ib_cq)
{
struct hns_roce_cq *hr_cq;
@@ -1280,22 +1367,25 @@ bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
{
struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
- int reserved_from_top = 0;
- int reserved_from_bot;
- int ret;
+ unsigned int reserved_from_bot;
+ unsigned int i;
mutex_init(&qp_table->scc_mutex);
+ mutex_init(&qp_table->bank_mutex);
xa_init(&hr_dev->qp_table_xa);
reserved_from_bot = hr_dev->caps.reserved_qps;
- ret = hns_roce_bitmap_init(&qp_table->bitmap, hr_dev->caps.num_qps,
- hr_dev->caps.num_qps - 1, reserved_from_bot,
- reserved_from_top);
- if (ret) {
- dev_err(hr_dev->dev, "qp bitmap init failed!error=%d\n",
- ret);
- return ret;
+ for (i = 0; i < reserved_from_bot; i++) {
+ hr_dev->qp_table.bank[get_qp_bankid(i)].inuse++;
+ hr_dev->qp_table.bank[get_qp_bankid(i)].min++;
+ }
+
+ for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++) {
+ ida_init(&hr_dev->qp_table.bank[i].ida);
+ hr_dev->qp_table.bank[i].max = hr_dev->caps.num_qps /
+ HNS_ROCE_QP_BANK_NUM - 1;
+ hr_dev->qp_table.bank[i].next = hr_dev->qp_table.bank[i].min;
}
return 0;
@@ -1303,5 +1393,8 @@ int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
{
- hns_roce_bitmap_cleanup(&hr_dev->qp_table.bitmap);
+ int i;
+
+ for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++)
+ ida_destroy(&hr_dev->qp_table.bank[i].ida);
}
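
The qp.c conversion above replaces one global QPN bitmap with eight per-bank IDAs: the low three bits of a QPN name the bank, the IDA allocates the remaining bits, and each bank remembers 'next' so QPNs keep increasing before wrapping back to 'min'. A condensed sketch of that allocation pattern, with the bank bookkeeping simplified:

#include <linux/idr.h>
#include <linux/types.h>

struct example_bank {
	struct ida ida;
	u32 min, max, next;
};

static int example_alloc_qpn(struct example_bank *b, u8 bankid,
			     unsigned long *qpn)
{
	/* Prefer ids >= next so QPNs grow monotonically ... */
	int id = ida_alloc_range(&b->ida, b->next, b->max, GFP_KERNEL);

	/* ... and wrap back to min only once the top is exhausted. */
	if (id < 0)
		id = ida_alloc_range(&b->ida, b->min, b->max, GFP_KERNEL);
	if (id < 0)
		return id;

	b->next = (id + 1) > b->max ? b->min : id + 1;
	*qpn = ((unsigned long)id << 3) | bankid; /* low 3 bits = bank */
	return 0;
}

Freeing mirrors free_qpn() above: recover the bank from qpn & GENMASK(2, 0), then ida_free() the id qpn >> 3.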
diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
index 8caf74e44efd..c4ae57e4173a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
@@ -4,7 +4,6 @@
*/
#include <rdma/ib_umem.h>
-#include <rdma/hns-abi.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
@@ -93,7 +92,8 @@ static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
ret = hns_roce_mtr_find(hr_dev, &srq->buf_mtr, 0, mtts_wqe,
ARRAY_SIZE(mtts_wqe), &dma_handle_wqe);
if (ret < 1) {
- ibdev_err(ibdev, "Failed to find mtr for SRQ WQE\n");
+ ibdev_err(ibdev, "failed to find mtr for SRQ WQE, ret = %d.\n",
+ ret);
return -ENOBUFS;
}
@@ -101,32 +101,34 @@ static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
ret = hns_roce_mtr_find(hr_dev, &srq->idx_que.mtr, 0, mtts_idx,
ARRAY_SIZE(mtts_idx), &dma_handle_idx);
if (ret < 1) {
- ibdev_err(ibdev, "Failed to find mtr for SRQ idx\n");
+ ibdev_err(ibdev, "failed to find mtr for SRQ idx, ret = %d.\n",
+ ret);
return -ENOBUFS;
}
ret = hns_roce_bitmap_alloc(&srq_table->bitmap, &srq->srqn);
if (ret) {
- ibdev_err(ibdev, "Failed to alloc SRQ number, err %d\n", ret);
+ ibdev_err(ibdev,
+ "failed to alloc SRQ number, ret = %d.\n", ret);
return -ENOMEM;
}
ret = hns_roce_table_get(hr_dev, &srq_table->table, srq->srqn);
if (ret) {
- ibdev_err(ibdev, "Failed to get SRQC table, err %d\n", ret);
+ ibdev_err(ibdev, "failed to get SRQC table, ret = %d.\n", ret);
goto err_out;
}
ret = xa_err(xa_store(&srq_table->xa, srq->srqn, srq, GFP_KERNEL));
if (ret) {
- ibdev_err(ibdev, "Failed to store SRQC, err %d\n", ret);
+ ibdev_err(ibdev, "failed to store SRQC, ret = %d.\n", ret);
goto err_put;
}
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR_OR_NULL(mailbox)) {
ret = -ENOMEM;
- ibdev_err(ibdev, "Failed to alloc mailbox for SRQC\n");
+ ibdev_err(ibdev, "failed to alloc mailbox for SRQC.\n");
goto err_xa;
}
@@ -137,7 +139,7 @@ static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
ret = hns_roce_hw_create_srq(hr_dev, mailbox, srq->srqn);
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
if (ret) {
- ibdev_err(ibdev, "Failed to config SRQC, err %d\n", ret);
+ ibdev_err(ibdev, "failed to config SRQC, ret = %d.\n", ret);
goto err_xa;
}
@@ -198,7 +200,8 @@ static int alloc_srq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
hr_dev->caps.srqwqe_ba_pg_sz +
HNS_HW_PAGE_SHIFT, udata, addr);
if (err)
- ibdev_err(ibdev, "Failed to alloc SRQ buf mtr, err %d\n", err);
+ ibdev_err(ibdev,
+ "failed to alloc SRQ buf mtr, ret = %d.\n", err);
return err;
}
@@ -229,18 +232,18 @@ static int alloc_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
hr_dev->caps.idx_ba_pg_sz + HNS_HW_PAGE_SHIFT,
udata, addr);
if (err) {
- ibdev_err(ibdev, "Failed to alloc SRQ idx mtr, err %d\n", err);
+ ibdev_err(ibdev,
+ "failed to alloc SRQ idx mtr, ret = %d.\n", err);
return err;
}
if (!udata) {
idx_que->bitmap = bitmap_zalloc(srq->wqe_cnt, GFP_KERNEL);
if (!idx_que->bitmap) {
- ibdev_err(ibdev, "Failed to alloc SRQ idx bitmap\n");
+ ibdev_err(ibdev, "failed to alloc SRQ idx bitmap.\n");
err = -ENOMEM;
goto err_idx_mtr;
}
-
}
return 0;
@@ -288,6 +291,10 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
int ret;
u32 cqn;
+ if (init_attr->srq_type != IB_SRQT_BASIC &&
+ init_attr->srq_type != IB_SRQT_XRC)
+ return -EOPNOTSUPP;
+
/* Check the actual SRQ wqe and SRQ sge num */
if (init_attr->attr.max_wr >= hr_dev->caps.max_srq_wrs ||
init_attr->attr.max_sge > hr_dev->caps.max_srq_sges)
@@ -300,9 +307,10 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
srq->max_gs = init_attr->attr.max_sge;
if (udata) {
- ret = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
+ ret = ib_copy_from_udata(&ucmd, udata,
+ min(udata->inlen, sizeof(ucmd)));
if (ret) {
- ibdev_err(ibdev, "Failed to copy SRQ udata, err %d\n",
+ ibdev_err(ibdev, "failed to copy SRQ udata, ret = %d.\n",
ret);
return ret;
}
@@ -310,20 +318,21 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
ret = alloc_srq_buf(hr_dev, srq, udata, ucmd.buf_addr);
if (ret) {
- ibdev_err(ibdev, "Failed to alloc SRQ buffer, err %d\n", ret);
+ ibdev_err(ibdev,
+ "failed to alloc SRQ buffer, ret = %d.\n", ret);
return ret;
}
ret = alloc_srq_idx(hr_dev, srq, udata, ucmd.que_addr);
if (ret) {
- ibdev_err(ibdev, "Failed to alloc SRQ idx, err %d\n", ret);
+ ibdev_err(ibdev, "failed to alloc SRQ idx, ret = %d.\n", ret);
goto err_buf_alloc;
}
if (!udata) {
ret = alloc_srq_wrid(hr_dev, srq);
if (ret) {
- ibdev_err(ibdev, "Failed to alloc SRQ wrid, err %d\n",
+ ibdev_err(ibdev, "failed to alloc SRQ wrid, ret = %d.\n",
ret);
goto err_idx_alloc;
}
@@ -335,7 +344,8 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
ret = alloc_srqc(hr_dev, srq, to_hr_pd(ib_srq->pd)->pdn, cqn, 0, 0);
if (ret) {
- ibdev_err(ibdev, "Failed to alloc SRQ context, err %d\n", ret);
+ ibdev_err(ibdev,
+ "failed to alloc SRQ context, ret = %d.\n", ret);
goto err_wrid_alloc;
}
@@ -343,11 +353,10 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
resp.srqn = srq->srqn;
if (udata) {
- if (ib_copy_to_udata(udata, &resp,
- min(udata->outlen, sizeof(resp)))) {
- ret = -EFAULT;
+ ret = ib_copy_to_udata(udata, &resp,
+ min(udata->outlen, sizeof(resp)));
+ if (ret)
goto err_srqc_alloc;
- }
}
return 0;
diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
index 832b80de004f..6a79502c8b53 100644
--- a/drivers/infiniband/hw/i40iw/i40iw.h
+++ b/drivers/infiniband/hw/i40iw/i40iw.h
@@ -274,7 +274,6 @@ struct i40iw_device {
u8 max_sge;
u8 iw_status;
u8 send_term_ok;
- bool push_mode; /* Initialized from parameter passed to driver */
/* x710 specific */
struct mutex pbl_mutex;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 3053c345a5a3..9acc0ecc9a43 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -2426,7 +2426,7 @@ static void i40iw_handle_rst_pkt(struct i40iw_cm_node *cm_node,
}
break;
case I40IW_CM_STATE_MPAREQ_RCVD:
- atomic_add_return(1, &cm_node->passive_state);
+ atomic_inc(&cm_node->passive_state);
break;
case I40IW_CM_STATE_ESTABLISHED:
case I40IW_CM_STATE_SYN_RCVD:
@@ -3020,7 +3020,7 @@ static int i40iw_cm_reject(struct i40iw_cm_node *cm_node, const void *pdata, u8
i40iw_cleanup_retrans_entry(cm_node);
if (!loopback) {
- passive_state = atomic_add_return(1, &cm_node->passive_state);
+ passive_state = atomic_inc_return(&cm_node->passive_state);
if (passive_state == I40IW_SEND_RESET_EVENT) {
cm_node->state = I40IW_CM_STATE_CLOSED;
i40iw_rem_ref_cm_node(cm_node);
@@ -3678,7 +3678,7 @@ int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
return -EINVAL;
}
- passive_state = atomic_add_return(1, &cm_node->passive_state);
+ passive_state = atomic_inc_return(&cm_node->passive_state);
if (passive_state == I40IW_SEND_RESET_EVENT) {
i40iw_rem_ref_cm_node(cm_node);
return -ECONNRESET;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
index 86d3f8aff329..c943d491b72b 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
@@ -820,46 +820,6 @@ static enum i40iw_status_code i40iw_sc_poll_for_cqp_op_done(
}
/**
- * i40iw_sc_manage_push_page - Handle push page
- * @cqp: struct for cqp hw
- * @info: push page info
- * @scratch: u64 saved to be used during cqp completion
- * @post_sq: flag for cqp db to ring
- */
-static enum i40iw_status_code i40iw_sc_manage_push_page(
- struct i40iw_sc_cqp *cqp,
- struct i40iw_cqp_manage_push_page_info *info,
- u64 scratch,
- bool post_sq)
-{
- u64 *wqe;
- u64 header;
-
- if (info->push_idx >= I40IW_MAX_PUSH_PAGE_COUNT)
- return I40IW_ERR_INVALID_PUSH_PAGE_INDEX;
-
- wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
- if (!wqe)
- return I40IW_ERR_RING_FULL;
-
- set_64bit_val(wqe, 16, info->qs_handle);
-
- header = LS_64(info->push_idx, I40IW_CQPSQ_MPP_PPIDX) |
- LS_64(I40IW_CQP_OP_MANAGE_PUSH_PAGES, I40IW_CQPSQ_OPCODE) |
- LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) |
- LS_64(info->free_page, I40IW_CQPSQ_MPP_FREE_PAGE);
-
- i40iw_insert_wqe_hdr(wqe, header);
-
- i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_PUSH_PAGES WQE",
- wqe, I40IW_CQP_WQE_SIZE * 8);
-
- if (post_sq)
- i40iw_sc_cqp_post_sq(cqp);
- return 0;
-}
-
-/**
* i40iw_sc_manage_hmc_pm_func_table - manage of function table
* @cqp: struct for cqp hw
* @scratch: u64 saved to be used during cqp completion
@@ -2859,9 +2819,7 @@ static enum i40iw_status_code i40iw_sc_qp_setctx(
LS_64(qp->rcv_tph_en, I40IWQPC_RCVTPHEN) |
LS_64(qp->xmit_tph_en, I40IWQPC_XMITTPHEN) |
LS_64(qp->rq_tph_en, I40IWQPC_RQTPHEN) |
- LS_64(qp->sq_tph_en, I40IWQPC_SQTPHEN) |
- LS_64(info->push_idx, I40IWQPC_PPIDX) |
- LS_64(info->push_mode_en, I40IWQPC_PMENA);
+ LS_64(qp->sq_tph_en, I40IWQPC_SQTPHEN);
set_64bit_val(qp_ctx, 8, qp->sq_pa);
set_64bit_val(qp_ctx, 16, qp->rq_pa);
@@ -4291,13 +4249,6 @@ static enum i40iw_status_code i40iw_exec_cqp_cmd(struct i40iw_sc_dev *dev,
pcmdinfo->in.u.add_arp_cache_entry.scratch,
pcmdinfo->post_sq);
break;
- case OP_MANAGE_PUSH_PAGE:
- status = i40iw_sc_manage_push_page(
- pcmdinfo->in.u.manage_push_page.cqp,
- &pcmdinfo->in.u.manage_push_page.info,
- pcmdinfo->in.u.manage_push_page.scratch,
- pcmdinfo->post_sq);
- break;
case OP_UPDATE_PE_SDS:
/* case I40IW_CQP_OP_UPDATE_PE_SDS */
status = i40iw_update_pe_sds(
@@ -5098,7 +5049,7 @@ void i40iw_vsi_stats_free(struct i40iw_sc_vsi *vsi)
i40iw_hw_stats_stop_timer(vsi);
}
-static struct i40iw_cqp_ops iw_cqp_ops = {
+static const struct i40iw_cqp_ops iw_cqp_ops = {
.cqp_init = i40iw_sc_cqp_init,
.cqp_create = i40iw_sc_cqp_create,
.cqp_post_sq = i40iw_sc_cqp_post_sq,
@@ -5107,7 +5058,7 @@ static struct i40iw_cqp_ops iw_cqp_ops = {
.poll_for_cqp_op_done = i40iw_sc_poll_for_cqp_op_done
};
-static struct i40iw_ccq_ops iw_ccq_ops = {
+static const struct i40iw_ccq_ops iw_ccq_ops = {
.ccq_init = i40iw_sc_ccq_init,
.ccq_create = i40iw_sc_ccq_create,
.ccq_destroy = i40iw_sc_ccq_destroy,
@@ -5116,7 +5067,7 @@ static struct i40iw_ccq_ops iw_ccq_ops = {
.ccq_arm = i40iw_sc_ccq_arm
};
-static struct i40iw_ceq_ops iw_ceq_ops = {
+static const struct i40iw_ceq_ops iw_ceq_ops = {
.ceq_init = i40iw_sc_ceq_init,
.ceq_create = i40iw_sc_ceq_create,
.cceq_create_done = i40iw_sc_cceq_create_done,
@@ -5126,7 +5077,7 @@ static struct i40iw_ceq_ops iw_ceq_ops = {
.process_ceq = i40iw_sc_process_ceq
};
-static struct i40iw_aeq_ops iw_aeq_ops = {
+static const struct i40iw_aeq_ops iw_aeq_ops = {
.aeq_init = i40iw_sc_aeq_init,
.aeq_create = i40iw_sc_aeq_create,
.aeq_destroy = i40iw_sc_aeq_destroy,
@@ -5137,11 +5088,11 @@ static struct i40iw_aeq_ops iw_aeq_ops = {
};
/* iwarp pd ops */
-static struct i40iw_pd_ops iw_pd_ops = {
+static const struct i40iw_pd_ops iw_pd_ops = {
.pd_init = i40iw_sc_pd_init,
};
-static struct i40iw_priv_qp_ops iw_priv_qp_ops = {
+static const struct i40iw_priv_qp_ops iw_priv_qp_ops = {
.qp_init = i40iw_sc_qp_init,
.qp_create = i40iw_sc_qp_create,
.qp_modify = i40iw_sc_qp_modify,
@@ -5156,14 +5107,14 @@ static struct i40iw_priv_qp_ops iw_priv_qp_ops = {
.iw_mr_fast_register = i40iw_sc_mr_fast_register
};
-static struct i40iw_priv_cq_ops iw_priv_cq_ops = {
+static const struct i40iw_priv_cq_ops iw_priv_cq_ops = {
.cq_init = i40iw_sc_cq_init,
.cq_create = i40iw_sc_cq_create,
.cq_destroy = i40iw_sc_cq_destroy,
.cq_modify = i40iw_sc_cq_modify,
};
-static struct i40iw_mr_ops iw_mr_ops = {
+static const struct i40iw_mr_ops iw_mr_ops = {
.alloc_stag = i40iw_sc_alloc_stag,
.mr_reg_non_shared = i40iw_sc_mr_reg_non_shared,
.mr_reg_shared = i40iw_sc_mr_reg_shared,
@@ -5172,8 +5123,7 @@ static struct i40iw_mr_ops iw_mr_ops = {
.mw_alloc = i40iw_sc_mw_alloc
};
-static struct i40iw_cqp_misc_ops iw_cqp_misc_ops = {
- .manage_push_page = i40iw_sc_manage_push_page,
+static const struct i40iw_cqp_misc_ops iw_cqp_misc_ops = {
.manage_hmc_pm_func_table = i40iw_sc_manage_hmc_pm_func_table,
.set_hmc_resource_profile = i40iw_sc_set_hmc_resource_profile,
.commit_fpm_values = i40iw_sc_commit_fpm_values,
@@ -5195,7 +5145,7 @@ static struct i40iw_cqp_misc_ops iw_cqp_misc_ops = {
.update_resume_qp = i40iw_sc_resume_qp
};
-static struct i40iw_hmc_ops iw_hmc_ops = {
+static const struct i40iw_hmc_ops iw_hmc_ops = {
.init_iw_hmc = i40iw_sc_init_iw_hmc,
.parse_fpm_query_buf = i40iw_sc_parse_fpm_query_buf,
.configure_iw_fpm = i40iw_sc_configure_iw_fpm,
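
Constifying the i40iw ops tables above is small but useful hardening: a static const struct of function pointers lands in .rodata, where it cannot be modified after boot, and the pointer members in i40iw_sc_dev must gain const as well (see the i40iw_type.h hunk below). The pattern in isolation, with illustrative names:

struct example_ops {
	int (*start)(void *priv);
	void (*stop)(void *priv);
};

static int example_start(void *priv) { return 0; }
static void example_stop(void *priv) { }

/* 'static const' places the table in read-only memory; a stray write
 * to a member now faults instead of silently redirecting a callback.
 */
static const struct example_ops example_ops_table = {
	.start = example_start,
	.stop = example_stop,
};

struct example_dev {
	const struct example_ops *ops;	/* must be const-qualified too */
};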
diff --git a/drivers/infiniband/hw/i40iw/i40iw_d.h b/drivers/infiniband/hw/i40iw/i40iw_d.h
index e8367d67575d..86d5a33c57cc 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_d.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_d.h
@@ -40,11 +40,6 @@
#define I40IW_DB_ADDR_OFFSET (4 * 1024 * 1024 - 64 * 1024)
#define I40IW_VF_DB_ADDR_OFFSET (64 * 1024)
-#define I40IW_PUSH_OFFSET (4 * 1024 * 1024)
-#define I40IW_PF_FIRST_PUSH_PAGE_INDEX 16
-#define I40IW_VF_PUSH_OFFSET ((8 + 64) * 1024)
-#define I40IW_VF_FIRST_PUSH_PAGE_INDEX 2
-
#define I40IW_PE_DB_SIZE_4M 1
#define I40IW_PE_DB_SIZE_8M 2
@@ -402,7 +397,6 @@
#define I40IW_CQP_OP_MANAGE_LOC_MAC_IP_TABLE 0x0e
#define I40IW_CQP_OP_MANAGE_ARP 0x0f
#define I40IW_CQP_OP_MANAGE_VF_PBLE_BP 0x10
-#define I40IW_CQP_OP_MANAGE_PUSH_PAGES 0x11
#define I40IW_CQP_OP_QUERY_RDMA_FEATURES 0x12
#define I40IW_CQP_OP_UPLOAD_CONTEXT 0x13
#define I40IW_CQP_OP_ALLOCATE_LOC_MAC_IP_TABLE_ENTRY 0x14
@@ -843,7 +837,6 @@
#define I40IW_CQPSQ_MVPBP_PD_PLPBA_MASK \
(0x1fffffffffffffffULL << I40IW_CQPSQ_MVPBP_PD_PLPBA_SHIFT)
-/* Manage Push Page - MPP */
#define I40IW_INVALID_PUSH_PAGE_INDEX 0xffff
#define I40IW_CQPSQ_MPP_QS_HANDLE_SHIFT 0
@@ -1352,9 +1345,6 @@
#define I40IWQPSQ_ADDFRAGCNT_SHIFT 38
#define I40IWQPSQ_ADDFRAGCNT_MASK (0x7ULL << I40IWQPSQ_ADDFRAGCNT_SHIFT)
-#define I40IWQPSQ_PUSHWQE_SHIFT 56
-#define I40IWQPSQ_PUSHWQE_MASK (1ULL << I40IWQPSQ_PUSHWQE_SHIFT)
-
#define I40IWQPSQ_STREAMMODE_SHIFT 58
#define I40IWQPSQ_STREAMMODE_MASK (1ULL << I40IWQPSQ_STREAMMODE_SHIFT)
@@ -1740,18 +1730,17 @@ enum i40iw_alignment {
#define OP_MW_ALLOC 20
#define OP_QP_FLUSH_WQES 21
#define OP_ADD_ARP_CACHE_ENTRY 22
-#define OP_MANAGE_PUSH_PAGE 23
-#define OP_UPDATE_PE_SDS 24
-#define OP_MANAGE_HMC_PM_FUNC_TABLE 25
-#define OP_SUSPEND 26
-#define OP_RESUME 27
-#define OP_MANAGE_VF_PBLE_BP 28
-#define OP_QUERY_FPM_VALUES 29
-#define OP_COMMIT_FPM_VALUES 30
-#define OP_REQUESTED_COMMANDS 31
-#define OP_COMPLETED_COMMANDS 32
-#define OP_GEN_AE 33
-#define OP_QUERY_RDMA_FEATURES 34
-#define OP_SIZE_CQP_STAT_ARRAY 35
+#define OP_UPDATE_PE_SDS 23
+#define OP_MANAGE_HMC_PM_FUNC_TABLE 24
+#define OP_SUSPEND 25
+#define OP_RESUME 26
+#define OP_MANAGE_VF_PBLE_BP 27
+#define OP_QUERY_FPM_VALUES 28
+#define OP_COMMIT_FPM_VALUES 29
+#define OP_REQUESTED_COMMANDS 30
+#define OP_COMPLETED_COMMANDS 31
+#define OP_GEN_AE 32
+#define OP_QUERY_RDMA_FEATURES 33
+#define OP_SIZE_CQP_STAT_ARRAY 34
#endif
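
Removing OP_MANAGE_PUSH_PAGE renumbers every later OP_* value, which is safe only because these are kernel-internal indices, not ABI. OP_SIZE_CQP_STAT_ARRAY stays last as the sentinel that sizes the per-opcode statistics array, so the count shrinks automatically. The sentinel pattern in isolation (names are illustrative):

enum example_op {
	EX_OP_CREATE,
	EX_OP_MODIFY,
	EX_OP_DESTROY,
	EX_OP_COUNT,		/* sentinel: number of real ops above */
};

/* Adding or deleting an op resizes the array with no further edits. */
static u64 example_op_stats[EX_OP_COUNT];

static void example_count(enum example_op op)
{
	if (op < EX_OP_COUNT)
		example_op_stats[op]++;
}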
diff --git a/drivers/infiniband/hw/i40iw/i40iw_status.h b/drivers/infiniband/hw/i40iw/i40iw_status.h
index d1c5855bd8c3..36a19c4e5bba 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_status.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_status.h
@@ -61,7 +61,6 @@ enum i40iw_status_code {
I40IW_ERR_QUEUE_EMPTY = -22,
I40IW_ERR_INVALID_ALIGNMENT = -23,
I40IW_ERR_FLUSHED_QUEUE = -24,
- I40IW_ERR_INVALID_PUSH_PAGE_INDEX = -25,
I40IW_ERR_INVALID_INLINE_DATA_SIZE = -26,
I40IW_ERR_TIMEOUT = -27,
I40IW_ERR_OPCODE_MISMATCH = -28,
diff --git a/drivers/infiniband/hw/i40iw/i40iw_type.h b/drivers/infiniband/hw/i40iw/i40iw_type.h
index c3babf3cbb8e..394e182686cf 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_type.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_type.h
@@ -387,7 +387,6 @@ struct i40iw_sc_qp {
u8 *q2_buf;
u64 qp_compl_ctx;
u16 qs_handle;
- u16 push_idx;
u8 sq_tph_val;
u8 rq_tph_val;
u8 qp_state;
@@ -493,16 +492,16 @@ struct i40iw_sc_dev {
struct i40iw_sc_aeq *aeq;
struct i40iw_sc_ceq *ceq[I40IW_CEQ_MAX_COUNT];
struct i40iw_sc_cq *ccq;
- struct i40iw_cqp_ops *cqp_ops;
- struct i40iw_ccq_ops *ccq_ops;
- struct i40iw_ceq_ops *ceq_ops;
- struct i40iw_aeq_ops *aeq_ops;
- struct i40iw_pd_ops *iw_pd_ops;
- struct i40iw_priv_qp_ops *iw_priv_qp_ops;
- struct i40iw_priv_cq_ops *iw_priv_cq_ops;
- struct i40iw_mr_ops *mr_ops;
- struct i40iw_cqp_misc_ops *cqp_misc_ops;
- struct i40iw_hmc_ops *hmc_ops;
+ const struct i40iw_cqp_ops *cqp_ops;
+ const struct i40iw_ccq_ops *ccq_ops;
+ const struct i40iw_ceq_ops *ceq_ops;
+ const struct i40iw_aeq_ops *aeq_ops;
+ const struct i40iw_pd_ops *iw_pd_ops;
+ const struct i40iw_priv_qp_ops *iw_priv_qp_ops;
+ const struct i40iw_priv_cq_ops *iw_priv_cq_ops;
+ const struct i40iw_mr_ops *mr_ops;
+ const struct i40iw_cqp_misc_ops *cqp_misc_ops;
+ const struct i40iw_hmc_ops *hmc_ops;
struct i40iw_vchnl_if vchnl_if;
const struct i40iw_vf_cqp_ops *iw_vf_cqp_ops;
@@ -749,8 +748,6 @@ struct i40iw_qp_host_ctx_info {
struct i40iwarp_offload_info *iwarp_info;
u32 send_cq_num;
u32 rcv_cq_num;
- u16 push_idx;
- bool push_mode_en;
bool tcp_info_valid;
bool iwarp_info_valid;
bool err_rq_idx_valid;
@@ -937,12 +934,6 @@ struct i40iw_local_mac_ipaddr_entry_info {
u8 entry_idx;
};
-struct i40iw_cqp_manage_push_page_info {
- u32 push_idx;
- u16 qs_handle;
- u8 free_page;
-};
-
struct i40iw_qp_flush_info {
u16 sq_minor_code;
u16 sq_major_code;
@@ -1114,9 +1105,6 @@ struct i40iw_mr_ops {
};
struct i40iw_cqp_misc_ops {
- enum i40iw_status_code (*manage_push_page)(struct i40iw_sc_cqp *,
- struct i40iw_cqp_manage_push_page_info *,
- u64, bool);
enum i40iw_status_code (*manage_hmc_pm_func_table)(struct i40iw_sc_cqp *,
u64, u8, bool, bool);
enum i40iw_status_code (*set_hmc_resource_profile)(struct i40iw_sc_cqp *,
@@ -1254,12 +1242,6 @@ struct cqp_info {
} manage_vf_pble_bp;
struct {
- struct i40iw_sc_cqp *cqp;
- struct i40iw_cqp_manage_push_page_info info;
- u64 scratch;
- } manage_push_page;
-
- struct {
struct i40iw_sc_dev *dev;
struct i40iw_upload_context_info info;
u64 scratch;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_uk.c b/drivers/infiniband/hw/i40iw/i40iw_uk.c
index 8afa5a67a86b..c3633c9944db 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_uk.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_uk.c
@@ -115,17 +115,6 @@ void i40iw_qp_post_wr(struct i40iw_qp_uk *qp)
}
/**
- * i40iw_qp_ring_push_db - ring qp doorbell
- * @qp: hw qp ptr
- * @wqe_idx: wqe index
- */
-static void i40iw_qp_ring_push_db(struct i40iw_qp_uk *qp, u32 wqe_idx)
-{
- set_32bit_val(qp->push_db, 0, LS_32((wqe_idx >> 2), I40E_PFPE_WQEALLOC_WQE_DESC_INDEX) | qp->qp_id);
- qp->initial_ring.head = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
-}
-
-/**
* i40iw_qp_get_next_send_wqe - return next wqe ptr
* @qp: hw qp ptr
* @wqe_idx: return wqe index
@@ -426,7 +415,6 @@ static enum i40iw_status_code i40iw_inline_rdma_write(struct i40iw_qp_uk *qp,
u64 *wqe;
u8 *dest, *src;
struct i40iw_inline_rdma_write *op_info;
- u64 *push;
u64 header = 0;
u32 wqe_idx;
enum i40iw_status_code ret_code;
@@ -453,7 +441,6 @@ static enum i40iw_status_code i40iw_inline_rdma_write(struct i40iw_qp_uk *qp,
LS_64(I40IWQP_OP_RDMA_WRITE, I40IWQPSQ_OPCODE) |
LS_64(op_info->len, I40IWQPSQ_INLINEDATALEN) |
LS_64(1, I40IWQPSQ_INLINEDATAFLAG) |
- LS_64((qp->push_db ? 1 : 0), I40IWQPSQ_PUSHWQE) |
LS_64(read_fence, I40IWQPSQ_READFENCE) |
LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
@@ -475,14 +462,8 @@ static enum i40iw_status_code i40iw_inline_rdma_write(struct i40iw_qp_uk *qp,
set_64bit_val(wqe, 24, header);
- if (qp->push_db) {
- push = (u64 *)((uintptr_t)qp->push_wqe + (wqe_idx & 0x3) * 0x20);
- memcpy(push, wqe, (op_info->len > 16) ? op_info->len + 16 : 32);
- i40iw_qp_ring_push_db(qp, wqe_idx);
- } else {
- if (post_sq)
- i40iw_qp_post_wr(qp);
- }
+ if (post_sq)
+ i40iw_qp_post_wr(qp);
return 0;
}
@@ -507,7 +488,6 @@ static enum i40iw_status_code i40iw_inline_send(struct i40iw_qp_uk *qp,
enum i40iw_status_code ret_code;
bool read_fence = false;
u8 wqe_size;
- u64 *push;
op_info = &info->op.inline_send;
if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE)
@@ -526,7 +506,6 @@ static enum i40iw_status_code i40iw_inline_send(struct i40iw_qp_uk *qp,
LS_64(info->op_type, I40IWQPSQ_OPCODE) |
LS_64(op_info->len, I40IWQPSQ_INLINEDATALEN) |
LS_64(1, I40IWQPSQ_INLINEDATAFLAG) |
- LS_64((qp->push_db ? 1 : 0), I40IWQPSQ_PUSHWQE) |
LS_64(read_fence, I40IWQPSQ_READFENCE) |
LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
@@ -548,14 +527,8 @@ static enum i40iw_status_code i40iw_inline_send(struct i40iw_qp_uk *qp,
set_64bit_val(wqe, 24, header);
- if (qp->push_db) {
- push = (u64 *)((uintptr_t)qp->push_wqe + (wqe_idx & 0x3) * 0x20);
- memcpy(push, wqe, (op_info->len > 16) ? op_info->len + 16 : 32);
- i40iw_qp_ring_push_db(qp, wqe_idx);
- } else {
- if (post_sq)
- i40iw_qp_post_wr(qp);
- }
+ if (post_sq)
+ i40iw_qp_post_wr(qp);
return 0;
}
@@ -772,7 +745,6 @@ static enum i40iw_status_code i40iw_cq_poll_completion(struct i40iw_cq_uk *cq,
q_type = (u8)RS_64(qword3, I40IW_CQ_SQ);
info->error = (bool)RS_64(qword3, I40IW_CQ_ERROR);
- info->push_dropped = (bool)RS_64(qword3, I40IWCQ_PSHDROP);
if (info->error) {
info->comp_status = I40IW_COMPL_STATUS_FLUSHED;
info->major_err = (bool)RS_64(qword3, I40IW_CQ_MAJERR);
@@ -951,7 +923,6 @@ enum i40iw_status_code i40iw_get_rqdepth(u32 rq_size, u8 shift, u32 *rqdepth)
static const struct i40iw_qp_uk_ops iw_qp_uk_ops = {
.iw_qp_post_wr = i40iw_qp_post_wr,
- .iw_qp_ring_push_db = i40iw_qp_ring_push_db,
.iw_rdma_write = i40iw_rdma_write,
.iw_rdma_read = i40iw_rdma_read,
.iw_send = i40iw_send,
@@ -1009,11 +980,7 @@ enum i40iw_status_code i40iw_qp_uk_init(struct i40iw_qp_uk *qp,
qp->wqe_alloc_reg = info->wqe_alloc_reg;
qp->qp_id = info->qp_id;
-
qp->sq_size = info->sq_size;
- qp->push_db = info->push_db;
- qp->push_wqe = info->push_wqe;
-
qp->max_sq_frag_cnt = info->max_sq_frag_cnt;
sq_ring_size = qp->sq_size << sqshift;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_user.h b/drivers/infiniband/hw/i40iw/i40iw_user.h
index b125925641e0..93fc3081dd65 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_user.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_user.h
@@ -64,13 +64,11 @@ enum i40iw_device_capabilities_const {
I40IW_MAX_SGE_RD = 1,
I40IW_MAX_OUTBOUND_MESSAGE_SIZE = 2147483647,
I40IW_MAX_INBOUND_MESSAGE_SIZE = 2147483647,
- I40IW_MAX_PUSH_PAGE_COUNT = 4096,
I40IW_MAX_PE_ENABLED_VF_COUNT = 32,
I40IW_MAX_VF_FPM_ID = 47,
I40IW_MAX_VF_PER_PF = 127,
I40IW_MAX_SQ_PAYLOAD_SIZE = 2145386496,
I40IW_MAX_INLINE_DATA_SIZE = 48,
- I40IW_MAX_PUSHMODE_INLINE_DATA_SIZE = 48,
I40IW_MAX_IRD_SIZE = 64,
I40IW_MAX_ORD_SIZE = 127,
I40IW_MAX_WQ_ENTRIES = 2048,
@@ -272,7 +270,6 @@ struct i40iw_cq_poll_info {
u16 minor_err;
u8 op_type;
bool stag_invalid_set;
- bool push_dropped;
bool error;
bool is_srq;
bool solicited_event;
@@ -280,7 +277,6 @@ struct i40iw_cq_poll_info {
struct i40iw_qp_uk_ops {
void (*iw_qp_post_wr)(struct i40iw_qp_uk *);
- void (*iw_qp_ring_push_db)(struct i40iw_qp_uk *, u32);
enum i40iw_status_code (*iw_rdma_write)(struct i40iw_qp_uk *,
struct i40iw_post_sq_info *, bool);
enum i40iw_status_code (*iw_rdma_read)(struct i40iw_qp_uk *,
@@ -340,8 +336,6 @@ struct i40iw_qp_uk {
struct i40iw_sq_uk_wr_trk_info *sq_wrtrk_array;
u64 *rq_wrid_array;
u64 *shadow_area;
- u32 *push_db;
- u64 *push_wqe;
struct i40iw_ring sq_ring;
struct i40iw_ring rq_ring;
struct i40iw_ring initial_ring;
@@ -381,8 +375,6 @@ struct i40iw_qp_uk_init_info {
u64 *shadow_area;
struct i40iw_sq_uk_wr_trk_info *sq_wrtrk_array;
u64 *rq_wrid_array;
- u32 *push_db;
- u64 *push_wqe;
u32 qp_id;
u32 sq_size;
u32 rq_size;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 533f3caecb7a..65aedfe57e77 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -180,78 +180,6 @@ static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
}
/**
- * i40iw_alloc_push_page - allocate a push page for qp
- * @iwdev: iwarp device
- * @qp: hardware control qp
- */
-static void i40iw_alloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_qp *qp)
-{
- struct i40iw_cqp_request *cqp_request;
- struct cqp_commands_info *cqp_info;
- enum i40iw_status_code status;
-
- if (qp->push_idx != I40IW_INVALID_PUSH_PAGE_INDEX)
- return;
-
- cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
- if (!cqp_request)
- return;
-
- atomic_inc(&cqp_request->refcount);
-
- cqp_info = &cqp_request->info;
- cqp_info->cqp_cmd = OP_MANAGE_PUSH_PAGE;
- cqp_info->post_sq = 1;
-
- cqp_info->in.u.manage_push_page.info.qs_handle = qp->qs_handle;
- cqp_info->in.u.manage_push_page.info.free_page = 0;
- cqp_info->in.u.manage_push_page.cqp = &iwdev->cqp.sc_cqp;
- cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
-
- status = i40iw_handle_cqp_op(iwdev, cqp_request);
- if (!status)
- qp->push_idx = cqp_request->compl_info.op_ret_val;
- else
- i40iw_pr_err("CQP-OP Push page fail");
- i40iw_put_cqp_request(&iwdev->cqp, cqp_request);
-}
-
-/**
- * i40iw_dealloc_push_page - free a push page for qp
- * @iwdev: iwarp device
- * @qp: hardware control qp
- */
-static void i40iw_dealloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_qp *qp)
-{
- struct i40iw_cqp_request *cqp_request;
- struct cqp_commands_info *cqp_info;
- enum i40iw_status_code status;
-
- if (qp->push_idx == I40IW_INVALID_PUSH_PAGE_INDEX)
- return;
-
- cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
- if (!cqp_request)
- return;
-
- cqp_info = &cqp_request->info;
- cqp_info->cqp_cmd = OP_MANAGE_PUSH_PAGE;
- cqp_info->post_sq = 1;
-
- cqp_info->in.u.manage_push_page.info.push_idx = qp->push_idx;
- cqp_info->in.u.manage_push_page.info.qs_handle = qp->qs_handle;
- cqp_info->in.u.manage_push_page.info.free_page = 1;
- cqp_info->in.u.manage_push_page.cqp = &iwdev->cqp.sc_cqp;
- cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
-
- status = i40iw_handle_cqp_op(iwdev, cqp_request);
- if (!status)
- qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX;
- else
- i40iw_pr_err("CQP-OP Push page fail");
-}
-
-/**
* i40iw_alloc_pd - allocate protection domain
* @pd: PD pointer
* @udata: user data
@@ -348,7 +276,6 @@ void i40iw_free_qp_resources(struct i40iw_qp *iwqp)
u32 qp_num = iwqp->ibqp.qp_num;
i40iw_ieq_cleanup_qp(iwdev->vsi.ieq, &iwqp->sc_qp);
- i40iw_dealloc_push_page(iwdev, &iwqp->sc_qp);
if (qp_num)
i40iw_free_resource(iwdev, iwdev->allocated_qps, qp_num);
if (iwpbl->pbl_allocated)
@@ -533,7 +460,7 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
return ERR_PTR(-ENODEV);
if (init_attr->create_flags)
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(-EOPNOTSUPP);
if (init_attr->cap.max_inline_data > I40IW_MAX_INLINE_DATA_SIZE)
init_attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE;
@@ -561,8 +488,6 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
qp = &iwqp->sc_qp;
qp->back_qp = (void *)iwqp;
- qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX;
-
iwqp->iwdev = iwdev;
iwqp->ctx_info.iwarp_info = &iwqp->iwarp_info;
@@ -606,8 +531,6 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
err_code = -EOPNOTSUPP;
goto error;
}
- if (iwdev->push_mode)
- i40iw_alloc_push_page(iwdev, qp);
if (udata) {
err_code = ib_copy_from_udata(&req, udata, sizeof(req));
if (err_code) {
@@ -666,13 +589,6 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
ctx_info->iwarp_info_valid = true;
ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
- if (qp->push_idx == I40IW_INVALID_PUSH_PAGE_INDEX) {
- ctx_info->push_mode_en = false;
- } else {
- ctx_info->push_mode_en = true;
- ctx_info->push_idx = qp->push_idx;
- }
-
ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
(u64 *)iwqp->host_ctx.va,
ctx_info);
@@ -712,7 +628,7 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
uresp.actual_sq_size = sq_size;
uresp.actual_rq_size = rq_size;
uresp.qp_id = qp_num;
- uresp.push_idx = qp->push_idx;
+ uresp.push_idx = I40IW_INVALID_PUSH_PAGE_INDEX;
err_code = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
if (err_code) {
i40iw_pr_err("copy_to_udata failed\n");
@@ -832,6 +748,9 @@ int i40iw_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
u32 err;
unsigned long flags;
+ if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
+ return -EOPNOTSUPP;
+
memset(&info, 0, sizeof(info));
ctx_info = &iwqp->ctx_info;
iwarp_info = &iwqp->iwarp_info;
@@ -1081,6 +1000,9 @@ static int i40iw_create_cq(struct ib_cq *ibcq,
int err_code;
int entries = attr->cqe;
+ if (attr->flags)
+ return -EOPNOTSUPP;
+
if (iwdev->closing)
return -ENODEV;
@@ -2033,7 +1955,7 @@ static ssize_t hw_rev_show(struct device *dev,
rdma_device_to_drv_device(dev, struct i40iw_ib_device, ibdev);
u32 hw_rev = iwibdev->iwdev->sc_dev.hw_rev;
- return sprintf(buf, "%x\n", hw_rev);
+ return sysfs_emit(buf, "%x\n", hw_rev);
}
static DEVICE_ATTR_RO(hw_rev);
@@ -2043,7 +1965,7 @@ static DEVICE_ATTR_RO(hw_rev);
static ssize_t hca_type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "I40IW\n");
+ return sysfs_emit(buf, "I40IW\n");
}
static DEVICE_ATTR_RO(hca_type);
@@ -2053,7 +1975,7 @@ static DEVICE_ATTR_RO(hca_type);
static ssize_t board_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%.*s\n", 32, "I40IW Board ID");
+ return sysfs_emit(buf, "%.*s\n", 32, "I40IW Board ID");
}
static DEVICE_ATTR_RO(board_id);
@@ -2661,27 +2583,6 @@ static struct i40iw_ib_device *i40iw_init_rdma_device(struct i40iw_device *iwdev
iwibdev->ibdev.node_type = RDMA_NODE_RNIC;
ether_addr_copy((u8 *)&iwibdev->ibdev.node_guid, netdev->dev_addr);
- iwibdev->ibdev.uverbs_cmd_mask =
- (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
- (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
- (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
- (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_REG_MR) |
- (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
- (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
- (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
- (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
- (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
- (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
- (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
- (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
- (1ull << IB_USER_VERBS_CMD_POST_RECV) |
- (1ull << IB_USER_VERBS_CMD_POST_SEND);
iwibdev->ibdev.phys_port_cnt = 1;
iwibdev->ibdev.num_comp_vectors = iwdev->ceqs_count;
iwibdev->ibdev.dev.parent = &pcidev->dev;
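
The sprintf() calls replaced in the hunks above are the classic sysfs anti-pattern: a show() callback receives a full page and nothing bounds the write. sysfs_emit() caps output at PAGE_SIZE and warns on misuse. A minimal sketch of the converted pattern, using a hypothetical "example" attribute:

	#include <linux/device.h>
	#include <linux/sysfs.h>

	/* Hypothetical read-only sysfs attribute using the bounded helper. */
	static ssize_t example_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
	{
		/* Returns bytes written; never writes past the sysfs page. */
		return sysfs_emit(buf, "%x\n", 0x42);
	}
	static DEVICE_ATTR_RO(example);
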
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 8bd16474708f..f3ace85552f3 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -1523,6 +1523,7 @@ static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc
return;
} else
*slave_id = slave;
+ break;
default:
/* nothing */;
}
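
The added break above closes an unintended fallthrough: after *slave_id = slave, control previously continued into the default: label. Reduced to its essence, with hypothetical names:

	switch (op) {
	case OP_MULTIPLEX:
		*slave_id = slave;
		break;	/* previously missing: control fell into "default" */
	default:
		/* nothing */;
	}
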
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index cd0fba6b0964..e3cd402c079a 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -2024,7 +2024,8 @@ static ssize_t hca_type_show(struct device *device,
{
struct mlx4_ib_dev *dev =
rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);
- return sprintf(buf, "MT%d\n", dev->dev->persist->pdev->device);
+
+ return sysfs_emit(buf, "MT%d\n", dev->dev->persist->pdev->device);
}
static DEVICE_ATTR_RO(hca_type);
@@ -2033,7 +2034,8 @@ static ssize_t hw_rev_show(struct device *device,
{
struct mlx4_ib_dev *dev =
rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);
- return sprintf(buf, "%x\n", dev->dev->rev_id);
+
+ return sysfs_emit(buf, "%x\n", dev->dev->rev_id);
}
static DEVICE_ATTR_RO(hw_rev);
@@ -2043,8 +2045,7 @@ static ssize_t board_id_show(struct device *device,
struct mlx4_ib_dev *dev =
rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);
- return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN,
- dev->dev->board_id);
+ return sysfs_emit(buf, "%.*s\n", MLX4_BOARD_ID_LEN, dev->dev->board_id);
}
static DEVICE_ATTR_RO(board_id);
@@ -2264,10 +2265,7 @@ static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
u64 release_mac = MLX4_IB_INVALID_MAC;
struct mlx4_ib_qp *qp;
- read_lock(&dev_base_lock);
new_smac = mlx4_mac_to_u64(dev->dev_addr);
- read_unlock(&dev_base_lock);
-
atomic64_set(&ibdev->iboe.mac[port - 1], new_smac);
/* no need for update QP1 and mac registration in non-SRIOV */
@@ -2657,73 +2655,25 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
ibdev->ib_dev.dev.parent = &dev->persist->pdev->dev;
- ibdev->ib_dev.uverbs_cmd_mask =
- (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
- (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
- (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
- (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_REG_MR) |
- (1ull << IB_USER_VERBS_CMD_REREG_MR) |
- (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
- (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
- (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
- (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
- (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
- (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
- (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
- (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
- (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
- (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
- (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) |
- (1ull << IB_USER_VERBS_CMD_OPEN_QP);
-
ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_ops);
- ibdev->ib_dev.uverbs_ex_cmd_mask |=
- (1ull << IB_USER_VERBS_EX_CMD_MODIFY_CQ) |
- (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) |
- (1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) |
- (1ull << IB_USER_VERBS_EX_CMD_CREATE_QP);
if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) &&
((mlx4_ib_port_link_layer(&ibdev->ib_dev, 1) ==
IB_LINK_LAYER_ETHERNET) ||
(mlx4_ib_port_link_layer(&ibdev->ib_dev, 2) ==
- IB_LINK_LAYER_ETHERNET))) {
- ibdev->ib_dev.uverbs_ex_cmd_mask |=
- (1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
- (1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
- (1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
- (1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
- (1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
+ IB_LINK_LAYER_ETHERNET)))
ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_wq_ops);
- }
if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
- dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
- ibdev->ib_dev.uverbs_cmd_mask |=
- (1ull << IB_USER_VERBS_CMD_ALLOC_MW) |
- (1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
+ dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)
ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_mw_ops);
- }
if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
- ibdev->ib_dev.uverbs_cmd_mask |=
- (1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
- (1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_xrc_ops);
}
if (check_flow_steering_support(dev)) {
ibdev->steering_support = MLX4_STEERING_MODE_DEVICE_MANAGED;
- ibdev->ib_dev.uverbs_ex_cmd_mask |=
- (1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
- (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_fs_ops);
}
diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
index 5e4ec9786081..33f525b744f2 100644
--- a/drivers/infiniband/hw/mlx4/mcg.c
+++ b/drivers/infiniband/hw/mlx4/mcg.c
@@ -988,53 +988,63 @@ int mlx4_ib_mcg_multiplex_handler(struct ib_device *ibdev, int port,
}
static ssize_t sysfs_show_group(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct mcast_group *group =
container_of(attr, struct mcast_group, dentry);
struct mcast_req *req = NULL;
- char pending_str[40];
char state_str[40];
- ssize_t len = 0;
- int f;
+ char pending_str[40];
+ int len;
+ int i;
+ u32 hoplimit;
if (group->state == MCAST_IDLE)
- sprintf(state_str, "%s", get_state_string(group->state));
+ scnprintf(state_str, sizeof(state_str), "%s",
+ get_state_string(group->state));
else
- sprintf(state_str, "%s(TID=0x%llx)",
- get_state_string(group->state),
- be64_to_cpu(group->last_req_tid));
+ scnprintf(state_str, sizeof(state_str), "%s(TID=0x%llx)",
+ get_state_string(group->state),
+ be64_to_cpu(group->last_req_tid));
+
if (list_empty(&group->pending_list)) {
- sprintf(pending_str, "No");
+ scnprintf(pending_str, sizeof(pending_str), "No");
} else {
- req = list_first_entry(&group->pending_list, struct mcast_req, group_list);
- sprintf(pending_str, "Yes(TID=0x%llx)",
- be64_to_cpu(req->sa_mad.mad_hdr.tid));
+ req = list_first_entry(&group->pending_list, struct mcast_req,
+ group_list);
+ scnprintf(pending_str, sizeof(pending_str), "Yes(TID=0x%llx)",
+ be64_to_cpu(req->sa_mad.mad_hdr.tid));
}
- len += sprintf(buf + len, "%1d [%02d,%02d,%02d] %4d %4s %5s ",
- group->rec.scope_join_state & 0xf,
- group->members[2], group->members[1], group->members[0],
- atomic_read(&group->refcount),
- pending_str,
- state_str);
- for (f = 0; f < MAX_VFS; ++f)
- if (group->func[f].state == MCAST_MEMBER)
- len += sprintf(buf + len, "%d[%1x] ",
- f, group->func[f].join_state);
-
- len += sprintf(buf + len, "\t\t(%4hx %4x %2x %2x %2x %2x %2x "
- "%4x %4x %2x %2x)\n",
- be16_to_cpu(group->rec.pkey),
- be32_to_cpu(group->rec.qkey),
- (group->rec.mtusel_mtu & 0xc0) >> 6,
- group->rec.mtusel_mtu & 0x3f,
- group->rec.tclass,
- (group->rec.ratesel_rate & 0xc0) >> 6,
- group->rec.ratesel_rate & 0x3f,
- (be32_to_cpu(group->rec.sl_flowlabel_hoplimit) & 0xf0000000) >> 28,
- (be32_to_cpu(group->rec.sl_flowlabel_hoplimit) & 0x0fffff00) >> 8,
- be32_to_cpu(group->rec.sl_flowlabel_hoplimit) & 0x000000ff,
- group->rec.proxy_join);
+
+ len = sysfs_emit(buf, "%1d [%02d,%02d,%02d] %4d %4s %5s ",
+ group->rec.scope_join_state & 0xf,
+ group->members[2],
+ group->members[1],
+ group->members[0],
+ atomic_read(&group->refcount),
+ pending_str,
+ state_str);
+
+ for (i = 0; i < MAX_VFS; i++) {
+ if (group->func[i].state == MCAST_MEMBER)
+ len += sysfs_emit_at(buf, len, "%d[%1x] ", i,
+ group->func[i].join_state);
+ }
+
+ hoplimit = be32_to_cpu(group->rec.sl_flowlabel_hoplimit);
+ len += sysfs_emit_at(buf, len,
+ "\t\t(%4hx %4x %2x %2x %2x %2x %2x %4x %4x %2x %2x)\n",
+ be16_to_cpu(group->rec.pkey),
+ be32_to_cpu(group->rec.qkey),
+ (group->rec.mtusel_mtu & 0xc0) >> 6,
+ (group->rec.mtusel_mtu & 0x3f),
+ group->rec.tclass,
+ (group->rec.ratesel_rate & 0xc0) >> 6,
+ (group->rec.ratesel_rate & 0x3f),
+ (hoplimit & 0xf0000000) >> 28,
+ (hoplimit & 0x0fffff00) >> 8,
+ (hoplimit & 0x000000ff),
+ group->rec.proxy_join);
return len;
}
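
sysfs_show_group() above demonstrates the companion helpers for multi-part output: scnprintf() builds bounded scratch strings, sysfs_emit() writes the first chunk at offset 0, and sysfs_emit_at() appends at a running offset. The skeleton of that pattern, with hypothetical names (state_name, nr_items, items):

	char scratch[40];
	int len, i;

	scnprintf(scratch, sizeof(scratch), "%s", state_name);	/* bounded local buffer */
	len = sysfs_emit(buf, "%s ", scratch);			/* first chunk, offset 0 */
	for (i = 0; i < nr_items; i++)
		len += sysfs_emit_at(buf, len, "%d ", items[i]); /* append at offset */
	return len;
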
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 58df06492d69..78c9bb79ec75 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -908,10 +908,10 @@ int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn);
void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count);
int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
int is_attach);
-int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
- u64 start, u64 length, u64 virt_addr,
- int mr_access_flags, struct ib_pd *pd,
- struct ib_udata *udata);
+struct ib_mr *mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags, u64 start,
+ u64 length, u64 virt_addr,
+ int mr_access_flags, struct ib_pd *pd,
+ struct ib_udata *udata);
int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev,
const struct ib_gid_attr *attr);
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 426fed005d53..50becc0e4b62 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -456,10 +456,10 @@ err_free:
return ERR_PTR(err);
}
-int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
- u64 start, u64 length, u64 virt_addr,
- int mr_access_flags, struct ib_pd *pd,
- struct ib_udata *udata)
+struct ib_mr *mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags, u64 start,
+ u64 length, u64 virt_addr,
+ int mr_access_flags, struct ib_pd *pd,
+ struct ib_udata *udata)
{
struct mlx4_ib_dev *dev = to_mdev(mr->device);
struct mlx4_ib_mr *mmr = to_mmr(mr);
@@ -472,9 +472,8 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
* race exists.
*/
err = mlx4_mr_hw_get_mpt(dev->dev, &mmr->mmr, &pmpt_entry);
-
if (err)
- return err;
+ return ERR_PTR(err);
if (flags & IB_MR_REREG_PD) {
err = mlx4_mr_hw_change_pd(dev->dev, *pmpt_entry,
@@ -542,8 +541,9 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
release_mpt_entry:
mlx4_mr_hw_put_mpt(dev->dev, pmpt_entry);
-
- return err;
+ if (err)
+ return ERR_PTR(err);
+ return NULL;
}
static int
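
The rereg_user_mr() hunks convert the callback from returning an int to returning a struct ib_mr *: errors travel as ERR_PTR(err), and NULL signals that the existing MR was updated in place (as the function's own success path above shows). A sketch of how a caller would decode that contract, with the argument list assumed to pass straight through:

	#include <linux/err.h>

	struct ib_mr *new_mr;

	new_mr = mlx4_ib_rereg_user_mr(mr, flags, start, length, virt_addr,
				       access_flags, pd, udata);
	if (IS_ERR(new_mr))
		return PTR_ERR(new_mr);	/* error code encoded in the pointer */
	if (!new_mr)
		return 0;		/* NULL: original MR reused in place */
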
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 5cb8e602294c..651785bd57f2 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1493,7 +1493,7 @@ static int _mlx4_ib_create_qp(struct ib_pd *pd, struct mlx4_ib_qp *qp,
MLX4_IB_SRIOV_SQP |
MLX4_IB_QP_NETIF |
MLX4_IB_QP_CREATE_ROCE_V2_GSI))
- return -EINVAL;
+ return -EOPNOTSUPP;
if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) {
if (init_attr->qp_type != IB_QPT_UD)
@@ -1561,6 +1561,11 @@ static int _mlx4_ib_create_qp(struct ib_pd *pd, struct mlx4_ib_qp *qp,
if (err)
return err;
+ if (init_attr->create_flags &
+ (MLX4_IB_SRIOV_SQP | MLX4_IB_SRIOV_TUNNEL_QP))
+ /* Internal QP created with ib_create_qp */
+ rdma_restrack_no_track(&qp->ibqp.res);
+
qp->port = init_attr->port_num;
qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 :
init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI ? sqpn : 1;
@@ -2787,6 +2792,9 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
struct mlx4_ib_qp *mqp = to_mqp(ibqp);
int ret;
+ if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
+ return -EOPNOTSUPP;
+
ret = _mlx4_ib_modify_qp(ibqp, attr, attr_mask, udata);
if (mqp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) {
@@ -4007,7 +4015,9 @@ int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr
qp_attr->qp_access_flags =
to_ib_qp_access_flags(be32_to_cpu(context.params2));
- if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
+ if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC ||
+ qp->ibqp.qp_type == IB_QPT_XRC_INI ||
+ qp->ibqp.qp_type == IB_QPT_XRC_TGT) {
to_rdma_ah_attr(dev, &qp_attr->ah_attr, &context.pri_path);
to_rdma_ah_attr(dev, &qp_attr->alt_ah_attr, &context.alt_path);
qp_attr->alt_pkey_index = context.alt_path.pkey_index & 0x7f;
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index bf618529e734..6a381751c0d8 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -86,6 +86,10 @@ int mlx4_ib_create_srq(struct ib_srq *ib_srq,
int err;
int i;
+ if (init_attr->srq_type != IB_SRQT_BASIC &&
+ init_attr->srq_type != IB_SRQT_XRC)
+ return -EOPNOTSUPP;
+
/* Sanity check SRQ size before proceeding */
if (init_attr->attr.max_wr >= dev->dev->caps.max_srq_wqes ||
init_attr->attr.max_sge > dev->dev->caps.max_srq_sge)
diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c
index ea1f3a081b05..1b5891130aab 100644
--- a/drivers/infiniband/hw/mlx4/sysfs.c
+++ b/drivers/infiniband/hw/mlx4/sysfs.c
@@ -56,7 +56,7 @@ static ssize_t show_admin_alias_guid(struct device *dev,
mlx4_ib_iov_dentry->entry_num,
port->num);
- return sprintf(buf, "%llx\n", be64_to_cpu(sysadmin_ag_val));
+ return sysfs_emit(buf, "%llx\n", be64_to_cpu(sysadmin_ag_val));
}
/* store_admin_alias_guid stores the (new) administratively assigned value of that GUID.
@@ -117,22 +117,24 @@ static ssize_t show_port_gid(struct device *dev,
struct mlx4_ib_iov_port *port = mlx4_ib_iov_dentry->ctx;
struct mlx4_ib_dev *mdev = port->dev;
union ib_gid gid;
- ssize_t ret;
+ int ret;
+ __be16 *raw;
ret = __mlx4_ib_query_gid(&mdev->ib_dev, port->num,
mlx4_ib_iov_dentry->entry_num, &gid, 1);
if (ret)
return ret;
- ret = sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
- be16_to_cpu(((__be16 *) gid.raw)[0]),
- be16_to_cpu(((__be16 *) gid.raw)[1]),
- be16_to_cpu(((__be16 *) gid.raw)[2]),
- be16_to_cpu(((__be16 *) gid.raw)[3]),
- be16_to_cpu(((__be16 *) gid.raw)[4]),
- be16_to_cpu(((__be16 *) gid.raw)[5]),
- be16_to_cpu(((__be16 *) gid.raw)[6]),
- be16_to_cpu(((__be16 *) gid.raw)[7]));
- return ret;
+
+ raw = (__be16 *)gid.raw;
+ return sysfs_emit(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
+ be16_to_cpu(raw[0]),
+ be16_to_cpu(raw[1]),
+ be16_to_cpu(raw[2]),
+ be16_to_cpu(raw[3]),
+ be16_to_cpu(raw[4]),
+ be16_to_cpu(raw[5]),
+ be16_to_cpu(raw[6]),
+ be16_to_cpu(raw[7]));
}
static ssize_t show_phys_port_pkey(struct device *dev,
@@ -151,7 +153,7 @@ static ssize_t show_phys_port_pkey(struct device *dev,
if (ret)
return ret;
- return sprintf(buf, "0x%04x\n", pkey);
+ return sysfs_emit(buf, "0x%04x\n", pkey);
}
#define DENTRY_REMOVE(_dentry) \
@@ -441,16 +443,12 @@ static ssize_t show_port_pkey(struct mlx4_port *p, struct port_attribute *attr,
{
struct port_table_attribute *tab_attr =
container_of(attr, struct port_table_attribute, attr);
- ssize_t ret = -ENODEV;
-
- if (p->dev->pkeys.virt2phys_pkey[p->slave][p->port_num - 1][tab_attr->index] >=
- (p->dev->dev->caps.pkey_table_len[p->port_num]))
- ret = sprintf(buf, "none\n");
- else
- ret = sprintf(buf, "%d\n",
- p->dev->pkeys.virt2phys_pkey[p->slave]
- [p->port_num - 1][tab_attr->index]);
- return ret;
+ struct pkey_mgt *m = &p->dev->pkeys;
+ u8 key = m->virt2phys_pkey[p->slave][p->port_num - 1][tab_attr->index];
+
+ if (key >= p->dev->dev->caps.pkey_table_len[p->port_num])
+ return sysfs_emit(buf, "none\n");
+ return sysfs_emit(buf, "%d\n", key);
}
static ssize_t store_port_pkey(struct mlx4_port *p, struct port_attribute *attr,
@@ -488,7 +486,7 @@ static ssize_t store_port_pkey(struct mlx4_port *p, struct port_attribute *attr,
static ssize_t show_port_gid_idx(struct mlx4_port *p,
struct port_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", p->slave);
+ return sysfs_emit(buf, "%d\n", p->slave);
}
static struct attribute **
@@ -542,14 +540,10 @@ static ssize_t sysfs_show_smi_enabled(struct device *dev,
{
struct mlx4_port *p =
container_of(attr, struct mlx4_port, smi_enabled);
- ssize_t len = 0;
- if (mlx4_vf_smi_enabled(p->dev->dev, p->slave, p->port_num))
- len = sprintf(buf, "%d\n", 1);
- else
- len = sprintf(buf, "%d\n", 0);
-
- return len;
+ return sysfs_emit(buf, "%d\n",
+ !!mlx4_vf_smi_enabled(p->dev->dev, p->slave,
+ p->port_num));
}
static ssize_t sysfs_show_enable_smi_admin(struct device *dev,
@@ -558,14 +552,10 @@ static ssize_t sysfs_show_enable_smi_admin(struct device *dev,
{
struct mlx4_port *p =
container_of(attr, struct mlx4_port, enable_smi_admin);
- ssize_t len = 0;
-
- if (mlx4_vf_get_enable_smi_admin(p->dev->dev, p->slave, p->port_num))
- len = sprintf(buf, "%d\n", 1);
- else
- len = sprintf(buf, "%d\n", 0);
- return len;
+ return sysfs_emit(buf, "%d\n",
+ !!mlx4_vf_get_enable_smi_admin(p->dev->dev, p->slave,
+ p->port_num));
}
static ssize_t sysfs_store_enable_smi_admin(struct device *dev,
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index fb62f1d04afa..eb92cefffd77 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -707,10 +707,10 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
int *cqe_size, int *index, int *inlen)
{
struct mlx5_ib_create_cq ucmd = {};
+ unsigned long page_size;
+ unsigned int page_offset_quantized;
size_t ucmdlen;
- int page_shift;
__be64 *pas;
- int npages;
int ncont;
void *cqc;
int err;
@@ -742,14 +742,24 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
return err;
}
+ page_size = mlx5_umem_find_best_cq_quantized_pgoff(
+ cq->buf.umem, cqc, log_page_size, MLX5_ADAPTER_PAGE_SHIFT,
+ page_offset, 64, &page_offset_quantized);
+ if (!page_size) {
+ err = -EINVAL;
+ goto err_umem;
+ }
+
err = mlx5_ib_db_map_user(context, udata, ucmd.db_addr, &cq->db);
if (err)
goto err_umem;
- mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, 0, &npages, &page_shift,
- &ncont, NULL);
- mlx5_ib_dbg(dev, "addr 0x%llx, size %u, npages %d, page_shift %d, ncont %d\n",
- ucmd.buf_addr, entries * ucmd.cqe_size, npages, page_shift, ncont);
+ ncont = ib_umem_num_dma_blocks(cq->buf.umem, page_size);
+ mlx5_ib_dbg(
+ dev,
+ "addr 0x%llx, size %u, npages %zu, page_size %lu, ncont %d\n",
+ ucmd.buf_addr, entries * ucmd.cqe_size,
+ ib_umem_num_pages(cq->buf.umem), page_size, ncont);
*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * ncont;
@@ -760,11 +770,12 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
}
pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
- mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, pas, 0);
+ mlx5_ib_populate_pas(cq->buf.umem, page_size, pas, 0);
cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
MLX5_SET(cqc, cqc, log_page_size,
- page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+ order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET(cqc, cqc, page_offset, page_offset_quantized);
if (ucmd.flags & MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX) {
*index = ucmd.uar_page_index;
@@ -1128,13 +1139,12 @@ int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
}
static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
- int entries, struct ib_udata *udata, int *npas,
- int *page_shift, int *cqe_size)
+ int entries, struct ib_udata *udata,
+ int *cqe_size)
{
struct mlx5_ib_resize_cq ucmd;
struct ib_umem *umem;
int err;
- int npages;
err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
if (err)
@@ -1155,9 +1165,6 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
return err;
}
- mlx5_ib_cont_pages(umem, ucmd.buf_addr, 0, &npages, page_shift,
- npas, NULL);
-
cq->resize_umem = umem;
*cqe_size = ucmd.cqe_size;
@@ -1250,7 +1257,8 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
int err;
int npas;
__be64 *pas;
- int page_shift;
+ unsigned int page_offset_quantized = 0;
+ unsigned int page_shift;
int inlen;
int cqe_size;
unsigned long flags;
@@ -1277,22 +1285,34 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
mutex_lock(&cq->resize_mutex);
if (udata) {
- err = resize_user(dev, cq, entries, udata, &npas, &page_shift,
- &cqe_size);
+ unsigned long page_size;
+
+ err = resize_user(dev, cq, entries, udata, &cqe_size);
+ if (err)
+ goto ex;
+
+ page_size = mlx5_umem_find_best_cq_quantized_pgoff(
+ cq->resize_umem, cqc, log_page_size,
+ MLX5_ADAPTER_PAGE_SHIFT, page_offset, 64,
+ &page_offset_quantized);
+ if (!page_size) {
+ err = -EINVAL;
+ goto ex_resize;
+ }
+ npas = ib_umem_num_dma_blocks(cq->resize_umem, page_size);
+ page_shift = order_base_2(page_size);
} else {
+ struct mlx5_frag_buf *frag_buf;
+
cqe_size = 64;
err = resize_kernel(dev, cq, entries, cqe_size);
- if (!err) {
- struct mlx5_frag_buf *frag_buf = &cq->resize_buf->frag_buf;
-
- npas = frag_buf->npages;
- page_shift = frag_buf->page_shift;
- }
+ if (err)
+ goto ex;
+ frag_buf = &cq->resize_buf->frag_buf;
+ npas = frag_buf->npages;
+ page_shift = frag_buf->page_shift;
}
- if (err)
- goto ex;
-
inlen = MLX5_ST_SZ_BYTES(modify_cq_in) +
MLX5_FLD_SZ_BYTES(modify_cq_in, pas[0]) * npas;
@@ -1304,8 +1324,8 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
pas = (__be64 *)MLX5_ADDR_OF(modify_cq_in, in, pas);
if (udata)
- mlx5_ib_populate_pas(dev, cq->resize_umem, page_shift,
- pas, 0);
+ mlx5_ib_populate_pas(cq->resize_umem, 1UL << page_shift, pas,
+ 0);
else
mlx5_fill_page_frag_array(&cq->resize_buf->frag_buf, pas);
@@ -1319,6 +1339,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
MLX5_SET(cqc, cqc, log_page_size,
page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET(cqc, cqc, page_offset, page_offset_quantized);
MLX5_SET(cqc, cqc, cqe_sz,
cqe_sz_to_mlx_sz(cqe_size,
cq->private_flags &
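
create_cq_user() and the resize path now hand mlx5_ib_populate_pas() a byte page_size instead of a page_shift; the helper (rewritten later in this patch) walks the umem in DMA blocks of that size. The core of the iteration, as a sketch assuming a pas array already sized by ib_umem_num_dma_blocks():

	struct ib_block_iter biter;

	/* One PAS entry per aligned DMA block of page_size bytes. */
	rdma_umem_for_each_dma_block(umem, &biter, page_size) {
		*pas = cpu_to_be64(rdma_block_iter_dma_address(&biter) |
				   access_flags);
		pas++;
	}
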
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index 9e3d8b826498..819c142857d6 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -93,9 +93,6 @@ struct devx_async_event_file {
struct devx_umem {
struct mlx5_core_dev *mdev;
struct ib_umem *umem;
- u32 page_offset;
- int page_shift;
- int ncont;
u32 dinlen;
u32 dinbox[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)];
};
@@ -1311,7 +1308,7 @@ static int devx_obj_cleanup(struct ib_uobject *uobject,
else
ret = mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox,
obj->dinlen, out, sizeof(out));
- if (ib_is_destroy_retryable(ret, why, uobject))
+ if (ret)
return ret;
devx_event_table = &dev->devx_event_table;
@@ -2057,9 +2054,7 @@ static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
u64 addr;
size_t size;
u32 access;
- int npages;
int err;
- u32 page_mask;
if (uverbs_copy_from(&addr, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR) ||
uverbs_copy_from(&size, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_LEN))
@@ -2073,57 +2068,62 @@ static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
if (err)
return err;
- err = ib_check_mr_access(access);
+ err = ib_check_mr_access(&dev->ib_dev, access);
if (err)
return err;
obj->umem = ib_umem_get(&dev->ib_dev, addr, size, access);
if (IS_ERR(obj->umem))
return PTR_ERR(obj->umem);
-
- mlx5_ib_cont_pages(obj->umem, obj->umem->address,
- MLX5_MKEY_PAGE_SHIFT_MASK, &npages,
- &obj->page_shift, &obj->ncont, NULL);
-
- if (!npages) {
- ib_umem_release(obj->umem);
- return -EINVAL;
- }
-
- page_mask = (1 << obj->page_shift) - 1;
- obj->page_offset = obj->umem->address & page_mask;
-
return 0;
}
-static int devx_umem_reg_cmd_alloc(struct uverbs_attr_bundle *attrs,
+static int devx_umem_reg_cmd_alloc(struct mlx5_ib_dev *dev,
+ struct uverbs_attr_bundle *attrs,
struct devx_umem *obj,
struct devx_umem_reg_cmd *cmd)
{
+ unsigned int page_size;
+ __be64 *mtt;
+ void *umem;
+
+ /*
+ * We don't know what the user intends to use this umem for, but the HW
+ * restrictions must be met. MR, doorbell records, QP, WQ and CQ all
+ * have different requirements. Since we have no idea how to sort this
+ * out, only support PAGE_SIZE with the expectation that userspace will
+ * provide the necessary alignments inside the known PAGE_SIZE and that
+ * FW will check everything.
+ */
+ page_size = ib_umem_find_best_pgoff(
+ obj->umem, PAGE_SIZE,
+ __mlx5_page_offset_to_bitmask(__mlx5_bit_sz(umem, page_offset),
+ 0));
+ if (!page_size)
+ return -EINVAL;
+
cmd->inlen = MLX5_ST_SZ_BYTES(create_umem_in) +
- (MLX5_ST_SZ_BYTES(mtt) * obj->ncont);
+ (MLX5_ST_SZ_BYTES(mtt) *
+ ib_umem_num_dma_blocks(obj->umem, page_size));
cmd->in = uverbs_zalloc(attrs, cmd->inlen);
- return PTR_ERR_OR_ZERO(cmd->in);
-}
-
-static void devx_umem_reg_cmd_build(struct mlx5_ib_dev *dev,
- struct devx_umem *obj,
- struct devx_umem_reg_cmd *cmd)
-{
- void *umem;
- __be64 *mtt;
+ if (IS_ERR(cmd->in))
+ return PTR_ERR(cmd->in);
umem = MLX5_ADDR_OF(create_umem_in, cmd->in, umem);
mtt = (__be64 *)MLX5_ADDR_OF(umem, umem, mtt);
MLX5_SET(create_umem_in, cmd->in, opcode, MLX5_CMD_OP_CREATE_UMEM);
- MLX5_SET64(umem, umem, num_of_mtt, obj->ncont);
- MLX5_SET(umem, umem, log_page_size, obj->page_shift -
- MLX5_ADAPTER_PAGE_SHIFT);
- MLX5_SET(umem, umem, page_offset, obj->page_offset);
- mlx5_ib_populate_pas(dev, obj->umem, obj->page_shift, mtt,
+ MLX5_SET64(umem, umem, num_of_mtt,
+ ib_umem_num_dma_blocks(obj->umem, page_size));
+ MLX5_SET(umem, umem, log_page_size,
+ order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET(umem, umem, page_offset,
+ ib_umem_dma_offset(obj->umem, page_size));
+
+ mlx5_ib_populate_pas(obj->umem, page_size, mtt,
(obj->umem->writable ? MLX5_IB_MTT_WRITE : 0) |
- MLX5_IB_MTT_READ);
+ MLX5_IB_MTT_READ);
+ return 0;
}
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_UMEM_REG)(
@@ -2150,12 +2150,10 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_UMEM_REG)(
if (err)
goto err_obj_free;
- err = devx_umem_reg_cmd_alloc(attrs, obj, &cmd);
+ err = devx_umem_reg_cmd_alloc(dev, attrs, obj, &cmd);
if (err)
goto err_umem_release;
- devx_umem_reg_cmd_build(dev, obj, &cmd);
-
MLX5_SET(create_umem_in, cmd.in, uid, c->devx_uid);
err = mlx5_cmd_exec(dev->mdev, cmd.in, cmd.inlen, cmd.out,
sizeof(cmd.out));
@@ -2187,7 +2185,7 @@ static int devx_umem_cleanup(struct ib_uobject *uobject,
int err;
err = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
- if (ib_is_destroy_retryable(err, why, uobject))
+ if (err)
return err;
ib_umem_release(obj->umem);
@@ -2600,8 +2598,8 @@ static const struct file_operations devx_async_event_fops = {
.llseek = no_llseek,
};
-static int devx_async_cmd_event_destroy_uobj(struct ib_uobject *uobj,
- enum rdma_remove_reason why)
+static void devx_async_cmd_event_destroy_uobj(struct ib_uobject *uobj,
+ enum rdma_remove_reason why)
{
struct devx_async_cmd_event_file *comp_ev_file =
container_of(uobj, struct devx_async_cmd_event_file,
@@ -2623,11 +2621,10 @@ static int devx_async_cmd_event_destroy_uobj(struct ib_uobject *uobj,
kvfree(entry);
}
spin_unlock_irq(&comp_ev_file->ev_queue.lock);
- return 0;
};
-static int devx_async_event_destroy_uobj(struct ib_uobject *uobj,
- enum rdma_remove_reason why)
+static void devx_async_event_destroy_uobj(struct ib_uobject *uobj,
+ enum rdma_remove_reason why)
{
struct devx_async_event_file *ev_file =
container_of(uobj, struct devx_async_event_file,
@@ -2671,7 +2668,6 @@ static int devx_async_event_destroy_uobj(struct ib_uobject *uobj,
mutex_unlock(&dev->devx_event_table.event_xa_lock);
put_device(&dev->ib_dev.dev);
- return 0;
};
DECLARE_UVERBS_NAMED_METHOD(
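
With the umem scan removed, devx_umem_reg_cmd_alloc() above derives every mailbox field from a single page_size choice (PAGE_SIZE, per the in-code comment). The arithmetic in isolation, as a sketch with assumed local names:

	unsigned long page_size = PAGE_SIZE;	/* the only size accepted here */
	size_t num_mtt = ib_umem_num_dma_blocks(obj->umem, page_size);
	unsigned int log_pgsz = order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT;
	u64 pg_off = ib_umem_dma_offset(obj->umem, page_size);
	/* cmd->inlen then scales as header + MLX5_ST_SZ_BYTES(mtt) * num_mtt */
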
diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c
index 492cfe063bca..25da0b05b4e2 100644
--- a/drivers/infiniband/hw/mlx5/fs.c
+++ b/drivers/infiniband/hw/mlx5/fs.c
@@ -2035,11 +2035,9 @@ static int flow_matcher_cleanup(struct ib_uobject *uobject,
struct uverbs_attr_bundle *attrs)
{
struct mlx5_ib_flow_matcher *obj = uobject->object;
- int ret;
- ret = ib_destroy_usecnt(&obj->usecnt, why, uobject);
- if (ret)
- return ret;
+ if (atomic_read(&obj->usecnt))
+ return -EBUSY;
kfree(obj);
return 0;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 55545f1286e5..aabdc07e4753 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -75,12 +75,6 @@ static LIST_HEAD(mlx5_ib_dev_list);
*/
static DEFINE_MUTEX(mlx5_ib_multiport_mutex);
-/* We can't use an array for xlt_emergency_page because dma_map_single
- * doesn't work on kernel modules memory
- */
-static unsigned long xlt_emergency_page;
-static struct mutex xlt_emergency_page_mutex;
-
struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi)
{
struct mlx5_ib_dev *dev;
@@ -425,10 +419,22 @@ static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u16 *active_speed,
*active_width = IB_WIDTH_2X;
*active_speed = IB_SPEED_HDR;
break;
+ case MLX5E_PROT_MASK(MLX5E_100GAUI_1_100GBASE_CR_KR):
+ *active_width = IB_WIDTH_1X;
+ *active_speed = IB_SPEED_NDR;
+ break;
case MLX5E_PROT_MASK(MLX5E_200GAUI_4_200GBASE_CR4_KR4):
*active_width = IB_WIDTH_4X;
*active_speed = IB_SPEED_HDR;
break;
+ case MLX5E_PROT_MASK(MLX5E_200GAUI_2_200GBASE_CR2_KR2):
+ *active_width = IB_WIDTH_2X;
+ *active_speed = IB_SPEED_NDR;
+ break;
+ case MLX5E_PROT_MASK(MLX5E_400GAUI_4_400GBASE_CR4_KR4):
+ *active_width = IB_WIDTH_4X;
+ *active_speed = IB_SPEED_NDR;
+ break;
default:
return -EINVAL;
}
@@ -2628,7 +2634,7 @@ static ssize_t fw_pages_show(struct device *device,
struct mlx5_ib_dev *dev =
rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
- return sprintf(buf, "%d\n", dev->mdev->priv.fw_pages);
+ return sysfs_emit(buf, "%d\n", dev->mdev->priv.fw_pages);
}
static DEVICE_ATTR_RO(fw_pages);
@@ -2638,7 +2644,7 @@ static ssize_t reg_pages_show(struct device *device,
struct mlx5_ib_dev *dev =
rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
- return sprintf(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages));
+ return sysfs_emit(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages));
}
static DEVICE_ATTR_RO(reg_pages);
@@ -2648,7 +2654,7 @@ static ssize_t hca_type_show(struct device *device,
struct mlx5_ib_dev *dev =
rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
- return sprintf(buf, "MT%d\n", dev->mdev->pdev->device);
+ return sysfs_emit(buf, "MT%d\n", dev->mdev->pdev->device);
}
static DEVICE_ATTR_RO(hca_type);
@@ -2658,7 +2664,7 @@ static ssize_t hw_rev_show(struct device *device,
struct mlx5_ib_dev *dev =
rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
- return sprintf(buf, "%x\n", dev->mdev->rev_id);
+ return sysfs_emit(buf, "%x\n", dev->mdev->rev_id);
}
static DEVICE_ATTR_RO(hw_rev);
@@ -2668,8 +2674,8 @@ static ssize_t board_id_show(struct device *device,
struct mlx5_ib_dev *dev =
rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
- return sprintf(buf, "%.*s\n", MLX5_BOARD_ID_LEN,
- dev->mdev->board_id);
+ return sysfs_emit(buf, "%.*s\n", MLX5_BOARD_ID_LEN,
+ dev->mdev->board_id);
}
static DEVICE_ATTR_RO(board_id);
@@ -3305,8 +3311,7 @@ static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
int err;
dev->port[port_num].roce.nb.notifier_call = mlx5_netdev_event;
- err = register_netdevice_notifier_net(mlx5_core_net(dev->mdev),
- &dev->port[port_num].roce.nb);
+ err = register_netdevice_notifier(&dev->port[port_num].roce.nb);
if (err) {
dev->port[port_num].roce.nb.notifier_call = NULL;
return err;
@@ -3318,8 +3323,7 @@ static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
{
if (dev->port[port_num].roce.nb.notifier_call) {
- unregister_netdevice_notifier_net(mlx5_core_net(dev->mdev),
- &dev->port[port_num].roce.nb);
+ unregister_netdevice_notifier(&dev->port[port_num].roce.nb);
dev->port[port_num].roce.nb.notifier_call = NULL;
}
}
@@ -3950,7 +3954,7 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
err = set_has_smi_cap(dev);
if (err)
- return err;
+ goto err_mp;
if (!mlx5_core_mp_enabled(mdev)) {
for (i = 1; i <= dev->num_ports; i++) {
@@ -4024,6 +4028,7 @@ static const struct ib_device_ops mlx5_ib_dev_ops = {
.create_cq = mlx5_ib_create_cq,
.create_qp = mlx5_ib_create_qp,
.create_srq = mlx5_ib_create_srq,
+ .create_user_ah = mlx5_ib_create_ah,
.dealloc_pd = mlx5_ib_dealloc_pd,
.dealloc_ucontext = mlx5_ib_dealloc_ucontext,
.del_gid = mlx5_ib_del_gid,
@@ -4141,42 +4146,6 @@ static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
struct mlx5_core_dev *mdev = dev->mdev;
int err;
- dev->ib_dev.uverbs_cmd_mask =
- (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
- (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
- (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
- (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
- (1ull << IB_USER_VERBS_CMD_REG_MR) |
- (1ull << IB_USER_VERBS_CMD_REREG_MR) |
- (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
- (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
- (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
- (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
- (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
- (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
- (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
- (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
- (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
- (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
- (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) |
- (1ull << IB_USER_VERBS_CMD_OPEN_QP);
- dev->ib_dev.uverbs_ex_cmd_mask =
- (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) |
- (1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) |
- (1ull << IB_USER_VERBS_EX_CMD_CREATE_QP) |
- (1ull << IB_USER_VERBS_EX_CMD_MODIFY_QP) |
- (1ull << IB_USER_VERBS_EX_CMD_MODIFY_CQ) |
- (1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
- (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
-
if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
IS_ENABLED(CONFIG_MLX5_CORE_IPOIB))
ib_set_device_ops(&dev->ib_dev,
@@ -4187,19 +4156,11 @@ static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence));
- if (MLX5_CAP_GEN(mdev, imaicl)) {
- dev->ib_dev.uverbs_cmd_mask |=
- (1ull << IB_USER_VERBS_CMD_ALLOC_MW) |
- (1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
+ if (MLX5_CAP_GEN(mdev, imaicl))
ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_mw_ops);
- }
- if (MLX5_CAP_GEN(mdev, xrc)) {
- dev->ib_dev.uverbs_cmd_mask |=
- (1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
- (1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
+ if (MLX5_CAP_GEN(mdev, xrc))
ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_xrc_ops);
- }
if (MLX5_CAP_DEV_MEM(mdev, memic) ||
MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
@@ -4278,12 +4239,6 @@ static int mlx5_ib_roce_init(struct mlx5_ib_dev *dev)
ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
if (ll == IB_LINK_LAYER_ETHERNET) {
- dev->ib_dev.uverbs_ex_cmd_mask |=
- (1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
- (1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
- (1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
- (1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
- (1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_common_roce_ops);
port_num = mlx5_core_native_port_num(dev->mdev) - 1;
@@ -4362,7 +4317,7 @@ static int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev)
err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true);
if (err)
- mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
+ mlx5_free_bfreg(dev->mdev, &dev->bfreg);
return err;
}
@@ -4878,30 +4833,17 @@ static struct auxiliary_driver mlx5r_driver = {
.id_table = mlx5r_id_table,
};
-unsigned long mlx5_ib_get_xlt_emergency_page(void)
-{
- mutex_lock(&xlt_emergency_page_mutex);
- return xlt_emergency_page;
-}
-
-void mlx5_ib_put_xlt_emergency_page(void)
-{
- mutex_unlock(&xlt_emergency_page_mutex);
-}
-
static int __init mlx5_ib_init(void)
{
int ret;
- xlt_emergency_page = __get_free_page(GFP_KERNEL);
+ xlt_emergency_page = (void *)__get_free_page(GFP_KERNEL);
if (!xlt_emergency_page)
return -ENOMEM;
- mutex_init(&xlt_emergency_page_mutex);
-
mlx5_ib_event_wq = alloc_ordered_workqueue("mlx5_ib_event_wq", 0);
if (!mlx5_ib_event_wq) {
- free_page(xlt_emergency_page);
+ free_page((unsigned long)xlt_emergency_page);
return -ENOMEM;
}
@@ -4934,8 +4876,7 @@ static void __exit mlx5_ib_cleanup(void)
mlx5r_rep_cleanup();
destroy_workqueue(mlx5_ib_event_wq);
- mutex_destroy(&xlt_emergency_page_mutex);
- free_page(xlt_emergency_page);
+ free_page((unsigned long)xlt_emergency_page);
}
module_init(mlx5_ib_init);
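
The xlt_emergency_page hunks change its type from unsigned long to void * (it is now shared via an extern in mlx5_ib.h rather than accessor functions), so the page-allocator calls gain casts: __get_free_page() returns an unsigned long address and free_page() expects one back. The idiom in isolation:

	#include <linux/gfp.h>

	void *page;

	page = (void *)__get_free_page(GFP_KERNEL);	/* allocator returns unsigned long */
	if (!page)
		return -ENOMEM;
	/* ... use page as an ordinary pointer ... */
	free_page((unsigned long)page);			/* cast back to free */
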
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index 13de3d2edd34..844545064c9e 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -36,161 +36,65 @@
#include "mlx5_ib.h"
#include <linux/jiffies.h>
-/* @umem: umem object to scan
- * @addr: ib virtual address requested by the user
- * @max_page_shift: high limit for page_shift - 0 means no limit
- * @count: number of PAGE_SIZE pages covered by umem
- * @shift: page shift for the compound pages found in the region
- * @ncont: number of compund pages
- * @order: log2 of the number of compound pages
+/*
+ * Fill in a physical address list. ib_umem_num_dma_blocks() entries will be
+ * filled in the pas array.
*/
-void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
- unsigned long max_page_shift,
- int *count, int *shift,
- int *ncont, int *order)
+void mlx5_ib_populate_pas(struct ib_umem *umem, size_t page_size, __be64 *pas,
+ u64 access_flags)
{
- unsigned long tmp;
- unsigned long m;
- u64 base = ~0, p = 0;
- u64 len, pfn;
- int i = 0;
- struct scatterlist *sg;
- int entry;
-
- addr = addr >> PAGE_SHIFT;
- tmp = (unsigned long)addr;
- m = find_first_bit(&tmp, BITS_PER_LONG);
- if (max_page_shift)
- m = min_t(unsigned long, max_page_shift - PAGE_SHIFT, m);
-
- for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
- len = sg_dma_len(sg) >> PAGE_SHIFT;
- pfn = sg_dma_address(sg) >> PAGE_SHIFT;
- if (base + p != pfn) {
- /* If either the offset or the new
- * base are unaligned update m
- */
- tmp = (unsigned long)(pfn | p);
- if (!IS_ALIGNED(tmp, 1 << m))
- m = find_first_bit(&tmp, BITS_PER_LONG);
-
- base = pfn;
- p = 0;
- }
-
- p += len;
- i += len;
- }
-
- if (i) {
- m = min_t(unsigned long, ilog2(roundup_pow_of_two(i)), m);
-
- if (order)
- *order = ilog2(roundup_pow_of_two(i) >> m);
-
- *ncont = DIV_ROUND_UP(i, (1 << m));
- } else {
- m = 0;
+ struct ib_block_iter biter;
- if (order)
- *order = 0;
-
- *ncont = 0;
+ rdma_umem_for_each_dma_block (umem, &biter, page_size) {
+ *pas = cpu_to_be64(rdma_block_iter_dma_address(&biter) |
+ access_flags);
+ pas++;
}
- *shift = PAGE_SHIFT + m;
- *count = i;
}
/*
- * Populate the given array with bus addresses from the umem.
- *
- * dev - mlx5_ib device
- * umem - umem to use to fill the pages
- * page_shift - determines the page size used in the resulting array
- * offset - offset into the umem to start from,
- * only implemented for ODP umems
- * num_pages - total number of pages to fill
- * pas - bus addresses array to fill
- * access_flags - access flags to set on all present pages.
- use enum mlx5_ib_mtt_access_flags for this.
+ * Compute the page shift and page_offset for mailboxes that use a quantized
+ * page_offset. The granularity of the page offset scales according to page
+ * size.
*/
-void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
- int page_shift, size_t offset, size_t num_pages,
- __be64 *pas, int access_flags)
+unsigned long __mlx5_umem_find_best_quantized_pgoff(
+ struct ib_umem *umem, unsigned long pgsz_bitmap,
+ unsigned int page_offset_bits, u64 pgoff_bitmask, unsigned int scale,
+ unsigned int *page_offset_quantized)
{
- int shift = page_shift - PAGE_SHIFT;
- int mask = (1 << shift) - 1;
- int i, k, idx;
- u64 cur = 0;
- u64 base;
- int len;
- struct scatterlist *sg;
- int entry;
-
- i = 0;
- for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
- len = sg_dma_len(sg) >> PAGE_SHIFT;
- base = sg_dma_address(sg);
-
- /* Skip elements below offset */
- if (i + len < offset << shift) {
- i += len;
- continue;
- }
-
- /* Skip pages below offset */
- if (i < offset << shift) {
- k = (offset << shift) - i;
- i = offset << shift;
- } else {
- k = 0;
- }
-
- for (; k < len; k++) {
- if (!(i & mask)) {
- cur = base + (k << PAGE_SHIFT);
- cur |= access_flags;
- idx = (i >> shift) - offset;
-
- pas[idx] = cpu_to_be64(cur);
- mlx5_ib_dbg(dev, "pas[%d] 0x%llx\n",
- i >> shift, be64_to_cpu(pas[idx]));
- }
- i++;
-
- /* Stop after num_pages reached */
- if (i >> shift >= offset + num_pages)
- return;
- }
+ const u64 page_offset_mask = (1UL << page_offset_bits) - 1;
+ unsigned long page_size;
+ u64 page_offset;
+
+ page_size = ib_umem_find_best_pgoff(umem, pgsz_bitmap, pgoff_bitmask);
+ if (!page_size)
+ return 0;
+
+ /*
+ * page_size is the largest possible page size.
+ *
+ * Reduce the page_size, and thus the page_offset and quanta, until the
+ * page_offset fits into the mailbox field. Once page_size < scale this
+ * loop is guaranteed to terminate.
+ */
+ page_offset = ib_umem_dma_offset(umem, page_size);
+ while (page_offset & ~(u64)(page_offset_mask * (page_size / scale))) {
+ page_size /= 2;
+ page_offset = ib_umem_dma_offset(umem, page_size);
}
-}
-void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
- int page_shift, __be64 *pas, int access_flags)
-{
- return __mlx5_ib_populate_pas(dev, umem, page_shift, 0,
- ib_umem_num_dma_blocks(umem, PAGE_SIZE),
- pas, access_flags);
-}
-int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset)
-{
- u64 page_size;
- u64 page_mask;
- u64 off_size;
- u64 off_mask;
- u64 buf_off;
-
- page_size = (u64)1 << page_shift;
- page_mask = page_size - 1;
- buf_off = addr & page_mask;
- off_size = page_size >> 6;
- off_mask = off_size - 1;
-
- if (buf_off & off_mask)
- return -EINVAL;
-
- *offset = buf_off >> ilog2(off_size);
- return 0;
+ /*
+ * The address is not aligned, or otherwise cannot be represented by the
+ * page_offset.
+ */
+ if (!(pgsz_bitmap & page_size))
+ return 0;
+
+ *page_offset_quantized =
+ (unsigned long)page_offset / (page_size / scale);
+ if (WARN_ON(*page_offset_quantized > page_offset_mask))
+ return 0;
+ return page_size;
}
#define WR_ID_BF 0xBF
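
The halving loop above enforces the relation documented in mlx5_ib.h: page_offset_quantized * (page_size / scale) = page_offset. A worked example under assumed numbers, scale 64 (as the CQ callers use) and a buffer starting 128 bytes into a 4 KiB page:

	unsigned long page_size = 4096;
	unsigned int scale = 64;
	u64 page_offset = 128;				/* umem start within the page */
	unsigned int quantum = page_size / scale;	/* 64-byte offset granularity */
	unsigned int page_offset_quantized = page_offset / quantum;	/* = 2 */
	/* check: 2 * (4096 / 64) == 128, so the offset round-trips exactly */
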
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index ea5243815cf6..b0fdc1b08e06 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -40,7 +40,73 @@
#define MLX5_IB_DEFAULT_UIDX 0xffffff
#define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index)
-#define MLX5_MKEY_PAGE_SHIFT_MASK __mlx5_mask(mkc, log_page_size)
+static __always_inline unsigned long
+__mlx5_log_page_size_to_bitmap(unsigned int log_pgsz_bits,
+ unsigned int pgsz_shift)
+{
+ unsigned int largest_pg_shift =
+ min_t(unsigned long, (1ULL << log_pgsz_bits) - 1 + pgsz_shift,
+ BITS_PER_LONG - 1);
+
+ /*
+ * Despite a command allowing it, the device does not support lower than
+ * 4k page size.
+ */
+ pgsz_shift = max_t(unsigned int, MLX5_ADAPTER_PAGE_SHIFT, pgsz_shift);
+ return GENMASK(largest_pg_shift, pgsz_shift);
+}
+
+/*
+ * For mkc users, instead of a page_offset the command has a start_iova which
+ * specifies both the page_offset and the on-the-wire IOVA
+ */
+#define mlx5_umem_find_best_pgsz(umem, typ, log_pgsz_fld, pgsz_shift, iova) \
+ ib_umem_find_best_pgsz(umem, \
+ __mlx5_log_page_size_to_bitmap( \
+ __mlx5_bit_sz(typ, log_pgsz_fld), \
+ pgsz_shift), \
+ iova)
+
+static __always_inline unsigned long
+__mlx5_page_offset_to_bitmask(unsigned int page_offset_bits,
+ unsigned int offset_shift)
+{
+ unsigned int largest_offset_shift =
+ min_t(unsigned long, page_offset_bits - 1 + offset_shift,
+ BITS_PER_LONG - 1);
+
+ return GENMASK(largest_offset_shift, offset_shift);
+}
+
+/*
+ * QP/CQ/WQ/etc type commands take a page offset that satisfies:
+ * page_offset_quantized * (page_size/scale) = page_offset
+ * Which restricts allowed page sizes to ones that satisfy the above.
+ */
+unsigned long __mlx5_umem_find_best_quantized_pgoff(
+ struct ib_umem *umem, unsigned long pgsz_bitmap,
+ unsigned int page_offset_bits, u64 pgoff_bitmask, unsigned int scale,
+ unsigned int *page_offset_quantized);
+#define mlx5_umem_find_best_quantized_pgoff(umem, typ, log_pgsz_fld, \
+ pgsz_shift, page_offset_fld, \
+ scale, page_offset_quantized) \
+ __mlx5_umem_find_best_quantized_pgoff( \
+ umem, \
+ __mlx5_log_page_size_to_bitmap( \
+ __mlx5_bit_sz(typ, log_pgsz_fld), pgsz_shift), \
+ __mlx5_bit_sz(typ, page_offset_fld), \
+ GENMASK(31, order_base_2(scale)), scale, \
+ page_offset_quantized)
+
+#define mlx5_umem_find_best_cq_quantized_pgoff(umem, typ, log_pgsz_fld, \
+ pgsz_shift, page_offset_fld, \
+ scale, page_offset_quantized) \
+ __mlx5_umem_find_best_quantized_pgoff( \
+ umem, \
+ __mlx5_log_page_size_to_bitmap( \
+ __mlx5_bit_sz(typ, log_pgsz_fld), pgsz_shift), \
+ __mlx5_bit_sz(typ, page_offset_fld), 0, scale, \
+ page_offset_quantized)
enum {
MLX5_IB_MMAP_OFFSET_START = 9,
@@ -597,14 +663,12 @@ struct mlx5_ib_mr {
int max_descs;
int desc_size;
int access_mode;
+ unsigned int page_shift;
struct mlx5_core_mkey mmkey;
struct ib_umem *umem;
struct mlx5_shared_mr_info *smr_info;
struct list_head list;
- unsigned int order;
struct mlx5_cache_ent *cache_ent;
- int npages;
- struct mlx5_ib_dev *dev;
u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
struct mlx5_core_sig_ctx *sig;
void *descs_alloc;
@@ -1042,6 +1106,11 @@ static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
}
+static inline struct mlx5_ib_dev *mr_to_mdev(struct mlx5_ib_mr *mr)
+{
+ return to_mdev(mr->ibmr.device);
+}
+
static inline struct mlx5_ib_dev *mlx5_udata_to_mdev(struct ib_udata *udata)
{
struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
@@ -1189,9 +1258,9 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
int access_flags);
void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *mr);
void mlx5_ib_fence_odp_mr(struct mlx5_ib_mr *mr);
-int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
- u64 length, u64 virt_addr, int access_flags,
- struct ib_pd *pd, struct ib_udata *udata);
+struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
+ u64 length, u64 virt_addr, int access_flags,
+ struct ib_pd *pd, struct ib_udata *udata);
int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
u32 max_num_sg);
@@ -1210,7 +1279,6 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
size_t *out_mad_size, u16 *out_mad_pkey_index);
int mlx5_ib_alloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
-int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
struct ib_smp *out_mad);
@@ -1230,15 +1298,8 @@ int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
struct ib_port_attr *props);
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
struct ib_port_attr *props);
-void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
- unsigned long max_page_shift,
- int *count, int *shift,
- int *ncont, int *order);
-void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
- int page_shift, size_t offset, size_t num_pages,
- __be64 *pas, int access_flags);
-void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
- int page_shift, __be64 *pas, int access_flags);
+void mlx5_ib_populate_pas(struct ib_umem *umem, size_t page_size, __be64 *pas,
+ u64 access_flags);
void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
int mlx5_ib_get_cqe_size(struct ib_cq *ibcq);
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
@@ -1283,7 +1344,7 @@ void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
enum ib_uverbs_advise_mr_advice advice,
u32 flags, struct ib_sge *sg_list, u32 num_sge);
-int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr, bool enable);
+int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr);
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
{
@@ -1305,7 +1366,7 @@ mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
{
return -EOPNOTSUPP;
}
-static inline int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr, bool enable)
+static inline int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr)
{
return -EOPNOTSUPP;
}
@@ -1456,8 +1517,7 @@ static inline int get_num_static_uars(struct mlx5_ib_dev *dev,
return get_uars_per_sys_page(dev, bfregi->lib_uar_4k) * bfregi->num_static_sys_pages;
}
-unsigned long mlx5_ib_get_xlt_emergency_page(void);
-void mlx5_ib_put_xlt_emergency_page(void);
+extern void *xlt_emergency_page;
int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
struct mlx5_bfreg_info *bfregi, u32 bfregn,
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index b261797b258f..24f8d59a42ea 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -41,6 +41,13 @@
#include <rdma/ib_verbs.h>
#include "mlx5_ib.h"
+/*
+ * We can't use a static array for xlt_emergency_page because
+ * dma_map_single() doesn't work on memory that belongs to a kernel module.
+ */
+void *xlt_emergency_page;
+static DEFINE_MUTEX(xlt_emergency_page_mutex);
+
enum {
MAX_PENDING_REG_MR = 8,
};
@@ -49,6 +56,9 @@ enum {
static void
create_mkey_callback(int status, struct mlx5_async_work *context);
+static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
+ u64 iova, int access_flags,
+ unsigned int page_size, bool populate);
static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
struct ib_pd *pd)
@@ -123,19 +133,12 @@ static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
return mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
}
-static inline bool mlx5_ib_pas_fits_in_mr(struct mlx5_ib_mr *mr, u64 start,
- u64 length)
-{
- return ((u64)1 << mr->order) * MLX5_ADAPTER_PAGE_SIZE >=
- length + (start & (MLX5_ADAPTER_PAGE_SIZE - 1));
-}
-
static void create_mkey_callback(int status, struct mlx5_async_work *context)
{
struct mlx5_ib_mr *mr =
container_of(context, struct mlx5_ib_mr, cb_work);
- struct mlx5_ib_dev *dev = mr->dev;
struct mlx5_cache_ent *ent = mr->cache_ent;
+ struct mlx5_ib_dev *dev = ent->dev;
unsigned long flags;
if (status) {
@@ -172,9 +175,7 @@ static struct mlx5_ib_mr *alloc_cache_mr(struct mlx5_cache_ent *ent, void *mkc)
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
return NULL;
- mr->order = ent->order;
mr->cache_ent = ent;
- mr->dev = ent->dev;
set_mkc_access_pd_addr_fields(mkc, 0, 0, ent->dev->umrc.pd);
MLX5_SET(mkc, mkc, free, 1);
@@ -642,6 +643,7 @@ void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
if (mlx5_mr_cache_invalidate(mr)) {
detach_mr_from_cache(mr);
destroy_mkey(dev, mr);
+ kfree(mr);
return;
}
@@ -867,57 +869,6 @@ static int mr_cache_max_order(struct mlx5_ib_dev *dev)
return MLX5_MAX_UMR_SHIFT;
}
-static int mr_umem_get(struct mlx5_ib_dev *dev, u64 start, u64 length,
- int access_flags, struct ib_umem **umem, int *npages,
- int *page_shift, int *ncont, int *order)
-{
- struct ib_umem *u;
-
- *umem = NULL;
-
- if (access_flags & IB_ACCESS_ON_DEMAND) {
- struct ib_umem_odp *odp;
-
- odp = ib_umem_odp_get(&dev->ib_dev, start, length, access_flags,
- &mlx5_mn_ops);
- if (IS_ERR(odp)) {
- mlx5_ib_dbg(dev, "umem get failed (%ld)\n",
- PTR_ERR(odp));
- return PTR_ERR(odp);
- }
-
- u = &odp->umem;
-
- *page_shift = odp->page_shift;
- *ncont = ib_umem_odp_num_pages(odp);
- *npages = *ncont << (*page_shift - PAGE_SHIFT);
- if (order)
- *order = ilog2(roundup_pow_of_two(*ncont));
- } else {
- u = ib_umem_get(&dev->ib_dev, start, length, access_flags);
- if (IS_ERR(u)) {
- mlx5_ib_dbg(dev, "umem get failed (%ld)\n", PTR_ERR(u));
- return PTR_ERR(u);
- }
-
- mlx5_ib_cont_pages(u, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages,
- page_shift, ncont, order);
- }
-
- if (!*npages) {
- mlx5_ib_warn(dev, "avoid zero region\n");
- ib_umem_release(u);
- return -EINVAL;
- }
-
- *umem = u;
-
- mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
- *npages, *ncont, *order, *page_shift);
-
- return 0;
-}
-
static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct mlx5_ib_umr_context *context =
@@ -974,25 +925,49 @@ static struct mlx5_cache_ent *mr_cache_ent_from_order(struct mlx5_ib_dev *dev,
return &cache->ent[order];
}
-static struct mlx5_ib_mr *
-alloc_mr_from_cache(struct ib_pd *pd, struct ib_umem *umem, u64 virt_addr,
- u64 len, int npages, int page_shift, unsigned int order,
- int access_flags)
+static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
+ u64 length, int access_flags)
+{
+ mr->ibmr.lkey = mr->mmkey.key;
+ mr->ibmr.rkey = mr->mmkey.key;
+ mr->ibmr.length = length;
+ mr->ibmr.device = &dev->ib_dev;
+ mr->access_flags = access_flags;
+}
+
+static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
+ struct ib_umem *umem, u64 iova,
+ int access_flags)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
- struct mlx5_cache_ent *ent = mr_cache_ent_from_order(dev, order);
+ struct mlx5_cache_ent *ent;
struct mlx5_ib_mr *mr;
+ unsigned int page_size;
- if (!ent)
- return ERR_PTR(-E2BIG);
-
- /* Matches access in alloc_cache_mr() */
- if (!mlx5_ib_can_reconfig_with_umr(dev, 0, access_flags))
- return ERR_PTR(-EOPNOTSUPP);
+ page_size = mlx5_umem_find_best_pgsz(umem, mkc, log_page_size, 0, iova);
+ if (WARN_ON(!page_size))
+ return ERR_PTR(-EINVAL);
+ ent = mr_cache_ent_from_order(
+ dev, order_base_2(ib_umem_num_dma_blocks(umem, page_size)));
+ /*
+ * Matches access in alloc_cache_mr(). If the MR can't come from the
+ * cache then synchronously create an uncached one.
+ */
+ if (!ent || ent->limit == 0 ||
+ !mlx5_ib_can_reconfig_with_umr(dev, 0, access_flags)) {
+ mutex_lock(&dev->slow_path_mutex);
+ mr = reg_create(pd, umem, iova, access_flags, page_size, false);
+ mutex_unlock(&dev->slow_path_mutex);
+ return mr;
+ }
mr = get_cache_mr(ent);
if (!mr) {
mr = create_cache_mr(ent);
+ /*
+		 * The above already attempted the same mkey creation that
+		 * reg_create() would, so there is no reason to try it again.
+ */
if (IS_ERR(mr))
return mr;
}
@@ -1001,9 +976,12 @@ alloc_mr_from_cache(struct ib_pd *pd, struct ib_umem *umem, u64 virt_addr,
mr->umem = umem;
mr->access_flags = access_flags;
mr->desc_size = sizeof(struct mlx5_mtt);
- mr->mmkey.iova = virt_addr;
- mr->mmkey.size = len;
+ mr->mmkey.iova = iova;
+ mr->mmkey.size = umem->length;
mr->mmkey.pd = to_mpd(pd)->pdn;
+ mr->page_shift = order_base_2(page_size);
+ mr->umem = umem;
+ set_mr_fields(dev, mr, umem->length, access_flags);
return mr;
}
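To make the cache-entry selection concrete, a small stand-alone sketch with assumed numbers (it ignores the IOVA-alignment rounding that ib_umem_num_dma_blocks() performs):

#include <stdio.h>

int main(void)
{
	unsigned long umem_len = 8UL << 20;	/* 8MB umem, assumed */
	unsigned long page_size = 4096;		/* best page size, assumed */
	unsigned long nblocks = umem_len / page_size;	/* 2048 blocks */
	unsigned int order = 0;

	while ((1UL << order) < nblocks)	/* order_base_2(nblocks) */
		order++;
	printf("order %u\n", order);	/* 11: entry for 2^11 translations */
	return 0;
}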
@@ -1012,14 +990,144 @@ alloc_mr_from_cache(struct ib_pd *pd, struct ib_umem *umem, u64 virt_addr,
MLX5_UMR_MTT_ALIGNMENT)
#define MLX5_SPARE_UMR_CHUNK 0x10000
+/*
+ * Allocate a temporary buffer to hold the per-page information to transfer to
+ * HW. For efficiency this should be as large as it can be, but buffer
+ * allocation failure is not allowed, so try smaller sizes.
+ */
+static void *mlx5_ib_alloc_xlt(size_t *nents, size_t ent_size, gfp_t gfp_mask)
+{
+ const size_t xlt_chunk_align =
+		MLX5_UMR_MTT_ALIGNMENT / ent_size;
+ size_t size;
+ void *res = NULL;
+
+ static_assert(PAGE_SIZE % MLX5_UMR_MTT_ALIGNMENT == 0);
+
+ /*
+	 * MLX5_IB_UPD_XLT_ATOMIC doesn't signal an atomic context, just that
+	 * the allocation can't trigger any kind of reclaim.
+ */
+ might_sleep();
+
+ gfp_mask |= __GFP_ZERO;
+
+ /*
+	 * If the system already has a suitable high-order page then just use
+	 * that, but don't try hard to create one. The cap is about 1M, so a
+	 * free x86 huge page will satisfy it.
+ */
+ size = min_t(size_t, ent_size * ALIGN(*nents, xlt_chunk_align),
+ MLX5_MAX_UMR_CHUNK);
+ *nents = size / ent_size;
+ res = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN,
+ get_order(size));
+ if (res)
+ return res;
+
+ if (size > MLX5_SPARE_UMR_CHUNK) {
+ size = MLX5_SPARE_UMR_CHUNK;
+		*nents = size / ent_size;
+ res = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN,
+ get_order(size));
+ if (res)
+ return res;
+ }
+
+ *nents = PAGE_SIZE / ent_size;
+ res = (void *)__get_free_page(gfp_mask);
+ if (res)
+ return res;
+
+ mutex_lock(&xlt_emergency_page_mutex);
+ memset(xlt_emergency_page, 0, PAGE_SIZE);
+ return xlt_emergency_page;
+}
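The allocation ladder above tries progressively smaller buffers; the arithmetic, in a stand-alone sketch with an illustrative 1M cap (the real MLX5_MAX_UMR_CHUNK is of that order):

#include <stdio.h>

#define MAX_UMR_CHUNK   (1UL << 20)	/* illustrative, ~1M */
#define SPARE_UMR_CHUNK 0x10000UL
#define PAGE_SZ         4096UL

int main(void)
{
	size_t ent_size = 8;		/* size of one MTT entry, assumed */
	size_t nents = 1UL << 20;	/* caller wants 1M entries */
	size_t size = nents * ent_size;	/* 8MB requested */

	if (size > MAX_UMR_CHUNK)
		size = MAX_UMR_CHUNK;	/* first try: one big allocation */
	printf("try %zu, then %lu, then %lu, then the emergency page\n",
	       size, SPARE_UMR_CHUNK, PAGE_SZ);
	return 0;
}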
+
+static void mlx5_ib_free_xlt(void *xlt, size_t length)
+{
+ if (xlt == xlt_emergency_page) {
+ mutex_unlock(&xlt_emergency_page_mutex);
+ return;
+ }
+
+ free_pages((unsigned long)xlt, get_order(length));
+}
+
+/*
+ * Create a MLX5_IB_SEND_UMR_UPDATE_XLT work request and XLT buffer ready for
+ * submission.
+ */
+static void *mlx5_ib_create_xlt_wr(struct mlx5_ib_mr *mr,
+ struct mlx5_umr_wr *wr, struct ib_sge *sg,
+ size_t nents, size_t ent_size,
+ unsigned int flags)
+{
+ struct mlx5_ib_dev *dev = mr_to_mdev(mr);
+ struct device *ddev = &dev->mdev->pdev->dev;
+ dma_addr_t dma;
+ void *xlt;
+
+ xlt = mlx5_ib_alloc_xlt(&nents, ent_size,
+ flags & MLX5_IB_UPD_XLT_ATOMIC ? GFP_ATOMIC :
+ GFP_KERNEL);
+ sg->length = nents * ent_size;
+ dma = dma_map_single(ddev, xlt, sg->length, DMA_TO_DEVICE);
+ if (dma_mapping_error(ddev, dma)) {
+ mlx5_ib_err(dev, "unable to map DMA during XLT update.\n");
+ mlx5_ib_free_xlt(xlt, sg->length);
+ return NULL;
+ }
+ sg->addr = dma;
+ sg->lkey = dev->umrc.pd->local_dma_lkey;
+
+ memset(wr, 0, sizeof(*wr));
+ wr->wr.send_flags = MLX5_IB_SEND_UMR_UPDATE_XLT;
+ if (!(flags & MLX5_IB_UPD_XLT_ENABLE))
+ wr->wr.send_flags |= MLX5_IB_SEND_UMR_FAIL_IF_FREE;
+ wr->wr.sg_list = sg;
+ wr->wr.num_sge = 1;
+ wr->wr.opcode = MLX5_IB_WR_UMR;
+ wr->pd = mr->ibmr.pd;
+ wr->mkey = mr->mmkey.key;
+ wr->length = mr->mmkey.size;
+ wr->virt_addr = mr->mmkey.iova;
+ wr->access_flags = mr->access_flags;
+ wr->page_shift = mr->page_shift;
+ wr->xlt_size = sg->length;
+ return xlt;
+}
+
+static void mlx5_ib_unmap_free_xlt(struct mlx5_ib_dev *dev, void *xlt,
+ struct ib_sge *sg)
+{
+ struct device *ddev = &dev->mdev->pdev->dev;
+
+ dma_unmap_single(ddev, sg->addr, sg->length, DMA_TO_DEVICE);
+ mlx5_ib_free_xlt(xlt, sg->length);
+}
+
+static unsigned int xlt_wr_final_send_flags(unsigned int flags)
+{
+ unsigned int res = 0;
+
+ if (flags & MLX5_IB_UPD_XLT_ENABLE)
+ res |= MLX5_IB_SEND_UMR_ENABLE_MR |
+ MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS |
+ MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
+ if (flags & MLX5_IB_UPD_XLT_PD || flags & MLX5_IB_UPD_XLT_ACCESS)
+ res |= MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
+ if (flags & MLX5_IB_UPD_XLT_ADDR)
+ res |= MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
+ return res;
+}
+
int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
int page_shift, int flags)
{
- struct mlx5_ib_dev *dev = mr->dev;
- struct device *ddev = dev->ib_dev.dev.parent;
- int size;
+ struct mlx5_ib_dev *dev = mr_to_mdev(mr);
+ struct device *ddev = &dev->mdev->pdev->dev;
void *xlt;
- dma_addr_t dma;
struct mlx5_umr_wr wr;
struct ib_sge sg;
int err = 0;
@@ -1030,15 +1138,17 @@ int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
const int page_mask = page_align - 1;
size_t pages_mapped = 0;
size_t pages_to_map = 0;
- size_t pages_iter = 0;
+ size_t pages_iter;
size_t size_to_map = 0;
- gfp_t gfp;
- bool use_emergency_page = false;
+ size_t orig_sg_length;
if ((flags & MLX5_IB_UPD_XLT_INDIRECT) &&
!umr_can_use_indirect_mkey(dev))
return -EPERM;
+ if (WARN_ON(!mr->umem->is_odp))
+ return -EINVAL;
+
/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
* so we need to align the offset and length accordingly
*/
@@ -1046,63 +1156,21 @@ int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
npages += idx & page_mask;
idx &= ~page_mask;
}
-
- gfp = flags & MLX5_IB_UPD_XLT_ATOMIC ? GFP_ATOMIC : GFP_KERNEL;
- gfp |= __GFP_ZERO | __GFP_NOWARN;
-
pages_to_map = ALIGN(npages, page_align);
- size = desc_size * pages_to_map;
- size = min_t(int, size, MLX5_MAX_UMR_CHUNK);
-
- xlt = (void *)__get_free_pages(gfp, get_order(size));
- if (!xlt && size > MLX5_SPARE_UMR_CHUNK) {
- mlx5_ib_dbg(dev, "Failed to allocate %d bytes of order %d. fallback to spare UMR allocation od %d bytes\n",
- size, get_order(size), MLX5_SPARE_UMR_CHUNK);
-
- size = MLX5_SPARE_UMR_CHUNK;
- xlt = (void *)__get_free_pages(gfp, get_order(size));
- }
- if (!xlt) {
- mlx5_ib_warn(dev, "Using XLT emergency buffer\n");
- xlt = (void *)mlx5_ib_get_xlt_emergency_page();
- size = PAGE_SIZE;
- memset(xlt, 0, size);
- use_emergency_page = true;
- }
- pages_iter = size / desc_size;
- dma = dma_map_single(ddev, xlt, size, DMA_TO_DEVICE);
- if (dma_mapping_error(ddev, dma)) {
- mlx5_ib_err(dev, "unable to map DMA during XLT update.\n");
- err = -ENOMEM;
- goto free_xlt;
- }
+ xlt = mlx5_ib_create_xlt_wr(mr, &wr, &sg, npages, desc_size, flags);
+ if (!xlt)
+ return -ENOMEM;
+ pages_iter = sg.length / desc_size;
+ orig_sg_length = sg.length;
- if (mr->umem->is_odp) {
- if (!(flags & MLX5_IB_UPD_XLT_INDIRECT)) {
- struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
- size_t max_pages = ib_umem_odp_num_pages(odp) - idx;
+ if (!(flags & MLX5_IB_UPD_XLT_INDIRECT)) {
+ struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
+ size_t max_pages = ib_umem_odp_num_pages(odp) - idx;
- pages_to_map = min_t(size_t, pages_to_map, max_pages);
- }
+ pages_to_map = min_t(size_t, pages_to_map, max_pages);
}
- sg.addr = dma;
- sg.lkey = dev->umrc.pd->local_dma_lkey;
-
- memset(&wr, 0, sizeof(wr));
- wr.wr.send_flags = MLX5_IB_SEND_UMR_UPDATE_XLT;
- if (!(flags & MLX5_IB_UPD_XLT_ENABLE))
- wr.wr.send_flags |= MLX5_IB_SEND_UMR_FAIL_IF_FREE;
- wr.wr.sg_list = &sg;
- wr.wr.num_sge = 1;
- wr.wr.opcode = MLX5_IB_WR_UMR;
-
- wr.pd = mr->ibmr.pd;
- wr.mkey = mr->mmkey.key;
- wr.length = mr->mmkey.size;
- wr.virt_addr = mr->mmkey.iova;
- wr.access_flags = mr->access_flags;
wr.page_shift = page_shift;
for (pages_mapped = 0;
@@ -1110,50 +1178,87 @@ int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
pages_mapped += pages_iter, idx += pages_iter) {
npages = min_t(int, pages_iter, pages_to_map - pages_mapped);
size_to_map = npages * desc_size;
- dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE);
- if (mr->umem->is_odp) {
- mlx5_odp_populate_xlt(xlt, idx, npages, mr, flags);
- } else {
- __mlx5_ib_populate_pas(dev, mr->umem, page_shift, idx,
- npages, xlt,
- MLX5_IB_MTT_PRESENT);
- /* Clear padding after the pages
- * brought from the umem.
- */
- memset(xlt + size_to_map, 0, size - size_to_map);
- }
- dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);
+ dma_sync_single_for_cpu(ddev, sg.addr, sg.length,
+ DMA_TO_DEVICE);
+ mlx5_odp_populate_xlt(xlt, idx, npages, mr, flags);
+ dma_sync_single_for_device(ddev, sg.addr, sg.length,
+ DMA_TO_DEVICE);
sg.length = ALIGN(size_to_map, MLX5_UMR_MTT_ALIGNMENT);
- if (pages_mapped + pages_iter >= pages_to_map) {
- if (flags & MLX5_IB_UPD_XLT_ENABLE)
- wr.wr.send_flags |=
- MLX5_IB_SEND_UMR_ENABLE_MR |
- MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS |
- MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
- if (flags & MLX5_IB_UPD_XLT_PD ||
- flags & MLX5_IB_UPD_XLT_ACCESS)
- wr.wr.send_flags |=
- MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
- if (flags & MLX5_IB_UPD_XLT_ADDR)
- wr.wr.send_flags |=
- MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
- }
+ if (pages_mapped + pages_iter >= pages_to_map)
+ wr.wr.send_flags |= xlt_wr_final_send_flags(flags);
wr.offset = idx * desc_size;
wr.xlt_size = sg.length;
err = mlx5_ib_post_send_wait(dev, &wr);
}
- dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);
+ sg.length = orig_sg_length;
+ mlx5_ib_unmap_free_xlt(dev, xlt, &sg);
+ return err;
+}
+
+/*
+ * Send the DMA list to the HW for a normal MR using UMR.
+ */
+static int mlx5_ib_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags)
+{
+ struct mlx5_ib_dev *dev = mr_to_mdev(mr);
+ struct device *ddev = &dev->mdev->pdev->dev;
+ struct ib_block_iter biter;
+ struct mlx5_mtt *cur_mtt;
+ struct mlx5_umr_wr wr;
+ size_t orig_sg_length;
+ struct mlx5_mtt *mtt;
+ size_t final_size;
+ struct ib_sge sg;
+ int err = 0;
+
+ if (WARN_ON(mr->umem->is_odp))
+ return -EINVAL;
+
+ mtt = mlx5_ib_create_xlt_wr(mr, &wr, &sg,
+ ib_umem_num_dma_blocks(mr->umem,
+ 1 << mr->page_shift),
+ sizeof(*mtt), flags);
+ if (!mtt)
+ return -ENOMEM;
+ orig_sg_length = sg.length;
+
+ cur_mtt = mtt;
+ rdma_for_each_block (mr->umem->sg_head.sgl, &biter, mr->umem->nmap,
+ BIT(mr->page_shift)) {
+ if (cur_mtt == (void *)mtt + sg.length) {
+ dma_sync_single_for_device(ddev, sg.addr, sg.length,
+ DMA_TO_DEVICE);
+ err = mlx5_ib_post_send_wait(dev, &wr);
+ if (err)
+ goto err;
+ dma_sync_single_for_cpu(ddev, sg.addr, sg.length,
+ DMA_TO_DEVICE);
+ wr.offset += sg.length;
+ cur_mtt = mtt;
+ }
+
+ cur_mtt->ptag =
+ cpu_to_be64(rdma_block_iter_dma_address(&biter) |
+ MLX5_IB_MTT_PRESENT);
+ cur_mtt++;
+ }
-free_xlt:
- if (use_emergency_page)
- mlx5_ib_put_xlt_emergency_page();
- else
- free_pages((unsigned long)xlt, get_order(size));
+ final_size = (void *)cur_mtt - (void *)mtt;
+ sg.length = ALIGN(final_size, MLX5_UMR_MTT_ALIGNMENT);
+ memset(cur_mtt, 0, sg.length - final_size);
+ wr.wr.send_flags |= xlt_wr_final_send_flags(flags);
+ wr.xlt_size = sg.length;
+ dma_sync_single_for_device(ddev, sg.addr, sg.length, DMA_TO_DEVICE);
+ err = mlx5_ib_post_send_wait(dev, &wr);
+
+err:
+ sg.length = orig_sg_length;
+ mlx5_ib_unmap_free_xlt(dev, mtt, &sg);
return err;
}
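The loop above encodes one translation entry per DMA block; block addresses are at least 4k aligned, so the low bits are free for flags. A stand-alone sketch (the flag value is assumed for illustration, not the driver's MLX5_IB_MTT_PRESENT definition):

#include <stdio.h>
#include <stdint.h>

#define MTT_PRESENT 0x1ULL	/* assumed flag value */

int main(void)
{
	uint64_t dma = 0x1f4000000ULL;	/* 4k-aligned block address */
	uint64_t ptag = dma | MTT_PRESENT;

	/* the driver additionally byte-swaps with cpu_to_be64() */
	printf("ptag = 0x%llx\n", (unsigned long long)ptag);
	return 0;
}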
@@ -1161,11 +1266,9 @@ free_xlt:
* If ibmr is NULL it will be allocated by reg_create.
* Else, the given ibmr will be used.
*/
-static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
- u64 virt_addr, u64 length,
- struct ib_umem *umem, int npages,
- int page_shift, int access_flags,
- bool populate)
+static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
+ u64 iova, int access_flags,
+ unsigned int page_size, bool populate)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct mlx5_ib_mr *mr;
@@ -1176,16 +1279,20 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
int err;
bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));
- mr = ibmr ? to_mmr(ibmr) : kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!page_size)
+ return ERR_PTR(-EINVAL);
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
return ERR_PTR(-ENOMEM);
mr->ibmr.pd = pd;
mr->access_flags = access_flags;
+ mr->page_shift = order_base_2(page_size);
inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
if (populate)
- inlen += sizeof(*pas) * roundup(npages, 2);
+ inlen += sizeof(*pas) *
+ roundup(ib_umem_num_dma_blocks(umem, page_size), 2);
in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
err = -ENOMEM;
@@ -1197,7 +1304,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
err = -EINVAL;
goto err_2;
}
- mlx5_ib_populate_pas(dev, umem, page_shift, pas,
+ mlx5_ib_populate_pas(umem, 1UL << mr->page_shift, pas,
pg_cap ? MLX5_IB_MTT_PRESENT : 0);
}
@@ -1206,20 +1313,20 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
- set_mkc_access_pd_addr_fields(mkc, access_flags, virt_addr,
+ set_mkc_access_pd_addr_fields(mkc, access_flags, iova,
populate ? pd : dev->umrc.pd);
MLX5_SET(mkc, mkc, free, !populate);
MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
MLX5_SET(mkc, mkc, umr_en, 1);
- MLX5_SET64(mkc, mkc, len, length);
+ MLX5_SET64(mkc, mkc, len, umem->length);
MLX5_SET(mkc, mkc, bsf_octword_size, 0);
MLX5_SET(mkc, mkc, translations_octword_size,
- get_octo_len(virt_addr, length, page_shift));
- MLX5_SET(mkc, mkc, log_page_size, page_shift);
+ get_octo_len(iova, umem->length, mr->page_shift));
+ MLX5_SET(mkc, mkc, log_page_size, mr->page_shift);
if (populate) {
MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
- get_octo_len(virt_addr, length, page_shift));
+ get_octo_len(iova, umem->length, mr->page_shift));
}
err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
@@ -1229,7 +1336,8 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
}
mr->mmkey.type = MLX5_MKEY_MR;
mr->desc_size = sizeof(struct mlx5_mtt);
- mr->dev = dev;
+ mr->umem = umem;
+ set_mr_fields(dev, mr, umem->length, access_flags);
kvfree(in);
mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);
@@ -1238,25 +1346,11 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
err_2:
kvfree(in);
-
err_1:
- if (!ibmr)
- kfree(mr);
-
+ kfree(mr);
return ERR_PTR(err);
}
-static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
- int npages, u64 length, int access_flags)
-{
- mr->npages = npages;
- atomic_add(npages, &dev->mdev->priv.reg_pages);
- mr->ibmr.lkey = mr->mmkey.key;
- mr->ibmr.rkey = mr->mmkey.key;
- mr->ibmr.length = length;
- mr->access_flags = access_flags;
-}
-
static struct ib_mr *mlx5_ib_get_dm_mr(struct ib_pd *pd, u64 start_addr,
u64 length, int acc, int mode)
{
@@ -1290,8 +1384,7 @@ static struct ib_mr *mlx5_ib_get_dm_mr(struct ib_pd *pd, u64 start_addr,
kfree(in);
- mr->umem = NULL;
- set_mr_fields(dev, mr, 0, length, acc);
+ set_mr_fields(dev, mr, length, acc);
return &mr->ibmr;
@@ -1352,116 +1445,128 @@ struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
attr->access_flags, mode);
}
-struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
- u64 virt_addr, int access_flags,
- struct ib_udata *udata)
+static struct ib_mr *create_real_mr(struct ib_pd *pd, struct ib_umem *umem,
+ u64 iova, int access_flags)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct mlx5_ib_mr *mr = NULL;
bool xlt_with_umr;
- struct ib_umem *umem;
- int page_shift;
- int npages;
- int ncont;
- int order;
int err;
- if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM))
- return ERR_PTR(-EOPNOTSUPP);
-
- mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
- start, virt_addr, length, access_flags);
-
- xlt_with_umr = mlx5_ib_can_load_pas_with_umr(dev, length);
- /* ODP requires xlt update via umr to work. */
- if (!xlt_with_umr && (access_flags & IB_ACCESS_ON_DEMAND))
- return ERR_PTR(-EINVAL);
-
- if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && !start &&
- length == U64_MAX) {
- if (virt_addr != start)
- return ERR_PTR(-EINVAL);
- if (!(access_flags & IB_ACCESS_ON_DEMAND) ||
- !(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
- return ERR_PTR(-EINVAL);
-
- mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), udata, access_flags);
- if (IS_ERR(mr))
- return ERR_CAST(mr);
- return &mr->ibmr;
- }
-
- err = mr_umem_get(dev, start, length, access_flags, &umem,
- &npages, &page_shift, &ncont, &order);
-
- if (err < 0)
- return ERR_PTR(err);
-
+ xlt_with_umr = mlx5_ib_can_load_pas_with_umr(dev, umem->length);
if (xlt_with_umr) {
- mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
- page_shift, order, access_flags);
- if (IS_ERR(mr))
- mr = NULL;
- }
+ mr = alloc_cacheable_mr(pd, umem, iova, access_flags);
+ } else {
+ unsigned int page_size = mlx5_umem_find_best_pgsz(
+ umem, mkc, log_page_size, 0, iova);
- if (!mr) {
mutex_lock(&dev->slow_path_mutex);
- mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
- page_shift, access_flags, !xlt_with_umr);
+ mr = reg_create(pd, umem, iova, access_flags, page_size, true);
mutex_unlock(&dev->slow_path_mutex);
}
-
if (IS_ERR(mr)) {
- err = PTR_ERR(mr);
- goto error;
+ ib_umem_release(umem);
+ return ERR_CAST(mr);
}
mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);
- mr->umem = umem;
- set_mr_fields(dev, mr, npages, length, access_flags);
+ atomic_add(ib_umem_num_pages(umem), &dev->mdev->priv.reg_pages);
- if (xlt_with_umr && !(access_flags & IB_ACCESS_ON_DEMAND)) {
+ if (xlt_with_umr) {
/*
* If the MR was created with reg_create then it will be
* configured properly but left disabled. It is safe to go ahead
* and configure it again via UMR while enabling it.
*/
- int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;
-
- err = mlx5_ib_update_xlt(mr, 0, ncont, page_shift,
- update_xlt_flags);
+ err = mlx5_ib_update_mr_pas(mr, MLX5_IB_UPD_XLT_ENABLE);
if (err) {
dereg_mr(dev, mr);
return ERR_PTR(err);
}
}
+ return &mr->ibmr;
+}
- if (is_odp_mr(mr)) {
- to_ib_umem_odp(mr->umem)->private = mr;
- init_waitqueue_head(&mr->q_deferred_work);
- atomic_set(&mr->num_deferred_work, 0);
- err = xa_err(xa_store(&dev->odp_mkeys,
- mlx5_base_mkey(mr->mmkey.key), &mr->mmkey,
- GFP_KERNEL));
- if (err) {
- dereg_mr(dev, mr);
- return ERR_PTR(err);
- }
+static struct ib_mr *create_user_odp_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 iova, int access_flags,
+ struct ib_udata *udata)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct ib_umem_odp *odp;
+ struct mlx5_ib_mr *mr;
+ int err;
- err = mlx5_ib_init_odp_mr(mr, xlt_with_umr);
- if (err) {
- dereg_mr(dev, mr);
- return ERR_PTR(err);
- }
+ if (!IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ if (!start && length == U64_MAX) {
+ if (iova != 0)
+ return ERR_PTR(-EINVAL);
+ if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
+ return ERR_PTR(-EINVAL);
+
+ mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), udata, access_flags);
+ if (IS_ERR(mr))
+ return ERR_CAST(mr);
+ return &mr->ibmr;
+ }
+
+ /* ODP requires xlt update via umr to work. */
+ if (!mlx5_ib_can_load_pas_with_umr(dev, length))
+ return ERR_PTR(-EINVAL);
+
+ odp = ib_umem_odp_get(&dev->ib_dev, start, length, access_flags,
+ &mlx5_mn_ops);
+ if (IS_ERR(odp))
+ return ERR_CAST(odp);
+
+ mr = alloc_cacheable_mr(pd, &odp->umem, iova, access_flags);
+ if (IS_ERR(mr)) {
+ ib_umem_release(&odp->umem);
+ return ERR_CAST(mr);
}
+ odp->private = mr;
+ init_waitqueue_head(&mr->q_deferred_work);
+ atomic_set(&mr->num_deferred_work, 0);
+ err = xa_err(xa_store(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key),
+ &mr->mmkey, GFP_KERNEL));
+ if (err)
+ goto err_dereg_mr;
+
+ err = mlx5_ib_init_odp_mr(mr);
+ if (err)
+ goto err_dereg_mr;
return &mr->ibmr;
-error:
- ib_umem_release(umem);
+
+err_dereg_mr:
+ dereg_mr(dev, mr);
return ERR_PTR(err);
}
+struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 iova, int access_flags,
+ struct ib_udata *udata)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct ib_umem *umem;
+
+ if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ mlx5_ib_dbg(dev, "start 0x%llx, iova 0x%llx, length 0x%llx, access_flags 0x%x\n",
+ start, iova, length, access_flags);
+
+ if (access_flags & IB_ACCESS_ON_DEMAND)
+ return create_user_odp_mr(pd, start, length, iova, access_flags,
+ udata);
+ umem = ib_umem_get(&dev->ib_dev, start, length, access_flags);
+ if (IS_ERR(umem))
+ return ERR_CAST(umem);
+ return create_real_mr(pd, umem, iova, access_flags);
+}
+
/**
* mlx5_mr_cache_invalidate - Fence all DMA on the MR
* @mr: The MR to fence
@@ -1474,151 +1579,224 @@ int mlx5_mr_cache_invalidate(struct mlx5_ib_mr *mr)
{
struct mlx5_umr_wr umrwr = {};
- if (mr->dev->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
+ if (mr_to_mdev(mr)->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
return 0;
umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR |
MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
umrwr.wr.opcode = MLX5_IB_WR_UMR;
- umrwr.pd = mr->dev->umrc.pd;
+ umrwr.pd = mr_to_mdev(mr)->umrc.pd;
umrwr.mkey = mr->mmkey.key;
umrwr.ignore_free_state = 1;
- return mlx5_ib_post_send_wait(mr->dev, &umrwr);
+ return mlx5_ib_post_send_wait(mr_to_mdev(mr), &umrwr);
}
-static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr,
- int access_flags, int flags)
-{
- struct mlx5_ib_dev *dev = to_mdev(pd->device);
- struct mlx5_umr_wr umrwr = {};
+/*
+ * True if the change in access flags can be done via UMR, only some access
+ * flags can be updated.
+ */
+static bool can_use_umr_rereg_access(struct mlx5_ib_dev *dev,
+ unsigned int current_access_flags,
+ unsigned int target_access_flags)
+{
+ unsigned int diffs = current_access_flags ^ target_access_flags;
+
+ if (diffs & ~(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
+ IB_ACCESS_REMOTE_READ | IB_ACCESS_RELAXED_ORDERING))
+ return false;
+ return mlx5_ib_can_reconfig_with_umr(dev, current_access_flags,
+ target_access_flags);
+}
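The XOR isolates exactly the access bits that change; a change is UMR-eligible only if every changed bit is in the updatable set. A stand-alone sketch with assumed flag encodings:

#include <stdio.h>

#define LOCAL_WRITE  (1u << 0)	/* assumed flag encodings */
#define REMOTE_WRITE (1u << 1)
#define REMOTE_READ  (1u << 2)
#define MW_BIND      (1u << 6)	/* not updatable in this sketch */

int main(void)
{
	unsigned int cur = LOCAL_WRITE | REMOTE_READ;
	unsigned int tgt = LOCAL_WRITE | REMOTE_WRITE;
	unsigned int diffs = cur ^ tgt;	/* REMOTE_READ | REMOTE_WRITE */
	unsigned int ok = !(diffs &
			    ~(LOCAL_WRITE | REMOTE_WRITE | REMOTE_READ));

	printf("umr eligible: %u\n", ok);	/* 1 */
	return 0;
}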
+
+static int umr_rereg_pd_access(struct mlx5_ib_mr *mr, struct ib_pd *pd,
+ int access_flags)
+{
+ struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
+ struct mlx5_umr_wr umrwr = {
+ .wr = {
+ .send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE |
+ MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS,
+ .opcode = MLX5_IB_WR_UMR,
+ },
+ .mkey = mr->mmkey.key,
+ .pd = pd,
+ .access_flags = access_flags,
+ };
int err;
- umrwr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE;
-
- umrwr.wr.opcode = MLX5_IB_WR_UMR;
- umrwr.mkey = mr->mmkey.key;
-
- if (flags & IB_MR_REREG_PD || flags & IB_MR_REREG_ACCESS) {
- umrwr.pd = pd;
- umrwr.access_flags = access_flags;
- umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
- }
-
err = mlx5_ib_post_send_wait(dev, &umrwr);
+ if (err)
+ return err;
- return err;
+ mr->access_flags = access_flags;
+ mr->mmkey.pd = to_mpd(pd)->pdn;
+ return 0;
}
-int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
- u64 length, u64 virt_addr, int new_access_flags,
- struct ib_pd *new_pd, struct ib_udata *udata)
+static bool can_use_umr_rereg_pas(struct mlx5_ib_mr *mr,
+ struct ib_umem *new_umem,
+ int new_access_flags, u64 iova,
+ unsigned long *page_size)
{
- struct mlx5_ib_dev *dev = to_mdev(ib_mr->device);
- struct mlx5_ib_mr *mr = to_mmr(ib_mr);
- struct ib_pd *pd = (flags & IB_MR_REREG_PD) ? new_pd : ib_mr->pd;
- int access_flags = flags & IB_MR_REREG_ACCESS ?
- new_access_flags :
- mr->access_flags;
- int page_shift = 0;
- int upd_flags = 0;
- int npages = 0;
- int ncont = 0;
- int order = 0;
- u64 addr, len;
- int err;
+ struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
- mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
- start, virt_addr, length, access_flags);
+ /* We only track the allocated sizes of MRs from the cache */
+ if (!mr->cache_ent)
+ return false;
+ if (!mlx5_ib_can_load_pas_with_umr(dev, new_umem->length))
+ return false;
- atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);
+ *page_size =
+ mlx5_umem_find_best_pgsz(new_umem, mkc, log_page_size, 0, iova);
+ if (WARN_ON(!*page_size))
+ return false;
+ return (1ULL << mr->cache_ent->order) >=
+ ib_umem_num_dma_blocks(new_umem, *page_size);
+}
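The final comparison checks that the cached mkey has room for the new translation list; with assumed numbers:

#include <stdio.h>

int main(void)
{
	unsigned int order = 9;			/* cache_ent->order, assumed */
	unsigned long capacity = 1UL << order;	/* 512 entries */
	unsigned long blocks = (2UL << 20) >> 12;	/* 2MB at 4k = 512 */

	printf("fits: %d\n", capacity >= blocks ? 1 : 0);	/* 1 */
	return 0;
}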
- if (!mr->umem)
- return -EINVAL;
+static int umr_rereg_pas(struct mlx5_ib_mr *mr, struct ib_pd *pd,
+ int access_flags, int flags, struct ib_umem *new_umem,
+ u64 iova, unsigned long page_size)
+{
+ struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
+ int upd_flags = MLX5_IB_UPD_XLT_ADDR | MLX5_IB_UPD_XLT_ENABLE;
+ struct ib_umem *old_umem = mr->umem;
+ int err;
- if (is_odp_mr(mr))
- return -EOPNOTSUPP;
+ /*
+ * To keep everything simple the MR is revoked before we start to mess
+	 * with it. This ensures the change is atomic relative to any use of
+ * MR.
+ */
+ err = mlx5_mr_cache_invalidate(mr);
+ if (err)
+ return err;
- if (flags & IB_MR_REREG_TRANS) {
- addr = virt_addr;
- len = length;
- } else {
- addr = mr->umem->address;
- len = mr->umem->length;
+ if (flags & IB_MR_REREG_PD) {
+ mr->ibmr.pd = pd;
+ mr->mmkey.pd = to_mpd(pd)->pdn;
+ upd_flags |= MLX5_IB_UPD_XLT_PD;
+ }
+ if (flags & IB_MR_REREG_ACCESS) {
+ mr->access_flags = access_flags;
+ upd_flags |= MLX5_IB_UPD_XLT_ACCESS;
}
- if (flags != IB_MR_REREG_PD) {
+ mr->ibmr.length = new_umem->length;
+ mr->mmkey.iova = iova;
+ mr->mmkey.size = new_umem->length;
+ mr->page_shift = order_base_2(page_size);
+ mr->umem = new_umem;
+ err = mlx5_ib_update_mr_pas(mr, upd_flags);
+ if (err) {
/*
- * Replace umem. This needs to be done whether or not UMR is
- * used.
+		 * The MR is revoked at this point, so it is safe to free
+		 * new_umem.
*/
- flags |= IB_MR_REREG_TRANS;
- ib_umem_release(mr->umem);
- mr->umem = NULL;
- err = mr_umem_get(dev, addr, len, access_flags, &mr->umem,
- &npages, &page_shift, &ncont, &order);
- if (err)
- goto err;
+ mr->umem = old_umem;
+ return err;
}
- if (!mlx5_ib_can_reconfig_with_umr(dev, mr->access_flags,
- access_flags) ||
- !mlx5_ib_can_load_pas_with_umr(dev, len) ||
- (flags & IB_MR_REREG_TRANS &&
- !mlx5_ib_pas_fits_in_mr(mr, addr, len))) {
- /*
- * UMR can't be used - MKey needs to be replaced.
- */
- if (mr->cache_ent)
- detach_mr_from_cache(mr);
- err = destroy_mkey(dev, mr);
- if (err)
- goto err;
+ atomic_sub(ib_umem_num_pages(old_umem), &dev->mdev->priv.reg_pages);
+ ib_umem_release(old_umem);
+ atomic_add(ib_umem_num_pages(new_umem), &dev->mdev->priv.reg_pages);
+ return 0;
+}
- mr = reg_create(ib_mr, pd, addr, len, mr->umem, ncont,
- page_shift, access_flags, true);
+struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
+ u64 length, u64 iova, int new_access_flags,
+ struct ib_pd *new_pd,
+ struct ib_udata *udata)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ib_mr->device);
+ struct mlx5_ib_mr *mr = to_mmr(ib_mr);
+ int err;
- if (IS_ERR(mr)) {
- err = PTR_ERR(mr);
- mr = to_mmr(ib_mr);
- goto err;
- }
- } else {
- /*
- * Send a UMR WQE
- */
- mr->ibmr.pd = pd;
- mr->access_flags = access_flags;
- mr->mmkey.iova = addr;
- mr->mmkey.size = len;
- mr->mmkey.pd = to_mpd(pd)->pdn;
+ if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM))
+ return ERR_PTR(-EOPNOTSUPP);
- if (flags & IB_MR_REREG_TRANS) {
- upd_flags = MLX5_IB_UPD_XLT_ADDR;
- if (flags & IB_MR_REREG_PD)
- upd_flags |= MLX5_IB_UPD_XLT_PD;
- if (flags & IB_MR_REREG_ACCESS)
- upd_flags |= MLX5_IB_UPD_XLT_ACCESS;
- err = mlx5_ib_update_xlt(mr, 0, npages, page_shift,
- upd_flags);
- } else {
- err = rereg_umr(pd, mr, access_flags, flags);
+ mlx5_ib_dbg(
+ dev,
+ "start 0x%llx, iova 0x%llx, length 0x%llx, access_flags 0x%x\n",
+ start, iova, length, new_access_flags);
+
+ if (flags & ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ if (!(flags & IB_MR_REREG_ACCESS))
+ new_access_flags = mr->access_flags;
+ if (!(flags & IB_MR_REREG_PD))
+ new_pd = ib_mr->pd;
+
+ if (!(flags & IB_MR_REREG_TRANS)) {
+ struct ib_umem *umem;
+
+ /* Fast path for PD/access change */
+ if (can_use_umr_rereg_access(dev, mr->access_flags,
+ new_access_flags)) {
+ err = umr_rereg_pd_access(mr, new_pd, new_access_flags);
+ if (err)
+ return ERR_PTR(err);
+ return NULL;
}
+		/* DM or ODP MRs don't have a umem so we can't re-use it */
+ if (!mr->umem || is_odp_mr(mr))
+ goto recreate;
+ /*
+		 * Only one active MR can refer to a umem at one time; revoke
+		 * the old MR before assigning the umem to the new one.
+ */
+ err = mlx5_mr_cache_invalidate(mr);
if (err)
- goto err;
- }
-
- set_mr_fields(dev, mr, npages, len, access_flags);
+ return ERR_PTR(err);
+ umem = mr->umem;
+ mr->umem = NULL;
+ atomic_sub(ib_umem_num_pages(umem), &dev->mdev->priv.reg_pages);
- return 0;
+ return create_real_mr(new_pd, umem, mr->mmkey.iova,
+ new_access_flags);
+ }
-err:
- ib_umem_release(mr->umem);
- mr->umem = NULL;
+ /*
+	 * DM doesn't have a PAS list, so we can't re-use it; ODP does, but the
+	 * logic around releasing the umem is different.
+ */
+ if (!mr->umem || is_odp_mr(mr))
+ goto recreate;
+
+ if (!(new_access_flags & IB_ACCESS_ON_DEMAND) &&
+ can_use_umr_rereg_access(dev, mr->access_flags, new_access_flags)) {
+ struct ib_umem *new_umem;
+ unsigned long page_size;
+
+ new_umem = ib_umem_get(&dev->ib_dev, start, length,
+ new_access_flags);
+ if (IS_ERR(new_umem))
+ return ERR_CAST(new_umem);
+
+ /* Fast path for PAS change */
+ if (can_use_umr_rereg_pas(mr, new_umem, new_access_flags, iova,
+ &page_size)) {
+ err = umr_rereg_pas(mr, new_pd, new_access_flags, flags,
+ new_umem, iova, page_size);
+ if (err) {
+ ib_umem_release(new_umem);
+ return ERR_PTR(err);
+ }
+ return NULL;
+ }
+ return create_real_mr(new_pd, new_umem, iova, new_access_flags);
+ }
- clean_mr(dev, mr);
- return err;
+ /*
+	 * Everything else has no state we can preserve; just create a new MR
+	 * from scratch.
+ */
+recreate:
+ return mlx5_ib_reg_user_mr(new_pd, start, length, iova,
+ new_access_flags, udata);
}
static int
@@ -1627,6 +1805,8 @@ mlx5_alloc_priv_descs(struct ib_device *device,
int ndescs,
int desc_size)
{
+ struct mlx5_ib_dev *dev = to_mdev(device);
+ struct device *ddev = &dev->mdev->pdev->dev;
int size = ndescs * desc_size;
int add_size;
int ret;
@@ -1639,9 +1819,8 @@ mlx5_alloc_priv_descs(struct ib_device *device,
mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN);
- mr->desc_map = dma_map_single(device->dev.parent, mr->descs,
- size, DMA_TO_DEVICE);
- if (dma_mapping_error(device->dev.parent, mr->desc_map)) {
+ mr->desc_map = dma_map_single(ddev, mr->descs, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(ddev, mr->desc_map)) {
ret = -ENOMEM;
goto err;
}
@@ -1659,9 +1838,10 @@ mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
if (mr->descs) {
struct ib_device *device = mr->ibmr.device;
int size = mr->max_descs * mr->desc_size;
+ struct mlx5_ib_dev *dev = to_mdev(device);
- dma_unmap_single(device->dev.parent, mr->desc_map,
- size, DMA_TO_DEVICE);
+ dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size,
+ DMA_TO_DEVICE);
kfree(mr->descs_alloc);
mr->descs = NULL;
}
@@ -1691,7 +1871,6 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
- int npages = mr->npages;
struct ib_umem *umem = mr->umem;
/* Stop all DMA */
@@ -1700,14 +1879,17 @@ static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
else
clean_mr(dev, mr);
+ if (umem) {
+ if (!is_odp_mr(mr))
+ atomic_sub(ib_umem_num_pages(umem),
+ &dev->mdev->priv.reg_pages);
+ ib_umem_release(umem);
+ }
+
if (mr->cache_ent)
mlx5_mr_cache_free(dev, mr);
else
kfree(mr);
-
- ib_umem_release(umem);
- atomic_sub(npages, &dev->mdev->priv.reg_pages);
-
}
int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 5c853ec1b0d8..aa2413b50adc 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -102,7 +102,7 @@ static void populate_klm(struct mlx5_klm *pklm, size_t idx, size_t nentries,
if (flags & MLX5_IB_UPD_XLT_ZAP) {
for (; pklm != end; pklm++, idx++) {
pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
- pklm->key = cpu_to_be32(imr->dev->null_mkey);
+ pklm->key = cpu_to_be32(mr_to_mdev(imr)->null_mkey);
pklm->va = 0;
}
return;
@@ -129,7 +129,7 @@ static void populate_klm(struct mlx5_klm *pklm, size_t idx, size_t nentries,
* locking around the xarray.
*/
lockdep_assert_held(&to_ib_umem_odp(imr->umem)->umem_mutex);
- lockdep_assert_held(&imr->dev->odp_srcu);
+ lockdep_assert_held(&mr_to_mdev(imr)->odp_srcu);
for (; pklm != end; pklm++, idx++) {
struct mlx5_ib_mr *mtt = xa_load(&imr->implicit_children, idx);
@@ -139,7 +139,7 @@ static void populate_klm(struct mlx5_klm *pklm, size_t idx, size_t nentries,
pklm->key = cpu_to_be32(mtt->ibmr.lkey);
pklm->va = cpu_to_be64(idx * MLX5_IMR_MTT_SIZE);
} else {
- pklm->key = cpu_to_be32(imr->dev->null_mkey);
+ pklm->key = cpu_to_be32(mr_to_mdev(imr)->null_mkey);
pklm->va = 0;
}
}
@@ -199,7 +199,7 @@ static void dma_fence_odp_mr(struct mlx5_ib_mr *mr)
mutex_unlock(&odp->umem_mutex);
if (!mr->cache_ent) {
- mlx5_core_destroy_mkey(mr->dev->mdev, &mr->mmkey);
+ mlx5_core_destroy_mkey(mr_to_mdev(mr)->mdev, &mr->mmkey);
WARN_ON(mr->descs);
}
}
@@ -222,19 +222,19 @@ static void free_implicit_child_mr(struct mlx5_ib_mr *mr, bool need_imr_xlt)
WARN_ON(atomic_read(&mr->num_deferred_work));
if (need_imr_xlt) {
- srcu_key = srcu_read_lock(&mr->dev->odp_srcu);
+ srcu_key = srcu_read_lock(&mr_to_mdev(mr)->odp_srcu);
mutex_lock(&odp_imr->umem_mutex);
mlx5_ib_update_xlt(mr->parent, idx, 1, 0,
MLX5_IB_UPD_XLT_INDIRECT |
MLX5_IB_UPD_XLT_ATOMIC);
mutex_unlock(&odp_imr->umem_mutex);
- srcu_read_unlock(&mr->dev->odp_srcu, srcu_key);
+ srcu_read_unlock(&mr_to_mdev(mr)->odp_srcu, srcu_key);
}
dma_fence_odp_mr(mr);
mr->parent = NULL;
- mlx5_mr_cache_free(mr->dev, mr);
+ mlx5_mr_cache_free(mr_to_mdev(mr), mr);
ib_umem_odp_release(odp);
if (atomic_dec_and_test(&imr->num_deferred_work))
wake_up(&imr->q_deferred_work);
@@ -274,7 +274,7 @@ static void destroy_unused_implicit_child_mr(struct mlx5_ib_mr *mr)
goto out_unlock;
atomic_inc(&imr->num_deferred_work);
- call_srcu(&mr->dev->odp_srcu, &mr->odp_destroy.rcu,
+ call_srcu(&mr_to_mdev(mr)->odp_srcu, &mr->odp_destroy.rcu,
free_implicit_child_mr_rcu);
out_unlock:
@@ -476,12 +476,13 @@ static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
if (IS_ERR(odp))
return ERR_CAST(odp);
- ret = mr = mlx5_mr_cache_alloc(imr->dev, MLX5_IMR_MTT_CACHE_ENTRY,
- imr->access_flags);
+ ret = mr = mlx5_mr_cache_alloc(
+ mr_to_mdev(imr), MLX5_IMR_MTT_CACHE_ENTRY, imr->access_flags);
if (IS_ERR(mr))
goto out_umem;
mr->ibmr.pd = imr->ibmr.pd;
+ mr->ibmr.device = &mr_to_mdev(imr)->ib_dev;
mr->umem = &odp->umem;
mr->ibmr.lkey = mr->mmkey.key;
mr->ibmr.rkey = mr->mmkey.key;
@@ -517,11 +518,11 @@ static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
goto out_mr;
}
- mlx5_ib_dbg(imr->dev, "key %x mr %p\n", mr->mmkey.key, mr);
+ mlx5_ib_dbg(mr_to_mdev(imr), "key %x mr %p\n", mr->mmkey.key, mr);
return mr;
out_mr:
- mlx5_mr_cache_free(imr->dev, mr);
+ mlx5_mr_cache_free(mr_to_mdev(imr), mr);
out_umem:
ib_umem_odp_release(odp);
return ret;
@@ -536,6 +537,10 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
struct mlx5_ib_mr *imr;
int err;
+ if (!mlx5_ib_can_load_pas_with_umr(dev,
+ MLX5_IMR_MTT_ENTRIES * PAGE_SIZE))
+ return ERR_PTR(-EOPNOTSUPP);
+
umem_odp = ib_umem_odp_alloc_implicit(&dev->ib_dev, access_flags);
if (IS_ERR(umem_odp))
return ERR_CAST(umem_odp);
@@ -551,6 +556,7 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
imr->umem = &umem_odp->umem;
imr->ibmr.lkey = imr->mmkey.key;
imr->ibmr.rkey = imr->mmkey.key;
+ imr->ibmr.device = &dev->ib_dev;
imr->umem = &umem_odp->umem;
imr->is_odp_implicit = true;
atomic_set(&imr->num_deferred_work, 0);
@@ -584,7 +590,7 @@ out_umem:
void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
{
struct ib_umem_odp *odp_imr = to_ib_umem_odp(imr->umem);
- struct mlx5_ib_dev *dev = imr->dev;
+ struct mlx5_ib_dev *dev = mr_to_mdev(imr);
struct list_head destroy_list;
struct mlx5_ib_mr *mtt;
struct mlx5_ib_mr *tmp;
@@ -654,10 +660,10 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
void mlx5_ib_fence_odp_mr(struct mlx5_ib_mr *mr)
{
/* Prevent new page faults and prefetch requests from succeeding */
- xa_erase(&mr->dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key));
+ xa_erase(&mr_to_mdev(mr)->odp_mkeys, mlx5_base_mkey(mr->mmkey.key));
/* Wait for all running page-fault handlers to finish. */
- synchronize_srcu(&mr->dev->odp_srcu);
+ synchronize_srcu(&mr_to_mdev(mr)->odp_srcu);
wait_event(mr->q_deferred_work, !atomic_read(&mr->num_deferred_work));
@@ -701,7 +707,7 @@ static int pagefault_real_mr(struct mlx5_ib_mr *mr, struct ib_umem_odp *odp,
if (ret < 0) {
if (ret != -EAGAIN)
- mlx5_ib_err(mr->dev,
+ mlx5_ib_err(mr_to_mdev(mr),
"Failed to update mkey page tables\n");
goto out;
}
@@ -791,7 +797,7 @@ out:
MLX5_IB_UPD_XLT_ATOMIC);
mutex_unlock(&odp_imr->umem_mutex);
if (err) {
- mlx5_ib_err(imr->dev, "Failed to update PAS\n");
+ mlx5_ib_err(mr_to_mdev(imr), "Failed to update PAS\n");
return err;
}
return ret;
@@ -811,7 +817,7 @@ static int pagefault_mr(struct mlx5_ib_mr *mr, u64 io_virt, size_t bcnt,
{
struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
- lockdep_assert_held(&mr->dev->odp_srcu);
+ lockdep_assert_held(&mr_to_mdev(mr)->odp_srcu);
if (unlikely(io_virt < mr->mmkey.iova))
return -EFAULT;
@@ -831,17 +837,13 @@ static int pagefault_mr(struct mlx5_ib_mr *mr, u64 io_virt, size_t bcnt,
flags);
}
-int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr, bool enable)
+int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr)
{
- u32 flags = MLX5_PF_FLAGS_SNAPSHOT;
int ret;
- if (enable)
- flags |= MLX5_PF_FLAGS_ENABLE;
-
- ret = pagefault_real_mr(mr, to_ib_umem_odp(mr->umem),
- mr->umem->address, mr->umem->length, NULL,
- flags);
+ ret = pagefault_real_mr(mr, to_ib_umem_odp(mr->umem), mr->umem->address,
+ mr->umem->length, NULL,
+ MLX5_PF_FLAGS_SNAPSHOT | MLX5_PF_FLAGS_ENABLE);
return ret >= 0 ? 0 : ret;
}
@@ -1783,7 +1785,7 @@ static void mlx5_ib_prefetch_mr_work(struct work_struct *w)
/* We rely on IB/core that work is executed if we have num_sge != 0 only. */
WARN_ON(!work->num_sge);
- dev = work->frags[0].mr->dev;
+ dev = mr_to_mdev(work->frags[0].mr);
/* SRCU should be held when calling to mlx5_odp_populate_xlt() */
srcu_key = srcu_read_lock(&dev->odp_srcu);
for (i = 0; i < work->num_sge; ++i) {
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 600e056798c0..0cb7cc642d87 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -778,39 +778,6 @@ int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
return bfregi->sys_pages[index_of_sys_page] + offset;
}
-static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata,
- unsigned long addr, size_t size,
- struct ib_umem **umem, int *npages, int *page_shift,
- int *ncont, u32 *offset)
-{
- int err;
-
- *umem = ib_umem_get(&dev->ib_dev, addr, size, 0);
- if (IS_ERR(*umem)) {
- mlx5_ib_dbg(dev, "umem_get failed\n");
- return PTR_ERR(*umem);
- }
-
- mlx5_ib_cont_pages(*umem, addr, 0, npages, page_shift, ncont, NULL);
-
- err = mlx5_ib_get_buf_offset(addr, *page_shift, offset);
- if (err) {
- mlx5_ib_warn(dev, "bad offset\n");
- goto err_umem;
- }
-
- mlx5_ib_dbg(dev, "addr 0x%lx, size %zu, npages %d, page_shift %d, ncont %d, offset %d\n",
- addr, size, *npages, *page_shift, *ncont, *offset);
-
- return 0;
-
-err_umem:
- ib_umem_release(*umem);
- *umem = NULL;
-
- return err;
-}
-
static void destroy_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
struct mlx5_ib_rwq *rwq, struct ib_udata *udata)
{
@@ -833,10 +800,8 @@ static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
{
struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
udata, struct mlx5_ib_ucontext, ibucontext);
- int page_shift = 0;
- int npages;
+ unsigned long page_size = 0;
u32 offset = 0;
- int ncont = 0;
int err;
if (!ucmd->buf_addr)
@@ -849,23 +814,26 @@ static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
return err;
}
- mlx5_ib_cont_pages(rwq->umem, ucmd->buf_addr, 0, &npages, &page_shift,
- &ncont, NULL);
- err = mlx5_ib_get_buf_offset(ucmd->buf_addr, page_shift,
- &rwq->rq_page_offset);
- if (err) {
+ page_size = mlx5_umem_find_best_quantized_pgoff(
+ rwq->umem, wq, log_wq_pg_sz, MLX5_ADAPTER_PAGE_SHIFT,
+ page_offset, 64, &rwq->rq_page_offset);
+ if (!page_size) {
mlx5_ib_warn(dev, "bad offset\n");
+ err = -EINVAL;
goto err_umem;
}
- rwq->rq_num_pas = ncont;
- rwq->page_shift = page_shift;
- rwq->log_page_size = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
+ rwq->rq_num_pas = ib_umem_num_dma_blocks(rwq->umem, page_size);
+ rwq->page_shift = order_base_2(page_size);
+ rwq->log_page_size = rwq->page_shift - MLX5_ADAPTER_PAGE_SHIFT;
rwq->wq_sig = !!(ucmd->flags & MLX5_WQ_FLAG_SIGNATURE);
- mlx5_ib_dbg(dev, "addr 0x%llx, size %zd, npages %d, page_shift %d, ncont %d, offset %d\n",
- (unsigned long long)ucmd->buf_addr, rwq->buf_size,
- npages, page_shift, ncont, offset);
+ mlx5_ib_dbg(
+ dev,
+		"addr 0x%llx, size %zd, npages %zu, page_size %lu, ncont %d, offset %d\n",
+ (unsigned long long)ucmd->buf_addr, rwq->buf_size,
+ ib_umem_num_pages(rwq->umem), page_size, rwq->rq_num_pas,
+ offset);
err = mlx5_ib_db_map_user(ucontext, udata, ucmd->db_addr, &rwq->db);
if (err) {
@@ -896,10 +864,9 @@ static int _create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
{
struct mlx5_ib_ucontext *context;
struct mlx5_ib_ubuffer *ubuffer = &base->ubuffer;
- int page_shift = 0;
+ unsigned int page_offset_quantized = 0;
+ unsigned long page_size = 0;
int uar_index = 0;
- int npages;
- u32 offset = 0;
int bfregn;
int ncont = 0;
__be64 *pas;
@@ -950,11 +917,21 @@ static int _create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
if (ucmd->buf_addr && ubuffer->buf_size) {
ubuffer->buf_addr = ucmd->buf_addr;
- err = mlx5_ib_umem_get(dev, udata, ubuffer->buf_addr,
- ubuffer->buf_size, &ubuffer->umem,
- &npages, &page_shift, &ncont, &offset);
- if (err)
+ ubuffer->umem = ib_umem_get(&dev->ib_dev, ubuffer->buf_addr,
+ ubuffer->buf_size, 0);
+ if (IS_ERR(ubuffer->umem)) {
+ err = PTR_ERR(ubuffer->umem);
goto err_bfreg;
+ }
+ page_size = mlx5_umem_find_best_quantized_pgoff(
+ ubuffer->umem, qpc, log_page_size,
+ MLX5_ADAPTER_PAGE_SHIFT, page_offset, 64,
+ &page_offset_quantized);
+ if (!page_size) {
+ err = -EINVAL;
+ goto err_umem;
+ }
+ ncont = ib_umem_num_dma_blocks(ubuffer->umem, page_size);
} else {
ubuffer->umem = NULL;
}
@@ -969,15 +946,14 @@ static int _create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
uid = (attr->qp_type != IB_QPT_XRC_INI) ? to_mpd(pd)->uid : 0;
MLX5_SET(create_qp_in, *in, uid, uid);
- pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, *in, pas);
- if (ubuffer->umem)
- mlx5_ib_populate_pas(dev, ubuffer->umem, page_shift, pas, 0);
-
qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc);
-
- MLX5_SET(qpc, qpc, log_page_size, page_shift - MLX5_ADAPTER_PAGE_SHIFT);
- MLX5_SET(qpc, qpc, page_offset, offset);
-
+ pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, *in, pas);
+ if (ubuffer->umem) {
+ mlx5_ib_populate_pas(ubuffer->umem, page_size, pas, 0);
+ MLX5_SET(qpc, qpc, log_page_size,
+ order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET(qpc, qpc, page_offset, page_offset_quantized);
+ }
MLX5_SET(qpc, qpc, uar_page, uar_index);
if (bfregn != MLX5_IB_INVALID_BFREG)
resp->bfreg_index = adjust_bfregn(dev, &context->bfregi, bfregn);
@@ -1209,18 +1185,24 @@ static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
void *wq;
int inlen;
int err;
- int page_shift = 0;
- int npages;
- int ncont = 0;
- u32 offset = 0;
-
- err = mlx5_ib_umem_get(dev, udata, ubuffer->buf_addr, ubuffer->buf_size,
- &sq->ubuffer.umem, &npages, &page_shift, &ncont,
- &offset);
- if (err)
- return err;
+ unsigned int page_offset_quantized;
+ unsigned long page_size;
+
+ sq->ubuffer.umem = ib_umem_get(&dev->ib_dev, ubuffer->buf_addr,
+ ubuffer->buf_size, 0);
+ if (IS_ERR(sq->ubuffer.umem))
+ return PTR_ERR(sq->ubuffer.umem);
+ page_size = mlx5_umem_find_best_quantized_pgoff(
+ ubuffer->umem, wq, log_wq_pg_sz, MLX5_ADAPTER_PAGE_SHIFT,
+ page_offset, 64, &page_offset_quantized);
+ if (!page_size) {
+ err = -EINVAL;
+ goto err_umem;
+ }
- inlen = MLX5_ST_SZ_BYTES(create_sq_in) + sizeof(u64) * ncont;
+ inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
+ sizeof(u64) *
+ ib_umem_num_dma_blocks(sq->ubuffer.umem, page_size);
in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
err = -ENOMEM;
@@ -1248,11 +1230,12 @@ static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(qpc, qpc, dbr_addr));
MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(qpc, qpc, log_sq_size));
- MLX5_SET(wq, wq, log_wq_pg_sz, page_shift - MLX5_ADAPTER_PAGE_SHIFT);
- MLX5_SET(wq, wq, page_offset, offset);
+ MLX5_SET(wq, wq, log_wq_pg_sz,
+ order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET(wq, wq, page_offset, page_offset_quantized);
pas = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
- mlx5_ib_populate_pas(dev, sq->ubuffer.umem, page_shift, pas, 0);
+ mlx5_ib_populate_pas(sq->ubuffer.umem, page_size, pas, 0);
err = mlx5_core_create_sq_tracked(dev, in, inlen, &sq->base.mqp);
@@ -1278,40 +1261,31 @@ static void destroy_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
ib_umem_release(sq->ubuffer.umem);
}
-static size_t get_rq_pas_size(void *qpc)
-{
- u32 log_page_size = MLX5_GET(qpc, qpc, log_page_size) + 12;
- u32 log_rq_stride = MLX5_GET(qpc, qpc, log_rq_stride);
- u32 log_rq_size = MLX5_GET(qpc, qpc, log_rq_size);
- u32 page_offset = MLX5_GET(qpc, qpc, page_offset);
- u32 po_quanta = 1 << (log_page_size - 6);
- u32 rq_sz = 1 << (log_rq_size + 4 + log_rq_stride);
- u32 page_size = 1 << log_page_size;
- u32 rq_sz_po = rq_sz + (page_offset * po_quanta);
- u32 rq_num_pas = (rq_sz_po + page_size - 1) / page_size;
-
- return rq_num_pas * sizeof(u64);
-}
-
static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
struct mlx5_ib_rq *rq, void *qpin,
- size_t qpinlen, struct ib_pd *pd)
+ struct ib_pd *pd)
{
struct mlx5_ib_qp *mqp = rq->base.container_mibqp;
__be64 *pas;
- __be64 *qp_pas;
void *in;
void *rqc;
void *wq;
void *qpc = MLX5_ADDR_OF(create_qp_in, qpin, qpc);
- size_t rq_pas_size = get_rq_pas_size(qpc);
+ struct ib_umem *umem = rq->base.ubuffer.umem;
+ unsigned int page_offset_quantized;
+ unsigned long page_size = 0;
size_t inlen;
int err;
- if (qpinlen < rq_pas_size + MLX5_BYTE_OFF(create_qp_in, pas))
+ page_size = mlx5_umem_find_best_quantized_pgoff(umem, wq, log_wq_pg_sz,
+ MLX5_ADAPTER_PAGE_SHIFT,
+ page_offset, 64,
+ &page_offset_quantized);
+ if (!page_size)
return -EINVAL;
- inlen = MLX5_ST_SZ_BYTES(create_rq_in) + rq_pas_size;
+ inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
+ sizeof(u64) * ib_umem_num_dma_blocks(umem, page_size);
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -1333,16 +1307,16 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
if (rq->flags & MLX5_IB_RQ_PCI_WRITE_END_PADDING)
MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
- MLX5_SET(wq, wq, page_offset, MLX5_GET(qpc, qpc, page_offset));
+ MLX5_SET(wq, wq, page_offset, page_offset_quantized);
MLX5_SET(wq, wq, pd, MLX5_GET(qpc, qpc, pd));
MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(qpc, qpc, dbr_addr));
MLX5_SET(wq, wq, log_wq_stride, MLX5_GET(qpc, qpc, log_rq_stride) + 4);
- MLX5_SET(wq, wq, log_wq_pg_sz, MLX5_GET(qpc, qpc, log_page_size));
+ MLX5_SET(wq, wq, log_wq_pg_sz,
+ order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(qpc, qpc, log_rq_size));
pas = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
- qp_pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, qpin, pas);
- memcpy(pas, qp_pas, rq_pas_size);
+ mlx5_ib_populate_pas(umem, page_size, pas, 0);
err = mlx5_core_create_rq_tracked(dev, in, inlen, &rq->base.mqp);
@@ -1463,7 +1437,7 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
rq->flags |= MLX5_IB_RQ_CVLAN_STRIPPING;
if (qp->flags & IB_QP_CREATE_PCI_WRITE_END_PADDING)
rq->flags |= MLX5_IB_RQ_PCI_WRITE_END_PADDING;
- err = create_raw_packet_qp_rq(dev, rq, in, inlen, pd);
+ err = create_raw_packet_qp_rq(dev, rq, in, pd);
if (err)
goto err_destroy_sq;
@@ -2436,7 +2410,7 @@ static int create_dct(struct mlx5_ib_dev *dev, struct ib_pd *pd,
}
qp->state = IB_QPS_RESET;
-
+ rdma_restrack_no_track(&qp->ibqp.res);
return 0;
}
@@ -2460,6 +2434,7 @@ static int check_qp_type(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
case IB_QPT_GSI:
if (dev->profile == &raw_eth_profile)
goto out;
+ fallthrough;
case IB_QPT_RAW_PACKET:
case IB_QPT_UD:
case MLX5_IB_QPT_REG_UMR:
@@ -2712,11 +2687,12 @@ static int process_create_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
process_create_flag(dev, &create_flags, MLX5_IB_QP_CREATE_SQPN_QP1,
true, qp);
- if (create_flags)
+ if (create_flags) {
mlx5_ib_dbg(dev, "Create QP has unsupported flags 0x%X\n",
create_flags);
-
- return (create_flags) ? -EINVAL : 0;
+ return -EOPNOTSUPP;
+ }
+ return 0;
}
static int process_udata_size(struct mlx5_ib_dev *dev,
@@ -3102,7 +3078,7 @@ static int ib_to_mlx5_rate_map(u8 rate)
return 5;
default:
return rate + MLX5_STAT_RATE_OFFSET;
- };
+ }
return 0;
}
@@ -4247,6 +4223,9 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int err = -EINVAL;
int port;
+ if (attr_mask & ~(IB_QP_ATTR_STANDARD_BITS | IB_QP_RATE_LIMIT))
+ return -EOPNOTSUPP;
+
if (ibqp->rwq_ind_tbl)
return -ENOSYS;
@@ -4576,7 +4555,9 @@ static int query_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
pri_path = MLX5_ADDR_OF(qpc, qpc, primary_address_path);
alt_path = MLX5_ADDR_OF(qpc, qpc, secondary_address_path);
- if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
+ if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC ||
+ qp->ibqp.qp_type == IB_QPT_XRC_INI ||
+ qp->ibqp.qp_type == IB_QPT_XRC_TGT) {
to_rdma_ah_attr(dev, &qp_attr->ah_attr, pri_path);
to_rdma_ah_attr(dev, &qp_attr->alt_ah_attr, alt_path);
qp_attr->alt_pkey_index = MLX5_GET(ads, alt_path, pkey_index);
@@ -4882,7 +4863,7 @@ static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
MLX5_SET(rqc, rqc, delay_drop_en, 1);
}
rq_pas0 = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
- mlx5_ib_populate_pas(dev, rwq->umem, rwq->page_shift, rq_pas0, 0);
+ mlx5_ib_populate_pas(rwq->umem, 1UL << rwq->page_shift, rq_pas0, 0);
err = mlx5_core_create_rq_tracked(dev, in, inlen, &rwq->core_qp);
if (!err && init_attr->create_flags & IB_WQ_FLAGS_DELAY_DROP) {
err = set_delay_drop(dev);
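
The mlx5 hunks above swap the old qpc-derived PAS sizing (get_rq_pas_size()) for umem-driven selection: mlx5_umem_find_best_quantized_pgoff() picks the largest page size the umem can be described with while keeping the page offset representable in the quantized field, and the PAS count then falls out of ib_umem_num_dma_blocks(). A minimal sketch of the resulting flow, reusing the names from the hunks above rather than verbatim driver code:

	/* 0 means the umem cannot be laid out for this WQ format. */
	page_size = mlx5_umem_find_best_quantized_pgoff(umem, wq, log_wq_pg_sz,
							MLX5_ADAPTER_PAGE_SHIFT,
							page_offset, 64,
							&page_offset_quantized);
	if (!page_size)
		return -EINVAL;

	/* One 64-bit PAS entry per DMA block at the chosen page size. */
	inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
		sizeof(u64) * ib_umem_num_dma_blocks(umem, page_size);

	/* The log field is stored relative to the 4K adapter page. */
	MLX5_SET(wq, wq, log_wq_pg_sz,
		 order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(wq, wq, page_offset, page_offset_quantized);
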
diff --git a/drivers/infiniband/hw/mlx5/restrack.c b/drivers/infiniband/hw/mlx5/restrack.c
index 887270dd3ce2..4ac429e72004 100644
--- a/drivers/infiniband/hw/mlx5/restrack.c
+++ b/drivers/infiniband/hw/mlx5/restrack.c
@@ -116,7 +116,7 @@ static int fill_res_mr_entry_raw(struct sk_buff *msg, struct ib_mr *ibmr)
{
struct mlx5_ib_mr *mr = to_mmr(ibmr);
- return fill_res_raw(msg, mr->dev, MLX5_SGMT_TYPE_PRM_QUERY_MKEY,
+ return fill_res_raw(msg, mr_to_mdev(mr), MLX5_SGMT_TYPE_PRM_QUERY_MKEY,
mlx5_mkey_to_idx(mr->mmkey.key));
}
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index e2f720eec1e1..fab6736e4d6a 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -51,10 +51,6 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
udata, struct mlx5_ib_ucontext, ibucontext);
size_t ucmdlen;
int err;
- int npages;
- int page_shift;
- int ncont;
- u32 offset;
u32 uidx = MLX5_IB_DEFAULT_UIDX;
ucmdlen = min(udata->inlen, sizeof(ucmd));
@@ -86,32 +82,14 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
err = PTR_ERR(srq->umem);
return err;
}
-
- mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, 0, &npages,
- &page_shift, &ncont, NULL);
- err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift,
- &offset);
- if (err) {
- mlx5_ib_warn(dev, "bad offset\n");
- goto err_umem;
- }
-
- in->pas = kvcalloc(ncont, sizeof(*in->pas), GFP_KERNEL);
- if (!in->pas) {
- err = -ENOMEM;
- goto err_umem;
- }
-
- mlx5_ib_populate_pas(dev, srq->umem, page_shift, in->pas, 0);
+ in->umem = srq->umem;
err = mlx5_ib_db_map_user(ucontext, udata, ucmd.db_addr, &srq->db);
if (err) {
mlx5_ib_dbg(dev, "map doorbell failed\n");
- goto err_in;
+ goto err_umem;
}
- in->log_page_size = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
- in->page_offset = offset;
in->uid = (in->type != IB_SRQT_XRC) ? to_mpd(pd)->uid : 0;
if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
in->type != IB_SRQT_BASIC)
@@ -119,9 +97,6 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
return 0;
-err_in:
- kvfree(in->pas);
-
err_umem:
ib_umem_release(srq->umem);
@@ -226,6 +201,11 @@ int mlx5_ib_create_srq(struct ib_srq *ib_srq,
struct mlx5_srq_attr in = {};
__u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
+ if (init_attr->srq_type != IB_SRQT_BASIC &&
+ init_attr->srq_type != IB_SRQT_XRC &&
+ init_attr->srq_type != IB_SRQT_TM)
+ return -EOPNOTSUPP;
+
/* Sanity check SRQ size before proceeding */
if (init_attr->attr.max_wr >= max_srq_wqes) {
mlx5_ib_dbg(dev, "max_wr %d, cap %d\n",
diff --git a/drivers/infiniband/hw/mlx5/srq.h b/drivers/infiniband/hw/mlx5/srq.h
index 2c3627b2509d..a7e3dc5564ac 100644
--- a/drivers/infiniband/hw/mlx5/srq.h
+++ b/drivers/infiniband/hw/mlx5/srq.h
@@ -28,6 +28,7 @@ struct mlx5_srq_attr {
u32 user_index;
u64 db_record;
__be64 *pas;
+ struct ib_umem *umem;
u32 tm_log_list_size;
u32 tm_next_tag;
u32 tm_hw_phase_cnt;
diff --git a/drivers/infiniband/hw/mlx5/srq_cmd.c b/drivers/infiniband/hw/mlx5/srq_cmd.c
index db889ec3fd48..8b3385396599 100644
--- a/drivers/infiniband/hw/mlx5/srq_cmd.c
+++ b/drivers/infiniband/hw/mlx5/srq_cmd.c
@@ -92,6 +92,25 @@ struct mlx5_core_srq *mlx5_cmd_get_srq(struct mlx5_ib_dev *dev, u32 srqn)
return srq;
}
+static int __set_srq_page_size(struct mlx5_srq_attr *in,
+ unsigned long page_size)
+{
+ if (!page_size)
+ return -EINVAL;
+ in->log_page_size = order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT;
+
+ if (WARN_ON(get_pas_size(in) !=
+ ib_umem_num_dma_blocks(in->umem, page_size) * sizeof(u64)))
+ return -EINVAL;
+ return 0;
+}
+
+#define set_srq_page_size(in, typ, log_pgsz_fld) \
+ __set_srq_page_size(in, mlx5_umem_find_best_quantized_pgoff( \
+ (in)->umem, typ, log_pgsz_fld, \
+ MLX5_ADAPTER_PAGE_SHIFT, page_offset, \
+ 64, &(in)->page_offset))
+
static int create_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_srq_attr *in)
{
@@ -103,6 +122,12 @@ static int create_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
int inlen;
int err;
+ if (in->umem) {
+ err = set_srq_page_size(in, srqc, log_page_size);
+ if (err)
+ return err;
+ }
+
pas_size = get_pas_size(in);
inlen = MLX5_ST_SZ_BYTES(create_srq_in) + pas_size;
create_in = kvzalloc(inlen, GFP_KERNEL);
@@ -114,7 +139,13 @@ static int create_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
pas = MLX5_ADDR_OF(create_srq_in, create_in, pas);
set_srqc(srqc, in);
- memcpy(pas, in->pas, pas_size);
+ if (in->umem)
+ mlx5_ib_populate_pas(
+ in->umem,
+ 1UL << (in->log_page_size + MLX5_ADAPTER_PAGE_SHIFT),
+ pas, 0);
+ else
+ memcpy(pas, in->pas, pas_size);
MLX5_SET(create_srq_in, create_in, opcode,
MLX5_CMD_OP_CREATE_SRQ);
@@ -194,6 +225,12 @@ static int create_xrc_srq_cmd(struct mlx5_ib_dev *dev,
int inlen;
int err;
+ if (in->umem) {
+ err = set_srq_page_size(in, xrc_srqc, log_page_size);
+ if (err)
+ return err;
+ }
+
pas_size = get_pas_size(in);
inlen = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size;
create_in = kvzalloc(inlen, GFP_KERNEL);
@@ -207,7 +244,13 @@ static int create_xrc_srq_cmd(struct mlx5_ib_dev *dev,
set_srqc(xrc_srqc, in);
MLX5_SET(xrc_srqc, xrc_srqc, user_index, in->user_index);
- memcpy(pas, in->pas, pas_size);
+ if (in->umem)
+ mlx5_ib_populate_pas(
+ in->umem,
+ 1UL << (in->log_page_size + MLX5_ADAPTER_PAGE_SHIFT),
+ pas, 0);
+ else
+ memcpy(pas, in->pas, pas_size);
MLX5_SET(create_xrc_srq_in, create_in, opcode,
MLX5_CMD_OP_CREATE_XRC_SRQ);
@@ -289,11 +332,18 @@ static int create_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
void *create_in = NULL;
void *rmpc;
void *wq;
+ void *pas;
int pas_size;
int outlen;
int inlen;
int err;
+ if (in->umem) {
+ err = set_srq_page_size(in, wq, log_wq_pg_sz);
+ if (err)
+ return err;
+ }
+
pas_size = get_pas_size(in);
inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size;
outlen = MLX5_ST_SZ_BYTES(create_rmp_out);
@@ -309,8 +359,16 @@ static int create_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
MLX5_SET(create_rmp_in, create_in, uid, in->uid);
+ pas = MLX5_ADDR_OF(rmpc, rmpc, wq.pas);
+
set_wq(wq, in);
- memcpy(MLX5_ADDR_OF(rmpc, rmpc, wq.pas), in->pas, pas_size);
+ if (in->umem)
+ mlx5_ib_populate_pas(
+ in->umem,
+ 1UL << (in->log_page_size + MLX5_ADAPTER_PAGE_SHIFT),
+ pas, 0);
+ else
+ memcpy(pas, in->pas, pas_size);
MLX5_SET(create_rmp_in, create_in, opcode, MLX5_CMD_OP_CREATE_RMP);
err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out, outlen);
@@ -421,10 +479,17 @@ static int create_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
void *create_in;
void *xrqc;
void *wq;
+ void *pas;
int pas_size;
int inlen;
int err;
+ if (in->umem) {
+ err = set_srq_page_size(in, wq, log_wq_pg_sz);
+ if (err)
+ return err;
+ }
+
pas_size = get_pas_size(in);
inlen = MLX5_ST_SZ_BYTES(create_xrq_in) + pas_size;
create_in = kvzalloc(inlen, GFP_KERNEL);
@@ -433,9 +498,16 @@ static int create_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
xrqc = MLX5_ADDR_OF(create_xrq_in, create_in, xrq_context);
wq = MLX5_ADDR_OF(xrqc, xrqc, wq);
+ pas = MLX5_ADDR_OF(xrqc, xrqc, wq.pas);
set_wq(wq, in);
- memcpy(MLX5_ADDR_OF(xrqc, xrqc, wq.pas), in->pas, pas_size);
+ if (in->umem)
+ mlx5_ib_populate_pas(
+ in->umem,
+ 1UL << (in->log_page_size + MLX5_ADAPTER_PAGE_SHIFT),
+ pas, 0);
+ else
+ memcpy(pas, in->pas, pas_size);
if (in->type == IB_SRQT_TM) {
MLX5_SET(xrqc, xrqc, topology, MLX5_XRQC_TOPOLOGY_TAG_MATCHING);
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 119b2573c9a0..26c3408dcaca 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -604,7 +604,7 @@ static inline int mthca_poll_one(struct mthca_dev *dev,
entry->byte_len = MTHCA_ATOMIC_BYTE_LEN;
break;
default:
- entry->opcode = MTHCA_OPCODE_INVALID;
+ entry->opcode = 0xFF;
break;
}
} else {
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index 9dbbf4d16796..a445160de3e1 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -105,7 +105,6 @@ enum {
MTHCA_OPCODE_ATOMIC_CS = 0x11,
MTHCA_OPCODE_ATOMIC_FA = 0x12,
MTHCA_OPCODE_BIND_MW = 0x18,
- MTHCA_OPCODE_INVALID = 0xff
};
enum {
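
The two mthca hunks above drop MTHCA_OPCODE_INVALID because the value was being stored into a completion opcode field of type enum ib_wc_opcode, a different namespace from the hardware opcodes in this enum. Writing the literal keeps the behaviour without mixing the two enums:

	/* 0xFF lies outside enum ib_wc_opcode and only marks an
	 * unrecognised error completion; it is not a real IB opcode.
	 */
	entry->opcode = 0xFF;
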
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index c4d9cdc4ee97..1a3dd07f993b 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -470,7 +470,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
int err;
if (init_attr->create_flags)
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(-EOPNOTSUPP);
switch (init_attr->qp_type) {
case IB_QPT_RC:
@@ -612,7 +612,7 @@ static int mthca_create_cq(struct ib_cq *ibcq,
udata, struct mthca_ucontext, ibucontext);
if (attr->flags)
- return -EINVAL;
+ return -EOPNOTSUPP;
if (entries < 1 || entries > to_mdev(ibdev)->limits.max_cqes)
return -EINVAL;
@@ -961,29 +961,34 @@ static ssize_t hw_rev_show(struct device *device,
struct mthca_dev *dev =
rdma_device_to_drv_device(device, struct mthca_dev, ib_dev);
- return sprintf(buf, "%x\n", dev->rev_id);
+ return sysfs_emit(buf, "%x\n", dev->rev_id);
}
static DEVICE_ATTR_RO(hw_rev);
-static ssize_t hca_type_show(struct device *device,
- struct device_attribute *attr, char *buf)
+static const char *hca_type_string(int hca_type)
{
- struct mthca_dev *dev =
- rdma_device_to_drv_device(device, struct mthca_dev, ib_dev);
-
- switch (dev->pdev->device) {
+ switch (hca_type) {
case PCI_DEVICE_ID_MELLANOX_TAVOR:
- return sprintf(buf, "MT23108\n");
+ return "MT23108";
case PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT:
- return sprintf(buf, "MT25208 (MT23108 compat mode)\n");
+ return "MT25208 (MT23108 compat mode)";
case PCI_DEVICE_ID_MELLANOX_ARBEL:
- return sprintf(buf, "MT25208\n");
+ return "MT25208";
case PCI_DEVICE_ID_MELLANOX_SINAI:
case PCI_DEVICE_ID_MELLANOX_SINAI_OLD:
- return sprintf(buf, "MT25204\n");
- default:
- return sprintf(buf, "unknown\n");
+ return "MT25204";
}
+
+ return "unknown";
+}
+
+static ssize_t hca_type_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct mthca_dev *dev =
+ rdma_device_to_drv_device(device, struct mthca_dev, ib_dev);
+
+ return sysfs_emit(buf, "%s\n", hca_type_string(dev->pdev->device));
}
static DEVICE_ATTR_RO(hca_type);
@@ -993,7 +998,7 @@ static ssize_t board_id_show(struct device *device,
struct mthca_dev *dev =
rdma_device_to_drv_device(device, struct mthca_dev, ib_dev);
- return sprintf(buf, "%.*s\n", MTHCA_BOARD_ID_LEN, dev->board_id);
+ return sysfs_emit(buf, "%.*s\n", MTHCA_BOARD_ID_LEN, dev->board_id);
}
static DEVICE_ATTR_RO(board_id);
@@ -1158,36 +1163,12 @@ int mthca_register_device(struct mthca_dev *dev)
if (ret)
return ret;
- dev->ib_dev.uverbs_cmd_mask =
- (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
- (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
- (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
- (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_REG_MR) |
- (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
- (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
- (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
- (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
- (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
- (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
- (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
- (1ull << IB_USER_VERBS_CMD_DETACH_MCAST);
dev->ib_dev.node_type = RDMA_NODE_IB_CA;
dev->ib_dev.phys_port_cnt = dev->limits.num_ports;
dev->ib_dev.num_comp_vectors = 1;
dev->ib_dev.dev.parent = &dev->pdev->dev;
if (dev->mthca_flags & MTHCA_FLAG_SRQ) {
- dev->ib_dev.uverbs_cmd_mask |=
- (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
- (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);
-
if (mthca_is_memfree(dev))
ib_set_device_ops(&dev->ib_dev,
&mthca_dev_arbel_srq_ops);
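
The sprintf()/scnprintf() conversions here, and in the ocrdma, qedr, qib, usnic and pvrdma hunks below, all follow one template: sysfs_emit() knows it is writing into the page-aligned, PAGE_SIZE sysfs buffer, so the explicit size bookkeeping disappears and overruns become impossible by construction. A minimal before/after sketch with a hypothetical attribute:

	/* Before: the caller must pass the size cap itself. */
	static ssize_t foo_show_old(struct device *dev,
				    struct device_attribute *attr, char *buf)
	{
		return scnprintf(buf, PAGE_SIZE, "%d\n", 42);
	}

	/* After: sysfs_emit() checks buf is the sysfs page and caps
	 * the output at PAGE_SIZE internally.
	 */
	static ssize_t foo_show(struct device *dev,
				struct device_attribute *attr, char *buf)
	{
		return sysfs_emit(buf, "%d\n", 42);
	}
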
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 08a2a7afafd3..07cfc0934b17 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -863,6 +863,9 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
enum ib_qp_state cur_state, new_state;
int err = -EINVAL;
+ if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
+ return -EOPNOTSUPP;
+
mutex_lock(&qp->mutex);
if (attr_mask & IB_QP_CUR_STATE) {
cur_state = attr->cur_qp_state;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 9b96661a7143..9a834a9cca0e 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -119,7 +119,7 @@ static ssize_t hw_rev_show(struct device *device,
struct ocrdma_dev *dev =
rdma_device_to_drv_device(device, struct ocrdma_dev, ibdev);
- return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->nic_info.pdev->vendor);
+ return sysfs_emit(buf, "0x%x\n", dev->nic_info.pdev->vendor);
}
static DEVICE_ATTR_RO(hw_rev);
@@ -129,7 +129,7 @@ static ssize_t hca_type_show(struct device *device,
struct ocrdma_dev *dev =
rdma_device_to_drv_device(device, struct ocrdma_dev, ibdev);
- return scnprintf(buf, PAGE_SIZE, "%s\n", &dev->model_number[0]);
+ return sysfs_emit(buf, "%s\n", &dev->model_number[0]);
}
static DEVICE_ATTR_RO(hca_type);
@@ -154,6 +154,7 @@ static const struct ib_device_ops ocrdma_dev_ops = {
.create_ah = ocrdma_create_ah,
.create_cq = ocrdma_create_cq,
.create_qp = ocrdma_create_qp,
+ .create_user_ah = ocrdma_create_ah,
.dealloc_pd = ocrdma_dealloc_pd,
.dealloc_ucontext = ocrdma_dealloc_ucontext,
.dereg_mr = ocrdma_dereg_mr,
@@ -204,32 +205,6 @@ static int ocrdma_register_device(struct ocrdma_dev *dev)
BUILD_BUG_ON(sizeof(OCRDMA_NODE_DESC) > IB_DEVICE_NODE_DESC_MAX);
memcpy(dev->ibdev.node_desc, OCRDMA_NODE_DESC,
sizeof(OCRDMA_NODE_DESC));
- dev->ibdev.uverbs_cmd_mask =
- OCRDMA_UVERBS(GET_CONTEXT) |
- OCRDMA_UVERBS(QUERY_DEVICE) |
- OCRDMA_UVERBS(QUERY_PORT) |
- OCRDMA_UVERBS(ALLOC_PD) |
- OCRDMA_UVERBS(DEALLOC_PD) |
- OCRDMA_UVERBS(REG_MR) |
- OCRDMA_UVERBS(DEREG_MR) |
- OCRDMA_UVERBS(CREATE_COMP_CHANNEL) |
- OCRDMA_UVERBS(CREATE_CQ) |
- OCRDMA_UVERBS(RESIZE_CQ) |
- OCRDMA_UVERBS(DESTROY_CQ) |
- OCRDMA_UVERBS(REQ_NOTIFY_CQ) |
- OCRDMA_UVERBS(CREATE_QP) |
- OCRDMA_UVERBS(MODIFY_QP) |
- OCRDMA_UVERBS(QUERY_QP) |
- OCRDMA_UVERBS(DESTROY_QP) |
- OCRDMA_UVERBS(POLL_CQ) |
- OCRDMA_UVERBS(POST_SEND) |
- OCRDMA_UVERBS(POST_RECV);
-
- dev->ibdev.uverbs_cmd_mask |=
- OCRDMA_UVERBS(CREATE_AH) |
- OCRDMA_UVERBS(MODIFY_AH) |
- OCRDMA_UVERBS(QUERY_AH) |
- OCRDMA_UVERBS(DESTROY_AH);
dev->ibdev.node_type = RDMA_NODE_IB_CA;
dev->ibdev.phys_port_cnt = 1;
@@ -240,16 +215,9 @@ static int ocrdma_register_device(struct ocrdma_dev *dev)
ib_set_device_ops(&dev->ibdev, &ocrdma_dev_ops);
- if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
- dev->ibdev.uverbs_cmd_mask |=
- OCRDMA_UVERBS(CREATE_SRQ) |
- OCRDMA_UVERBS(MODIFY_SRQ) |
- OCRDMA_UVERBS(QUERY_SRQ) |
- OCRDMA_UVERBS(DESTROY_SRQ) |
- OCRDMA_UVERBS(POST_SRQ_RECV);
-
+ if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R)
ib_set_device_ops(&dev->ibdev, &ocrdma_dev_srq_ops);
- }
+
rdma_set_device_sysfs_group(&dev->ibdev, &ocrdma_attr_group);
ret = ib_device_set_netdev(&dev->ibdev, dev->nic_info.netdev, 1);
if (ret)
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 7350fe16f164..3acb5c10b155 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -434,9 +434,9 @@ static void ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
pr_err("%s(%d) Freeing in use pdid=0x%x.\n",
__func__, dev->id, pd->id);
}
- kfree(uctx->cntxt_pd);
uctx->cntxt_pd = NULL;
_ocrdma_dealloc_pd(dev, pd);
+ kfree(pd);
}
static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx)
@@ -974,7 +974,7 @@ int ocrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct ocrdma_create_cq_ureq ureq;
if (attr->flags)
- return -EINVAL;
+ return -EOPNOTSUPP;
if (udata) {
if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
@@ -1299,6 +1299,9 @@ struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
struct ocrdma_create_qp_ureq ureq;
u16 dpp_credit_lmt, dpp_offset;
+ if (attrs->create_flags)
+ return ERR_PTR(-EOPNOTSUPP);
+
status = ocrdma_check_qp_params(ibpd, dev, attrs, udata);
if (status)
goto gen_err;
@@ -1391,6 +1394,9 @@ int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
struct ocrdma_dev *dev;
enum ib_qp_state old_qps, new_qps;
+ if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
+ return -EOPNOTSUPP;
+
qp = get_ocrdma_qp(ibqp);
dev = get_ocrdma_dev(ibqp->device);
@@ -1770,6 +1776,9 @@ int ocrdma_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device);
struct ocrdma_srq *srq = get_ocrdma_srq(ibsrq);
+ if (init_attr->srq_type != IB_SRQT_BASIC)
+ return -EOPNOTSUPP;
+
if (init_attr->attr.max_sge > dev->attr.max_recv_sge)
return -EINVAL;
if (init_attr->attr.max_wr > dev->attr.max_rqe)
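
The first ocrdma_verbs.c hunk above is an ordering fix, not a cleanup: the old code ran kfree(uctx->cntxt_pd) and only afterwards _ocrdma_dealloc_pd(dev, pd) on the same object, a use-after-free. The corrected teardown, as a sketch:

	struct ocrdma_pd *pd = uctx->cntxt_pd;

	uctx->cntxt_pd = NULL;		/* unpublish the pointer first */
	_ocrdma_dealloc_pd(dev, pd);	/* may still dereference pd    */
	kfree(pd);			/* free only after last use    */
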
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index 967641662b24..8e7c069e1a2d 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -124,7 +124,7 @@ static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
struct qedr_dev *dev =
rdma_device_to_drv_device(device, struct qedr_dev, ibdev);
- return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->attr.hw_ver);
+ return sysfs_emit(buf, "0x%x\n", dev->attr.hw_ver);
}
static DEVICE_ATTR_RO(hw_rev);
@@ -134,10 +134,9 @@ static ssize_t hca_type_show(struct device *device,
struct qedr_dev *dev =
rdma_device_to_drv_device(device, struct qedr_dev, ibdev);
- return scnprintf(buf, PAGE_SIZE, "FastLinQ QL%x %s\n",
- dev->pdev->device,
- rdma_protocol_iwarp(&dev->ibdev, 1) ?
- "iWARP" : "RoCE");
+ return sysfs_emit(buf, "FastLinQ QL%x %s\n", dev->pdev->device,
+ rdma_protocol_iwarp(&dev->ibdev, 1) ? "iWARP" :
+ "RoCE");
}
static DEVICE_ATTR_RO(hca_type);
@@ -188,10 +187,6 @@ static void qedr_roce_register_device(struct qedr_dev *dev)
dev->ibdev.node_type = RDMA_NODE_IB_CA;
ib_set_device_ops(&dev->ibdev, &qedr_roce_dev_ops);
-
- dev->ibdev.uverbs_cmd_mask |= QEDR_UVERBS(OPEN_XRCD) |
- QEDR_UVERBS(CLOSE_XRCD) |
- QEDR_UVERBS(CREATE_XSRQ);
}
static const struct ib_device_ops qedr_dev_ops = {
@@ -249,31 +244,6 @@ static int qedr_register_device(struct qedr_dev *dev)
dev->ibdev.node_guid = dev->attr.node_guid;
memcpy(dev->ibdev.node_desc, QEDR_NODE_DESC, sizeof(QEDR_NODE_DESC));
- dev->ibdev.uverbs_cmd_mask = QEDR_UVERBS(GET_CONTEXT) |
- QEDR_UVERBS(QUERY_DEVICE) |
- QEDR_UVERBS(QUERY_PORT) |
- QEDR_UVERBS(ALLOC_PD) |
- QEDR_UVERBS(DEALLOC_PD) |
- QEDR_UVERBS(CREATE_COMP_CHANNEL) |
- QEDR_UVERBS(CREATE_CQ) |
- QEDR_UVERBS(RESIZE_CQ) |
- QEDR_UVERBS(DESTROY_CQ) |
- QEDR_UVERBS(REQ_NOTIFY_CQ) |
- QEDR_UVERBS(CREATE_QP) |
- QEDR_UVERBS(MODIFY_QP) |
- QEDR_UVERBS(QUERY_QP) |
- QEDR_UVERBS(DESTROY_QP) |
- QEDR_UVERBS(CREATE_SRQ) |
- QEDR_UVERBS(DESTROY_SRQ) |
- QEDR_UVERBS(QUERY_SRQ) |
- QEDR_UVERBS(MODIFY_SRQ) |
- QEDR_UVERBS(POST_SRQ_RECV) |
- QEDR_UVERBS(REG_MR) |
- QEDR_UVERBS(DEREG_MR) |
- QEDR_UVERBS(POLL_CQ) |
- QEDR_UVERBS(POST_SEND) |
- QEDR_UVERBS(POST_RECV);
-
if (IS_IWARP(dev)) {
rc = qedr_iw_register_device(dev);
if (rc)
@@ -796,6 +766,7 @@ static void qedr_affiliated_event(void *context, u8 e_code, void *fw_handle)
}
xa_unlock_irqrestore(&dev->srqs, flags);
DP_NOTICE(dev, "SRQ event %d on handle %p\n", e_code, srq);
+ break;
default:
break;
}
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 511c95bb3d01..0eb6a7a618e0 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -928,6 +928,9 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
"create_cq: called from %s. entries=%d, vector=%d\n",
udata ? "User Lib" : "Kernel", entries, vector);
+ if (attr->flags)
+ return -EOPNOTSUPP;
+
if (entries > QEDR_MAX_CQES) {
DP_ERR(dev,
"create cq: the number of entries %d is too high. Must be equal or below %d.\n",
@@ -1546,6 +1549,10 @@ int qedr_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
"create SRQ called from %s (pd %p)\n",
(udata) ? "User lib" : "kernel", pd);
+ if (init_attr->srq_type != IB_SRQT_BASIC &&
+ init_attr->srq_type != IB_SRQT_XRC)
+ return -EOPNOTSUPP;
+
rc = qedr_check_srq_params(dev, init_attr, udata);
if (rc)
return -EINVAL;
@@ -2241,6 +2248,9 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
struct ib_qp *ibqp;
int rc = 0;
+ if (attrs->create_flags)
+ return ERR_PTR(-EOPNOTSUPP);
+
if (attrs->qp_type == IB_QPT_XRC_TGT) {
xrcd = get_qedr_xrcd(attrs->xrcd);
dev = get_qedr_dev(xrcd->ibxrcd.device);
@@ -2477,6 +2487,9 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
"modify qp: qp %p attr_mask=0x%x, state=%d", qp, attr_mask,
attr->qp_state);
+ if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
+ return -EOPNOTSUPP;
+
old_qp_state = qedr_get_ibqp_state(qp->state);
if (attr_mask & IB_QP_STATE)
new_qp_state = attr->qp_state;
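
Every modify_qp implementation touched by this series gains the same front gate: attribute bits outside IB_QP_ATTR_STANDARD_BITS (plus any driver-specific extras, such as IB_QP_RATE_LIMIT in the mlx5 hunk earlier) are rejected with -EOPNOTSUPP before any state is read, giving userspace a distinct error for unsupported attributes instead of a generic -EINVAL. The shape of the check, on a hypothetical driver:

	static int foo_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
				 int attr_mask, struct ib_udata *udata)
	{
		if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
			return -EOPNOTSUPP;	/* unknown attribute bits */

		/* ... the existing transition handling runs unchanged ... */
		return 0;
	}
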
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
index 3dc6ce033319..2e07b3749b88 100644
--- a/drivers/infiniband/hw/qib/qib_pcie.c
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -90,25 +90,18 @@ int qib_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent)
goto bail;
}
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (ret) {
/*
* If the 64 bit setup fails, try 32 bit. Some systems
* do not setup 64 bit maps on systems with 2GB or less
* memory installed.
*/
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
qib_devinfo(pdev, "Unable to set DMA mask: %d\n", ret);
goto bail;
}
- ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- } else
- ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- if (ret) {
- qib_early_err(&pdev->dev,
- "Unable to set DMA consistent mask: %d\n", ret);
- goto bail;
}
pci_set_master(pdev);
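
dma_set_mask_and_coherent() sets the streaming and coherent masks in one call, which is why the separate pci_set_consistent_dma_mask() legs and their dedicated error path vanish above while the 64-bit-then-32-bit fallback survives. The resulting pattern, sketched:

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret)
		/* Hosts with ~2GB or less may lack 64-bit maps. */
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret) {
		qib_devinfo(pdev, "Unable to set DMA mask: %d\n", ret);
		goto bail;
	}
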
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index 021df0654ba7..62c179fc764b 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -43,11 +43,8 @@
static ssize_t show_hrtbt_enb(struct qib_pportdata *ppd, char *buf)
{
struct qib_devdata *dd = ppd->dd;
- int ret;
- ret = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_HRTBT);
- ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
- return ret;
+ return sysfs_emit(buf, "%d\n", dd->f_get_ib_cfg(ppd, QIB_IB_CFG_HRTBT));
}
static ssize_t store_hrtbt_enb(struct qib_pportdata *ppd, const char *buf,
@@ -106,14 +103,10 @@ static ssize_t store_led_override(struct qib_pportdata *ppd, const char *buf,
static ssize_t show_status(struct qib_pportdata *ppd, char *buf)
{
- ssize_t ret;
-
if (!ppd->statusp)
- ret = -EINVAL;
- else
- ret = scnprintf(buf, PAGE_SIZE, "0x%llx\n",
- (unsigned long long) *(ppd->statusp));
- return ret;
+ return -EINVAL;
+
+ return sysfs_emit(buf, "0x%llx\n", (unsigned long long)*(ppd->statusp));
}
/*
@@ -392,7 +385,7 @@ static ssize_t sl2vl_attr_show(struct kobject *kobj, struct attribute *attr,
container_of(kobj, struct qib_pportdata, sl2vl_kobj);
struct qib_ibport *qibp = &ppd->ibport_data;
- return sprintf(buf, "%u\n", qibp->sl_to_vl[sattr->sl]);
+ return sysfs_emit(buf, "%u\n", qibp->sl_to_vl[sattr->sl]);
}
static const struct sysfs_ops qib_sl2vl_ops = {
@@ -501,17 +494,18 @@ static ssize_t diagc_attr_show(struct kobject *kobj, struct attribute *attr,
struct qib_pportdata *ppd =
container_of(kobj, struct qib_pportdata, diagc_kobj);
struct qib_ibport *qibp = &ppd->ibport_data;
+ u64 val;
if (!strncmp(dattr->attr.name, "rc_acks", 7))
- return sprintf(buf, "%llu\n", READ_PER_CPU_CNTR(rc_acks));
+ val = READ_PER_CPU_CNTR(rc_acks);
else if (!strncmp(dattr->attr.name, "rc_qacks", 8))
- return sprintf(buf, "%llu\n", READ_PER_CPU_CNTR(rc_qacks));
+ val = READ_PER_CPU_CNTR(rc_qacks);
else if (!strncmp(dattr->attr.name, "rc_delayed_comp", 15))
- return sprintf(buf, "%llu\n",
- READ_PER_CPU_CNTR(rc_delayed_comp));
+ val = READ_PER_CPU_CNTR(rc_delayed_comp);
else
- return sprintf(buf, "%u\n",
- *(u32 *)((char *)qibp + dattr->counter));
+ val = *(u32 *)((char *)qibp + dattr->counter);
+
+ return sysfs_emit(buf, "%llu\n", val);
}
static ssize_t diagc_attr_store(struct kobject *kobj, struct attribute *attr,
@@ -565,7 +559,7 @@ static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
struct qib_ibdev *dev =
rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
- return sprintf(buf, "%x\n", dd_from_dev(dev)->minrev);
+ return sysfs_emit(buf, "%x\n", dd_from_dev(dev)->minrev);
}
static DEVICE_ATTR_RO(hw_rev);
@@ -575,13 +569,10 @@ static ssize_t hca_type_show(struct device *device,
struct qib_ibdev *dev =
rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
struct qib_devdata *dd = dd_from_dev(dev);
- int ret;
if (!dd->boardname)
- ret = -EINVAL;
- else
- ret = scnprintf(buf, PAGE_SIZE, "%s\n", dd->boardname);
- return ret;
+ return -EINVAL;
+ return sysfs_emit(buf, "%s\n", dd->boardname);
}
static DEVICE_ATTR_RO(hca_type);
static DEVICE_ATTR(board_id, 0444, hca_type_show, NULL);
@@ -590,7 +581,7 @@ static ssize_t version_show(struct device *device,
struct device_attribute *attr, char *buf)
{
/* The string printed here is already newline-terminated. */
- return scnprintf(buf, PAGE_SIZE, "%s", (char *)ib_qib_version);
+ return sysfs_emit(buf, "%s", (char *)ib_qib_version);
}
static DEVICE_ATTR_RO(version);
@@ -602,7 +593,7 @@ static ssize_t boardversion_show(struct device *device,
struct qib_devdata *dd = dd_from_dev(dev);
/* The string printed here is already newline-terminated. */
- return scnprintf(buf, PAGE_SIZE, "%s", dd->boardversion);
+ return sysfs_emit(buf, "%s", dd->boardversion);
}
static DEVICE_ATTR_RO(boardversion);
@@ -614,7 +605,7 @@ static ssize_t localbus_info_show(struct device *device,
struct qib_devdata *dd = dd_from_dev(dev);
/* The string printed here is already newline-terminated. */
- return scnprintf(buf, PAGE_SIZE, "%s", dd->lbus_info);
+ return sysfs_emit(buf, "%s", dd->lbus_info);
}
static DEVICE_ATTR_RO(localbus_info);
@@ -628,9 +619,10 @@ static ssize_t nctxts_show(struct device *device,
/* Return the number of user ports (contexts) available. */
/* The calculation below deals with a special case where
* cfgctxts is set to 1 on a single-port board. */
- return scnprintf(buf, PAGE_SIZE, "%u\n",
- (dd->first_user_ctxt > dd->cfgctxts) ? 0 :
- (dd->cfgctxts - dd->first_user_ctxt));
+ return sysfs_emit(buf, "%u\n",
+ (dd->first_user_ctxt > dd->cfgctxts) ?
+ 0 :
+ (dd->cfgctxts - dd->first_user_ctxt));
}
static DEVICE_ATTR_RO(nctxts);
@@ -642,21 +634,20 @@ static ssize_t nfreectxts_show(struct device *device,
struct qib_devdata *dd = dd_from_dev(dev);
/* Return the number of free user ports (contexts) available. */
- return scnprintf(buf, PAGE_SIZE, "%u\n", dd->freectxts);
+ return sysfs_emit(buf, "%u\n", dd->freectxts);
}
static DEVICE_ATTR_RO(nfreectxts);
-static ssize_t serial_show(struct device *device,
- struct device_attribute *attr, char *buf)
+static ssize_t serial_show(struct device *device, struct device_attribute *attr,
+ char *buf)
{
struct qib_ibdev *dev =
rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
struct qib_devdata *dd = dd_from_dev(dev);
+ const u8 *end = memchr(dd->serial, 0, ARRAY_SIZE(dd->serial));
+ int size = end ? end - dd->serial : ARRAY_SIZE(dd->serial);
- buf[sizeof(dd->serial)] = '\0';
- memcpy(buf, dd->serial, sizeof(dd->serial));
- strcat(buf, "\n");
- return strlen(buf);
+ return sysfs_emit(buf, "%.*s\n", size, dd->serial);
}
static DEVICE_ATTR_RO(serial);
@@ -689,27 +680,26 @@ static ssize_t tempsense_show(struct device *device,
struct qib_ibdev *dev =
rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
struct qib_devdata *dd = dd_from_dev(dev);
- int ret;
- int idx;
+ int i;
u8 regvals[8];
- ret = -ENXIO;
- for (idx = 0; idx < 8; ++idx) {
- if (idx == 6)
+ for (i = 0; i < 8; i++) {
+ int ret;
+
+ if (i == 6)
continue;
- ret = dd->f_tempsense_rd(dd, idx);
+ ret = dd->f_tempsense_rd(dd, i);
if (ret < 0)
- break;
- regvals[idx] = ret;
+ return ret; /* return error on bad read */
+ regvals[i] = ret;
}
- if (idx == 8)
- ret = scnprintf(buf, PAGE_SIZE, "%d %d %02X %02X %d %d\n",
- *(signed char *)(regvals),
- *(signed char *)(regvals + 1),
- regvals[2], regvals[3],
- *(signed char *)(regvals + 5),
- *(signed char *)(regvals + 7));
- return ret;
+ return sysfs_emit(buf, "%d %d %02X %02X %d %d\n",
+ (signed char)regvals[0],
+ (signed char)regvals[1],
+ regvals[2],
+ regvals[3],
+ (signed char)regvals[5],
+ (signed char)regvals[7]);
}
static DEVICE_ATTR_RO(tempsense);
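
Two details in the qib_sysfs.c conversion deserve a note: serial_show() now derives the printable length of the fixed, possibly unterminated dd->serial array with memchr() instead of copying into buf and writing a NUL past the attribute data, and tempsense_show() returns the first read error instead of silently printing nothing. The length computation in isolation, with "serial" standing in for dd->serial:

	/* Find the NUL if present; otherwise print the whole array.
	 * The "%.*s" precision stops the print at exactly size bytes.
	 */
	const u8 *end = memchr(serial, 0, sizeof(serial));
	int size = end ? end - serial : sizeof(serial);

	return sysfs_emit(buf, "%.*s\n", size, serial);
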
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c
index aa2e65fc5cd6..1b63a491fa72 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_main.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c
@@ -398,25 +398,6 @@ static void *usnic_ib_device_add(struct pci_dev *dev)
us_ibdev->ib_dev.num_comp_vectors = USNIC_IB_NUM_COMP_VECTORS;
us_ibdev->ib_dev.dev.parent = &dev->dev;
- us_ibdev->ib_dev.uverbs_cmd_mask =
- (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
- (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
- (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
- (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_REG_MR) |
- (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
- (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
- (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
- (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
- (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
- (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
- (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
- (1ull << IB_USER_VERBS_CMD_OPEN_QP);
-
ib_set_device_ops(&us_ibdev->ib_dev, &usnic_dev_ops);
rdma_set_device_sysfs_group(&us_ibdev->ib_dev, &usnic_attr_group);
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
index c85d48ae7442..586b0e52ba7f 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
@@ -57,7 +57,7 @@ static ssize_t board_id_show(struct device *device,
subsystem_device_id = us_ibdev->pdev->subsystem_device;
mutex_unlock(&us_ibdev->usdev_lock);
- return scnprintf(buf, PAGE_SIZE, "%hu\n", subsystem_device_id);
+ return sysfs_emit(buf, "%u\n", subsystem_device_id);
}
static DEVICE_ATTR_RO(board_id);
@@ -69,19 +69,13 @@ config_show(struct device *device, struct device_attribute *attr, char *buf)
{
struct usnic_ib_dev *us_ibdev =
rdma_device_to_drv_device(device, struct usnic_ib_dev, ib_dev);
- char *ptr;
- unsigned left;
- unsigned n;
enum usnic_vnic_res_type res_type;
-
- /* Buffer space limit is 1 page */
- ptr = buf;
- left = PAGE_SIZE;
+ int len;
mutex_lock(&us_ibdev->usdev_lock);
if (kref_read(&us_ibdev->vf_cnt) > 0) {
char *busname;
-
+ char *sep = "";
/*
* bus name seems to come with annoying prefix.
* Remove it if it is predictable
@@ -90,39 +84,35 @@ config_show(struct device *device, struct device_attribute *attr, char *buf)
if (strncmp(busname, "PCI Bus ", 8) == 0)
busname += 8;
- n = scnprintf(ptr, left,
- "%s: %s:%d.%d, %s, %pM, %u VFs\n Per VF:",
- dev_name(&us_ibdev->ib_dev.dev),
- busname,
- PCI_SLOT(us_ibdev->pdev->devfn),
- PCI_FUNC(us_ibdev->pdev->devfn),
- netdev_name(us_ibdev->netdev),
- us_ibdev->ufdev->mac,
- kref_read(&us_ibdev->vf_cnt));
- UPDATE_PTR_LEFT(n, ptr, left);
+ len = sysfs_emit(buf, "%s: %s:%d.%d, %s, %pM, %u VFs\n",
+ dev_name(&us_ibdev->ib_dev.dev),
+ busname,
+ PCI_SLOT(us_ibdev->pdev->devfn),
+ PCI_FUNC(us_ibdev->pdev->devfn),
+ netdev_name(us_ibdev->netdev),
+ us_ibdev->ufdev->mac,
+ kref_read(&us_ibdev->vf_cnt));
+ len += sysfs_emit_at(buf, len, " Per VF:");
for (res_type = USNIC_VNIC_RES_TYPE_EOL;
- res_type < USNIC_VNIC_RES_TYPE_MAX;
- res_type++) {
+ res_type < USNIC_VNIC_RES_TYPE_MAX; res_type++) {
if (us_ibdev->vf_res_cnt[res_type] == 0)
continue;
- n = scnprintf(ptr, left, " %d %s%s",
- us_ibdev->vf_res_cnt[res_type],
- usnic_vnic_res_type_to_str(res_type),
- (res_type < (USNIC_VNIC_RES_TYPE_MAX - 1)) ?
- "," : "");
- UPDATE_PTR_LEFT(n, ptr, left);
+ len += sysfs_emit_at(buf, len, "%s %d %s",
+ sep,
+ us_ibdev->vf_res_cnt[res_type],
+ usnic_vnic_res_type_to_str(res_type));
+ sep = ",";
}
- n = scnprintf(ptr, left, "\n");
- UPDATE_PTR_LEFT(n, ptr, left);
+ len += sysfs_emit_at(buf, len, "\n");
} else {
- n = scnprintf(ptr, left, "%s: no VFs\n",
- dev_name(&us_ibdev->ib_dev.dev));
- UPDATE_PTR_LEFT(n, ptr, left);
+ len = sysfs_emit(buf, "%s: no VFs\n",
+ dev_name(&us_ibdev->ib_dev.dev));
}
+
mutex_unlock(&us_ibdev->usdev_lock);
- return ptr - buf;
+ return len;
}
static DEVICE_ATTR_RO(config);
@@ -132,8 +122,7 @@ iface_show(struct device *device, struct device_attribute *attr, char *buf)
struct usnic_ib_dev *us_ibdev =
rdma_device_to_drv_device(device, struct usnic_ib_dev, ib_dev);
- return scnprintf(buf, PAGE_SIZE, "%s\n",
- netdev_name(us_ibdev->netdev));
+ return sysfs_emit(buf, "%s\n", netdev_name(us_ibdev->netdev));
}
static DEVICE_ATTR_RO(iface);
@@ -143,8 +132,7 @@ max_vf_show(struct device *device, struct device_attribute *attr, char *buf)
struct usnic_ib_dev *us_ibdev =
rdma_device_to_drv_device(device, struct usnic_ib_dev, ib_dev);
- return scnprintf(buf, PAGE_SIZE, "%u\n",
- kref_read(&us_ibdev->vf_cnt));
+ return sysfs_emit(buf, "%u\n", kref_read(&us_ibdev->vf_cnt));
}
static DEVICE_ATTR_RO(max_vf);
@@ -158,8 +146,7 @@ qp_per_vf_show(struct device *device, struct device_attribute *attr, char *buf)
qp_per_vf = max(us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_WQ],
us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_RQ]);
- return scnprintf(buf, PAGE_SIZE,
- "%d\n", qp_per_vf);
+ return sysfs_emit(buf, "%d\n", qp_per_vf);
}
static DEVICE_ATTR_RO(qp_per_vf);
@@ -169,8 +156,8 @@ cq_per_vf_show(struct device *device, struct device_attribute *attr, char *buf)
struct usnic_ib_dev *us_ibdev =
rdma_device_to_drv_device(device, struct usnic_ib_dev, ib_dev);
- return scnprintf(buf, PAGE_SIZE, "%d\n",
- us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_CQ]);
+ return sysfs_emit(buf, "%d\n",
+ us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_CQ]);
}
static DEVICE_ATTR_RO(cq_per_vf);
@@ -217,43 +204,35 @@ struct qpn_attribute qpn_attr_##NAME = __ATTR_RO(NAME)
static ssize_t context_show(struct usnic_ib_qp_grp *qp_grp, char *buf)
{
- return scnprintf(buf, PAGE_SIZE, "0x%p\n", qp_grp->ctx);
+ return sysfs_emit(buf, "0x%p\n", qp_grp->ctx);
}
static ssize_t summary_show(struct usnic_ib_qp_grp *qp_grp, char *buf)
{
- int i, j, n;
- int left;
- char *ptr;
+ int i, j;
struct usnic_vnic_res_chunk *res_chunk;
struct usnic_vnic_res *vnic_res;
+ int len;
- left = PAGE_SIZE;
- ptr = buf;
-
- n = scnprintf(ptr, left,
- "QPN: %d State: (%s) PID: %u VF Idx: %hu ",
- qp_grp->ibqp.qp_num,
- usnic_ib_qp_grp_state_to_string(qp_grp->state),
- qp_grp->owner_pid,
- usnic_vnic_get_index(qp_grp->vf->vnic));
- UPDATE_PTR_LEFT(n, ptr, left);
+ len = sysfs_emit(buf, "QPN: %d State: (%s) PID: %u VF Idx: %hu",
+ qp_grp->ibqp.qp_num,
+ usnic_ib_qp_grp_state_to_string(qp_grp->state),
+ qp_grp->owner_pid,
+ usnic_vnic_get_index(qp_grp->vf->vnic));
for (i = 0; qp_grp->res_chunk_list[i]; i++) {
res_chunk = qp_grp->res_chunk_list[i];
for (j = 0; j < res_chunk->cnt; j++) {
vnic_res = res_chunk->res[j];
- n = scnprintf(ptr, left, "%s[%d] ",
+ len += sysfs_emit_at(buf, len, " %s[%d]",
usnic_vnic_res_type_to_str(vnic_res->type),
vnic_res->vnic_idx);
- UPDATE_PTR_LEFT(n, ptr, left);
}
}
- n = scnprintf(ptr, left, "\n");
- UPDATE_PTR_LEFT(n, ptr, left);
+ len += sysfs_emit_at(buf, len, "\n");
- return ptr - buf;
+ return len;
}
static QPN_ATTR_RO(context);
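
Where a show() routine builds its output in pieces, the conversion pairs sysfs_emit() for the first chunk with sysfs_emit_at(), which appends at a byte offset and returns the bytes written, replacing the ptr/left arithmetic of the old UPDATE_PTR_LEFT macro. A minimal sketch of the idiom on a hypothetical list:

	static ssize_t list_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
	{
		static const char * const items[] = { "wq", "rq", "cq" };
		int len, i;

		len = sysfs_emit(buf, "resources:");
		for (i = 0; i < ARRAY_SIZE(items); i++)
			len += sysfs_emit_at(buf, len, " %s", items[i]);
		len += sysfs_emit_at(buf, len, "\n");

		return len;
	}
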
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
index 9e961f8ffa10..3705c6b8b223 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -214,6 +214,7 @@ find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
}
usnic_uiom_free_dev_list(dev_list);
+ dev_list = NULL;
}
/* Try to find resources on an unused vf */
@@ -239,6 +240,8 @@ find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
qp_grp_check:
if (IS_ERR_OR_NULL(qp_grp)) {
usnic_err("Failed to allocate qp_grp\n");
+ if (usnic_ib_share_vf)
+ usnic_uiom_free_dev_list(dev_list);
return ERR_PTR(qp_grp ? PTR_ERR(qp_grp) : -ENOMEM);
}
return qp_grp;
@@ -474,7 +477,7 @@ struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
us_ibdev = to_usdev(pd->device);
if (init_attr->create_flags)
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(-EOPNOTSUPP);
err = ib_copy_from_udata(&cmd, udata, sizeof(cmd));
if (err) {
@@ -557,6 +560,9 @@ int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int status;
usnic_dbg("\n");
+ if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
+ return -EOPNOTSUPP;
+
qp_grp = to_uqp_grp(ibqp);
mutex_lock(&qp_grp->vf->pf->usdev_lock);
@@ -581,7 +587,7 @@ int usnic_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct ib_udata *udata)
{
if (attr->flags)
- return -EINVAL;
+ return -EOPNOTSUPP;
return 0;
}
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
index c142f5e7f25f..de57f2fed743 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
@@ -509,6 +509,20 @@ static inline int ib_send_flags_to_pvrdma(int flags)
return flags & PVRDMA_MASK(PVRDMA_SEND_FLAGS_MAX);
}
+static inline int pvrdma_network_type_to_ib(enum pvrdma_network_type type)
+{
+ switch (type) {
+ case PVRDMA_NETWORK_ROCE_V1:
+ return RDMA_NETWORK_ROCE_V1;
+ case PVRDMA_NETWORK_IPV4:
+ return RDMA_NETWORK_IPV4;
+ case PVRDMA_NETWORK_IPV6:
+ return RDMA_NETWORK_IPV6;
+ default:
+ return RDMA_NETWORK_IPV6;
+ }
+}
+
void pvrdma_qp_cap_to_ib(struct ib_qp_cap *dst,
const struct pvrdma_qp_cap *src);
void ib_qp_cap_to_pvrdma(struct pvrdma_qp_cap *dst,
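
pvrdma previously copied the device's raw network_hdr_type straight into ib_wc, silently assuming the PVRDMA_NETWORK_* and RDMA_NETWORK_* encodings matched. The new helper makes the mapping explicit, defaulting unknown values to RDMA_NETWORK_IPV6; that is the conservative choice, since the GRH shares its layout with an IPv6 header. Its one call site is in the pvrdma_cq.c hunk below:

	/* Translate at CQ poll time, before consumers see the value. */
	wc->network_hdr_type = pvrdma_network_type_to_ib(cqe->network_hdr_type);
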
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
index 319546a39a0d..6aa40bd2fd52 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
@@ -119,6 +119,9 @@ int pvrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
BUILD_BUG_ON(sizeof(struct pvrdma_cqe) != 64);
+ if (attr->flags)
+ return -EOPNOTSUPP;
+
entries = roundup_pow_of_two(entries);
if (entries < 1 || entries > dev->dsr->caps.max_cqe)
return -EINVAL;
@@ -364,7 +367,7 @@ retry:
wc->dlid_path_bits = cqe->dlid_path_bits;
wc->port_num = cqe->port_num;
wc->vendor_err = cqe->vendor_err;
- wc->network_hdr_type = cqe->network_hdr_type;
+ wc->network_hdr_type = pvrdma_network_type_to_ib(cqe->network_hdr_type);
/* Update shared ring state */
pvrdma_idx_ring_inc(&cq->ring_state->rx.cons_head, cq->ibcq.cqe);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index 6895bac53990..00a330909bb3 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -68,21 +68,21 @@ static int pvrdma_del_gid(const struct ib_gid_attr *attr, void **context);
static ssize_t hca_type_show(struct device *device,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "VMW_PVRDMA-%s\n", DRV_VERSION);
+ return sysfs_emit(buf, "VMW_PVRDMA-%s\n", DRV_VERSION);
}
static DEVICE_ATTR_RO(hca_type);
static ssize_t hw_rev_show(struct device *device,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", PVRDMA_REV_ID);
+ return sysfs_emit(buf, "%d\n", PVRDMA_REV_ID);
}
static DEVICE_ATTR_RO(hw_rev);
static ssize_t board_id_show(struct device *device,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", PVRDMA_BOARD_ID);
+ return sysfs_emit(buf, "%d\n", PVRDMA_BOARD_ID);
}
static DEVICE_ATTR_RO(board_id);
@@ -205,27 +205,6 @@ static int pvrdma_register_device(struct pvrdma_dev *dev)
dev->flags = 0;
dev->ib_dev.num_comp_vectors = 1;
dev->ib_dev.dev.parent = &dev->pdev->dev;
- dev->ib_dev.uverbs_cmd_mask =
- (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
- (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
- (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
- (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_REG_MR) |
- (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
- (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
- (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
- (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
- (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
- (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
- (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
- (1ull << IB_USER_VERBS_CMD_POST_SEND) |
- (1ull << IB_USER_VERBS_CMD_POST_RECV) |
- (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_AH);
dev->ib_dev.node_type = RDMA_NODE_IB_CA;
dev->ib_dev.phys_port_cnt = dev->dsr->caps.phys_port_cnt;
@@ -249,13 +228,6 @@ static int pvrdma_register_device(struct pvrdma_dev *dev)
/* Check if SRQ is supported by backend */
if (dev->dsr->caps.max_srq) {
- dev->ib_dev.uverbs_cmd_mask |=
- (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
- (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
- (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
-
ib_set_device_ops(&dev->ib_dev, &pvrdma_dev_srq_ops);
dev->srq_tbl = kcalloc(dev->dsr->caps.max_srq,
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
index 428256c55065..1d3bdd7bb51d 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
@@ -209,7 +209,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
dev_warn(&dev->pdev->dev,
"invalid create queuepair flags %#x\n",
init_attr->create_flags);
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(-EOPNOTSUPP);
}
if (init_attr->qp_type != IB_QPT_RC &&
@@ -544,6 +544,9 @@ int pvrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
enum ib_qp_state cur_state, next_state;
int ret;
+ if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
+ return -EOPNOTSUPP;
+
/* Sanity checking. Should need lock here */
mutex_lock(&qp->mutex);
cur_state = (attr_mask & IB_QP_CUR_STATE) ? attr->cur_qp_state :
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
index 082208f9aa90..bdc2703532c6 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
@@ -121,7 +121,7 @@ int pvrdma_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
dev_warn(&dev->pdev->dev,
"shared receive queue type %d not supported\n",
init_attr->srq_type);
- return -EINVAL;
+ return -EOPNOTSUPP;
}
if (init_attr->attr.max_wr > dev->dsr->caps.max_srq_wr ||
diff --git a/drivers/infiniband/sw/rdmavt/Kconfig b/drivers/infiniband/sw/rdmavt/Kconfig
index c8e268082952..0df48b3a6b56 100644
--- a/drivers/infiniband/sw/rdmavt/Kconfig
+++ b/drivers/infiniband/sw/rdmavt/Kconfig
@@ -4,6 +4,5 @@ config INFINIBAND_RDMAVT
depends on INFINIBAND_VIRT_DMA
depends on X86_64
depends on PCI
- select DMA_VIRT_OPS
help
This is a common software verbs provider for RDMA networks.
diff --git a/drivers/infiniband/sw/rdmavt/ah.c b/drivers/infiniband/sw/rdmavt/ah.c
index b938c4ffa99a..a3e5b368c5e7 100644
--- a/drivers/infiniband/sw/rdmavt/ah.c
+++ b/drivers/infiniband/sw/rdmavt/ah.c
@@ -126,10 +126,9 @@ int rvt_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
}
/**
- * rvt_destory_ah - Destory an address handle
+ * rvt_destroy_ah - Destroy an address handle
* @ibah: address handle
* @destroy_flags: destroy address handle flags (see enum rdma_destroy_ah_flags)
- *
* Return: 0 on success
*/
int rvt_destroy_ah(struct ib_ah *ibah, u32 destroy_flags)
diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
index 19248be14093..20cc0799ac4b 100644
--- a/drivers/infiniband/sw/rdmavt/cq.c
+++ b/drivers/infiniband/sw/rdmavt/cq.c
@@ -211,7 +211,7 @@ int rvt_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
int err;
if (attr->flags)
- return -EINVAL;
+ return -EOPNOTSUPP;
if (entries < 1 || entries > rdi->dparms.props.max_cqe)
return -EINVAL;
diff --git a/drivers/infiniband/sw/rdmavt/mcast.c b/drivers/infiniband/sw/rdmavt/mcast.c
index dd11c6fcd060..5233a63d99a6 100644
--- a/drivers/infiniband/sw/rdmavt/mcast.c
+++ b/drivers/infiniband/sw/rdmavt/mcast.c
@@ -54,7 +54,7 @@
#include "mcast.h"
/**
- * rvt_driver_mcast - init resources for multicast
+ * rvt_driver_mcast_init - init resources for multicast
* @rdi: rvt dev struct
*
* This is per device that registers with rdmavt
@@ -69,7 +69,7 @@ void rvt_driver_mcast_init(struct rvt_dev_info *rdi)
}
/**
- * mcast_qp_alloc - alloc a struct to link a QP to mcast GID struct
+ * rvt_mcast_qp_alloc - alloc a struct to link a QP to mcast GID struct
* @qp: the QP to link
*/
static struct rvt_mcast_qp *rvt_mcast_qp_alloc(struct rvt_qp *qp)
@@ -98,7 +98,7 @@ static void rvt_mcast_qp_free(struct rvt_mcast_qp *mqp)
}
/**
- * mcast_alloc - allocate the multicast GID structure
+ * rvt_mcast_alloc - allocate the multicast GID structure
* @mgid: the multicast GID
 * @lid: the multicast LID (host order)
*
@@ -181,7 +181,7 @@ struct rvt_mcast *rvt_mcast_find(struct rvt_ibport *ibp, union ib_gid *mgid,
EXPORT_SYMBOL(rvt_mcast_find);
/**
- * mcast_add - insert mcast GID into table and attach QP struct
+ * rvt_mcast_add - insert mcast GID into table and attach QP struct
* @mcast: the mcast GID table
* @mqp: the QP to attach
*
@@ -426,8 +426,8 @@ int rvt_detach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
}
/**
- *rvt_mast_tree_empty - determine if any qps are attached to any mcast group
- *@rdi: rvt dev struct
+ * rvt_mcast_tree_empty - determine if any qps are attached to any mcast group
+ * @rdi: rvt dev struct
*
* Return: in use count
*/
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index 8490fdb9c91e..90fc234f489a 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -324,8 +324,6 @@ static void __rvt_free_mr(struct rvt_mr *mr)
* @acc: access flags
*
* Return: the memory region on success, otherwise returns an errno.
- * Note that all DMA addresses should be created via the functions in
- * struct dma_virt_ops.
*/
struct ib_mr *rvt_get_dma_mr(struct ib_pd *pd, int acc)
{
@@ -766,7 +764,7 @@ int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
/*
* We use LKEY == zero for kernel virtual addresses
- * (see rvt_get_dma_mr() and dma_virt_ops).
+ * (see rvt_get_dma_mr()).
*/
if (sge->lkey == 0) {
struct rvt_dev_info *dev = ib_to_rvt(pd->ibpd.device);
@@ -877,7 +875,7 @@ int rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
/*
* We use RKEY == zero for kernel virtual addresses
- * (see rvt_get_dma_mr() and dma_virt_ops).
+ * (see rvt_get_dma_mr()).
*/
rcu_read_lock();
if (rkey == 0) {
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index ee48befc8978..22fa9bde5419 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -1083,10 +1083,11 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
if (!rdi)
return ERR_PTR(-EINVAL);
+ if (init_attr->create_flags & ~IB_QP_CREATE_NETDEV_USE)
+ return ERR_PTR(-EOPNOTSUPP);
+
if (init_attr->cap.max_send_sge > rdi->dparms.props.max_send_sge ||
- init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr ||
- (init_attr->create_flags &&
- init_attr->create_flags != IB_QP_CREATE_NETDEV_USE))
+ init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr)
return ERR_PTR(-EINVAL);
/* Check receive queue parameters if no SRQ is specified. */
@@ -1469,6 +1470,9 @@ int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int pmtu = 0; /* for gcc warning only */
int opa_ah;
+ if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
+ return -EOPNOTSUPP;
+
spin_lock_irq(&qp->r_lock);
spin_lock(&qp->s_hlock);
spin_lock(&qp->s_lock);
@@ -1823,7 +1827,7 @@ int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
}
/**
- * rvt_post_receive - post a receive on a QP
+ * rvt_post_recv - post a receive on a QP
* @ibqp: the QP to post the receive on
* @wr: the WR to post
* @bad_wr: the first bad WR is put here
@@ -2245,7 +2249,7 @@ bail:
}
/**
- * rvt_post_srq_receive - post a receive on a shared receive queue
+ * rvt_post_srq_recv - post a receive on a shared receive queue
* @ibsrq: the SRQ to post the receive on
* @wr: the list of work requests to post
* @bad_wr: A pointer to the first WR to cause a problem is put here
@@ -2497,7 +2501,7 @@ bail:
EXPORT_SYMBOL(rvt_get_rwqe);
/**
- * qp_comm_est - handle trap with QP established
+ * rvt_comm_est - handle trap with QP established
* @qp: the QP
*/
void rvt_comm_est(struct rvt_qp *qp)
@@ -2943,7 +2947,7 @@ static enum ib_wc_status loopback_qp_drop(struct rvt_ibport *rvp,
}
/**
- * ruc_loopback - handle UC and RC loopback requests
+ * rvt_ruc_loopback - handle UC and RC loopback requests
* @sqp: the sending QP
*
* This is called from rvt_do_send() to forward a WQE addressed to the same HFI
diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
index 670a9623b46e..49cec85a372a 100644
--- a/drivers/infiniband/sw/rdmavt/vt.c
+++ b/drivers/infiniband/sw/rdmavt/vt.c
@@ -384,6 +384,7 @@ static const struct ib_device_ops rvt_dev_ops = {
.create_cq = rvt_create_cq,
.create_qp = rvt_create_qp,
.create_srq = rvt_create_srq,
+ .create_user_ah = rvt_create_ah,
.dealloc_pd = rvt_dealloc_pd,
.dealloc_ucontext = rvt_dealloc_ucontext,
.dereg_mr = rvt_dereg_mr,
@@ -524,7 +525,6 @@ static noinline int check_support(struct rvt_dev_info *rdi, int verb)
int rvt_register_device(struct rvt_dev_info *rdi)
{
int ret = 0, i;
- u64 dma_mask;
if (!rdi)
return -EINVAL;
@@ -579,13 +579,6 @@ int rvt_register_device(struct rvt_dev_info *rdi)
/* Completion queues */
spin_lock_init(&rdi->n_cqs_lock);
- /* DMA Operations */
- rdi->ibdev.dev.dma_parms = rdi->ibdev.dev.parent->dma_parms;
- dma_mask = IS_ENABLED(CONFIG_64BIT) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32);
- ret = dma_coerce_mask_and_coherent(&rdi->ibdev.dev, dma_mask);
- if (ret)
- goto bail_wss;
-
/* Protection Domain */
spin_lock_init(&rdi->n_pds_lock);
rdi->n_pds_allocated = 0;
@@ -596,36 +589,11 @@ int rvt_register_device(struct rvt_dev_info *rdi)
* exactly which functions rdmavt supports, nor do they know the ABI
* version, so we do all of this sort of stuff here.
*/
- rdi->ibdev.uverbs_cmd_mask =
- (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
- (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
- (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
- (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_AH) |
- (1ull << IB_USER_VERBS_CMD_QUERY_AH) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
- (1ull << IB_USER_VERBS_CMD_REG_MR) |
- (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
- (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
- (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
- (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
+ rdi->ibdev.uverbs_cmd_mask |=
(1ull << IB_USER_VERBS_CMD_POLL_CQ) |
(1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
- (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
- (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
(1ull << IB_USER_VERBS_CMD_POST_SEND) |
(1ull << IB_USER_VERBS_CMD_POST_RECV) |
- (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
- (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
- (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
- (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
(1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
rdi->ibdev.node_type = RDMA_NODE_IB_CA;
if (!rdi->ibdev.num_comp_vectors)
diff --git a/drivers/infiniband/sw/rxe/Kconfig b/drivers/infiniband/sw/rxe/Kconfig
index 8810bfa68049..452149066792 100644
--- a/drivers/infiniband/sw/rxe/Kconfig
+++ b/drivers/infiniband/sw/rxe/Kconfig
@@ -5,7 +5,6 @@ config RDMA_RXE
depends on INFINIBAND_VIRT_DMA
select NET_UDP_TUNNEL
select CRYPTO_CRC32
- select DMA_VIRT_OPS
help
This driver implements the InfiniBand RDMA transport over
the Linux network stack. It enables a system with a
diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c
index 43394c3f29d4..b315ebf041ac 100644
--- a/drivers/infiniband/sw/rxe/rxe_cq.c
+++ b/drivers/infiniband/sw/rxe/rxe_cq.c
@@ -123,11 +123,6 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
memcpy(producer_addr(cq->queue), cqe, sizeof(*cqe));
- /* make sure all changes to the CQ are written before we update the
- * producer pointer
- */
- smp_wmb();
-
advance_producer(cq->queue);
spin_unlock_irqrestore(&cq->cq_lock, flags);
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index d2ce852447c1..6e8c41567ba0 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -31,7 +31,6 @@ int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length)
return 0;
case RXE_MEM_TYPE_MR:
- case RXE_MEM_TYPE_FMR:
if (iova < mem->iova ||
length > mem->length ||
iova > mem->iova + mem->length - length)
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index 34bef7d8e6b4..943914c2a50c 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -20,18 +20,6 @@
static struct rxe_recv_sockets recv_sockets;
-struct device *rxe_dma_device(struct rxe_dev *rxe)
-{
- struct net_device *ndev;
-
- ndev = rxe->ndev;
-
- if (is_vlan_dev(ndev))
- ndev = vlan_dev_real_dev(ndev);
-
- return ndev->dev.parent;
-}
-
int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)
{
int err;
diff --git a/drivers/infiniband/sw/rxe/rxe_queue.h b/drivers/infiniband/sw/rxe/rxe_queue.h
index 7d434a6837a7..2902ca7b288c 100644
--- a/drivers/infiniband/sw/rxe/rxe_queue.h
+++ b/drivers/infiniband/sw/rxe/rxe_queue.h
@@ -7,9 +7,11 @@
#ifndef RXE_QUEUE_H
#define RXE_QUEUE_H
+/* for definition of shared struct rxe_queue_buf */
+#include <uapi/rdma/rdma_user_rxe.h>
+
/* implements a simple circular buffer that can optionally be
* shared between user space and the kernel and can be resized
-
* the requested element size is rounded up to a power of 2
* and the number of elements in the buffer is also rounded
* up to a power of 2. Since the queue is empty when the
@@ -17,28 +19,6 @@
* of the queue is one less than the number of element slots
*/
-/* this data structure is shared between user space and kernel
- * space for those cases where the queue is shared. It contains
- * the producer and consumer indices. Is also contains a copy
- * of the queue size parameters for user space to use but the
- * kernel must use the parameters in the rxe_queue struct
- * this MUST MATCH the corresponding librxe struct
- * for performance reasons arrange to have producer and consumer
- * pointers in separate cache lines
- * the kernel should always mask the indices to avoid accessing
- * memory outside of the data area
- */
-struct rxe_queue_buf {
- __u32 log2_elem_size;
- __u32 index_mask;
- __u32 pad_1[30];
- __u32 producer_index;
- __u32 pad_2[31];
- __u32 consumer_index;
- __u32 pad_3[31];
- __u8 data[];
-};
-
struct rxe_queue {
struct rxe_dev *rxe;
struct rxe_queue_buf *buf;
@@ -46,7 +26,7 @@ struct rxe_queue {
size_t buf_size;
size_t elem_size;
unsigned int log2_elem_size;
- unsigned int index_mask;
+ u32 index_mask;
};
int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,
@@ -76,26 +56,56 @@ static inline int next_index(struct rxe_queue *q, int index)
static inline int queue_empty(struct rxe_queue *q)
{
- return ((q->buf->producer_index - q->buf->consumer_index)
- & q->index_mask) == 0;
+ u32 prod;
+ u32 cons;
+
+ /* make sure all changes to queue complete before
+ * testing queue empty
+ */
+ prod = smp_load_acquire(&q->buf->producer_index);
+ /* same */
+ cons = smp_load_acquire(&q->buf->consumer_index);
+
+ return ((prod - cons) & q->index_mask) == 0;
}
static inline int queue_full(struct rxe_queue *q)
{
- return ((q->buf->producer_index + 1 - q->buf->consumer_index)
- & q->index_mask) == 0;
+ u32 prod;
+ u32 cons;
+
+ /* make sure all changes to queue complete before
+ * testing queue full
+ */
+ prod = smp_load_acquire(&q->buf->producer_index);
+ /* same */
+ cons = smp_load_acquire(&q->buf->consumer_index);
+
+ return ((prod + 1 - cons) & q->index_mask) == 0;
}
static inline void advance_producer(struct rxe_queue *q)
{
- q->buf->producer_index = (q->buf->producer_index + 1)
- & q->index_mask;
+ u32 prod;
+
+ prod = (q->buf->producer_index + 1) & q->index_mask;
+
+ /* make sure all changes to queue complete before
+ * changing producer index
+ */
+ smp_store_release(&q->buf->producer_index, prod);
}
static inline void advance_consumer(struct rxe_queue *q)
{
- q->buf->consumer_index = (q->buf->consumer_index + 1)
- & q->index_mask;
+ u32 cons;
+
+ cons = (q->buf->consumer_index + 1) & q->index_mask;
+
+ /* make sure all changes to queue complete before
+ * changing consumer index
+ */
+ smp_store_release(&q->buf->consumer_index, cons);
}
static inline void *producer_addr(struct rxe_queue *q)
@@ -112,12 +122,28 @@ static inline void *consumer_addr(struct rxe_queue *q)
static inline unsigned int producer_index(struct rxe_queue *q)
{
- return q->buf->producer_index;
+ u32 index;
+
+ /* make sure all changes to queue
+ * complete before getting producer index
+ */
+ index = smp_load_acquire(&q->buf->producer_index);
+ index &= q->index_mask;
+
+ return index;
}
static inline unsigned int consumer_index(struct rxe_queue *q)
{
- return q->buf->consumer_index;
+ u32 index;
+
+ /* make sure all changes to queue
+ * complete before getting consumer index
+ */
+ index = smp_load_acquire(&q->buf->consumer_index);
+ index &= q->index_mask;
+
+ return index;
}
static inline void *addr_from_index(struct rxe_queue *q, unsigned int index)
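
The queue helpers above replace a detached smp_wmb() at each call site with acquire/release semantics attached directly to the shared producer/consumer indices. A self-contained sketch of the same single-producer/single-consumer pattern, with hypothetical names and assuming a power-of-two slot count:

#include <linux/types.h>
#include <asm/barrier.h>

struct demo_ring {
	u32 prod;		/* written by the producer only */
	u32 cons;		/* written by the consumer only */
	u32 index_mask;		/* nr_slots - 1, nr_slots a power of 2 */
	void *slot[];
};

static bool demo_push(struct demo_ring *r, void *item)
{
	u32 prod = r->prod;
	u32 cons = smp_load_acquire(&r->cons);	/* pairs with pop's release */

	if (((prod + 1 - cons) & r->index_mask) == 0)
		return false;			/* full */
	r->slot[prod & r->index_mask] = item;
	/* publish: order the slot write before the index update */
	smp_store_release(&r->prod, (prod + 1) & r->index_mask);
	return true;
}

static void *demo_pop(struct demo_ring *r)
{
	u32 cons = r->cons;
	u32 prod = smp_load_acquire(&r->prod);	/* pairs with push's release */
	void *item;

	if (((prod - cons) & r->index_mask) == 0)
		return NULL;			/* empty */
	item = r->slot[cons & r->index_mask];
	smp_store_release(&r->cons, (cons + 1) & r->index_mask);
	return item;
}
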
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index af3923bf0a36..d4917646641a 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -634,7 +634,8 @@ next_wqe:
}
if (unlikely(qp_type(qp) == IB_QPT_RC &&
- qp->req.psn > (qp->comp.psn + RXE_MAX_UNACKED_PSNS))) {
+ psn_compare(qp->req.psn, (qp->comp.psn +
+ RXE_MAX_UNACKED_PSNS)) > 0)) {
qp->req.wait_psn = 1;
goto exit;
}
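
RC PSNs are 24-bit sequence numbers that wrap, so the plain ">" removed above inverts its answer once req.psn wraps past comp.psn + RXE_MAX_UNACKED_PSNS. psn_compare() orders them modularly instead; a sketch of the idea, assumed to mirror the driver's helper:

#include <linux/types.h>

/* shift the 24-bit modular difference into the sign bit of a 32-bit int */
static inline int demo_psn_compare(u32 psn_a, u32 psn_b)
{
	return (s32)((psn_a - psn_b) << 8);	/* > 0: a is newer than b */
}
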
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index f9c832e82552..a031514e2f41 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -244,11 +244,6 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
recv_wqe->dma.cur_sge = 0;
recv_wqe->dma.sge_offset = 0;
- /* make sure all changes to the work queue are written before we
- * update the producer pointer
- */
- smp_wmb();
-
advance_producer(rq->queue);
return 0;
@@ -265,6 +260,9 @@ static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
struct rxe_srq *srq = to_rsrq(ibsrq);
struct rxe_create_srq_resp __user *uresp = NULL;
+ if (init->srq_type != IB_SRQT_BASIC)
+ return -EOPNOTSUPP;
+
if (udata) {
if (udata->outlen < sizeof(*uresp))
return -EINVAL;
@@ -392,6 +390,9 @@ static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
uresp = udata->outbuf;
}
+ if (init->create_flags)
+ return ERR_PTR(-EOPNOTSUPP);
+
err = rxe_qp_chk_init(rxe, init);
if (err)
goto err1;
@@ -433,6 +434,9 @@ static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
struct rxe_dev *rxe = to_rdev(ibqp->device);
struct rxe_qp *qp = to_rqp(ibqp);
+ if (mask & ~IB_QP_ATTR_STANDARD_BITS)
+ return -EOPNOTSUPP;
+
err = rxe_qp_chk_attr(rxe, qp, attr, mask);
if (err)
goto err1;
@@ -624,12 +628,6 @@ static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
if (unlikely(err))
goto err1;
- /*
- * make sure all changes to the work queue are
- * written before we update the producer pointer
- */
- smp_wmb();
-
advance_producer(sq->queue);
spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
@@ -765,7 +763,7 @@ static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
}
if (attr->flags)
- return -EINVAL;
+ return -EOPNOTSUPP;
err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector);
if (err)
@@ -1033,7 +1031,7 @@ static ssize_t parent_show(struct device *device,
struct rxe_dev *rxe =
rdma_device_to_drv_device(device, struct rxe_dev, ib_dev);
- return scnprintf(buf, PAGE_SIZE, "%s\n", rxe_parent_name(rxe, 1));
+ return sysfs_emit(buf, "%s\n", rxe_parent_name(rxe, 1));
}
static DEVICE_ATTR_RO(parent);
@@ -1070,6 +1068,7 @@ static const struct ib_device_ops rxe_dev_ops = {
.create_cq = rxe_create_cq,
.create_qp = rxe_create_qp,
.create_srq = rxe_create_srq,
+ .create_user_ah = rxe_create_ah,
.dealloc_driver = rxe_dealloc,
.dealloc_pd = rxe_dealloc_pd,
.dealloc_ucontext = rxe_dealloc_ucontext,
@@ -1118,56 +1117,18 @@ int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
int err;
struct ib_device *dev = &rxe->ib_dev;
struct crypto_shash *tfm;
- u64 dma_mask;
strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));
dev->node_type = RDMA_NODE_IB_CA;
dev->phys_port_cnt = 1;
dev->num_comp_vectors = num_possible_cpus();
- dev->dev.parent = rxe_dma_device(rxe);
dev->local_dma_lkey = 0;
addrconf_addr_eui48((unsigned char *)&dev->node_guid,
rxe->ndev->dev_addr);
- dev->dev.dma_parms = &rxe->dma_parms;
- dma_set_max_seg_size(&dev->dev, UINT_MAX);
- dma_mask = IS_ENABLED(CONFIG_64BIT) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32);
- err = dma_coerce_mask_and_coherent(&dev->dev, dma_mask);
- if (err)
- return err;
- dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
- | BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)
- | BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE)
- | BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT)
- | BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD)
- | BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD)
- | BIT_ULL(IB_USER_VERBS_CMD_CREATE_SRQ)
- | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_SRQ)
- | BIT_ULL(IB_USER_VERBS_CMD_QUERY_SRQ)
- | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_SRQ)
- | BIT_ULL(IB_USER_VERBS_CMD_POST_SRQ_RECV)
- | BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP)
- | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP)
- | BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP)
- | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP)
- | BIT_ULL(IB_USER_VERBS_CMD_POST_SEND)
- | BIT_ULL(IB_USER_VERBS_CMD_POST_RECV)
- | BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ)
- | BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ)
- | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ)
- | BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ)
- | BIT_ULL(IB_USER_VERBS_CMD_PEEK_CQ)
- | BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)
- | BIT_ULL(IB_USER_VERBS_CMD_REG_MR)
- | BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR)
- | BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH)
- | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_AH)
- | BIT_ULL(IB_USER_VERBS_CMD_QUERY_AH)
- | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH)
- | BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST)
- | BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST)
- ;
+ dev->uverbs_cmd_mask |= BIT_ULL(IB_USER_VERBS_CMD_POST_SEND) |
+ BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ);
ib_set_device_ops(dev, &rxe_dev_ops);
err = ib_device_set_netdev(&rxe->ib_dev, rxe->ndev, 1);
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index 3414b341b709..79e0a5a878da 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -273,7 +273,6 @@ enum rxe_mem_type {
RXE_MEM_TYPE_NONE,
RXE_MEM_TYPE_DMA,
RXE_MEM_TYPE_MR,
- RXE_MEM_TYPE_FMR,
RXE_MEM_TYPE_MW,
};
@@ -352,7 +351,6 @@ struct rxe_port {
struct rxe_dev {
struct ib_device ib_dev;
struct ib_device_attr attr;
- struct device_dma_parameters dma_parms;
int max_ucontext;
int max_inline_data;
struct mutex usdev_lock;
diff --git a/drivers/infiniband/sw/siw/Kconfig b/drivers/infiniband/sw/siw/Kconfig
index 3450ba5081df..1b5105cbabae 100644
--- a/drivers/infiniband/sw/siw/Kconfig
+++ b/drivers/infiniband/sw/siw/Kconfig
@@ -2,7 +2,6 @@ config RDMA_SIW
tristate "Software RDMA over TCP/IP (iWARP) driver"
depends on INET && INFINIBAND && LIBCRC32C
depends on INFINIBAND_VIRT_DMA
- select DMA_VIRT_OPS
help
This driver implements the iWARP RDMA transport over
the Linux TCP/IP network stack. It enables a system with a
diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
index e9753831ac3f..adda78996219 100644
--- a/drivers/infiniband/sw/siw/siw.h
+++ b/drivers/infiniband/sw/siw/siw.h
@@ -69,7 +69,6 @@ struct siw_pd {
struct siw_device {
struct ib_device base_dev;
- struct device_dma_parameters dma_parms;
struct net_device *netdev;
struct siw_dev_cap attrs;
diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
index 66764f7ef072..1f9e15b71504 100644
--- a/drivers/infiniband/sw/siw/siw_cm.c
+++ b/drivers/infiniband/sw/siw/siw_cm.c
@@ -1047,7 +1047,7 @@ static void siw_cm_work_handler(struct work_struct *w)
cep->state);
}
}
- if (rv && rv != EAGAIN)
+ if (rv && rv != -EAGAIN)
release_cep = 1;
break;
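
The one-character fix above matters because kernel-internal paths return negative errno values: a comparison against the positive EAGAIN can never match, so every retryable -EAGAIN result tore down the endpoint. Schematically:

#include <linux/errno.h>

static bool demo_is_fatal(int rv)
{
	return rv && rv != -EAGAIN;	/* negative errno convention */
}
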
diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
index 181e06c1c43d..ee95cf29179d 100644
--- a/drivers/infiniband/sw/siw/siw_main.c
+++ b/drivers/infiniband/sw/siw/siw_main.c
@@ -305,25 +305,8 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
{
struct siw_device *sdev = NULL;
struct ib_device *base_dev;
- struct device *parent = netdev->dev.parent;
- u64 dma_mask;
int rv;
- if (!parent) {
- /*
- * The loopback device has no parent device,
- * so it appears as a top-level device. To support
- * loopback device connectivity, take this device
- * as the parent device. Skip all other devices
- * w/o parent device.
- */
- if (netdev->type != ARPHRD_LOOPBACK) {
- pr_warn("siw: device %s error: no parent device\n",
- netdev->name);
- return NULL;
- }
- parent = &netdev->dev;
- }
sdev = ib_alloc_device(siw_device, base_dev);
if (!sdev)
return NULL;
@@ -347,30 +330,8 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
addrconf_addr_eui48((unsigned char *)&base_dev->node_guid,
addr);
}
- base_dev->uverbs_cmd_mask =
- (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
- (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
- (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
- (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_REG_MR) |
- (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
- (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
- (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
- (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
- (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
- (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
- (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
- (1ull << IB_USER_VERBS_CMD_POST_SEND) |
- (1ull << IB_USER_VERBS_CMD_POST_RECV) |
- (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
- (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
- (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);
+
+ base_dev->uverbs_cmd_mask |= BIT_ULL(IB_USER_VERBS_CMD_POST_SEND);
base_dev->node_type = RDMA_NODE_RNIC;
memcpy(base_dev->node_desc, SIW_NODE_DESC_COMMON,
@@ -382,13 +343,6 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
* per physical port.
*/
base_dev->phys_port_cnt = 1;
- base_dev->dev.parent = parent;
- base_dev->dev.dma_parms = &sdev->dma_parms;
- dma_set_max_seg_size(&base_dev->dev, UINT_MAX);
- dma_mask = IS_ENABLED(CONFIG_64BIT) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32);
- if (dma_coerce_mask_and_coherent(&base_dev->dev, dma_mask))
- goto error;
-
base_dev->num_comp_vectors = num_possible_cpus();
xa_init_flags(&sdev->qp_xa, XA_FLAGS_ALLOC1);
@@ -430,7 +384,7 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
atomic_set(&sdev->num_mr, 0);
atomic_set(&sdev->num_pd, 0);
- sdev->numa_node = dev_to_node(parent);
+ sdev->numa_node = dev_to_node(&netdev->dev);
spin_lock_init(&sdev->lock);
return sdev;
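
This hunk, together with the DMA_VIRT_OPS removals in the rxe and siw Kconfig changes above, depends on the core treating INFINIBAND_VIRT_DMA devices specially: a software RDMA device registered without a DMA-capable parent gets kernel virtual addresses back from the ib_dma_* helpers, so the drivers can drop dma_parms, DMA-mask setup, and the parent-device plumbing. A conceptual sketch, assumed to approximate the core's ib_uses_virt_dma() handling rather than quote it:

#include <linux/dma-mapping.h>
#include <rdma/ib_verbs.h>

static u64 demo_ib_dma_map_single(struct ib_device *dev, void *va,
				  size_t len, enum dma_data_direction dir)
{
	if (ib_uses_virt_dma(dev))
		return (uintptr_t)va;	/* software device: no real mapping */
	return dma_map_single(dev->dma_device, va, len, dir);
}
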
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index 7cf3242ffb41..68fd053fc774 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -307,6 +307,9 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
siw_dbg(base_dev, "create new QP\n");
+ if (attrs->create_flags)
+ return ERR_PTR(-EOPNOTSUPP);
+
if (atomic_inc_return(&sdev->num_qp) > SIW_MAX_QP) {
siw_dbg(base_dev, "too many QP's\n");
rv = -ENOMEM;
@@ -544,6 +547,9 @@ int siw_verbs_modify_qp(struct ib_qp *base_qp, struct ib_qp_attr *attr,
if (!attr_mask)
return 0;
+ if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
+ return -EOPNOTSUPP;
+
memset(&new_attrs, 0, sizeof(new_attrs));
if (attr_mask & IB_QP_ACCESS_FLAGS) {
@@ -1094,6 +1100,9 @@ int siw_create_cq(struct ib_cq *base_cq, const struct ib_cq_init_attr *attr,
struct siw_cq *cq = to_siw_cq(base_cq);
int rv, size = attr->cqe;
+ if (attr->flags)
+ return -EOPNOTSUPP;
+
if (atomic_inc_return(&sdev->num_cq) > SIW_MAX_CQ) {
siw_dbg(base_cq->device, "too many CQ's\n");
rv = -ENOMEM;
@@ -1555,6 +1564,9 @@ int siw_create_srq(struct ib_srq *base_srq,
base_ucontext);
int rv;
+ if (init_attrs->srq_type != IB_SRQT_BASIC)
+ return -EOPNOTSUPP;
+
if (atomic_inc_return(&sdev->num_srq) > SIW_MAX_SRQ) {
siw_dbg_pd(base_srq->pd, "too many SRQ's\n");
rv = -ENOMEM;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 8f0b598a46ec..d5d592bdab35 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1514,9 +1514,9 @@ static ssize_t show_mode(struct device *d, struct device_attribute *attr,
struct ipoib_dev_priv *priv = ipoib_priv(dev);
if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
- return sprintf(buf, "connected\n");
+ return sysfs_emit(buf, "connected\n");
else
- return sprintf(buf, "datagram\n");
+ return sysfs_emit(buf, "datagram\n");
}
static ssize_t set_mode(struct device *d, struct device_attribute *attr,
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index 67a21fdf5367..823f6831e7ea 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -166,6 +166,10 @@ static inline int ib_speed_enum_to_int(int speed)
return SPEED_14000;
case IB_SPEED_EDR:
return SPEED_25000;
+ case IB_SPEED_HDR:
+ return SPEED_50000;
+ case IB_SPEED_NDR:
+ return SPEED_100000;
}
return SPEED_UNKNOWN;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index abfab89423f4..a6f413491321 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -2266,7 +2266,7 @@ static ssize_t show_pkey(struct device *dev,
struct net_device *ndev = to_net_dev(dev);
struct ipoib_dev_priv *priv = ipoib_priv(ndev);
- return sprintf(buf, "0x%04x\n", priv->pkey);
+ return sysfs_emit(buf, "0x%04x\n", priv->pkey);
}
static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
@@ -2276,7 +2276,8 @@ static ssize_t show_umcast(struct device *dev,
struct net_device *ndev = to_net_dev(dev);
struct ipoib_dev_priv *priv = ipoib_priv(ndev);
- return sprintf(buf, "%d\n", test_bit(IPOIB_FLAG_UMCAST, &priv->flags));
+ return sysfs_emit(buf, "%d\n",
+ test_bit(IPOIB_FLAG_UMCAST, &priv->flags));
}
void ipoib_set_umcast(struct net_device *ndev, int umcast_val)
@@ -2446,7 +2447,7 @@ static ssize_t dev_id_show(struct device *dev,
"\"%s\" wants to know my dev_id. Should it look at dev_port instead? See Documentation/ABI/testing/sysfs-class-net for more info.\n",
current->comm);
- return sprintf(buf, "%#x\n", ndev->dev_id);
+ return sysfs_emit(buf, "%#x\n", ndev->dev_id);
}
static DEVICE_ATTR_RO(dev_id);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index 587252fd6f57..5a150a080ac2 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -158,6 +158,7 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
int ret, size, req_vec;
int i;
+ static atomic_t counter;
size = ipoib_recvq_size + 1;
ret = ipoib_cm_dev_init(dev);
@@ -171,8 +172,7 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
if (ret != -EOPNOTSUPP)
return ret;
- req_vec = (priv->port - 1) * 2;
-
+ req_vec = atomic_inc_return(&counter) * 2;
cq_attr.cqe = size;
cq_attr.comp_vector = req_vec % priv->ca->num_comp_vectors;
priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_rx_completion, NULL,
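
Deriving req_vec from the port number stacked every port-1 interface onto the same completion vectors; the global counter above spreads interfaces across all vectors the HCA exposes. The selection logic, reduced to a sketch with hypothetical names:

#include <linux/atomic.h>

static atomic_t demo_iface_counter;

static int demo_pick_recv_vector(int num_comp_vectors)
{
	int req_vec = atomic_inc_return(&demo_iface_counter) * 2;

	/* factor of 2: the send CQ is assumed to take req_vec + 1 */
	return req_vec % num_comp_vectors;
}
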
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 4c50a87ed7cc..5958840dbeed 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -46,7 +46,7 @@ static ssize_t show_parent(struct device *d, struct device_attribute *attr,
struct net_device *dev = to_net_dev(d);
struct ipoib_dev_priv *priv = ipoib_priv(dev);
- return sprintf(buf, "%s\n", priv->parent->name);
+ return sysfs_emit(buf, "%s\n", priv->parent->name);
}
static DEVICE_ATTR(parent, S_IRUGO, show_parent, NULL);
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 3690e28cc7ea..4792b9bf400f 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -187,23 +187,14 @@ iser_initialize_task_headers(struct iscsi_task *task,
struct iser_device *device = iser_conn->ib_conn.device;
struct iscsi_iser_task *iser_task = task->dd_data;
u64 dma_addr;
- const bool mgmt_task = !task->sc && !in_interrupt();
- int ret = 0;
- if (unlikely(mgmt_task))
- mutex_lock(&iser_conn->state_mutex);
-
- if (unlikely(iser_conn->state != ISER_CONN_UP)) {
- ret = -ENODEV;
- goto out;
- }
+ if (unlikely(iser_conn->state != ISER_CONN_UP))
+ return -ENODEV;
dma_addr = ib_dma_map_single(device->ib_device, (void *)tx_desc,
ISER_HEADERS_LEN, DMA_TO_DEVICE);
- if (ib_dma_mapping_error(device->ib_device, dma_addr)) {
- ret = -ENOMEM;
- goto out;
- }
+ if (ib_dma_mapping_error(device->ib_device, dma_addr))
+ return -ENOMEM;
tx_desc->inv_wr.next = NULL;
tx_desc->reg_wr.wr.next = NULL;
@@ -214,11 +205,8 @@ iser_initialize_task_headers(struct iscsi_task *task,
tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey;
iser_task->iser_conn = iser_conn;
-out:
- if (unlikely(mgmt_task))
- mutex_unlock(&iser_conn->state_mutex);
- return ret;
+ return 0;
}
/**
@@ -739,7 +727,7 @@ iscsi_iser_set_param(struct iscsi_cls_conn *cls_conn,
}
/**
- * iscsi_iser_set_param() - set class connection parameter
+ * iscsi_iser_conn_get_stats() - get iscsi connection statistics
* @cls_conn: iscsi class connection
* @stats: iscsi stats to output
*
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 436e17f1d0e5..2ba27221ea85 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -28,6 +28,18 @@ static int isert_debug_level;
module_param_named(debug_level, isert_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)");
+static int isert_sg_tablesize_set(const char *val,
+ const struct kernel_param *kp);
+static const struct kernel_param_ops sg_tablesize_ops = {
+ .set = isert_sg_tablesize_set,
+ .get = param_get_int,
+};
+
+static int isert_sg_tablesize = ISCSI_ISER_DEF_SG_TABLESIZE;
+module_param_cb(sg_tablesize, &sg_tablesize_ops, &isert_sg_tablesize, 0644);
+MODULE_PARM_DESC(sg_tablesize,
+ "Number of gather/scatter entries in a single scsi command, should >= 128 (default: 256, max: 4096)");
+
static DEFINE_MUTEX(device_list_mutex);
static LIST_HEAD(device_list);
static struct workqueue_struct *isert_comp_wq;
@@ -47,6 +59,19 @@ static void isert_send_done(struct ib_cq *cq, struct ib_wc *wc);
static void isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc);
+static int isert_sg_tablesize_set(const char *val, const struct kernel_param *kp)
+{
+ int n = 0, ret;
+
+ ret = kstrtoint(val, 10, &n);
+ if (ret != 0 || n < ISCSI_ISER_MIN_SG_TABLESIZE ||
+ n > ISCSI_ISER_MAX_SG_TABLESIZE)
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
+
static inline bool
isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
{
@@ -101,7 +126,7 @@ isert_create_qp(struct isert_conn *isert_conn,
attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS + 1;
attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
factor = rdma_rw_mr_factor(device->ib_device, cma_id->port_num,
- ISCSI_ISER_MAX_SG_TABLESIZE);
+ isert_sg_tablesize);
attr.cap.max_rdma_ctxs = ISCSI_DEF_XMIT_CMDS_MAX * factor;
attr.cap.max_send_sge = device->ib_device->attrs.max_send_sge;
attr.cap.max_recv_sge = 1;
@@ -1076,7 +1101,7 @@ isert_handle_scsi_cmd(struct isert_conn *isert_conn,
sequence_cmd:
rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
- if (!rc && dump_payload == false && unsol_data)
+ if (!rc && !dump_payload && unsol_data)
iscsit_set_unsolicited_dataout(cmd);
else if (dump_payload && imm_data)
target_put_sess_cmd(&cmd->se_cmd);
@@ -1528,12 +1553,12 @@ isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
}
sec_offset_err = mr_status.sig_err.sig_err_offset;
do_div(sec_offset_err, block_size);
- se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba;
+ se_cmd->sense_info = sec_offset_err + se_cmd->t_task_lba;
isert_err("PI error found type %d at sector 0x%llx "
"expected 0x%x vs actual 0x%x\n",
mr_status.sig_err.err_type,
- (unsigned long long)se_cmd->bad_sector,
+ (unsigned long long)se_cmd->sense_info,
mr_status.sig_err.expected,
mr_status.sig_err.actual);
ret = 1;
@@ -2471,7 +2496,7 @@ isert_wait4cmds(struct iscsi_conn *conn)
isert_info("iscsi_conn %p\n", conn);
if (conn->sess) {
- target_sess_cmd_list_set_waiting(conn->sess->se_sess);
+ target_stop_session(conn->sess->se_sess);
target_wait_for_sess_cmds(conn->sess->se_sess);
}
}
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 7fee4a65e181..6c5af13db4e0 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -65,6 +65,12 @@
*/
#define ISER_RX_SIZE (ISCSI_DEF_MAX_RECV_SEG_LEN + 1024)
+/* Default I/O size is 1MB */
+#define ISCSI_ISER_DEF_SG_TABLESIZE 256
+
+/* Minimum I/O size is 512KB */
+#define ISCSI_ISER_MIN_SG_TABLESIZE 128
+
/* Maximum support is 16MB I/O size */
#define ISCSI_ISER_MAX_SG_TABLESIZE 4096
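
The three limits encode I/O sizes under the usual one-page-per-entry assumption with 4 KiB pages. A compile-time restatement of the size comments, illustrative only:

#include <linux/build_bug.h>
#include <linux/sizes.h>

static_assert(128  * SZ_4K == SZ_512K);	/* min sg_tablesize: 512 KiB I/O */
static_assert(256  * SZ_4K == SZ_1M);	/* default:          1 MiB I/O  */
static_assert(4096 * SZ_4K == SZ_16M);	/* max:              16 MiB I/O */
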
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h
index f64519872297..012fc27c5c93 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h
@@ -437,7 +437,7 @@ struct opa_veswport_trap {
} __packed;
/**
- * struct opa_vnic_iface_macs_entry - single entry in the mac list
+ * struct opa_vnic_iface_mac_entry - single entry in the mac list
* @mac_addr: MAC address
*/
struct opa_vnic_iface_mac_entry {
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c
index 868b5aec1537..292c037aa239 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c
@@ -74,7 +74,7 @@ void opa_vnic_vema_report_event(struct opa_vnic_adapter *adapter, u8 event)
}
/**
- * opa_vnic_get_error_counters - get summary counters
+ * opa_vnic_get_summary_counters - get summary counters
* @adapter: vnic port adapter
* @cntrs: pointer to destination summary counters structure
*
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
index ac4c49cbf153..ba00f0de14ca 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
@@ -52,7 +52,8 @@ static ssize_t max_reconnect_attempts_show(struct device *dev,
{
struct rtrs_clt *clt = container_of(dev, struct rtrs_clt, dev);
- return sprintf(page, "%d\n", rtrs_clt_get_max_reconnect_attempts(clt));
+ return sysfs_emit(page, "%d\n",
+ rtrs_clt_get_max_reconnect_attempts(clt));
}
static ssize_t max_reconnect_attempts_store(struct device *dev,
@@ -95,11 +96,13 @@ static ssize_t mpath_policy_show(struct device *dev,
switch (clt->mp_policy) {
case MP_POLICY_RR:
- return sprintf(page, "round-robin (RR: %d)\n", clt->mp_policy);
+ return sysfs_emit(page, "round-robin (RR: %d)\n",
+ clt->mp_policy);
case MP_POLICY_MIN_INFLIGHT:
- return sprintf(page, "min-inflight (MI: %d)\n", clt->mp_policy);
+ return sysfs_emit(page, "min-inflight (MI: %d)\n",
+ clt->mp_policy);
default:
- return sprintf(page, "Unknown (%d)\n", clt->mp_policy);
+ return sysfs_emit(page, "Unknown (%d)\n", clt->mp_policy);
}
}
@@ -138,9 +141,10 @@ static DEVICE_ATTR_RW(mpath_policy);
static ssize_t add_path_show(struct device *dev,
struct device_attribute *attr, char *page)
{
- return scnprintf(page, PAGE_SIZE,
- "Usage: echo [<source addr>@]<destination addr> > %s\n\n*addr ::= [ ip:<ipv4|ipv6> | gid:<gid> ]\n",
- attr->attr.name);
+ return sysfs_emit(
+ page,
+ "Usage: echo [<source addr>@]<destination addr> > %s\n\n*addr ::= [ ip:<ipv4|ipv6> | gid:<gid> ]\n",
+ attr->attr.name);
}
static ssize_t add_path_store(struct device *dev,
@@ -184,20 +188,18 @@ static ssize_t rtrs_clt_state_show(struct kobject *kobj,
sess = container_of(kobj, struct rtrs_clt_sess, kobj);
if (sess->state == RTRS_CLT_CONNECTED)
- return sprintf(page, "connected\n");
+ return sysfs_emit(page, "connected\n");
- return sprintf(page, "disconnected\n");
+ return sysfs_emit(page, "disconnected\n");
}
static struct kobj_attribute rtrs_clt_state_attr =
__ATTR(state, 0444, rtrs_clt_state_show, NULL);
static ssize_t rtrs_clt_reconnect_show(struct kobject *kobj,
- struct kobj_attribute *attr,
- char *page)
+ struct kobj_attribute *attr, char *buf)
{
- return scnprintf(page, PAGE_SIZE, "Usage: echo 1 > %s\n",
- attr->attr.name);
+ return sysfs_emit(buf, "Usage: echo 1 > %s\n", attr->attr.name);
}
static ssize_t rtrs_clt_reconnect_store(struct kobject *kobj,
@@ -225,11 +227,9 @@ static struct kobj_attribute rtrs_clt_reconnect_attr =
rtrs_clt_reconnect_store);
static ssize_t rtrs_clt_disconnect_show(struct kobject *kobj,
- struct kobj_attribute *attr,
- char *page)
+ struct kobj_attribute *attr, char *buf)
{
- return scnprintf(page, PAGE_SIZE, "Usage: echo 1 > %s\n",
- attr->attr.name);
+ return sysfs_emit(buf, "Usage: echo 1 > %s\n", attr->attr.name);
}
static ssize_t rtrs_clt_disconnect_store(struct kobject *kobj,
@@ -257,11 +257,9 @@ static struct kobj_attribute rtrs_clt_disconnect_attr =
rtrs_clt_disconnect_store);
static ssize_t rtrs_clt_remove_path_show(struct kobject *kobj,
- struct kobj_attribute *attr,
- char *page)
+ struct kobj_attribute *attr, char *buf)
{
- return scnprintf(page, PAGE_SIZE, "Usage: echo 1 > %s\n",
- attr->attr.name);
+ return sysfs_emit(buf, "Usage: echo 1 > %s\n", attr->attr.name);
}
static ssize_t rtrs_clt_remove_path_store(struct kobject *kobj,
@@ -324,7 +322,7 @@ static ssize_t rtrs_clt_hca_port_show(struct kobject *kobj,
sess = container_of(kobj, typeof(*sess), kobj);
- return scnprintf(page, PAGE_SIZE, "%u\n", sess->hca_port);
+ return sysfs_emit(page, "%u\n", sess->hca_port);
}
static struct kobj_attribute rtrs_clt_hca_port_attr =
@@ -338,7 +336,7 @@ static ssize_t rtrs_clt_hca_name_show(struct kobject *kobj,
sess = container_of(kobj, struct rtrs_clt_sess, kobj);
- return scnprintf(page, PAGE_SIZE, "%s\n", sess->hca_name);
+ return sysfs_emit(page, "%s\n", sess->hca_name);
}
static struct kobj_attribute rtrs_clt_hca_name_attr =
@@ -349,12 +347,13 @@ static ssize_t rtrs_clt_src_addr_show(struct kobject *kobj,
char *page)
{
struct rtrs_clt_sess *sess;
- int cnt;
+ int len;
sess = container_of(kobj, struct rtrs_clt_sess, kobj);
- cnt = sockaddr_to_str((struct sockaddr *)&sess->s.src_addr,
- page, PAGE_SIZE);
- return cnt + scnprintf(page + cnt, PAGE_SIZE - cnt, "\n");
+ len = sockaddr_to_str((struct sockaddr *)&sess->s.src_addr, page,
+ PAGE_SIZE);
+ len += sysfs_emit_at(page, len, "\n");
+ return len;
}
static struct kobj_attribute rtrs_clt_src_addr_attr =
@@ -365,12 +364,13 @@ static ssize_t rtrs_clt_dst_addr_show(struct kobject *kobj,
char *page)
{
struct rtrs_clt_sess *sess;
- int cnt;
+ int len;
sess = container_of(kobj, struct rtrs_clt_sess, kobj);
- cnt = sockaddr_to_str((struct sockaddr *)&sess->s.dst_addr,
- page, PAGE_SIZE);
- return cnt + scnprintf(page + cnt, PAGE_SIZE - cnt, "\n");
+ len = sockaddr_to_str((struct sockaddr *)&sess->s.dst_addr, page,
+ PAGE_SIZE);
+ len += sysfs_emit_at(page, len, "\n");
+ return len;
}
static struct kobj_attribute rtrs_clt_dst_addr_attr =
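
The sprintf()/scnprintf() conversions in this file, and in the ipoib, srp, and srpt files elsewhere in this patch, are mechanical but not purely cosmetic: sysfs_emit() checks that the buffer really is the page-aligned, PAGE_SIZE buffer sysfs passes to show() callbacks and cannot run past it, and sysfs_emit_at() does the same for appends at an offset. A minimal show() callback in the new style; the value and attribute are hypothetical:

#include <linux/device.h>
#include <linux/sysfs.h>

static int demo_value = 42;

static ssize_t demo_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	int len;

	len = sysfs_emit(buf, "%d", demo_value);
	len += sysfs_emit_at(buf, len, "\n");	/* append at an offset */
	return len;
}
static DEVICE_ATTR_RO(demo);
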
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
index f298adc02acb..67f86c405a26 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -157,12 +157,6 @@ void rtrs_clt_put_permit(struct rtrs_clt *clt, struct rtrs_permit *permit)
}
EXPORT_SYMBOL(rtrs_clt_put_permit);
-void *rtrs_permit_to_pdu(struct rtrs_permit *permit)
-{
- return permit + 1;
-}
-EXPORT_SYMBOL(rtrs_permit_to_pdu);
-
/**
* rtrs_permit_to_clt_con() - returns RDMA connection pointer by the permit
* @sess: client session pointer
@@ -1236,8 +1230,7 @@ static void free_sess_reqs(struct rtrs_clt_sess *sess)
if (req->mr)
ib_dereg_mr(req->mr);
kfree(req->sge);
- rtrs_iu_free(req->iu, DMA_TO_DEVICE,
- sess->s.dev->ib_dev, 1);
+ rtrs_iu_free(req->iu, sess->s.dev->ib_dev, 1);
}
kfree(sess->reqs);
sess->reqs = NULL;
@@ -1499,6 +1492,7 @@ static int create_con(struct rtrs_clt_sess *sess, unsigned int cid)
con->c.cid = cid;
con->c.sess = &sess->s;
atomic_set(&con->io_cnt, 0);
+ mutex_init(&con->con_mutex);
sess->s.con[cid] = &con->c;
@@ -1510,6 +1504,7 @@ static void destroy_con(struct rtrs_clt_con *con)
struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
sess->s.con[con->c.cid] = NULL;
+ mutex_destroy(&con->con_mutex);
kfree(con);
}
@@ -1520,15 +1515,7 @@ static int create_con_cq_qp(struct rtrs_clt_con *con)
int err, cq_vector;
struct rtrs_msg_rkey_rsp *rsp;
- /*
- * This function can fail, but still destroy_con_cq_qp() should
- * be called, this is because create_con_cq_qp() is called on cm
- * event path, thus caller/waiter never knows: have we failed before
- * create_con_cq_qp() or after. To solve this dilemma without
- * creating any additional flags just allow destroy_con_cq_qp() be
- * called many times.
- */
-
+ lockdep_assert_held(&con->con_mutex);
if (con->c.cid == 0) {
/*
* One completion for each receive and two for each send
@@ -1602,11 +1589,10 @@ static void destroy_con_cq_qp(struct rtrs_clt_con *con)
* Be careful here: destroy_con_cq_qp() can be called even
* create_con_cq_qp() failed, see comments there.
*/
-
+ lockdep_assert_held(&con->con_mutex);
rtrs_cq_qp_destroy(&con->c);
if (con->rsp_ius) {
- rtrs_iu_free(con->rsp_ius, DMA_FROM_DEVICE,
- sess->s.dev->ib_dev, con->queue_size);
+ rtrs_iu_free(con->rsp_ius, sess->s.dev->ib_dev, con->queue_size);
con->rsp_ius = NULL;
con->queue_size = 0;
}
@@ -1634,16 +1620,16 @@ static int rtrs_rdma_addr_resolved(struct rtrs_clt_con *con)
struct rtrs_sess *s = con->c.sess;
int err;
+ mutex_lock(&con->con_mutex);
err = create_con_cq_qp(con);
+ mutex_unlock(&con->con_mutex);
if (err) {
rtrs_err(s, "create_con_cq_qp(), err: %d\n", err);
return err;
}
err = rdma_resolve_route(con->c.cm_id, RTRS_CONNECT_TIMEOUT_MS);
- if (err) {
+ if (err)
rtrs_err(s, "Resolving route failed, err: %d\n", err);
- destroy_con_cq_qp(con);
- }
return err;
}
@@ -1837,8 +1823,8 @@ static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id,
cm_err = rtrs_rdma_route_resolved(con);
break;
case RDMA_CM_EVENT_ESTABLISHED:
- con->cm_err = rtrs_rdma_conn_established(con, ev);
- if (likely(!con->cm_err)) {
+ cm_err = rtrs_rdma_conn_established(con, ev);
+ if (likely(!cm_err)) {
/*
* Report success and wake up. Here we abuse state_wq,
* i.e. wake up without state change, but we set cm_err.
@@ -1851,20 +1837,22 @@ static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id,
case RDMA_CM_EVENT_REJECTED:
cm_err = rtrs_rdma_conn_rejected(con, ev);
break;
+ case RDMA_CM_EVENT_DISCONNECTED:
+ /* No message for disconnecting */
+ cm_err = -ECONNRESET;
+ break;
case RDMA_CM_EVENT_CONNECT_ERROR:
case RDMA_CM_EVENT_UNREACHABLE:
+ case RDMA_CM_EVENT_ADDR_CHANGE:
+ case RDMA_CM_EVENT_TIMEWAIT_EXIT:
rtrs_wrn(s, "CM error event %d\n", ev->event);
cm_err = -ECONNRESET;
break;
case RDMA_CM_EVENT_ADDR_ERROR:
case RDMA_CM_EVENT_ROUTE_ERROR:
+ rtrs_wrn(s, "CM error event %d\n", ev->event);
cm_err = -EHOSTUNREACH;
break;
- case RDMA_CM_EVENT_DISCONNECTED:
- case RDMA_CM_EVENT_ADDR_CHANGE:
- case RDMA_CM_EVENT_TIMEWAIT_EXIT:
- cm_err = -ECONNRESET;
- break;
case RDMA_CM_EVENT_DEVICE_REMOVAL:
/*
* Device removal is a special case. Queue close and return 0.
@@ -1949,8 +1937,9 @@ static int create_cm(struct rtrs_clt_con *con)
errr:
stop_cm(con);
- /* Is safe to call destroy if cq_qp is not inited */
+ mutex_lock(&con->con_mutex);
destroy_con_cq_qp(con);
+ mutex_unlock(&con->con_mutex);
destroy_cm:
destroy_cm(con);
@@ -2057,7 +2046,9 @@ static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_sess *sess)
if (!sess->s.con[cid])
break;
con = to_clt_con(sess->s.con[cid]);
+ mutex_lock(&con->con_mutex);
destroy_con_cq_qp(con);
+ mutex_unlock(&con->con_mutex);
destroy_cm(con);
destroy_con(con);
}
@@ -2164,8 +2155,7 @@ static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_sess *sess)
mutex_unlock(&clt->paths_mutex);
}
-static void rtrs_clt_add_path_to_arr(struct rtrs_clt_sess *sess,
- struct rtrs_addr *addr)
+static void rtrs_clt_add_path_to_arr(struct rtrs_clt_sess *sess)
{
struct rtrs_clt *clt = sess->clt;
@@ -2224,7 +2214,10 @@ destroy:
struct rtrs_clt_con *con = to_clt_con(sess->s.con[cid]);
stop_cm(con);
+
+ mutex_lock(&con->con_mutex);
destroy_con_cq_qp(con);
+ mutex_unlock(&con->con_mutex);
destroy_cm(con);
destroy_con(con);
}
@@ -2245,7 +2238,7 @@ static void rtrs_clt_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
struct rtrs_iu *iu;
iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
- rtrs_iu_free(iu, DMA_TO_DEVICE, sess->s.dev->ib_dev, 1);
+ rtrs_iu_free(iu, sess->s.dev->ib_dev, 1);
if (unlikely(wc->status != IB_WC_SUCCESS)) {
rtrs_err(sess->clt, "Sess info request send failed: %s\n",
@@ -2264,8 +2257,12 @@ static int process_info_rsp(struct rtrs_clt_sess *sess,
int i, sgi;
sg_cnt = le16_to_cpu(msg->sg_cnt);
- if (unlikely(!sg_cnt))
+ if (unlikely(!sg_cnt || (sess->queue_depth % sg_cnt))) {
+ rtrs_err(sess->clt, "Incorrect sg_cnt %d, is not multiple\n",
+ sg_cnt);
return -EINVAL;
+ }
+
/*
* Check if IB immediate data size is enough to hold the mem_id and
* the offset inside the memory chunk.
@@ -2278,11 +2275,6 @@ static int process_info_rsp(struct rtrs_clt_sess *sess,
MAX_IMM_PAYL_BITS, sg_cnt, sess->chunk_size);
return -EINVAL;
}
- if (unlikely(!sg_cnt || (sess->queue_depth % sg_cnt))) {
- rtrs_err(sess->clt, "Incorrect sg_cnt %d, is not multiple\n",
- sg_cnt);
- return -EINVAL;
- }
total_len = 0;
for (sgi = 0, i = 0; sgi < sg_cnt && i < sess->queue_depth; sgi++) {
const struct rtrs_sg_desc *desc = &msg->desc[sgi];
@@ -2374,7 +2366,7 @@ static void rtrs_clt_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
out:
rtrs_clt_update_wc_stats(con);
- rtrs_iu_free(iu, DMA_FROM_DEVICE, sess->s.dev->ib_dev, 1);
+ rtrs_iu_free(iu, sess->s.dev->ib_dev, 1);
rtrs_clt_change_state(sess, state);
}
@@ -2436,9 +2428,9 @@ static int rtrs_send_sess_info(struct rtrs_clt_sess *sess)
out:
if (tx_iu)
- rtrs_iu_free(tx_iu, DMA_TO_DEVICE, sess->s.dev->ib_dev, 1);
+ rtrs_iu_free(tx_iu, sess->s.dev->ib_dev, 1);
if (rx_iu)
- rtrs_iu_free(rx_iu, DMA_FROM_DEVICE, sess->s.dev->ib_dev, 1);
+ rtrs_iu_free(rx_iu, sess->s.dev->ib_dev, 1);
if (unlikely(err))
/* If we've never taken async path because of malloc problems */
rtrs_clt_change_state(sess, RTRS_CLT_CONNECTING_ERR);
@@ -2938,7 +2930,7 @@ int rtrs_clt_create_path_from_sysfs(struct rtrs_clt *clt,
* IO will never grab it. Also it is very important to add
* path before init, since init fires LINK_CONNECTED event.
*/
- rtrs_clt_add_path_to_arr(sess, addr);
+ rtrs_clt_add_path_to_arr(sess);
err = init_sess(sess);
if (err)
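
Rather than tolerating repeated destroy_con_cq_qp() calls, as the deleted comment described, the patch serializes every create and destroy of the per-connection CQ/QP behind con_mutex and states the rule with lockdep assertions. The shape of the pattern, with hypothetical names:

#include <linux/lockdep.h>
#include <linux/mutex.h>

struct demo_con {
	struct mutex con_mutex;	/* guards CQ/QP lifetime */
};

static void demo_destroy_cq_qp(struct demo_con *con)
{
	lockdep_assert_held(&con->con_mutex);
	/* tear down the QP and CQ exactly once per create */
}

static void demo_close(struct demo_con *con)
{
	mutex_lock(&con->con_mutex);
	demo_destroy_cq_qp(con);
	mutex_unlock(&con->con_mutex);
}
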
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.h b/drivers/infiniband/ulp/rtrs/rtrs-clt.h
index 167acd3c90fc..b8dbd701b3cb 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.h
@@ -72,6 +72,7 @@ struct rtrs_clt_con {
struct rtrs_iu *rsp_ius;
u32 queue_size;
unsigned int cpu;
+ struct mutex con_mutex;
atomic_t io_cnt;
int cm_err;
};
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-pri.h b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
index b8e43dc4d95a..3f2918671dbe 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-pri.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
@@ -287,8 +287,7 @@ struct rtrs_msg_rdma_hdr {
struct rtrs_iu *rtrs_iu_alloc(u32 queue_size, size_t size, gfp_t t,
struct ib_device *dev, enum dma_data_direction,
void (*done)(struct ib_cq *cq, struct ib_wc *wc));
-void rtrs_iu_free(struct rtrs_iu *iu, enum dma_data_direction dir,
- struct ib_device *dev, u32 queue_size);
+void rtrs_iu_free(struct rtrs_iu *iu, struct ib_device *dev, u32 queue_size);
int rtrs_iu_post_recv(struct rtrs_con *con, struct rtrs_iu *iu);
int rtrs_iu_post_send(struct rtrs_con *con, struct rtrs_iu *iu, size_t size,
struct ib_send_wr *head);
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
index 07fbb063555d..d2edff3b8f0d 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
@@ -27,11 +27,9 @@ static struct kobj_type ktype = {
};
static ssize_t rtrs_srv_disconnect_show(struct kobject *kobj,
- struct kobj_attribute *attr,
- char *page)
+ struct kobj_attribute *attr, char *buf)
{
- return scnprintf(page, PAGE_SIZE, "Usage: echo 1 > %s\n",
- attr->attr.name);
+ return sysfs_emit(buf, "Usage: echo 1 > %s\n", attr->attr.name);
}
static ssize_t rtrs_srv_disconnect_store(struct kobject *kobj,
@@ -72,8 +70,7 @@ static ssize_t rtrs_srv_hca_port_show(struct kobject *kobj,
sess = container_of(kobj, typeof(*sess), kobj);
usr_con = sess->s.con[0];
- return scnprintf(page, PAGE_SIZE, "%u\n",
- usr_con->cm_id->port_num);
+ return sysfs_emit(page, "%u\n", usr_con->cm_id->port_num);
}
static struct kobj_attribute rtrs_srv_hca_port_attr =
@@ -87,8 +84,7 @@ static ssize_t rtrs_srv_hca_name_show(struct kobject *kobj,
sess = container_of(kobj, struct rtrs_srv_sess, kobj);
- return scnprintf(page, PAGE_SIZE, "%s\n",
- sess->s.dev->ib_dev->name);
+ return sysfs_emit(page, "%s\n", sess->s.dev->ib_dev->name);
}
static struct kobj_attribute rtrs_srv_hca_name_attr =
@@ -115,12 +111,13 @@ static ssize_t rtrs_srv_dst_addr_show(struct kobject *kobj,
char *page)
{
struct rtrs_srv_sess *sess;
- int cnt;
+ int len;
sess = container_of(kobj, struct rtrs_srv_sess, kobj);
- cnt = sockaddr_to_str((struct sockaddr *)&sess->s.src_addr,
- page, PAGE_SIZE);
- return cnt + scnprintf(page + cnt, PAGE_SIZE - cnt, "\n");
+ len = sockaddr_to_str((struct sockaddr *)&sess->s.src_addr, page,
+ PAGE_SIZE);
+ len += sysfs_emit_at(page, len, "\n");
+ return len;
}
static struct kobj_attribute rtrs_srv_dst_addr_attr =
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
index d6f93601712e..c42fd470c4eb 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
@@ -113,28 +113,18 @@ static bool __rtrs_srv_change_state(struct rtrs_srv_sess *sess,
return changed;
}
-static bool rtrs_srv_change_state_get_old(struct rtrs_srv_sess *sess,
- enum rtrs_srv_state new_state,
- enum rtrs_srv_state *old_state)
+static bool rtrs_srv_change_state(struct rtrs_srv_sess *sess,
+ enum rtrs_srv_state new_state)
{
bool changed;
spin_lock_irq(&sess->state_lock);
- *old_state = sess->state;
changed = __rtrs_srv_change_state(sess, new_state);
spin_unlock_irq(&sess->state_lock);
return changed;
}
-static bool rtrs_srv_change_state(struct rtrs_srv_sess *sess,
- enum rtrs_srv_state new_state)
-{
- enum rtrs_srv_state old_state;
-
- return rtrs_srv_change_state_get_old(sess, new_state, &old_state);
-}
-
static void free_id(struct rtrs_srv_op *id)
{
if (!id)
@@ -471,10 +461,7 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
void close_sess(struct rtrs_srv_sess *sess)
{
- enum rtrs_srv_state old_state;
-
- if (rtrs_srv_change_state_get_old(sess, RTRS_SRV_CLOSING,
- &old_state))
+ if (rtrs_srv_change_state(sess, RTRS_SRV_CLOSING))
queue_work(rtrs_wq, &sess->close_work);
WARN_ON(sess->state != RTRS_SRV_CLOSING);
}
@@ -577,8 +564,7 @@ static void unmap_cont_bufs(struct rtrs_srv_sess *sess)
struct rtrs_srv_mr *srv_mr;
srv_mr = &sess->mrs[i];
- rtrs_iu_free(srv_mr->iu, DMA_TO_DEVICE,
- sess->s.dev->ib_dev, 1);
+ rtrs_iu_free(srv_mr->iu, sess->s.dev->ib_dev, 1);
ib_dereg_mr(srv_mr->mr);
ib_dma_unmap_sg(sess->s.dev->ib_dev, srv_mr->sgt.sgl,
srv_mr->sgt.nents, DMA_BIDIRECTIONAL);
@@ -682,8 +668,7 @@ err:
sgt = &srv_mr->sgt;
mr = srv_mr->mr;
free_iu:
- rtrs_iu_free(srv_mr->iu, DMA_TO_DEVICE,
- sess->s.dev->ib_dev, 1);
+ rtrs_iu_free(srv_mr->iu, sess->s.dev->ib_dev, 1);
dereg_mr:
ib_dereg_mr(mr);
unmap_sg:
@@ -735,7 +720,7 @@ static void rtrs_srv_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
struct rtrs_iu *iu;
iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
- rtrs_iu_free(iu, DMA_TO_DEVICE, sess->s.dev->ib_dev, 1);
+ rtrs_iu_free(iu, sess->s.dev->ib_dev, 1);
if (unlikely(wc->status != IB_WC_SUCCESS)) {
rtrs_err(s, "Sess info response send failed: %s\n",
@@ -861,7 +846,7 @@ static int process_info_req(struct rtrs_srv_con *con,
if (unlikely(err)) {
rtrs_err(s, "rtrs_iu_post_send(), err: %d\n", err);
iu_free:
- rtrs_iu_free(tx_iu, DMA_TO_DEVICE, sess->s.dev->ib_dev, 1);
+ rtrs_iu_free(tx_iu, sess->s.dev->ib_dev, 1);
}
rwr_free:
kfree(rwr);
@@ -906,7 +891,7 @@ static void rtrs_srv_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
goto close;
out:
- rtrs_iu_free(iu, DMA_FROM_DEVICE, sess->s.dev->ib_dev, 1);
+ rtrs_iu_free(iu, sess->s.dev->ib_dev, 1);
return;
close:
close_sess(sess);
@@ -929,7 +914,7 @@ static int post_recv_info_req(struct rtrs_srv_con *con)
err = rtrs_iu_post_recv(&con->c, rx_iu);
if (unlikely(err)) {
rtrs_err(s, "rtrs_iu_post_recv(), err: %d\n", err);
- rtrs_iu_free(rx_iu, DMA_FROM_DEVICE, sess->s.dev->ib_dev, 1);
+ rtrs_iu_free(rx_iu, sess->s.dev->ib_dev, 1);
return err;
}
@@ -1328,17 +1313,42 @@ static void rtrs_srv_dev_release(struct device *dev)
kfree(srv);
}
-static struct rtrs_srv *__alloc_srv(struct rtrs_srv_ctx *ctx,
- const uuid_t *paths_uuid)
+static void free_srv(struct rtrs_srv *srv)
+{
+ int i;
+
+ WARN_ON(refcount_read(&srv->refcount));
+ for (i = 0; i < srv->queue_depth; i++)
+ mempool_free(srv->chunks[i], chunk_pool);
+ kfree(srv->chunks);
+ mutex_destroy(&srv->paths_mutex);
+ mutex_destroy(&srv->paths_ev_mutex);
+ /* last put to release the srv structure */
+ put_device(&srv->dev);
+}
+
+static struct rtrs_srv *get_or_create_srv(struct rtrs_srv_ctx *ctx,
+ const uuid_t *paths_uuid)
{
struct rtrs_srv *srv;
int i;
+ mutex_lock(&ctx->srv_mutex);
+ list_for_each_entry(srv, &ctx->srv_list, ctx_list) {
+ if (uuid_equal(&srv->paths_uuid, paths_uuid) &&
+ refcount_inc_not_zero(&srv->refcount)) {
+ mutex_unlock(&ctx->srv_mutex);
+ return srv;
+ }
+ }
+
+ /* need to allocate a new srv */
srv = kzalloc(sizeof(*srv), GFP_KERNEL);
- if (!srv)
+ if (!srv) {
+ mutex_unlock(&ctx->srv_mutex);
return NULL;
+ }
- refcount_set(&srv->refcount, 1);
INIT_LIST_HEAD(&srv->paths_list);
mutex_init(&srv->paths_mutex);
mutex_init(&srv->paths_ev_mutex);
@@ -1347,6 +1357,8 @@ static struct rtrs_srv *__alloc_srv(struct rtrs_srv_ctx *ctx,
srv->ctx = ctx;
device_initialize(&srv->dev);
srv->dev.release = rtrs_srv_dev_release;
+ list_add(&srv->ctx_list, &ctx->srv_list);
+ mutex_unlock(&ctx->srv_mutex);
srv->chunks = kcalloc(srv->queue_depth, sizeof(*srv->chunks),
GFP_KERNEL);
@@ -1358,7 +1370,7 @@ static struct rtrs_srv *__alloc_srv(struct rtrs_srv_ctx *ctx,
if (!srv->chunks[i])
goto err_free_chunks;
}
- list_add(&srv->ctx_list, &ctx->srv_list);
+ refcount_set(&srv->refcount, 1);
return srv;
@@ -1369,52 +1381,9 @@ err_free_chunks:
err_free_srv:
kfree(srv);
-
- return NULL;
-}
-
-static void free_srv(struct rtrs_srv *srv)
-{
- int i;
-
- WARN_ON(refcount_read(&srv->refcount));
- for (i = 0; i < srv->queue_depth; i++)
- mempool_free(srv->chunks[i], chunk_pool);
- kfree(srv->chunks);
- mutex_destroy(&srv->paths_mutex);
- mutex_destroy(&srv->paths_ev_mutex);
- /* last put to release the srv structure */
- put_device(&srv->dev);
-}
-
-static inline struct rtrs_srv *__find_srv_and_get(struct rtrs_srv_ctx *ctx,
- const uuid_t *paths_uuid)
-{
- struct rtrs_srv *srv;
-
- list_for_each_entry(srv, &ctx->srv_list, ctx_list) {
- if (uuid_equal(&srv->paths_uuid, paths_uuid) &&
- refcount_inc_not_zero(&srv->refcount))
- return srv;
- }
-
return NULL;
}
-static struct rtrs_srv *get_or_create_srv(struct rtrs_srv_ctx *ctx,
- const uuid_t *paths_uuid)
-{
- struct rtrs_srv *srv;
-
- mutex_lock(&ctx->srv_mutex);
- srv = __find_srv_and_get(ctx, paths_uuid);
- if (!srv)
- srv = __alloc_srv(ctx, paths_uuid);
- mutex_unlock(&ctx->srv_mutex);
-
- return srv;
-}
-
static void put_srv(struct rtrs_srv *srv)
{
if (refcount_dec_and_test(&srv->refcount)) {
@@ -1813,7 +1782,11 @@ static int rtrs_rdma_connect(struct rdma_cm_id *cm_id,
}
recon_cnt = le16_to_cpu(msg->recon_cnt);
srv = get_or_create_srv(ctx, &msg->paths_uuid);
- if (!srv) {
+ /*
+ * "refcount == 0" happens if a previous thread calls get_or_create_srv
+ * allocate srv, but chunks of srv are not allocated yet.
+ */
+ if (!srv || refcount_read(&srv->refcount) == 0) {
err = -ENOMEM;
goto reject_w_err;
}
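
Merging __find_srv_and_get() and __alloc_srv() lets the whole lookup-or-insert run under srv_mutex, and postponing refcount_set(&srv->refcount, 1) until after the chunk allocations turns the refcount into a publish flag: a concurrent connect can see the half-built srv on the list, fail refcount_inc_not_zero(), and be rejected by the refcount_read() == 0 check below. The generic pattern, sketched with hypothetical names (the key comparison is omitted):

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/refcount.h>

struct demo_obj {
	struct list_head node;
	refcount_t ref;		/* stays 0 until fully constructed */
};

static struct demo_obj *demo_find_get(struct list_head *head,
				      struct mutex *lock)
{
	struct demo_obj *obj;

	mutex_lock(lock);
	list_for_each_entry(obj, head, node) {
		if (refcount_inc_not_zero(&obj->ref)) {
			mutex_unlock(lock);
			return obj;	/* construction has finished */
		}
	}
	mutex_unlock(lock);
	return NULL;
}
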
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.h b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
index 08b0b8a6eebe..9543ae19996c 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
@@ -62,7 +62,7 @@ struct rtrs_srv_op {
/*
* server side memory region context, when always_invalidate=Y, we need
- * queue_depth of memory regrion to invalidate each memory region.
+ * queue_depth of memory region to invalidate each memory region.
*/
struct rtrs_srv_mr {
struct ib_mr *mr;
diff --git a/drivers/infiniband/ulp/rtrs/rtrs.c b/drivers/infiniband/ulp/rtrs/rtrs.c
index ff1093d6e4bc..2e3a849e0a77 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs.c
@@ -31,6 +31,7 @@ struct rtrs_iu *rtrs_iu_alloc(u32 queue_size, size_t size, gfp_t gfp_mask,
return NULL;
for (i = 0; i < queue_size; i++) {
iu = &ius[i];
+ iu->direction = dir;
iu->buf = kzalloc(size, gfp_mask);
if (!iu->buf)
goto err;
@@ -41,17 +42,15 @@ struct rtrs_iu *rtrs_iu_alloc(u32 queue_size, size_t size, gfp_t gfp_mask,
iu->cqe.done = done;
iu->size = size;
- iu->direction = dir;
}
return ius;
err:
- rtrs_iu_free(ius, dir, dma_dev, i);
+ rtrs_iu_free(ius, dma_dev, i);
return NULL;
}
EXPORT_SYMBOL_GPL(rtrs_iu_alloc);
-void rtrs_iu_free(struct rtrs_iu *ius, enum dma_data_direction dir,
- struct ib_device *ibdev, u32 queue_size)
+void rtrs_iu_free(struct rtrs_iu *ius, struct ib_device *ibdev, u32 queue_size)
{
struct rtrs_iu *iu;
int i;
@@ -61,7 +60,7 @@ void rtrs_iu_free(struct rtrs_iu *ius, enum dma_data_direction dir,
for (i = 0; i < queue_size; i++) {
iu = &ius[i];
- ib_dma_unmap_single(ibdev, iu->dma_addr, iu->size, dir);
+ ib_dma_unmap_single(ibdev, iu->dma_addr, iu->size, iu->direction);
kfree(iu->buf);
}
kfree(ius);
@@ -105,6 +104,22 @@ int rtrs_post_recv_empty(struct rtrs_con *con, struct ib_cqe *cqe)
}
EXPORT_SYMBOL_GPL(rtrs_post_recv_empty);
+static int rtrs_post_send(struct ib_qp *qp, struct ib_send_wr *head,
+ struct ib_send_wr *wr)
+{
+ if (head) {
+ struct ib_send_wr *tail = head;
+
+ while (tail->next)
+ tail = tail->next;
+ tail->next = wr;
+ } else {
+ head = wr;
+ }
+
+ return ib_post_send(qp, head, NULL);
+}
+
int rtrs_iu_post_send(struct rtrs_con *con, struct rtrs_iu *iu, size_t size,
struct ib_send_wr *head)
{
@@ -127,17 +142,7 @@ int rtrs_iu_post_send(struct rtrs_con *con, struct rtrs_iu *iu, size_t size,
.send_flags = IB_SEND_SIGNALED,
};
- if (head) {
- struct ib_send_wr *tail = head;
-
- while (tail->next)
- tail = tail->next;
- tail->next = &wr;
- } else {
- head = &wr;
- }
-
- return ib_post_send(con->qp, head, NULL);
+ return rtrs_post_send(con->qp, head, &wr);
}
EXPORT_SYMBOL_GPL(rtrs_iu_post_send);
@@ -169,17 +174,7 @@ int rtrs_iu_post_rdma_write_imm(struct rtrs_con *con, struct rtrs_iu *iu,
if (WARN_ON(sge[i].length == 0))
return -EINVAL;
- if (head) {
- struct ib_send_wr *tail = head;
-
- while (tail->next)
- tail = tail->next;
- tail->next = &wr.wr;
- } else {
- head = &wr.wr;
- }
-
- return ib_post_send(con->qp, head, NULL);
+ return rtrs_post_send(con->qp, head, &wr.wr);
}
EXPORT_SYMBOL_GPL(rtrs_iu_post_rdma_write_imm);
@@ -196,17 +191,7 @@ int rtrs_post_rdma_write_imm_empty(struct rtrs_con *con, struct ib_cqe *cqe,
.ex.imm_data = cpu_to_be32(imm_data),
};
- if (head) {
- struct ib_send_wr *tail = head;
-
- while (tail->next)
- tail = tail->next;
- tail->next = &wr;
- } else {
- head = &wr;
- }
-
- return ib_post_send(con->qp, head, NULL);
+ return rtrs_post_send(con->qp, head, &wr);
}
EXPORT_SYMBOL_GPL(rtrs_post_rdma_write_imm_empty);
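
rtrs_post_send() factors out the tail-append logic that was duplicated three times above: callers may pass an already-linked chain as head, the new work request is appended, and one ib_post_send() posts the whole chain. A hypothetical caller that fronts an IU send with a local invalidate, reusing the rtrs_iu_post_send() signature from rtrs-pri.h:

#include <rdma/ib_verbs.h>

static int demo_send_with_inv(struct rtrs_con *con, struct rtrs_iu *iu,
			      size_t size, u32 rkey)
{
	struct ib_send_wr inv_wr = {
		.opcode = IB_WR_LOCAL_INV,
		.ex.invalidate_rkey = rkey,
	};

	return rtrs_iu_post_send(con, iu, size, &inv_wr);
}
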
diff --git a/drivers/infiniband/ulp/rtrs/rtrs.h b/drivers/infiniband/ulp/rtrs/rtrs.h
index 9af750f4d783..8738e90e715a 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs.h
@@ -63,13 +63,6 @@ struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops,
void rtrs_clt_close(struct rtrs_clt *sess);
-/**
- * rtrs_permit_to_pdu() - converts rtrs_permit to opaque pdu pointer
- * @permit: RTRS permit pointer, it associates the memory allocation for future
- * RDMA operation.
- */
-void *rtrs_permit_to_pdu(struct rtrs_permit *permit);
-
enum {
RTRS_PERMIT_NOWAIT = 0,
RTRS_PERMIT_WAIT = 1,
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index d8fcd21ab472..5492b66a8153 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -169,9 +169,9 @@ static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
int tmo = *(int *)kp->arg;
if (tmo >= 0)
- return sprintf(buffer, "%d\n", tmo);
+ return sysfs_emit(buffer, "%d\n", tmo);
else
- return sprintf(buffer, "off\n");
+ return sysfs_emit(buffer, "off\n");
}
static int srp_tmo_set(const char *val, const struct kernel_param *kp)
@@ -2896,7 +2896,7 @@ static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
- return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
+ return sysfs_emit(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
}
static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
@@ -2904,7 +2904,7 @@ static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
- return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
+ return sysfs_emit(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
}
static ssize_t show_service_id(struct device *dev,
@@ -2914,8 +2914,8 @@ static ssize_t show_service_id(struct device *dev,
if (target->using_rdma_cm)
return -ENOENT;
- return sprintf(buf, "0x%016llx\n",
- be64_to_cpu(target->ib_cm.service_id));
+ return sysfs_emit(buf, "0x%016llx\n",
+ be64_to_cpu(target->ib_cm.service_id));
}
static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
@@ -2925,7 +2925,8 @@ static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
if (target->using_rdma_cm)
return -ENOENT;
- return sprintf(buf, "0x%04x\n", be16_to_cpu(target->ib_cm.pkey));
+
+ return sysfs_emit(buf, "0x%04x\n", be16_to_cpu(target->ib_cm.pkey));
}
static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
@@ -2933,7 +2934,7 @@ static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
- return sprintf(buf, "%pI6\n", target->sgid.raw);
+ return sysfs_emit(buf, "%pI6\n", target->sgid.raw);
}
static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
@@ -2944,7 +2945,8 @@ static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
if (target->using_rdma_cm)
return -ENOENT;
- return sprintf(buf, "%pI6\n", ch->ib_cm.path.dgid.raw);
+
+ return sysfs_emit(buf, "%pI6\n", ch->ib_cm.path.dgid.raw);
}
static ssize_t show_orig_dgid(struct device *dev,
@@ -2954,7 +2956,8 @@ static ssize_t show_orig_dgid(struct device *dev,
if (target->using_rdma_cm)
return -ENOENT;
- return sprintf(buf, "%pI6\n", target->ib_cm.orig_dgid.raw);
+
+ return sysfs_emit(buf, "%pI6\n", target->ib_cm.orig_dgid.raw);
}
static ssize_t show_req_lim(struct device *dev,
@@ -2968,7 +2971,8 @@ static ssize_t show_req_lim(struct device *dev,
ch = &target->ch[i];
req_lim = min(req_lim, ch->req_lim);
}
- return sprintf(buf, "%d\n", req_lim);
+
+ return sysfs_emit(buf, "%d\n", req_lim);
}
static ssize_t show_zero_req_lim(struct device *dev,
@@ -2976,7 +2980,7 @@ static ssize_t show_zero_req_lim(struct device *dev,
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
- return sprintf(buf, "%d\n", target->zero_req_lim);
+ return sysfs_emit(buf, "%d\n", target->zero_req_lim);
}
static ssize_t show_local_ib_port(struct device *dev,
@@ -2984,7 +2988,7 @@ static ssize_t show_local_ib_port(struct device *dev,
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
- return sprintf(buf, "%d\n", target->srp_host->port);
+ return sysfs_emit(buf, "%d\n", target->srp_host->port);
}
static ssize_t show_local_ib_device(struct device *dev,
@@ -2992,8 +2996,8 @@ static ssize_t show_local_ib_device(struct device *dev,
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
- return sprintf(buf, "%s\n",
- dev_name(&target->srp_host->srp_dev->dev->dev));
+ return sysfs_emit(buf, "%s\n",
+ dev_name(&target->srp_host->srp_dev->dev->dev));
}
static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
@@ -3001,7 +3005,7 @@ static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
- return sprintf(buf, "%d\n", target->ch_count);
+ return sysfs_emit(buf, "%d\n", target->ch_count);
}
static ssize_t show_comp_vector(struct device *dev,
@@ -3009,7 +3013,7 @@ static ssize_t show_comp_vector(struct device *dev,
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
- return sprintf(buf, "%d\n", target->comp_vector);
+ return sysfs_emit(buf, "%d\n", target->comp_vector);
}
static ssize_t show_tl_retry_count(struct device *dev,
@@ -3017,7 +3021,7 @@ static ssize_t show_tl_retry_count(struct device *dev,
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
- return sprintf(buf, "%d\n", target->tl_retry_count);
+ return sysfs_emit(buf, "%d\n", target->tl_retry_count);
}
static ssize_t show_cmd_sg_entries(struct device *dev,
@@ -3025,7 +3029,7 @@ static ssize_t show_cmd_sg_entries(struct device *dev,
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
- return sprintf(buf, "%u\n", target->cmd_sg_cnt);
+ return sysfs_emit(buf, "%u\n", target->cmd_sg_cnt);
}
static ssize_t show_allow_ext_sg(struct device *dev,
@@ -3033,7 +3037,7 @@ static ssize_t show_allow_ext_sg(struct device *dev,
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
- return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
+ return sysfs_emit(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
}
static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
@@ -3893,7 +3897,7 @@ static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
{
struct srp_host *host = container_of(dev, struct srp_host, dev);
- return sprintf(buf, "%s\n", dev_name(&host->srp_dev->dev->dev));
+ return sysfs_emit(buf, "%s\n", dev_name(&host->srp_dev->dev->dev));
}
static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
@@ -3903,7 +3907,7 @@ static ssize_t show_port(struct device *dev, struct device_attribute *attr,
{
struct srp_host *host = container_of(dev, struct srp_host, dev);
- return sprintf(buf, "%d\n", host->port);
+ return sysfs_emit(buf, "%d\n", host->port);
}
static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
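These hunks all follow one shape: a sysfs show() callback that used sprintf()/snprintf() now calls sysfs_emit(), which formats into the PAGE_SIZE buffer sysfs provides and complains if handed anything else. A minimal sketch of the resulting pattern, with hypothetical driver names:

#include <linux/device.h>
#include <linux/sysfs.h>

struct example_host {
	int port;
};

/* Hypothetical show() callback illustrating the sysfs_emit() conversion. */
static ssize_t port_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct example_host *host = dev_get_drvdata(dev);

	/*
	 * sysfs_emit() behaves like scnprintf(buf, PAGE_SIZE, ...) but also
	 * sanity-checks that buf is the page-aligned buffer sysfs passed in.
	 */
	return sysfs_emit(buf, "%d\n", host->port);
}
static DEVICE_ATTR_RO(port);

The configfs conversions below rely on the same property: configfs show callbacks also receive a PAGE_SIZE buffer, so snprintf(page, PAGE_SIZE, ...) and scnprintf(page, PAGE_SIZE, ...) collapse to sysfs_emit(page, ...).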
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 53a8becac827..6be60aa5ffe2 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -2085,7 +2085,7 @@ static void srpt_release_channel_work(struct work_struct *w)
se_sess = ch->sess;
BUG_ON(!se_sess);
- target_sess_cmd_list_set_waiting(se_sess);
+ target_stop_session(se_sess);
target_wait_for_sess_cmds(se_sess);
target_remove_session(se_sess);
@@ -3448,7 +3448,7 @@ static ssize_t srpt_tpg_attrib_srp_max_rdma_size_show(struct config_item *item,
struct se_portal_group *se_tpg = attrib_to_tpg(item);
struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
- return sprintf(page, "%u\n", sport->port_attrib.srp_max_rdma_size);
+ return sysfs_emit(page, "%u\n", sport->port_attrib.srp_max_rdma_size);
}
static ssize_t srpt_tpg_attrib_srp_max_rdma_size_store(struct config_item *item,
@@ -3485,7 +3485,7 @@ static ssize_t srpt_tpg_attrib_srp_max_rsp_size_show(struct config_item *item,
struct se_portal_group *se_tpg = attrib_to_tpg(item);
struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
- return sprintf(page, "%u\n", sport->port_attrib.srp_max_rsp_size);
+ return sysfs_emit(page, "%u\n", sport->port_attrib.srp_max_rsp_size);
}
static ssize_t srpt_tpg_attrib_srp_max_rsp_size_store(struct config_item *item,
@@ -3522,7 +3522,7 @@ static ssize_t srpt_tpg_attrib_srp_sq_size_show(struct config_item *item,
struct se_portal_group *se_tpg = attrib_to_tpg(item);
struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
- return sprintf(page, "%u\n", sport->port_attrib.srp_sq_size);
+ return sysfs_emit(page, "%u\n", sport->port_attrib.srp_sq_size);
}
static ssize_t srpt_tpg_attrib_srp_sq_size_store(struct config_item *item,
@@ -3559,7 +3559,7 @@ static ssize_t srpt_tpg_attrib_use_srq_show(struct config_item *item,
struct se_portal_group *se_tpg = attrib_to_tpg(item);
struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
- return sprintf(page, "%d\n", sport->port_attrib.use_srq);
+ return sysfs_emit(page, "%d\n", sport->port_attrib.use_srq);
}
static ssize_t srpt_tpg_attrib_use_srq_store(struct config_item *item,
@@ -3649,7 +3649,7 @@ out:
static ssize_t srpt_rdma_cm_port_show(struct config_item *item, char *page)
{
- return sprintf(page, "%d\n", rdma_cm_port);
+ return sysfs_emit(page, "%d\n", rdma_cm_port);
}
static ssize_t srpt_rdma_cm_port_store(struct config_item *item,
@@ -3705,7 +3705,7 @@ static ssize_t srpt_tpg_enable_show(struct config_item *item, char *page)
struct se_portal_group *se_tpg = to_tpg(item);
struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
- return snprintf(page, PAGE_SIZE, "%d\n", sport->enabled);
+ return sysfs_emit(page, "%d\n", sport->enabled);
}
static ssize_t srpt_tpg_enable_store(struct config_item *item,
@@ -3812,7 +3812,7 @@ static void srpt_drop_tport(struct se_wwn *wwn)
static ssize_t srpt_wwn_version_show(struct config_item *item, char *buf)
{
- return scnprintf(buf, PAGE_SIZE, "\n");
+ return sysfs_emit(buf, "\n");
}
CONFIGFS_ATTR_RO(srpt_wwn_, version);
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index bdeb010efee6..76e66f630c17 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -347,7 +347,7 @@ struct srpt_nexus {
};
/**
- * struct srpt_port_attib - attributes for SRPT port
+ * struct srpt_port_attrib - attributes for SRPT port
* @srp_max_rdma_size: Maximum size of SRP RDMA transfers for new connections.
* @srp_max_rsp_size: Maximum size of SRP response messages in bytes.
* @srp_sq_size: Shared receive queue (SRQ) size.
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 0687f0ed60b8..8cc8ca4a9ac0 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -215,9 +215,17 @@ static const struct xpad_device {
{ 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
{ 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
{ 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE },
- { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02a0, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02a1, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02a2, "PDP Wired Controller for Xbox One - Crimson Red", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x02a4, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x02a6, "PDP Wired Controller for Xbox One - Camo Series", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02a7, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02a8, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02ad, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02b3, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02b8, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 },
{ 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 },
@@ -296,6 +304,9 @@ static const struct xpad_device {
{ 0x1bad, 0xfa01, "MadCatz GamePad", 0, XTYPE_XBOX360 },
{ 0x1bad, 0xfd00, "Razer Onza TE", 0, XTYPE_XBOX360 },
{ 0x1bad, 0xfd01, "Razer Onza", 0, XTYPE_XBOX360 },
+ { 0x20d6, 0x2001, "BDA Xbox Series X Wired Controller", 0, XTYPE_XBOXONE },
+ { 0x20d6, 0x281f, "PowerA Wired Controller For Xbox 360", 0, XTYPE_XBOX360 },
+ { 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE },
{ 0x24c6, 0x5000, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x24c6, 0x5300, "PowerA MINI PROEX Controller", 0, XTYPE_XBOX360 },
{ 0x24c6, 0x5303, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
@@ -429,8 +440,12 @@ static const struct usb_device_id xpad_table[] = {
XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */
XPAD_XBOX360_VENDOR(0x1bad), /* Harmonix Rock Band Guitar and Drums */
+ XPAD_XBOX360_VENDOR(0x20d6), /* PowerA Controllers */
+ XPAD_XBOXONE_VENDOR(0x20d6), /* PowerA Controllers */
XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */
XPAD_XBOXONE_VENDOR(0x24c6), /* PowerA Controllers */
+ XPAD_XBOXONE_VENDOR(0x2e24), /* Hyperkin Duke X-Box One pad */
+ XPAD_XBOX360_VENDOR(0x2f24), /* GameSir Controllers */
{ }
};
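Each new pad needs entries in both tables: xpad_device supplies the display name and XTYPE for an exact VID:PID, while the XPAD_XBOX360_VENDOR()/XPAD_XBOXONE_VENDOR() macros in xpad_table (defined earlier in xpad.c) generate interface-level USB matches covering the whole vendor ID. A simplified sketch of the lookup xpad_probe() performs once the vendor match fires:

/* The vendor entry gets us into probe; the VID:PID pair then selects the
 * xpad_device row that names the pad and fixes its XTYPE. */
for (i = 0; xpad_device[i].idVendor; i++) {
	if (le16_to_cpu(udev->descriptor.idVendor) == xpad_device[i].idVendor &&
	    le16_to_cpu(udev->descriptor.idProduct) == xpad_device[i].idProduct)
		break;
}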
diff --git a/drivers/input/misc/ariel-pwrbutton.c b/drivers/input/misc/ariel-pwrbutton.c
index eda86ab552b9..17bbaac8b80c 100644
--- a/drivers/input/misc/ariel-pwrbutton.c
+++ b/drivers/input/misc/ariel-pwrbutton.c
@@ -149,12 +149,6 @@ static const struct of_device_id ariel_pwrbutton_of_match[] = {
};
MODULE_DEVICE_TABLE(of, ariel_pwrbutton_of_match);
-static const struct spi_device_id ariel_pwrbutton_id_table[] = {
- { "wyse-ariel-ec-input", 0 },
- {}
-};
-MODULE_DEVICE_TABLE(spi, ariel_pwrbutton_id_table);
-
static struct spi_driver ariel_pwrbutton_driver = {
.driver = {
.name = "dell-wyse-ariel-ec-input",
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 3a2dcf0805f1..c74b020796a9 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -219,6 +219,8 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"),
DMI_MATCH(DMI_PRODUCT_NAME, "C15B"),
},
+ },
+ {
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ByteSpeed LLC"),
DMI_MATCH(DMI_PRODUCT_NAME, "ByteSpeed Laptop C15B"),
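The defect being fixed was a missing "}, {" between two table entries: the PEGATRON C15B and ByteSpeed C15B matches had been fused into one initializer, where the second .matches designator silently overrides the first (GCC warns via -Woverride-init). For reference, a well-formed two-entry table, with placeholder strings:

static const struct dmi_system_id __initconst example_dmi_table[] = {
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Vendor A"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Model A"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Vendor B"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Model B"),
		},
	},
	{ }	/* terminating entry */
};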
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index 19765f1c04f7..c682b028f0a2 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -157,6 +157,7 @@ static const struct goodix_chip_id goodix_chip_ids[] = {
{ .id = "5663", .data = &gt1x_chip_data },
{ .id = "5688", .data = &gt1x_chip_data },
{ .id = "917S", .data = &gt1x_chip_data },
+ { .id = "9286", .data = &gt1x_chip_data },
{ .id = "911", .data = &gt911_chip_data },
{ .id = "9271", .data = &gt911_chip_data },
@@ -1448,6 +1449,7 @@ static const struct of_device_id goodix_of_match[] = {
{ .compatible = "goodix,gt927" },
{ .compatible = "goodix,gt9271" },
{ .compatible = "goodix,gt928" },
+ { .compatible = "goodix,gt9286" },
{ .compatible = "goodix,gt967" },
{ }
};
diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c
index 199cf3daec10..d8fccf048bf4 100644
--- a/drivers/input/touchscreen/ili210x.c
+++ b/drivers/input/touchscreen/ili210x.c
@@ -29,11 +29,13 @@ struct ili2xxx_chip {
void *buf, size_t len);
int (*get_touch_data)(struct i2c_client *client, u8 *data);
bool (*parse_touch_data)(const u8 *data, unsigned int finger,
- unsigned int *x, unsigned int *y);
+ unsigned int *x, unsigned int *y,
+ unsigned int *z);
bool (*continue_polling)(const u8 *data, bool touch);
unsigned int max_touches;
unsigned int resolution;
bool has_calibrate_reg;
+ bool has_pressure_reg;
};
struct ili210x {
@@ -82,7 +84,8 @@ static int ili210x_read_touch_data(struct i2c_client *client, u8 *data)
static bool ili210x_touchdata_to_coords(const u8 *touchdata,
unsigned int finger,
- unsigned int *x, unsigned int *y)
+ unsigned int *x, unsigned int *y,
+ unsigned int *z)
{
if (touchdata[0] & BIT(finger))
return false;
@@ -137,7 +140,8 @@ static int ili211x_read_touch_data(struct i2c_client *client, u8 *data)
static bool ili211x_touchdata_to_coords(const u8 *touchdata,
unsigned int finger,
- unsigned int *x, unsigned int *y)
+ unsigned int *x, unsigned int *y,
+ unsigned int *z)
{
u32 data;
@@ -169,7 +173,8 @@ static const struct ili2xxx_chip ili211x_chip = {
static bool ili212x_touchdata_to_coords(const u8 *touchdata,
unsigned int finger,
- unsigned int *x, unsigned int *y)
+ unsigned int *x, unsigned int *y,
+ unsigned int *z)
{
u16 val;
@@ -235,7 +240,8 @@ static int ili251x_read_touch_data(struct i2c_client *client, u8 *data)
static bool ili251x_touchdata_to_coords(const u8 *touchdata,
unsigned int finger,
- unsigned int *x, unsigned int *y)
+ unsigned int *x, unsigned int *y,
+ unsigned int *z)
{
u16 val;
@@ -245,6 +251,7 @@ static bool ili251x_touchdata_to_coords(const u8 *touchdata,
*x = val & 0x3fff;
*y = get_unaligned_be16(touchdata + 1 + (finger * 5) + 2);
+ *z = touchdata[1 + (finger * 5) + 4];
return true;
}
@@ -261,6 +268,7 @@ static const struct ili2xxx_chip ili251x_chip = {
.continue_polling = ili251x_check_continue_polling,
.max_touches = 10,
.has_calibrate_reg = true,
+ .has_pressure_reg = true,
};
static bool ili210x_report_events(struct ili210x *priv, u8 *touchdata)
@@ -268,14 +276,16 @@ static bool ili210x_report_events(struct ili210x *priv, u8 *touchdata)
struct input_dev *input = priv->input;
int i;
bool contact = false, touch;
- unsigned int x = 0, y = 0;
+ unsigned int x = 0, y = 0, z = 0;
for (i = 0; i < priv->chip->max_touches; i++) {
- touch = priv->chip->parse_touch_data(touchdata, i, &x, &y);
+ touch = priv->chip->parse_touch_data(touchdata, i, &x, &y, &z);
input_mt_slot(input, i);
if (input_mt_report_slot_state(input, MT_TOOL_FINGER, touch)) {
touchscreen_report_pos(input, &priv->prop, x, y, true);
+ if (priv->chip->has_pressure_reg)
+ input_report_abs(input, ABS_MT_PRESSURE, z);
contact = true;
}
}
@@ -437,6 +447,8 @@ static int ili210x_i2c_probe(struct i2c_client *client,
max_xy = (chip->resolution ?: SZ_64K) - 1;
input_set_abs_params(input, ABS_MT_POSITION_X, 0, max_xy, 0, 0);
input_set_abs_params(input, ABS_MT_POSITION_Y, 0, max_xy, 0, 0);
+ if (priv->chip->has_pressure_reg)
+ input_set_abs_params(input, ABS_MT_PRESSURE, 0, 0xa, 0, 0);
touchscreen_parse_properties(input, true, &priv->prop);
error = input_mt_init_slots(input, priv->chip->max_touches,
diff --git a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c
index bda96762744e..b4e7bcbe9b91 100644
--- a/drivers/input/touchscreen/st1232.c
+++ b/drivers/input/touchscreen/st1232.c
@@ -26,6 +26,20 @@
#define ST1232_TS_NAME "st1232-ts"
#define ST1633_TS_NAME "st1633-ts"
+#define REG_STATUS 0x01 /* Device Status | Error Code */
+
+#define STATUS_NORMAL 0x00
+#define STATUS_INIT 0x01
+#define STATUS_ERROR 0x02
+#define STATUS_AUTO_TUNING 0x03
+#define STATUS_IDLE 0x04
+#define STATUS_POWER_DOWN 0x05
+
+#define ERROR_NONE 0x00
+#define ERROR_INVALID_ADDRESS 0x10
+#define ERROR_INVALID_VALUE 0x20
+#define ERROR_INVALID_PLATFORM 0x30
+
#define REG_XY_RESOLUTION 0x04
#define REG_XY_COORDINATES 0x12
#define ST_TS_MAX_FINGERS 10
@@ -47,7 +61,8 @@ struct st1232_ts_data {
u8 *read_buf;
};
-static int st1232_ts_read_data(struct st1232_ts_data *ts, u8 reg)
+static int st1232_ts_read_data(struct st1232_ts_data *ts, u8 reg,
+ unsigned int n)
{
struct i2c_client *client = ts->client;
struct i2c_msg msg[] = {
@@ -59,7 +74,7 @@ static int st1232_ts_read_data(struct st1232_ts_data *ts, u8 reg)
{
.addr = client->addr,
.flags = I2C_M_RD | I2C_M_DMA_SAFE,
- .len = ts->read_buf_len,
+ .len = n,
.buf = ts->read_buf,
}
};
@@ -72,6 +87,22 @@ static int st1232_ts_read_data(struct st1232_ts_data *ts, u8 reg)
return 0;
}
+static int st1232_ts_wait_ready(struct st1232_ts_data *ts)
+{
+ unsigned int retries;
+ int error;
+
+ for (retries = 10; retries; retries--) {
+ error = st1232_ts_read_data(ts, REG_STATUS, 1);
+ if (!error && ts->read_buf[0] == (STATUS_NORMAL | ERROR_NONE))
+ return 0;
+
+ usleep_range(1000, 2000);
+ }
+
+ return -ENXIO;
+}
+
static int st1232_ts_read_resolution(struct st1232_ts_data *ts, u16 *max_x,
u16 *max_y)
{
@@ -79,14 +110,14 @@ static int st1232_ts_read_resolution(struct st1232_ts_data *ts, u16 *max_x,
int error;
/* select resolution register */
- error = st1232_ts_read_data(ts, REG_XY_RESOLUTION);
+ error = st1232_ts_read_data(ts, REG_XY_RESOLUTION, 3);
if (error)
return error;
buf = ts->read_buf;
- *max_x = ((buf[0] & 0x0070) << 4) | buf[1];
- *max_y = ((buf[0] & 0x0007) << 8) | buf[2];
+ *max_x = (((buf[0] & 0x0070) << 4) | buf[1]) - 1;
+ *max_y = (((buf[0] & 0x0007) << 8) | buf[2]) - 1;
return 0;
}
@@ -140,7 +171,7 @@ static irqreturn_t st1232_ts_irq_handler(int irq, void *dev_id)
int count;
int error;
- error = st1232_ts_read_data(ts, REG_XY_COORDINATES);
+ error = st1232_ts_read_data(ts, REG_XY_COORDINATES, ts->read_buf_len);
if (error)
goto out;
@@ -251,6 +282,11 @@ static int st1232_ts_probe(struct i2c_client *client,
input_dev->name = "st1232-touchscreen";
input_dev->id.bustype = BUS_I2C;
+ /* Wait until device is ready */
+ error = st1232_ts_wait_ready(ts);
+ if (error)
+ return error;
+
/* Read resolution from the chip */
error = st1232_ts_read_resolution(ts, &max_x, &max_y);
if (error) {
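The new wait loop polls REG_STATUS up to ten times, sleeping 1-2 ms between reads, until the controller reports STATUS_NORMAL with ERROR_NONE. It is open-coded because st1232_ts_read_data() deposits the byte in ts->read_buf; with a value-returning accessor, the same wait could use the generic read_poll_timeout() helper. A sketch under that assumption (st1232_read_status() is hypothetical):

#include <linux/iopoll.h>

static int example_wait_ready(struct st1232_ts_data *ts)
{
	int status;

	/* Poll roughly every millisecond, giving up after ~20 ms. */
	return read_poll_timeout(st1232_read_status, status,
				 status == (STATUS_NORMAL | ERROR_NONE),
				 1000, 20 * 1000, false, ts);
}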
diff --git a/drivers/interconnect/imx/imx.c b/drivers/interconnect/imx/imx.c
index 41dba7090c2a..c770951a909c 100644
--- a/drivers/interconnect/imx/imx.c
+++ b/drivers/interconnect/imx/imx.c
@@ -96,9 +96,10 @@ static int imx_icc_node_init_qos(struct icc_provider *provider,
return -ENODEV;
}
/* Allow scaling to be disabled on a per-node basis */
- if (!dn || !of_device_is_available(dn)) {
+ if (!of_device_is_available(dn)) {
dev_warn(dev, "Missing property %s, skip scaling %s\n",
adj->phandle_name, node->name);
+ of_node_put(dn);
return 0;
}
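The node here comes from of_parse_phandle(), which returns it with an elevated reference count, so the early "skip scaling" return was leaking a reference; the fix adds the missing of_node_put() and drops the redundant !dn test, which the error path just above already handled. The balanced pattern, as a generic sketch with a hypothetical property name:

struct device_node *dn;

dn = of_parse_phandle(dev->of_node, "example-phandle", 0);	/* takes a ref */
if (!dn)
	return -ENODEV;		/* nothing to put: no node was returned */

if (!of_device_is_available(dn)) {
	of_node_put(dn);	/* drop the reference on every exit path */
	return 0;
}

/* ... use dn ... */
of_node_put(dn);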
diff --git a/drivers/interconnect/imx/imx8mq.c b/drivers/interconnect/imx/imx8mq.c
index ba43a15aefec..d7768d3c6d8a 100644
--- a/drivers/interconnect/imx/imx8mq.c
+++ b/drivers/interconnect/imx/imx8mq.c
@@ -7,6 +7,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/interconnect-provider.h>
#include <dt-bindings/interconnect/imx8mq.h>
#include "imx.h"
@@ -94,6 +95,7 @@ static struct platform_driver imx8mq_icc_driver = {
.remove = imx8mq_icc_remove,
.driver = {
.name = "imx8mq-interconnect",
+ .sync_state = icc_sync_state,
},
};
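Wiring up .sync_state = icc_sync_state opts the provider into the core's boot-state handling: bandwidth stays at safe boot-time levels until every consumer listed in the device tree has probed and voted, at which point the framework settles to the aggregated requests. On the consumer side the voting looks roughly like this sketch (path name hypothetical, error handling trimmed):

#include <linux/interconnect.h>

static int example_vote_bw(struct device *dev)
{
	struct icc_path *path;

	path = of_icc_get(dev, "dram");	/* name from DT interconnect-names */
	if (IS_ERR(path))
		return PTR_ERR(path);

	/* Vote average/peak bandwidth; the core aggregates across consumers. */
	return icc_set_bw(path, MBps_to_icc(100), MBps_to_icc(200));
}

The path would normally be kept for the device's lifetime and released with icc_put(); without sync_state support, a provider risks throttling not-yet-probed consumers as soon as the first vote lands.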
diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig
index a8f93ba265f8..b3fb5b02bcf1 100644
--- a/drivers/interconnect/qcom/Kconfig
+++ b/drivers/interconnect/qcom/Kconfig
@@ -42,13 +42,23 @@ config INTERCONNECT_QCOM_QCS404
This is a driver for the Qualcomm Network-on-Chip on qcs404-based
platforms.
+config INTERCONNECT_QCOM_RPMH_POSSIBLE
+ tristate
+ default INTERCONNECT_QCOM
+ depends on QCOM_RPMH || (COMPILE_TEST && !QCOM_RPMH)
+ depends on QCOM_COMMAND_DB || (COMPILE_TEST && !QCOM_COMMAND_DB)
+ depends on OF || COMPILE_TEST
+ help
+ Compile-testing RPMH drivers is possible on other platforms,
+ but in order to avoid link failures, drivers must not be built-in
+ when QCOM_RPMH or QCOM_COMMAND_DB are loadable modules.
+
config INTERCONNECT_QCOM_RPMH
tristate
config INTERCONNECT_QCOM_SC7180
tristate "Qualcomm SC7180 interconnect driver"
- depends on INTERCONNECT_QCOM
- depends on (QCOM_RPMH && QCOM_COMMAND_DB && OF) || COMPILE_TEST
+ depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
select INTERCONNECT_QCOM_RPMH
select INTERCONNECT_QCOM_BCM_VOTER
help
@@ -57,8 +67,7 @@ config INTERCONNECT_QCOM_SC7180
config INTERCONNECT_QCOM_SDM845
tristate "Qualcomm SDM845 interconnect driver"
- depends on INTERCONNECT_QCOM
- depends on (QCOM_RPMH && QCOM_COMMAND_DB && OF) || COMPILE_TEST
+ depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
select INTERCONNECT_QCOM_RPMH
select INTERCONNECT_QCOM_BCM_VOTER
help
@@ -67,8 +76,7 @@ config INTERCONNECT_QCOM_SDM845
config INTERCONNECT_QCOM_SM8150
tristate "Qualcomm SM8150 interconnect driver"
- depends on INTERCONNECT_QCOM
- depends on (QCOM_RPMH && QCOM_COMMAND_DB && OF) || COMPILE_TEST
+ depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
select INTERCONNECT_QCOM_RPMH
select INTERCONNECT_QCOM_BCM_VOTER
help
@@ -77,8 +85,7 @@ config INTERCONNECT_QCOM_SM8150
config INTERCONNECT_QCOM_SM8250
tristate "Qualcomm SM8250 interconnect driver"
- depends on INTERCONNECT_QCOM
- depends on (QCOM_RPMH && QCOM_COMMAND_DB && OF) || COMPILE_TEST
+ depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
select INTERCONNECT_QCOM_RPMH
select INTERCONNECT_QCOM_BCM_VOTER
help
diff --git a/drivers/interconnect/qcom/sdm845.c b/drivers/interconnect/qcom/sdm845.c
index 5304aea3b058..366870150cbd 100644
--- a/drivers/interconnect/qcom/sdm845.c
+++ b/drivers/interconnect/qcom/sdm845.c
@@ -177,6 +177,7 @@ DEFINE_QBCM(bcm_sn15, "SN15", false, &qnm_memnoc);
static struct qcom_icc_bcm *aggre1_noc_bcms[] = {
&bcm_sn9,
+ &bcm_qup0,
};
static struct qcom_icc_node *aggre1_noc_nodes[] = {
@@ -190,6 +191,7 @@ static struct qcom_icc_node *aggre1_noc_nodes[] = {
[SLAVE_A1NOC_SNOC] = &qns_a1noc_snoc,
[SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
[SLAVE_ANOC_PCIE_A1NOC_SNOC] = &qns_pcie_a1noc_snoc,
+ [MASTER_QUP_1] = &qhm_qup1,
};
static const struct qcom_icc_desc sdm845_aggre1_noc = {
@@ -218,6 +220,7 @@ static struct qcom_icc_node *aggre2_noc_nodes[] = {
[SLAVE_A2NOC_SNOC] = &qns_a2noc_snoc,
[SLAVE_ANOC_PCIE_SNOC] = &qns_pcie_snoc,
[SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
+ [MASTER_QUP_2] = &qhm_qup2,
};
static const struct qcom_icc_desc sdm845_aggre2_noc = {
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 04878caf6da4..192ef8f61310 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -103,6 +103,11 @@ config IOMMU_DMA
select IRQ_MSI_IOMMU
select NEED_SG_DMA_LENGTH
+# Shared Virtual Addressing library
+config IOMMU_SVA_LIB
+ bool
+ select IOASID
+
config FSL_PAMU
bool "Freescale IOMMU support"
depends on PCI
@@ -311,6 +316,8 @@ config ARM_SMMU_V3
config ARM_SMMU_V3_SVA
bool "Shared Virtual Addressing support for the ARM SMMUv3"
depends on ARM_SMMU_V3
+ select IOMMU_SVA_LIB
+ select MMU_NOTIFIER
help
Support for sharing process address spaces with devices using the
SMMUv3.
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 11f1771104f3..61bd30cd8369 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -27,3 +27,4 @@ obj-$(CONFIG_FSL_PAMU) += fsl_pamu.o fsl_pamu_domain.o
obj-$(CONFIG_S390_IOMMU) += s390-iommu.o
obj-$(CONFIG_HYPERV_IOMMU) += hyperv-iommu.o
obj-$(CONFIG_VIRTIO_IOMMU) += virtio-iommu.o
+obj-$(CONFIG_IOMMU_SVA_LIB) += iommu-sva-lib.o
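iommu-sva-lib is a small helper layered on IOASID that gives every SVA-capable IOMMU driver a single shared PASID per mm. With the signatures this series introduces, usage pairs up roughly as below (a sketch; the allocator takes a reference when the mm already holds an in-range PASID, so bind/unbind paths can call it symmetrically):

#include <linux/sched/mm.h>
#include "iommu-sva-lib.h"

static int example_bind_mm(struct mm_struct *mm, unsigned int ssid_bits)
{
	int ret;

	/* Allocate (or take a reference on) the PASID for this mm. */
	ret = iommu_sva_alloc_pasid(mm, 1, (1U << ssid_bits) - 1);
	if (ret)
		return ret;

	/* ... program mm->pasid into the device's context entries ... */
	return 0;
}

static void example_unbind_mm(struct mm_struct *mm)
{
	iommu_sva_free_pasid(mm);	/* balances the alloc above */
}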
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 6b8cbdf71714..b4adab698563 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -84,12 +84,9 @@ static inline bool is_rd890_iommu(struct pci_dev *pdev)
(pdev->device == PCI_DEVICE_ID_RD890_IOMMU);
}
-static inline bool iommu_feature(struct amd_iommu *iommu, u64 f)
+static inline bool iommu_feature(struct amd_iommu *iommu, u64 mask)
{
- if (!(iommu->cap & (1 << IOMMU_CAP_EFR)))
- return false;
-
- return !!(iommu->features & f);
+ return !!(iommu->features & mask);
}
static inline u64 iommu_virt_to_phys(void *vaddr)
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 52d70ca36169..1a0495dd5fcb 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -255,11 +255,19 @@
/* Bit value definition for dte irq remapping fields*/
#define DTE_IRQ_PHYS_ADDR_MASK (((1ULL << 45)-1) << 6)
#define DTE_IRQ_REMAP_INTCTL_MASK (0x3ULL << 60)
-#define DTE_IRQ_TABLE_LEN_MASK (0xfULL << 1)
#define DTE_IRQ_REMAP_INTCTL (2ULL << 60)
-#define DTE_IRQ_TABLE_LEN (9ULL << 1)
#define DTE_IRQ_REMAP_ENABLE 1ULL
+/*
+ * AMD IOMMU hardware only supports 512 IRTEs despite
+ * the architectural limitation of 2048 entries.
+ */
+#define DTE_INTTAB_ALIGNMENT 128
+#define DTE_INTTABLEN_VALUE 9ULL
+#define DTE_INTTABLEN (DTE_INTTABLEN_VALUE << 1)
+#define DTE_INTTABLEN_MASK (0xfULL << 1)
+#define MAX_IRQS_PER_TABLE (1 << DTE_INTTABLEN_VALUE)
+
#define PAGE_MODE_NONE 0x00
#define PAGE_MODE_1_LEVEL 0x01
#define PAGE_MODE_2_LEVEL 0x02
@@ -379,6 +387,10 @@
#define IOMMU_CAP_NPCACHE 26
#define IOMMU_CAP_EFR 27
+/* IOMMU IVINFO */
+#define IOMMU_IVINFO_OFFSET 36
+#define IOMMU_IVINFO_EFRSUP BIT(0)
+
/* IOMMU Feature Reporting Field (for IVHD type 10h) */
#define IOMMU_FEAT_GASUP_SHIFT 6
@@ -409,13 +421,6 @@ extern bool amd_iommu_np_cache;
/* Only true if all IOMMUs support device IOTLBs */
extern bool amd_iommu_iotlb_sup;
-/*
- * AMD IOMMU hardware only support 512 IRTEs despite
- * the architectural limitation of 2048 entries.
- */
-#define MAX_IRQS_PER_TABLE 512
-#define IRQ_TABLE_ALIGNMENT 128
-
struct irq_remap_table {
raw_spinlock_t lock;
unsigned min_index;
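The renamed constants keep the encoding and the limit tied together: the DTE's IntTabLen field stores the IRQ table size as a power of two, so DTE_INTTABLEN_VALUE = 9 encodes 2^9 = 512 IRTEs, and MAX_IRQS_PER_TABLE is now derived from the field instead of being hard-coded to 512. A compile-time check of that relationship (a sketch, not part of the patch):

#include <linux/build_bug.h>

/* The encoded length must reproduce the hardware limit of 512 IRTEs. */
static_assert(MAX_IRQS_PER_TABLE == (1 << DTE_INTTABLEN_VALUE));
static_assert(MAX_IRQS_PER_TABLE == 512);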
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 5ff5687a87c7..83d8ab2aed9f 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -257,6 +257,8 @@ static void init_device_table_dma(void);
static bool amd_iommu_pre_enabled = true;
+static u32 amd_iommu_ivinfo __initdata;
+
bool translation_pre_enabled(struct amd_iommu *iommu)
{
return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED);
@@ -296,6 +298,18 @@ int amd_iommu_get_num_iommus(void)
return amd_iommus_present;
}
+/*
+ * For IVHD type 0x11/0x40, EFR is also available via IVHD.
+ * Default to IVHD EFR since it is available sooner
+ * (i.e. before PCI init).
+ */
+static void __init early_iommu_features_init(struct amd_iommu *iommu,
+ struct ivhd_header *h)
+{
+ if (amd_iommu_ivinfo & IOMMU_IVINFO_EFRSUP)
+ iommu->features = h->efr_reg;
+}
+
/* Access to l1 and l2 indexed register spaces */
static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
@@ -989,10 +1003,10 @@ static bool copy_device_table(void)
irq_v = old_devtb[devid].data[2] & DTE_IRQ_REMAP_ENABLE;
int_ctl = old_devtb[devid].data[2] & DTE_IRQ_REMAP_INTCTL_MASK;
- int_tab_len = old_devtb[devid].data[2] & DTE_IRQ_TABLE_LEN_MASK;
+ int_tab_len = old_devtb[devid].data[2] & DTE_INTTABLEN_MASK;
if (irq_v && (int_ctl || int_tab_len)) {
if ((int_ctl != DTE_IRQ_REMAP_INTCTL) ||
- (int_tab_len != DTE_IRQ_TABLE_LEN)) {
+ (int_tab_len != DTE_INTTABLEN)) {
pr_err("Wrong old irq remapping flag: %#x\n", devid);
return false;
}
@@ -1577,6 +1591,9 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
if (h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT))
amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;
+
+ early_iommu_features_init(iommu, h);
+
break;
default:
return -EINVAL;
@@ -1770,6 +1787,35 @@ static const struct attribute_group *amd_iommu_groups[] = {
NULL,
};
+/*
+ * Note: IVHD 0x11 and 0x40 also contain an exact copy
+ * of the IOMMU Extended Feature Register [MMIO Offset 0030h].
+ * Default to EFR in IVHD since it is available sooner (i.e. before PCI init).
+ */
+static void __init late_iommu_features_init(struct amd_iommu *iommu)
+{
+ u64 features;
+
+ if (!(iommu->cap & (1 << IOMMU_CAP_EFR)))
+ return;
+
+ /* read extended feature bits */
+ features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);
+
+ if (!iommu->features) {
+ iommu->features = features;
+ return;
+ }
+
+ /*
+ * Sanity check and warn if EFR values from
+ * IVHD and MMIO conflict.
+ */
+ if (features != iommu->features)
+ pr_warn(FW_WARN "EFR mismatch. Use IVHD EFR (%#llx : %#llx).\n",
+ features, iommu->features);
+}
+
static int __init iommu_init_pci(struct amd_iommu *iommu)
{
int cap_ptr = iommu->cap_ptr;
@@ -1789,8 +1835,7 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
amd_iommu_iotlb_sup = false;
- /* read extended feature bits */
- iommu->features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);
+ late_iommu_features_init(iommu);
if (iommu_feature(iommu, FEATURE_GT)) {
int glxval;
@@ -1973,8 +2018,6 @@ static int iommu_setup_msi(struct amd_iommu *iommu)
return r;
}
- iommu->int_enabled = true;
-
return 0;
}
@@ -2169,6 +2212,7 @@ static int iommu_init_irq(struct amd_iommu *iommu)
if (ret)
return ret;
+ iommu->int_enabled = true;
enable_faults:
iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
@@ -2608,6 +2652,11 @@ static void __init free_dma_resources(void)
free_unity_maps();
}
+static void __init ivinfo_init(void *ivrs)
+{
+ amd_iommu_ivinfo = *((u32 *)(ivrs + IOMMU_IVINFO_OFFSET));
+}
+
/*
* This is the hardware init function for AMD IOMMU in the system.
* This function is called either from amd_iommu_init or from the interrupt
@@ -2662,6 +2711,8 @@ static int __init early_amd_iommu_init(void)
if (ret)
goto out;
+ ivinfo_init(ivrs_base);
+
amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);
@@ -2757,7 +2808,7 @@ static int __init early_amd_iommu_init(void)
remap_cache_sz = MAX_IRQS_PER_TABLE * (sizeof(u64) * 2);
amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
remap_cache_sz,
- IRQ_TABLE_ALIGNMENT,
+ DTE_INTTAB_ALIGNMENT,
0, NULL);
if (!amd_iommu_irq_cache)
goto out;
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 463d322a4f3b..f0adbc48fd17 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -3190,7 +3190,7 @@ static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table)
dte &= ~DTE_IRQ_PHYS_ADDR_MASK;
dte |= iommu_virt_to_phys(table->table);
dte |= DTE_IRQ_REMAP_INTCTL;
- dte |= DTE_IRQ_TABLE_LEN;
+ dte |= DTE_INTTABLEN;
dte |= DTE_IRQ_REMAP_ENABLE;
amd_iommu_dev_table[devid].data[2] = dte;
@@ -3854,6 +3854,9 @@ static int irq_remapping_select(struct irq_domain *d, struct irq_fwspec *fwspec,
struct amd_iommu *iommu;
int devid = -1;
+ if (!amd_iommu_irq_remap)
+ return 0;
+
if (x86_fwspec_is_ioapic(fwspec))
devid = get_ioapic_devid(fwspec->param[0]);
else if (x86_fwspec_is_hpet(fwspec))
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
index 9255c9600fb8..e13b092e6004 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -5,11 +5,35 @@
#include <linux/mm.h>
#include <linux/mmu_context.h>
+#include <linux/mmu_notifier.h>
#include <linux/slab.h>
#include "arm-smmu-v3.h"
+#include "../../iommu-sva-lib.h"
#include "../../io-pgtable-arm.h"
+struct arm_smmu_mmu_notifier {
+ struct mmu_notifier mn;
+ struct arm_smmu_ctx_desc *cd;
+ bool cleared;
+ refcount_t refs;
+ struct list_head list;
+ struct arm_smmu_domain *domain;
+};
+
+#define mn_to_smmu(mn) container_of(mn, struct arm_smmu_mmu_notifier, mn)
+
+struct arm_smmu_bond {
+ struct iommu_sva sva;
+ struct mm_struct *mm;
+ struct arm_smmu_mmu_notifier *smmu_mn;
+ struct list_head list;
+ refcount_t refs;
+};
+
+#define sva_to_bond(handle) \
+ container_of(handle, struct arm_smmu_bond, sva)
+
static DEFINE_MUTEX(sva_lock);
/*
@@ -64,7 +88,6 @@ arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
return NULL;
}
-__maybe_unused
static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)
{
u16 asid;
@@ -145,7 +168,6 @@ out_put_context:
return err < 0 ? ERR_PTR(err) : ret;
}
-__maybe_unused
static void arm_smmu_free_shared_cd(struct arm_smmu_ctx_desc *cd)
{
if (arm_smmu_free_asid(cd)) {
@@ -155,6 +177,215 @@ static void arm_smmu_free_shared_cd(struct arm_smmu_ctx_desc *cd)
}
}
+static void arm_smmu_mm_invalidate_range(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn);
+
+ arm_smmu_atc_inv_domain(smmu_mn->domain, mm->pasid, start,
+ end - start + 1);
+}
+
+static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
+{
+ struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn);
+ struct arm_smmu_domain *smmu_domain = smmu_mn->domain;
+
+ mutex_lock(&sva_lock);
+ if (smmu_mn->cleared) {
+ mutex_unlock(&sva_lock);
+ return;
+ }
+
+ /*
+ * DMA may still be running. Keep the cd valid to avoid C_BAD_CD events,
+ * but disable translation.
+ */
+ arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, &quiet_cd);
+
+ arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_mn->cd->asid);
+ arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, 0, 0);
+
+ smmu_mn->cleared = true;
+ mutex_unlock(&sva_lock);
+}
+
+static void arm_smmu_mmu_notifier_free(struct mmu_notifier *mn)
+{
+ kfree(mn_to_smmu(mn));
+}
+
+static struct mmu_notifier_ops arm_smmu_mmu_notifier_ops = {
+ .invalidate_range = arm_smmu_mm_invalidate_range,
+ .release = arm_smmu_mm_release,
+ .free_notifier = arm_smmu_mmu_notifier_free,
+};
+
+/* Allocate or get existing MMU notifier for this {domain, mm} pair */
+static struct arm_smmu_mmu_notifier *
+arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain,
+ struct mm_struct *mm)
+{
+ int ret;
+ struct arm_smmu_ctx_desc *cd;
+ struct arm_smmu_mmu_notifier *smmu_mn;
+
+ list_for_each_entry(smmu_mn, &smmu_domain->mmu_notifiers, list) {
+ if (smmu_mn->mn.mm == mm) {
+ refcount_inc(&smmu_mn->refs);
+ return smmu_mn;
+ }
+ }
+
+ cd = arm_smmu_alloc_shared_cd(mm);
+ if (IS_ERR(cd))
+ return ERR_CAST(cd);
+
+ smmu_mn = kzalloc(sizeof(*smmu_mn), GFP_KERNEL);
+ if (!smmu_mn) {
+ ret = -ENOMEM;
+ goto err_free_cd;
+ }
+
+ refcount_set(&smmu_mn->refs, 1);
+ smmu_mn->cd = cd;
+ smmu_mn->domain = smmu_domain;
+ smmu_mn->mn.ops = &arm_smmu_mmu_notifier_ops;
+
+ ret = mmu_notifier_register(&smmu_mn->mn, mm);
+ if (ret) {
+ kfree(smmu_mn);
+ goto err_free_cd;
+ }
+
+ ret = arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, cd);
+ if (ret)
+ goto err_put_notifier;
+
+ list_add(&smmu_mn->list, &smmu_domain->mmu_notifiers);
+ return smmu_mn;
+
+err_put_notifier:
+ /* Frees smmu_mn */
+ mmu_notifier_put(&smmu_mn->mn);
+err_free_cd:
+ arm_smmu_free_shared_cd(cd);
+ return ERR_PTR(ret);
+}
+
+static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
+{
+ struct mm_struct *mm = smmu_mn->mn.mm;
+ struct arm_smmu_ctx_desc *cd = smmu_mn->cd;
+ struct arm_smmu_domain *smmu_domain = smmu_mn->domain;
+
+ if (!refcount_dec_and_test(&smmu_mn->refs))
+ return;
+
+ list_del(&smmu_mn->list);
+ arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, NULL);
+
+ /*
+ * If we went through clear(), we've already invalidated, and no
+ * new TLB entry can have been formed.
+ */
+ if (!smmu_mn->cleared) {
+ arm_smmu_tlb_inv_asid(smmu_domain->smmu, cd->asid);
+ arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, 0, 0);
+ }
+
+ /* Frees smmu_mn */
+ mmu_notifier_put(&smmu_mn->mn);
+ arm_smmu_free_shared_cd(cd);
+}
+
+static struct iommu_sva *
+__arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
+{
+ int ret;
+ struct arm_smmu_bond *bond;
+ struct arm_smmu_master *master = dev_iommu_priv_get(dev);
+ struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+
+ if (!master || !master->sva_enabled)
+ return ERR_PTR(-ENODEV);
+
+ /* If bind() was already called for this {dev, mm} pair, reuse it. */
+ list_for_each_entry(bond, &master->bonds, list) {
+ if (bond->mm == mm) {
+ refcount_inc(&bond->refs);
+ return &bond->sva;
+ }
+ }
+
+ bond = kzalloc(sizeof(*bond), GFP_KERNEL);
+ if (!bond)
+ return ERR_PTR(-ENOMEM);
+
+ /* Allocate a PASID for this mm if necessary */
+ ret = iommu_sva_alloc_pasid(mm, 1, (1U << master->ssid_bits) - 1);
+ if (ret)
+ goto err_free_bond;
+
+ bond->mm = mm;
+ bond->sva.dev = dev;
+ refcount_set(&bond->refs, 1);
+
+ bond->smmu_mn = arm_smmu_mmu_notifier_get(smmu_domain, mm);
+ if (IS_ERR(bond->smmu_mn)) {
+ ret = PTR_ERR(bond->smmu_mn);
+ goto err_free_pasid;
+ }
+
+ list_add(&bond->list, &master->bonds);
+ return &bond->sva;
+
+err_free_pasid:
+ iommu_sva_free_pasid(mm);
+err_free_bond:
+ kfree(bond);
+ return ERR_PTR(ret);
+}
+
+struct iommu_sva *
+arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm, void *drvdata)
+{
+ struct iommu_sva *handle;
+ struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+
+ if (smmu_domain->stage != ARM_SMMU_DOMAIN_S1)
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&sva_lock);
+ handle = __arm_smmu_sva_bind(dev, mm);
+ mutex_unlock(&sva_lock);
+ return handle;
+}
+
+void arm_smmu_sva_unbind(struct iommu_sva *handle)
+{
+ struct arm_smmu_bond *bond = sva_to_bond(handle);
+
+ mutex_lock(&sva_lock);
+ if (refcount_dec_and_test(&bond->refs)) {
+ list_del(&bond->list);
+ arm_smmu_mmu_notifier_put(bond->smmu_mn);
+ iommu_sva_free_pasid(bond->mm);
+ kfree(bond);
+ }
+ mutex_unlock(&sva_lock);
+}
+
+u32 arm_smmu_sva_get_pasid(struct iommu_sva *handle)
+{
+ struct arm_smmu_bond *bond = sva_to_bond(handle);
+
+ return bond->mm->pasid;
+}
+
bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
{
unsigned long reg, fld;
@@ -246,3 +477,12 @@ int arm_smmu_master_disable_sva(struct arm_smmu_master *master)
return 0;
}
+
+void arm_smmu_sva_notifier_synchronize(void)
+{
+ /*
+ * Some MMU notifiers may still be waiting to be freed, using
+ * arm_smmu_mmu_notifier_free(). Wait for them.
+ */
+ mmu_notifier_synchronize();
+}
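Drivers reach these routines through the generic SVA API rather than calling the SMMU code directly: iommu_sva_bind_device() lands in arm_smmu_sva_bind() via the .sva_bind op wired up further down. A rough caller-side sketch (error handling trimmed):

#include <linux/iommu.h>
#include <linux/sched.h>

static int example_start_sva(struct device *dev)
{
	struct iommu_sva *handle;
	u32 pasid;

	handle = iommu_sva_bind_device(dev, current->mm, NULL);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	pasid = iommu_sva_get_pasid(handle);	/* tag DMA with this PASID */

	/* ... issue PASID-tagged DMA while the address space is live ... */

	iommu_sva_unbind_device(handle);
	return 0;
}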
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index e634bbe60573..8ca7415d785d 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -33,7 +33,7 @@
#include "arm-smmu-v3.h"
-static bool disable_bypass = 1;
+static bool disable_bypass = true;
module_param(disable_bypass, bool, 0444);
MODULE_PARM_DESC(disable_bypass,
"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
@@ -76,6 +76,12 @@ struct arm_smmu_option_prop {
DEFINE_XARRAY_ALLOC1(arm_smmu_asid_xa);
DEFINE_MUTEX(arm_smmu_asid_lock);
+/*
+ * Special value used by SVA when a process dies, to quiesce a CD without
+ * disabling it.
+ */
+struct arm_smmu_ctx_desc quiet_cd = { 0 };
+
static struct arm_smmu_option_prop arm_smmu_options[] = {
{ ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" },
{ ARM_SMMU_OPT_PAGE0_REGS_ONLY, "cavium,cn9900-broken-page1-regspace"},
@@ -91,11 +97,6 @@ static inline void __iomem *arm_smmu_page1_fixup(unsigned long offset,
return smmu->base + offset;
}
-static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
-{
- return container_of(dom, struct arm_smmu_domain, domain);
-}
-
static void parse_driver_options(struct arm_smmu_device *smmu)
{
int i = 0;
@@ -983,7 +984,9 @@ int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid,
* (2) Install a secondary CD, for SID+SSID traffic.
* (3) Update ASID of a CD. Atomically write the first 64 bits of the
* CD, then invalidate the old entry and mappings.
- * (4) Remove a secondary CD.
+ * (4) Quiesce the context without clearing the valid bit. Disable
+ * translation, and ignore any translation fault.
+ * (5) Remove a secondary CD.
*/
u64 val;
bool cd_live;
@@ -1000,8 +1003,10 @@ int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid,
val = le64_to_cpu(cdptr[0]);
cd_live = !!(val & CTXDESC_CD_0_V);
- if (!cd) { /* (4) */
+ if (!cd) { /* (5) */
val = 0;
+ } else if (cd == &quiet_cd) { /* (4) */
+ val |= CTXDESC_CD_0_TCR_EPD0;
} else if (cd_live) { /* (3) */
val &= ~CTXDESC_CD_0_ASID;
val |= FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid);
@@ -1519,6 +1524,20 @@ arm_smmu_atc_inv_to_cmd(int ssid, unsigned long iova, size_t size,
size_t inval_grain_shift = 12;
unsigned long page_start, page_end;
+ /*
+ * ATS and PASID:
+ *
+ * If substream_valid is clear, the PCIe TLP is sent without a PASID
+ * prefix. In that case all ATC entries within the address range are
+ * invalidated, including those that were requested with a PASID! There
+ * is no way to invalidate only entries without PASID.
+ *
+ * When using STRTAB_STE_1_S1DSS_SSID0 (reserving CD 0 for non-PASID
+ * traffic), translation requests without PASID create ATC entries
+ * without PASID, which must be invalidated with substream_valid clear.
+ * This has the unpleasant side-effect of invalidating all PASID-tagged
+ * ATC entries within the address range.
+ */
*cmd = (struct arm_smmu_cmdq_ent) {
.opcode = CMDQ_OP_ATC_INV,
.substream_valid = !!ssid,
@@ -1577,8 +1596,8 @@ static int arm_smmu_atc_inv_master(struct arm_smmu_master *master)
return arm_smmu_cmdq_issue_sync(master->smmu);
}
-static int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain,
- int ssid, unsigned long iova, size_t size)
+int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid,
+ unsigned long iova, size_t size)
{
int i;
unsigned long flags;
@@ -1741,16 +1760,9 @@ static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size,
arm_smmu_tlb_inv_range(iova, size, granule, false, cookie);
}
-static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size,
- size_t granule, void *cookie)
-{
- arm_smmu_tlb_inv_range(iova, size, granule, true, cookie);
-}
-
static const struct iommu_flush_ops arm_smmu_flush_ops = {
.tlb_flush_all = arm_smmu_tlb_inv_context,
.tlb_flush_walk = arm_smmu_tlb_inv_walk,
- .tlb_flush_leaf = arm_smmu_tlb_inv_leaf,
.tlb_add_page = arm_smmu_tlb_inv_page_nosync,
};
@@ -1794,6 +1806,7 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
mutex_init(&smmu_domain->init_mutex);
INIT_LIST_HEAD(&smmu_domain->devices);
spin_lock_init(&smmu_domain->devices_lock);
+ INIT_LIST_HEAD(&smmu_domain->mmu_notifiers);
return &smmu_domain->domain;
}
@@ -2589,6 +2602,9 @@ static struct iommu_ops arm_smmu_ops = {
.dev_feat_enabled = arm_smmu_dev_feature_enabled,
.dev_enable_feat = arm_smmu_dev_enable_feature,
.dev_disable_feat = arm_smmu_dev_disable_feature,
+ .sva_bind = arm_smmu_sva_bind,
+ .sva_unbind = arm_smmu_sva_unbind,
+ .sva_get_pasid = arm_smmu_sva_get_pasid,
.pgsize_bitmap = -1UL, /* Restricted during device attach */
};
@@ -3611,6 +3627,12 @@ static const struct of_device_id arm_smmu_of_match[] = {
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
+static void arm_smmu_driver_unregister(struct platform_driver *drv)
+{
+ arm_smmu_sva_notifier_synchronize();
+ platform_driver_unregister(drv);
+}
+
static struct platform_driver arm_smmu_driver = {
.driver = {
.name = "arm-smmu-v3",
@@ -3621,7 +3643,8 @@ static struct platform_driver arm_smmu_driver = {
.remove = arm_smmu_device_remove,
.shutdown = arm_smmu_device_shutdown,
};
-module_platform_driver(arm_smmu_driver);
+module_driver(arm_smmu_driver, platform_driver_register,
+ arm_smmu_driver_unregister);
MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations");
MODULE_AUTHOR("Will Deacon <will@kernel.org>");
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index d4b7f40ccb02..96c2e9565e00 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -678,15 +678,25 @@ struct arm_smmu_domain {
struct list_head devices;
spinlock_t devices_lock;
+
+ struct list_head mmu_notifiers;
};
+static inline struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
+{
+ return container_of(dom, struct arm_smmu_domain, domain);
+}
+
extern struct xarray arm_smmu_asid_xa;
extern struct mutex arm_smmu_asid_lock;
+extern struct arm_smmu_ctx_desc quiet_cd;
int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid,
struct arm_smmu_ctx_desc *cd);
void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid);
bool arm_smmu_free_asid(struct arm_smmu_ctx_desc *cd);
+int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid,
+ unsigned long iova, size_t size);
#ifdef CONFIG_ARM_SMMU_V3_SVA
bool arm_smmu_sva_supported(struct arm_smmu_device *smmu);
@@ -694,6 +704,11 @@ bool arm_smmu_master_sva_supported(struct arm_smmu_master *master);
bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master);
int arm_smmu_master_enable_sva(struct arm_smmu_master *master);
int arm_smmu_master_disable_sva(struct arm_smmu_master *master);
+struct iommu_sva *arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm,
+ void *drvdata);
+void arm_smmu_sva_unbind(struct iommu_sva *handle);
+u32 arm_smmu_sva_get_pasid(struct iommu_sva *handle);
+void arm_smmu_sva_notifier_synchronize(void);
#else /* CONFIG_ARM_SMMU_V3_SVA */
static inline bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
{
@@ -719,5 +734,20 @@ static inline int arm_smmu_master_disable_sva(struct arm_smmu_master *master)
{
return -ENODEV;
}
+
+static inline struct iommu_sva *
+arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm, void *drvdata)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline void arm_smmu_sva_unbind(struct iommu_sva *handle) {}
+
+static inline u32 arm_smmu_sva_get_pasid(struct iommu_sva *handle)
+{
+ return IOMMU_PASID_INVALID;
+}
+
+static inline void arm_smmu_sva_notifier_synchronize(void) {}
#endif /* CONFIG_ARM_SMMU_V3_SVA */
#endif /* _ARM_SMMU_V3_H */
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c b/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c
index 88f17cc33023..136872e77195 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c
@@ -12,7 +12,7 @@
static int arm_smmu_gr0_ns(int offset)
{
- switch(offset) {
+ switch (offset) {
case ARM_SMMU_GR0_sCR0:
case ARM_SMMU_GR0_sACR:
case ARM_SMMU_GR0_sGFSR:
@@ -91,15 +91,12 @@ static struct arm_smmu_device *cavium_smmu_impl_init(struct arm_smmu_device *smm
{
struct cavium_smmu *cs;
- cs = devm_kzalloc(smmu->dev, sizeof(*cs), GFP_KERNEL);
+ cs = devm_krealloc(smmu->dev, smmu, sizeof(*cs), GFP_KERNEL);
if (!cs)
return ERR_PTR(-ENOMEM);
- cs->smmu = *smmu;
cs->smmu.impl = &cavium_impl;
- devm_kfree(smmu->dev, smmu);
-
return &cs->smmu;
}
@@ -217,11 +214,7 @@ struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu)
if (of_device_is_compatible(np, "nvidia,tegra194-smmu"))
return nvidia_smmu_impl_init(smmu);
- if (of_device_is_compatible(np, "qcom,sdm845-smmu-500") ||
- of_device_is_compatible(np, "qcom,sc7180-smmu-500") ||
- of_device_is_compatible(np, "qcom,sm8150-smmu-500") ||
- of_device_is_compatible(np, "qcom,sm8250-smmu-500"))
- return qcom_smmu_impl_init(smmu);
+ smmu = qcom_smmu_impl_init(smmu);
if (of_device_is_compatible(np, "marvell,ap806-smmu-500"))
smmu->impl = &mrvl_mmu500_impl;
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c b/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
index 31368057e9be..29117444e5a0 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
@@ -242,18 +242,10 @@ struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu)
struct nvidia_smmu *nvidia_smmu;
struct platform_device *pdev = to_platform_device(dev);
- nvidia_smmu = devm_kzalloc(dev, sizeof(*nvidia_smmu), GFP_KERNEL);
+ nvidia_smmu = devm_krealloc(dev, smmu, sizeof(*nvidia_smmu), GFP_KERNEL);
if (!nvidia_smmu)
return ERR_PTR(-ENOMEM);
- /*
- * Copy the data from struct arm_smmu_device *smmu allocated in
- * arm-smmu.c. The smmu from struct nvidia_smmu replaces the smmu
- * pointer used in arm-smmu.c once this function returns.
- * This is necessary to derive nvidia_smmu from smmu pointer passed
- * through arm_smmu_impl function calls subsequently.
- */
- nvidia_smmu->smmu = *smmu;
/* Instance 0 is ioremapped by arm-smmu.c. */
nvidia_smmu->bases[0] = smmu->base;
@@ -267,12 +259,5 @@ struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu)
nvidia_smmu->smmu.impl = &nvidia_smmu_impl;
- /*
- * Free the struct arm_smmu_device *smmu allocated in arm-smmu.c.
- * Once this function returns, arm-smmu.c would use arm_smmu_device
- * allocated as part of struct nvidia_smmu.
- */
- devm_kfree(dev, smmu);
-
return &nvidia_smmu->smmu;
}
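The Cavium, NVIDIA, and Qualcomm impl-init paths (the Qualcomm conversion follows below) shared the same awkward dance: allocate a larger wrapper, copy *smmu into it by value, free the original. devm_krealloc() collapses that into one call that grows the existing devres allocation in place, preserving its contents and release ordering. The pattern, as a sketch with hypothetical names:

#include <linux/device.h>
#include <linux/err.h>
#include "arm-smmu.h"

/* The embedded smmu must stay the first member so the reallocated
 * block's existing bytes line up with it. */
struct example_smmu {
	struct arm_smmu_device smmu;
	int extra_state;
};

static const struct arm_smmu_impl example_impl;	/* hypothetical ops */

static struct arm_smmu_device *example_impl_init(struct arm_smmu_device *smmu)
{
	struct example_smmu *es;

	/* Grow the allocation in place; old contents are preserved. */
	es = devm_krealloc(smmu->dev, smmu, sizeof(*es), GFP_KERNEL);
	if (!es)
		return ERR_PTR(-ENOMEM);

	es->smmu.impl = &example_impl;
	return &es->smmu;
}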
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index 702fbaa6c9ad..bcda17012aee 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -3,6 +3,7 @@
* Copyright (c) 2019, The Linux Foundation. All rights reserved.
*/
+#include <linux/adreno-smmu-priv.h>
#include <linux/of_device.h>
#include <linux/qcom_scm.h>
@@ -10,8 +11,155 @@
struct qcom_smmu {
struct arm_smmu_device smmu;
+ bool bypass_quirk;
+ u8 bypass_cbndx;
};
+static struct qcom_smmu *to_qcom_smmu(struct arm_smmu_device *smmu)
+{
+ return container_of(smmu, struct qcom_smmu, smmu);
+}
+
+static void qcom_adreno_smmu_write_sctlr(struct arm_smmu_device *smmu, int idx,
+ u32 reg)
+{
+ /*
+ * On the GPU device we want to process subsequent transactions after a
+ * fault to keep the GPU from hanging
+ */
+ reg |= ARM_SMMU_SCTLR_HUPCF;
+
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
+}
+
+#define QCOM_ADRENO_SMMU_GPU_SID 0
+
+static bool qcom_adreno_smmu_is_gpu_device(struct device *dev)
+{
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+ int i;
+
+ /*
+ * The GPU will always use SID 0 so that is a handy way to uniquely
+ * identify it and configure it for per-instance pagetables
+ */
+ for (i = 0; i < fwspec->num_ids; i++) {
+ u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);
+
+ if (sid == QCOM_ADRENO_SMMU_GPU_SID)
+ return true;
+ }
+
+ return false;
+}
+
+static const struct io_pgtable_cfg *qcom_adreno_smmu_get_ttbr1_cfg(
+ const void *cookie)
+{
+ struct arm_smmu_domain *smmu_domain = (void *)cookie;
+ struct io_pgtable *pgtable =
+ io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops);
+ return &pgtable->cfg;
+}
+
+/*
+ * Local implementation to configure TTBR0 with the specified pagetable config.
+ * The GPU driver will call this to enable TTBR0 when per-instance pagetables
+ * are active
+ */
+
+static int qcom_adreno_smmu_set_ttbr0_cfg(const void *cookie,
+ const struct io_pgtable_cfg *pgtbl_cfg)
+{
+ struct arm_smmu_domain *smmu_domain = (void *)cookie;
+ struct io_pgtable *pgtable = io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops);
+ struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+ struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];
+
+ /* The domain must have split pagetables already enabled */
+ if (cb->tcr[0] & ARM_SMMU_TCR_EPD1)
+ return -EINVAL;
+
+ /* If the pagetable config is NULL, disable TTBR0 */
+ if (!pgtbl_cfg) {
+ /* Do nothing if it is already disabled */
+ if ((cb->tcr[0] & ARM_SMMU_TCR_EPD0))
+ return -EINVAL;
+
+ /* Set TCR to the original configuration */
+ cb->tcr[0] = arm_smmu_lpae_tcr(&pgtable->cfg);
+ cb->ttbr[0] = FIELD_PREP(ARM_SMMU_TTBRn_ASID, cb->cfg->asid);
+ } else {
+ u32 tcr = cb->tcr[0];
+
+ /* Don't call this again if TTBR0 is already enabled */
+ if (!(cb->tcr[0] & ARM_SMMU_TCR_EPD0))
+ return -EINVAL;
+
+ tcr |= arm_smmu_lpae_tcr(pgtbl_cfg);
+ tcr &= ~(ARM_SMMU_TCR_EPD0 | ARM_SMMU_TCR_EPD1);
+
+ cb->tcr[0] = tcr;
+ cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
+ cb->ttbr[0] |= FIELD_PREP(ARM_SMMU_TTBRn_ASID, cb->cfg->asid);
+ }
+
+ arm_smmu_write_context_bank(smmu_domain->smmu, cb->cfg->cbndx);
+
+ return 0;
+}
+
+static int qcom_adreno_smmu_alloc_context_bank(struct arm_smmu_domain *smmu_domain,
+ struct arm_smmu_device *smmu,
+ struct device *dev, int start)
+{
+ int count;
+
+ /*
+ * Assign context bank 0 to the GPU device so the GPU hardware can
+ * switch pagetables
+ */
+ if (qcom_adreno_smmu_is_gpu_device(dev)) {
+ start = 0;
+ count = 1;
+ } else {
+ start = 1;
+ count = smmu->num_context_banks;
+ }
+
+ return __arm_smmu_alloc_bitmap(smmu->context_map, start, count);
+}
+
+static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
+ struct io_pgtable_cfg *pgtbl_cfg, struct device *dev)
+{
+ struct adreno_smmu_priv *priv;
+
+ /* Only enable split pagetables for the GPU device (SID 0) */
+ if (!qcom_adreno_smmu_is_gpu_device(dev))
+ return 0;
+
+ /*
+ * All targets that use the qcom,adreno-smmu compatible string *should*
+ * be AARCH64 stage 1 but double check because the arm-smmu code assumes
+ * that is the case when the TTBR1 quirk is enabled
+ */
+ if ((smmu_domain->stage == ARM_SMMU_DOMAIN_S1) &&
+ (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64))
+ pgtbl_cfg->quirks |= IO_PGTABLE_QUIRK_ARM_TTBR1;
+
+ /*
+ * Initialize private interface with GPU:
+ */
+
+ priv = dev_get_drvdata(dev);
+ priv->cookie = smmu_domain;
+ priv->get_ttbr1_cfg = qcom_adreno_smmu_get_ttbr1_cfg;
+ priv->set_ttbr0_cfg = qcom_adreno_smmu_set_ttbr0_cfg;
+
+ return 0;
+}
+
static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
{ .compatible = "qcom,adreno" },
{ .compatible = "qcom,mdp4" },
@@ -23,6 +171,89 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
{ }
};
+static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu)
+{
+ unsigned int last_s2cr = ARM_SMMU_GR0_S2CR(smmu->num_mapping_groups - 1);
+ struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
+ u32 reg;
+ u32 smr;
+ int i;
+
+ /*
+ * With some firmware versions writes to S2CR of type FAULT are
+ * ignored, and writing BYPASS will end up written as FAULT in the
+ * register. Perform a write to S2CR to detect if this is the case and
+ * if so reserve a context bank to emulate bypass streams.
+ */
+ reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, S2CR_TYPE_BYPASS) |
+ FIELD_PREP(ARM_SMMU_S2CR_CBNDX, 0xff) |
+ FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, S2CR_PRIVCFG_DEFAULT);
+ arm_smmu_gr0_write(smmu, last_s2cr, reg);
+ reg = arm_smmu_gr0_read(smmu, last_s2cr);
+ if (FIELD_GET(ARM_SMMU_S2CR_TYPE, reg) != S2CR_TYPE_BYPASS) {
+ qsmmu->bypass_quirk = true;
+ qsmmu->bypass_cbndx = smmu->num_context_banks - 1;
+
+ set_bit(qsmmu->bypass_cbndx, smmu->context_map);
+
+ arm_smmu_cb_write(smmu, qsmmu->bypass_cbndx, ARM_SMMU_CB_SCTLR, 0);
+
+ reg = FIELD_PREP(ARM_SMMU_CBAR_TYPE, CBAR_TYPE_S1_TRANS_S2_BYPASS);
+ arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(qsmmu->bypass_cbndx), reg);
+ }
+
+ for (i = 0; i < smmu->num_mapping_groups; i++) {
+ smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));
+
+ if (FIELD_GET(ARM_SMMU_SMR_VALID, smr)) {
+ smmu->smrs[i].id = FIELD_GET(ARM_SMMU_SMR_ID, smr);
+ smmu->smrs[i].mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr);
+ smmu->smrs[i].valid = true;
+
+ smmu->s2crs[i].type = S2CR_TYPE_BYPASS;
+ smmu->s2crs[i].privcfg = S2CR_PRIVCFG_DEFAULT;
+ smmu->s2crs[i].cbndx = 0xff;
+ }
+ }
+
+ return 0;
+}
+
+static void qcom_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
+{
+ struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
+ struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
+ u32 cbndx = s2cr->cbndx;
+ u32 type = s2cr->type;
+ u32 reg;
+
+ if (qsmmu->bypass_quirk) {
+ if (type == S2CR_TYPE_BYPASS) {
+ /*
+ * Firmware with quirky S2CR handling will substitute
+ * BYPASS writes with FAULT, so point the stream to the
+ * reserved context bank and ask for translation on the
+ * stream
+ */
+ type = S2CR_TYPE_TRANS;
+ cbndx = qsmmu->bypass_cbndx;
+ } else if (type == S2CR_TYPE_FAULT) {
+ /*
+ * Firmware with quirky S2CR handling will ignore FAULT
+ * writes, so trick it to write FAULT by asking for a
+ * BYPASS.
+ */
+ type = S2CR_TYPE_BYPASS;
+ cbndx = 0xff;
+ }
+ }
+
+ reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, type) |
+ FIELD_PREP(ARM_SMMU_S2CR_CBNDX, cbndx) |
+ FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, s2cr->privcfg);
+ arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg);
+}
+
static int qcom_smmu_def_domain_type(struct device *dev)
{
const struct of_device_id *match =
@@ -61,11 +292,22 @@ static int qcom_smmu500_reset(struct arm_smmu_device *smmu)
}
static const struct arm_smmu_impl qcom_smmu_impl = {
+ .cfg_probe = qcom_smmu_cfg_probe,
.def_domain_type = qcom_smmu_def_domain_type,
.reset = qcom_smmu500_reset,
+ .write_s2cr = qcom_smmu_write_s2cr,
};
-struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu)
+static const struct arm_smmu_impl qcom_adreno_smmu_impl = {
+ .init_context = qcom_adreno_smmu_init_context,
+ .def_domain_type = qcom_smmu_def_domain_type,
+ .reset = qcom_smmu500_reset,
+ .alloc_context_bank = qcom_adreno_smmu_alloc_context_bank,
+ .write_sctlr = qcom_adreno_smmu_write_sctlr,
+};
+
+static struct arm_smmu_device *qcom_smmu_create(struct arm_smmu_device *smmu,
+ const struct arm_smmu_impl *impl)
{
struct qcom_smmu *qsmmu;
@@ -73,14 +315,34 @@ struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu)
if (!qcom_scm_is_available())
return ERR_PTR(-EPROBE_DEFER);
- qsmmu = devm_kzalloc(smmu->dev, sizeof(*qsmmu), GFP_KERNEL);
+ qsmmu = devm_krealloc(smmu->dev, smmu, sizeof(*qsmmu), GFP_KERNEL);
if (!qsmmu)
return ERR_PTR(-ENOMEM);
- qsmmu->smmu = *smmu;
-
- qsmmu->smmu.impl = &qcom_smmu_impl;
- devm_kfree(smmu->dev, smmu);
+ qsmmu->smmu.impl = impl;
return &qsmmu->smmu;
}
+
+static const struct of_device_id __maybe_unused qcom_smmu_impl_of_match[] = {
+ { .compatible = "qcom,msm8998-smmu-v2" },
+ { .compatible = "qcom,sc7180-smmu-500" },
+ { .compatible = "qcom,sdm630-smmu-v2" },
+ { .compatible = "qcom,sdm845-smmu-500" },
+ { .compatible = "qcom,sm8150-smmu-500" },
+ { .compatible = "qcom,sm8250-smmu-500" },
+ { }
+};
+
+struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu)
+{
+ const struct device_node *np = smmu->dev->of_node;
+
+ if (of_match_node(qcom_smmu_impl_of_match, np))
+ return qcom_smmu_create(smmu, &qcom_smmu_impl);
+
+ if (of_device_is_compatible(np, "qcom,adreno-smmu"))
+ return qcom_smmu_create(smmu, &qcom_adreno_smmu_impl);
+
+ return smmu;
+}
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index dad7fa86fbd4..d8c6bfde6a61 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -333,14 +333,6 @@ static void arm_smmu_tlb_inv_walk_s1(unsigned long iova, size_t size,
arm_smmu_tlb_sync_context(cookie);
}
-static void arm_smmu_tlb_inv_leaf_s1(unsigned long iova, size_t size,
- size_t granule, void *cookie)
-{
- arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie,
- ARM_SMMU_CB_S1_TLBIVAL);
- arm_smmu_tlb_sync_context(cookie);
-}
-
static void arm_smmu_tlb_add_page_s1(struct iommu_iotlb_gather *gather,
unsigned long iova, size_t granule,
void *cookie)
@@ -357,14 +349,6 @@ static void arm_smmu_tlb_inv_walk_s2(unsigned long iova, size_t size,
arm_smmu_tlb_sync_context(cookie);
}
-static void arm_smmu_tlb_inv_leaf_s2(unsigned long iova, size_t size,
- size_t granule, void *cookie)
-{
- arm_smmu_tlb_inv_range_s2(iova, size, granule, cookie,
- ARM_SMMU_CB_S2_TLBIIPAS2L);
- arm_smmu_tlb_sync_context(cookie);
-}
-
static void arm_smmu_tlb_add_page_s2(struct iommu_iotlb_gather *gather,
unsigned long iova, size_t granule,
void *cookie)
@@ -373,8 +357,8 @@ static void arm_smmu_tlb_add_page_s2(struct iommu_iotlb_gather *gather,
ARM_SMMU_CB_S2_TLBIIPAS2L);
}
-static void arm_smmu_tlb_inv_any_s2_v1(unsigned long iova, size_t size,
- size_t granule, void *cookie)
+static void arm_smmu_tlb_inv_walk_s2_v1(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
{
arm_smmu_tlb_inv_context_s2(cookie);
}
@@ -401,21 +385,18 @@ static void arm_smmu_tlb_add_page_s2_v1(struct iommu_iotlb_gather *gather,
static const struct iommu_flush_ops arm_smmu_s1_tlb_ops = {
.tlb_flush_all = arm_smmu_tlb_inv_context_s1,
.tlb_flush_walk = arm_smmu_tlb_inv_walk_s1,
- .tlb_flush_leaf = arm_smmu_tlb_inv_leaf_s1,
.tlb_add_page = arm_smmu_tlb_add_page_s1,
};
static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
.tlb_flush_all = arm_smmu_tlb_inv_context_s2,
.tlb_flush_walk = arm_smmu_tlb_inv_walk_s2,
- .tlb_flush_leaf = arm_smmu_tlb_inv_leaf_s2,
.tlb_add_page = arm_smmu_tlb_add_page_s2,
};
static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
.tlb_flush_all = arm_smmu_tlb_inv_context_s2,
- .tlb_flush_walk = arm_smmu_tlb_inv_any_s2_v1,
- .tlb_flush_leaf = arm_smmu_tlb_inv_any_s2_v1,
+ .tlb_flush_walk = arm_smmu_tlb_inv_walk_s2_v1,
.tlb_add_page = arm_smmu_tlb_add_page_s2_v1,
};
@@ -617,7 +598,10 @@ void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
reg |= ARM_SMMU_SCTLR_E;
- arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
+ if (smmu->impl && smmu->impl->write_sctlr)
+ smmu->impl->write_sctlr(smmu, idx, reg);
+ else
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
}
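The new write_sctlr hook (and write_s2cr below) follows the driver's optional-override convention: check impl and the specific callback, otherwise take the generic path. A reduced sketch of that dispatch, with illustrative names:

struct device_ctx;

struct impl_ops {
	void (*write_reg)(struct device_ctx *d, int idx, unsigned int val);
};

struct device_ctx {
	const struct impl_ops *impl;
};

static void generic_write_reg(struct device_ctx *d, int idx, unsigned int val)
{
	/* the common MMIO write path */
}

static void write_reg(struct device_ctx *d, int idx, unsigned int val)
{
	if (d->impl && d->impl->write_reg)
		d->impl->write_reg(d, idx, val); /* quirk override */
	else
		generic_write_reg(d, idx, val);  /* default path */
}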
static int arm_smmu_alloc_context_bank(struct arm_smmu_domain *smmu_domain,
@@ -783,8 +767,8 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
goto out_clear_smmu;
}
- if (smmu_domain->non_strict)
- pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
+ if (smmu_domain->pgtbl_cfg.quirks)
+ pgtbl_cfg.quirks |= smmu_domain->pgtbl_cfg.quirks;
pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
if (!pgtbl_ops) {
@@ -929,9 +913,16 @@ static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
{
struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
- u32 reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, s2cr->type) |
- FIELD_PREP(ARM_SMMU_S2CR_CBNDX, s2cr->cbndx) |
- FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, s2cr->privcfg);
+ u32 reg;
+
+ if (smmu->impl && smmu->impl->write_s2cr) {
+ smmu->impl->write_s2cr(smmu, idx);
+ return;
+ }
+
+ reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, s2cr->type) |
+ FIELD_PREP(ARM_SMMU_S2CR_CBNDX, s2cr->cbndx) |
+ FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, s2cr->privcfg);
if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
smmu->smrs[idx].valid)
@@ -1501,15 +1492,24 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
case DOMAIN_ATTR_NESTING:
*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
return 0;
+ case DOMAIN_ATTR_IO_PGTABLE_CFG: {
+ struct io_pgtable_domain_attr *pgtbl_cfg = data;
+ *pgtbl_cfg = smmu_domain->pgtbl_cfg;
+
+ return 0;
+ }
default:
return -ENODEV;
}
break;
case IOMMU_DOMAIN_DMA:
switch (attr) {
- case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
- *(int *)data = smmu_domain->non_strict;
+ case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE: {
+ bool non_strict = smmu_domain->pgtbl_cfg.quirks &
+ IO_PGTABLE_QUIRK_NON_STRICT;
+ *(int *)data = non_strict;
return 0;
+ }
default:
return -ENODEV;
}
@@ -1541,6 +1541,17 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
else
smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
break;
+ case DOMAIN_ATTR_IO_PGTABLE_CFG: {
+ struct io_pgtable_domain_attr *pgtbl_cfg = data;
+
+ if (smmu_domain->smmu) {
+ ret = -EPERM;
+ goto out_unlock;
+ }
+
+ smmu_domain->pgtbl_cfg = *pgtbl_cfg;
+ break;
+ }
default:
ret = -ENODEV;
}
@@ -1548,7 +1559,10 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
case IOMMU_DOMAIN_DMA:
switch (attr) {
case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
- smmu_domain->non_strict = *(int *)data;
+ if (*(int *)data)
+ smmu_domain->pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
+ else
+ smmu_domain->pgtbl_cfg.quirks &= ~IO_PGTABLE_QUIRK_NON_STRICT;
break;
default:
ret = -ENODEV;
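A hedged usage sketch of the attribute plumbing above, from the consumer side: requesting non-strict (flush-queue) invalidation on a DMA domain. iommu_domain_set_attr() and DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE are the interfaces this hunk implements; DOMAIN_ATTR_IO_PGTABLE_CFG likewise must be set before the domain is attached, or the driver returns -EPERM.

#include <linux/iommu.h>

/* Sketch: opt a DMA domain into the IOVA flush queue. Error handling
 * abbreviated. */
static int request_non_strict(struct iommu_domain *domain)
{
	int enable = 1;

	return iommu_domain_set_attr(domain,
				     DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
				     &enable);
}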
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.h b/drivers/iommu/arm/arm-smmu/arm-smmu.h
index 1a746476927c..d2a2d1bc58ba 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.h
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.h
@@ -144,6 +144,7 @@ enum arm_smmu_cbar_type {
#define ARM_SMMU_CB_SCTLR 0x0
#define ARM_SMMU_SCTLR_S1_ASIDPNE BIT(12)
#define ARM_SMMU_SCTLR_CFCFG BIT(7)
+#define ARM_SMMU_SCTLR_HUPCF BIT(8)
#define ARM_SMMU_SCTLR_CFIE BIT(6)
#define ARM_SMMU_SCTLR_CFRE BIT(5)
#define ARM_SMMU_SCTLR_E BIT(4)
@@ -363,10 +364,10 @@ enum arm_smmu_domain_stage {
struct arm_smmu_domain {
struct arm_smmu_device *smmu;
struct io_pgtable_ops *pgtbl_ops;
+ struct io_pgtable_domain_attr pgtbl_cfg;
const struct iommu_flush_ops *flush_ops;
struct arm_smmu_cfg cfg;
enum arm_smmu_domain_stage stage;
- bool non_strict;
struct mutex init_mutex; /* Protects smmu pointer */
spinlock_t cb_lock; /* Serialises ATS1* ops and TLB syncs */
struct iommu_domain domain;
@@ -436,6 +437,8 @@ struct arm_smmu_impl {
int (*alloc_context_bank)(struct arm_smmu_domain *smmu_domain,
struct arm_smmu_device *smmu,
struct device *dev, int start);
+ void (*write_s2cr)(struct arm_smmu_device *smmu, int idx);
+ void (*write_sctlr)(struct arm_smmu_device *smmu, int idx, u32 reg);
};
#define INVALID_SMENDX -1
diff --git a/drivers/iommu/arm/arm-smmu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
index b30d6c966e2c..7f280c8d5c53 100644
--- a/drivers/iommu/arm/arm-smmu/qcom_iommu.c
+++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
@@ -185,13 +185,6 @@ static void qcom_iommu_tlb_flush_walk(unsigned long iova, size_t size,
qcom_iommu_tlb_sync(cookie);
}
-static void qcom_iommu_tlb_flush_leaf(unsigned long iova, size_t size,
- size_t granule, void *cookie)
-{
- qcom_iommu_tlb_inv_range_nosync(iova, size, granule, true, cookie);
- qcom_iommu_tlb_sync(cookie);
-}
-
static void qcom_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
unsigned long iova, size_t granule,
void *cookie)
@@ -202,7 +195,6 @@ static void qcom_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
static const struct iommu_flush_ops qcom_flush_ops = {
.tlb_flush_all = qcom_iommu_tlb_inv_context,
.tlb_flush_walk = qcom_iommu_tlb_flush_walk,
- .tlb_flush_leaf = qcom_iommu_tlb_flush_leaf,
.tlb_add_page = qcom_iommu_tlb_add_page,
};
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 0cbcd3fc3e7e..4078358ed66e 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -20,9 +20,11 @@
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pci.h>
+#include <linux/swiotlb.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
+#include <linux/dma-direct.h>
struct iommu_dma_msi_page {
struct list_head list;
@@ -49,6 +51,27 @@ struct iommu_dma_cookie {
struct iommu_domain *fq_domain;
};
+void iommu_dma_free_cpu_cached_iovas(unsigned int cpu,
+ struct iommu_domain *domain)
+{
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iova_domain *iovad = &cookie->iovad;
+
+ free_cpu_cached_iovas(cpu, iovad);
+}
+
+static void iommu_dma_entry_dtor(unsigned long data)
+{
+ struct page *freelist = (struct page *)data;
+
+ while (freelist) {
+ unsigned long p = (unsigned long)page_address(freelist);
+
+ freelist = freelist->freelist;
+ free_page(p);
+ }
+}
+
static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
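iommu_dma_entry_dtor pairs with the queue_iova() change further down: page-table pages freed by an unmap are chained through page->freelist and stay allocated until the flush queue has flushed the IOTLB. A standalone sketch of that deferred-free walk, with a plain struct standing in for struct page:

#include <stdlib.h>

struct fake_page {
	struct fake_page *freelist; /* next page in the chain */
};

/* Runs once the flush queue guarantees no IOTLB entry can still
 * reference these pages. */
static void entry_dtor(unsigned long data)
{
	struct fake_page *p = (struct fake_page *)data;

	while (p) {
		struct fake_page *next = p->freelist;

		free(p);
		p = next;
	}
}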
@@ -343,7 +366,7 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
if (!cookie->fq_domain && !iommu_domain_get_attr(domain,
DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
if (init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all,
- NULL))
+ iommu_dma_entry_dtor))
pr_warn("iova flush queue initialization failed\n");
else
cookie->fq_domain = domain;
@@ -440,7 +463,7 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
}
static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
- dma_addr_t iova, size_t size)
+ dma_addr_t iova, size_t size, struct page *freelist)
{
struct iova_domain *iovad = &cookie->iovad;
@@ -449,7 +472,8 @@ static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
cookie->msi_iova -= size;
else if (cookie->fq_domain) /* non-strict mode */
queue_iova(iovad, iova_pfn(iovad, iova),
- size >> iova_shift(iovad), 0);
+ size >> iova_shift(iovad),
+ (unsigned long)freelist);
else
free_iova_fast(iovad, iova_pfn(iovad, iova),
size >> iova_shift(iovad));
@@ -474,7 +498,32 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
if (!cookie->fq_domain)
iommu_iotlb_sync(domain, &iotlb_gather);
- iommu_dma_free_iova(cookie, dma_addr, size);
+ iommu_dma_free_iova(cookie, dma_addr, size, iotlb_gather.freelist);
+}
+
+static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct iommu_domain *domain = iommu_get_dma_domain(dev);
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iova_domain *iovad = &cookie->iovad;
+ phys_addr_t phys;
+
+ phys = iommu_iova_to_phys(domain, dma_addr);
+ if (WARN_ON(!phys))
+ return;
+
+ __iommu_dma_unmap(dev, dma_addr, size);
+
+ if (unlikely(is_swiotlb_buffer(phys)))
+ swiotlb_tbl_unmap_single(dev, phys, size,
+ iova_align(iovad, size), dir, attrs);
+}
+
+static bool dev_is_untrusted(struct device *dev)
+{
+ return dev_is_pci(dev) && to_pci_dev(dev)->untrusted;
}
static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
@@ -496,12 +545,60 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
return DMA_MAPPING_ERROR;
if (iommu_map_atomic(domain, iova, phys - iova_off, size, prot)) {
- iommu_dma_free_iova(cookie, iova, size);
+ iommu_dma_free_iova(cookie, iova, size, NULL);
return DMA_MAPPING_ERROR;
}
return iova + iova_off;
}
+static dma_addr_t __iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys,
+ size_t org_size, dma_addr_t dma_mask, bool coherent,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+ int prot = dma_info_to_prot(dir, coherent, attrs);
+ struct iommu_domain *domain = iommu_get_dma_domain(dev);
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iova_domain *iovad = &cookie->iovad;
+ size_t aligned_size = org_size;
+ void *padding_start;
+ size_t padding_size;
+ dma_addr_t iova;
+
+ /*
+ * If both the physical buffer start address and size are
+ * page aligned, we don't need to use a bounce page.
+ */
+ if (IS_ENABLED(CONFIG_SWIOTLB) && dev_is_untrusted(dev) &&
+ iova_offset(iovad, phys | org_size)) {
+ aligned_size = iova_align(iovad, org_size);
+ phys = swiotlb_tbl_map_single(dev, phys, org_size,
+ aligned_size, dir, attrs);
+
+ if (phys == DMA_MAPPING_ERROR)
+ return DMA_MAPPING_ERROR;
+
+ /* Clean up the padding area. */
+ padding_start = phys_to_virt(phys);
+ padding_size = aligned_size;
+
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+ (dir == DMA_TO_DEVICE ||
+ dir == DMA_BIDIRECTIONAL)) {
+ padding_start += org_size;
+ padding_size -= org_size;
+ }
+
+ memset(padding_start, 0, padding_size);
+ }
+
+ iova = __iommu_dma_map(dev, phys, aligned_size, prot, dma_mask);
+ if ((iova == DMA_MAPPING_ERROR) && is_swiotlb_buffer(phys))
+ swiotlb_tbl_unmap_single(dev, phys, org_size,
+ aligned_size, dir, attrs);
+
+ return iova;
+}
+
static void __iommu_dma_free_pages(struct page **pages, int count)
{
while (count--)
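The bounce decision above hinges on iova_offset(iovad, phys | org_size): OR-ing the address and the size merges their low bits, so a single mask test detects misalignment of either. A self-contained illustration with a fixed 4 KiB granule:

#include <assert.h>
#include <stdint.h>

#define GRANULE 4096u

static int needs_bounce(uint64_t phys, uint64_t size)
{
	/* Non-zero low bits in either operand survive the OR. */
	return ((phys | size) & (GRANULE - 1)) != 0;
}

int main(void)
{
	assert(!needs_bounce(0x10000, 0x2000)); /* both aligned */
	assert(needs_bounce(0x10010, 0x2000));  /* address misaligned */
	assert(needs_bounce(0x10000, 0x1f00));  /* size misaligned */
	return 0;
}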
@@ -649,37 +746,26 @@ out_unmap:
out_free_sg:
sg_free_table(&sgt);
out_free_iova:
- iommu_dma_free_iova(cookie, iova, size);
+ iommu_dma_free_iova(cookie, iova, size, NULL);
out_free_pages:
__iommu_dma_free_pages(pages, count);
return NULL;
}
-/**
- * __iommu_dma_mmap - Map a buffer into provided user VMA
- * @pages: Array representing buffer from __iommu_dma_alloc()
- * @size: Size of buffer in bytes
- * @vma: VMA describing requested userspace mapping
- *
- * Maps the pages of the buffer in @pages into @vma. The caller is responsible
- * for verifying the correct size and protection of @vma beforehand.
- */
-static int __iommu_dma_mmap(struct page **pages, size_t size,
- struct vm_area_struct *vma)
-{
- return vm_map_pages(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
-}
-
static void iommu_dma_sync_single_for_cpu(struct device *dev,
dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
phys_addr_t phys;
- if (dev_is_dma_coherent(dev))
+ if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
return;
phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
- arch_sync_dma_for_cpu(phys, size, dir);
+ if (!dev_is_dma_coherent(dev))
+ arch_sync_dma_for_cpu(phys, size, dir);
+
+ if (is_swiotlb_buffer(phys))
+ swiotlb_tbl_sync_single(dev, phys, size, dir, SYNC_FOR_CPU);
}
static void iommu_dma_sync_single_for_device(struct device *dev,
@@ -687,11 +773,15 @@ static void iommu_dma_sync_single_for_device(struct device *dev,
{
phys_addr_t phys;
- if (dev_is_dma_coherent(dev))
+ if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
return;
phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
- arch_sync_dma_for_device(phys, size, dir);
+ if (is_swiotlb_buffer(phys))
+ swiotlb_tbl_sync_single(dev, phys, size, dir, SYNC_FOR_DEVICE);
+
+ if (!dev_is_dma_coherent(dev))
+ arch_sync_dma_for_device(phys, size, dir);
}
static void iommu_dma_sync_sg_for_cpu(struct device *dev,
@@ -701,11 +791,17 @@ static void iommu_dma_sync_sg_for_cpu(struct device *dev,
struct scatterlist *sg;
int i;
- if (dev_is_dma_coherent(dev))
+ if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
return;
- for_each_sg(sgl, sg, nelems, i)
- arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
+ for_each_sg(sgl, sg, nelems, i) {
+ if (!dev_is_dma_coherent(dev))
+ arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
+
+ if (is_swiotlb_buffer(sg_phys(sg)))
+ swiotlb_tbl_sync_single(dev, sg_phys(sg), sg->length,
+ dir, SYNC_FOR_CPU);
+ }
}
static void iommu_dma_sync_sg_for_device(struct device *dev,
@@ -715,11 +811,17 @@ static void iommu_dma_sync_sg_for_device(struct device *dev,
struct scatterlist *sg;
int i;
- if (dev_is_dma_coherent(dev))
+ if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
return;
- for_each_sg(sgl, sg, nelems, i)
- arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
+ for_each_sg(sgl, sg, nelems, i) {
+ if (is_swiotlb_buffer(sg_phys(sg)))
+ swiotlb_tbl_sync_single(dev, sg_phys(sg), sg->length,
+ dir, SYNC_FOR_DEVICE);
+
+ if (!dev_is_dma_coherent(dev))
+ arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
+ }
}
static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
@@ -728,10 +830,10 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
{
phys_addr_t phys = page_to_phys(page) + offset;
bool coherent = dev_is_dma_coherent(dev);
- int prot = dma_info_to_prot(dir, coherent, attrs);
dma_addr_t dma_handle;
- dma_handle = __iommu_dma_map(dev, phys, size, prot, dma_get_mask(dev));
+ dma_handle = __iommu_dma_map_swiotlb(dev, phys, size, dma_get_mask(dev),
+ coherent, dir, attrs);
if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
dma_handle != DMA_MAPPING_ERROR)
arch_sync_dma_for_device(phys, size, dir);
@@ -743,7 +845,7 @@ static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
{
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
- __iommu_dma_unmap(dev, dma_handle, size);
+ __iommu_dma_unmap_swiotlb(dev, dma_handle, size, dir, attrs);
}
/*
@@ -821,6 +923,39 @@ static void __invalidate_sg(struct scatterlist *sg, int nents)
}
}
+static void iommu_dma_unmap_sg_swiotlb(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir, unsigned long attrs)
+{
+ struct scatterlist *s;
+ int i;
+
+ for_each_sg(sg, s, nents, i)
+ __iommu_dma_unmap_swiotlb(dev, sg_dma_address(s),
+ sg_dma_len(s), dir, attrs);
+}
+
+static int iommu_dma_map_sg_swiotlb(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir, unsigned long attrs)
+{
+ struct scatterlist *s;
+ int i;
+
+ for_each_sg(sg, s, nents, i) {
+ sg_dma_address(s) = __iommu_dma_map_swiotlb(dev, sg_phys(s),
+ s->length, dma_get_mask(dev),
+ dev_is_dma_coherent(dev), dir, attrs);
+ if (sg_dma_address(s) == DMA_MAPPING_ERROR)
+ goto out_unmap;
+ sg_dma_len(s) = s->length;
+ }
+
+ return nents;
+
+out_unmap:
+ iommu_dma_unmap_sg_swiotlb(dev, sg, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
+ return 0;
+}
+
/*
* The DMA API client is passing in a scatterlist which could describe
* any old buffer layout, but the IOMMU API requires everything to be
@@ -847,6 +982,9 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
iommu_dma_sync_sg_for_device(dev, sg, nents, dir);
+ if (dev_is_untrusted(dev))
+ return iommu_dma_map_sg_swiotlb(dev, sg, nents, dir, attrs);
+
/*
* Work out how much IOVA space we need, and align the segments to
* IOVA granules for the IOMMU driver to handle. With some clever
@@ -900,7 +1038,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
return __finalise_sg(dev, sg, nents, iova);
out_free_iova:
- iommu_dma_free_iova(cookie, iova, iova_len);
+ iommu_dma_free_iova(cookie, iova, iova_len, NULL);
out_restore_sg:
__invalidate_sg(sg, nents);
return 0;
@@ -916,6 +1054,11 @@ static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);
+ if (dev_is_untrusted(dev)) {
+ iommu_dma_unmap_sg_swiotlb(dev, sg, nents, dir, attrs);
+ return;
+ }
+
/*
* The scatterlist segments are mapped into a single
* contiguous IOVA allocation, so this is incredibly easy.
@@ -1102,7 +1245,7 @@ static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
struct page **pages = dma_common_find_pages(cpu_addr);
if (pages)
- return __iommu_dma_mmap(pages, size, vma);
+ return vm_map_pages(vma, pages, nr_pages);
pfn = vmalloc_to_pfn(cpu_addr);
} else {
pfn = page_to_pfn(virt_to_page(cpu_addr));
@@ -1228,7 +1371,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
return msi_page;
out_free_iova:
- iommu_dma_free_iova(cookie, iova, size);
+ iommu_dma_free_iova(cookie, iova, size, NULL);
out_free_page:
kfree(msi_page);
return NULL;
diff --git a/drivers/iommu/intel/Kconfig b/drivers/iommu/intel/Kconfig
index 5337ee1584b0..28a3d1596c76 100644
--- a/drivers/iommu/intel/Kconfig
+++ b/drivers/iommu/intel/Kconfig
@@ -13,6 +13,7 @@ config INTEL_IOMMU
select DMAR_TABLE
select SWIOTLB
select IOASID
+ select IOMMU_DMA
help
DMA remapping (DMAR) devices support enables independent address
translations for Direct Memory Access (DMA) from devices.
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index b46dbfa6d0ed..02e7c10a4224 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -1461,8 +1461,8 @@ void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
int mask = ilog2(__roundup_pow_of_two(npages));
unsigned long align = (1ULL << (VTD_PAGE_SHIFT + mask));
- if (WARN_ON_ONCE(!ALIGN(addr, align)))
- addr &= ~(align - 1);
+ if (WARN_ON_ONCE(!IS_ALIGNED(addr, align)))
+ addr = ALIGN_DOWN(addr, align);
desc.qw0 = QI_EIOTLB_PASID(pasid) |
QI_EIOTLB_DID(did) |
@@ -1496,7 +1496,7 @@ void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
* Max Invs Pending (MIP) is set to 0 for now until we have DIT in
* ECAP.
*/
- if (addr & GENMASK_ULL(size_order + VTD_PAGE_SHIFT, 0))
+ if (!IS_ALIGNED(addr, VTD_PAGE_SIZE << size_order))
pr_warn_ratelimited("Invalidate non-aligned address %llx, order %d\n",
addr, size_order);
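Both dmar.c hunks fix the same class of bug: ALIGN() rounds up, so !ALIGN(addr, align) is false for any non-zero address, the warning could never fire, and the fixup below it was dead code. IS_ALIGNED() tests the low bits and ALIGN_DOWN() clears them. A compilable demonstration, with macros mirroring include/linux/align.h:

#include <assert.h>
#include <stdint.h>

#define ALIGN_UP(x, a)   (((x) + (a) - 1) & ~((uint64_t)(a) - 1))
#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)
#define ALIGN_DOWN(x, a) ((x) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint64_t addr = 0x1234;

	assert(ALIGN_UP(addr, 0x1000) == 0x2000);   /* never 0: old test dead */
	assert(!IS_ALIGNED(addr, 0x1000));          /* the intended check */
	assert(ALIGN_DOWN(addr, 0x1000) == 0x1000); /* the intended fixup */
	return 0;
}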
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index a49afa11673c..06b00b5363d8 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -31,17 +31,16 @@
#include <linux/io.h>
#include <linux/iova.h>
#include <linux/iommu.h>
+#include <linux/dma-iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
-#include <linux/dma-map-ops.h>
#include <linux/dma-direct.h>
#include <linux/crash_dump.h>
#include <linux/numa.h>
-#include <linux/swiotlb.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
@@ -67,8 +66,8 @@
#define MAX_AGAW_WIDTH 64
#define MAX_AGAW_PFN_WIDTH (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
-#define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
-#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
+#define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << ((gaw) - VTD_PAGE_SHIFT)) - 1)
+#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << (gaw)) - 1)
/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
to match. That way, we can use 'unsigned long' for PFNs with impunity. */
@@ -355,7 +354,6 @@ static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;
static int iommu_identity_mapping;
-static int intel_no_bounce;
static int iommu_skip_te_disable;
#define IDENTMAP_GFX 2
@@ -382,9 +380,6 @@ struct device_domain_info *get_domain_info(struct device *dev)
DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);
-#define device_needs_bounce(d) (!intel_no_bounce && dev_is_pci(d) && \
- to_pci_dev(d)->untrusted)
-
/*
* Iterate over elements in device_domain_list and call the specified
* callback @fn against each element.
@@ -460,9 +455,6 @@ static int __init intel_iommu_setup(char *str)
} else if (!strncmp(str, "tboot_noforce", 13)) {
pr_info("Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
intel_iommu_tboot_noforce = 1;
- } else if (!strncmp(str, "nobounce", 8)) {
- pr_info("Intel-IOMMU: No bounce buffer. This could expose security risks of DMA attacks\n");
- intel_no_bounce = 1;
}
str += strcspn(str, ",");
@@ -726,6 +718,8 @@ static int domain_update_device_node(struct dmar_domain *domain)
return nid;
}
+static void domain_update_iotlb(struct dmar_domain *domain);
+
/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
@@ -739,6 +733,20 @@ static void domain_update_iommu_cap(struct dmar_domain *domain)
*/
if (domain->nid == NUMA_NO_NODE)
domain->nid = domain_update_device_node(domain);
+
+ /*
+ * First-level translation restricts the input address to a
+ * canonical address (i.e., address bits 63:N have the same
+ * value as address bit [N-1], where N is 48 bits with 4-level
+ * paging and 57 bits with 5-level paging). Hence, skip bit
+ * [N-1].
+ */
+ if (domain_use_first_level(domain))
+ domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw - 1);
+ else
+ domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw);
+
+ domain_update_iotlb(domain);
}
struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
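The aperture comment above is the whole story: canonical addressing spends bit [gaw-1] on sign extension, so a first-level domain exposes half the input range a second-level one does. Worked numbers for gaw = 48:

#include <assert.h>
#include <stdint.h>

#define DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << (gaw)) - 1)

int main(void)
{
	/* Second-level: the full 48-bit input space. */
	assert(DOMAIN_MAX_ADDR(48) == 0xffffffffffffULL);
	/* First-level: bit 47 is the canonical sign bit, halving it. */
	assert(DOMAIN_MAX_ADDR(47) == 0x7fffffffffffULL);
	return 0;
}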
@@ -1243,17 +1251,17 @@ next:
pages can only be freed after the IOTLB flush has been done. */
static struct page *domain_unmap(struct dmar_domain *domain,
unsigned long start_pfn,
- unsigned long last_pfn)
+ unsigned long last_pfn,
+ struct page *freelist)
{
- struct page *freelist;
-
BUG_ON(!domain_pfn_supported(domain, start_pfn));
BUG_ON(!domain_pfn_supported(domain, last_pfn));
BUG_ON(start_pfn > last_pfn);
/* we don't need lock here; nobody else touches the iova range */
freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
- domain->pgd, 0, start_pfn, last_pfn, NULL);
+ domain->pgd, 0, start_pfn, last_pfn,
+ freelist);
/* free pgd */
if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
@@ -1277,13 +1285,6 @@ static void dma_free_pagelist(struct page *freelist)
}
}
-static void iova_entry_free(unsigned long data)
-{
- struct page *freelist = (struct page *)data;
-
- dma_free_pagelist(freelist);
-}
-
/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
@@ -1466,17 +1467,22 @@ static void domain_update_iotlb(struct dmar_domain *domain)
assert_spin_locked(&device_domain_lock);
- list_for_each_entry(info, &domain->devices, link) {
- struct pci_dev *pdev;
-
- if (!info->dev || !dev_is_pci(info->dev))
- continue;
-
- pdev = to_pci_dev(info->dev);
- if (pdev->ats_enabled) {
+ list_for_each_entry(info, &domain->devices, link)
+ if (info->ats_enabled) {
has_iotlb_device = true;
break;
}
+
+ if (!has_iotlb_device) {
+ struct subdev_domain_info *sinfo;
+
+ list_for_each_entry(sinfo, &domain->subdevices, link_domain) {
+ info = get_domain_info(sinfo->pdev);
+ if (info && info->ats_enabled) {
+ has_iotlb_device = true;
+ break;
+ }
+ }
}
domain->has_iotlb_device = has_iotlb_device;
@@ -1557,25 +1563,37 @@ static void iommu_disable_dev_iotlb(struct device_domain_info *info)
#endif
}
+static void __iommu_flush_dev_iotlb(struct device_domain_info *info,
+ u64 addr, unsigned int mask)
+{
+ u16 sid, qdep;
+
+ if (!info || !info->ats_enabled)
+ return;
+
+ sid = info->bus << 8 | info->devfn;
+ qdep = info->ats_qdep;
+ qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
+ qdep, addr, mask);
+}
+
static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
u64 addr, unsigned mask)
{
- u16 sid, qdep;
unsigned long flags;
struct device_domain_info *info;
+ struct subdev_domain_info *sinfo;
if (!domain->has_iotlb_device)
return;
spin_lock_irqsave(&device_domain_lock, flags);
- list_for_each_entry(info, &domain->devices, link) {
- if (!info->ats_enabled)
- continue;
+ list_for_each_entry(info, &domain->devices, link)
+ __iommu_flush_dev_iotlb(info, addr, mask);
- sid = info->bus << 8 | info->devfn;
- qdep = info->ats_qdep;
- qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
- qdep, addr, mask);
+ list_for_each_entry(sinfo, &domain->subdevices, link_domain) {
+ info = get_domain_info(sinfo->pdev);
+ __iommu_flush_dev_iotlb(info, addr, mask);
}
spin_unlock_irqrestore(&device_domain_lock, flags);
}
@@ -1648,19 +1666,17 @@ static inline void __mapping_notify_one(struct intel_iommu *iommu,
iommu_flush_write_buffer(iommu);
}
-static void iommu_flush_iova(struct iova_domain *iovad)
+static void intel_flush_iotlb_all(struct iommu_domain *domain)
{
- struct dmar_domain *domain;
+ struct dmar_domain *dmar_domain = to_dmar_domain(domain);
int idx;
- domain = container_of(iovad, struct dmar_domain, iovad);
-
- for_each_domain_iommu(idx, domain) {
+ for_each_domain_iommu(idx, dmar_domain) {
struct intel_iommu *iommu = g_iommus[idx];
- u16 did = domain->iommu_did[iommu->seq_id];
+ u16 did = dmar_domain->iommu_did[iommu->seq_id];
- if (domain_use_first_level(domain))
- domain_flush_piotlb(iommu, domain, 0, -1, 0);
+ if (domain_use_first_level(dmar_domain))
+ domain_flush_piotlb(iommu, dmar_domain, 0, -1, 0);
else
iommu->flush.flush_iotlb(iommu, did, 0, 0,
DMA_TLB_DSI_FLUSH);
@@ -1881,6 +1897,7 @@ static struct dmar_domain *alloc_domain(int flags)
domain->flags |= DOMAIN_FLAG_USE_FIRST_LEVEL;
domain->has_iotlb_device = false;
INIT_LIST_HEAD(&domain->devices);
+ INIT_LIST_HEAD(&domain->subdevices);
return domain;
}
@@ -1942,48 +1959,6 @@ static int domain_detach_iommu(struct dmar_domain *domain,
return count;
}
-static struct iova_domain reserved_iova_list;
-static struct lock_class_key reserved_rbtree_key;
-
-static int dmar_init_reserved_ranges(void)
-{
- struct pci_dev *pdev = NULL;
- struct iova *iova;
- int i;
-
- init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN);
-
- lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
- &reserved_rbtree_key);
-
- /* IOAPIC ranges shouldn't be accessed by DMA */
- iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
- IOVA_PFN(IOAPIC_RANGE_END));
- if (!iova) {
- pr_err("Reserve IOAPIC range failed\n");
- return -ENODEV;
- }
-
- /* Reserve all PCI MMIO to avoid peer-to-peer access */
- for_each_pci_dev(pdev) {
- struct resource *r;
-
- for (i = 0; i < PCI_NUM_RESOURCES; i++) {
- r = &pdev->resource[i];
- if (!r->flags || !(r->flags & IORESOURCE_MEM))
- continue;
- iova = reserve_iova(&reserved_iova_list,
- IOVA_PFN(r->start),
- IOVA_PFN(r->end));
- if (!iova) {
- pci_err(pdev, "Reserve iova for %pR failed\n", r);
- return -ENODEV;
- }
- }
- }
- return 0;
-}
-
static inline int guestwidth_to_adjustwidth(int gaw)
{
int agaw;
@@ -2006,12 +1981,13 @@ static void domain_exit(struct dmar_domain *domain)
/* destroy iovas */
if (domain->domain.type == IOMMU_DOMAIN_DMA)
- put_iova_domain(&domain->iovad);
+ iommu_put_dma_cookie(&domain->domain);
if (domain->pgd) {
struct page *freelist;
- freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
+ freelist = domain_unmap(domain, 0,
+ DOMAIN_MAX_PFN(domain->gaw), NULL);
dma_free_pagelist(freelist);
}
@@ -2318,15 +2294,14 @@ static inline int hardware_largepage_caps(struct dmar_domain *domain,
return level;
}
-static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
- struct scatterlist *sg, unsigned long phys_pfn,
- unsigned long nr_pages, int prot)
+static int
+__domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
+ unsigned long phys_pfn, unsigned long nr_pages, int prot)
{
struct dma_pte *first_pte = NULL, *pte = NULL;
- phys_addr_t pteval;
- unsigned long sg_res = 0;
unsigned int largepage_lvl = 0;
unsigned long lvl_pages = 0;
+ phys_addr_t pteval;
u64 attr;
BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
@@ -2338,26 +2313,14 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
if (domain_use_first_level(domain))
attr |= DMA_FL_PTE_PRESENT | DMA_FL_PTE_XD | DMA_FL_PTE_US;
- if (!sg) {
- sg_res = nr_pages;
- pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | attr;
- }
+ pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | attr;
while (nr_pages > 0) {
uint64_t tmp;
- if (!sg_res) {
- unsigned int pgoff = sg->offset & ~PAGE_MASK;
-
- sg_res = aligned_nrpages(sg->offset, sg->length);
- sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + pgoff;
- sg->dma_length = sg->length;
- pteval = (sg_phys(sg) - pgoff) | attr;
- phys_pfn = pteval >> VTD_PAGE_SHIFT;
- }
-
if (!pte) {
- largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
+ largepage_lvl = hardware_largepage_caps(domain, iov_pfn,
+ phys_pfn, nr_pages);
first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
if (!pte)
@@ -2369,7 +2332,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
pteval |= DMA_PTE_LARGE_PAGE;
lvl_pages = lvl_to_nr_pages(largepage_lvl);
- nr_superpages = sg_res / lvl_pages;
+ nr_superpages = nr_pages / lvl_pages;
end_pfn = iov_pfn + nr_superpages * lvl_pages - 1;
/*
@@ -2403,48 +2366,45 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
lvl_pages = lvl_to_nr_pages(largepage_lvl);
BUG_ON(nr_pages < lvl_pages);
- BUG_ON(sg_res < lvl_pages);
nr_pages -= lvl_pages;
iov_pfn += lvl_pages;
phys_pfn += lvl_pages;
pteval += lvl_pages * VTD_PAGE_SIZE;
- sg_res -= lvl_pages;
/* If the next PTE would be the first in a new page, then we
- need to flush the cache on the entries we've just written.
- And then we'll need to recalculate 'pte', so clear it and
- let it get set again in the if (!pte) block above.
-
- If we're done (!nr_pages) we need to flush the cache too.
-
- Also if we've been setting superpages, we may need to
- recalculate 'pte' and switch back to smaller pages for the
- end of the mapping, if the trailing size is not enough to
- use another superpage (i.e. sg_res < lvl_pages). */
+ * need to flush the cache on the entries we've just written.
+ * And then we'll need to recalculate 'pte', so clear it and
+ * let it get set again in the if (!pte) block above.
+ *
+ * If we're done (!nr_pages) we need to flush the cache too.
+ *
+ * Also if we've been setting superpages, we may need to
+ * recalculate 'pte' and switch back to smaller pages for the
+ * end of the mapping, if the trailing size is not enough to
+ * use another superpage (i.e. nr_pages < lvl_pages).
+ */
pte++;
if (!nr_pages || first_pte_in_page(pte) ||
- (largepage_lvl > 1 && sg_res < lvl_pages)) {
+ (largepage_lvl > 1 && nr_pages < lvl_pages)) {
domain_flush_cache(domain, first_pte,
(void *)pte - (void *)first_pte);
pte = NULL;
}
-
- if (!sg_res && nr_pages)
- sg = sg_next(sg);
}
+
return 0;
}
-static int domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
- struct scatterlist *sg, unsigned long phys_pfn,
- unsigned long nr_pages, int prot)
+static int
+domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
+ unsigned long phys_pfn, unsigned long nr_pages, int prot)
{
int iommu_id, ret;
struct intel_iommu *iommu;
/* Do the real mapping first */
- ret = __domain_mapping(domain, iov_pfn, sg, phys_pfn, nr_pages, prot);
+ ret = __domain_mapping(domain, iov_pfn, phys_pfn, nr_pages, prot);
if (ret)
return ret;
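With the scatterlist plumbing gone, __domain_mapping covers one physically contiguous PFN range, carving it into the largest page size that still fits and dropping back to smaller pages for the tail. A simplified chunking sketch using VT-d level sizes; it ignores the IOVA/phys alignment constraints that hardware_largepage_caps also enforces:

#include <stdio.h>

/* 4 KiB base pages; 512 PTEs per level on VT-d. */
static unsigned long pick_lvl_pages(unsigned long nr_pages)
{
	if (nr_pages >= 512UL * 512)
		return 512UL * 512; /* 1 GiB superpage */
	if (nr_pages >= 512)
		return 512;         /* 2 MiB superpage */
	return 1;                   /* base page */
}

int main(void)
{
	unsigned long nr_pages = 512UL * 512 + 513; /* 1G + 2M + 4K */

	while (nr_pages) {
		unsigned long lvl = pick_lvl_pages(nr_pages);

		printf("map %lu pages in one block\n", lvl);
		nr_pages -= lvl;
	}
	return 0;
}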
@@ -2456,20 +2416,6 @@ static int domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
return 0;
}
-static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
- struct scatterlist *sg, unsigned long nr_pages,
- int prot)
-{
- return domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
-}
-
-static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
- unsigned long phys_pfn, unsigned long nr_pages,
- int prot)
-{
- return domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
-}
-
static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
unsigned long flags;
@@ -2539,16 +2485,6 @@ struct dmar_domain *find_domain(struct device *dev)
return NULL;
}
-static void do_deferred_attach(struct device *dev)
-{
- struct iommu_domain *domain;
-
- dev_iommu_priv_set(dev, NULL);
- domain = iommu_get_domain_for_dev(dev);
- if (domain)
- intel_iommu_attach_device(domain, dev);
-}
-
static inline struct device_domain_info *
dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
{
@@ -2632,7 +2568,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
info->iommu = iommu;
info->pasid_table = NULL;
info->auxd_enabled = 0;
- INIT_LIST_HEAD(&info->auxiliary_domains);
+ INIT_LIST_HEAD(&info->subdevices);
if (dev && dev_is_pci(dev)) {
struct pci_dev *pdev = to_pci_dev(info->dev);
@@ -2739,7 +2675,7 @@ static int iommu_domain_identity_map(struct dmar_domain *domain,
*/
dma_pte_clear_range(domain, first_vpfn, last_vpfn);
- return __domain_mapping(domain, first_vpfn, NULL,
+ return __domain_mapping(domain, first_vpfn,
first_vpfn, last_vpfn - first_vpfn + 1,
DMA_PTE_READ|DMA_PTE_WRITE);
}
@@ -2916,13 +2852,6 @@ static int device_def_domain_type(struct device *dev)
if (dev_is_pci(dev)) {
struct pci_dev *pdev = to_pci_dev(dev);
- /*
- * Prevent any device marked as untrusted from getting
- * placed into the statically identity mapping domain.
- */
- if (pdev->untrusted)
- return IOMMU_DOMAIN_DMA;
-
if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
return IOMMU_DOMAIN_IDENTITY;
@@ -3421,594 +3350,6 @@ error:
return ret;
}
-/* This takes a number of _MM_ pages, not VTD pages */
-static unsigned long intel_alloc_iova(struct device *dev,
- struct dmar_domain *domain,
- unsigned long nrpages, uint64_t dma_mask)
-{
- unsigned long iova_pfn;
-
- /*
- * Restrict dma_mask to the width that the iommu can handle.
- * First-level translation restricts the input-address to a
- * canonical address (i.e., address bits 63:N have the same
- * value as address bit [N-1], where N is 48-bits with 4-level
- * paging and 57-bits with 5-level paging). Hence, skip bit
- * [N-1].
- */
- if (domain_use_first_level(domain))
- dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw - 1),
- dma_mask);
- else
- dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw),
- dma_mask);
-
- /* Ensure we reserve the whole size-aligned region */
- nrpages = __roundup_pow_of_two(nrpages);
-
- if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
- /*
- * First try to allocate an io virtual address in
- * DMA_BIT_MASK(32) and if that fails then try allocating
- * from higher range
- */
- iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
- IOVA_PFN(DMA_BIT_MASK(32)), false);
- if (iova_pfn)
- return iova_pfn;
- }
- iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
- IOVA_PFN(dma_mask), true);
- if (unlikely(!iova_pfn)) {
- dev_err_once(dev, "Allocating %ld-page iova failed\n",
- nrpages);
- return 0;
- }
-
- return iova_pfn;
-}
-
-static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
- size_t size, int dir, u64 dma_mask)
-{
- struct dmar_domain *domain;
- phys_addr_t start_paddr;
- unsigned long iova_pfn;
- int prot = 0;
- int ret;
- struct intel_iommu *iommu;
- unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
-
- BUG_ON(dir == DMA_NONE);
-
- if (unlikely(attach_deferred(dev)))
- do_deferred_attach(dev);
-
- domain = find_domain(dev);
- if (!domain)
- return DMA_MAPPING_ERROR;
-
- iommu = domain_get_iommu(domain);
- size = aligned_nrpages(paddr, size);
-
- iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
- if (!iova_pfn)
- goto error;
-
- /*
- * Check if DMAR supports zero-length reads on write only
- * mappings..
- */
- if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
- !cap_zlr(iommu->cap))
- prot |= DMA_PTE_READ;
- if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
- prot |= DMA_PTE_WRITE;
- /*
- * paddr - (paddr + size) might be partial page, we should map the whole
- * page. Note: if two part of one page are separately mapped, we
- * might have two guest_addr mapping to the same host paddr, but this
- * is not a big problem
- */
- ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn),
- mm_to_dma_pfn(paddr_pfn), size, prot);
- if (ret)
- goto error;
-
- start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT;
- start_paddr += paddr & ~PAGE_MASK;
-
- trace_map_single(dev, start_paddr, paddr, size << VTD_PAGE_SHIFT);
-
- return start_paddr;
-
-error:
- if (iova_pfn)
- free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
- dev_err(dev, "Device request: %zx@%llx dir %d --- failed\n",
- size, (unsigned long long)paddr, dir);
- return DMA_MAPPING_ERROR;
-}
-
-static dma_addr_t intel_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
-{
- return __intel_map_single(dev, page_to_phys(page) + offset,
- size, dir, *dev->dma_mask);
-}
-
-static dma_addr_t intel_map_resource(struct device *dev, phys_addr_t phys_addr,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs)
-{
- return __intel_map_single(dev, phys_addr, size, dir, *dev->dma_mask);
-}
-
-static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
-{
- struct dmar_domain *domain;
- unsigned long start_pfn, last_pfn;
- unsigned long nrpages;
- unsigned long iova_pfn;
- struct intel_iommu *iommu;
- struct page *freelist;
- struct pci_dev *pdev = NULL;
-
- domain = find_domain(dev);
- BUG_ON(!domain);
-
- iommu = domain_get_iommu(domain);
-
- iova_pfn = IOVA_PFN(dev_addr);
-
- nrpages = aligned_nrpages(dev_addr, size);
- start_pfn = mm_to_dma_pfn(iova_pfn);
- last_pfn = start_pfn + nrpages - 1;
-
- if (dev_is_pci(dev))
- pdev = to_pci_dev(dev);
-
- freelist = domain_unmap(domain, start_pfn, last_pfn);
- if (intel_iommu_strict || (pdev && pdev->untrusted) ||
- !has_iova_flush_queue(&domain->iovad)) {
- iommu_flush_iotlb_psi(iommu, domain, start_pfn,
- nrpages, !freelist, 0);
- /* free iova */
- free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages));
- dma_free_pagelist(freelist);
- } else {
- queue_iova(&domain->iovad, iova_pfn, nrpages,
- (unsigned long)freelist);
- /*
- * queue up the release of the unmap to save the 1/6th of the
- * cpu used up by the iotlb flush operation...
- */
- }
-
- trace_unmap_single(dev, dev_addr, size);
-}
-
-static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs)
-{
- intel_unmap(dev, dev_addr, size);
-}
-
-static void intel_unmap_resource(struct device *dev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
- intel_unmap(dev, dev_addr, size);
-}
-
-static void *intel_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flags,
- unsigned long attrs)
-{
- struct page *page = NULL;
- int order;
-
- if (unlikely(attach_deferred(dev)))
- do_deferred_attach(dev);
-
- size = PAGE_ALIGN(size);
- order = get_order(size);
-
- if (gfpflags_allow_blocking(flags)) {
- unsigned int count = size >> PAGE_SHIFT;
-
- page = dma_alloc_from_contiguous(dev, count, order,
- flags & __GFP_NOWARN);
- }
-
- if (!page)
- page = alloc_pages(flags, order);
- if (!page)
- return NULL;
- memset(page_address(page), 0, size);
-
- *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
- DMA_BIDIRECTIONAL,
- dev->coherent_dma_mask);
- if (*dma_handle != DMA_MAPPING_ERROR)
- return page_address(page);
- if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
- __free_pages(page, order);
-
- return NULL;
-}
-
-static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, unsigned long attrs)
-{
- int order;
- struct page *page = virt_to_page(vaddr);
-
- size = PAGE_ALIGN(size);
- order = get_order(size);
-
- intel_unmap(dev, dma_handle, size);
- if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
- __free_pages(page, order);
-}
-
-static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
- int nelems, enum dma_data_direction dir,
- unsigned long attrs)
-{
- dma_addr_t startaddr = sg_dma_address(sglist) & PAGE_MASK;
- unsigned long nrpages = 0;
- struct scatterlist *sg;
- int i;
-
- for_each_sg(sglist, sg, nelems, i) {
- nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg));
- }
-
- intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
-
- trace_unmap_sg(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
-}
-
-static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
- enum dma_data_direction dir, unsigned long attrs)
-{
- int i;
- struct dmar_domain *domain;
- size_t size = 0;
- int prot = 0;
- unsigned long iova_pfn;
- int ret;
- struct scatterlist *sg;
- unsigned long start_vpfn;
- struct intel_iommu *iommu;
-
- BUG_ON(dir == DMA_NONE);
-
- if (unlikely(attach_deferred(dev)))
- do_deferred_attach(dev);
-
- domain = find_domain(dev);
- if (!domain)
- return 0;
-
- iommu = domain_get_iommu(domain);
-
- for_each_sg(sglist, sg, nelems, i)
- size += aligned_nrpages(sg->offset, sg->length);
-
- iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
- *dev->dma_mask);
- if (!iova_pfn) {
- sglist->dma_length = 0;
- return 0;
- }
-
- /*
- * Check if DMAR supports zero-length reads on write only
- * mappings..
- */
- if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
- !cap_zlr(iommu->cap))
- prot |= DMA_PTE_READ;
- if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
- prot |= DMA_PTE_WRITE;
-
- start_vpfn = mm_to_dma_pfn(iova_pfn);
-
- ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
- if (unlikely(ret)) {
- dma_pte_free_pagetable(domain, start_vpfn,
- start_vpfn + size - 1,
- agaw_to_level(domain->agaw) + 1);
- free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
- return 0;
- }
-
- for_each_sg(sglist, sg, nelems, i)
- trace_map_sg(dev, i + 1, nelems, sg);
-
- return nelems;
-}
-
-static u64 intel_get_required_mask(struct device *dev)
-{
- return DMA_BIT_MASK(32);
-}
-
-static const struct dma_map_ops intel_dma_ops = {
- .alloc = intel_alloc_coherent,
- .free = intel_free_coherent,
- .map_sg = intel_map_sg,
- .unmap_sg = intel_unmap_sg,
- .map_page = intel_map_page,
- .unmap_page = intel_unmap_page,
- .map_resource = intel_map_resource,
- .unmap_resource = intel_unmap_resource,
- .dma_supported = dma_direct_supported,
- .mmap = dma_common_mmap,
- .get_sgtable = dma_common_get_sgtable,
- .alloc_pages = dma_common_alloc_pages,
- .free_pages = dma_common_free_pages,
- .get_required_mask = intel_get_required_mask,
-};
-
-static void
-bounce_sync_single(struct device *dev, dma_addr_t addr, size_t size,
- enum dma_data_direction dir, enum dma_sync_target target)
-{
- struct dmar_domain *domain;
- phys_addr_t tlb_addr;
-
- domain = find_domain(dev);
- if (WARN_ON(!domain))
- return;
-
- tlb_addr = intel_iommu_iova_to_phys(&domain->domain, addr);
- if (is_swiotlb_buffer(tlb_addr))
- swiotlb_tbl_sync_single(dev, tlb_addr, size, dir, target);
-}
-
-static dma_addr_t
-bounce_map_single(struct device *dev, phys_addr_t paddr, size_t size,
- enum dma_data_direction dir, unsigned long attrs,
- u64 dma_mask)
-{
- size_t aligned_size = ALIGN(size, VTD_PAGE_SIZE);
- struct dmar_domain *domain;
- struct intel_iommu *iommu;
- unsigned long iova_pfn;
- unsigned long nrpages;
- phys_addr_t tlb_addr;
- int prot = 0;
- int ret;
-
- if (unlikely(attach_deferred(dev)))
- do_deferred_attach(dev);
-
- domain = find_domain(dev);
-
- if (WARN_ON(dir == DMA_NONE || !domain))
- return DMA_MAPPING_ERROR;
-
- iommu = domain_get_iommu(domain);
- if (WARN_ON(!iommu))
- return DMA_MAPPING_ERROR;
-
- nrpages = aligned_nrpages(0, size);
- iova_pfn = intel_alloc_iova(dev, domain,
- dma_to_mm_pfn(nrpages), dma_mask);
- if (!iova_pfn)
- return DMA_MAPPING_ERROR;
-
- /*
- * Check if DMAR supports zero-length reads on write only
- * mappings..
- */
- if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
- !cap_zlr(iommu->cap))
- prot |= DMA_PTE_READ;
- if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
- prot |= DMA_PTE_WRITE;
-
- /*
- * If both the physical buffer start address and size are
- * page aligned, we don't need to use a bounce page.
- */
- if (!IS_ALIGNED(paddr | size, VTD_PAGE_SIZE)) {
- tlb_addr = swiotlb_tbl_map_single(dev, paddr, size,
- aligned_size, dir, attrs);
- if (tlb_addr == DMA_MAPPING_ERROR) {
- goto swiotlb_error;
- } else {
- /* Cleanup the padding area. */
- void *padding_start = phys_to_virt(tlb_addr);
- size_t padding_size = aligned_size;
-
- if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
- (dir == DMA_TO_DEVICE ||
- dir == DMA_BIDIRECTIONAL)) {
- padding_start += size;
- padding_size -= size;
- }
-
- memset(padding_start, 0, padding_size);
- }
- } else {
- tlb_addr = paddr;
- }
-
- ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn),
- tlb_addr >> VTD_PAGE_SHIFT, nrpages, prot);
- if (ret)
- goto mapping_error;
-
- trace_bounce_map_single(dev, iova_pfn << PAGE_SHIFT, paddr, size);
-
- return (phys_addr_t)iova_pfn << PAGE_SHIFT;
-
-mapping_error:
- if (is_swiotlb_buffer(tlb_addr))
- swiotlb_tbl_unmap_single(dev, tlb_addr, size,
- aligned_size, dir, attrs);
-swiotlb_error:
- free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages));
- dev_err(dev, "Device bounce map: %zx@%llx dir %d --- failed\n",
- size, (unsigned long long)paddr, dir);
-
- return DMA_MAPPING_ERROR;
-}
-
-static void
-bounce_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
- enum dma_data_direction dir, unsigned long attrs)
-{
- size_t aligned_size = ALIGN(size, VTD_PAGE_SIZE);
- struct dmar_domain *domain;
- phys_addr_t tlb_addr;
-
- domain = find_domain(dev);
- if (WARN_ON(!domain))
- return;
-
- tlb_addr = intel_iommu_iova_to_phys(&domain->domain, dev_addr);
- if (WARN_ON(!tlb_addr))
- return;
-
- intel_unmap(dev, dev_addr, size);
- if (is_swiotlb_buffer(tlb_addr))
- swiotlb_tbl_unmap_single(dev, tlb_addr, size,
- aligned_size, dir, attrs);
-
- trace_bounce_unmap_single(dev, dev_addr, size);
-}
-
-static dma_addr_t
-bounce_map_page(struct device *dev, struct page *page, unsigned long offset,
- size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
- return bounce_map_single(dev, page_to_phys(page) + offset,
- size, dir, attrs, *dev->dma_mask);
-}
-
-static dma_addr_t
-bounce_map_resource(struct device *dev, phys_addr_t phys_addr, size_t size,
- enum dma_data_direction dir, unsigned long attrs)
-{
- return bounce_map_single(dev, phys_addr, size,
- dir, attrs, *dev->dma_mask);
-}
-
-static void
-bounce_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size,
- enum dma_data_direction dir, unsigned long attrs)
-{
- bounce_unmap_single(dev, dev_addr, size, dir, attrs);
-}
-
-static void
-bounce_unmap_resource(struct device *dev, dma_addr_t dev_addr, size_t size,
- enum dma_data_direction dir, unsigned long attrs)
-{
- bounce_unmap_single(dev, dev_addr, size, dir, attrs);
-}
-
-static void
-bounce_unmap_sg(struct device *dev, struct scatterlist *sglist, int nelems,
- enum dma_data_direction dir, unsigned long attrs)
-{
- struct scatterlist *sg;
- int i;
-
- for_each_sg(sglist, sg, nelems, i)
- bounce_unmap_page(dev, sg->dma_address,
- sg_dma_len(sg), dir, attrs);
-}
-
-static int
-bounce_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
- enum dma_data_direction dir, unsigned long attrs)
-{
- int i;
- struct scatterlist *sg;
-
- for_each_sg(sglist, sg, nelems, i) {
- sg->dma_address = bounce_map_page(dev, sg_page(sg),
- sg->offset, sg->length,
- dir, attrs);
- if (sg->dma_address == DMA_MAPPING_ERROR)
- goto out_unmap;
- sg_dma_len(sg) = sg->length;
- }
-
- for_each_sg(sglist, sg, nelems, i)
- trace_bounce_map_sg(dev, i + 1, nelems, sg);
-
- return nelems;
-
-out_unmap:
- bounce_unmap_sg(dev, sglist, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
- return 0;
-}
-
-static void
-bounce_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
- size_t size, enum dma_data_direction dir)
-{
- bounce_sync_single(dev, addr, size, dir, SYNC_FOR_CPU);
-}
-
-static void
-bounce_sync_single_for_device(struct device *dev, dma_addr_t addr,
- size_t size, enum dma_data_direction dir)
-{
- bounce_sync_single(dev, addr, size, dir, SYNC_FOR_DEVICE);
-}
-
-static void
-bounce_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist,
- int nelems, enum dma_data_direction dir)
-{
- struct scatterlist *sg;
- int i;
-
- for_each_sg(sglist, sg, nelems, i)
- bounce_sync_single(dev, sg_dma_address(sg),
- sg_dma_len(sg), dir, SYNC_FOR_CPU);
-}
-
-static void
-bounce_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
- int nelems, enum dma_data_direction dir)
-{
- struct scatterlist *sg;
- int i;
-
- for_each_sg(sglist, sg, nelems, i)
- bounce_sync_single(dev, sg_dma_address(sg),
- sg_dma_len(sg), dir, SYNC_FOR_DEVICE);
-}
-
-static const struct dma_map_ops bounce_dma_ops = {
- .alloc = intel_alloc_coherent,
- .free = intel_free_coherent,
- .map_sg = bounce_map_sg,
- .unmap_sg = bounce_unmap_sg,
- .map_page = bounce_map_page,
- .unmap_page = bounce_unmap_page,
- .sync_single_for_cpu = bounce_sync_single_for_cpu,
- .sync_single_for_device = bounce_sync_single_for_device,
- .sync_sg_for_cpu = bounce_sync_sg_for_cpu,
- .sync_sg_for_device = bounce_sync_sg_for_device,
- .map_resource = bounce_map_resource,
- .unmap_resource = bounce_unmap_resource,
- .alloc_pages = dma_common_alloc_pages,
- .free_pages = dma_common_free_pages,
- .dma_supported = dma_direct_supported,
-};
-
static inline int iommu_domain_cache_init(void)
{
int ret = 0;
@@ -4165,7 +3506,7 @@ static int iommu_suspend(void)
for_each_active_iommu(iommu, drhd) {
iommu->iommu_state = kcalloc(MAX_SR_DMAR_REGS, sizeof(u32),
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!iommu->iommu_state)
goto nomem;
}
@@ -4636,7 +3977,8 @@ static int intel_iommu_memory_notifier(struct notifier_block *nb,
struct page *freelist;
freelist = domain_unmap(si_domain,
- start_vpfn, last_vpfn);
+ start_vpfn, last_vpfn,
+ NULL);
rcu_read_lock();
for_each_active_iommu(iommu, drhd)
@@ -4675,7 +4017,7 @@ static void free_all_cpu_cached_iovas(unsigned int cpu)
if (!domain || domain->domain.type != IOMMU_DOMAIN_DMA)
continue;
- free_cpu_cached_iovas(cpu, &domain->iovad);
+ iommu_dma_free_cpu_cached_iovas(cpu, &domain->domain);
}
}
}
@@ -4948,12 +4290,6 @@ int __init intel_iommu_init(void)
if (list_empty(&dmar_atsr_units))
pr_info("No ATSR found\n");
- if (dmar_init_reserved_ranges()) {
- if (force_on)
- panic("tboot: Failed to reserve iommu ranges\n");
- goto out_free_reserved_range;
- }
-
if (dmar_map_gfx)
intel_iommu_gfx_mapped = 1;
@@ -4964,7 +4300,7 @@ int __init intel_iommu_init(void)
if (force_on)
panic("tboot: Failed to initialize DMARs\n");
pr_err("Initialization failed\n");
- goto out_free_reserved_range;
+ goto out_free_dmar;
}
up_write(&dmar_global_lock);
@@ -5005,8 +4341,6 @@ int __init intel_iommu_init(void)
return 0;
-out_free_reserved_range:
- put_iova_domain(&reserved_iova_list);
out_free_dmar:
intel_iommu_free_dmars();
up_write(&dmar_global_lock);
@@ -5104,17 +4438,6 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
return 0;
}
-static void intel_init_iova_domain(struct dmar_domain *dmar_domain)
-{
- init_iova_domain(&dmar_domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
- copy_reserved_iova(&reserved_iova_list, &dmar_domain->iovad);
-
- if (!intel_iommu_strict &&
- init_iova_flush_queue(&dmar_domain->iovad,
- iommu_flush_iova, iova_entry_free))
- pr_info("iova flush queue initialization failed\n");
-}
-
static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
{
struct dmar_domain *dmar_domain;
@@ -5134,8 +4457,9 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
return NULL;
}
- if (type == IOMMU_DOMAIN_DMA)
- intel_init_iova_domain(dmar_domain);
+ if (type == IOMMU_DOMAIN_DMA &&
+ iommu_get_dma_cookie(&dmar_domain->domain))
+ return NULL;
domain = &dmar_domain->domain;
domain->geometry.aperture_start = 0;
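Domain setup and teardown now delegate IOVA management to the shared dma-iommu layer: the driver-private init_iova_domain()/put_iova_domain() pair becomes a cookie get/put. A minimal sketch of the pairing, with both helpers from linux/dma-iommu.h:

#include <linux/dma-iommu.h>
#include <linux/iommu.h>

static int dma_domain_init(struct iommu_domain *domain)
{
	/* Replaces the driver-private init_iova_domain() call. */
	return iommu_get_dma_cookie(domain);
}

static void dma_domain_exit(struct iommu_domain *domain)
{
	/* Replaces put_iova_domain(), as in domain_exit() above. */
	iommu_put_dma_cookie(domain);
}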
@@ -5172,33 +4496,61 @@ is_aux_domain(struct device *dev, struct iommu_domain *domain)
domain->type == IOMMU_DOMAIN_UNMANAGED;
}
-static void auxiliary_link_device(struct dmar_domain *domain,
- struct device *dev)
+static inline struct subdev_domain_info *
+lookup_subdev_info(struct dmar_domain *domain, struct device *dev)
+{
+ struct subdev_domain_info *sinfo;
+
+ if (!list_empty(&domain->subdevices)) {
+ list_for_each_entry(sinfo, &domain->subdevices, link_domain) {
+ if (sinfo->pdev == dev)
+ return sinfo;
+ }
+ }
+
+ return NULL;
+}
+
+static int auxiliary_link_device(struct dmar_domain *domain,
+ struct device *dev)
{
struct device_domain_info *info = get_domain_info(dev);
+ struct subdev_domain_info *sinfo = lookup_subdev_info(domain, dev);
assert_spin_locked(&device_domain_lock);
if (WARN_ON(!info))
- return;
+ return -EINVAL;
+
+ if (!sinfo) {
+ sinfo = kzalloc(sizeof(*sinfo), GFP_ATOMIC);
+ sinfo->domain = domain;
+ sinfo->pdev = dev;
+ list_add(&sinfo->link_phys, &info->subdevices);
+ list_add(&sinfo->link_domain, &domain->subdevices);
+ }
- domain->auxd_refcnt++;
- list_add(&domain->auxd, &info->auxiliary_domains);
+ return ++sinfo->users;
}
-static void auxiliary_unlink_device(struct dmar_domain *domain,
- struct device *dev)
+static int auxiliary_unlink_device(struct dmar_domain *domain,
+ struct device *dev)
{
struct device_domain_info *info = get_domain_info(dev);
+ struct subdev_domain_info *sinfo = lookup_subdev_info(domain, dev);
+ int ret;
assert_spin_locked(&device_domain_lock);
- if (WARN_ON(!info))
- return;
+ if (WARN_ON(!info || !sinfo || sinfo->users <= 0))
+ return -EINVAL;
- list_del(&domain->auxd);
- domain->auxd_refcnt--;
+ ret = --sinfo->users;
+ if (!ret) {
+ list_del(&sinfo->link_phys);
+ list_del(&sinfo->link_domain);
+ kfree(sinfo);
+ }
- if (!domain->auxd_refcnt && domain->default_pasid > 0)
- ioasid_free(domain->default_pasid);
+ return ret;
}
static int aux_domain_add_dev(struct dmar_domain *domain,
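auxiliary_link_device now returns a per-subdevice user count, so aux_domain_add_dev can skip the PASID and IOMMU setup on every attach after the first, and aux_domain_remove_dev tears down only when the count drops to zero. The idiom in isolation, with standalone stand-ins rather than the kernel types:

struct subdev_link {
	int users;
};

/* First caller (return value 1) performs the full setup. */
static int link_get(struct subdev_link *l)
{
	return ++l->users;
}

/* Last caller (return value 0) performs the teardown. */
static int link_put(struct subdev_link *l)
{
	return --l->users;
}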
@@ -5227,6 +4579,19 @@ static int aux_domain_add_dev(struct dmar_domain *domain,
}
spin_lock_irqsave(&device_domain_lock, flags);
+ ret = auxiliary_link_device(domain, dev);
+ if (ret <= 0)
+ goto link_failed;
+
+ /*
+ * Subdevices from the same physical device can be attached to the
+ * same domain. For such cases, only the first subdevice attachment
+ * needs to go through the full steps in this function. So if ret >
+ * 1, just goto out.
+ */
+ if (ret > 1)
+ goto out;
+
/*
* iommu->lock must be held to attach domain to iommu and setup the
* pasid entry for second level translation.
@@ -5245,10 +4610,9 @@ static int aux_domain_add_dev(struct dmar_domain *domain,
domain->default_pasid);
if (ret)
goto table_failed;
- spin_unlock(&iommu->lock);
-
- auxiliary_link_device(domain, dev);
+ spin_unlock(&iommu->lock);
+out:
spin_unlock_irqrestore(&device_domain_lock, flags);
return 0;
@@ -5257,9 +4621,11 @@ table_failed:
domain_detach_iommu(domain, iommu);
attach_failed:
spin_unlock(&iommu->lock);
+ auxiliary_unlink_device(domain, dev);
+link_failed:
spin_unlock_irqrestore(&device_domain_lock, flags);
- if (!domain->auxd_refcnt && domain->default_pasid > 0)
- ioasid_free(domain->default_pasid);
+ if (list_empty(&domain->subdevices) && domain->default_pasid > 0)
+ ioasid_put(domain->default_pasid);
return ret;
}
@@ -5278,14 +4644,18 @@ static void aux_domain_remove_dev(struct dmar_domain *domain,
info = get_domain_info(dev);
iommu = info->iommu;
- auxiliary_unlink_device(domain, dev);
-
- spin_lock(&iommu->lock);
- intel_pasid_tear_down_entry(iommu, dev, domain->default_pasid, false);
- domain_detach_iommu(domain, iommu);
- spin_unlock(&iommu->lock);
+ if (!auxiliary_unlink_device(domain, dev)) {
+ spin_lock(&iommu->lock);
+ intel_pasid_tear_down_entry(iommu, dev,
+ domain->default_pasid, false);
+ domain_detach_iommu(domain, iommu);
+ spin_unlock(&iommu->lock);
+ }
spin_unlock_irqrestore(&device_domain_lock, flags);
+
+ if (list_empty(&domain->subdevices) && domain->default_pasid > 0)
+ ioasid_put(domain->default_pasid);
}
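The link/unlink pair above replaces the old per-domain auxd_refcnt with a per-(domain, device) reference count. A minimal sketch of the pattern with the VT-d specifics stripped out (all names here are illustrative, not part of the patch):

struct subdev_info {
	struct list_head link;
	void *key;	/* identifies the (domain, device) pair */
	int users;
};

/* Returns the new reference count, or a negative errno. */
static int subdev_link(struct list_head *head, void *key)
{
	struct subdev_info *s;

	list_for_each_entry(s, head, link)
		if (s->key == key)
			return ++s->users;	/* already linked: take a reference */

	s = kzalloc(sizeof(*s), GFP_ATOMIC);
	if (!s)
		return -ENOMEM;
	s->key = key;
	s->users = 1;
	list_add(&s->link, head);
	return 1;	/* first user: caller performs the full setup */
}

A return value of 1 tells the caller it must do the one-time PASID/IOMMU setup; anything greater means another subdevice of the same physical device already did it.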
static int prepare_domain_attach_device(struct iommu_domain *domain,
@@ -5387,6 +4757,7 @@ static void intel_iommu_aux_detach_device(struct iommu_domain *domain,
aux_domain_remove_dev(to_dmar_domain(domain), dev);
}
+#ifdef CONFIG_INTEL_IOMMU_SVM
/*
* 2D array for converting and sanitizing IOMMU generic TLB granularity to
* VT-d granularity. Invalidation is typically included in the unmap operation
@@ -5433,7 +4804,6 @@ static inline u64 to_vtd_size(u64 granu_size, u64 nr_granules)
return order_base_2(nr_pages);
}
-#ifdef CONFIG_INTEL_IOMMU_SVM
static int
intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev,
struct iommu_cache_invalidate_info *inv_info)
@@ -5599,8 +4969,8 @@ static int intel_iommu_map(struct iommu_domain *domain,
/* Round up size to next multiple of PAGE_SIZE, if it and
the low bits of hpa would take us onto the next page */
size = aligned_nrpages(hpa, size);
- ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
- hpa >> VTD_PAGE_SHIFT, size, prot);
+ ret = domain_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
+ hpa >> VTD_PAGE_SHIFT, size, prot);
return ret;
}
@@ -5609,10 +4979,8 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain,
struct iommu_iotlb_gather *gather)
{
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
- struct page *freelist = NULL;
unsigned long start_pfn, last_pfn;
- unsigned int npages;
- int iommu_id, level = 0;
+ int level = 0;
/* Cope with horrid API which requires us to unmap more than the
size argument if it happens to be a large-page mapping. */
@@ -5624,22 +4992,37 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain,
start_pfn = iova >> VTD_PAGE_SHIFT;
last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
- freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);
-
- npages = last_pfn - start_pfn + 1;
-
- for_each_domain_iommu(iommu_id, dmar_domain)
- iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain,
- start_pfn, npages, !freelist, 0);
-
- dma_free_pagelist(freelist);
+ gather->freelist = domain_unmap(dmar_domain, start_pfn,
+ last_pfn, gather->freelist);
if (dmar_domain->max_addr == iova + size)
dmar_domain->max_addr = iova;
+ iommu_iotlb_gather_add_page(domain, gather, iova, size);
+
return size;
}
+static void intel_iommu_tlb_sync(struct iommu_domain *domain,
+ struct iommu_iotlb_gather *gather)
+{
+ struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+ unsigned long iova_pfn = IOVA_PFN(gather->start);
+ size_t size = gather->end - gather->start;
+ unsigned long start_pfn;
+ unsigned long nrpages;
+ int iommu_id;
+
+ nrpages = aligned_nrpages(gather->start, size);
+ start_pfn = mm_to_dma_pfn(iova_pfn);
+
+ for_each_domain_iommu(iommu_id, dmar_domain)
+ iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain,
+ start_pfn, nrpages, !gather->freelist, 0);
+
+ dma_free_pagelist(gather->freelist);
+}
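With unmap now only gathering pages and deferring the hardware flush to the new ->iotlb_sync callback, a caller that wants batched invalidation drives both through the generic gather API. A hedged sketch of the calling convention (these are the generic IOMMU entry points, not code from this patch):

	struct iommu_iotlb_gather gather;

	iommu_iotlb_gather_init(&gather);
	/* Each unmap only accumulates the range into the gather. */
	iommu_unmap_fast(domain, iova, size, &gather);
	/* One IOTLB invalidation covers everything gathered above. */
	iommu_iotlb_sync(domain, &gather);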
+
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
@@ -5750,13 +5133,13 @@ static void intel_iommu_release_device(struct device *dev)
static void intel_iommu_probe_finalize(struct device *dev)
{
- struct iommu_domain *domain;
+ dma_addr_t base = IOVA_START_PFN << VTD_PAGE_SHIFT;
+ struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+ struct dmar_domain *dmar_domain = to_dmar_domain(domain);
- domain = iommu_get_domain_for_dev(dev);
- if (device_needs_bounce(dev))
- set_dma_ops(dev, &bounce_dma_ops);
- else if (domain && domain->type == IOMMU_DOMAIN_DMA)
- set_dma_ops(dev, &intel_dma_ops);
+ if (domain && domain->type == IOMMU_DOMAIN_DMA)
+ iommu_setup_dma_ops(dev, base,
+ __DOMAIN_MAX_ADDR(dmar_domain->gaw) - base);
else
set_dma_ops(dev, NULL);
}
@@ -5869,19 +5252,6 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
return ret;
}
-static void intel_iommu_apply_resv_region(struct device *dev,
- struct iommu_domain *domain,
- struct iommu_resv_region *region)
-{
- struct dmar_domain *dmar_domain = to_dmar_domain(domain);
- unsigned long start, end;
-
- start = IOVA_PFN(region->start);
- end = IOVA_PFN(region->start + region->length - 1);
-
- WARN_ON_ONCE(!reserve_iova(&dmar_domain->iovad, start, end));
-}
-
static struct iommu_group *intel_iommu_device_group(struct device *dev)
{
if (dev_is_pci(dev))
@@ -6070,6 +5440,57 @@ intel_iommu_domain_set_attr(struct iommu_domain *domain,
return ret;
}
+static bool domain_use_flush_queue(void)
+{
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
+ bool r = true;
+
+ if (intel_iommu_strict)
+ return false;
+
+ /*
+ * The flush queue implementation does not perform page-selective
+ * invalidations that are required for efficient TLB flushes in virtual
+ * environments. The benefit of batching is likely to be much lower than
+ * the overhead of synchronizing the virtual and physical IOMMU
+ * page-tables.
+ */
+ rcu_read_lock();
+ for_each_active_iommu(iommu, drhd) {
+ if (!cap_caching_mode(iommu->cap))
+ continue;
+
+ pr_warn_once("IOMMU batching is disabled due to virtualization");
+ r = false;
+ break;
+ }
+ rcu_read_unlock();
+
+ return r;
+}
+
+static int
+intel_iommu_domain_get_attr(struct iommu_domain *domain,
+ enum iommu_attr attr, void *data)
+{
+ switch (domain->type) {
+ case IOMMU_DOMAIN_UNMANAGED:
+ return -ENODEV;
+ case IOMMU_DOMAIN_DMA:
+ switch (attr) {
+ case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
+ *(int *)data = domain_use_flush_queue();
+ return 0;
+ default:
+ return -ENODEV;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+}
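The new get_attr hook exposes the flush-queue decision to the common DMA-IOMMU layer. A consumer would query it roughly like this (a hedged sketch; the attribute is the one handled above, and init_flush_queue() is illustrative):

	int use_fq = 0;

	if (!iommu_domain_get_attr(domain, DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &use_fq) && use_fq)
		init_flush_queue(domain);	/* set up deferred invalidation */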
+
/*
* Check that the device does not live on an external facing PCI port that is
* marked as untrusted. Such devices should not be able to apply quirks and
@@ -6091,6 +5512,7 @@ const struct iommu_ops intel_iommu_ops = {
.capable = intel_iommu_capable,
.domain_alloc = intel_iommu_domain_alloc,
.domain_free = intel_iommu_domain_free,
+ .domain_get_attr = intel_iommu_domain_get_attr,
.domain_set_attr = intel_iommu_domain_set_attr,
.attach_dev = intel_iommu_attach_device,
.detach_dev = intel_iommu_detach_device,
@@ -6099,13 +5521,14 @@ const struct iommu_ops intel_iommu_ops = {
.aux_get_pasid = intel_iommu_aux_get_pasid,
.map = intel_iommu_map,
.unmap = intel_iommu_unmap,
+ .flush_iotlb_all = intel_flush_iotlb_all,
+ .iotlb_sync = intel_iommu_tlb_sync,
.iova_to_phys = intel_iommu_iova_to_phys,
.probe_device = intel_iommu_probe_device,
.probe_finalize = intel_iommu_probe_finalize,
.release_device = intel_iommu_release_device,
.get_resv_regions = intel_iommu_get_resv_regions,
.put_resv_regions = generic_iommu_put_resv_regions,
- .apply_resv_region = intel_iommu_apply_resv_region,
.device_group = intel_iommu_device_group,
.dev_has_feat = intel_iommu_dev_has_feat,
.dev_feat_enabled = intel_iommu_dev_feat_enabled,
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index aeffda92b10b..685200a5cff0 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -1353,6 +1353,8 @@ static int intel_irq_remapping_alloc(struct irq_domain *domain,
irq_data = irq_domain_get_irq_data(domain, virq + i);
irq_cfg = irqd_cfg(irq_data);
if (!irq_data || !irq_cfg) {
+ if (!i)
+ kfree(data);
ret = -EINVAL;
goto out_free_data;
}
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 3242ebd0bca3..18a9f05df407 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -118,8 +118,10 @@ void intel_svm_check(struct intel_iommu *iommu)
iommu->flags |= VTD_FLAG_SVM_CAPABLE;
}
-static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_dev *sdev,
- unsigned long address, unsigned long pages, int ih)
+static void __flush_svm_range_dev(struct intel_svm *svm,
+ struct intel_svm_dev *sdev,
+ unsigned long address,
+ unsigned long pages, int ih)
{
struct qi_desc desc;
@@ -142,7 +144,7 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
}
desc.qw2 = 0;
desc.qw3 = 0;
- qi_submit_sync(svm->iommu, &desc, 1, 0);
+ qi_submit_sync(sdev->iommu, &desc, 1, 0);
if (sdev->dev_iotlb) {
desc.qw0 = QI_DEV_EIOTLB_PASID(svm->pasid) |
@@ -166,7 +168,23 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
}
desc.qw2 = 0;
desc.qw3 = 0;
- qi_submit_sync(svm->iommu, &desc, 1, 0);
+ qi_submit_sync(sdev->iommu, &desc, 1, 0);
+ }
+}
+
+static void intel_flush_svm_range_dev(struct intel_svm *svm,
+ struct intel_svm_dev *sdev,
+ unsigned long address,
+ unsigned long pages, int ih)
+{
+ unsigned long shift = ilog2(__roundup_pow_of_two(pages));
+ unsigned long align = (1ULL << (VTD_PAGE_SHIFT + shift));
+ unsigned long start = ALIGN_DOWN(address, align);
+ unsigned long end = ALIGN(address + (pages << VTD_PAGE_SHIFT), align);
+
+ while (start < end) {
+ __flush_svm_range_dev(svm, sdev, start, align >> VTD_PAGE_SHIFT, ih);
+ start += align;
}
}
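The wrapper rounds each request to a power-of-two-aligned window because the QI descriptor encodes the invalidation size as an address-mask order rather than an arbitrary page count. For example, assuming VTD_PAGE_SHIFT == 12, a request for pages = 5 at address 0x103000 yields shift = ilog2(roundup_pow_of_two(5)) = 3 and align = 1 << (12 + 3) = 0x8000, so start = 0x100000 and end = 0x108000: a single aligned 8-page flush covers all 5 requested pages.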
@@ -211,7 +229,7 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
*/
rcu_read_lock();
list_for_each_entry_rcu(sdev, &svm->devs, list)
- intel_pasid_tear_down_entry(svm->iommu, sdev->dev,
+ intel_pasid_tear_down_entry(sdev->iommu, sdev->dev,
svm->pasid, true);
rcu_read_unlock();
@@ -281,6 +299,7 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
struct dmar_domain *dmar_domain;
struct device_domain_info *info;
struct intel_svm *svm = NULL;
+ unsigned long iflags;
int ret = 0;
if (WARN_ON(!iommu) || !data)
@@ -363,6 +382,7 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
}
sdev->dev = dev;
sdev->sid = PCI_DEVID(info->bus, info->devfn);
+ sdev->iommu = iommu;
/* Only count users if device has aux domains */
if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX))
@@ -381,12 +401,12 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
* each bind of a new device even with an existing PASID, we need to
* call the nested mode setup function here.
*/
- spin_lock(&iommu->lock);
+ spin_lock_irqsave(&iommu->lock, iflags);
ret = intel_pasid_setup_nested(iommu, dev,
(pgd_t *)(uintptr_t)data->gpgd,
data->hpasid, &data->vendor.vtd, dmar_domain,
data->addr_width);
- spin_unlock(&iommu->lock);
+ spin_unlock_irqrestore(&iommu->lock, iflags);
if (ret) {
dev_err_ratelimited(dev, "Failed to set up PASID %llu in nested mode, Err %d\n",
data->hpasid, ret);
@@ -486,6 +506,7 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
struct device_domain_info *info;
struct intel_svm_dev *sdev;
struct intel_svm *svm = NULL;
+ unsigned long iflags;
int pasid_max;
int ret;
@@ -546,6 +567,7 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
goto out;
}
sdev->dev = dev;
+ sdev->iommu = iommu;
ret = intel_iommu_enable_pasid(iommu, dev);
if (ret) {
@@ -575,7 +597,6 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
kfree(sdev);
goto out;
}
- svm->iommu = iommu;
if (pasid_max > intel_pasid_max_id)
pasid_max = intel_pasid_max_id;
@@ -598,25 +619,25 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
if (mm) {
ret = mmu_notifier_register(&svm->notifier, mm);
if (ret) {
- ioasid_free(svm->pasid);
+ ioasid_put(svm->pasid);
kfree(svm);
kfree(sdev);
goto out;
}
}
- spin_lock(&iommu->lock);
+ spin_lock_irqsave(&iommu->lock, iflags);
ret = intel_pasid_setup_first_level(iommu, dev,
mm ? mm->pgd : init_mm.pgd,
svm->pasid, FLPT_DEFAULT_DID,
(mm ? 0 : PASID_FLAG_SUPERVISOR_MODE) |
(cpu_feature_enabled(X86_FEATURE_LA57) ?
PASID_FLAG_FL5LP : 0));
- spin_unlock(&iommu->lock);
+ spin_unlock_irqrestore(&iommu->lock, iflags);
if (ret) {
if (mm)
mmu_notifier_unregister(&svm->notifier, mm);
- ioasid_free(svm->pasid);
+ ioasid_put(svm->pasid);
kfree(svm);
kfree(sdev);
goto out;
@@ -632,14 +653,14 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
* Binding a new device with existing PASID, need to setup
* the PASID entry.
*/
- spin_lock(&iommu->lock);
+ spin_lock_irqsave(&iommu->lock, iflags);
ret = intel_pasid_setup_first_level(iommu, dev,
mm ? mm->pgd : init_mm.pgd,
svm->pasid, FLPT_DEFAULT_DID,
(mm ? 0 : PASID_FLAG_SUPERVISOR_MODE) |
(cpu_feature_enabled(X86_FEATURE_LA57) ?
PASID_FLAG_FL5LP : 0));
- spin_unlock(&iommu->lock);
+ spin_unlock_irqrestore(&iommu->lock, iflags);
if (ret) {
kfree(sdev);
goto out;
@@ -689,7 +710,7 @@ static int intel_svm_unbind_mm(struct device *dev, u32 pasid)
kfree_rcu(sdev, rcu);
if (list_empty(&svm->devs)) {
- ioasid_free(svm->pasid);
+ ioasid_put(svm->pasid);
if (svm->mm) {
mmu_notifier_unregister(&svm->notifier, svm->mm);
/* Clear mm's pasid. */
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index a688f22cbe3b..1d92ac948db7 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -522,14 +522,14 @@ static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova,
struct io_pgtable *iop = &data->iop;
int ret;
- /* If no access, then nothing to do */
- if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
- return 0;
-
if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) ||
paddr >= (1ULL << data->iop.cfg.oas)))
return -ERANGE;
+ /* If no access, then nothing to do */
+ if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
+ return 0;
+
ret = __arm_v7s_map(data, iova, paddr, size, prot, 1, data->pgd, gfp);
/*
* Synchronise all PTE updates for the new mapping before there's
@@ -584,7 +584,7 @@ static arm_v7s_iopte arm_v7s_split_cont(struct arm_v7s_io_pgtable *data,
__arm_v7s_pte_sync(ptep, ARM_V7S_CONT_PAGES, &iop->cfg);
size *= ARM_V7S_CONT_PAGES;
- io_pgtable_tlb_flush_leaf(iop, iova, size, size);
+ io_pgtable_tlb_flush_walk(iop, iova, size, size);
return pte;
}
@@ -866,7 +866,6 @@ static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
static const struct iommu_flush_ops dummy_tlb_ops __initconst = {
.tlb_flush_all = dummy_tlb_flush_all,
.tlb_flush_walk = dummy_tlb_flush,
- .tlb_flush_leaf = dummy_tlb_flush,
.tlb_add_page = dummy_tlb_add_page,
};
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index b34b00fadc45..87def58e79b5 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -130,7 +130,7 @@
/* IOPTE accessors */
#define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d))
-#define iopte_type(pte,l) \
+#define iopte_type(pte) \
(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)
#define iopte_prot(pte) ((pte) & ARM_LPAE_PTE_ATTR_MASK)
@@ -151,9 +151,9 @@ static inline bool iopte_leaf(arm_lpae_iopte pte, int lvl,
enum io_pgtable_fmt fmt)
{
if (lvl == (ARM_LPAE_MAX_LEVELS - 1) && fmt != ARM_MALI_LPAE)
- return iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_PAGE;
+ return iopte_type(pte) == ARM_LPAE_PTE_TYPE_PAGE;
- return iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_BLOCK;
+ return iopte_type(pte) == ARM_LPAE_PTE_TYPE_BLOCK;
}
static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr,
@@ -280,7 +280,7 @@ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
/* We require an unmap first */
WARN_ON(!selftest_running);
return -EEXIST;
- } else if (iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_TABLE) {
+ } else if (iopte_type(pte) == ARM_LPAE_PTE_TYPE_TABLE) {
/*
* We need to unmap and free the old table before
* overwriting it with a block entry.
@@ -450,10 +450,6 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
arm_lpae_iopte prot;
long iaext = (s64)iova >> cfg->ias;
- /* If no access, then nothing to do */
- if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
- return 0;
-
if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size))
return -EINVAL;
@@ -462,6 +458,10 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
if (WARN_ON(iaext || paddr >> cfg->oas))
return -ERANGE;
+ /* If no access, then nothing to do */
+ if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
+ return 0;
+
prot = arm_lpae_prot_to_pte(data, iommu_prot);
ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep, gfp);
/*
@@ -554,7 +554,7 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
* block, but anything else is invalid. We can't misinterpret
* a page entry here since we're never at the last level.
*/
- if (iopte_type(pte, lvl - 1) != ARM_LPAE_PTE_TYPE_TABLE)
+ if (iopte_type(pte) != ARM_LPAE_PTE_TYPE_TABLE)
return 0;
tablep = iopte_deref(pte, data);
@@ -1094,7 +1094,6 @@ static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
static const struct iommu_flush_ops dummy_tlb_ops __initconst = {
.tlb_flush_all = dummy_tlb_flush_all,
.tlb_flush_walk = dummy_tlb_flush,
- .tlb_flush_leaf = dummy_tlb_flush,
.tlb_add_page = dummy_tlb_add_page,
};
diff --git a/drivers/iommu/ioasid.c b/drivers/iommu/ioasid.c
index 0f8dd377aada..50ee27bbd04e 100644
--- a/drivers/iommu/ioasid.c
+++ b/drivers/iommu/ioasid.c
@@ -2,7 +2,7 @@
/*
* I/O Address Space ID allocator. There is one global IOASID space, split into
* subsets. Users create a subset with DECLARE_IOASID_SET, then allocate and
- * free IOASIDs with ioasid_alloc and ioasid_free.
+ * free IOASIDs with ioasid_alloc and ioasid_put.
*/
#include <linux/ioasid.h>
#include <linux/module.h>
@@ -15,6 +15,7 @@ struct ioasid_data {
struct ioasid_set *set;
void *private;
struct rcu_head rcu;
+ refcount_t refs;
};
/*
@@ -314,6 +315,7 @@ ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min, ioasid_t max,
data->set = set;
data->private = private;
+ refcount_set(&data->refs, 1);
/*
* Custom allocator needs allocator data to perform platform specific
@@ -346,11 +348,34 @@ exit_free:
EXPORT_SYMBOL_GPL(ioasid_alloc);
/**
- * ioasid_free - Free an IOASID
+ * ioasid_get - obtain a reference to the IOASID
+ */
+void ioasid_get(ioasid_t ioasid)
+{
+ struct ioasid_data *ioasid_data;
+
+ spin_lock(&ioasid_allocator_lock);
+ ioasid_data = xa_load(&active_allocator->xa, ioasid);
+ if (ioasid_data)
+ refcount_inc(&ioasid_data->refs);
+ else
+ WARN_ON(1);
+ spin_unlock(&ioasid_allocator_lock);
+}
+EXPORT_SYMBOL_GPL(ioasid_get);
+
+/**
+ * ioasid_put - Release a reference to an ioasid
* @ioasid: the ID to remove
+ *
+ * Drop a reference to the IOASID and free it when the reference count
+ * drops to zero.
+ *
+ * Return: %true if the IOASID was freed, %false otherwise.
*/
-void ioasid_free(ioasid_t ioasid)
+bool ioasid_put(ioasid_t ioasid)
{
+ bool free = false;
struct ioasid_data *ioasid_data;
spin_lock(&ioasid_allocator_lock);
@@ -360,6 +385,10 @@ void ioasid_free(ioasid_t ioasid)
goto exit_unlock;
}
+ free = refcount_dec_and_test(&ioasid_data->refs);
+ if (!free)
+ goto exit_unlock;
+
active_allocator->ops->free(ioasid, active_allocator->ops->pdata);
/* Custom allocator needs additional steps to free the xa element */
if (active_allocator->flags & IOASID_ALLOCATOR_CUSTOM) {
@@ -369,8 +398,9 @@ void ioasid_free(ioasid_t ioasid)
exit_unlock:
spin_unlock(&ioasid_allocator_lock);
+ return free;
}
-EXPORT_SYMBOL_GPL(ioasid_free);
+EXPORT_SYMBOL_GPL(ioasid_put);
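With ioasid_free() converted to reference counting, each ioasid_get() must be balanced by an ioasid_put(), and only the final put releases the ID. A hedged usage sketch (my_set, MAX_PASID and priv are illustrative):

	ioasid_t pasid = ioasid_alloc(&my_set, 1, MAX_PASID, priv);

	if (pasid == INVALID_IOASID)
		return -ENOMEM;

	ioasid_get(pasid);	/* a second user takes a reference */
	ioasid_put(pasid);	/* count 2 -> 1: ID stays allocated */
	if (ioasid_put(pasid))	/* count 1 -> 0: ID is freed */
		pr_debug("pasid %u released\n", pasid);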
/**
* ioasid_find - Find IOASID data
diff --git a/drivers/iommu/iommu-sva-lib.c b/drivers/iommu/iommu-sva-lib.c
new file mode 100644
index 000000000000..bd41405d34e9
--- /dev/null
+++ b/drivers/iommu/iommu-sva-lib.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Helpers for IOMMU drivers implementing SVA
+ */
+#include <linux/mutex.h>
+#include <linux/sched/mm.h>
+
+#include "iommu-sva-lib.h"
+
+static DEFINE_MUTEX(iommu_sva_lock);
+static DECLARE_IOASID_SET(iommu_sva_pasid);
+
+/**
+ * iommu_sva_alloc_pasid - Allocate a PASID for the mm
+ * @mm: the mm
+ * @min: minimum PASID value (inclusive)
+ * @max: maximum PASID value (inclusive)
+ *
+ * Try to allocate a PASID for this mm, or take a reference to the existing one
+ * provided it fits within the [@min, @max] range. On success the PASID is
+ * available in mm->pasid, and must be released with iommu_sva_free_pasid().
+ * @min must be greater than 0, because 0 indicates an unused mm->pasid.
+ *
+ * Returns 0 on success and < 0 on error.
+ */
+int iommu_sva_alloc_pasid(struct mm_struct *mm, ioasid_t min, ioasid_t max)
+{
+ int ret = 0;
+ ioasid_t pasid;
+
+ if (min == INVALID_IOASID || max == INVALID_IOASID ||
+ min == 0 || max < min)
+ return -EINVAL;
+
+ mutex_lock(&iommu_sva_lock);
+ if (mm->pasid) {
+ if (mm->pasid >= min && mm->pasid <= max)
+ ioasid_get(mm->pasid);
+ else
+ ret = -EOVERFLOW;
+ } else {
+ pasid = ioasid_alloc(&iommu_sva_pasid, min, max, mm);
+ if (pasid == INVALID_IOASID)
+ ret = -ENOMEM;
+ else
+ mm->pasid = pasid;
+ }
+ mutex_unlock(&iommu_sva_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_sva_alloc_pasid);
+
+/**
+ * iommu_sva_free_pasid - Release the mm's PASID
+ * @mm: the mm
+ *
+ * Drop one reference to a PASID allocated with iommu_sva_alloc_pasid()
+ */
+void iommu_sva_free_pasid(struct mm_struct *mm)
+{
+ mutex_lock(&iommu_sva_lock);
+ if (ioasid_put(mm->pasid))
+ mm->pasid = 0;
+ mutex_unlock(&iommu_sva_lock);
+}
+EXPORT_SYMBOL_GPL(iommu_sva_free_pasid);
+
+/* ioasid_find getter() requires a void * argument */
+static bool __mmget_not_zero(void *mm)
+{
+ return mmget_not_zero(mm);
+}
+
+/**
+ * iommu_sva_find() - Find mm associated to the given PASID
+ * @pasid: Process Address Space ID assigned to the mm
+ *
+ * On success a reference to the mm is taken, and must be released with mmput().
+ *
+ * Returns the mm corresponding to this PASID, or an error if not found.
+ */
+struct mm_struct *iommu_sva_find(ioasid_t pasid)
+{
+ return ioasid_find(&iommu_sva_pasid, pasid, __mmget_not_zero);
+}
+EXPORT_SYMBOL_GPL(iommu_sva_find);
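Taken together, the helpers give an SVA-capable driver a simple bind/unbind skeleton. A hedged sketch (my_sva_bind, my_sva_unbind and MY_PASID_MAX are illustrative, not part of the library):

	static int my_sva_bind(struct device *dev, struct mm_struct *mm)
	{
		int ret;

		/* PASIDs 1..MY_PASID_MAX; 0 means "no PASID" by convention */
		ret = iommu_sva_alloc_pasid(mm, 1, MY_PASID_MAX);
		if (ret)
			return ret;

		/* ... program mm->pasid into the device and IOMMU here ... */
		return 0;
	}

	static void my_sva_unbind(struct mm_struct *mm)
	{
		iommu_sva_free_pasid(mm);	/* drops the reference taken at bind */
	}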
diff --git a/drivers/iommu/iommu-sva-lib.h b/drivers/iommu/iommu-sva-lib.h
new file mode 100644
index 000000000000..b40990aef3fd
--- /dev/null
+++ b/drivers/iommu/iommu-sva-lib.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * SVA library for IOMMU drivers
+ */
+#ifndef _IOMMU_SVA_LIB_H
+#define _IOMMU_SVA_LIB_H
+
+#include <linux/ioasid.h>
+#include <linux/mm_types.h>
+
+int iommu_sva_alloc_pasid(struct mm_struct *mm, ioasid_t min, ioasid_t max);
+void iommu_sva_free_pasid(struct mm_struct *mm);
+struct mm_struct *iommu_sva_find(ioasid_t pasid);
+
+#endif /* _IOMMU_SVA_LIB_H */
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 0f4dc25d46c9..ffeebda8d6de 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -93,6 +93,8 @@ static void __iommu_detach_group(struct iommu_domain *domain,
static int iommu_create_device_direct_mappings(struct iommu_group *group,
struct device *dev);
static struct iommu_group *iommu_group_get_for_dev(struct device *dev);
+static ssize_t iommu_group_store_type(struct iommu_group *group,
+ const char *buf, size_t count);
#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \
struct iommu_group_attribute iommu_group_attr_##_name = \
@@ -253,8 +255,10 @@ int iommu_probe_device(struct device *dev)
goto err_out;
group = iommu_group_get(dev);
- if (!group)
+ if (!group) {
+ ret = -ENODEV;
goto err_release;
+ }
/*
* Try to allocate a default domain - needs support from the
@@ -501,6 +505,7 @@ static ssize_t iommu_group_show_type(struct iommu_group *group,
{
char *type = "unknown\n";
+ mutex_lock(&group->mutex);
if (group->default_domain) {
switch (group->default_domain->type) {
case IOMMU_DOMAIN_BLOCKED:
@@ -517,6 +522,7 @@ static ssize_t iommu_group_show_type(struct iommu_group *group,
break;
}
}
+ mutex_unlock(&group->mutex);
strcpy(buf, type);
return strlen(type);
@@ -527,7 +533,8 @@ static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);
static IOMMU_GROUP_ATTR(reserved_regions, 0444,
iommu_group_show_resv_regions, NULL);
-static IOMMU_GROUP_ATTR(type, 0444, iommu_group_show_type, NULL);
+static IOMMU_GROUP_ATTR(type, 0644, iommu_group_show_type,
+ iommu_group_store_type);
static void iommu_group_release(struct kobject *kobj)
{
@@ -739,6 +746,7 @@ static int iommu_create_device_direct_mappings(struct iommu_group *group,
/* We need to consider overlapping regions for different devices */
list_for_each_entry(entry, &mappings, list) {
dma_addr_t start, end, addr;
+ size_t map_size = 0;
if (domain->ops->apply_resv_region)
domain->ops->apply_resv_region(dev, domain, entry);
@@ -750,16 +758,27 @@ static int iommu_create_device_direct_mappings(struct iommu_group *group,
entry->type != IOMMU_RESV_DIRECT_RELAXABLE)
continue;
- for (addr = start; addr < end; addr += pg_size) {
+ for (addr = start; addr <= end; addr += pg_size) {
phys_addr_t phys_addr;
+ if (addr == end)
+ goto map_end;
+
phys_addr = iommu_iova_to_phys(domain, addr);
- if (phys_addr)
+ if (!phys_addr) {
+ map_size += pg_size;
continue;
+ }
- ret = iommu_map(domain, addr, addr, pg_size, entry->prot);
- if (ret)
- goto out;
+map_end:
+ if (map_size) {
+ ret = iommu_map(domain, addr - map_size,
+ addr - map_size, map_size,
+ entry->prot);
+ if (ret)
+ goto out;
+ map_size = 0;
+ }
}
}
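The rewritten loop batches runs of unmapped IOVAs into a single iommu_map() call instead of mapping page by page. For example, with pg_size = 4K and a reserved region spanning eight pages of which only the third is already mapped, the old code issued seven 4K maps, while the new code issues two calls: one 8K map for the first two pages and one 20K map for the last five (the trailing run is flushed by the extra addr == end iteration).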
@@ -1462,12 +1481,14 @@ EXPORT_SYMBOL_GPL(fsl_mc_device_group);
static int iommu_get_def_domain_type(struct device *dev)
{
const struct iommu_ops *ops = dev->bus->iommu_ops;
- unsigned int type = 0;
+
+ if (dev_is_pci(dev) && to_pci_dev(dev)->untrusted)
+ return IOMMU_DOMAIN_DMA;
if (ops->def_domain_type)
- type = ops->def_domain_type(dev);
+ return ops->def_domain_type(dev);
- return (type == 0) ? iommu_def_domain_type : type;
+ return 0;
}
static int iommu_group_alloc_default_domain(struct bus_type *bus,
@@ -1509,7 +1530,7 @@ static int iommu_alloc_default_domain(struct iommu_group *group,
if (group->default_domain)
return 0;
- type = iommu_get_def_domain_type(dev);
+ type = iommu_get_def_domain_type(dev) ? : iommu_def_domain_type;
return iommu_group_alloc_default_domain(dev->bus, group, type);
}
@@ -1647,12 +1668,8 @@ struct __group_domain_type {
static int probe_get_default_domain_type(struct device *dev, void *data)
{
- const struct iommu_ops *ops = dev->bus->iommu_ops;
struct __group_domain_type *gtype = data;
- unsigned int type = 0;
-
- if (ops->def_domain_type)
- type = ops->def_domain_type(dev);
+ unsigned int type = iommu_get_def_domain_type(dev);
if (type) {
if (gtype->type && gtype->type != type) {
@@ -2997,8 +3014,6 @@ EXPORT_SYMBOL_GPL(iommu_sva_bind_device);
* Put reference to a bond between device and address space. The device should
* not be issuing any more transactions for this PASID. All outstanding page
* requests for this PASID must have been flushed to the IOMMU.
- *
- * Returns 0 on success, or an error value
*/
void iommu_sva_unbind_device(struct iommu_sva *handle)
{
@@ -3031,3 +3046,228 @@ u32 iommu_sva_get_pasid(struct iommu_sva *handle)
return ops->sva_get_pasid(handle);
}
EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);
+
+/*
+ * Changes the default domain of an iommu group that has *only* one device
+ *
+ * @group: The group for which the default domain should be changed
+ * @prev_dev: The device in the group (used to verify that the group's only
+ *            device hasn't changed after the caller looked it up)
+ * @type: The type of the new default domain that gets associated with the group
+ *
+ * Returns 0 on success and error code on failure
+ *
+ * Note:
+ * 1. Presently, this function is called only when a user requests a change to
+ *    the group's default domain type through /sys/kernel/iommu_groups/<grp_id>/type.
+ *    Please take a closer look if you intend to use it for other purposes.
+ */
+static int iommu_change_dev_def_domain(struct iommu_group *group,
+ struct device *prev_dev, int type)
+{
+ struct iommu_domain *prev_dom;
+ struct group_device *grp_dev;
+ int ret, dev_def_dom;
+ struct device *dev;
+
+ if (!group)
+ return -EINVAL;
+
+ mutex_lock(&group->mutex);
+
+ if (group->default_domain != group->domain) {
+ dev_err_ratelimited(prev_dev, "Group not assigned to default domain\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /*
+ * iommu group wasn't locked while acquiring device lock in
+ * iommu_group_store_type(). So, make sure that the device count hasn't
+ * changed while acquiring device lock.
+ *
+ * Changing the default domain of an iommu group with two or more devices
+ * isn't supported because of a potential deadlock. Consider the
+ * following scenario: T1 is trying to acquire the device locks of all
+ * the devices in the group and, before it can acquire all of them,
+ * another thread T2 (from a different sub-system and use case) may have
+ * already acquired some of the device locks and be waiting for T1 to
+ * release the other device locks.
+ */
+ if (iommu_group_device_count(group) != 1) {
+ dev_err_ratelimited(prev_dev, "Cannot change default domain: Group has more than one device\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Since group has only one device */
+ grp_dev = list_first_entry(&group->devices, struct group_device, list);
+ dev = grp_dev->dev;
+
+ if (prev_dev != dev) {
+ dev_err_ratelimited(prev_dev, "Cannot change default domain: Device has been changed\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ prev_dom = group->default_domain;
+ if (!prev_dom) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ dev_def_dom = iommu_get_def_domain_type(dev);
+ if (!type) {
+ /*
+ * If the user hasn't requested any specific type of domain and
+ * the device supports both domain types, then default to the
+ * domain the device was booted with.
+ */
+ type = dev_def_dom ? : iommu_def_domain_type;
+ } else if (dev_def_dom && type != dev_def_dom) {
+ dev_err_ratelimited(prev_dev, "Device cannot be in %s domain\n",
+ iommu_domain_type_str(type));
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * Switch to a new domain only if the requested domain type is different
+ * from the existing default domain type
+ */
+ if (prev_dom->type == type) {
+ ret = 0;
+ goto out;
+ }
+
+ /* Sets group->default_domain to the newly allocated domain */
+ ret = iommu_group_alloc_default_domain(dev->bus, group, type);
+ if (ret)
+ goto out;
+
+ ret = iommu_create_device_direct_mappings(group, dev);
+ if (ret)
+ goto free_new_domain;
+
+ ret = __iommu_attach_device(group->default_domain, dev);
+ if (ret)
+ goto free_new_domain;
+
+ group->domain = group->default_domain;
+
+ /*
+ * Release the mutex here because ops->probe_finalize() call-back of
+ * some vendor IOMMU drivers calls arm_iommu_attach_device() which
+ * in-turn might call back into IOMMU core code, where it tries to take
+ * group->mutex, resulting in a deadlock.
+ */
+ mutex_unlock(&group->mutex);
+
+ /* Make sure dma_ops is appropriately set */
+ iommu_group_do_probe_finalize(dev, group->default_domain);
+ iommu_domain_free(prev_dom);
+ return 0;
+
+free_new_domain:
+ iommu_domain_free(group->default_domain);
+ group->default_domain = prev_dom;
+ group->domain = prev_dom;
+
+out:
+ mutex_unlock(&group->mutex);
+
+ return ret;
+}
+
+/*
+ * Changing the default domain through sysfs requires the user to unbind the
+ * drivers from the devices in the iommu group. Return failure if this
+ * condition is not met.
+ *
+ * We need to consider the race between this and the device release path.
+ * device_lock(dev) is used here to guarantee that the device release path
+ * will not be entered at the same time.
+ */
+static ssize_t iommu_group_store_type(struct iommu_group *group,
+ const char *buf, size_t count)
+{
+ struct group_device *grp_dev;
+ struct device *dev;
+ int ret, req_type;
+
+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
+ return -EACCES;
+
+ if (WARN_ON(!group))
+ return -EINVAL;
+
+ if (sysfs_streq(buf, "identity"))
+ req_type = IOMMU_DOMAIN_IDENTITY;
+ else if (sysfs_streq(buf, "DMA"))
+ req_type = IOMMU_DOMAIN_DMA;
+ else if (sysfs_streq(buf, "auto"))
+ req_type = 0;
+ else
+ return -EINVAL;
+
+ /*
+ * Lock/unlock the group mutex here before the device lock to
+ * 1. Make sure that the iommu group has only one device (this is a
+ *    prerequisite for step 2)
+ * 2. Get the struct device pointer needed to take the device lock
+ */
+ mutex_lock(&group->mutex);
+ if (iommu_group_device_count(group) != 1) {
+ mutex_unlock(&group->mutex);
+ pr_err_ratelimited("Cannot change default domain: Group has more than one device\n");
+ return -EINVAL;
+ }
+
+ /* Since group has only one device */
+ grp_dev = list_first_entry(&group->devices, struct group_device, list);
+ dev = grp_dev->dev;
+ get_device(dev);
+
+ /*
+ * Don't hold the group mutex here, because taking the group mutex first
+ * and then the device lock could cause a deadlock as follows. Assume
+ * two threads T1 and T2: T1 is trying to change the default domain of an
+ * iommu group and T2 is trying to hot unplug a device or release [1] a VF
+ * of a PCIe device in the same iommu group. T1 takes the group mutex and,
+ * before it can take the device lock, T2 takes the device lock but has
+ * yet to take the group mutex. Now both threads wait for the other to
+ * release its lock. The lock order below avoids this:
+ * device_lock(dev);
+ * mutex_lock(&group->mutex);
+ * iommu_change_dev_def_domain();
+ * mutex_unlock(&group->mutex);
+ * device_unlock(dev);
+ *
+ * [1] Typical device release path
+ * device_lock() from device/driver core code
+ * -> bus_notifier()
+ * -> iommu_bus_notifier()
+ * -> iommu_release_device()
+ * -> ops->release_device() vendor driver calls back iommu core code
+ * -> mutex_lock() from iommu core code
+ */
+ mutex_unlock(&group->mutex);
+
+ /* Check if the device in the group still has a driver bound to it */
+ device_lock(dev);
+ if (device_is_bound(dev)) {
+ pr_err_ratelimited("Device is still bound to driver\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ret = iommu_change_dev_def_domain(group, dev, req_type);
+ ret = ret ?: count;
+
+out:
+ device_unlock(dev);
+ put_device(dev);
+
+ return ret;
+}
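With this in place an administrator can retype a group's default domain from user space: unbind the device's driver, then write one of identity, DMA or auto to /sys/kernel/iommu_groups/<grp_id>/type. The write fails with EBUSY while a driver is still bound, and with EACCES unless the caller has both CAP_SYS_ADMIN and CAP_SYS_RAWIO.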
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 30d969a4c5fd..d20b8b333d30 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -25,6 +25,7 @@ static void init_iova_rcaches(struct iova_domain *iovad);
static void free_iova_rcaches(struct iova_domain *iovad);
static void fq_destroy_all_entries(struct iova_domain *iovad);
static void fq_flush_timeout(struct timer_list *t);
+static void free_global_cached_iovas(struct iova_domain *iovad);
void
init_iova_domain(struct iova_domain *iovad, unsigned long granule,
@@ -184,8 +185,9 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
struct rb_node *curr, *prev;
struct iova *curr_iova;
unsigned long flags;
- unsigned long new_pfn;
+ unsigned long new_pfn, retry_pfn;
unsigned long align_mask = ~0UL;
+ unsigned long high_pfn = limit_pfn, low_pfn = iovad->start_pfn;
if (size_aligned)
align_mask <<= fls_long(size - 1);
@@ -198,15 +200,25 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
curr = __get_cached_rbnode(iovad, limit_pfn);
curr_iova = rb_entry(curr, struct iova, node);
+ retry_pfn = curr_iova->pfn_hi + 1;
+
+retry:
do {
- limit_pfn = min(limit_pfn, curr_iova->pfn_lo);
- new_pfn = (limit_pfn - size) & align_mask;
+ high_pfn = min(high_pfn, curr_iova->pfn_lo);
+ new_pfn = (high_pfn - size) & align_mask;
prev = curr;
curr = rb_prev(curr);
curr_iova = rb_entry(curr, struct iova, node);
- } while (curr && new_pfn <= curr_iova->pfn_hi);
-
- if (limit_pfn < size || new_pfn < iovad->start_pfn) {
+ } while (curr && new_pfn <= curr_iova->pfn_hi && new_pfn >= low_pfn);
+
+ if (high_pfn < size || new_pfn < low_pfn) {
+ if (low_pfn == iovad->start_pfn && retry_pfn < limit_pfn) {
+ high_pfn = limit_pfn;
+ low_pfn = retry_pfn;
+ curr = &iovad->anchor.node;
+ curr_iova = rb_entry(curr, struct iova, node);
+ goto retry;
+ }
iovad->max32_alloc_size = size;
goto iova32_full;
}
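The retry widens the search window: the first pass scans top-down from the cached node, and if it fails, a second pass restarts from the anchor over [retry_pfn, limit_pfn], i.e. the space above the previously cached position that the first pass never visited. An allocation that used to fail outright when everything below the cached node was exhausted can now succeed from the free space above it.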
@@ -231,18 +243,16 @@ static struct kmem_cache *iova_cache;
static unsigned int iova_cache_users;
static DEFINE_MUTEX(iova_cache_mutex);
-struct iova *alloc_iova_mem(void)
+static struct iova *alloc_iova_mem(void)
{
return kmem_cache_zalloc(iova_cache, GFP_ATOMIC | __GFP_NOWARN);
}
-EXPORT_SYMBOL(alloc_iova_mem);
-void free_iova_mem(struct iova *iova)
+static void free_iova_mem(struct iova *iova)
{
if (iova->pfn_lo != IOVA_ANCHOR)
kmem_cache_free(iova_cache, iova);
}
-EXPORT_SYMBOL(free_iova_mem);
int iova_cache_get(void)
{
@@ -348,7 +358,7 @@ static void private_free_iova(struct iova_domain *iovad, struct iova *iova)
* @iovad: - iova domain in question.
* @pfn: - page frame number
* This function finds and returns an iova belonging to the
- * given doamin which matches the given pfn.
+ * given domain which matches the given pfn.
*/
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
{
@@ -390,10 +400,14 @@ EXPORT_SYMBOL_GPL(__free_iova);
void
free_iova(struct iova_domain *iovad, unsigned long pfn)
{
- struct iova *iova = find_iova(iovad, pfn);
+ unsigned long flags;
+ struct iova *iova;
+ spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
+ iova = private_find_iova(iovad, pfn);
if (iova)
- __free_iova(iovad, iova);
+ private_free_iova(iovad, iova);
+ spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}
EXPORT_SYMBOL_GPL(free_iova);
@@ -431,6 +445,7 @@ retry:
flush_rcache = false;
for_each_online_cpu(cpu)
free_cpu_cached_iovas(cpu, iovad);
+ free_global_cached_iovas(iovad);
goto retry;
}
@@ -586,7 +601,7 @@ void queue_iova(struct iova_domain *iovad,
EXPORT_SYMBOL_GPL(queue_iova);
/**
- * put_iova_domain - destroys the iova doamin
+ * put_iova_domain - destroys the iova domain
* @iovad: - iova domain in question.
* All the iova's in that domain are destroyed.
*/
@@ -697,9 +712,9 @@ EXPORT_SYMBOL_GPL(reserve_iova);
/**
* copy_reserved_iova - copies the reserved between domains
- * @from: - source doamin from where to copy
+ * @from: - source domain from where to copy
* @to: - destination domain where to copy
- * This function copies reserved iova's from one doamin to
+ * This function copies reserved iova's from one domain to
* other.
*/
void
@@ -725,47 +740,6 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
}
EXPORT_SYMBOL_GPL(copy_reserved_iova);
-struct iova *
-split_and_remove_iova(struct iova_domain *iovad, struct iova *iova,
- unsigned long pfn_lo, unsigned long pfn_hi)
-{
- unsigned long flags;
- struct iova *prev = NULL, *next = NULL;
-
- spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
- if (iova->pfn_lo < pfn_lo) {
- prev = alloc_and_init_iova(iova->pfn_lo, pfn_lo - 1);
- if (prev == NULL)
- goto error;
- }
- if (iova->pfn_hi > pfn_hi) {
- next = alloc_and_init_iova(pfn_hi + 1, iova->pfn_hi);
- if (next == NULL)
- goto error;
- }
-
- __cached_rbnode_delete_update(iovad, iova);
- rb_erase(&iova->node, &iovad->rbroot);
-
- if (prev) {
- iova_insert_rbtree(&iovad->rbroot, prev, NULL);
- iova->pfn_lo = pfn_lo;
- }
- if (next) {
- iova_insert_rbtree(&iovad->rbroot, next, NULL);
- iova->pfn_hi = pfn_hi;
- }
- spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
-
- return iova;
-
-error:
- spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
- if (prev)
- free_iova_mem(prev);
- return NULL;
-}
-
/*
* Magazine caches for IOVA ranges. For an introduction to magazines,
* see the USENIX 2001 paper "Magazines and Vmem: Extending the Slab
@@ -1046,5 +1020,25 @@ void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad)
}
}
+/*
+ * Free all the IOVA ranges in the global cache.
+ */
+static void free_global_cached_iovas(struct iova_domain *iovad)
+{
+ struct iova_rcache *rcache;
+ unsigned long flags;
+ int i, j;
+
+ for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
+ rcache = &iovad->rcaches[i];
+ spin_lock_irqsave(&rcache->lock, flags);
+ for (j = 0; j < rcache->depot_size; ++j) {
+ iova_magazine_free_pfns(rcache->depot[j], iovad);
+ iova_magazine_free(rcache->depot[j]);
+ }
+ rcache->depot_size = 0;
+ spin_unlock_irqrestore(&rcache->lock, flags);
+ }
+}
MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 0f18abda0e20..d71f10257f15 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -325,7 +325,6 @@ static void ipmmu_tlb_flush(unsigned long iova, size_t size,
static const struct iommu_flush_ops ipmmu_flush_ops = {
.tlb_flush_all = ipmmu_tlb_flush_all,
.tlb_flush_walk = ipmmu_tlb_flush,
- .tlb_flush_leaf = ipmmu_tlb_flush,
};
/* -----------------------------------------------------------------------------
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 3615cd6241c4..040e85f70861 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -174,12 +174,6 @@ static void __flush_iotlb_walk(unsigned long iova, size_t size,
__flush_iotlb_range(iova, size, granule, false, cookie);
}
-static void __flush_iotlb_leaf(unsigned long iova, size_t size,
- size_t granule, void *cookie)
-{
- __flush_iotlb_range(iova, size, granule, true, cookie);
-}
-
static void __flush_iotlb_page(struct iommu_iotlb_gather *gather,
unsigned long iova, size_t granule, void *cookie)
{
@@ -189,7 +183,6 @@ static void __flush_iotlb_page(struct iommu_iotlb_gather *gather,
static const struct iommu_flush_ops msm_iommu_flush_ops = {
.tlb_flush_all = __flush_iotlb,
.tlb_flush_walk = __flush_iotlb_walk,
- .tlb_flush_leaf = __flush_iotlb_leaf,
.tlb_add_page = __flush_iotlb_page,
};
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index c072cee532c2..8e56cec532e7 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -240,7 +240,6 @@ static void mtk_iommu_tlb_flush_page_nosync(struct iommu_iotlb_gather *gather,
static const struct iommu_flush_ops mtk_iommu_flush_ops = {
.tlb_flush_all = mtk_iommu_tlb_flush_all,
.tlb_flush_walk = mtk_iommu_tlb_flush_range_sync,
- .tlb_flush_leaf = mtk_iommu_tlb_flush_range_sync,
.tlb_add_page = mtk_iommu_tlb_flush_page_nosync,
};
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 0becdbfea306..4a3f095a1c26 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -256,26 +257,19 @@ static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
{
unsigned long id;
- mutex_lock(&smmu->lock);
-
id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids);
- if (id >= smmu->soc->num_asids) {
- mutex_unlock(&smmu->lock);
+ if (id >= smmu->soc->num_asids)
return -ENOSPC;
- }
set_bit(id, smmu->asids);
*idp = id;
- mutex_unlock(&smmu->lock);
return 0;
}
static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
{
- mutex_lock(&smmu->lock);
clear_bit(id, smmu->asids);
- mutex_unlock(&smmu->lock);
}
static bool tegra_smmu_capable(enum iommu_cap cap)
@@ -420,17 +414,21 @@ static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
struct tegra_smmu_as *as)
{
u32 value;
- int err;
+ int err = 0;
+
+ mutex_lock(&smmu->lock);
if (as->use_count > 0) {
as->use_count++;
- return 0;
+ goto unlock;
}
as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD,
DMA_TO_DEVICE);
- if (dma_mapping_error(smmu->dev, as->pd_dma))
- return -ENOMEM;
+ if (dma_mapping_error(smmu->dev, as->pd_dma)) {
+ err = -ENOMEM;
+ goto unlock;
+ }
/* We can't handle 64-bit DMA addresses */
if (!smmu_dma_addr_valid(smmu, as->pd_dma)) {
@@ -453,83 +451,84 @@ static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
as->smmu = smmu;
as->use_count++;
+ mutex_unlock(&smmu->lock);
+
return 0;
err_unmap:
dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
+unlock:
+ mutex_unlock(&smmu->lock);
+
return err;
}
static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
struct tegra_smmu_as *as)
{
- if (--as->use_count > 0)
+ mutex_lock(&smmu->lock);
+
+ if (--as->use_count > 0) {
+ mutex_unlock(&smmu->lock);
return;
+ }
tegra_smmu_free_asid(smmu, as->id);
dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
as->smmu = NULL;
+
+ mutex_unlock(&smmu->lock);
}
static int tegra_smmu_attach_dev(struct iommu_domain *domain,
struct device *dev)
{
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
struct tegra_smmu_as *as = to_smmu_as(domain);
- struct device_node *np = dev->of_node;
- struct of_phandle_args args;
- unsigned int index = 0;
- int err = 0;
-
- while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
- &args)) {
- unsigned int swgroup = args.args[0];
-
- if (args.np != smmu->dev->of_node) {
- of_node_put(args.np);
- continue;
- }
+ unsigned int index;
+ int err;
- of_node_put(args.np);
+ if (!fwspec)
+ return -ENOENT;
+ for (index = 0; index < fwspec->num_ids; index++) {
err = tegra_smmu_as_prepare(smmu, as);
- if (err < 0)
- return err;
+ if (err)
+ goto disable;
- tegra_smmu_enable(smmu, swgroup, as->id);
- index++;
+ tegra_smmu_enable(smmu, fwspec->ids[index], as->id);
}
if (index == 0)
return -ENODEV;
return 0;
+
+disable:
+ while (index--) {
+ tegra_smmu_disable(smmu, fwspec->ids[index], as->id);
+ tegra_smmu_as_unprepare(smmu, as);
+ }
+
+ return err;
}
static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct tegra_smmu_as *as = to_smmu_as(domain);
- struct device_node *np = dev->of_node;
struct tegra_smmu *smmu = as->smmu;
- struct of_phandle_args args;
- unsigned int index = 0;
-
- while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
- &args)) {
- unsigned int swgroup = args.args[0];
-
- if (args.np != smmu->dev->of_node) {
- of_node_put(args.np);
- continue;
- }
+ unsigned int index;
- of_node_put(args.np);
+ if (!fwspec)
+ return;
- tegra_smmu_disable(smmu, swgroup, as->id);
+ for (index = 0; index < fwspec->num_ids; index++) {
+ tegra_smmu_disable(smmu, fwspec->ids[index], as->id);
tegra_smmu_as_unprepare(smmu, as);
- index++;
}
}
@@ -799,75 +798,9 @@ static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
return SMMU_PFN_PHYS(pfn) + SMMU_OFFSET_IN_PAGE(iova);
}
-static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
-{
- struct platform_device *pdev;
- struct tegra_mc *mc;
-
- pdev = of_find_device_by_node(np);
- if (!pdev)
- return NULL;
-
- mc = platform_get_drvdata(pdev);
- if (!mc)
- return NULL;
-
- return mc->smmu;
-}
-
-static int tegra_smmu_configure(struct tegra_smmu *smmu, struct device *dev,
- struct of_phandle_args *args)
-{
- const struct iommu_ops *ops = smmu->iommu.ops;
- int err;
-
- err = iommu_fwspec_init(dev, &dev->of_node->fwnode, ops);
- if (err < 0) {
- dev_err(dev, "failed to initialize fwspec: %d\n", err);
- return err;
- }
-
- err = ops->of_xlate(dev, args);
- if (err < 0) {
- dev_err(dev, "failed to parse SW group ID: %d\n", err);
- iommu_fwspec_free(dev);
- return err;
- }
-
- return 0;
-}
-
static struct iommu_device *tegra_smmu_probe_device(struct device *dev)
{
- struct device_node *np = dev->of_node;
- struct tegra_smmu *smmu = NULL;
- struct of_phandle_args args;
- unsigned int index = 0;
- int err;
-
- while (of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
- &args) == 0) {
- smmu = tegra_smmu_find(args.np);
- if (smmu) {
- err = tegra_smmu_configure(smmu, dev, &args);
- of_node_put(args.np);
-
- if (err < 0)
- return ERR_PTR(err);
-
- /*
- * Only a single IOMMU master interface is currently
- * supported by the Linux kernel, so abort after the
- * first match.
- */
- dev_iommu_priv_set(dev, smmu);
-
- break;
- }
-
- of_node_put(args.np);
- index++;
- }
+ struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
if (!smmu)
return ERR_PTR(-ENODEV);
@@ -875,10 +808,7 @@ static struct iommu_device *tegra_smmu_probe_device(struct device *dev)
return &smmu->iommu;
}
-static void tegra_smmu_release_device(struct device *dev)
-{
- dev_iommu_priv_set(dev, NULL);
-}
+static void tegra_smmu_release_device(struct device *dev) {}
static const struct tegra_smmu_group_soc *
tegra_smmu_find_group(struct tegra_smmu *smmu, unsigned int swgroup)
@@ -903,10 +833,12 @@ static void tegra_smmu_group_release(void *iommu_data)
mutex_unlock(&smmu->lock);
}
-static struct iommu_group *tegra_smmu_group_get(struct tegra_smmu *smmu,
- unsigned int swgroup)
+static struct iommu_group *tegra_smmu_device_group(struct device *dev)
{
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+ struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
const struct tegra_smmu_group_soc *soc;
+ unsigned int swgroup = fwspec->ids[0];
struct tegra_smmu_group *group;
struct iommu_group *grp;
@@ -934,7 +866,11 @@ static struct iommu_group *tegra_smmu_group_get(struct tegra_smmu *smmu,
group->smmu = smmu;
group->soc = soc;
- group->group = iommu_group_alloc();
+ if (dev_is_pci(dev))
+ group->group = pci_device_group(dev);
+ else
+ group->group = generic_device_group(dev);
+
if (IS_ERR(group->group)) {
devm_kfree(smmu->dev, group);
mutex_unlock(&smmu->lock);
@@ -950,24 +886,24 @@ static struct iommu_group *tegra_smmu_group_get(struct tegra_smmu *smmu,
return group->group;
}
-static struct iommu_group *tegra_smmu_device_group(struct device *dev)
-{
- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
- struct iommu_group *group;
-
- group = tegra_smmu_group_get(smmu, fwspec->ids[0]);
- if (!group)
- group = generic_device_group(dev);
-
- return group;
-}
-
static int tegra_smmu_of_xlate(struct device *dev,
struct of_phandle_args *args)
{
+ struct platform_device *iommu_pdev = of_find_device_by_node(args->np);
+ struct tegra_mc *mc = platform_get_drvdata(iommu_pdev);
u32 id = args->args[0];
+ /*
+ * Note: we release the reference to &iommu_pdev->dev here, which
+ * is mc->dev. Although some functions in tegra_smmu_ops may keep
+ * using its private data beyond this point, this is safe because the
+ * SMMU parent device is the same as the MC, so the reference count
+ * isn't strictly necessary.
+ */
+ put_device(&iommu_pdev->dev);
+
+ dev_iommu_priv_set(dev, mc->smmu);
+
return iommu_fwspec_add_ids(dev, &id, 1);
}
@@ -1092,16 +1028,6 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev,
if (!smmu)
return ERR_PTR(-ENOMEM);
- /*
- * This is a bit of a hack. Ideally we'd want to simply return this
- * value. However the IOMMU registration process will attempt to add
- * all devices to the IOMMU when bus_set_iommu() is called. In order
- * not to rely on global variables to track the IOMMU instance, we
- * set it here so that it can be looked up from the .probe_device()
- * callback via the IOMMU device's .drvdata field.
- */
- mc->smmu = smmu;
-
size = BITS_TO_LONGS(soc->num_asids) * sizeof(long);
smmu->asids = devm_kzalloc(dev, size, GFP_KERNEL);
@@ -1154,22 +1080,32 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev,
iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
err = iommu_device_register(&smmu->iommu);
- if (err) {
- iommu_device_sysfs_remove(&smmu->iommu);
- return ERR_PTR(err);
- }
+ if (err)
+ goto remove_sysfs;
err = bus_set_iommu(&platform_bus_type, &tegra_smmu_ops);
- if (err < 0) {
- iommu_device_unregister(&smmu->iommu);
- iommu_device_sysfs_remove(&smmu->iommu);
- return ERR_PTR(err);
- }
+ if (err < 0)
+ goto unregister;
+
+#ifdef CONFIG_PCI
+ err = bus_set_iommu(&pci_bus_type, &tegra_smmu_ops);
+ if (err < 0)
+ goto unset_platform_bus;
+#endif
if (IS_ENABLED(CONFIG_DEBUG_FS))
tegra_smmu_debugfs_init(smmu);
return smmu;
+
+unset_platform_bus: __maybe_unused;
+ bus_set_iommu(&platform_bus_type, NULL);
+unregister:
+ iommu_device_unregister(&smmu->iommu);
+remove_sysfs:
+ iommu_device_sysfs_remove(&smmu->iommu);
+
+ return ERR_PTR(err);
}
void tegra_smmu_remove(struct tegra_smmu *smmu)
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 94920a51c628..b147f22a78f4 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -493,8 +493,9 @@ config TI_SCI_INTA_IRQCHIP
TI System Controller, say Y here. Otherwise, say N.
config TI_PRUSS_INTC
- tristate "TI PRU-ICSS Interrupt Controller"
- depends on ARCH_DAVINCI || SOC_AM33XX || SOC_AM43XX || SOC_DRA7XX || ARCH_KEYSTONE || ARCH_K3
+ tristate
+ depends on TI_PRUSS
+ default TI_PRUSS
select IRQ_DOMAIN
help
This enables support for the PRU-ICSS Local Interrupt Controller
diff --git a/drivers/irqchip/irq-bcm2836.c b/drivers/irqchip/irq-bcm2836.c
index 5f5eb8877c41..25c9a9c06e41 100644
--- a/drivers/irqchip/irq-bcm2836.c
+++ b/drivers/irqchip/irq-bcm2836.c
@@ -167,7 +167,7 @@ static void bcm2836_arm_irqchip_handle_ipi(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
-static void bcm2836_arm_irqchip_ipi_eoi(struct irq_data *d)
+static void bcm2836_arm_irqchip_ipi_ack(struct irq_data *d)
{
int cpu = smp_processor_id();
@@ -195,7 +195,7 @@ static struct irq_chip bcm2836_arm_irqchip_ipi = {
.name = "IPI",
.irq_mask = bcm2836_arm_irqchip_dummy_op,
.irq_unmask = bcm2836_arm_irqchip_dummy_op,
- .irq_eoi = bcm2836_arm_irqchip_ipi_eoi,
+ .irq_ack = bcm2836_arm_irqchip_ipi_ack,
.ipi_send_mask = bcm2836_arm_irqchip_ipi_send_mask,
};
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index c951ad24d377..ed46e6057e33 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -3844,8 +3844,6 @@ static void its_vpe_schedule(struct its_vpe *vpe)
val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
val |= GICR_VPENDBASER_Valid;
gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
-
- its_wait_vpt_parse_complete();
}
static void its_vpe_deschedule(struct its_vpe *vpe)
@@ -3893,6 +3891,10 @@ static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
its_vpe_deschedule(vpe);
return 0;
+ case COMMIT_VPE:
+ its_wait_vpt_parse_complete();
+ return 0;
+
case INVALL_VPE:
its_vpe_invall(vpe);
return 0;
@@ -4054,8 +4056,6 @@ static void its_vpe_4_1_schedule(struct its_vpe *vpe,
val |= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe->vpe_id);
gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
-
- its_wait_vpt_parse_complete();
}
static void its_vpe_4_1_deschedule(struct its_vpe *vpe,
@@ -4130,6 +4130,10 @@ static int its_vpe_4_1_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
its_vpe_4_1_deschedule(vpe, info);
return 0;
+ case COMMIT_VPE:
+ its_wait_vpt_parse_complete();
+ return 0;
+
case INVALL_VPE:
its_vpe_4_1_invall(vpe);
return 0;
diff --git a/drivers/irqchip/irq-gic-v4.c b/drivers/irqchip/irq-gic-v4.c
index 0c18714ae13e..5d1dc9915272 100644
--- a/drivers/irqchip/irq-gic-v4.c
+++ b/drivers/irqchip/irq-gic-v4.c
@@ -232,6 +232,8 @@ int its_make_vpe_non_resident(struct its_vpe *vpe, bool db)
if (!ret)
vpe->resident = false;
+ vpe->ready = false;
+
return ret;
}
@@ -258,6 +260,23 @@ int its_make_vpe_resident(struct its_vpe *vpe, bool g0en, bool g1en)
return ret;
}
+int its_commit_vpe(struct its_vpe *vpe)
+{
+ struct its_cmd_info info = {
+ .cmd_type = COMMIT_VPE,
+ };
+ int ret;
+
+ WARN_ON(preemptible());
+
+ ret = its_send_vpe_cmd(vpe, &info);
+ if (!ret)
+ vpe->ready = true;
+
+ return ret;
+}
+
int its_invall_vpe(struct its_vpe *vpe)
{
struct its_cmd_info info = {
diff --git a/drivers/irqchip/irq-loongson-liointc.c b/drivers/irqchip/irq-loongson-liointc.c
index 9ed1bc473663..09b91b81851c 100644
--- a/drivers/irqchip/irq-loongson-liointc.c
+++ b/drivers/irqchip/irq-loongson-liointc.c
@@ -142,8 +142,8 @@ static void liointc_resume(struct irq_chip_generic *gc)
static const char * const parent_names[] = {"int0", "int1", "int2", "int3"};
-int __init liointc_of_init(struct device_node *node,
- struct device_node *parent)
+static int __init liointc_of_init(struct device_node *node,
+ struct device_node *parent)
{
struct irq_chip_generic *gc;
struct irq_domain *domain;
diff --git a/drivers/irqchip/irq-mips-cpu.c b/drivers/irqchip/irq-mips-cpu.c
index 95d4fd8f7a96..0bbb0b2d0dd5 100644
--- a/drivers/irqchip/irq-mips-cpu.c
+++ b/drivers/irqchip/irq-mips-cpu.c
@@ -197,6 +197,13 @@ static int mips_cpu_ipi_alloc(struct irq_domain *domain, unsigned int virq,
if (ret)
return ret;
+ ret = irq_domain_set_hwirq_and_chip(domain->parent, virq + i, hwirq,
+ &mips_mt_cpu_irq_controller,
+ NULL);
+
+ if (ret)
+ return ret;
+
ret = irq_set_irq_type(virq + i, IRQ_TYPE_LEVEL_HIGH);
if (ret)
return ret;
diff --git a/drivers/irqchip/irq-sl28cpld.c b/drivers/irqchip/irq-sl28cpld.c
index 0aa50d025ef6..fbb354413ffa 100644
--- a/drivers/irqchip/irq-sl28cpld.c
+++ b/drivers/irqchip/irq-sl28cpld.c
@@ -66,7 +66,7 @@ static int sl28cpld_intc_probe(struct platform_device *pdev)
irqchip->chip.num_regs = 1;
irqchip->chip.status_base = base + INTC_IP;
irqchip->chip.mask_base = base + INTC_IE;
- irqchip->chip.mask_invert = true,
+ irqchip->chip.mask_invert = true;
irqchip->chip.ack_base = base + INTC_IP;
return devm_regmap_add_irq_chip_fwnode(dev, dev_fwnode(dev),
diff --git a/drivers/isdn/mISDN/Kconfig b/drivers/isdn/mISDN/Kconfig
index 26cf0ac9c4ad..c9a53c222472 100644
--- a/drivers/isdn/mISDN/Kconfig
+++ b/drivers/isdn/mISDN/Kconfig
@@ -13,6 +13,7 @@ if MISDN != n
config MISDN_DSP
tristate "Digital Audio Processing of transparent data"
depends on MISDN
+ select BITREVERSE
help
Enable support for digital audio processing capability.
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 849d3c5f908e..6c1d8b69a465 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -928,6 +928,9 @@ config LEDS_ACER_A500
This option enables support for the Power Button LED of
Acer Iconia Tab A500.
+comment "Flash and Torch LED drivers"
+source "drivers/leds/flash/Kconfig"
+
comment "LED Triggers"
source "drivers/leds/trigger/Kconfig"
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 73e603e1727e..156c0b4e60d9 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -103,5 +103,8 @@ obj-$(CONFIG_LEDS_SPI_BYTE) += leds-spi-byte.o
# LED Userspace Drivers
obj-$(CONFIG_LEDS_USER) += uleds.o
+# Flash and Torch LED Drivers
+obj-$(CONFIG_LEDS_CLASS_FLASH) += flash/
+
# LED Triggers
obj-$(CONFIG_LEDS_TRIGGERS) += trigger/
diff --git a/drivers/leds/flash/Kconfig b/drivers/leds/flash/Kconfig
new file mode 100644
index 000000000000..d21d273ef3da
--- /dev/null
+++ b/drivers/leds/flash/Kconfig
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0
+
+if LEDS_CLASS_FLASH
+
+config LEDS_RT8515
+ tristate "LED support for Richtek RT8515 flash/torch LED"
+ depends on GPIOLIB
+ help
+ This option enables support for the Richtek RT8515 flash
+ and torch LEDs found on some mobile phones.
+
+ To compile this driver as a module, choose M here: the module
+ will be called leds-rt8515.
+
+endif # LEDS_CLASS_FLASH
diff --git a/drivers/leds/flash/Makefile b/drivers/leds/flash/Makefile
new file mode 100644
index 000000000000..e990e257f4d7
--- /dev/null
+++ b/drivers/leds/flash/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_LEDS_RT8515) += leds-rt8515.o
diff --git a/drivers/leds/flash/leds-rt8515.c b/drivers/leds/flash/leds-rt8515.c
new file mode 100644
index 000000000000..590bfa180d10
--- /dev/null
+++ b/drivers/leds/flash/leds-rt8515.c
@@ -0,0 +1,397 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * LED driver for Richtek RT8515 flash/torch white LEDs
+ * found on some Samsung mobile phones.
+ *
+ * This is a 1.5A Boost dual channel driver produced around 2011.
+ *
+ * The component lacks a datasheet, but in the schematic picture
+ * from the LG P970 service manual you can see the connections
+ * from the RT8515 to the LED, with two resistors connected
+ * from the pins "RFS" and "RTS" to ground.
+ *
+ * On the LG P970:
+ * RFS (resistance flash setting?) is 20 kOhm
+ * RTS (resistance torch setting?) is 39 kOhm
+ *
+ * Some sleuthing finds us the RT9387A which we have a datasheet for:
+ * https://static5.arrow.com/pdfs/2014/7/27/8/21/12/794/rtt_/manual/94download_ds.jspprt9387a.jspprt9387a.pdf
+ * This apparently works the same way so in theory this driver
+ * should cover RT9387A as well. This has not been tested, please
+ * update the compatibles if you add RT9387A support.
+ *
+ * Linus Walleij <linus.walleij@linaro.org>
+ */
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/led-class-flash.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regulator/consumer.h>
+
+#include <media/v4l2-flash-led-class.h>
+
+/* We can provide 15-700 mA out to the LED */
+#define RT8515_MIN_IOUT_MA 15
+#define RT8515_MAX_IOUT_MA 700
+/* The maximum intensity is 1-16 for flash and 1-100 for torch */
+#define RT8515_FLASH_MAX 16
+#define RT8515_TORCH_MAX 100
+
+#define RT8515_TIMEOUT_US 250000U
+#define RT8515_MAX_TIMEOUT_US 300000U
+
+struct rt8515 {
+ struct led_classdev_flash fled;
+ struct device *dev;
+ struct v4l2_flash *v4l2_flash;
+ struct mutex lock;
+ struct regulator *reg;
+ struct gpio_desc *enable_torch;
+ struct gpio_desc *enable_flash;
+ struct timer_list powerdown_timer;
+ u32 max_timeout; /* Flash max timeout */
+ int flash_max_intensity;
+ int torch_max_intensity;
+};
+
+static struct rt8515 *to_rt8515(struct led_classdev_flash *fled)
+{
+ return container_of(fled, struct rt8515, fled);
+}
+
+static void rt8515_gpio_led_off(struct rt8515 *rt)
+{
+ gpiod_set_value(rt->enable_flash, 0);
+ gpiod_set_value(rt->enable_torch, 0);
+}
+
+static void rt8515_gpio_brightness_commit(struct gpio_desc *gpiod,
+ int brightness)
+{
+ int i;
+
+ /*
+ * Toggling a GPIO line with a small delay increases the
+ * brightness one step at a time.
+ */
+ for (i = 0; i < brightness; i++) {
+ gpiod_set_value(gpiod, 0);
+ udelay(1);
+ gpiod_set_value(gpiod, 1);
+ udelay(1);
+ }
+}
+
+/* This is setting the torch light level */
+static int rt8515_led_brightness_set(struct led_classdev *led,
+ enum led_brightness brightness)
+{
+ struct led_classdev_flash *fled = lcdev_to_flcdev(led);
+ struct rt8515 *rt = to_rt8515(fled);
+
+ mutex_lock(&rt->lock);
+
+ if (brightness == LED_OFF) {
+ /* Off */
+ rt8515_gpio_led_off(rt);
+ } else if (brightness < RT8515_TORCH_MAX) {
+ /* Step it up to movie mode brightness using the torch pin */
+ rt8515_gpio_brightness_commit(rt->enable_torch, brightness);
+ } else {
+ /* Max torch brightness requested */
+ gpiod_set_value(rt->enable_torch, 1);
+ }
+
+ mutex_unlock(&rt->lock);
+
+ return 0;
+}
+
+static int rt8515_led_flash_strobe_set(struct led_classdev_flash *fled,
+ bool state)
+{
+ struct rt8515 *rt = to_rt8515(fled);
+ struct led_flash_setting *timeout = &fled->timeout;
+ int brightness = rt->flash_max_intensity;
+
+ mutex_lock(&rt->lock);
+
+ if (state) {
+ /* Enable LED flash mode and set brightness */
+ rt8515_gpio_brightness_commit(rt->enable_flash, brightness);
+ /* Set timeout */
+ mod_timer(&rt->powerdown_timer,
+ jiffies + usecs_to_jiffies(timeout->val));
+ } else {
+ del_timer_sync(&rt->powerdown_timer);
+ /* Turn the LED off */
+ rt8515_gpio_led_off(rt);
+ }
+
+ fled->led_cdev.brightness = LED_OFF;
+ /* After this the torch LED will be disabled */
+
+ mutex_unlock(&rt->lock);
+
+ return 0;
+}
+
+static int rt8515_led_flash_strobe_get(struct led_classdev_flash *fled,
+ bool *state)
+{
+ struct rt8515 *rt = to_rt8515(fled);
+
+ *state = timer_pending(&rt->powerdown_timer);
+
+ return 0;
+}
+
+static int rt8515_led_flash_timeout_set(struct led_classdev_flash *fled,
+ u32 timeout)
+{
+ /* The timeout is stored in the led-class-flash core */
+ return 0;
+}
+
+static const struct led_flash_ops rt8515_flash_ops = {
+ .strobe_set = rt8515_led_flash_strobe_set,
+ .strobe_get = rt8515_led_flash_strobe_get,
+ .timeout_set = rt8515_led_flash_timeout_set,
+};
+
+static void rt8515_powerdown_timer(struct timer_list *t)
+{
+ struct rt8515 *rt = from_timer(rt, t, powerdown_timer);
+
+ /* Turn the LED off */
+ rt8515_gpio_led_off(rt);
+}
+
+static void rt8515_init_flash_timeout(struct rt8515 *rt)
+{
+ struct led_classdev_flash *fled = &rt->fled;
+ struct led_flash_setting *s;
+
+ /* Init flash timeout setting */
+ s = &fled->timeout;
+ s->min = 1;
+ s->max = rt->max_timeout;
+ s->step = 1;
+ /*
+ * Set default timeout to RT8515_TIMEOUT_US except if
+ * max_timeout from DT is lower.
+ */
+ s->val = min(rt->max_timeout, RT8515_TIMEOUT_US);
+}
+
+#if IS_ENABLED(CONFIG_V4L2_FLASH_LED_CLASS)
+/* Configure the V4L2 flash subdevice */
+static void rt8515_init_v4l2_flash_config(struct rt8515 *rt,
+ struct v4l2_flash_config *v4l2_sd_cfg)
+{
+ struct led_classdev *led = &rt->fled.led_cdev;
+ struct led_flash_setting *s;
+
+ strscpy(v4l2_sd_cfg->dev_name, led->dev->kobj.name,
+ sizeof(v4l2_sd_cfg->dev_name));
+
+ /*
+ * Init flash intensity setting: this is a linear scale
+ * capped from the device tree max intensity setting
+ * 1..flash_max_intensity
+ */
+ s = &v4l2_sd_cfg->intensity;
+ s->min = 1;
+ s->max = rt->flash_max_intensity;
+ s->step = 1;
+ s->val = s->max;
+}
+
+static void rt8515_v4l2_flash_release(struct rt8515 *rt)
+{
+ v4l2_flash_release(rt->v4l2_flash);
+}
+
+#else
+static void rt8515_init_v4l2_flash_config(struct rt8515 *rt,
+ struct v4l2_flash_config *v4l2_sd_cfg)
+{
+}
+
+static void rt8515_v4l2_flash_release(struct rt8515 *rt)
+{
+}
+#endif
+
+static void rt8515_determine_max_intensity(struct rt8515 *rt,
+ struct fwnode_handle *led,
+ const char *resistance,
+ const char *max_ua_prop, int hw_max,
+ int *max_intensity_setting)
+{
+ u32 res = 0; /* A resistance of zero is invalid, so zero means "not set" */
+ u32 ua;
+ u32 max_ma;
+ int max_intensity;
+ int ret;
+
+ fwnode_property_read_u32(rt->dev->fwnode, resistance, &res);
+ ret = fwnode_property_read_u32(led, max_ua_prop, &ua);
+
+ /* Missing info in DT, so fall back to the hardware maxima */
+ if (ret || res == 0) {
+ dev_err(rt->dev,
+ "either %s or %s missing from DT, using HW max\n",
+ resistance, max_ua_prop);
+ max_ma = RT8515_MAX_IOUT_MA;
+ max_intensity = hw_max;
+ goto out_assign_max;
+ }
+
+ /*
+ * Formula from the datasheet, this is the maximum current
+ * defined by the hardware.
+ */
+ max_ma = (5500 * 1000) / res;
+ /*
+ * Calculate max intensity (linear scaling)
+ * Formula is ((ua / 1000) / max_ma) * 100, then simplified
+ */
+ max_intensity = (ua / 10) / max_ma;
+
+ dev_info(rt->dev,
+ "current restricted from %u to %u mA, max intensity %d/100\n",
+ max_ma, (ua / 1000), max_intensity);
+
+out_assign_max:
+ dev_info(rt->dev, "max intensity %d/%d = %d mA\n",
+ max_intensity, hw_max, max_ma);
+ *max_intensity_setting = max_intensity;
+}
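For concreteness, a worked pass through the formula above, using the RTS value quoted in the header comment and an assumed led-max-microamp (the DT values are illustrative, not from this patch):

/*
 * Assumed DT values:
 *   richtek,rts-ohms = <39000>;    the LG P970 torch resistor noted above
 *   led-max-microamp = <100000>;   a 100 mA cap requested in the LED node
 *
 * max_ma        = (5500 * 1000) / 39000 = 141 mA   (hardware limit)
 * max_intensity = (100000 / 10) / 141   = 70       (torch scale, out of 100)
 */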
+
+static int rt8515_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct fwnode_handle *child;
+ struct rt8515 *rt;
+ struct led_classdev *led;
+ struct led_classdev_flash *fled;
+ struct led_init_data init_data = {};
+ struct v4l2_flash_config v4l2_sd_cfg = {};
+ int ret;
+
+ rt = devm_kzalloc(dev, sizeof(*rt), GFP_KERNEL);
+ if (!rt)
+ return -ENOMEM;
+
+ rt->dev = dev;
+ fled = &rt->fled;
+ led = &fled->led_cdev;
+
+ /* ENF - Enable Flash line */
+ rt->enable_flash = devm_gpiod_get(dev, "enf", GPIOD_OUT_LOW);
+ if (IS_ERR(rt->enable_flash))
+ return dev_err_probe(dev, PTR_ERR(rt->enable_flash),
+ "cannot get ENF (enable flash) GPIO\n");
+
+ /* ENT - Enable Torch line */
+ rt->enable_torch = devm_gpiod_get(dev, "ent", GPIOD_OUT_LOW);
+ if (IS_ERR(rt->enable_torch))
+ return dev_err_probe(dev, PTR_ERR(rt->enable_torch),
+ "cannot get ENT (enable torch) GPIO\n");
+
+ child = fwnode_get_next_available_child_node(dev->fwnode, NULL);
+ if (!child) {
+ dev_err(dev,
+ "No fwnode child node found for connected LED.\n");
+ return -EINVAL;
+ }
+ init_data.fwnode = child;
+
+ rt8515_determine_max_intensity(rt, child, "richtek,rfs-ohms",
+ "flash-max-microamp",
+ RT8515_FLASH_MAX,
+ &rt->flash_max_intensity);
+ rt8515_determine_max_intensity(rt, child, "richtek,rts-ohms",
+ "led-max-microamp",
+ RT8515_TORCH_MAX,
+ &rt->torch_max_intensity);
+
+ ret = fwnode_property_read_u32(child, "flash-max-timeout-us",
+ &rt->max_timeout);
+ if (ret) {
+ rt->max_timeout = RT8515_MAX_TIMEOUT_US;
+ dev_warn(dev,
+ "flash-max-timeout-us property missing\n");
+ }
+ timer_setup(&rt->powerdown_timer, rt8515_powerdown_timer, 0);
+ rt8515_init_flash_timeout(rt);
+
+ fled->ops = &rt8515_flash_ops;
+
+ led->max_brightness = rt->torch_max_intensity;
+ led->brightness_set_blocking = rt8515_led_brightness_set;
+ led->flags |= LED_CORE_SUSPENDRESUME | LED_DEV_CAP_FLASH;
+
+ mutex_init(&rt->lock);
+
+ platform_set_drvdata(pdev, rt);
+
+ ret = devm_led_classdev_flash_register_ext(dev, fled, &init_data);
+ if (ret) {
+ dev_err(dev, "can't register LED %s\n", led->name);
+ mutex_destroy(&rt->lock);
+ return ret;
+ }
+
+ rt8515_init_v4l2_flash_config(rt, &v4l2_sd_cfg);
+
+ /* Create a V4L2 Flash device if V4L2 flash is enabled */
+ rt->v4l2_flash = v4l2_flash_init(dev, child, fled, NULL, &v4l2_sd_cfg);
+ if (IS_ERR(rt->v4l2_flash)) {
+ ret = PTR_ERR(rt->v4l2_flash);
+ dev_err(dev, "failed to register V4L2 flash device (%d)\n",
+ ret);
+ /*
+ * Continue without the V4L2 flash
+ * (we still have the classdev)
+ */
+ }
+
+ return 0;
+}
+
+static int rt8515_remove(struct platform_device *pdev)
+{
+ struct rt8515 *rt = platform_get_drvdata(pdev);
+
+ rt8515_v4l2_flash_release(rt);
+ del_timer_sync(&rt->powerdown_timer);
+ mutex_destroy(&rt->lock);
+
+ return 0;
+}
+
+static const struct of_device_id rt8515_match[] = {
+ { .compatible = "richtek,rt8515", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rt8515_match);
+
+static struct platform_driver rt8515_driver = {
+ .driver = {
+ .name = "rt8515",
+ .of_match_table = rt8515_match,
+ },
+ .probe = rt8515_probe,
+ .remove = rt8515_remove,
+};
+module_platform_driver(rt8515_driver);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("Richtek RT8515 LED driver");
+MODULE_LICENSE("GPL");
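Because strobe_set() arms the powerdown timer from the timeout stored by the led-class-flash core, an in-kernel user drives this driver entirely through the generic flash API. A minimal sketch, assuming the caller already holds the struct led_classdev_flash pointer (the function name is hypothetical):

static int example_fire_flash(struct led_classdev_flash *fled)
{
	int ret;

	/*
	 * Checked against the fled->timeout range by the LED flash core,
	 * then read back in rt8515_led_flash_strobe_set().
	 */
	ret = led_set_flash_timeout(fled, 200000);	/* 200 ms */
	if (ret)
		return ret;

	return led_set_flash_strobe(fled, true);
}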
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index 91da90cfb11d..4e7b78a84149 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -378,14 +378,15 @@ void led_trigger_event(struct led_trigger *trig,
enum led_brightness brightness)
{
struct led_classdev *led_cdev;
+ unsigned long flags;
if (!trig)
return;
- read_lock(&trig->leddev_list_lock);
+ read_lock_irqsave(&trig->leddev_list_lock, flags);
list_for_each_entry(led_cdev, &trig->led_cdevs, trig_list)
led_set_brightness(led_cdev, brightness);
- read_unlock(&trig->leddev_list_lock);
+ read_unlock_irqrestore(&trig->leddev_list_lock, flags);
}
EXPORT_SYMBOL_GPL(led_trigger_event);
@@ -396,11 +397,12 @@ static void led_trigger_blink_setup(struct led_trigger *trig,
int invert)
{
struct led_classdev *led_cdev;
+ unsigned long flags;
if (!trig)
return;
- read_lock(&trig->leddev_list_lock);
+ read_lock_irqsave(&trig->leddev_list_lock, flags);
list_for_each_entry(led_cdev, &trig->led_cdevs, trig_list) {
if (oneshot)
led_blink_set_oneshot(led_cdev, delay_on, delay_off,
@@ -408,7 +410,7 @@ static void led_trigger_blink_setup(struct led_trigger *trig,
else
led_blink_set(led_cdev, delay_on, delay_off);
}
- read_unlock(&trig->leddev_list_lock);
+ read_unlock_irqrestore(&trig->leddev_list_lock, flags);
}
void led_trigger_blink(struct led_trigger *trig,
diff --git a/drivers/leds/leds-ariel.c b/drivers/leds/leds-ariel.c
index bb68ba23a7d4..49e1bddaa15e 100644
--- a/drivers/leds/leds-ariel.c
+++ b/drivers/leds/leds-ariel.c
@@ -96,14 +96,14 @@ static int ariel_led_probe(struct platform_device *pdev)
return -ENOMEM;
leds[0].ec_index = EC_BLUE_LED;
- leds[0].led_cdev.name = "blue:power",
+ leds[0].led_cdev.name = "blue:power";
leds[0].led_cdev.default_trigger = "default-on";
leds[1].ec_index = EC_AMBER_LED;
- leds[1].led_cdev.name = "amber:status",
+ leds[1].led_cdev.name = "amber:status";
leds[2].ec_index = EC_GREEN_LED;
- leds[2].led_cdev.name = "green:status",
+ leds[2].led_cdev.name = "green:status";
leds[2].led_cdev.default_trigger = "default-on";
for (i = 0; i < NLEDS; i++) {
diff --git a/drivers/leds/leds-lm3533.c b/drivers/leds/leds-lm3533.c
index b3edee703193..9dd205870525 100644
--- a/drivers/leds/leds-lm3533.c
+++ b/drivers/leds/leds-lm3533.c
@@ -679,7 +679,7 @@ static int lm3533_led_probe(struct platform_device *pdev)
led->cdev.brightness_get = lm3533_led_get;
led->cdev.blink_set = lm3533_led_blink_set;
led->cdev.brightness = LED_OFF;
- led->cdev.groups = lm3533_led_attribute_groups,
+ led->cdev.groups = lm3533_led_attribute_groups;
led->id = pdev->id;
mutex_init(&led->mutex);
diff --git a/drivers/leds/leds-lp50xx.c b/drivers/leds/leds-lp50xx.c
index 5fb4f24aeb2e..f13117eed976 100644
--- a/drivers/leds/leds-lp50xx.c
+++ b/drivers/leds/leds-lp50xx.c
@@ -487,8 +487,10 @@ static int lp50xx_probe_dt(struct lp50xx *priv)
*/
mc_led_info = devm_kcalloc(priv->dev, LP50XX_LEDS_PER_MODULE,
sizeof(*mc_led_info), GFP_KERNEL);
- if (!mc_led_info)
- return -ENOMEM;
+ if (!mc_led_info) {
+ ret = -ENOMEM;
+ goto child_out;
+ }
fwnode_for_each_child_node(child, led_node) {
ret = fwnode_property_read_u32(led_node, "color",
diff --git a/drivers/leds/leds-netxbig.c b/drivers/leds/leds-netxbig.c
index e6fd47365b58..68fbf0b66fad 100644
--- a/drivers/leds/leds-netxbig.c
+++ b/drivers/leds/leds-netxbig.c
@@ -448,31 +448,39 @@ static int netxbig_leds_get_of_pdata(struct device *dev,
gpio_ext = devm_kzalloc(dev, sizeof(*gpio_ext), GFP_KERNEL);
if (!gpio_ext) {
of_node_put(gpio_ext_np);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto put_device;
}
ret = netxbig_gpio_ext_get(dev, gpio_ext_dev, gpio_ext);
of_node_put(gpio_ext_np);
if (ret)
- return ret;
+ goto put_device;
pdata->gpio_ext = gpio_ext;
/* Timers (optional) */
ret = of_property_count_u32_elems(np, "timers");
if (ret > 0) {
- if (ret % 3)
- return -EINVAL;
+ if (ret % 3) {
+ ret = -EINVAL;
+ goto put_device;
+ }
+
num_timers = ret / 3;
timers = devm_kcalloc(dev, num_timers, sizeof(*timers),
GFP_KERNEL);
- if (!timers)
- return -ENOMEM;
+ if (!timers) {
+ ret = -ENOMEM;
+ goto put_device;
+ }
for (i = 0; i < num_timers; i++) {
u32 tmp;
of_property_read_u32_index(np, "timers", 3 * i,
&timers[i].mode);
- if (timers[i].mode >= NETXBIG_LED_MODE_NUM)
- return -EINVAL;
+ if (timers[i].mode >= NETXBIG_LED_MODE_NUM) {
+ ret = -EINVAL;
+ goto put_device;
+ }
of_property_read_u32_index(np, "timers",
3 * i + 1, &tmp);
timers[i].delay_on = tmp;
@@ -488,12 +496,15 @@ static int netxbig_leds_get_of_pdata(struct device *dev,
num_leds = of_get_available_child_count(np);
if (!num_leds) {
dev_err(dev, "No LED subnodes found in DT\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto put_device;
}
leds = devm_kcalloc(dev, num_leds, sizeof(*leds), GFP_KERNEL);
- if (!leds)
- return -ENOMEM;
+ if (!leds) {
+ ret = -ENOMEM;
+ goto put_device;
+ }
led = leds;
for_each_available_child_of_node(np, child) {
@@ -574,6 +585,8 @@ static int netxbig_leds_get_of_pdata(struct device *dev,
err_node_put:
of_node_put(child);
+put_device:
+ put_device(gpio_ext_dev);
return ret;
}
diff --git a/drivers/leds/leds-turris-omnia.c b/drivers/leds/leds-turris-omnia.c
index 8c5bdc3847ee..7b2f4d0ae3fe 100644
--- a/drivers/leds/leds-turris-omnia.c
+++ b/drivers/leds/leds-turris-omnia.c
@@ -12,28 +12,20 @@
#include <linux/of.h>
#include "leds.h"
-#define OMNIA_BOARD_LEDS 12
-#define OMNIA_LED_NUM_CHANNELS 3
+#define OMNIA_BOARD_LEDS 12
+#define OMNIA_LED_NUM_CHANNELS 3
-#define CMD_LED_MODE 3
-#define CMD_LED_MODE_LED(l) ((l) & 0x0f)
-#define CMD_LED_MODE_USER 0x10
+#define CMD_LED_MODE 3
+#define CMD_LED_MODE_LED(l) ((l) & 0x0f)
+#define CMD_LED_MODE_USER 0x10
-#define CMD_LED_STATE 4
-#define CMD_LED_STATE_LED(l) ((l) & 0x0f)
-#define CMD_LED_STATE_ON 0x10
+#define CMD_LED_STATE 4
+#define CMD_LED_STATE_LED(l) ((l) & 0x0f)
+#define CMD_LED_STATE_ON 0x10
-#define CMD_LED_COLOR 5
-#define CMD_LED_SET_BRIGHTNESS 7
-#define CMD_LED_GET_BRIGHTNESS 8
-
-#define OMNIA_CMD 0
-
-#define OMNIA_CMD_LED_COLOR_LED 1
-#define OMNIA_CMD_LED_COLOR_R 2
-#define OMNIA_CMD_LED_COLOR_G 3
-#define OMNIA_CMD_LED_COLOR_B 4
-#define OMNIA_CMD_LED_COLOR_LEN 5
+#define CMD_LED_COLOR 5
+#define CMD_LED_SET_BRIGHTNESS 7
+#define CMD_LED_GET_BRIGHTNESS 8
struct omnia_led {
struct led_classdev_mc mc_cdev;
@@ -41,7 +33,7 @@ struct omnia_led {
int reg;
};
-#define to_omnia_led(l) container_of(l, struct omnia_led, mc_cdev)
+#define to_omnia_led(l) container_of(l, struct omnia_led, mc_cdev)
struct omnia_leds {
struct i2c_client *client;
@@ -55,21 +47,21 @@ static int omnia_led_brightness_set_blocking(struct led_classdev *cdev,
struct led_classdev_mc *mc_cdev = lcdev_to_mccdev(cdev);
struct omnia_leds *leds = dev_get_drvdata(cdev->dev->parent);
struct omnia_led *led = to_omnia_led(mc_cdev);
- u8 buf[OMNIA_CMD_LED_COLOR_LEN], state;
+ u8 buf[5], state;
int ret;
mutex_lock(&leds->lock);
led_mc_calc_color_components(&led->mc_cdev, brightness);
- buf[OMNIA_CMD] = CMD_LED_COLOR;
- buf[OMNIA_CMD_LED_COLOR_LED] = led->reg;
- buf[OMNIA_CMD_LED_COLOR_R] = mc_cdev->subled_info[0].brightness;
- buf[OMNIA_CMD_LED_COLOR_G] = mc_cdev->subled_info[1].brightness;
- buf[OMNIA_CMD_LED_COLOR_B] = mc_cdev->subled_info[2].brightness;
+ buf[0] = CMD_LED_COLOR;
+ buf[1] = led->reg;
+ buf[2] = mc_cdev->subled_info[0].brightness;
+ buf[3] = mc_cdev->subled_info[1].brightness;
+ buf[4] = mc_cdev->subled_info[2].brightness;
state = CMD_LED_STATE_LED(led->reg);
- if (buf[OMNIA_CMD_LED_COLOR_R] || buf[OMNIA_CMD_LED_COLOR_G] || buf[OMNIA_CMD_LED_COLOR_B])
+ if (buf[2] || buf[3] || buf[4])
state |= CMD_LED_STATE_ON;
ret = i2c_smbus_write_byte_data(leds->client, CMD_LED_STATE, state);
@@ -98,9 +90,9 @@ static int omnia_led_register(struct i2c_client *client, struct omnia_led *led,
}
ret = of_property_read_u32(np, "color", &color);
- if (ret || color != LED_COLOR_ID_MULTI) {
+ if (ret || color != LED_COLOR_ID_RGB) {
dev_warn(dev,
- "Node %pOF: must contain 'color' property with value LED_COLOR_ID_MULTI\n",
+ "Node %pOF: must contain 'color' property with value LED_COLOR_ID_RGB\n",
np);
return 0;
}
@@ -126,18 +118,21 @@ static int omnia_led_register(struct i2c_client *client, struct omnia_led *led,
CMD_LED_MODE_LED(led->reg) |
CMD_LED_MODE_USER);
if (ret < 0) {
- dev_err(dev, "Cannot set LED %pOF to software mode: %i\n", np, ret);
+ dev_err(dev, "Cannot set LED %pOF to software mode: %i\n", np,
+ ret);
return ret;
}
/* disable the LED */
- ret = i2c_smbus_write_byte_data(client, CMD_LED_STATE, CMD_LED_STATE_LED(led->reg));
+ ret = i2c_smbus_write_byte_data(client, CMD_LED_STATE,
+ CMD_LED_STATE_LED(led->reg));
if (ret < 0) {
dev_err(dev, "Cannot set LED %pOF brightness: %i\n", np, ret);
return ret;
}
- ret = devm_led_classdev_multicolor_register_ext(dev, &led->mc_cdev, &init_data);
+ ret = devm_led_classdev_multicolor_register_ext(dev, &led->mc_cdev,
+ &init_data);
if (ret < 0) {
dev_err(dev, "Cannot register LED %pOF: %i\n", np, ret);
return ret;
@@ -157,7 +152,8 @@ static int omnia_led_register(struct i2c_client *client, struct omnia_led *led,
* file lives in the device directory of the LED controller, not an individual
* LED, so it should not confuse users.
*/
-static ssize_t brightness_show(struct device *dev, struct device_attribute *a, char *buf)
+static ssize_t brightness_show(struct device *dev, struct device_attribute *a,
+ char *buf)
{
struct i2c_client *client = to_i2c_client(dev);
struct omnia_leds *leds = i2c_get_clientdata(client);
@@ -173,22 +169,23 @@ static ssize_t brightness_show(struct device *dev, struct device_attribute *a, c
return sprintf(buf, "%d\n", ret);
}
-static ssize_t brightness_store(struct device *dev, struct device_attribute *a, const char *buf,
- size_t count)
+static ssize_t brightness_store(struct device *dev, struct device_attribute *a,
+ const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct omnia_leds *leds = i2c_get_clientdata(client);
- unsigned int brightness;
+ unsigned long brightness;
int ret;
- if (sscanf(buf, "%u", &brightness) != 1)
+ if (kstrtoul(buf, 10, &brightness))
return -EINVAL;
if (brightness > 100)
return -EINVAL;
mutex_lock(&leds->lock);
- ret = i2c_smbus_write_byte_data(client, CMD_LED_SET_BRIGHTNESS, (u8) brightness);
+ ret = i2c_smbus_write_byte_data(client, CMD_LED_SET_BRIGHTNESS,
+ (u8)brightness);
mutex_unlock(&leds->lock);
if (ret < 0)
@@ -250,18 +247,18 @@ static int omnia_leds_probe(struct i2c_client *client,
static int omnia_leds_remove(struct i2c_client *client)
{
- u8 buf[OMNIA_CMD_LED_COLOR_LEN];
+ u8 buf[5];
/* put all LEDs into default (HW triggered) mode */
i2c_smbus_write_byte_data(client, CMD_LED_MODE,
CMD_LED_MODE_LED(OMNIA_BOARD_LEDS));
/* set all LEDs color to [255, 255, 255] */
- buf[OMNIA_CMD] = CMD_LED_COLOR;
- buf[OMNIA_CMD_LED_COLOR_LED] = OMNIA_BOARD_LEDS;
- buf[OMNIA_CMD_LED_COLOR_R] = 255;
- buf[OMNIA_CMD_LED_COLOR_G] = 255;
- buf[OMNIA_CMD_LED_COLOR_B] = 255;
+ buf[0] = CMD_LED_COLOR;
+ buf[1] = OMNIA_BOARD_LEDS;
+ buf[2] = 255;
+ buf[3] = 255;
+ buf[4] = 255;
i2c_master_send(client, buf, 5);
diff --git a/drivers/lightnvm/Kconfig b/drivers/lightnvm/Kconfig
index 8f39f9ba5c80..4c2ce210c123 100644
--- a/drivers/lightnvm/Kconfig
+++ b/drivers/lightnvm/Kconfig
@@ -19,6 +19,7 @@ if NVM
config NVM_PBLK
tristate "Physical Block Device Open-Channel SSD target"
+ select CRC32
help
Allows an open-channel SSD to be exposed as a block device to the
host. The target assumes the device exposes raw flash and must be
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index c1bcac71008c..28ddcaa5358b 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -844,11 +844,10 @@ static int nvm_bb_chunk_sense(struct nvm_dev *dev, struct ppa_addr ppa)
rqd.ppa_addr = generic_to_dev_addr(dev, ppa);
ret = nvm_submit_io_sync_raw(dev, &rqd);
+ __free_page(page);
if (ret)
return ret;
- __free_page(page);
-
return rqd.error;
}
diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c
index 97c68731406b..1dddba11e721 100644
--- a/drivers/lightnvm/pblk-core.c
+++ b/drivers/lightnvm/pblk-core.c
@@ -1869,6 +1869,10 @@ void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
struct pblk_line_ws *line_ws;
line_ws = mempool_alloc(&pblk->gen_ws_pool, gfp_mask);
+ if (!line_ws) {
+ pblk_err(pblk, "pblk: could not allocate memory\n");
+ return;
+ }
line_ws->pblk = pblk;
line_ws->line = line;
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index 05b1009e2820..f4abe3529acd 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -16,6 +16,13 @@ config ARM_MHU
The controller has 3 mailbox channels, the last of which can be
used in Secure mode only.
+config ARM_MHU_V2
+ tristate "ARM MHUv2 Mailbox"
+ depends on ARM_AMBA
+ help
+ Say Y here if you want to build the ARM MHUv2 controller driver,
+ which provides unidirectional mailboxes between processing elements.
+
config IMX_MBOX
tristate "i.MX Mailbox"
depends on ARCH_MXC || COMPILE_TEST
@@ -201,7 +208,7 @@ config BCM_FLEXRM_MBOX
config STM32_IPCC
tristate "STM32 IPCC Mailbox"
- depends on MACH_STM32MP157
+ depends on MACH_STM32MP157 || COMPILE_TEST
help
Mailbox implementation for STMicroelectonics STM32 family chips
with hardware for Inter-Processor Communication Controller (IPCC)
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index 2e06e02b2e03..7194fa92c787 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -7,6 +7,8 @@ obj-$(CONFIG_MAILBOX_TEST) += mailbox-test.o
obj-$(CONFIG_ARM_MHU) += arm_mhu.o arm_mhu_db.o
+obj-$(CONFIG_ARM_MHU_V2) += arm_mhuv2.o
+
obj-$(CONFIG_IMX_MBOX) += imx-mailbox.o
obj-$(CONFIG_ARMADA_37XX_RWTM_MBOX) += armada-37xx-rwtm-mailbox.o
diff --git a/drivers/mailbox/arm_mhu_db.c b/drivers/mailbox/arm_mhu_db.c
index 275efe4cca0c..8eb66c4ecf5b 100644
--- a/drivers/mailbox/arm_mhu_db.c
+++ b/drivers/mailbox/arm_mhu_db.c
@@ -180,7 +180,7 @@ static void mhu_db_shutdown(struct mbox_chan *chan)
/* Reset channel */
mhu_db_mbox_clear_irq(chan);
- kfree(chan->con_priv);
+ devm_kfree(mbox->dev, chan->con_priv);
chan->con_priv = NULL;
}
diff --git a/drivers/mailbox/arm_mhuv2.c b/drivers/mailbox/arm_mhuv2.c
new file mode 100644
index 000000000000..67fb10885bb4
--- /dev/null
+++ b/drivers/mailbox/arm_mhuv2.c
@@ -0,0 +1,1136 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ARM Message Handling Unit Version 2 (MHUv2) driver.
+ *
+ * Copyright (C) 2020 ARM Ltd.
+ * Copyright (C) 2020 Linaro Ltd.
+ *
+ * An MHUv2 mailbox controller can provide up to 124 channel windows (each
+ * 32 bits wide), and the driver allows the two transport protocol modes,
+ * data-transfer and doorbell, to be used on those channel windows in any
+ * combination.
+ *
+ * The transport protocols should be specified in the device tree entry for the
+ * device. The transport protocols determine how the underlying hardware
+ * resources of the device are utilized when transmitting data. Refer to the
+ * device tree bindings of the ARM MHUv2 controller for more details.
+ *
+ * The number of registered mailbox channels depends on both the underlying
+ * hardware (mainly the number of channel windows implemented by the platform)
+ * and the selected transport protocols.
+ *
+ * The MHUv2 controller can work both as a sender and receiver, but the driver
+ * and the DT bindings support unidirectional transfers for better allocation of
+ * the channels. That is, this driver will be probed for two separate devices
+ * for each mailbox controller, a sender device and a receiver device.
+ */
+
+#include <linux/amba/bus.h>
+#include <linux/interrupt.h>
+#include <linux/mailbox_controller.h>
+#include <linux/mailbox/arm_mhuv2_message.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/spinlock.h>
+
+/* ====== MHUv2 Registers ====== */
+
+/* Maximum number of channel windows */
+#define MHUV2_CH_WN_MAX 124
+/* Number of combined interrupt status registers */
+#define MHUV2_CMB_INT_ST_REG_CNT 4
+#define MHUV2_STAT_BYTES (sizeof(u32))
+#define MHUV2_STAT_BITS (MHUV2_STAT_BYTES * __CHAR_BIT__)
+
+#define LSB_MASK(n) ((1 << ((n) * __CHAR_BIT__)) - 1)
+#define MHUV2_PROTOCOL_PROP "arm,mhuv2-protocols"
+
+/* Register Message Handling Unit Configuration fields */
+struct mhu_cfg_t {
+ u32 num_ch : 7;
+ u32 pad : 25;
+} __packed;
+
+/* register Interrupt Status fields */
+struct int_st_t {
+ u32 nr2r : 1;
+ u32 r2nr : 1;
+ u32 pad : 30;
+} __packed;
+
+/* Register Interrupt Clear fields */
+struct int_clr_t {
+ u32 nr2r : 1;
+ u32 r2nr : 1;
+ u32 pad : 30;
+} __packed;
+
+/* Register Interrupt Enable fields */
+struct int_en_t {
+ u32 r2nr : 1;
+ u32 nr2r : 1;
+ u32 chcomb : 1;
+ u32 pad : 29;
+} __packed;
+
+/* Register Implementer Identification fields */
+struct iidr_t {
+ u32 implementer : 12;
+ u32 revision : 4;
+ u32 variant : 4;
+ u32 product_id : 12;
+} __packed;
+
+/* Register Architecture Identification Register fields */
+struct aidr_t {
+ u32 arch_minor_rev : 4;
+ u32 arch_major_rev : 4;
+ u32 pad : 24;
+} __packed;
+
+/* Sender Channel Window fields */
+struct mhu2_send_ch_wn_reg {
+ u32 stat;
+ u8 pad1[0x0C - 0x04];
+ u32 stat_set;
+ u32 int_st;
+ u32 int_clr;
+ u32 int_en;
+ u8 pad2[0x20 - 0x1C];
+} __packed;
+
+/* Sender frame register fields */
+struct mhu2_send_frame_reg {
+ struct mhu2_send_ch_wn_reg ch_wn[MHUV2_CH_WN_MAX];
+ struct mhu_cfg_t mhu_cfg;
+ u32 resp_cfg;
+ u32 access_request;
+ u32 access_ready;
+ struct int_st_t int_st;
+ struct int_clr_t int_clr;
+ struct int_en_t int_en;
+ u32 reserved0;
+ u32 chcomb_int_st[MHUV2_CMB_INT_ST_REG_CNT];
+ u8 pad[0xFC8 - 0xFB0];
+ struct iidr_t iidr;
+ struct aidr_t aidr;
+} __packed;
+
+/* Receiver Channel Window fields */
+struct mhu2_recv_ch_wn_reg {
+ u32 stat;
+ u32 stat_masked;
+ u32 stat_clear;
+ u8 reserved0[0x10 - 0x0C];
+ u32 mask;
+ u32 mask_set;
+ u32 mask_clear;
+ u8 pad[0x20 - 0x1C];
+} __packed;
+
+/* Receiver frame register fields */
+struct mhu2_recv_frame_reg {
+ struct mhu2_recv_ch_wn_reg ch_wn[MHUV2_CH_WN_MAX];
+ struct mhu_cfg_t mhu_cfg;
+ u8 reserved0[0xF90 - 0xF84];
+ struct int_st_t int_st;
+ struct int_clr_t int_clr;
+ struct int_en_t int_en;
+ u32 pad;
+ u32 chcomb_int_st[MHUV2_CMB_INT_ST_REG_CNT];
+ u8 reserved2[0xFC8 - 0xFB0];
+ struct iidr_t iidr;
+ struct aidr_t aidr;
+} __packed;
+
+
+/* ====== MHUv2 data structures ====== */
+
+enum mhuv2_transport_protocol {
+ DOORBELL = 0,
+ DATA_TRANSFER = 1
+};
+
+enum mhuv2_frame {
+ RECEIVER_FRAME,
+ SENDER_FRAME
+};
+
+/**
+ * struct mhuv2 - MHUv2 mailbox controller data
+ *
+ * @mbox: Mailbox controller belonging to the MHU frame.
+ * @send/recv: Base address of the register mapping region.
+ * @frame: Frame type: RECEIVER_FRAME or SENDER_FRAME.
+ * @irq: Interrupt.
+ * @windows: Channel windows implemented by the platform.
+ * @minor: Minor version of the controller.
+ * @length: Length of the protocols array in bytes.
+ * @protocols: Raw protocol information, derived from device tree.
+ * @doorbell_pending_lock: spinlock required for correct operation of Tx
+ * interrupt for doorbells.
+ */
+struct mhuv2 {
+ struct mbox_controller mbox;
+ union {
+ struct mhu2_send_frame_reg __iomem *send;
+ struct mhu2_recv_frame_reg __iomem *recv;
+ };
+ enum mhuv2_frame frame;
+ unsigned int irq;
+ unsigned int windows;
+ unsigned int minor;
+ unsigned int length;
+ u32 *protocols;
+
+ spinlock_t doorbell_pending_lock;
+};
+
+#define mhu_from_mbox(_mbox) container_of(_mbox, struct mhuv2, mbox)
+
+/**
+ * struct mhuv2_protocol_ops - MHUv2 operations
+ *
+ * Each transport protocol must provide an implementation of the operations
+ * listed here.
+ *
+ * @rx_startup: Startup callback for receiver.
+ * @rx_shutdown: Shutdown callback for receiver.
+ * @read_data: Reads and clears newly available data.
+ * @tx_startup: Startup callback for transmitter.
+ * @tx_shutdown: Shutdown callback for transmitter.
+ * @last_tx_done: Report back if the last tx is completed or not.
+ * @send_data: Send data to the receiver.
+ */
+struct mhuv2_protocol_ops {
+ int (*rx_startup)(struct mhuv2 *mhu, struct mbox_chan *chan);
+ void (*rx_shutdown)(struct mhuv2 *mhu, struct mbox_chan *chan);
+ void *(*read_data)(struct mhuv2 *mhu, struct mbox_chan *chan);
+
+ void (*tx_startup)(struct mhuv2 *mhu, struct mbox_chan *chan);
+ void (*tx_shutdown)(struct mhuv2 *mhu, struct mbox_chan *chan);
+ int (*last_tx_done)(struct mhuv2 *mhu, struct mbox_chan *chan);
+ int (*send_data)(struct mhuv2 *mhu, struct mbox_chan *chan, void *arg);
+};
+
+/*
+ * MHUv2 mailbox channel's private information
+ *
+ * @ops: protocol specific ops for the channel.
+ * @ch_wn_idx: Channel window index allocated to the channel.
+ * @windows: Total number of windows consumed by the channel, only relevant
+ * in DATA_TRANSFER protocol.
+ * @doorbell: Doorbell bit number within the ch_wn_idx window, only relevant
+ * in DOORBELL protocol.
+ * @pending: Flag indicating pending doorbell interrupt, only relevant in
+ * DOORBELL protocol.
+ */
+struct mhuv2_mbox_chan_priv {
+ const struct mhuv2_protocol_ops *ops;
+ u32 ch_wn_idx;
+ union {
+ u32 windows;
+ struct {
+ u32 doorbell;
+ u32 pending;
+ };
+ };
+};
+
+/* Macro for reading a bitfield within a physically mapped packed struct */
+#define readl_relaxed_bitfield(_regptr, _field) \
+ ({ \
+ u32 _regval; \
+ _regval = readl_relaxed((_regptr)); \
+ (*(typeof((_regptr)))(&_regval))._field; \
+ })
+
+/* Macro for writing a bitfield within a physically mapped packed struct */
+#define writel_relaxed_bitfield(_value, _regptr, _field) \
+ ({ \
+ u32 _regval; \
+ _regval = readl_relaxed(_regptr); \
+ (*(typeof(_regptr))(&_regval))._field = _value; \
+ writel_relaxed(_regval, _regptr); \
+ })
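Both macros perform a single full-width 32-bit access and then reinterpret the register value through the packed struct type to isolate one field, which keeps the MMIO access size correct while still allowing named bitfields. Usage later in this file looks like:

	mhu->windows = readl_relaxed_bitfield(&mhu->send->mhu_cfg, num_ch);
	writel_relaxed_bitfield(1, &mhu->send->int_en, chcomb);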
+
+
+/* =================== Doorbell transport protocol operations =============== */
+
+static int mhuv2_doorbell_rx_startup(struct mhuv2 *mhu, struct mbox_chan *chan)
+{
+ struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
+
+ writel_relaxed(BIT(priv->doorbell),
+ &mhu->recv->ch_wn[priv->ch_wn_idx].mask_clear);
+ return 0;
+}
+
+static void mhuv2_doorbell_rx_shutdown(struct mhuv2 *mhu,
+ struct mbox_chan *chan)
+{
+ struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
+
+ writel_relaxed(BIT(priv->doorbell),
+ &mhu->recv->ch_wn[priv->ch_wn_idx].mask_set);
+}
+
+static void *mhuv2_doorbell_read_data(struct mhuv2 *mhu, struct mbox_chan *chan)
+{
+ struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
+
+ writel_relaxed(BIT(priv->doorbell),
+ &mhu->recv->ch_wn[priv->ch_wn_idx].stat_clear);
+ return NULL;
+}
+
+static int mhuv2_doorbell_last_tx_done(struct mhuv2 *mhu,
+ struct mbox_chan *chan)
+{
+ struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
+
+ return !(readl_relaxed(&mhu->send->ch_wn[priv->ch_wn_idx].stat) &
+ BIT(priv->doorbell));
+}
+
+static int mhuv2_doorbell_send_data(struct mhuv2 *mhu, struct mbox_chan *chan,
+ void *arg)
+{
+ struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mhu->doorbell_pending_lock, flags);
+
+ priv->pending = 1;
+ writel_relaxed(BIT(priv->doorbell),
+ &mhu->send->ch_wn[priv->ch_wn_idx].stat_set);
+
+ spin_unlock_irqrestore(&mhu->doorbell_pending_lock, flags);
+
+ return 0;
+}
+
+static const struct mhuv2_protocol_ops mhuv2_doorbell_ops = {
+ .rx_startup = mhuv2_doorbell_rx_startup,
+ .rx_shutdown = mhuv2_doorbell_rx_shutdown,
+ .read_data = mhuv2_doorbell_read_data,
+ .last_tx_done = mhuv2_doorbell_last_tx_done,
+ .send_data = mhuv2_doorbell_send_data,
+};
+#define IS_PROTOCOL_DOORBELL(_priv) ((_priv)->ops == &mhuv2_doorbell_ops)
+
+/* ============= Data transfer transport protocol operations ================ */
+
+static int mhuv2_data_transfer_rx_startup(struct mhuv2 *mhu,
+ struct mbox_chan *chan)
+{
+ struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
+ int i = priv->ch_wn_idx + priv->windows - 1;
+
+ /*
+ * The protocol mandates that all but the last status register must be
+ * masked.
+ */
+ writel_relaxed(0xFFFFFFFF, &mhu->recv->ch_wn[i].mask_clear);
+ return 0;
+}
+
+static void mhuv2_data_transfer_rx_shutdown(struct mhuv2 *mhu,
+ struct mbox_chan *chan)
+{
+ struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
+ int i = priv->ch_wn_idx + priv->windows - 1;
+
+ writel_relaxed(0xFFFFFFFF, &mhu->recv->ch_wn[i].mask_set);
+}
+
+static void *mhuv2_data_transfer_read_data(struct mhuv2 *mhu,
+ struct mbox_chan *chan)
+{
+ struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
+ const int windows = priv->windows;
+ struct arm_mhuv2_mbox_msg *msg;
+ u32 *data;
+ int i, idx;
+
+ msg = kzalloc(sizeof(*msg) + windows * MHUV2_STAT_BYTES, GFP_KERNEL);
+ if (!msg)
+ return ERR_PTR(-ENOMEM);
+
+ data = msg->data = msg + 1;
+ msg->len = windows * MHUV2_STAT_BYTES;
+
+ /*
+ * Messages are expected in order of most significant word to least
+ * significant word. Refer mhuv2_data_transfer_send_data() for more
+ * details.
+ *
+ * We also need to read the stat register instead of stat_masked, as we
+ * masked all but the last window.
+ *
+ * Last channel window must be cleared as the final operation. Upon
+ * clearing the last channel window register, which is unmasked in
+ * data-transfer protocol, the interrupt is de-asserted.
+ */
+ for (i = 0; i < windows; i++) {
+ idx = priv->ch_wn_idx + i;
+ data[windows - 1 - i] = readl_relaxed(&mhu->recv->ch_wn[idx].stat);
+ writel_relaxed(0xFFFFFFFF, &mhu->recv->ch_wn[idx].stat_clear);
+ }
+
+ return msg;
+}
+
+static void mhuv2_data_transfer_tx_startup(struct mhuv2 *mhu,
+ struct mbox_chan *chan)
+{
+ struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
+ int i = priv->ch_wn_idx + priv->windows - 1;
+
+ /* Enable interrupts only for the last window */
+ if (mhu->minor) {
+ writel_relaxed(0x1, &mhu->send->ch_wn[i].int_clr);
+ writel_relaxed(0x1, &mhu->send->ch_wn[i].int_en);
+ }
+}
+
+static void mhuv2_data_transfer_tx_shutdown(struct mhuv2 *mhu,
+ struct mbox_chan *chan)
+{
+ struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
+ int i = priv->ch_wn_idx + priv->windows - 1;
+
+ if (mhu->minor)
+ writel_relaxed(0x0, &mhu->send->ch_wn[i].int_en);
+}
+
+static int mhuv2_data_transfer_last_tx_done(struct mhuv2 *mhu,
+ struct mbox_chan *chan)
+{
+ struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
+ int i = priv->ch_wn_idx + priv->windows - 1;
+
+ /* Just checking the last channel window should be enough */
+ return !readl_relaxed(&mhu->send->ch_wn[i].stat);
+}
+
+/*
+ * Message will be transmitted from most significant to least significant word.
+ * This is to allow for messages shorter than channel windows to still trigger
+ * the receiver interrupt which gets activated when the last stat register is
+ * written. As an example, a 6-word message is to be written on a 4-channel MHU
+ * connection: Registers marked with '*' are masked, and will not generate an
+ * interrupt on the receiver side once written.
+ *
+ * u32 *data = [0x00000001], [0x00000002], [0x00000003], [0x00000004],
+ * [0x00000005], [0x00000006]
+ *
+ * ROUND 1:
+ * stat reg To write Write sequence
+ * [ stat 3 ] <- [0x00000001] 4 <- triggers interrupt on receiver
+ * [ stat 2 ] <- [0x00000002] 3
+ * [ stat 1 ] <- [0x00000003] 2
+ * [ stat 0 ] <- [0x00000004] 1
+ *
+ * data += 4 // Increment data pointer by number of stat regs
+ *
+ * ROUND 2:
+ * stat reg To write Write sequence
+ * [ stat 3 ] <- [0x00000005] 2 <- triggers interrupt on receiver
+ * [ stat 2 ] <- [0x00000006] 1
+ * [ stat 1 ] <- [0x00000000]
+ * [ stat 0 ] <- [0x00000000]
+ */
+static int mhuv2_data_transfer_send_data(struct mhuv2 *mhu,
+ struct mbox_chan *chan, void *arg)
+{
+ const struct arm_mhuv2_mbox_msg *msg = arg;
+ int bytes_left = msg->len, bytes_to_send, bytes_in_round, i;
+ struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
+ int windows = priv->windows;
+ u32 *data = msg->data, word;
+
+ while (bytes_left) {
+ if (!data[0]) {
+ dev_err(mhu->mbox.dev, "Data aligned at first window can't be zero to guarantee interrupt generation at receiver\n");
+ return -EINVAL;
+ }
+
+ while (!mhuv2_data_transfer_last_tx_done(mhu, chan))
+ continue;
+
+ bytes_in_round = min(bytes_left, (int)(windows * MHUV2_STAT_BYTES));
+
+ for (i = windows - 1; i >= 0; i--) {
+ /* Data less than windows can transfer ? */
+ if (unlikely(bytes_in_round <= i * MHUV2_STAT_BYTES))
+ continue;
+
+ word = data[i];
+ bytes_to_send = bytes_in_round & (MHUV2_STAT_BYTES - 1);
+ if (unlikely(bytes_to_send))
+ word &= LSB_MASK(bytes_to_send);
+ else
+ bytes_to_send = MHUV2_STAT_BYTES;
+
+ writel_relaxed(word, &mhu->send->ch_wn[priv->ch_wn_idx + windows - 1 - i].stat_set);
+ bytes_left -= bytes_to_send;
+ bytes_in_round -= bytes_to_send;
+ }
+
+ data += windows;
+ }
+
+ return 0;
+}
+
+static const struct mhuv2_protocol_ops mhuv2_data_transfer_ops = {
+ .rx_startup = mhuv2_data_transfer_rx_startup,
+ .rx_shutdown = mhuv2_data_transfer_rx_shutdown,
+ .read_data = mhuv2_data_transfer_read_data,
+ .tx_startup = mhuv2_data_transfer_tx_startup,
+ .tx_shutdown = mhuv2_data_transfer_tx_shutdown,
+ .last_tx_done = mhuv2_data_transfer_last_tx_done,
+ .send_data = mhuv2_data_transfer_send_data,
+};
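From a mailbox client's point of view the round-based transmission above stays hidden behind the standard API; the one visible constraint is that the first word of each round (the one landing in the unmasked window) must be non-zero. A minimal client sketch (hypothetical, and assuming the channel was obtained via mbox_request_channel()):

static int example_send(struct mbox_chan *chan)
{
	static u32 words[6] = {
		0x00000001, 0x00000002, 0x00000003,
		0x00000004, 0x00000005, 0x00000006,
	};
	struct arm_mhuv2_mbox_msg msg = {
		.data = words,
		.len = sizeof(words),	/* the 6-word case from the comment above */
	};

	return mbox_send_message(chan, &msg);
}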
+
+/* Interrupt handlers */
+
+static struct mbox_chan *get_irq_chan_comb(struct mhuv2 *mhu, u32 *reg)
+{
+ struct mbox_chan *chans = mhu->mbox.chans;
+ int channel = 0, i, offset = 0, windows, protocol, ch_wn;
+ u32 stat;
+
+ for (i = 0; i < MHUV2_CMB_INT_ST_REG_CNT; i++) {
+ stat = readl_relaxed(reg + i);
+ if (!stat)
+ continue;
+
+ ch_wn = i * MHUV2_STAT_BITS + __builtin_ctz(stat);
+
+ for (i = 0; i < mhu->length; i += 2) {
+ protocol = mhu->protocols[i];
+ windows = mhu->protocols[i + 1];
+
+ if (ch_wn >= offset + windows) {
+ if (protocol == DOORBELL)
+ channel += MHUV2_STAT_BITS * windows;
+ else
+ channel++;
+
+ offset += windows;
+ continue;
+ }
+
+ /* Return first chan of the window in doorbell mode */
+ if (protocol == DOORBELL)
+ channel += MHUV2_STAT_BITS * (ch_wn - offset);
+
+ return &chans[channel];
+ }
+ }
+
+ return ERR_PTR(-EIO);
+}
+
+static irqreturn_t mhuv2_sender_interrupt(int irq, void *data)
+{
+ struct mhuv2 *mhu = data;
+ struct device *dev = mhu->mbox.dev;
+ struct mhuv2_mbox_chan_priv *priv;
+ struct mbox_chan *chan;
+ unsigned long flags;
+ int i, found = 0;
+ u32 stat;
+
+ chan = get_irq_chan_comb(mhu, mhu->send->chcomb_int_st);
+ if (IS_ERR(chan)) {
+ dev_warn(dev, "Failed to find channel for the Tx interrupt\n");
+ return IRQ_NONE;
+ }
+ priv = chan->con_priv;
+
+ if (!IS_PROTOCOL_DOORBELL(priv)) {
+ writel_relaxed(1, &mhu->send->ch_wn[priv->ch_wn_idx + priv->windows - 1].int_clr);
+
+ if (chan->cl) {
+ mbox_chan_txdone(chan, 0);
+ return IRQ_HANDLED;
+ }
+
+ dev_warn(dev, "Tx interrupt Received on channel (%u) not currently attached to a mailbox client\n",
+ priv->ch_wn_idx);
+ return IRQ_NONE;
+ }
+
+ /* Clear the interrupt first, so we don't miss any doorbell later */
+ writel_relaxed(1, &mhu->send->ch_wn[priv->ch_wn_idx].int_clr);
+
+ /*
+ * In Doorbell mode, make sure no new transitions happen while the
+ * interrupt handler is trying to find the finished doorbell tx
+ * operations, else we may think some of the transfers were complete
+ * before they actually were.
+ */
+ spin_lock_irqsave(&mhu->doorbell_pending_lock, flags);
+
+ /*
+ * In case of doorbell mode, the first channel of the window is returned
+ * by get_irq_chan_comb(). Find all the pending channels here.
+ */
+ stat = readl_relaxed(&mhu->send->ch_wn[priv->ch_wn_idx].stat);
+
+ for (i = 0; i < MHUV2_STAT_BITS; i++) {
+ priv = chan[i].con_priv;
+
+ /* Find cases where pending was 1, but stat's bit is cleared */
+ if (priv->pending ^ ((stat >> i) & 0x1)) {
+ BUG_ON(!priv->pending);
+
+ if (!chan->cl) {
+ dev_warn(dev, "Tx interrupt received on doorbell (%u : %u) channel not currently attached to a mailbox client\n",
+ priv->ch_wn_idx, i);
+ continue;
+ }
+
+ mbox_chan_txdone(&chan[i], 0);
+ priv->pending = 0;
+ found++;
+ }
+ }
+
+ spin_unlock_irqrestore(&mhu->doorbell_pending_lock, flags);
+
+ if (!found) {
+ /*
+ * We may have already processed the doorbell in the previous
+ * iteration if the interrupt came right after we cleared it but
+ * before we read the stat register.
+ */
+ dev_dbg(dev, "Couldn't find the doorbell (%u) for the Tx interrupt\n",
+ priv->ch_wn_idx);
+ return IRQ_NONE;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static struct mbox_chan *get_irq_chan_comb_rx(struct mhuv2 *mhu)
+{
+ struct mhuv2_mbox_chan_priv *priv;
+ struct mbox_chan *chan;
+ u32 stat;
+
+ chan = get_irq_chan_comb(mhu, mhu->recv->chcomb_int_st);
+ if (IS_ERR(chan))
+ return chan;
+
+ priv = chan->con_priv;
+ if (!IS_PROTOCOL_DOORBELL(priv))
+ return chan;
+
+ /*
+ * In case of doorbell mode, the first channel of the window is returned
+ * by the routine. Find the exact channel here.
+ */
+ stat = readl_relaxed(&mhu->recv->ch_wn[priv->ch_wn_idx].stat_masked);
+ BUG_ON(!stat);
+
+ return chan + __builtin_ctz(stat);
+}
+
+static struct mbox_chan *get_irq_chan_stat_rx(struct mhuv2 *mhu)
+{
+ struct mbox_chan *chans = mhu->mbox.chans;
+ struct mhuv2_mbox_chan_priv *priv;
+ u32 stat;
+ int i = 0;
+
+ while (i < mhu->mbox.num_chans) {
+ priv = chans[i].con_priv;
+ stat = readl_relaxed(&mhu->recv->ch_wn[priv->ch_wn_idx].stat_masked);
+
+ if (stat) {
+ if (IS_PROTOCOL_DOORBELL(priv))
+ i += __builtin_ctz(stat);
+ return &chans[i];
+ }
+
+ i += IS_PROTOCOL_DOORBELL(priv) ? MHUV2_STAT_BITS : 1;
+ }
+
+ return ERR_PTR(-EIO);
+}
+
+static struct mbox_chan *get_irq_chan_rx(struct mhuv2 *mhu)
+{
+ if (!mhu->minor)
+ return get_irq_chan_stat_rx(mhu);
+
+ return get_irq_chan_comb_rx(mhu);
+}
+
+static irqreturn_t mhuv2_receiver_interrupt(int irq, void *arg)
+{
+ struct mhuv2 *mhu = arg;
+ struct mbox_chan *chan = get_irq_chan_rx(mhu);
+ struct device *dev = mhu->mbox.dev;
+ struct mhuv2_mbox_chan_priv *priv;
+ int ret = IRQ_NONE;
+ void *data;
+
+ if (IS_ERR(chan)) {
+ dev_warn(dev, "Failed to find channel for the rx interrupt\n");
+ return IRQ_NONE;
+ }
+ priv = chan->con_priv;
+
+ /* Read and clear the data first */
+ data = priv->ops->read_data(mhu, chan);
+
+ if (!chan->cl) {
+ dev_warn(dev, "Received data on channel (%u) not currently attached to a mailbox client\n",
+ priv->ch_wn_idx);
+ } else if (IS_ERR(data)) {
+ dev_err(dev, "Failed to read data: %lu\n", PTR_ERR(data));
+ } else {
+ mbox_chan_received_data(chan, data);
+ ret = IRQ_HANDLED;
+ }
+
+ kfree(data);
+ return ret;
+}
+
+/* Sender and receiver ops */
+static bool mhuv2_sender_last_tx_done(struct mbox_chan *chan)
+{
+ struct mhuv2 *mhu = mhu_from_mbox(chan->mbox);
+ struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
+
+ return priv->ops->last_tx_done(mhu, chan);
+}
+
+static int mhuv2_sender_send_data(struct mbox_chan *chan, void *data)
+{
+ struct mhuv2 *mhu = mhu_from_mbox(chan->mbox);
+ struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
+
+ if (!priv->ops->last_tx_done(mhu, chan))
+ return -EBUSY;
+
+ return priv->ops->send_data(mhu, chan, data);
+}
+
+static int mhuv2_sender_startup(struct mbox_chan *chan)
+{
+ struct mhuv2 *mhu = mhu_from_mbox(chan->mbox);
+ struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
+
+ if (priv->ops->tx_startup)
+ priv->ops->tx_startup(mhu, chan);
+ return 0;
+}
+
+static void mhuv2_sender_shutdown(struct mbox_chan *chan)
+{
+ struct mhuv2 *mhu = mhu_from_mbox(chan->mbox);
+ struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
+
+ if (priv->ops->tx_shutdown)
+ priv->ops->tx_shutdown(mhu, chan);
+}
+
+static const struct mbox_chan_ops mhuv2_sender_ops = {
+ .send_data = mhuv2_sender_send_data,
+ .startup = mhuv2_sender_startup,
+ .shutdown = mhuv2_sender_shutdown,
+ .last_tx_done = mhuv2_sender_last_tx_done,
+};
+
+static int mhuv2_receiver_startup(struct mbox_chan *chan)
+{
+ struct mhuv2 *mhu = mhu_from_mbox(chan->mbox);
+ struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
+
+ return priv->ops->rx_startup(mhu, chan);
+}
+
+static void mhuv2_receiver_shutdown(struct mbox_chan *chan)
+{
+ struct mhuv2 *mhu = mhu_from_mbox(chan->mbox);
+ struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
+
+ priv->ops->rx_shutdown(mhu, chan);
+}
+
+static int mhuv2_receiver_send_data(struct mbox_chan *chan, void *data)
+{
+ dev_err(chan->mbox->dev,
+ "Trying to transmit on a receiver MHU frame\n");
+ return -EIO;
+}
+
+static bool mhuv2_receiver_last_tx_done(struct mbox_chan *chan)
+{
+ dev_err(chan->mbox->dev, "Trying to Tx poll on a receiver MHU frame\n");
+ return true;
+}
+
+static const struct mbox_chan_ops mhuv2_receiver_ops = {
+ .send_data = mhuv2_receiver_send_data,
+ .startup = mhuv2_receiver_startup,
+ .shutdown = mhuv2_receiver_shutdown,
+ .last_tx_done = mhuv2_receiver_last_tx_done,
+};
+
+static struct mbox_chan *mhuv2_mbox_of_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *pa)
+{
+ struct mhuv2 *mhu = mhu_from_mbox(mbox);
+ struct mbox_chan *chans = mbox->chans;
+ int channel = 0, i, offset, doorbell, protocol, windows;
+
+ if (pa->args_count != 2)
+ return ERR_PTR(-EINVAL);
+
+ offset = pa->args[0];
+ doorbell = pa->args[1];
+ if (doorbell >= MHUV2_STAT_BITS)
+ goto out;
+
+ for (i = 0; i < mhu->length; i += 2) {
+ protocol = mhu->protocols[i];
+ windows = mhu->protocols[i + 1];
+
+ if (protocol == DOORBELL) {
+ if (offset < windows)
+ return &chans[channel + MHUV2_STAT_BITS * offset + doorbell];
+
+ channel += MHUV2_STAT_BITS * windows;
+ offset -= windows;
+ } else {
+ if (offset == 0) {
+ if (doorbell)
+ goto out;
+
+ return &chans[channel];
+ }
+
+ channel++;
+ offset--;
+ }
+ }
+
+out:
+ dev_err(mbox->dev, "Couldn't xlate to a valid channel (%d: %d)\n",
+ pa->args[0], doorbell);
+ return ERR_PTR(-ENODEV);
+}
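The translation above walks the protocol list in channel order, so a two-cell specifier can be mapped by hand. With an assumed arm,mhuv2-protocols = <0 2 1 4> (DOORBELL on two windows, then DATA_TRANSFER on four), mhuv2_allocate_channels() lays out 65 channels and specifiers resolve as:

/*
 * chans[0..31]  : doorbells 0..31 of window 0
 * chans[32..63] : doorbells 0..31 of window 1
 * chans[64]     : one data-transfer channel spanning windows 2..5
 *
 * <&mhu 0 5> -> chans[0 + 32 * 0 + 5] = chans[5]
 * <&mhu 1 7> -> chans[0 + 32 * 1 + 7] = chans[39]
 * <&mhu 2 0> -> chans[64]   (the doorbell cell must be 0 here)
 */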
+
+static int mhuv2_verify_protocol(struct mhuv2 *mhu)
+{
+ struct device *dev = mhu->mbox.dev;
+ int protocol, windows, channels = 0, total_windows = 0, i;
+
+ for (i = 0; i < mhu->length; i += 2) {
+ protocol = mhu->protocols[i];
+ windows = mhu->protocols[i + 1];
+
+ if (!windows) {
+ dev_err(dev, "Window size can't be zero (%d)\n", i);
+ return -EINVAL;
+ }
+ total_windows += windows;
+
+ if (protocol == DOORBELL) {
+ channels += MHUV2_STAT_BITS * windows;
+ } else if (protocol == DATA_TRANSFER) {
+ channels++;
+ } else {
+ dev_err(dev, "Invalid protocol (%d) present in %s property at index %d\n",
+ protocol, MHUV2_PROTOCOL_PROP, i);
+ return -EINVAL;
+ }
+ }
+
+ if (total_windows > mhu->windows) {
+ dev_err(dev, "Channel windows can't be more than what's implemented by the hardware ( %d: %d)\n",
+ total_windows, mhu->windows);
+ return -EINVAL;
+ }
+
+ mhu->mbox.num_chans = channels;
+ return 0;
+}
+
+static int mhuv2_allocate_channels(struct mhuv2 *mhu)
+{
+ struct mbox_controller *mbox = &mhu->mbox;
+ struct mhuv2_mbox_chan_priv *priv;
+ struct device *dev = mbox->dev;
+ struct mbox_chan *chans;
+ int protocol, windows = 0, next_window = 0, i, j, k;
+
+ chans = devm_kcalloc(dev, mbox->num_chans, sizeof(*chans), GFP_KERNEL);
+ if (!chans)
+ return -ENOMEM;
+
+ mbox->chans = chans;
+
+ for (i = 0; i < mhu->length; i += 2) {
+ next_window += windows;
+
+ protocol = mhu->protocols[i];
+ windows = mhu->protocols[i + 1];
+
+ if (protocol == DATA_TRANSFER) {
+ priv = devm_kmalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->ch_wn_idx = next_window;
+ priv->ops = &mhuv2_data_transfer_ops;
+ priv->windows = windows;
+ chans++->con_priv = priv;
+ continue;
+ }
+
+ for (j = 0; j < windows; j++) {
+ for (k = 0; k < MHUV2_STAT_BITS; k++) {
+ priv = devm_kmalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->ch_wn_idx = next_window + j;
+ priv->ops = &mhuv2_doorbell_ops;
+ priv->doorbell = k;
+ chans++->con_priv = priv;
+ }
+
+ /*
+ * Permanently enable interrupt as we can't
+ * control it per doorbell.
+ */
+ if (mhu->frame == SENDER_FRAME && mhu->minor)
+ writel_relaxed(0x1, &mhu->send->ch_wn[priv->ch_wn_idx].int_en);
+ }
+ }
+
+ /* Make sure we have initialized all channels */
+ BUG_ON(chans - mbox->chans != mbox->num_chans);
+
+ return 0;
+}
+
+static int mhuv2_parse_channels(struct mhuv2 *mhu)
+{
+ struct device *dev = mhu->mbox.dev;
+ const struct device_node *np = dev->of_node;
+ int ret, count;
+ u32 *protocols;
+
+ count = of_property_count_u32_elems(np, MHUV2_PROTOCOL_PROP);
+ if (count <= 0 || count % 2) {
+ dev_err(dev, "Invalid %s property (%d)\n", MHUV2_PROTOCOL_PROP,
+ count);
+ return -EINVAL;
+ }
+
+ protocols = devm_kmalloc_array(dev, count, sizeof(*protocols), GFP_KERNEL);
+ if (!protocols)
+ return -ENOMEM;
+
+ ret = of_property_read_u32_array(np, MHUV2_PROTOCOL_PROP, protocols, count);
+ if (ret) {
+ dev_err(dev, "Failed to read %s property: %d\n",
+ MHUV2_PROTOCOL_PROP, ret);
+ return ret;
+ }
+
+ mhu->protocols = protocols;
+ mhu->length = count;
+
+ ret = mhuv2_verify_protocol(mhu);
+ if (ret)
+ return ret;
+
+ return mhuv2_allocate_channels(mhu);
+}
+
+static int mhuv2_tx_init(struct amba_device *adev, struct mhuv2 *mhu,
+ void __iomem *reg)
+{
+ struct device *dev = mhu->mbox.dev;
+ int ret, i;
+
+ mhu->frame = SENDER_FRAME;
+ mhu->mbox.ops = &mhuv2_sender_ops;
+ mhu->send = reg;
+
+ mhu->windows = readl_relaxed_bitfield(&mhu->send->mhu_cfg, num_ch);
+ mhu->minor = readl_relaxed_bitfield(&mhu->send->aidr, arch_minor_rev);
+
+ spin_lock_init(&mhu->doorbell_pending_lock);
+
+ /*
+ * For minor version 1 and forward, tx interrupt is provided by
+ * the controller.
+ */
+ if (mhu->minor && adev->irq[0]) {
+ ret = devm_request_threaded_irq(dev, adev->irq[0], NULL,
+ mhuv2_sender_interrupt,
+ IRQF_ONESHOT, "mhuv2-tx", mhu);
+ if (ret) {
+ dev_err(dev, "Failed to request tx IRQ, fallback to polling mode: %d\n",
+ ret);
+ } else {
+ mhu->mbox.txdone_irq = true;
+ mhu->mbox.txdone_poll = false;
+ mhu->irq = adev->irq[0];
+
+ writel_relaxed_bitfield(1, &mhu->send->int_en, chcomb);
+
+ /* Disable all channel interrupts */
+ for (i = 0; i < mhu->windows; i++)
+ writel_relaxed(0x0, &mhu->send->ch_wn[i].int_en);
+
+ goto out;
+ }
+ }
+
+ mhu->mbox.txdone_irq = false;
+ mhu->mbox.txdone_poll = true;
+ mhu->mbox.txpoll_period = 1;
+
+out:
+ /* Wait for receiver to be ready */
+ writel_relaxed(0x1, &mhu->send->access_request);
+ while (!readl_relaxed(&mhu->send->access_ready))
+ continue;
+
+ return 0;
+}
+
+static int mhuv2_rx_init(struct amba_device *adev, struct mhuv2 *mhu,
+ void __iomem *reg)
+{
+ struct device *dev = mhu->mbox.dev;
+ int ret, i;
+
+ mhu->frame = RECEIVER_FRAME;
+ mhu->mbox.ops = &mhuv2_receiver_ops;
+ mhu->recv = reg;
+
+ mhu->windows = readl_relaxed_bitfield(&mhu->recv->mhu_cfg, num_ch);
+ mhu->minor = readl_relaxed_bitfield(&mhu->recv->aidr, arch_minor_rev);
+
+ mhu->irq = adev->irq[0];
+ if (!mhu->irq) {
+ dev_err(dev, "Missing receiver IRQ\n");
+ return -EINVAL;
+ }
+
+ ret = devm_request_threaded_irq(dev, mhu->irq, NULL,
+ mhuv2_receiver_interrupt, IRQF_ONESHOT,
+ "mhuv2-rx", mhu);
+ if (ret) {
+ dev_err(dev, "Failed to request rx IRQ\n");
+ return ret;
+ }
+
+ /* Mask all the channel windows */
+ for (i = 0; i < mhu->windows; i++)
+ writel_relaxed(0xFFFFFFFF, &mhu->recv->ch_wn[i].mask_set);
+
+ if (mhu->minor)
+ writel_relaxed_bitfield(1, &mhu->recv->int_en, chcomb);
+
+ return 0;
+}
+
+static int mhuv2_probe(struct amba_device *adev, const struct amba_id *id)
+{
+ struct device *dev = &adev->dev;
+ const struct device_node *np = dev->of_node;
+ struct mhuv2 *mhu;
+ void __iomem *reg;
+ int ret = -EINVAL;
+
+ reg = devm_of_iomap(dev, dev->of_node, 0, NULL);
+ if (!reg)
+ return -ENOMEM;
+
+ mhu = devm_kzalloc(dev, sizeof(*mhu), GFP_KERNEL);
+ if (!mhu)
+ return -ENOMEM;
+
+ mhu->mbox.dev = dev;
+ mhu->mbox.of_xlate = mhuv2_mbox_of_xlate;
+
+ if (of_device_is_compatible(np, "arm,mhuv2-tx"))
+ ret = mhuv2_tx_init(adev, mhu, reg);
+ else if (of_device_is_compatible(np, "arm,mhuv2-rx"))
+ ret = mhuv2_rx_init(adev, mhu, reg);
+ else
+ dev_err(dev, "Invalid compatible property\n");
+
+ if (ret)
+ return ret;
+
+ /* Channel windows can't be 0 */
+ BUG_ON(!mhu->windows);
+
+ ret = mhuv2_parse_channels(mhu);
+ if (ret)
+ return ret;
+
+ amba_set_drvdata(adev, mhu);
+
+ ret = devm_mbox_controller_register(dev, &mhu->mbox);
+ if (ret)
+ dev_err(dev, "failed to register ARM MHUv2 driver %d\n", ret);
+
+ return ret;
+}
+
+static int mhuv2_remove(struct amba_device *adev)
+{
+ struct mhuv2 *mhu = amba_get_drvdata(adev);
+
+ if (mhu->frame == SENDER_FRAME)
+ writel_relaxed(0x0, &mhu->send->access_request);
+
+ return 0;
+}
+
+static struct amba_id mhuv2_ids[] = {
+ {
+ /* 2.0 */
+ .id = 0xbb0d1,
+ .mask = 0xfffff,
+ },
+ {
+ /* 2.1 */
+ .id = 0xbb076,
+ .mask = 0xfffff,
+ },
+ { 0, 0 },
+};
+MODULE_DEVICE_TABLE(amba, mhuv2_ids);
+
+static struct amba_driver mhuv2_driver = {
+ .drv = {
+ .name = "arm-mhuv2",
+ },
+ .id_table = mhuv2_ids,
+ .probe = mhuv2_probe,
+ .remove = mhuv2_remove,
+};
+module_amba_driver(mhuv2_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ARM MHUv2 Driver");
+MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
+MODULE_AUTHOR("Tushar Khandelwal <tushar.khandelwal@arm.com>");
diff --git a/drivers/mailbox/stm32-ipcc.c b/drivers/mailbox/stm32-ipcc.c
index ef966887aa15..b84e0587937c 100644
--- a/drivers/mailbox/stm32-ipcc.c
+++ b/drivers/mailbox/stm32-ipcc.c
@@ -144,11 +144,11 @@ static irqreturn_t stm32_ipcc_tx_irq(int irq, void *data)
static int stm32_ipcc_send_data(struct mbox_chan *link, void *data)
{
- unsigned int chan = (unsigned int)link->con_priv;
+ unsigned long chan = (unsigned long)link->con_priv;
struct stm32_ipcc *ipcc = container_of(link->mbox, struct stm32_ipcc,
controller);
- dev_dbg(ipcc->controller.dev, "%s: chan:%d\n", __func__, chan);
+ dev_dbg(ipcc->controller.dev, "%s: chan:%lu\n", __func__, chan);
/* set channel n occupied */
stm32_ipcc_set_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XSCR,
@@ -163,7 +163,7 @@ static int stm32_ipcc_send_data(struct mbox_chan *link, void *data)
static int stm32_ipcc_startup(struct mbox_chan *link)
{
- unsigned int chan = (unsigned int)link->con_priv;
+ unsigned long chan = (unsigned long)link->con_priv;
struct stm32_ipcc *ipcc = container_of(link->mbox, struct stm32_ipcc,
controller);
int ret;
@@ -183,7 +183,7 @@ static int stm32_ipcc_startup(struct mbox_chan *link)
static void stm32_ipcc_shutdown(struct mbox_chan *link)
{
- unsigned int chan = (unsigned int)link->con_priv;
+ unsigned long chan = (unsigned long)link->con_priv;
struct stm32_ipcc *ipcc = container_of(link->mbox, struct stm32_ipcc,
controller);
@@ -206,7 +206,7 @@ static int stm32_ipcc_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
struct stm32_ipcc *ipcc;
struct resource *res;
- unsigned int i;
+ unsigned long i;
int ret;
u32 ip_ver;
static const char * const irq_name[] = {"rx", "tx"};
@@ -257,9 +257,6 @@ static int stm32_ipcc_probe(struct platform_device *pdev)
for (i = 0; i < IPCC_IRQ_NUM; i++) {
ipcc->irqs[i] = platform_get_irq_byname(pdev, irq_name[i]);
if (ipcc->irqs[i] < 0) {
- if (ipcc->irqs[i] != -EPROBE_DEFER)
- dev_err(dev, "no IRQ specified %s\n",
- irq_name[i]);
ret = ipcc->irqs[i];
goto err_clk;
}
@@ -268,7 +265,7 @@ static int stm32_ipcc_probe(struct platform_device *pdev)
irq_thread[i], IRQF_ONESHOT,
dev_name(dev), ipcc);
if (ret) {
- dev_err(dev, "failed to request irq %d (%d)\n", i, ret);
+ dev_err(dev, "failed to request irq %lu (%d)\n", i, ret);
goto err_clk;
}
}
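[Editor's note] The unsigned int -> unsigned long changes above exist because stm32-ipcc stores the channel number directly in the con_priv pointer; casting a pointer through unsigned int truncates (and warns) on 64-bit, while unsigned long is pointer-sized. A hedged sketch of the idiom:

	/* Pointer-as-index idiom used above; chan is never dereferenced. */
	link->con_priv = (void *)(unsigned long)chan;	/* store index  */
	chan = (unsigned long)link->con_priv;		/* retrieve it  */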
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 30ba3573626c..9e44c09f6410 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -463,6 +463,15 @@ config DM_MULTIPATH_HST
If unsure, say N.
+config DM_MULTIPATH_IOA
+ tristate "I/O Path Selector based on CPU submission"
+ depends on DM_MULTIPATH
+ help
+ This path selector selects the path based on the CPU the I/O is
+ executed on and the CPU-to-path mapping set up at path addition time.
+
+ If unsure, say N.
+
config DM_DELAY
tristate "I/O delaying target"
depends on BLK_DEV_DM
@@ -530,11 +539,22 @@ config DM_VERITY_VERIFY_ROOTHASH_SIG
bool "Verity data device root hash signature verification support"
depends on DM_VERITY
select SYSTEM_DATA_VERIFICATION
- help
+ help
Add the ability for a dm-verity device to be validated if the
pre-generated tree of cryptographic checksums passed has a PKCS#7
signature file that can validate the root hash of the tree.
+ By default, rely on the builtin trusted keyring.
+
+ If unsure, say N.
+
+config DM_VERITY_VERIFY_ROOTHASH_SIG_SECONDARY_KEYRING
+ bool "Verity data device root hash signature verification with secondary keyring"
+ depends on DM_VERITY_VERIFY_ROOTHASH_SIG
+ depends on SECONDARY_TRUSTED_KEYRING
+ help
+ Rely on the secondary trusted keyring to verify dm-verity signatures.
+
If unsure, say N.
config DM_VERITY_FEC
@@ -585,6 +605,7 @@ config DM_INTEGRITY
select BLK_DEV_INTEGRITY
select DM_BUFIO
select CRYPTO
+ select CRYPTO_SKCIPHER
select ASYNC_XOR
help
This device-mapper target emulates a block device that has
@@ -602,6 +623,7 @@ config DM_ZONED
tristate "Drive-managed zoned block device target support"
depends on BLK_DEV_DM
depends on BLK_DEV_ZONED
+ select CRC32
help
This device-mapper target takes a host-managed or host-aware zoned
block device and exposes most of its capacity as a regular block
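[Editor's note] For reference, a .config fragment enabling the options touched above might look like this (illustrative; it assumes DM_MULTIPATH, DM_VERITY, and SECONDARY_TRUSTED_KEYRING are already selected):

	CONFIG_DM_MULTIPATH_IOA=m
	CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG=y
	CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG_SECONDARY_KEYRING=y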
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 6d3e234dc46a..ef7ddc27685c 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -7,23 +7,28 @@ dm-mod-y += dm.o dm-table.o dm-target.o dm-linear.o dm-stripe.o \
dm-ioctl.o dm-io.o dm-kcopyd.o dm-sysfs.o dm-stats.o \
dm-rq.o
dm-multipath-y += dm-path-selector.o dm-mpath.o
+dm-historical-service-time-y += dm-ps-historical-service-time.o
+dm-io-affinity-y += dm-ps-io-affinity.o
+dm-queue-length-y += dm-ps-queue-length.o
+dm-round-robin-y += dm-ps-round-robin.o
+dm-service-time-y += dm-ps-service-time.o
dm-snapshot-y += dm-snap.o dm-exception-store.o dm-snap-transient.o \
dm-snap-persistent.o
dm-mirror-y += dm-raid1.o
-dm-log-userspace-y \
- += dm-log-userspace-base.o dm-log-userspace-transfer.o
+dm-log-userspace-y += dm-log-userspace-base.o dm-log-userspace-transfer.o
dm-bio-prison-y += dm-bio-prison-v1.o dm-bio-prison-v2.o
dm-thin-pool-y += dm-thin.o dm-thin-metadata.o
dm-cache-y += dm-cache-target.o dm-cache-metadata.o dm-cache-policy.o \
dm-cache-background-tracker.o
-dm-cache-smq-y += dm-cache-policy-smq.o
+dm-cache-smq-y += dm-cache-policy-smq.o
dm-ebs-y += dm-ebs-target.o
dm-era-y += dm-era-target.o
dm-clone-y += dm-clone-target.o dm-clone-metadata.o
dm-verity-y += dm-verity-target.o
+dm-zoned-y += dm-zoned-target.o dm-zoned-metadata.o dm-zoned-reclaim.o
+
md-mod-y += md.o md-bitmap.o
raid456-y += raid5.o raid5-cache.o raid5-ppl.o
-dm-zoned-y += dm-zoned-target.o dm-zoned-metadata.o dm-zoned-reclaim.o
linear-y += md-linear.o
multipath-y += md-multipath.o
faulty-y += md-faulty.o
@@ -59,14 +64,15 @@ obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o
obj-$(CONFIG_DM_MULTIPATH_QL) += dm-queue-length.o
obj-$(CONFIG_DM_MULTIPATH_ST) += dm-service-time.o
obj-$(CONFIG_DM_MULTIPATH_HST) += dm-historical-service-time.o
+obj-$(CONFIG_DM_MULTIPATH_IOA) += dm-io-affinity.o
obj-$(CONFIG_DM_SWITCH) += dm-switch.o
obj-$(CONFIG_DM_SNAPSHOT) += dm-snapshot.o
-obj-$(CONFIG_DM_PERSISTENT_DATA) += persistent-data/
+obj-$(CONFIG_DM_PERSISTENT_DATA) += persistent-data/
obj-$(CONFIG_DM_MIRROR) += dm-mirror.o dm-log.o dm-region-hash.o
obj-$(CONFIG_DM_LOG_USERSPACE) += dm-log-userspace.o
obj-$(CONFIG_DM_ZERO) += dm-zero.o
-obj-$(CONFIG_DM_RAID) += dm-raid.o
-obj-$(CONFIG_DM_THIN_PROVISIONING) += dm-thin-pool.o
+obj-$(CONFIG_DM_RAID) += dm-raid.o
+obj-$(CONFIG_DM_THIN_PROVISIONING) += dm-thin-pool.o
obj-$(CONFIG_DM_VERITY) += dm-verity.o
obj-$(CONFIG_DM_CACHE) += dm-cache.o
obj-$(CONFIG_DM_CACHE_SMQ) += dm-cache-smq.o
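[Editor's note] The Makefile hunk keeps the old module names while pointing them at the renamed dm-ps-*.c sources: a dm-<name>-y line lists the objects making up a module, and the matching obj-$(CONFIG_...) line decides whether it is built. The new selector follows the same two-line pattern, producing dm-io-affinity.ko from dm-ps-io-affinity.c:

	dm-io-affinity-y		+= dm-ps-io-affinity.o
	obj-$(CONFIG_DM_MULTIPATH_IOA)	+= dm-io-affinity.o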
diff --git a/drivers/md/bcache/features.c b/drivers/md/bcache/features.c
index 6469223f0b77..d636b7b2d070 100644
--- a/drivers/md/bcache/features.c
+++ b/drivers/md/bcache/features.c
@@ -17,7 +17,7 @@ struct feature {
};
static struct feature feature_list[] = {
- {BCH_FEATURE_INCOMPAT, BCH_FEATURE_INCOMPAT_LARGE_BUCKET,
+ {BCH_FEATURE_INCOMPAT, BCH_FEATURE_INCOMPAT_LOG_LARGE_BUCKET_SIZE,
"large_bucket"},
{0, 0, 0 },
};
diff --git a/drivers/md/bcache/features.h b/drivers/md/bcache/features.h
index a1653c478041..d1c8fd3977fc 100644
--- a/drivers/md/bcache/features.h
+++ b/drivers/md/bcache/features.h
@@ -13,11 +13,15 @@
/* Feature set definition */
/* Incompat feature set */
-#define BCH_FEATURE_INCOMPAT_LARGE_BUCKET 0x0001 /* 32bit bucket size */
+/* 32bit bucket size, obsoleted */
+#define BCH_FEATURE_INCOMPAT_OBSO_LARGE_BUCKET 0x0001
+/* real bucket size is (1 << bucket_size) */
+#define BCH_FEATURE_INCOMPAT_LOG_LARGE_BUCKET_SIZE 0x0002
-#define BCH_FEATURE_COMPAT_SUUP 0
-#define BCH_FEATURE_RO_COMPAT_SUUP 0
-#define BCH_FEATURE_INCOMPAT_SUUP BCH_FEATURE_INCOMPAT_LARGE_BUCKET
+#define BCH_FEATURE_COMPAT_SUPP 0
+#define BCH_FEATURE_RO_COMPAT_SUPP 0
+#define BCH_FEATURE_INCOMPAT_SUPP (BCH_FEATURE_INCOMPAT_OBSO_LARGE_BUCKET| \
+ BCH_FEATURE_INCOMPAT_LOG_LARGE_BUCKET_SIZE)
#define BCH_HAS_COMPAT_FEATURE(sb, mask) \
((sb)->feature_compat & (mask))
@@ -29,6 +33,8 @@
#define BCH_FEATURE_COMPAT_FUNCS(name, flagname) \
static inline int bch_has_feature_##name(struct cache_sb *sb) \
{ \
+ if (sb->version < BCACHE_SB_VERSION_CDEV_WITH_FEATURES) \
+ return 0; \
return (((sb)->feature_compat & \
BCH##_FEATURE_COMPAT_##flagname) != 0); \
} \
@@ -46,6 +52,8 @@ static inline void bch_clear_feature_##name(struct cache_sb *sb) \
#define BCH_FEATURE_RO_COMPAT_FUNCS(name, flagname) \
static inline int bch_has_feature_##name(struct cache_sb *sb) \
{ \
+ if (sb->version < BCACHE_SB_VERSION_CDEV_WITH_FEATURES) \
+ return 0; \
return (((sb)->feature_ro_compat & \
BCH##_FEATURE_RO_COMPAT_##flagname) != 0); \
} \
@@ -63,6 +71,8 @@ static inline void bch_clear_feature_##name(struct cache_sb *sb) \
#define BCH_FEATURE_INCOMPAT_FUNCS(name, flagname) \
static inline int bch_has_feature_##name(struct cache_sb *sb) \
{ \
+ if (sb->version < BCACHE_SB_VERSION_CDEV_WITH_FEATURES) \
+ return 0; \
return (((sb)->feature_incompat & \
BCH##_FEATURE_INCOMPAT_##flagname) != 0); \
} \
@@ -77,7 +87,23 @@ static inline void bch_clear_feature_##name(struct cache_sb *sb) \
~BCH##_FEATURE_INCOMPAT_##flagname; \
}
-BCH_FEATURE_INCOMPAT_FUNCS(large_bucket, LARGE_BUCKET);
+BCH_FEATURE_INCOMPAT_FUNCS(obso_large_bucket, OBSO_LARGE_BUCKET);
+BCH_FEATURE_INCOMPAT_FUNCS(large_bucket, LOG_LARGE_BUCKET_SIZE);
+
+static inline bool bch_has_unknown_compat_features(struct cache_sb *sb)
+{
+ return ((sb->feature_compat & ~BCH_FEATURE_COMPAT_SUPP) != 0);
+}
+
+static inline bool bch_has_unknown_ro_compat_features(struct cache_sb *sb)
+{
+ return ((sb->feature_ro_compat & ~BCH_FEATURE_RO_COMPAT_SUPP) != 0);
+}
+
+static inline bool bch_has_unknown_incompat_features(struct cache_sb *sb)
+{
+ return ((sb->feature_incompat & ~BCH_FEATURE_INCOMPAT_SUPP) != 0);
+}
int bch_print_cache_set_feature_compat(struct cache_set *c, char *buf, int size);
int bch_print_cache_set_feature_ro_compat(struct cache_set *c, char *buf, int size);
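[Editor's note] Expanded by hand, the generator invocation at the bottom of the hunk yields a checker like the following (sketch; the set/clear helpers are elided), showing how the new version guard keeps pre-feature superblocks from matching garbage in the feature fields:

	static inline int bch_has_feature_large_bucket(struct cache_sb *sb)
	{
		if (sb->version < BCACHE_SB_VERSION_CDEV_WITH_FEATURES)
			return 0;
		return ((sb->feature_incompat &
			 BCH_FEATURE_INCOMPAT_LOG_LARGE_BUCKET_SIZE) != 0);
	}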
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 214326383145..85b1f2a9b72d 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -475,7 +475,7 @@ struct search {
unsigned int read_dirty_data:1;
unsigned int cache_missed:1;
- struct hd_struct *part;
+ struct block_device *part;
unsigned long start_time;
struct btree_op op;
@@ -1073,7 +1073,7 @@ struct detached_dev_io_private {
unsigned long start_time;
bio_end_io_t *bi_end_io;
void *bi_private;
- struct hd_struct *part;
+ struct block_device *part;
};
static void detached_dev_end_io(struct bio *bio)
@@ -1230,8 +1230,9 @@ static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
if (dc->io_disable)
return -EIO;
-
- return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
+ if (!dc->bdev->bd_disk->fops->ioctl)
+ return -ENOTTY;
+ return dc->bdev->bd_disk->fops->ioctl(dc->bdev, mode, cmd, arg);
}
void bch_cached_dev_request_init(struct cached_dev *dc)
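[Editor's note] The ioctl hunk open-codes the removed __blkdev_driver_ioctl() helper; a NULL ->ioctl in the disk's block_device_operations now yields -ENOTTY, which is what the helper returned for drivers without an ioctl method. As a standalone sketch:

	/* Open-coded equivalent of the removed helper (sketch). */
	static int driver_ioctl(struct block_device *bdev, fmode_t mode,
				unsigned int cmd, unsigned long arg)
	{
		if (!bdev->bd_disk->fops->ioctl)
			return -ENOTTY;
		return bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg);
	}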
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 46a00134a36a..2047a9cccdb5 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -64,9 +64,25 @@ static unsigned int get_bucket_size(struct cache_sb *sb, struct cache_sb_disk *s
{
unsigned int bucket_size = le16_to_cpu(s->bucket_size);
- if (sb->version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES &&
- bch_has_feature_large_bucket(sb))
- bucket_size |= le16_to_cpu(s->bucket_size_hi) << 16;
+ if (sb->version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES) {
+ if (bch_has_feature_large_bucket(sb)) {
+ unsigned int max, order;
+
+ max = sizeof(unsigned int) * BITS_PER_BYTE - 1;
+ order = le16_to_cpu(s->bucket_size);
+ /*
+ * bcache tool will make sure the overflow won't
+ * happen, an error message here is enough.
+ */
+ if (order > max)
+ pr_err("Bucket size (1 << %u) overflows\n",
+ order);
+ bucket_size = 1 << order;
+ } else if (bch_has_feature_obso_large_bucket(sb)) {
+ bucket_size +=
+ le16_to_cpu(s->obso_bucket_size_hi) << 16;
+ }
+ }
return bucket_size;
}
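[Editor's note] Under the new encoding, the on-disk bucket_size field stores an exponent rather than a size, so a stored value of 12 decodes to 1 << 12 = 4096. The max computed above is sizeof(unsigned int) * BITS_PER_BYTE - 1 = 31, so any exponent above 31 would overflow the 32-bit result and is reported. A worked example:

	/* Worked example of the log encoding above. */
	u16 stored = 12;			/* on-disk s->bucket_size */
	unsigned int bucket_size = 1 << stored;	/* decodes to 4096        */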
@@ -228,6 +244,20 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
sb->feature_compat = le64_to_cpu(s->feature_compat);
sb->feature_incompat = le64_to_cpu(s->feature_incompat);
sb->feature_ro_compat = le64_to_cpu(s->feature_ro_compat);
+
+ /* Check incompatible features */
+ err = "Unsupported compatible feature found";
+ if (bch_has_unknown_compat_features(sb))
+ goto err;
+
+ err = "Unsupported read-only compatible feature found";
+ if (bch_has_unknown_ro_compat_features(sb))
+ goto err;
+
+ err = "Unsupported incompatible feature found";
+ if (bch_has_unknown_incompat_features(sb))
+ goto err;
+
err = read_super_common(sb, bdev, s);
if (err)
goto err;
@@ -1114,9 +1144,6 @@ static void cancel_writeback_rate_update_dwork(struct cached_dev *dc)
static void cached_dev_detach_finish(struct work_struct *w)
{
struct cached_dev *dc = container_of(w, struct cached_dev, detach);
- struct closure cl;
-
- closure_init_stack(&cl);
BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags));
BUG_ON(refcount_read(&dc->count));
@@ -1130,12 +1157,6 @@ static void cached_dev_detach_finish(struct work_struct *w)
dc->writeback_thread = NULL;
}
- memset(&dc->sb.set_uuid, 0, 16);
- SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE);
-
- bch_write_bdev_super(dc, &cl);
- closure_sync(&cl);
-
mutex_lock(&bch_register_lock);
calc_cached_dev_sectors(dc->disk.c);
@@ -1311,6 +1332,12 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
bcache_device_link(&dc->disk, c, "bdev");
atomic_inc(&c->attached_dev_nr);
+ if (bch_has_feature_obso_large_bucket(&(c->cache->sb))) {
+ pr_err("The obsoleted large bucket layout is unsupported, set the bcache device into read-only\n");
+ pr_err("Please update to the latest bcache-tools to create the cache device\n");
+ set_disk_ro(dc->disk.disk, 1);
+ }
+
/* Allow the writeback thread to proceed */
up_write(&dc->writeback_lock);
@@ -1408,7 +1435,7 @@ static int cached_dev_init(struct cached_dev *dc, unsigned int block_size)
q->limits.raid_partial_stripes_expensive;
ret = bcache_device_init(&dc->disk, block_size,
- dc->bdev->bd_part->nr_sects - dc->sb.data_offset,
+ bdev_nr_sectors(dc->bdev) - dc->sb.data_offset,
dc->bdev, &bcache_cached_ops);
if (ret)
return ret;
@@ -1447,8 +1474,7 @@ static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
goto err;
err = "error creating kobject";
- if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj,
- "bcache"))
+ if (kobject_add(&dc->disk.kobj, bdev_kobj(bdev), "bcache"))
goto err;
if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
goto err;
@@ -1534,6 +1560,12 @@ static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
bcache_device_link(d, c, "volume");
+ if (bch_has_feature_obso_large_bucket(&c->cache->sb)) {
+ pr_err("The obsoleted large bucket layout is unsupported, set the bcache device into read-only\n");
+ pr_err("Please update to the latest bcache-tools to create the cache device\n");
+ set_disk_ro(d->disk, 1);
+ }
+
return 0;
err:
kobject_put(&d->kobj);
@@ -2093,6 +2125,9 @@ static int run_cache_set(struct cache_set *c)
c->cache->sb.last_mount = (u32)ktime_get_real_seconds();
bcache_write_super(c);
+ if (bch_has_feature_obso_large_bucket(&c->cache->sb))
+ pr_err("Detect obsoleted large bucket layout, all attached bcache device will be read-only\n");
+
list_for_each_entry_safe(dc, t, &uncached_devices, list)
bch_cached_dev_attach(dc, c, NULL);
@@ -2342,9 +2377,7 @@ static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
goto err;
}
- if (kobject_add(&ca->kobj,
- &part_to_dev(bdev->bd_part)->kobj,
- "bcache")) {
+ if (kobject_add(&ca->kobj, bdev_kobj(bdev), "bcache")) {
err = "error calling kobject_add";
ret = -ENOMEM;
goto out;
@@ -2383,38 +2416,38 @@ kobj_attribute_write(register, register_bcache);
kobj_attribute_write(register_quiet, register_bcache);
kobj_attribute_write(pendings_cleanup, bch_pending_bdevs_cleanup);
-static bool bch_is_open_backing(struct block_device *bdev)
+static bool bch_is_open_backing(dev_t dev)
{
struct cache_set *c, *tc;
struct cached_dev *dc, *t;
list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
list_for_each_entry_safe(dc, t, &c->cached_devs, list)
- if (dc->bdev == bdev)
+ if (dc->bdev->bd_dev == dev)
return true;
list_for_each_entry_safe(dc, t, &uncached_devices, list)
- if (dc->bdev == bdev)
+ if (dc->bdev->bd_dev == dev)
return true;
return false;
}
-static bool bch_is_open_cache(struct block_device *bdev)
+static bool bch_is_open_cache(dev_t dev)
{
struct cache_set *c, *tc;
list_for_each_entry_safe(c, tc, &bch_cache_sets, list) {
struct cache *ca = c->cache;
- if (ca->bdev == bdev)
+ if (ca->bdev->bd_dev == dev)
return true;
}
return false;
}
-static bool bch_is_open(struct block_device *bdev)
+static bool bch_is_open(dev_t dev)
{
- return bch_is_open_cache(bdev) || bch_is_open_backing(bdev);
+ return bch_is_open_cache(dev) || bch_is_open_backing(dev);
}
struct async_reg_args {
@@ -2538,15 +2571,15 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
sb);
if (IS_ERR(bdev)) {
if (bdev == ERR_PTR(-EBUSY)) {
- bdev = lookup_bdev(strim(path));
+ dev_t dev;
+
mutex_lock(&bch_register_lock);
- if (!IS_ERR(bdev) && bch_is_open(bdev))
+ if (lookup_bdev(strim(path), &dev) == 0 &&
+ bch_is_open(dev))
err = "device already registered";
else
err = "device busy";
mutex_unlock(&bch_register_lock);
- if (!IS_ERR(bdev))
- bdput(bdev);
if (attr == &ksysfs_register_quiet)
goto done;
}
@@ -2656,8 +2689,8 @@ static ssize_t bch_pending_bdevs_cleanup(struct kobject *k,
}
list_for_each_entry_safe(pdev, tpdev, &pending_devs, list) {
+ char *pdev_set_uuid = pdev->dc->sb.set_uuid;
list_for_each_entry_safe(c, tc, &bch_cache_sets, list) {
- char *pdev_set_uuid = pdev->dc->sb.set_uuid;
char *set_uuid = c->set_uuid;
if (!memcmp(pdev_set_uuid, set_uuid, 16)) {
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 554e3afc9b68..00a520c03f41 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -404,7 +404,7 @@ STORE(__cached_dev)
if (!env)
return -ENOMEM;
add_uevent_var(env, "DRIVER=bcache");
- add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid),
+ add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid);
add_uevent_var(env, "CACHED_LABEL=%s", buf);
kobject_uevent_env(&disk_to_dev(dc->disk.disk)->kobj,
KOBJ_CHANGE,
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 3c74996978da..a129e4d2707c 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -705,6 +705,15 @@ static int bch_writeback_thread(void *arg)
* bch_cached_dev_detach().
*/
if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) {
+ struct closure cl;
+
+ closure_init_stack(&cl);
+ memset(&dc->sb.set_uuid, 0, 16);
+ SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE);
+
+ bch_write_bdev_super(dc, &cl);
+ closure_sync(&cl);
+
up_write(&dc->writeback_lock);
break;
}
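[Editor's note] The super-block write moved into the writeback thread keeps bcache's on-stack closure idiom: initialize a stack closure, hand it to the asynchronous writer, then block in closure_sync() until the completion drops the last reference. Condensed from the hunk above:

	/* bcache's synchronous-wait idiom (as used above). */
	struct closure cl;

	closure_init_stack(&cl);
	bch_write_bdev_super(dc, &cl);	/* async write holds a reference */
	closure_sync(&cl);		/* wait until it completes       */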
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 9c1a86bde658..fce4cbf9529d 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1534,6 +1534,12 @@ sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
}
EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);
+struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c)
+{
+ return c->dm_io;
+}
+EXPORT_SYMBOL_GPL(dm_bufio_get_dm_io_client);
+
sector_t dm_bufio_get_block_number(struct dm_buffer *b)
{
return b->block;
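[Editor's note] The new accessor lets a dm-bufio client reuse the buffer cache's dm_io client for raw I/O instead of allocating a second one; dm-integrity does exactly that below when building its flush request. A minimal usage sketch:

	/* Sketch: borrow the bufio client for a raw dm_io request. */
	struct dm_io_request io_req = {
		.bi_op	= REQ_OP_WRITE,
		.client	= dm_bufio_get_dm_io_client(c),
	};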
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 4bc453f5bbaa..541c45027cc8 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -2840,7 +2840,6 @@ static void cache_postsuspend(struct dm_target *ti)
static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
bool dirty, uint32_t hint, bool hint_valid)
{
- int r;
struct cache *cache = context;
if (dirty) {
@@ -2849,11 +2848,7 @@ static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
} else
clear_bit(from_cblock(cblock), cache->dirty_bitset);
- r = policy_load_mapping(cache->policy, oblock, cblock, dirty, hint, hint_valid);
- if (r)
- return r;
-
- return 0;
+ return policy_load_mapping(cache->policy, oblock, cblock, dirty, hint, hint_valid);
}
/*
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index d522093cb39d..086d293c2b03 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -96,19 +96,12 @@ struct mapped_device {
*/
struct workqueue_struct *wq;
- /*
- * freeze/thaw support require holding onto a super block
- */
- struct super_block *frozen_sb;
-
/* forced geometry settings */
struct hd_geometry geometry;
/* kobject and completion */
struct dm_kobject_holder kobj_holder;
- struct block_device *bdev;
-
struct dm_stats stats;
/* for blk-mq request-based DM support */
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 392337f16ecf..5a55617a08e6 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1090,16 +1090,16 @@ static const struct crypt_iv_operations crypt_iv_tcw_ops = {
.post = crypt_iv_tcw_post
};
-static struct crypt_iv_operations crypt_iv_random_ops = {
+static const struct crypt_iv_operations crypt_iv_random_ops = {
.generator = crypt_iv_random_gen
};
-static struct crypt_iv_operations crypt_iv_eboiv_ops = {
+static const struct crypt_iv_operations crypt_iv_eboiv_ops = {
.ctr = crypt_iv_eboiv_ctr,
.generator = crypt_iv_eboiv_gen
};
-static struct crypt_iv_operations crypt_iv_elephant_ops = {
+static const struct crypt_iv_operations crypt_iv_elephant_ops = {
.ctr = crypt_iv_elephant_ctr,
.dtr = crypt_iv_elephant_dtr,
.init = crypt_iv_elephant_init,
@@ -1454,13 +1454,16 @@ static int crypt_convert_block_skcipher(struct crypt_config *cc,
static void kcryptd_async_done(struct crypto_async_request *async_req,
int error);
-static void crypt_alloc_req_skcipher(struct crypt_config *cc,
+static int crypt_alloc_req_skcipher(struct crypt_config *cc,
struct convert_context *ctx)
{
unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);
- if (!ctx->r.req)
- ctx->r.req = mempool_alloc(&cc->req_pool, GFP_NOIO);
+ if (!ctx->r.req) {
+ ctx->r.req = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
+ if (!ctx->r.req)
+ return -ENOMEM;
+ }
skcipher_request_set_tfm(ctx->r.req, cc->cipher_tfm.tfms[key_index]);
@@ -1471,13 +1474,18 @@ static void crypt_alloc_req_skcipher(struct crypt_config *cc,
skcipher_request_set_callback(ctx->r.req,
CRYPTO_TFM_REQ_MAY_BACKLOG,
kcryptd_async_done, dmreq_of_req(cc, ctx->r.req));
+
+ return 0;
}
-static void crypt_alloc_req_aead(struct crypt_config *cc,
+static int crypt_alloc_req_aead(struct crypt_config *cc,
struct convert_context *ctx)
{
- if (!ctx->r.req_aead)
- ctx->r.req_aead = mempool_alloc(&cc->req_pool, GFP_NOIO);
+ if (!ctx->r.req_aead) {
+ ctx->r.req_aead = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
+ if (!ctx->r.req_aead)
+ return -ENOMEM;
+ }
aead_request_set_tfm(ctx->r.req_aead, cc->cipher_tfm.tfms_aead[0]);
@@ -1488,15 +1496,17 @@ static void crypt_alloc_req_aead(struct crypt_config *cc,
aead_request_set_callback(ctx->r.req_aead,
CRYPTO_TFM_REQ_MAY_BACKLOG,
kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead));
+
+ return 0;
}
-static void crypt_alloc_req(struct crypt_config *cc,
+static int crypt_alloc_req(struct crypt_config *cc,
struct convert_context *ctx)
{
if (crypt_integrity_aead(cc))
- crypt_alloc_req_aead(cc, ctx);
+ return crypt_alloc_req_aead(cc, ctx);
else
- crypt_alloc_req_skcipher(cc, ctx);
+ return crypt_alloc_req_skcipher(cc, ctx);
}
static void crypt_free_req_skcipher(struct crypt_config *cc,
@@ -1529,17 +1539,28 @@ static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_
* Encrypt / decrypt data from one bio to another one (can be the same one)
*/
static blk_status_t crypt_convert(struct crypt_config *cc,
- struct convert_context *ctx, bool atomic)
+ struct convert_context *ctx, bool atomic, bool reset_pending)
{
unsigned int tag_offset = 0;
unsigned int sector_step = cc->sector_size >> SECTOR_SHIFT;
int r;
- atomic_set(&ctx->cc_pending, 1);
+ /*
+ * If reset_pending is set, we are handling the bio for the first time;
+ * otherwise we are continuing work on the previous bio, so don't touch
+ * the cc_pending counter.
+ */
+ if (reset_pending)
+ atomic_set(&ctx->cc_pending, 1);
while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {
- crypt_alloc_req(cc, ctx);
+ r = crypt_alloc_req(cc, ctx);
+ if (r) {
+ complete(&ctx->restart);
+ return BLK_STS_DEV_RESOURCE;
+ }
+
atomic_inc(&ctx->cc_pending);
if (crypt_integrity_aead(cc))
@@ -1553,7 +1574,25 @@ static blk_status_t crypt_convert(struct crypt_config *cc,
* but the driver request queue is full, let's wait.
*/
case -EBUSY:
- wait_for_completion(&ctx->restart);
+ if (in_interrupt()) {
+ if (try_wait_for_completion(&ctx->restart)) {
+ /*
+ * we don't have to block to wait for completion,
+ * so proceed
+ */
+ } else {
+ /*
+ * we can't wait for completion without blocking
+ * exit and continue processing in a workqueue
+ */
+ ctx->r.req = NULL;
+ ctx->cc_sector += sector_step;
+ tag_offset++;
+ return BLK_STS_DEV_RESOURCE;
+ }
+ } else {
+ wait_for_completion(&ctx->restart);
+ }
reinit_completion(&ctx->restart);
fallthrough;
/*
@@ -1691,6 +1730,12 @@ static void crypt_inc_pending(struct dm_crypt_io *io)
atomic_inc(&io->io_pending);
}
+static void kcryptd_io_bio_endio(struct work_struct *work)
+{
+ struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
+ bio_endio(io->base_bio);
+}
+
/*
* One of the bios was finished. Check for completion of
* the whole request and correctly clean up the buffer.
@@ -1713,7 +1758,23 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
kfree(io->integrity_metadata);
base_bio->bi_status = error;
- bio_endio(base_bio);
+
+ /*
+ * If we are running this function from our tasklet,
+ * we can't call bio_endio() here, because it will call
+ * clone_endio() from dm.c, which in turn will
+ * free the current struct dm_crypt_io structure with
+ * our tasklet. In this case we need to delay bio_endio()
+ * execution to after the tasklet is done and dequeued.
+ */
+ if (tasklet_trylock(&io->tasklet)) {
+ tasklet_unlock(&io->tasklet);
+ bio_endio(base_bio);
+ return;
+ }
+
+ INIT_WORK(&io->work, kcryptd_io_bio_endio);
+ queue_work(cc->io_queue, &io->work);
}
/*
@@ -1945,6 +2006,37 @@ static bool kcryptd_crypt_write_inline(struct crypt_config *cc,
}
}
+static void kcryptd_crypt_write_continue(struct work_struct *work)
+{
+ struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
+ struct crypt_config *cc = io->cc;
+ struct convert_context *ctx = &io->ctx;
+ int crypt_finished;
+ sector_t sector = io->sector;
+ blk_status_t r;
+
+ wait_for_completion(&ctx->restart);
+ reinit_completion(&ctx->restart);
+
+ r = crypt_convert(cc, &io->ctx, true, false);
+ if (r)
+ io->error = r;
+ crypt_finished = atomic_dec_and_test(&ctx->cc_pending);
+ if (!crypt_finished && kcryptd_crypt_write_inline(cc, ctx)) {
+ /* Wait for completion signaled by kcryptd_async_done() */
+ wait_for_completion(&ctx->restart);
+ crypt_finished = 1;
+ }
+
+ /* Encryption was already finished, submit io now */
+ if (crypt_finished) {
+ kcryptd_crypt_write_io_submit(io, 0);
+ io->sector = sector;
+ }
+
+ crypt_dec_pending(io);
+}
+
static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
struct crypt_config *cc = io->cc;
@@ -1973,7 +2065,17 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
crypt_inc_pending(io);
r = crypt_convert(cc, ctx,
- test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags));
+ test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags), true);
+ /*
+ * Crypto API backlogged the request because its queue was full
+ * and we're in softirq context, so continue from a workqueue
+ * (TODO: is it actually possible to be in softirq in the write path?)
+ */
+ if (r == BLK_STS_DEV_RESOURCE) {
+ INIT_WORK(&io->work, kcryptd_crypt_write_continue);
+ queue_work(cc->crypt_queue, &io->work);
+ return;
+ }
if (r)
io->error = r;
crypt_finished = atomic_dec_and_test(&ctx->cc_pending);
@@ -1998,6 +2100,25 @@ static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
crypt_dec_pending(io);
}
+static void kcryptd_crypt_read_continue(struct work_struct *work)
+{
+ struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
+ struct crypt_config *cc = io->cc;
+ blk_status_t r;
+
+ wait_for_completion(&io->ctx.restart);
+ reinit_completion(&io->ctx.restart);
+
+ r = crypt_convert(cc, &io->ctx, true, false);
+ if (r)
+ io->error = r;
+
+ if (atomic_dec_and_test(&io->ctx.cc_pending))
+ kcryptd_crypt_read_done(io);
+
+ crypt_dec_pending(io);
+}
+
static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
{
struct crypt_config *cc = io->cc;
@@ -2009,7 +2130,16 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
io->sector);
r = crypt_convert(cc, &io->ctx,
- test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags));
+ test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
+ /*
+ * Crypto API backlogged the request because its queue was full
+ * and we're in softirq context, so continue from a workqueue
+ */
+ if (r == BLK_STS_DEV_RESOURCE) {
+ INIT_WORK(&io->work, kcryptd_crypt_read_continue);
+ queue_work(cc->crypt_queue, &io->work);
+ return;
+ }
if (r)
io->error = r;
@@ -2091,8 +2221,12 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io)
if ((bio_data_dir(io->base_bio) == READ && test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags)) ||
(bio_data_dir(io->base_bio) == WRITE && test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags))) {
- if (in_irq()) {
- /* Crypto API's "skcipher_walk_first() refuses to work in hard IRQ context */
+ /*
+ * in_irq(): Crypto API's skcipher_walk_first() refuses to work in hard IRQ context.
+ * irqs_disabled(): the kernel may run some I/O completions from the idle
+ * thread, which executes with IRQs disabled.
+ */
+ if (in_irq() || irqs_disabled()) {
tasklet_init(&io->tasklet, kcryptd_crypt_tasklet, (unsigned long)&io->work);
tasklet_schedule(&io->tasklet);
return;
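[Editor's note] The widened check above decides where inline (no-workqueue) crypto may run: hard-IRQ context and IRQs-disabled completion paths are bounced to a tasklet, since the Crypto API's skcipher walk cannot run there. A condensed sketch of the dispatch, using the names from the hunk (kcryptd_crypt() is the worker defined elsewhere in this file):

	/* Sketch of the dispatch policy above. */
	if (in_irq() || irqs_disabled()) {
		tasklet_init(&io->tasklet, kcryptd_crypt_tasklet,
			     (unsigned long)&io->work);
		tasklet_schedule(&io->tasklet);	/* defer to softirq context */
	} else {
		kcryptd_crypt(&io->work);	/* safe to run inline */
	}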
diff --git a/drivers/md/dm-ebs-target.c b/drivers/md/dm-ebs-target.c
index cb85610527c2..55bcfb74f51f 100644
--- a/drivers/md/dm-ebs-target.c
+++ b/drivers/md/dm-ebs-target.c
@@ -86,7 +86,7 @@ static int __ebs_rw_bvec(struct ebs_c *ec, int rw, struct bio_vec *bv, struct bv
else
ba = dm_bufio_new(ec->bufio, block, &b);
- if (unlikely(IS_ERR(ba))) {
+ if (IS_ERR(ba)) {
/*
* Carry on with next buffer, if any, to issue all possible
* data but return error.
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 5a7a1b90e671..b64fede032dc 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -257,8 +257,9 @@ struct dm_integrity_c {
bool journal_uptodate;
bool just_formatted;
bool recalculate_flag;
- bool fix_padding;
bool discard;
+ bool fix_padding;
+ bool legacy_recalculate;
struct alg_spec internal_hash_alg;
struct alg_spec journal_crypt_alg;
@@ -386,6 +387,14 @@ static int dm_integrity_failed(struct dm_integrity_c *ic)
return READ_ONCE(ic->failed);
}
+static bool dm_integrity_disable_recalculate(struct dm_integrity_c *ic)
+{
+ if ((ic->internal_hash_alg.key || ic->journal_mac_alg.key) &&
+ !ic->legacy_recalculate)
+ return true;
+ return false;
+}
+
static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i,
unsigned j, unsigned char seq)
{
@@ -1379,12 +1388,52 @@ thorough_test:
#undef MAY_BE_HASH
}
-static void dm_integrity_flush_buffers(struct dm_integrity_c *ic)
+struct flush_request {
+ struct dm_io_request io_req;
+ struct dm_io_region io_reg;
+ struct dm_integrity_c *ic;
+ struct completion comp;
+};
+
+static void flush_notify(unsigned long error, void *fr_)
+{
+ struct flush_request *fr = fr_;
+ if (unlikely(error != 0))
+ dm_integrity_io_error(fr->ic, "flushing disk cache", -EIO);
+ complete(&fr->comp);
+}
+
+static void dm_integrity_flush_buffers(struct dm_integrity_c *ic, bool flush_data)
{
int r;
+
+ struct flush_request fr;
+
+ if (!ic->meta_dev)
+ flush_data = false;
+ if (flush_data) {
+ fr.io_req.bi_op = REQ_OP_WRITE;
+ fr.io_req.bi_op_flags = REQ_PREFLUSH | REQ_SYNC;
+ fr.io_req.mem.type = DM_IO_KMEM;
+ fr.io_req.mem.ptr.addr = NULL;
+ fr.io_req.notify.fn = flush_notify;
+ fr.io_req.notify.context = &fr;
+ fr.io_req.client = dm_bufio_get_dm_io_client(ic->bufio);
+ fr.io_reg.bdev = ic->dev->bdev;
+ fr.io_reg.sector = 0;
+ fr.io_reg.count = 0;
+ fr.ic = ic;
+ init_completion(&fr.comp);
+ r = dm_io(&fr.io_req, 1, &fr.io_reg, NULL);
+ BUG_ON(r);
+ }
+
r = dm_bufio_write_dirty_buffers(ic->bufio);
if (unlikely(r))
dm_integrity_io_error(ic, "writing tags", r);
+
+ if (flush_data)
+ wait_for_completion(&fr.comp);
}
static void sleep_on_endio_wait(struct dm_integrity_c *ic)
@@ -2110,7 +2159,7 @@ offload_to_thread:
if (unlikely(dio->op == REQ_OP_DISCARD) && likely(ic->mode != 'D')) {
integrity_metadata(&dio->work);
- dm_integrity_flush_buffers(ic);
+ dm_integrity_flush_buffers(ic, false);
dio->in_flight = (atomic_t)ATOMIC_INIT(1);
dio->completion = NULL;
@@ -2195,7 +2244,7 @@ static void integrity_commit(struct work_struct *w)
flushes = bio_list_get(&ic->flush_bio_list);
if (unlikely(ic->mode != 'J')) {
spin_unlock_irq(&ic->endio_wait.lock);
- dm_integrity_flush_buffers(ic);
+ dm_integrity_flush_buffers(ic, true);
goto release_flush_bios;
}
@@ -2409,7 +2458,7 @@ skip_io:
complete_journal_op(&comp);
wait_for_completion_io(&comp.comp);
- dm_integrity_flush_buffers(ic);
+ dm_integrity_flush_buffers(ic, true);
}
static void integrity_writer(struct work_struct *w)
@@ -2451,7 +2500,7 @@ static void recalc_write_super(struct dm_integrity_c *ic)
{
int r;
- dm_integrity_flush_buffers(ic);
+ dm_integrity_flush_buffers(ic, false);
if (dm_integrity_failed(ic))
return;
@@ -2654,7 +2703,7 @@ static void bitmap_flush_work(struct work_struct *work)
unsigned long limit;
struct bio *bio;
- dm_integrity_flush_buffers(ic);
+ dm_integrity_flush_buffers(ic, false);
range.logical_sector = 0;
range.n_sectors = ic->provided_data_sectors;
@@ -2663,9 +2712,7 @@ static void bitmap_flush_work(struct work_struct *work)
add_new_range_and_wait(ic, &range);
spin_unlock_irq(&ic->endio_wait.lock);
- dm_integrity_flush_buffers(ic);
- if (ic->meta_dev)
- blkdev_issue_flush(ic->dev->bdev, GFP_NOIO);
+ dm_integrity_flush_buffers(ic, true);
limit = ic->provided_data_sectors;
if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
@@ -2934,11 +2981,11 @@ static void dm_integrity_postsuspend(struct dm_target *ti)
if (ic->meta_dev)
queue_work(ic->writer_wq, &ic->writer_work);
drain_workqueue(ic->writer_wq);
- dm_integrity_flush_buffers(ic);
+ dm_integrity_flush_buffers(ic, true);
}
if (ic->mode == 'B') {
- dm_integrity_flush_buffers(ic);
+ dm_integrity_flush_buffers(ic, true);
#if 1
/* set to 0 to test bitmap replay code */
init_journal(ic, 0, ic->journal_sections, 0);
@@ -3102,6 +3149,7 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
arg_count += !!ic->journal_crypt_alg.alg_string;
arg_count += !!ic->journal_mac_alg.alg_string;
arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0;
+ arg_count += ic->legacy_recalculate;
DMEMIT("%s %llu %u %c %u", ic->dev->name, ic->start,
ic->tag_size, ic->mode, arg_count);
if (ic->meta_dev)
@@ -3125,6 +3173,8 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
}
if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0)
DMEMIT(" fix_padding");
+ if (ic->legacy_recalculate)
+ DMEMIT(" legacy_recalculate");
#define EMIT_ALG(a, n) \
do { \
@@ -3754,7 +3804,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
unsigned extra_args;
struct dm_arg_set as;
static const struct dm_arg _args[] = {
- {0, 9, "Invalid number of feature args"},
+ {0, 16, "Invalid number of feature args"},
};
unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
bool should_write_sb;
@@ -3902,6 +3952,8 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
ic->discard = true;
} else if (!strcmp(opt_string, "fix_padding")) {
ic->fix_padding = true;
+ } else if (!strcmp(opt_string, "legacy_recalculate")) {
+ ic->legacy_recalculate = true;
} else {
r = -EINVAL;
ti->error = "Invalid argument";
@@ -4197,6 +4249,20 @@ try_smaller_buffer:
r = -ENOMEM;
goto bad;
}
+ } else {
+ if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
+ ti->error = "Recalculate can only be specified with internal_hash";
+ r = -EINVAL;
+ goto bad;
+ }
+ }
+
+ if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
+ le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors &&
+ dm_integrity_disable_recalculate(ic)) {
+ ti->error = "Recalculating with HMAC is disabled for security reasons - if you really need it, use the argument \"legacy_recalculate\"";
+ r = -EOPNOTSUPP;
+ goto bad;
}
ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev,
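[Editor's note] The flush_request built in dm_integrity_flush_buffers() is the standard empty flush: REQ_OP_WRITE with REQ_PREFLUSH, a zero-sector region, and a DM_IO_KMEM payload whose address is NULL, so nothing is transferred and only the device cache is flushed. The bio-level equivalent of the same idiom (dm-snap uses it later in this series):

	/* Empty-flush idiom at the bio level (sketch). */
	bio_reset(bio);
	bio_set_dev(bio, bdev);
	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
	submit_bio_wait(bio);	/* returns 0 or a negative errno */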
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index cd0478d44058..5e306bba4375 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1600,6 +1600,7 @@ static int target_message(struct file *filp, struct dm_ioctl *param, size_t para
if (!argc) {
DMWARN("Empty message received.");
+ r = -EINVAL;
goto out_argv;
}
diff --git a/drivers/md/dm-historical-service-time.c b/drivers/md/dm-ps-historical-service-time.c
index 186f91e2752c..186f91e2752c 100644
--- a/drivers/md/dm-historical-service-time.c
+++ b/drivers/md/dm-ps-historical-service-time.c
diff --git a/drivers/md/dm-ps-io-affinity.c b/drivers/md/dm-ps-io-affinity.c
new file mode 100644
index 000000000000..077655cd4fae
--- /dev/null
+++ b/drivers/md/dm-ps-io-affinity.c
@@ -0,0 +1,272 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 Oracle Corporation
+ *
+ * Module Author: Mike Christie
+ */
+#include "dm-path-selector.h"
+
+#include <linux/device-mapper.h>
+#include <linux/module.h>
+
+#define DM_MSG_PREFIX "multipath io-affinity"
+
+struct path_info {
+ struct dm_path *path;
+ cpumask_var_t cpumask;
+ refcount_t refcount;
+ bool failed;
+};
+
+struct selector {
+ struct path_info **path_map;
+ cpumask_var_t path_mask;
+ atomic_t map_misses;
+};
+
+static void ioa_free_path(struct selector *s, unsigned int cpu)
+{
+ struct path_info *pi = s->path_map[cpu];
+
+ if (!pi)
+ return;
+
+ if (refcount_dec_and_test(&pi->refcount)) {
+ cpumask_clear_cpu(cpu, s->path_mask);
+ free_cpumask_var(pi->cpumask);
+ kfree(pi);
+
+ s->path_map[cpu] = NULL;
+ }
+}
+
+static int ioa_add_path(struct path_selector *ps, struct dm_path *path,
+ int argc, char **argv, char **error)
+{
+ struct selector *s = ps->context;
+ struct path_info *pi = NULL;
+ unsigned int cpu;
+ int ret;
+
+ if (argc != 1) {
+ *error = "io-affinity ps: invalid number of arguments";
+ return -EINVAL;
+ }
+
+ pi = kzalloc(sizeof(*pi), GFP_KERNEL);
+ if (!pi) {
+ *error = "io-affinity ps: Error allocating path context";
+ return -ENOMEM;
+ }
+
+ pi->path = path;
+ path->pscontext = pi;
+ refcount_set(&pi->refcount, 1);
+
+ if (!zalloc_cpumask_var(&pi->cpumask, GFP_KERNEL)) {
+ *error = "io-affinity ps: Error allocating cpumask context";
+ ret = -ENOMEM;
+ goto free_pi;
+ }
+
+ ret = cpumask_parse(argv[0], pi->cpumask);
+ if (ret) {
+ *error = "io-affinity ps: invalid cpumask";
+ ret = -EINVAL;
+ goto free_mask;
+ }
+
+ for_each_cpu(cpu, pi->cpumask) {
+ if (cpu >= nr_cpu_ids) {
+ DMWARN_LIMIT("Ignoring mapping for CPU %u. Max CPU is %u",
+ cpu, nr_cpu_ids);
+ break;
+ }
+
+ if (s->path_map[cpu]) {
+ DMWARN("CPU mapping for %u exists. Ignoring.", cpu);
+ continue;
+ }
+
+ cpumask_set_cpu(cpu, s->path_mask);
+ s->path_map[cpu] = pi;
+ refcount_inc(&pi->refcount);
+ }
+
+ if (refcount_dec_and_test(&pi->refcount)) {
+ *error = "io-affinity ps: No new/valid CPU mapping found";
+ ret = -EINVAL;
+ goto free_mask;
+ }
+
+ return 0;
+
+free_mask:
+ free_cpumask_var(pi->cpumask);
+free_pi:
+ kfree(pi);
+ return ret;
+}
+
+static int ioa_create(struct path_selector *ps, unsigned argc, char **argv)
+{
+ struct selector *s;
+
+ s = kmalloc(sizeof(*s), GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
+
+ s->path_map = kcalloc(nr_cpu_ids, sizeof(struct path_info *),
+ GFP_KERNEL);
+ if (!s->path_map)
+ goto free_selector;
+
+ if (!zalloc_cpumask_var(&s->path_mask, GFP_KERNEL))
+ goto free_map;
+
+ atomic_set(&s->map_misses, 0);
+ ps->context = s;
+ return 0;
+
+free_map:
+ kfree(s->path_map);
+free_selector:
+ kfree(s);
+ return -ENOMEM;
+}
+
+static void ioa_destroy(struct path_selector *ps)
+{
+ struct selector *s = ps->context;
+ unsigned cpu;
+
+ for_each_cpu(cpu, s->path_mask)
+ ioa_free_path(s, cpu);
+
+ free_cpumask_var(s->path_mask);
+ kfree(s->path_map);
+ kfree(s);
+
+ ps->context = NULL;
+}
+
+static int ioa_status(struct path_selector *ps, struct dm_path *path,
+ status_type_t type, char *result, unsigned int maxlen)
+{
+ struct selector *s = ps->context;
+ struct path_info *pi;
+ int sz = 0;
+
+ if (!path) {
+ DMEMIT("0 ");
+ return sz;
+ }
+
+ switch(type) {
+ case STATUSTYPE_INFO:
+ DMEMIT("%d ", atomic_read(&s->map_misses));
+ break;
+ case STATUSTYPE_TABLE:
+ pi = path->pscontext;
+ DMEMIT("%*pb ", cpumask_pr_args(pi->cpumask));
+ break;
+ }
+
+ return sz;
+}
+
+static void ioa_fail_path(struct path_selector *ps, struct dm_path *p)
+{
+ struct path_info *pi = p->pscontext;
+
+ pi->failed = true;
+}
+
+static int ioa_reinstate_path(struct path_selector *ps, struct dm_path *p)
+{
+ struct path_info *pi = p->pscontext;
+
+ pi->failed = false;
+ return 0;
+}
+
+static struct dm_path *ioa_select_path(struct path_selector *ps,
+ size_t nr_bytes)
+{
+ unsigned int cpu, node;
+ struct selector *s = ps->context;
+ const struct cpumask *cpumask;
+ struct path_info *pi;
+ int i;
+
+ cpu = get_cpu();
+
+ pi = s->path_map[cpu];
+ if (pi && !pi->failed)
+ goto done;
+
+ /*
+ * Performance is not optimal: try paths on the local NUMA node first,
+ * then fall back to any usable path rather than failing outright.
+ */
+ if (!pi)
+ atomic_inc(&s->map_misses);
+
+ node = cpu_to_node(cpu);
+ cpumask = cpumask_of_node(node);
+ for_each_cpu(i, cpumask) {
+ pi = s->path_map[i];
+ if (pi && !pi->failed)
+ goto done;
+ }
+
+ for_each_cpu(i, s->path_mask) {
+ pi = s->path_map[i];
+ if (pi && !pi->failed)
+ goto done;
+ }
+ pi = NULL;
+
+done:
+ put_cpu();
+ return pi ? pi->path : NULL;
+}
+
+static struct path_selector_type ioa_ps = {
+ .name = "io-affinity",
+ .module = THIS_MODULE,
+ .table_args = 1,
+ .info_args = 1,
+ .create = ioa_create,
+ .destroy = ioa_destroy,
+ .status = ioa_status,
+ .add_path = ioa_add_path,
+ .fail_path = ioa_fail_path,
+ .reinstate_path = ioa_reinstate_path,
+ .select_path = ioa_select_path,
+};
+
+static int __init dm_ioa_init(void)
+{
+ int ret = dm_register_path_selector(&ioa_ps);
+
+ if (ret < 0)
+ DMERR("register failed %d", ret);
+ return ret;
+}
+
+static void __exit dm_ioa_exit(void)
+{
+ int ret = dm_unregister_path_selector(&ioa_ps);
+
+ if (ret < 0)
+ DMERR("unregister failed %d", ret);
+}
+
+module_init(dm_ioa_init);
+module_exit(dm_ioa_exit);
+
+MODULE_DESCRIPTION(DM_NAME " multipath path selector that selects paths based on the CPU the I/O is being executed on");
+MODULE_AUTHOR("Mike Christie <michael.christie@oracle.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-queue-length.c b/drivers/md/dm-ps-queue-length.c
index 5fd018d18418..5fd018d18418 100644
--- a/drivers/md/dm-queue-length.c
+++ b/drivers/md/dm-ps-queue-length.c
diff --git a/drivers/md/dm-round-robin.c b/drivers/md/dm-ps-round-robin.c
index bdbb7e6e8212..bdbb7e6e8212 100644
--- a/drivers/md/dm-round-robin.c
+++ b/drivers/md/dm-ps-round-robin.c
diff --git a/drivers/md/dm-service-time.c b/drivers/md/dm-ps-service-time.c
index 9cfda665e9eb..9cfda665e9eb 100644
--- a/drivers/md/dm-service-time.c
+++ b/drivers/md/dm-ps-service-time.c
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 56b723d012ac..cab12b2251ba 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -700,8 +700,7 @@ static void rs_set_capacity(struct raid_set *rs)
{
struct gendisk *gendisk = dm_disk(dm_table_get_md(rs->ti->table));
- set_capacity(gendisk, rs->md.array_sectors);
- revalidate_disk_size(gendisk, true);
+ set_capacity_and_notify(gendisk, rs->md.array_sectors);
}
/*
@@ -3730,10 +3729,10 @@ static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
blk_limits_io_opt(limits, chunk_size_bytes * mddev_data_stripes(rs));
/*
- * RAID1 and RAID10 personalities require bio splitting,
- * RAID0/4/5/6 don't and process large discard bios properly.
+ * RAID0 and RAID10 personalities require bio splitting,
+ * RAID1/4/5/6 don't and process large discard bios properly.
*/
- if (rs_is_raid1(rs) || rs_is_raid10(rs)) {
+ if (rs_is_raid0(rs) || rs_is_raid10(rs)) {
limits->discard_granularity = chunk_size_bytes;
limits->max_discard_sectors = rs->md.chunk_sectors;
}
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 729a72ec30cc..13b4385f4d5a 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -397,7 +397,7 @@ static int map_request(struct dm_rq_target_io *tio)
}
/* The target has remapped the I/O so dispatch it */
- trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
+ trace_block_rq_remap(clone, disk_devt(dm_disk(md)),
blk_rq_pos(rq));
ret = dm_dispatch_clone_request(clone, rq);
if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 4668b2cd98f4..11890db71f3f 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -141,6 +141,11 @@ struct dm_snapshot {
* for them to be committed.
*/
struct bio_list bios_queued_during_merge;
+
+ /*
+ * Flush data after merge.
+ */
+ struct bio flush_bio;
};
/*
@@ -1121,6 +1126,17 @@ shut:
static void error_bios(struct bio *bio);
+static int flush_data(struct dm_snapshot *s)
+{
+ struct bio *flush_bio = &s->flush_bio;
+
+ bio_reset(flush_bio);
+ bio_set_dev(flush_bio, s->origin->bdev);
+ flush_bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
+
+ return submit_bio_wait(flush_bio);
+}
+
static void merge_callback(int read_err, unsigned long write_err, void *context)
{
struct dm_snapshot *s = context;
@@ -1134,6 +1150,11 @@ static void merge_callback(int read_err, unsigned long write_err, void *context)
goto shut;
}
+ if (flush_data(s) < 0) {
+ DMERR("Flush after merge failed: shutting down merge");
+ goto shut;
+ }
+
if (s->store->type->commit_merge(s->store,
s->num_merging_chunks) < 0) {
DMERR("Write error in exception store: shutting down merge");
@@ -1318,6 +1339,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
s->first_merging_chunk = 0;
s->num_merging_chunks = 0;
bio_list_init(&s->bios_queued_during_merge);
+ bio_init(&s->flush_bio, NULL, 0);
/* Allocate hash table for COW data */
if (init_hash_tables(s)) {
@@ -1504,6 +1526,8 @@ static void snapshot_dtr(struct dm_target *ti)
dm_exception_store_destroy(s->store);
+ bio_uninit(&s->flush_bio);
+
dm_put_device(ti, s->cow);
dm_put_device(ti, s->origin);
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 151d022b032d..df359d33cda8 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -496,7 +496,7 @@ static void stripe_io_hints(struct dm_target *ti,
static struct target_type stripe_target = {
.name = "striped",
.version = {1, 6, 0},
- .features = DM_TARGET_PASSES_INTEGRITY,
+ .features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_NOWAIT,
.module = THIS_MODULE,
.ctr = stripe_ctr,
.dtr = stripe_dtr,
diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c
index bff4c7fa1cd2..262e2b0fd975 100644
--- a/drivers/md/dm-switch.c
+++ b/drivers/md/dm-switch.c
@@ -550,6 +550,7 @@ static int switch_iterate_devices(struct dm_target *ti,
static struct target_type switch_target = {
.name = "switch",
.version = {1, 1, 0},
+ .features = DM_TARGET_NOWAIT,
.module = THIS_MODULE,
.ctr = switch_ctr,
.dtr = switch_dtr,
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 7eeb7c4169c9..4acf2342f7ad 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -347,16 +347,9 @@ static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
dev_t dm_get_dev_t(const char *path)
{
dev_t dev;
- struct block_device *bdev;
- bdev = lookup_bdev(path);
- if (IS_ERR(bdev))
+ if (lookup_bdev(path, &dev))
dev = name_to_dev_t(path);
- else {
- dev = bdev->bd_dev;
- bdput(bdev);
- }
-
return dev;
}
EXPORT_SYMBOL_GPL(dm_get_dev_t);
@@ -370,14 +363,23 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
{
int r;
dev_t dev;
+ unsigned int major, minor;
+ char dummy;
struct dm_dev_internal *dd;
struct dm_table *t = ti->table;
BUG_ON(!t);
- dev = dm_get_dev_t(path);
- if (!dev)
- return -ENODEV;
+ if (sscanf(path, "%u:%u%c", &major, &minor, &dummy) == 2) {
+ /* Extract the major/minor numbers */
+ dev = MKDEV(major, minor);
+ if (MAJOR(dev) != major || MINOR(dev) != minor)
+ return -EOVERFLOW;
+ } else {
+ dev = dm_get_dev_t(path);
+ if (!dev)
+ return -ENODEV;
+ }
dd = find_device(&t->devices, dev);
if (!dd) {
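[Editor's note] With the new fast path, a "major:minor" string is decoded without a block-device lookup. The trailing %c deliberately over-reads: sscanf() must match exactly two fields, so trailing garbage such as "8:16x" falls through to the name lookup, and the MAJOR/MINOR round-trip catches values that do not fit the dev_t encoding. Worked example:

	/* Worked example of the parse above: "8:16" -> MKDEV(8, 16). */
	unsigned int major, minor;
	char dummy;
	dev_t dev = 0;

	if (sscanf("8:16", "%u:%u%c", &major, &minor, &dummy) == 2)
		dev = MKDEV(major, minor);	/* disk sdb on most systems */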
diff --git a/drivers/md/dm-unstripe.c b/drivers/md/dm-unstripe.c
index e673dacf6418..7357c1bd5863 100644
--- a/drivers/md/dm-unstripe.c
+++ b/drivers/md/dm-unstripe.c
@@ -178,6 +178,7 @@ static void unstripe_io_hints(struct dm_target *ti,
static struct target_type unstripe_target = {
.name = "unstriped",
.version = {1, 1, 0},
+ .features = DM_TARGET_NOWAIT,
.module = THIS_MODULE,
.ctr = unstripe_ctr,
.dtr = unstripe_dtr,
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index f74982dcbea0..6b8e5bdd8526 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -538,6 +538,15 @@ static int verity_verify_io(struct dm_verity_io *io)
}
/*
+ * Skip verity work in response to I/O error when system is shutting down.
+ */
+static inline bool verity_is_system_shutting_down(void)
+{
+ return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF
+ || system_state == SYSTEM_RESTART;
+}
+
+/*
* End one "io" structure with a given error.
*/
static void verity_finish_io(struct dm_verity_io *io, blk_status_t status)
@@ -564,7 +573,8 @@ static void verity_end_io(struct bio *bio)
{
struct dm_verity_io *io = bio->bi_private;
- if (bio->bi_status && !verity_fec_is_enabled(io->v)) {
+ if (bio->bi_status &&
+ (!verity_fec_is_enabled(io->v) || verity_is_system_shutting_down())) {
verity_finish_io(io, bio->bi_status);
return;
}
diff --git a/drivers/md/dm-verity-verify-sig.c b/drivers/md/dm-verity-verify-sig.c
index 614e43db93aa..29385dc470d5 100644
--- a/drivers/md/dm-verity-verify-sig.c
+++ b/drivers/md/dm-verity-verify-sig.c
@@ -119,8 +119,13 @@ int verity_verify_root_hash(const void *root_hash, size_t root_hash_len,
}
ret = verify_pkcs7_signature(root_hash, root_hash_len, sig_data,
- sig_len, NULL, VERIFYING_UNSPECIFIED_SIGNATURE,
- NULL, NULL);
+ sig_len,
+#ifdef CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG_SECONDARY_KEYRING
+ VERIFY_USE_SECONDARY_KEYRING,
+#else
+ NULL,
+#endif
+ VERIFYING_UNSPECIFIED_SIGNATURE, NULL, NULL);
return ret;
}
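[Editor's note] The #ifdef works because verify_pkcs7_signature() treats a NULL trusted_keys argument as the builtin trusted keyring and the VERIFY_USE_SECONDARY_KEYRING sentinel as the secondary one, so switching keyrings requires no change to the call shape. For reference, the sentinel in <linux/verification.h> is:

	#define VERIFY_USE_SECONDARY_KEYRING ((struct key *)1UL)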
diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c
index b65ca8dcfbdc..faa1dbffc8b4 100644
--- a/drivers/md/dm-zero.c
+++ b/drivers/md/dm-zero.c
@@ -59,6 +59,7 @@ static int zero_map(struct dm_target *ti, struct bio *bio)
static struct target_type zero_target = {
.name = "zero",
.version = {1, 1, 0},
+ .features = DM_TARGET_NOWAIT,
.module = THIS_MODULE,
.ctr = zero_ctr,
.map = zero_map,
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 4e0cbfe3f14d..7bac564f3faa 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -562,7 +562,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
* subset of the parent bdev; require extra privileges.
*/
if (!capable(CAP_SYS_RAWIO)) {
- DMWARN_LIMIT(
+ DMDEBUG_LIMIT(
"%s: sending ioctl %x to DM device without required privilege.",
current->comm, cmd);
r = -ENOIOCTLCMD;
@@ -570,7 +570,10 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
}
}
- r = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
+ if (!bdev->bd_disk->fops->ioctl)
+ r = -ENOTTY;
+ else
+ r = bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg);
out:
dm_unprepare_ioctl(md, srcu_idx);
return r;
@@ -1274,8 +1277,7 @@ static blk_qc_t __map_bio(struct dm_target_io *tio)
break;
case DM_MAPIO_REMAPPED:
/* the bio has been remapped so dispatch it */
- trace_block_bio_remap(clone->bi_disk->queue, clone,
- bio_dev(io->orig_bio), sector);
+ trace_block_bio_remap(clone, bio_dev(io->orig_bio), sector);
ret = submit_bio_noacct(clone);
break;
case DM_MAPIO_KILL:
@@ -1420,18 +1422,12 @@ static int __send_empty_flush(struct clone_info *ci)
*/
bio_init(&flush_bio, NULL, 0);
flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
+ flush_bio.bi_disk = ci->io->md->disk;
+ bio_associate_blkg(&flush_bio);
+
ci->bio = &flush_bio;
ci->sector_count = 0;
- /*
- * Empty flush uses a statically initialized bio, as the base for
- * cloning. However, blkg association requires that a bdev is
- * associated with a gendisk, which doesn't happen until the bdev is
- * opened. So, blkg association is done at issue time of the flush
- * rather than when the device is created in alloc_dev().
- */
- bio_set_dev(ci->bio, ci->io->md->bdev);
-
BUG_ON(bio_has_data(ci->bio));
while ((ti = dm_table_get_target(ci->map, target_nr++)))
__send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
@@ -1590,7 +1586,7 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
ci.sector_count = bio_sectors(bio);
while (ci.sector_count && !error) {
error = __split_and_process_non_flush(&ci);
- if (current->bio_list && ci.sector_count && !error) {
+ if (ci.sector_count && !error) {
/*
* Remainder must be passed to submit_bio_noacct()
* so that it gets handled *after* bios already submitted
@@ -1611,12 +1607,12 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
* (by eliminating DM's splitting and just using bio_split)
*/
part_stat_lock();
- __dm_part_stat_sub(&dm_disk(md)->part0,
+ __dm_part_stat_sub(dm_disk(md)->part0,
sectors[op_stat_group(bio_op(bio))], ci.sector_count);
part_stat_unlock();
bio_chain(b, bio);
- trace_block_split(md->queue, b, bio->bi_iter.bi_sector);
+ trace_block_split(b, bio->bi_iter.bi_sector);
ret = submit_bio_noacct(bio);
break;
}
@@ -1748,11 +1744,6 @@ static void cleanup_mapped_device(struct mapped_device *md)
cleanup_srcu_struct(&md->io_barrier);
- if (md->bdev) {
- bdput(md->bdev);
- md->bdev = NULL;
- }
-
mutex_destroy(&md->suspend_lock);
mutex_destroy(&md->type_lock);
mutex_destroy(&md->table_devices_lock);
@@ -1844,10 +1835,6 @@ static struct mapped_device *alloc_dev(int minor)
if (!md->wq)
goto bad;
- md->bdev = bdget_disk(md->disk, 0);
- if (!md->bdev)
- goto bad;
-
dm_stats_init(&md->stats);
/* Populate the mapping, nobody knows we exist yet */
@@ -1972,8 +1959,7 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
if (size != dm_get_size(md))
memset(&md->geometry, 0, sizeof(md->geometry));
- set_capacity(md->disk, size);
- bd_set_nr_sectors(md->bdev, size);
+ set_capacity_and_notify(md->disk, size);
dm_table_event_callback(t, event_callback, md);
@@ -2256,7 +2242,7 @@ EXPORT_SYMBOL_GPL(dm_put);
static bool md_in_flight_bios(struct mapped_device *md)
{
int cpu;
- struct hd_struct *part = &dm_disk(md)->part0;
+ struct block_device *part = dm_disk(md)->part0;
long sum = 0;
for_each_possible_cpu(cpu) {
@@ -2391,27 +2377,19 @@ static int lock_fs(struct mapped_device *md)
{
int r;
- WARN_ON(md->frozen_sb);
+ WARN_ON(test_bit(DMF_FROZEN, &md->flags));
- md->frozen_sb = freeze_bdev(md->bdev);
- if (IS_ERR(md->frozen_sb)) {
- r = PTR_ERR(md->frozen_sb);
- md->frozen_sb = NULL;
- return r;
- }
-
- set_bit(DMF_FROZEN, &md->flags);
-
- return 0;
+ r = freeze_bdev(md->disk->part0);
+ if (!r)
+ set_bit(DMF_FROZEN, &md->flags);
+ return r;
}
static void unlock_fs(struct mapped_device *md)
{
if (!test_bit(DMF_FROZEN, &md->flags))
return;
-
- thaw_bdev(md->bdev, md->frozen_sb);
- md->frozen_sb = NULL;
+ thaw_bdev(md->disk->part0);
clear_bit(DMF_FROZEN, &md->flags);
}
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 4aaf4820b6f6..7fbd41e156c9 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -581,8 +581,7 @@ static int process_recvd_msg(struct mddev *mddev, struct cluster_msg *msg)
process_metadata_update(mddev, msg);
break;
case CHANGE_CAPACITY:
- set_capacity(mddev->gendisk, mddev->array_sectors);
- revalidate_disk_size(mddev->gendisk, true);
+ set_capacity_and_notify(mddev->gendisk, mddev->array_sectors);
break;
case RESYNCING:
set_bit(MD_RESYNCING_REMOTE, &mddev->recovery);
@@ -664,9 +663,27 @@ out:
* Takes the lock on the TOKEN lock resource so no other
* node can communicate while the operation is underway.
*/
-static int lock_token(struct md_cluster_info *cinfo, bool mddev_locked)
+static int lock_token(struct md_cluster_info *cinfo)
{
- int error, set_bit = 0;
+ int error;
+
+ error = dlm_lock_sync(cinfo->token_lockres, DLM_LOCK_EX);
+ if (error) {
+ pr_err("md-cluster(%s:%d): failed to get EX on TOKEN (%d)\n",
+ __func__, __LINE__, error);
+ } else {
+ /* Lock the receive sequence */
+ mutex_lock(&cinfo->recv_mutex);
+ }
+ return error;
+}
+
+/* lock_comm()
+ * Sets the MD_CLUSTER_SEND_LOCK bit to lock the send channel.
+ */
+static int lock_comm(struct md_cluster_info *cinfo, bool mddev_locked)
+{
+ int rv, set_bit = 0;
struct mddev *mddev = cinfo->mddev;
/*
@@ -677,34 +694,19 @@ static int lock_token(struct md_cluster_info *cinfo, bool mddev_locked)
*/
if (mddev_locked && !test_bit(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD,
&cinfo->state)) {
- error = test_and_set_bit_lock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD,
+ rv = test_and_set_bit_lock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD,
&cinfo->state);
- WARN_ON_ONCE(error);
+ WARN_ON_ONCE(rv);
md_wakeup_thread(mddev->thread);
set_bit = 1;
}
- error = dlm_lock_sync(cinfo->token_lockres, DLM_LOCK_EX);
- if (set_bit)
- clear_bit_unlock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state);
- if (error)
- pr_err("md-cluster(%s:%d): failed to get EX on TOKEN (%d)\n",
- __func__, __LINE__, error);
-
- /* Lock the receive sequence */
- mutex_lock(&cinfo->recv_mutex);
- return error;
-}
-
-/* lock_comm()
- * Sets the MD_CLUSTER_SEND_LOCK bit to lock the send channel.
- */
-static int lock_comm(struct md_cluster_info *cinfo, bool mddev_locked)
-{
wait_event(cinfo->wait,
!test_and_set_bit(MD_CLUSTER_SEND_LOCK, &cinfo->state));
-
- return lock_token(cinfo, mddev_locked);
+ rv = lock_token(cinfo);
+ if (set_bit)
+ clear_bit_unlock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state);
+ return rv;
}
static void unlock_comm(struct md_cluster_info *cinfo)
@@ -784,9 +786,11 @@ static int sendmsg(struct md_cluster_info *cinfo, struct cluster_msg *cmsg,
{
int ret;
- lock_comm(cinfo, mddev_locked);
- ret = __sendmsg(cinfo, cmsg);
- unlock_comm(cinfo);
+ ret = lock_comm(cinfo, mddev_locked);
+ if (!ret) {
+ ret = __sendmsg(cinfo, cmsg);
+ unlock_comm(cinfo);
+ }
return ret;
}
@@ -1061,7 +1065,7 @@ static int metadata_update_start(struct mddev *mddev)
return 0;
}
- ret = lock_token(cinfo, 1);
+ ret = lock_token(cinfo);
clear_bit_unlock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state);
return ret;
}
@@ -1255,7 +1259,10 @@ static void update_size(struct mddev *mddev, sector_t old_dev_sectors)
int raid_slot = -1;
md_update_sb(mddev, 1);
- lock_comm(cinfo, 1);
+ if (lock_comm(cinfo, 1)) {
+ pr_err("%s: lock_comm failed\n", __func__);
+ return;
+ }
memset(&cmsg, 0, sizeof(cmsg));
cmsg.type = cpu_to_le32(METADATA_UPDATED);
@@ -1296,13 +1303,10 @@ static void update_size(struct mddev *mddev, sector_t old_dev_sectors)
if (ret)
pr_err("%s:%d: failed to send CHANGE_CAPACITY msg\n",
__func__, __LINE__);
- set_capacity(mddev->gendisk, mddev->array_sectors);
- revalidate_disk_size(mddev->gendisk, true);
+ set_capacity_and_notify(mddev->gendisk, mddev->array_sectors);
} else {
/* revert to previous sectors */
ret = mddev->pers->resize(mddev, old_dev_sectors);
- if (!ret)
- revalidate_disk_size(mddev->gendisk, true);
ret = __sendmsg(cinfo, &cmsg);
if (ret)
pr_err("%s:%d: failed to send METADATA_UPDATED msg\n",
@@ -1407,7 +1411,8 @@ static int add_new_disk(struct mddev *mddev, struct md_rdev *rdev)
cmsg.type = cpu_to_le32(NEWDISK);
memcpy(cmsg.uuid, uuid, 16);
cmsg.raid_slot = cpu_to_le32(rdev->desc_nr);
- lock_comm(cinfo, 1);
+ if (lock_comm(cinfo, 1))
+ return -EAGAIN;
ret = __sendmsg(cinfo, &cmsg);
if (ret) {
unlock_comm(cinfo);
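sendmsg(), update_size() and add_new_disk() above all adopt the same checked-locking shape now that lock_comm() can fail. A condensed sketch of the pattern, assuming the signatures shown in these hunks (send_cluster_msg() is a hypothetical wrapper):

	static int send_cluster_msg(struct md_cluster_info *cinfo,
				    struct cluster_msg *cmsg, bool mddev_locked)
	{
		int ret;

		ret = lock_comm(cinfo, mddev_locked);	/* may fail to take TOKEN */
		if (ret)
			return ret;
		ret = __sendmsg(cinfo, cmsg);
		unlock_comm(cinfo);
		return ret;
	}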
diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c
index 5ab22069b5be..68cac7d19278 100644
--- a/drivers/md/md-linear.c
+++ b/drivers/md/md-linear.c
@@ -200,9 +200,8 @@ static int linear_add(struct mddev *mddev, struct md_rdev *rdev)
"copied raid_disks doesn't match mddev->raid_disks");
rcu_assign_pointer(mddev->private, newconf);
md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
- set_capacity(mddev->gendisk, mddev->array_sectors);
+ set_capacity_and_notify(mddev->gendisk, mddev->array_sectors);
mddev_resume(mddev);
- revalidate_disk_size(mddev->gendisk, true);
kfree_rcu(oldconf, rcu);
return 0;
}
@@ -258,8 +257,7 @@ static bool linear_make_request(struct mddev *mddev, struct bio *bio)
bio_endio(bio);
} else {
if (mddev->gendisk)
- trace_block_bio_remap(bio->bi_disk->queue,
- bio, disk_devt(mddev->gendisk),
+ trace_block_bio_remap(bio, disk_devt(mddev->gendisk),
bio_sector);
mddev_check_writesame(mddev, bio);
mddev_check_write_zeroes(mddev, bio);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 0037c6ecab65..04384452a7ab 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -464,7 +464,7 @@ struct md_io {
bio_end_io_t *orig_bi_end_io;
void *orig_bi_private;
unsigned long start_time;
- struct hd_struct *part;
+ struct block_device *part;
};
static void md_end_io(struct bio *bio)
@@ -639,8 +639,10 @@ static void md_submit_flush_data(struct work_struct *ws)
* could wait for this and below md_handle_request could wait for those
* bios because of suspend check
*/
- mddev->last_flush = mddev->start_flush;
+ spin_lock_irq(&mddev->lock);
+ mddev->prev_flush_start = mddev->start_flush;
mddev->flush_bio = NULL;
+ spin_unlock_irq(&mddev->lock);
wake_up(&mddev->sb_wait);
if (bio->bi_iter.bi_size == 0) {
@@ -660,13 +662,17 @@ static void md_submit_flush_data(struct work_struct *ws)
*/
bool md_flush_request(struct mddev *mddev, struct bio *bio)
{
- ktime_t start = ktime_get_boottime();
+ ktime_t req_start = ktime_get_boottime();
spin_lock_irq(&mddev->lock);
+ /* Flush requests wait until the ongoing flush completes,
+ * hence coalescing all pending flush requests.
+ */
wait_event_lock_irq(mddev->sb_wait,
!mddev->flush_bio ||
- ktime_after(mddev->last_flush, start),
+ ktime_before(req_start, mddev->prev_flush_start),
mddev->lock);
- if (!ktime_after(mddev->last_flush, start)) {
+ /* new request after previous flush is completed */
+ if (ktime_after(req_start, mddev->prev_flush_start)) {
WARN_ON(mddev->flush_bio);
mddev->flush_bio = bio;
bio = NULL;
@@ -2414,7 +2420,6 @@ EXPORT_SYMBOL(md_integrity_add_rdev);
static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
{
char b[BDEVNAME_SIZE];
- struct kobject *ko;
int err;
/* prevent duplicates */
@@ -2477,9 +2482,8 @@ static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
goto fail;
- ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
/* failure here is OK */
- err = sysfs_create_link(&rdev->kobj, ko, "block");
+ err = sysfs_create_link(&rdev->kobj, bdev_kobj(rdev->bdev), "block");
rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");
rdev->sysfs_unack_badblocks =
sysfs_get_dirent_safe(rdev->kobj.sd, "unacknowledged_bad_blocks");
@@ -5355,10 +5359,9 @@ array_size_store(struct mddev *mddev, const char *buf, size_t len)
if (!err) {
mddev->array_sectors = sectors;
- if (mddev->pers) {
- set_capacity(mddev->gendisk, mddev->array_sectors);
- revalidate_disk_size(mddev->gendisk, true);
- }
+ if (mddev->pers)
+ set_capacity_and_notify(mddev->gendisk,
+ mddev->array_sectors);
}
mddev_unlock(mddev);
return err ?: len;
@@ -5765,11 +5768,12 @@ static int md_alloc(dev_t dev, char *name)
return error;
}
-static struct kobject *md_probe(dev_t dev, int *part, void *data)
+static void md_probe(dev_t dev)
{
+ if (MAJOR(dev) == MD_MAJOR && MINOR(dev) >= 512)
+ return;
if (create_on_open)
md_alloc(dev, NULL);
- return NULL;
}
static int add_named_array(const char *val, const struct kernel_param *kp)
@@ -6107,8 +6111,7 @@ int do_md_run(struct mddev *mddev)
md_wakeup_thread(mddev->thread);
md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
- set_capacity(mddev->gendisk, mddev->array_sectors);
- revalidate_disk_size(mddev->gendisk, true);
+ set_capacity_and_notify(mddev->gendisk, mddev->array_sectors);
clear_bit(MD_NOT_READY, &mddev->flags);
mddev->changed = 1;
kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
@@ -6423,10 +6426,9 @@ static int do_md_stop(struct mddev *mddev, int mode,
if (rdev->raid_disk >= 0)
sysfs_unlink_rdev(mddev, rdev);
- set_capacity(disk, 0);
+ set_capacity_and_notify(disk, 0);
mutex_unlock(&mddev->open_mutex);
mddev->changed = 1;
- revalidate_disk_size(disk, true);
if (mddev->ro)
mddev->ro = 0;
@@ -6535,7 +6537,7 @@ static void autorun_devices(int part)
break;
}
- md_probe(dev, NULL, NULL);
+ md_probe(dev);
mddev = mddev_find(dev);
if (!mddev || !mddev->gendisk) {
if (mddev)
@@ -6948,8 +6950,10 @@ static int hot_remove_disk(struct mddev *mddev, dev_t dev)
goto busy;
kick_rdev:
- if (mddev_is_clustered(mddev))
- md_cluster_ops->remove_disk(mddev, rdev);
+ if (mddev_is_clustered(mddev)) {
+ if (md_cluster_ops->remove_disk(mddev, rdev))
+ goto busy;
+ }
md_kick_rdev_from_array(rdev);
set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
@@ -7257,8 +7261,8 @@ static int update_size(struct mddev *mddev, sector_t num_sectors)
if (mddev_is_clustered(mddev))
md_cluster_ops->update_size(mddev, old_dev_sectors);
else if (mddev->queue) {
- set_capacity(mddev->gendisk, mddev->array_sectors);
- revalidate_disk_size(mddev->gendisk, true);
+ set_capacity_and_notify(mddev->gendisk,
+ mddev->array_sectors);
}
}
return rv;
@@ -7278,6 +7282,7 @@ static int update_raid_disks(struct mddev *mddev, int raid_disks)
return -EINVAL;
if (mddev->sync_thread ||
test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
+ test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) ||
mddev->reshape_position != MaxSector)
return -EBUSY;
@@ -7480,7 +7485,6 @@ static inline bool md_ioctl_valid(unsigned int cmd)
{
switch (cmd) {
case ADD_NEW_DISK:
- case BLKROSET:
case GET_ARRAY_INFO:
case GET_BITMAP_FILE:
case GET_DISK_INFO:
@@ -7507,7 +7511,6 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
int err = 0;
void __user *argp = (void __user *)arg;
struct mddev *mddev = NULL;
- int ro;
bool did_set_md_closing = false;
if (!md_ioctl_valid(cmd))
@@ -7590,8 +7593,11 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
err = -EBUSY;
goto out;
}
- WARN_ON_ONCE(test_bit(MD_CLOSING, &mddev->flags));
- set_bit(MD_CLOSING, &mddev->flags);
+ if (test_and_set_bit(MD_CLOSING, &mddev->flags)) {
+ mutex_unlock(&mddev->open_mutex);
+ err = -EBUSY;
+ goto out;
+ }
did_set_md_closing = true;
mutex_unlock(&mddev->open_mutex);
sync_blockdev(bdev);
@@ -7687,35 +7693,6 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
goto unlock;
}
break;
-
- case BLKROSET:
- if (get_user(ro, (int __user *)(arg))) {
- err = -EFAULT;
- goto unlock;
- }
- err = -EINVAL;
-
- /* if the bdev is going readonly the value of mddev->ro
- * does not matter, no writes are coming
- */
- if (ro)
- goto unlock;
-
- /* are we are already prepared for writes? */
- if (mddev->ro != 1)
- goto unlock;
-
- /* transitioning to readauto need only happen for
- * arrays that call md_write_start
- */
- if (mddev->pers) {
- err = restart_array(mddev);
- if (err == 0) {
- mddev->ro = 2;
- set_disk_ro(mddev->gendisk, 0);
- }
- }
- goto unlock;
}
/*
@@ -7809,6 +7786,36 @@ static int md_compat_ioctl(struct block_device *bdev, fmode_t mode,
}
#endif /* CONFIG_COMPAT */
+static int md_set_read_only(struct block_device *bdev, bool ro)
+{
+ struct mddev *mddev = bdev->bd_disk->private_data;
+ int err;
+
+ err = mddev_lock(mddev);
+ if (err)
+ return err;
+
+ if (!mddev->raid_disks && !mddev->external) {
+ err = -ENODEV;
+ goto out_unlock;
+ }
+
+ /*
+ * Transitioning to read-auto need only happen for arrays that call
+ * md_write_start and which are not ready for writes yet.
+ */
+ if (!ro && mddev->ro == 1 && mddev->pers) {
+ err = restart_array(mddev);
+ if (err)
+ goto out_unlock;
+ mddev->ro = 2;
+ }
+
+out_unlock:
+ mddev_unlock(mddev);
+ return err;
+}
+
static int md_open(struct block_device *bdev, fmode_t mode)
{
/*
@@ -7886,6 +7893,7 @@ const struct block_device_operations md_fops =
#endif
.getgeo = md_getgeo,
.check_events = md_check_events,
+ .set_read_only = md_set_read_only,
};
static int md_thread(void *arg)
@@ -8445,7 +8453,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
rcu_read_lock();
rdev_for_each_rcu(rdev, mddev) {
struct gendisk *disk = rdev->bdev->bd_disk;
- curr_events = (int)part_stat_read_accum(&disk->part0, sectors) -
+ curr_events = (int)part_stat_read_accum(disk->part0, sectors) -
atomic_read(&disk->sync_io);
/* sync IO will cause sync_io to increase before the disk_stats
* as sync_io is counted when a request starts, and
@@ -9015,10 +9023,9 @@ void md_do_sync(struct md_thread *thread)
mddev_lock_nointr(mddev);
md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0));
mddev_unlock(mddev);
- if (!mddev_is_clustered(mddev)) {
- set_capacity(mddev->gendisk, mddev->array_sectors);
- revalidate_disk_size(mddev->gendisk, true);
- }
+ if (!mddev_is_clustered(mddev))
+ set_capacity_and_notify(mddev->gendisk,
+ mddev->array_sectors);
}
spin_lock(&mddev->lock);
@@ -9547,18 +9554,15 @@ static int __init md_init(void)
if (!md_rdev_misc_wq)
goto err_rdev_misc_wq;
- if ((ret = register_blkdev(MD_MAJOR, "md")) < 0)
+ ret = __register_blkdev(MD_MAJOR, "md", md_probe);
+ if (ret < 0)
goto err_md;
- if ((ret = register_blkdev(0, "mdp")) < 0)
+ ret = __register_blkdev(0, "mdp", md_probe);
+ if (ret < 0)
goto err_mdp;
mdp_major = ret;
- blk_register_region(MKDEV(MD_MAJOR, 0), 512, THIS_MODULE,
- md_probe, NULL, NULL);
- blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
- md_probe, NULL, NULL);
-
register_reboot_notifier(&md_notifier);
raid_table_header = register_sysctl_table(raid_root_table);
@@ -9642,8 +9646,11 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
}
}
- if (mddev->raid_disks != le32_to_cpu(sb->raid_disks))
- update_raid_disks(mddev, le32_to_cpu(sb->raid_disks));
+ if (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) {
+ ret = update_raid_disks(mddev, le32_to_cpu(sb->raid_disks));
+ if (ret)
+ pr_warn("md: updating array disks failed. %d\n", ret);
+ }
/*
* Since mddev->delta_disks has already updated in update_raid_disks,
@@ -9825,9 +9832,6 @@ static __exit void md_exit(void)
struct list_head *tmp;
int delay = 1;
- blk_unregister_region(MKDEV(MD_MAJOR,0), 512);
- blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);
-
unregister_blkdev(MD_MAJOR,"md");
unregister_blkdev(mdp_major, "mdp");
unregister_reboot_notifier(&md_notifier);
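The md_flush_request() rework above coalesces flushes by comparing each request's start time with the start time of the previously completed flush: a request that started before prev_flush_start is already covered and can complete without issuing a new flush. A minimal sketch of that decision, assuming the ktime fields from these hunks (flush_needed() is hypothetical):

	/* Caller holds mddev->lock. */
	static bool flush_needed(struct mddev *mddev, ktime_t req_start)
	{
		/* true: start a new flush; false: the previous one covers us */
		return ktime_after(req_start, mddev->prev_flush_start);
	}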
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 2175a5ac4f7c..34070ab30a8a 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -495,9 +495,9 @@ struct mddev {
*/
struct bio *flush_bio;
atomic_t flush_pending;
- ktime_t start_flush, last_flush; /* last_flush is when the last completed
- * flush was started.
- */
+ ktime_t start_flush, prev_flush_start; /* prev_flush_start is when the previous completed
+ * flush was started.
+ */
struct work_struct flush_work;
struct work_struct event_work; /* used by dm to report failure event */
mempool_t *serial_info_pool;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 35843df15b5e..67f157f2525d 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -508,8 +508,8 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
bio_chain(discard_bio, bio);
bio_clone_blkg_association(discard_bio, bio);
if (mddev->gendisk)
- trace_block_bio_remap(bdev_get_queue(rdev->bdev),
- discard_bio, disk_devt(mddev->gendisk),
+ trace_block_bio_remap(discard_bio,
+ disk_devt(mddev->gendisk),
bio->bi_iter.bi_sector);
submit_bio_noacct(discard_bio);
}
@@ -581,8 +581,8 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
tmp_dev->data_offset;
if (mddev->gendisk)
- trace_block_bio_remap(bio->bi_disk->queue, bio,
- disk_devt(mddev->gendisk), bio_sector);
+ trace_block_bio_remap(bio, disk_devt(mddev->gendisk),
+ bio_sector);
mddev_check_writesame(mddev, bio);
mddev_check_write_zeroes(mddev, bio);
submit_bio_noacct(bio);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 960d854c07f8..c0347997f6ff 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1305,8 +1305,8 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
read_bio->bi_private = r1_bio;
if (mddev->gendisk)
- trace_block_bio_remap(read_bio->bi_disk->queue, read_bio,
- disk_devt(mddev->gendisk), r1_bio->sector);
+ trace_block_bio_remap(read_bio, disk_devt(mddev->gendisk),
+ r1_bio->sector);
submit_bio_noacct(read_bio);
}
@@ -1517,8 +1517,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
atomic_inc(&r1_bio->remaining);
if (mddev->gendisk)
- trace_block_bio_remap(mbio->bi_disk->queue,
- mbio, disk_devt(mddev->gendisk),
+ trace_block_bio_remap(mbio, disk_devt(mddev->gendisk),
r1_bio->sector);
/* flush_pending_writes() needs access to the rdev so...*/
mbio->bi_disk = (void *)conf->mirrors[i].rdev;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 3b598a3cb462..c5d88ef6a45c 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1128,7 +1128,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
struct md_rdev *err_rdev = NULL;
gfp_t gfp = GFP_NOIO;
- if (r10_bio->devs[slot].rdev) {
+ if (slot >= 0 && r10_bio->devs[slot].rdev) {
/*
* This is an error retry, but we cannot
* safely dereference the rdev in the r10_bio,
@@ -1201,8 +1201,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
read_bio->bi_private = r10_bio;
if (mddev->gendisk)
- trace_block_bio_remap(read_bio->bi_disk->queue,
- read_bio, disk_devt(mddev->gendisk),
+ trace_block_bio_remap(read_bio, disk_devt(mddev->gendisk),
r10_bio->sector);
submit_bio_noacct(read_bio);
return;
@@ -1251,8 +1250,7 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
mbio->bi_private = r10_bio;
if (conf->mddev->gendisk)
- trace_block_bio_remap(mbio->bi_disk->queue,
- mbio, disk_devt(conf->mddev->gendisk),
+ trace_block_bio_remap(mbio, disk_devt(conf->mddev->gendisk),
r10_bio->sector);
/* flush_pending_writes() needs access to the rdev so...*/
mbio->bi_disk = (void *)rdev;
@@ -1493,6 +1491,7 @@ static void __make_request(struct mddev *mddev, struct bio *bio, int sectors)
r10_bio->mddev = mddev;
r10_bio->sector = bio->bi_iter.bi_sector;
r10_bio->state = 0;
+ r10_bio->read_slot = -1;
memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * conf->copies);
if (bio_data_dir(bio) == READ)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 39343479ac2a..3a90cc0e43ca 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1222,9 +1222,9 @@ again:
set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
if (conf->mddev->gendisk)
- trace_block_bio_remap(bi->bi_disk->queue,
- bi, disk_devt(conf->mddev->gendisk),
- sh->dev[i].sector);
+ trace_block_bio_remap(bi,
+ disk_devt(conf->mddev->gendisk),
+ sh->dev[i].sector);
if (should_defer && op_is_write(op))
bio_list_add(&pending_bios, bi);
else
@@ -1272,9 +1272,9 @@ again:
if (op == REQ_OP_DISCARD)
rbi->bi_vcnt = 0;
if (conf->mddev->gendisk)
- trace_block_bio_remap(rbi->bi_disk->queue,
- rbi, disk_devt(conf->mddev->gendisk),
- sh->dev[i].sector);
+ trace_block_bio_remap(rbi,
+ disk_devt(conf->mddev->gendisk),
+ sh->dev[i].sector);
if (should_defer && op_is_write(op))
bio_list_add(&pending_bios, rbi);
else
@@ -5468,8 +5468,7 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
spin_unlock_irq(&conf->device_lock);
if (mddev->gendisk)
- trace_block_bio_remap(align_bi->bi_disk->queue,
- align_bi, disk_devt(mddev->gendisk),
+ trace_block_bio_remap(align_bi, disk_devt(mddev->gendisk),
raid_bio->bi_iter.bi_sector);
submit_bio_noacct(align_bi);
return 1;
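Every trace_block_bio_remap() conversion in the md hunks follows the same shape: the explicit request_queue argument is dropped and the tracepoint derives the queue from the bio itself. A before/after sketch of the call, with disk and sector as placeholder variables:

	/* old: queue passed explicitly */
	trace_block_bio_remap(bio->bi_disk->queue, bio, disk_devt(disk), sector);

	/* new: the tracepoint takes the queue from the bio */
	trace_block_bio_remap(bio, disk_devt(disk), sector);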
diff --git a/drivers/media/cec/platform/Makefile b/drivers/media/cec/platform/Makefile
index 3a947159b25a..ea6f8ee8161c 100644
--- a/drivers/media/cec/platform/Makefile
+++ b/drivers/media/cec/platform/Makefile
@@ -10,5 +10,6 @@ obj-$(CONFIG_CEC_MESON_AO) += meson/
obj-$(CONFIG_CEC_SAMSUNG_S5P) += s5p/
obj-$(CONFIG_CEC_SECO) += seco/
obj-$(CONFIG_CEC_STI) += sti/
+obj-$(CONFIG_CEC_STM32) += stm32/
obj-$(CONFIG_CEC_TEGRA) += tegra/
diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c
index 96d3b2b2aa31..3f61f5863bf7 100644
--- a/drivers/media/common/videobuf2/videobuf2-v4l2.c
+++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c
@@ -118,8 +118,7 @@ static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b)
return -EINVAL;
}
} else {
- length = (b->memory == VB2_MEMORY_USERPTR ||
- b->memory == VB2_MEMORY_DMABUF)
+ length = (b->memory == VB2_MEMORY_USERPTR)
? b->length : vb->planes[0].length;
if (b->bytesused > length)
diff --git a/drivers/media/i2c/ccs-pll.c b/drivers/media/i2c/ccs-pll.c
index eb7b6f01f623..58ca47e904a1 100644
--- a/drivers/media/i2c/ccs-pll.c
+++ b/drivers/media/i2c/ccs-pll.c
@@ -772,14 +772,8 @@ int ccs_pll_calculate(struct device *dev, const struct ccs_pll_limits *lim,
switch (pll->bus_type) {
case CCS_PLL_BUS_TYPE_CSI2_DPHY:
- /* CSI transfers 2 bits per clock per lane; thus times 2 */
- op_sys_clk_freq_hz_sdr = pll->link_freq * 2
- * (pll->flags & CCS_PLL_FLAG_LANE_SPEED_MODEL ?
- 1 : pll->csi2.lanes);
- break;
case CCS_PLL_BUS_TYPE_CSI2_CPHY:
- op_sys_clk_freq_hz_sdr =
- pll->link_freq
+ op_sys_clk_freq_hz_sdr = pll->link_freq * 2
* (pll->flags & CCS_PLL_FLAG_LANE_SPEED_MODEL ?
1 : pll->csi2.lanes);
break;
diff --git a/drivers/media/i2c/ccs/ccs-data.c b/drivers/media/i2c/ccs/ccs-data.c
index 9a6097b088bd..6555bd4b325a 100644
--- a/drivers/media/i2c/ccs/ccs-data.c
+++ b/drivers/media/i2c/ccs/ccs-data.c
@@ -152,7 +152,7 @@ static int ccs_data_parse_version(struct bin_container *bin,
vv->version_major = ((u16)v->static_data_version_major[0] << 8) +
v->static_data_version_major[1];
vv->version_minor = ((u16)v->static_data_version_minor[0] << 8) +
- v->static_data_version_major[1];
+ v->static_data_version_minor[1];
vv->date_year = ((u16)v->year[0] << 8) + v->year[1];
vv->date_month = v->month;
vv->date_day = v->day;
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
index 36e354ecf71e..6cada8a6e50c 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
@@ -302,7 +302,7 @@ static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q,
if (!q->sensor)
return -ENODEV;
- freq = v4l2_get_link_rate(q->sensor->ctrl_handler, bpp, lanes);
+ freq = v4l2_get_link_freq(q->sensor->ctrl_handler, bpp, lanes);
if (freq < 0) {
dev_err(dev, "error %lld, invalid link_freq\n", freq);
return freq;
diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
index bdd293faaad0..7233a7311757 100644
--- a/drivers/media/platform/qcom/venus/core.c
+++ b/drivers/media/platform/qcom/venus/core.c
@@ -349,8 +349,10 @@ static void venus_core_shutdown(struct platform_device *pdev)
{
struct venus_core *core = platform_get_drvdata(pdev);
+ pm_runtime_get_sync(core->dev);
venus_shutdown(core);
venus_firmware_deinit(core);
+ pm_runtime_put_sync(core->dev);
}
static __maybe_unused int venus_runtime_suspend(struct device *dev)
diff --git a/drivers/media/platform/rcar-vin/rcar-core.c b/drivers/media/platform/rcar-vin/rcar-core.c
index 98bff765b02e..e48d666f2c63 100644
--- a/drivers/media/platform/rcar-vin/rcar-core.c
+++ b/drivers/media/platform/rcar-vin/rcar-core.c
@@ -654,7 +654,7 @@ static int rvin_parallel_parse_of(struct rvin_dev *vin)
out:
fwnode_handle_put(fwnode);
- return 0;
+ return ret;
}
static int rvin_parallel_init(struct rvin_dev *vin)
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
index 68da1eed753d..f7e9fd305548 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
@@ -104,6 +104,7 @@
struct rkisp1_match_data {
const char * const *clks;
unsigned int size;
+ enum rkisp1_cif_isp_version isp_ver;
};
/* ----------------------------------------------------------------------------
@@ -411,15 +412,16 @@ static const char * const rk3399_isp_clks[] = {
"hclk",
};
-static const struct rkisp1_match_data rk3399_isp_clk_data = {
+static const struct rkisp1_match_data rk3399_isp_match_data = {
.clks = rk3399_isp_clks,
.size = ARRAY_SIZE(rk3399_isp_clks),
+ .isp_ver = RKISP1_V10,
};
static const struct of_device_id rkisp1_of_match[] = {
{
.compatible = "rockchip,rk3399-cif-isp",
- .data = &rk3399_isp_clk_data,
+ .data = &rk3399_isp_match_data,
},
{},
};
@@ -457,15 +459,15 @@ static void rkisp1_debug_init(struct rkisp1_device *rkisp1)
static int rkisp1_probe(struct platform_device *pdev)
{
- const struct rkisp1_match_data *clk_data;
+ const struct rkisp1_match_data *match_data;
struct device *dev = &pdev->dev;
struct rkisp1_device *rkisp1;
struct v4l2_device *v4l2_dev;
unsigned int i;
int ret, irq;
- clk_data = of_device_get_match_data(&pdev->dev);
- if (!clk_data)
+ match_data = of_device_get_match_data(&pdev->dev);
+ if (!match_data)
return -ENODEV;
rkisp1 = devm_kzalloc(dev, sizeof(*rkisp1), GFP_KERNEL);
@@ -494,15 +496,16 @@ static int rkisp1_probe(struct platform_device *pdev)
rkisp1->irq = irq;
- for (i = 0; i < clk_data->size; i++)
- rkisp1->clks[i].id = clk_data->clks[i];
- ret = devm_clk_bulk_get(dev, clk_data->size, rkisp1->clks);
+ for (i = 0; i < match_data->size; i++)
+ rkisp1->clks[i].id = match_data->clks[i];
+ ret = devm_clk_bulk_get(dev, match_data->size, rkisp1->clks);
if (ret)
return ret;
- rkisp1->clk_size = clk_data->size;
+ rkisp1->clk_size = match_data->size;
pm_runtime_enable(&pdev->dev);
+ rkisp1->media_dev.hw_revision = match_data->isp_ver;
strscpy(rkisp1->media_dev.model, RKISP1_DRIVER_NAME,
sizeof(rkisp1->media_dev.model));
rkisp1->media_dev.dev = &pdev->dev;
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
index 6af4d551ffb5..aa5f45749543 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
@@ -391,7 +391,7 @@ static void rkisp1_goc_config(struct rkisp1_params *params,
RKISP1_CIF_ISP_CTRL_ISP_GAMMA_OUT_ENA);
rkisp1_write(params->rkisp1, arg->mode, RKISP1_CIF_ISP_GAMMA_OUT_MODE);
- for (i = 0; i < RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES; i++)
+ for (i = 0; i < RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V10; i++)
rkisp1_write(params->rkisp1, arg->gamma_y[i],
RKISP1_CIF_ISP_GAMMA_OUT_Y_0 + i * 4);
}
@@ -589,7 +589,6 @@ static void rkisp1_hst_config(struct rkisp1_params *params,
RKISP1_CIF_ISP_HIST_WEIGHT_22TO03,
RKISP1_CIF_ISP_HIST_WEIGHT_13TO43,
RKISP1_CIF_ISP_HIST_WEIGHT_04TO34,
- RKISP1_CIF_ISP_HIST_WEIGHT_44,
};
const u8 *weight;
unsigned int i;
@@ -622,6 +621,8 @@ static void rkisp1_hst_config(struct rkisp1_params *params,
weight[2],
weight[3]),
hist_weight_regs[i]);
+
+ rkisp1_write(params->rkisp1, weight[0] & 0x1F, RKISP1_CIF_ISP_HIST_WEIGHT_44);
}
static void
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h b/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h
index 8a8d960a679c..fa33080f51db 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h
@@ -365,6 +365,7 @@
#define RKISP1_CIF_ISP_MAX_HIST_PREDIVIDER 0x0000007F
#define RKISP1_CIF_ISP_HIST_ROW_NUM 5
#define RKISP1_CIF_ISP_HIST_COLUMN_NUM 5
+#define RKISP1_CIF_ISP_HIST_GET_BIN(x) ((x) & 0x000FFFFF)
/* AUTO FOCUS MEASUREMENT: ISP_AFM_CTRL */
#define RKISP1_ISP_AFM_CTRL_ENABLE BIT(0)
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c
index 3ddab8fa8f2d..c1d07a2e8839 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c
@@ -203,7 +203,7 @@ static void rkisp1_stats_get_aec_meas(struct rkisp1_stats *stats,
unsigned int i;
pbuf->meas_type |= RKISP1_CIF_ISP_STAT_AUTOEXP;
- for (i = 0; i < RKISP1_CIF_ISP_AE_MEAN_MAX; i++)
+ for (i = 0; i < RKISP1_CIF_ISP_AE_MEAN_MAX_V10; i++)
pbuf->params.ae.exp_mean[i] =
(u8)rkisp1_read(rkisp1,
RKISP1_CIF_ISP_EXP_MEAN_00 + i * 4);
@@ -233,10 +233,11 @@ static void rkisp1_stats_get_hst_meas(struct rkisp1_stats *stats,
unsigned int i;
pbuf->meas_type |= RKISP1_CIF_ISP_STAT_HIST;
- for (i = 0; i < RKISP1_CIF_ISP_HIST_BIN_N_MAX; i++)
- pbuf->params.hist.hist_bins[i] =
- (u8)rkisp1_read(rkisp1,
- RKISP1_CIF_ISP_HIST_BIN_0 + i * 4);
+ for (i = 0; i < RKISP1_CIF_ISP_HIST_BIN_N_MAX_V10; i++) {
+ u32 reg_val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_HIST_BIN_0 + i * 4);
+
+ pbuf->params.hist.hist_bins[i] = RKISP1_CIF_ISP_HIST_GET_BIN(reg_val);
+ }
}
static void rkisp1_stats_get_bls_meas(struct rkisp1_stats *stats,
diff --git a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
index eb15c8c725ca..ec46cff80fdb 100644
--- a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
+++ b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
@@ -167,33 +167,6 @@ static int sun4i_csi_probe(struct platform_device *pdev)
if (!csi->traits)
return -EINVAL;
- /*
- * On Allwinner SoCs, some high memory bandwidth devices do DMA
- * directly over the memory bus (called MBUS), instead of the
- * system bus. The memory bus has a different addressing scheme
- * without the DRAM starting offset.
- *
- * In some cases this can be described by an interconnect in
- * the device tree. In other cases where the hardware is not
- * fully understood and the interconnect is left out of the
- * device tree, fall back to a default offset.
- */
- if (of_find_property(csi->dev->of_node, "interconnects", NULL)) {
- ret = of_dma_configure(csi->dev, csi->dev->of_node, true);
- if (ret)
- return ret;
- } else {
- /*
- * XXX(hch): this has no business in a driver and needs to move
- * to the device tree.
- */
-#ifdef PHYS_PFN_OFFSET
- ret = dma_direct_set_offset(csi->dev, PHYS_OFFSET, 0, SZ_4G);
- if (ret)
- return ret;
-#endif
- }
-
csi->mdev.dev = csi->dev;
strscpy(csi->mdev.model, "Allwinner Video Capture Device",
sizeof(csi->mdev.model));
diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c
index e69e14379fc6..27935f1e9555 100644
--- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c
+++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c
@@ -881,14 +881,6 @@ static int sun6i_csi_resource_request(struct sun6i_csi_dev *sdev,
return 0;
}
-/*
- * PHYS_OFFSET isn't available on all architectures. In order to
- * accommodate for COMPILE_TEST, let's define it to something dumb.
- */
-#if defined(CONFIG_COMPILE_TEST) && !defined(PHYS_OFFSET)
-#define PHYS_OFFSET 0
-#endif
-
static int sun6i_csi_probe(struct platform_device *pdev)
{
struct sun6i_csi_dev *sdev;
@@ -899,15 +891,6 @@ static int sun6i_csi_probe(struct platform_device *pdev)
return -ENOMEM;
sdev->dev = &pdev->dev;
- /*
- * The DMA bus has the memory mapped at 0.
- *
- * XXX(hch): this has no business in a driver and needs to move
- * to the device tree.
- */
- ret = dma_direct_set_offset(sdev->dev, PHYS_OFFSET, 0, SZ_4G);
- if (ret)
- return ret;
ret = sun6i_csi_resource_request(sdev, pdev);
if (ret)
diff --git a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
index ba5d07886607..ed863bf5ea80 100644
--- a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
+++ b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
@@ -825,10 +825,6 @@ static int deinterlace_probe(struct platform_device *pdev)
return ret;
}
- ret = of_dma_configure(dev->dev, dev->dev->of_node, true);
- if (ret)
- return ret;
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dev->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(dev->base))
diff --git a/drivers/media/rc/ir-mce_kbd-decoder.c b/drivers/media/rc/ir-mce_kbd-decoder.c
index be8f2756a444..1524dc0fc566 100644
--- a/drivers/media/rc/ir-mce_kbd-decoder.c
+++ b/drivers/media/rc/ir-mce_kbd-decoder.c
@@ -320,7 +320,7 @@ again:
data->body);
spin_lock(&data->keylock);
if (scancode) {
- delay = nsecs_to_jiffies(dev->timeout) +
+ delay = usecs_to_jiffies(dev->timeout) +
msecs_to_jiffies(100);
mod_timer(&data->rx_timeout, jiffies + delay);
} else {
diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
index a905113fef6e..0c6229592e13 100644
--- a/drivers/media/rc/ite-cir.c
+++ b/drivers/media/rc/ite-cir.c
@@ -1551,7 +1551,7 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
rdev->s_rx_carrier_range = ite_set_rx_carrier_range;
/* FIFO threshold is 17 bytes, so 17 * 8 samples minimum */
rdev->min_timeout = 17 * 8 * ITE_BAUDRATE_DIVISOR *
- itdev->params.sample_period;
+ itdev->params.sample_period / 1000;
rdev->timeout = IR_DEFAULT_TIMEOUT;
rdev->max_timeout = 10 * IR_DEFAULT_TIMEOUT;
rdev->rx_resolution = ITE_BAUDRATE_DIVISOR *
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 1d811e5ffb55..1fd62c1dac76 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -737,7 +737,7 @@ static unsigned int repeat_period(int protocol)
void rc_repeat(struct rc_dev *dev)
{
unsigned long flags;
- unsigned int timeout = nsecs_to_jiffies(dev->timeout) +
+ unsigned int timeout = usecs_to_jiffies(dev->timeout) +
msecs_to_jiffies(repeat_period(dev->last_protocol));
struct lirc_scancode sc = {
.scancode = dev->last_scancode, .rc_proto = dev->last_protocol,
@@ -855,7 +855,7 @@ void rc_keydown(struct rc_dev *dev, enum rc_proto protocol, u64 scancode,
ir_do_keydown(dev, protocol, scancode, keycode, toggle);
if (dev->keypressed) {
- dev->keyup_jiffies = jiffies + nsecs_to_jiffies(dev->timeout) +
+ dev->keyup_jiffies = jiffies + usecs_to_jiffies(dev->timeout) +
msecs_to_jiffies(repeat_period(protocol));
mod_timer(&dev->timer_keyup, dev->keyup_jiffies);
}
@@ -1928,6 +1928,8 @@ int rc_register_device(struct rc_dev *dev)
goto out_raw;
}
+ dev->registered = true;
+
rc = device_add(&dev->dev);
if (rc)
goto out_rx_free;
@@ -1937,8 +1939,6 @@ int rc_register_device(struct rc_dev *dev)
dev->device_name ?: "Unspecified device", path ?: "N/A");
kfree(path);
- dev->registered = true;
-
/*
* once the input device is registered in rc_setup_rx_device,
* userspace can open the input device and rc_open() will be called
diff --git a/drivers/media/rc/serial_ir.c b/drivers/media/rc/serial_ir.c
index 8cc28c92d05d..96ae0294ac10 100644
--- a/drivers/media/rc/serial_ir.c
+++ b/drivers/media/rc/serial_ir.c
@@ -385,7 +385,7 @@ static irqreturn_t serial_ir_irq_handler(int i, void *blah)
} while (!(sinp(UART_IIR) & UART_IIR_NO_INT)); /* still pending ? */
mod_timer(&serial_ir.timeout_timer,
- jiffies + nsecs_to_jiffies(serial_ir.rcdev->timeout));
+ jiffies + usecs_to_jiffies(serial_ir.rcdev->timeout));
ir_raw_event_handle(serial_ir.rcdev);
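The nsecs_to_jiffies() to usecs_to_jiffies() switches in the rc hunks, together with the /1000 added in ite-cir, are consistent with rc_dev::timeout now being kept in microseconds rather than nanoseconds. A sketch of the resulting conversion, assuming that unit change (keyup_delay() is hypothetical):

	/* dev->timeout is in microseconds after this series. */
	static unsigned long keyup_delay(struct rc_dev *dev)
	{
		return usecs_to_jiffies(dev->timeout) + msecs_to_jiffies(100);
	}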
diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
index 78007dba4677..133d20e40f82 100644
--- a/drivers/media/v4l2-core/v4l2-common.c
+++ b/drivers/media/v4l2-core/v4l2-common.c
@@ -442,7 +442,7 @@ int v4l2_fill_pixfmt(struct v4l2_pix_format *pixfmt, u32 pixelformat,
}
EXPORT_SYMBOL_GPL(v4l2_fill_pixfmt);
-s64 v4l2_get_link_rate(struct v4l2_ctrl_handler *handler, unsigned int mul,
+s64 v4l2_get_link_freq(struct v4l2_ctrl_handler *handler, unsigned int mul,
unsigned int div)
{
struct v4l2_ctrl *ctrl;
@@ -473,4 +473,4 @@ s64 v4l2_get_link_rate(struct v4l2_ctrl_handler *handler, unsigned int mul,
return freq > 0 ? freq : -EINVAL;
}
-EXPORT_SYMBOL_GPL(v4l2_get_link_rate);
+EXPORT_SYMBOL_GPL(v4l2_get_link_freq);
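The v4l2_get_link_rate() to v4l2_get_link_freq() rename matches what the helper actually returns: a link frequency in Hz, taken from V4L2_CID_LINK_FREQ when that control exists and otherwise (on the assumption that the fallback path is unchanged) derived from V4L2_CID_PIXEL_RATE scaled by mul/div. A typical caller, mirroring the ipu3-cio2 hunk above (handler, bpp and lanes are placeholders):

	s64 freq;

	freq = v4l2_get_link_freq(handler, bpp, lanes);
	if (freq < 0)
		return freq;	/* no usable LINK_FREQ/PIXEL_RATE control */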
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index 00e013b14703..3ea6913df176 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -128,7 +128,7 @@ config OMAP_GPMC_DEBUG
config TI_EMIF_SRAM
tristate "Texas Instruments EMIF SRAM driver"
- depends on SOC_AM33XX || SOC_AM43XX || (ARM && COMPILE_TEST)
+ depends on SOC_AM33XX || SOC_AM43XX || (ARM && CPU_V7 && COMPILE_TEST)
depends on SRAM
help
This driver is for the EMIF module available on Texas Instruments
@@ -191,8 +191,8 @@ config DA8XX_DDRCTL
config PL353_SMC
tristate "ARM PL35X Static Memory Controller(SMC) driver"
default y if ARM
- depends on ARM
- depends on ARM_AMBA || COMPILE_TEST
+ depends on ARM || COMPILE_TEST
+ depends on ARM_AMBA
help
This driver is for the ARM PL351/PL353 Static Memory
Controller(SMC) module.
diff --git a/drivers/memory/jz4780-nemc.c b/drivers/memory/jz4780-nemc.c
index 3ec5cb0fce1e..555f7ac3b7dd 100644
--- a/drivers/memory/jz4780-nemc.c
+++ b/drivers/memory/jz4780-nemc.c
@@ -291,6 +291,8 @@ static int jz4780_nemc_probe(struct platform_device *pdev)
nemc->dev = dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
/*
* The driver currently only uses the registers up to offset
@@ -304,9 +306,9 @@ static int jz4780_nemc_probe(struct platform_device *pdev)
}
nemc->base = devm_ioremap(dev, res->start, NEMC_REG_LEN);
- if (IS_ERR(nemc->base)) {
+ if (!nemc->base) {
dev_err(dev, "failed to get I/O memory\n");
- return PTR_ERR(nemc->base);
+ return -ENOMEM;
}
writel(0, nemc->base + NEMC_NFCSR);
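The jz4780-nemc fix above hinges on an easy-to-miss API difference: devm_ioremap() returns NULL on failure, while devm_ioremap_resource() returns an ERR_PTR() value and also validates the resource for the caller. A minimal sketch of the two error conventions:

	/* devm_ioremap(): NULL on failure */
	base = devm_ioremap(dev, res->start, len);
	if (!base)
		return -ENOMEM;

	/* devm_ioremap_resource(): ERR_PTR() on failure */
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);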
diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
index 691e4c344cf8..ac350f8d1e20 100644
--- a/drivers/memory/mtk-smi.c
+++ b/drivers/memory/mtk-smi.c
@@ -268,6 +268,10 @@ static const struct mtk_smi_larb_gen mtk_smi_larb_mt8183 = {
/* IPU0 | IPU1 | CCU */
};
+static const struct mtk_smi_larb_gen mtk_smi_larb_mt8192 = {
+ .config_port = mtk_smi_larb_config_port_gen2_general,
+};
+
static const struct of_device_id mtk_smi_larb_of_ids[] = {
{
.compatible = "mediatek,mt8167-smi-larb",
@@ -293,6 +297,10 @@ static const struct of_device_id mtk_smi_larb_of_ids[] = {
.compatible = "mediatek,mt8183-smi-larb",
.data = &mtk_smi_larb_mt8183
},
+ {
+ .compatible = "mediatek,mt8192-smi-larb",
+ .data = &mtk_smi_larb_mt8192
+ },
{}
};
@@ -432,6 +440,13 @@ static const struct mtk_smi_common_plat mtk_smi_common_mt8183 = {
F_MMU1_LARB(7),
};
+static const struct mtk_smi_common_plat mtk_smi_common_mt8192 = {
+ .gen = MTK_SMI_GEN2,
+ .has_gals = true,
+ .bus_sel = F_MMU1_LARB(1) | F_MMU1_LARB(2) | F_MMU1_LARB(5) |
+ F_MMU1_LARB(6),
+};
+
static const struct of_device_id mtk_smi_common_of_ids[] = {
{
.compatible = "mediatek,mt8173-smi-common",
@@ -457,6 +472,10 @@ static const struct of_device_id mtk_smi_common_of_ids[] = {
.compatible = "mediatek,mt8183-smi-common",
.data = &mtk_smi_common_mt8183,
},
+ {
+ .compatible = "mediatek,mt8192-smi-common",
+ .data = &mtk_smi_common_mt8192,
+ },
{}
};
diff --git a/drivers/memory/renesas-rpc-if.c b/drivers/memory/renesas-rpc-if.c
index f2a33a1af836..8d36e221def1 100644
--- a/drivers/memory/renesas-rpc-if.c
+++ b/drivers/memory/renesas-rpc-if.c
@@ -12,7 +12,6 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
-#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>
@@ -204,18 +203,6 @@ int rpcif_sw_init(struct rpcif *rpc, struct device *dev)
}
EXPORT_SYMBOL(rpcif_sw_init);
-void rpcif_enable_rpm(struct rpcif *rpc)
-{
- pm_runtime_enable(rpc->dev);
-}
-EXPORT_SYMBOL(rpcif_enable_rpm);
-
-void rpcif_disable_rpm(struct rpcif *rpc)
-{
- pm_runtime_put_sync(rpc->dev);
-}
-EXPORT_SYMBOL(rpcif_disable_rpm);
-
void rpcif_hw_init(struct rpcif *rpc, bool hyperflash)
{
u32 dummy;
@@ -508,7 +495,8 @@ exit:
return ret;
err_out:
- ret = reset_control_reset(rpc->rstc);
+ if (reset_control_reset(rpc->rstc))
+ dev_err(rpc->dev, "Failed to reset HW\n");
rpcif_hw_init(rpc, rpc->bus_size == 2);
goto exit;
}
@@ -560,9 +548,11 @@ static int rpcif_probe(struct platform_device *pdev)
} else if (of_device_is_compatible(flash, "cfi-flash")) {
name = "rpc-if-hyperflash";
} else {
+ of_node_put(flash);
dev_warn(&pdev->dev, "unknown flash type\n");
return -ENODEV;
}
+ of_node_put(flash);
vdev = platform_device_alloc(name, pdev->id);
if (!vdev)
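The rpcif_probe() hunk plugs a device-node leak: the lookup that produced the flash node holds a reference, so every exit path, including the early -ENODEV return for unknown flash types, must drop it with of_node_put(). A condensed sketch of the balanced pattern, assuming of_get_next_child() is the lookup (it is not shown in this hunk):

	flash = of_get_next_child(dev->of_node, NULL);
	if (!flash)
		return -ENODEV;
	/* ... inspect the node ... */
	of_node_put(flash);	/* dropped on every path after the lookup */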
diff --git a/drivers/memory/tegra/Kconfig b/drivers/memory/tegra/Kconfig
index 9f0a96bf9ccc..ca7077a06f4c 100644
--- a/drivers/memory/tegra/Kconfig
+++ b/drivers/memory/tegra/Kconfig
@@ -3,14 +3,17 @@ config TEGRA_MC
bool "NVIDIA Tegra Memory Controller support"
default y
depends on ARCH_TEGRA
+ select INTERCONNECT
help
This driver supports the Memory Controller (MC) hardware found on
NVIDIA Tegra SoCs.
config TEGRA20_EMC
- bool "NVIDIA Tegra20 External Memory Controller driver"
+ tristate "NVIDIA Tegra20 External Memory Controller driver"
default y
- depends on ARCH_TEGRA_2x_SOC
+ depends on TEGRA_MC && ARCH_TEGRA_2x_SOC
+ select DEVFREQ_GOV_SIMPLE_ONDEMAND
+ select PM_DEVFREQ
help
This driver is for the External Memory Controller (EMC) found on
Tegra20 chips. The EMC controls the external DRAM on the board.
@@ -18,9 +21,10 @@ config TEGRA20_EMC
external memory.
config TEGRA30_EMC
- bool "NVIDIA Tegra30 External Memory Controller driver"
+ tristate "NVIDIA Tegra30 External Memory Controller driver"
default y
depends on TEGRA_MC && ARCH_TEGRA_3x_SOC
+ select PM_OPP
help
This driver is for the External Memory Controller (EMC) found on
Tegra30 chips. The EMC controls the external DRAM on the board.
diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c
index ec8403557ed4..44064de962c2 100644
--- a/drivers/memory/tegra/mc.c
+++ b/drivers/memory/tegra/mc.c
@@ -6,6 +6,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
+#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -42,6 +43,54 @@ static const struct of_device_id tegra_mc_of_match[] = {
};
MODULE_DEVICE_TABLE(of, tegra_mc_of_match);
+static void tegra_mc_devm_action_put_device(void *data)
+{
+ struct tegra_mc *mc = data;
+
+ put_device(mc->dev);
+}
+
+/**
+ * devm_tegra_memory_controller_get() - get Tegra Memory Controller handle
+ * @dev: device pointer for the consumer device
+ *
+ * This function searches the device-tree for the Memory Controller node
+ * and retrieves the Memory Controller handle.
+ *
+ * Return: ERR_PTR() on error or a valid pointer to a struct tegra_mc.
+ */
+struct tegra_mc *devm_tegra_memory_controller_get(struct device *dev)
+{
+ struct platform_device *pdev;
+ struct device_node *np;
+ struct tegra_mc *mc;
+ int err;
+
+ np = of_parse_phandle(dev->of_node, "nvidia,memory-controller", 0);
+ if (!np)
+ return ERR_PTR(-ENOENT);
+
+ pdev = of_find_device_by_node(np);
+ of_node_put(np);
+ if (!pdev)
+ return ERR_PTR(-ENODEV);
+
+ mc = platform_get_drvdata(pdev);
+ if (!mc) {
+ put_device(&pdev->dev);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ err = devm_add_action(dev, tegra_mc_devm_action_put_device, mc);
+ if (err) {
+ put_device(mc->dev);
+ return ERR_PTR(err);
+ }
+
+ return mc;
+}
+EXPORT_SYMBOL_GPL(devm_tegra_memory_controller_get);
+
static int tegra_mc_block_dma_common(struct tegra_mc *mc,
const struct tegra_mc_reset *rst)
{
@@ -298,6 +347,7 @@ int tegra_mc_write_emem_configuration(struct tegra_mc *mc, unsigned long rate)
return 0;
}
+EXPORT_SYMBOL_GPL(tegra_mc_write_emem_configuration);
unsigned int tegra_mc_get_emem_device_count(struct tegra_mc *mc)
{
@@ -309,6 +359,7 @@ unsigned int tegra_mc_get_emem_device_count(struct tegra_mc *mc)
return dram_count;
}
+EXPORT_SYMBOL_GPL(tegra_mc_get_emem_device_count);
static int load_one_timing(struct tegra_mc *mc,
struct tegra_mc_timing *timing,
@@ -591,6 +642,101 @@ static __maybe_unused irqreturn_t tegra20_mc_irq(int irq, void *data)
return IRQ_HANDLED;
}
+/*
+ * The Memory Controller (MC) has a number of Memory Clients that issue
+ * memory bandwidth allocation requests to the MC interconnect provider.
+ * The MC provider aggregates the requests and sends the aggregated request
+ * up to the External Memory Controller (EMC) interconnect provider, which
+ * re-configures the hardware interface to External Memory (EMEM) in
+ * accordance with the required bandwidth. Each MC interconnect node
+ * represents an individual Memory Client.
+ *
+ * Memory interconnect topology:
+ *
+ * +----+
+ * +--------+ | |
+ * | TEXSRD +--->+ |
+ * +--------+ | |
+ * | | +-----+ +------+
+ * ... | MC +--->+ EMC +--->+ EMEM |
+ * | | +-----+ +------+
+ * +--------+ | |
+ * | DISP.. +--->+ |
+ * +--------+ | |
+ * +----+
+ */
+static int tegra_mc_interconnect_setup(struct tegra_mc *mc)
+{
+ struct icc_node *node;
+ unsigned int i;
+ int err;
+
+ /* older device-trees don't have interconnect properties */
+ if (!device_property_present(mc->dev, "#interconnect-cells") ||
+ !mc->soc->icc_ops)
+ return 0;
+
+ mc->provider.dev = mc->dev;
+ mc->provider.data = &mc->provider;
+ mc->provider.set = mc->soc->icc_ops->set;
+ mc->provider.aggregate = mc->soc->icc_ops->aggregate;
+ mc->provider.xlate_extended = mc->soc->icc_ops->xlate_extended;
+
+ err = icc_provider_add(&mc->provider);
+ if (err)
+ return err;
+
+ /* create Memory Controller node */
+ node = icc_node_create(TEGRA_ICC_MC);
+ if (IS_ERR(node)) {
+ err = PTR_ERR(node);
+ goto del_provider;
+ }
+
+ node->name = "Memory Controller";
+ icc_node_add(node, &mc->provider);
+
+ /* link Memory Controller to External Memory Controller */
+ err = icc_link_create(node, TEGRA_ICC_EMC);
+ if (err)
+ goto remove_nodes;
+
+ for (i = 0; i < mc->soc->num_clients; i++) {
+ /* create MC client node */
+ node = icc_node_create(mc->soc->clients[i].id);
+ if (IS_ERR(node)) {
+ err = PTR_ERR(node);
+ goto remove_nodes;
+ }
+
+ node->name = mc->soc->clients[i].name;
+ icc_node_add(node, &mc->provider);
+
+ /* link Memory Client to Memory Controller */
+ err = icc_link_create(node, TEGRA_ICC_MC);
+ if (err)
+ goto remove_nodes;
+ }
+
+ /*
+ * The MC driver is registered too early, so early that generic driver
+ * syncing doesn't work for the MC. That doesn't really matter, since
+ * syncing works for the EMC drivers; hence we can sync the MC driver
+ * ourselves here and let the EMC complete the syncing of the whole
+ * ICC state.
+ */
+ icc_sync_state(mc->dev);
+
+ return 0;
+
+remove_nodes:
+ icc_nodes_remove(&mc->provider);
+del_provider:
+ icc_provider_del(&mc->provider);
+
+ return err;
+}
+
static int tegra_mc_probe(struct platform_device *pdev)
{
struct resource *res;
@@ -659,10 +805,8 @@ static int tegra_mc_probe(struct platform_device *pdev)
}
mc->irq = platform_get_irq(pdev, 0);
- if (mc->irq < 0) {
- dev_err(&pdev->dev, "interrupt not specified\n");
+ if (mc->irq < 0)
return mc->irq;
- }
WARN(!mc->soc->client_id_mask, "missing client ID mask for this SoC\n");
@@ -681,6 +825,11 @@ static int tegra_mc_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "failed to register reset controller: %d\n",
err);
+ err = tegra_mc_interconnect_setup(mc);
+ if (err < 0)
+ dev_err(&pdev->dev, "failed to initialize interconnect: %d\n",
+ err);
+
if (IS_ENABLED(CONFIG_TEGRA_IOMMU_SMMU) && mc->soc->smmu) {
mc->smmu = tegra_smmu_probe(&pdev->dev, mc->soc->smmu, mc);
if (IS_ERR(mc->smmu)) {
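devm_tegra_memory_controller_get() centralizes the phandle-lookup-and-refcount dance that the EMC drivers previously open-coded; the tegra124-emc.c hunk below shows the conversion. A minimal consumer sketch:

	mc = devm_tegra_memory_controller_get(&pdev->dev);
	if (IS_ERR(mc))
		return PTR_ERR(mc);	/* -ENOENT, -ENODEV or -EPROBE_DEFER */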
diff --git a/drivers/memory/tegra/mc.h b/drivers/memory/tegra/mc.h
index afa3ba45c9e6..33e40d600592 100644
--- a/drivers/memory/tegra/mc.h
+++ b/drivers/memory/tegra/mc.h
@@ -78,6 +78,20 @@
#define MC_TIMING_UPDATE BIT(0)
+static inline u32 tegra_mc_scale_percents(u64 val, unsigned int percents)
+{
+ val = val * percents;
+ do_div(val, 100);
+
+ return min_t(u64, val, U32_MAX);
+}
+
+static inline struct tegra_mc *
+icc_provider_to_tegra_mc(struct icc_provider *provider)
+{
+ return container_of(provider, struct tegra_mc, provider);
+}
+
static inline u32 mc_readl(struct tegra_mc *mc, unsigned long offset)
{
return readl_relaxed(mc->regs + offset);
@@ -115,4 +129,12 @@ extern const struct tegra_mc_soc tegra132_mc_soc;
extern const struct tegra_mc_soc tegra210_mc_soc;
#endif
+/*
+ * These IDs are for internal use of Tegra ICC drivers. The ID numbers are
+ * chosen such that they don't conflict with the device-tree ICC node IDs.
+ */
+#define TEGRA_ICC_MC 1000
+#define TEGRA_ICC_EMC 1001
+#define TEGRA_ICC_EMEM 1002
+
#endif /* MEMORY_TEGRA_MC_H */
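tegra_mc_scale_percents() is a 64-bit-safe percentage scale whose result is clamped to U32_MAX. For example, under the definition above, scaling a value of 3,000,000 (in whatever units the caller uses) to 80 percent yields 2,400,000:

	u32 scaled = tegra_mc_scale_percents(3000000ULL, 80);	/* == 2400000 */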
diff --git a/drivers/memory/tegra/tegra114.c b/drivers/memory/tegra/tegra114.c
index 48ef01c3ff90..ed376ba2d2fe 100644
--- a/drivers/memory/tegra/tegra114.c
+++ b/drivers/memory/tegra/tegra114.c
@@ -15,6 +15,12 @@ static const struct tegra_mc_client tegra114_mc_clients[] = {
.id = 0x00,
.name = "ptcr",
.swgroup = TEGRA_SWGROUP_PTC,
+ .la = {
+ .reg = 0x34c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x0,
+ },
}, {
.id = 0x01,
.name = "display0a",
diff --git a/drivers/memory/tegra/tegra124-emc.c b/drivers/memory/tegra/tegra124-emc.c
index 76ace42a688a..ee8ee39e98ed 100644
--- a/drivers/memory/tegra/tegra124-emc.c
+++ b/drivers/memory/tegra/tegra124-emc.c
@@ -1177,10 +1177,8 @@ static void emc_debugfs_init(struct device *dev, struct tegra_emc *emc)
static int tegra_emc_probe(struct platform_device *pdev)
{
- struct platform_device *mc;
struct device_node *np;
struct tegra_emc *emc;
- struct resource *res;
u32 ram_code;
int err;
@@ -1190,25 +1188,13 @@ static int tegra_emc_probe(struct platform_device *pdev)
emc->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- emc->regs = devm_ioremap_resource(&pdev->dev, res);
+ emc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(emc->regs))
return PTR_ERR(emc->regs);
- np = of_parse_phandle(pdev->dev.of_node, "nvidia,memory-controller", 0);
- if (!np) {
- dev_err(&pdev->dev, "could not get memory controller\n");
- return -ENOENT;
- }
-
- mc = of_find_device_by_node(np);
- of_node_put(np);
- if (!mc)
- return -ENOENT;
-
- emc->mc = platform_get_drvdata(mc);
- if (!emc->mc)
- return -EPROBE_DEFER;
+ emc->mc = devm_tegra_memory_controller_get(&pdev->dev);
+ if (IS_ERR(emc->mc))
+ return PTR_ERR(emc->mc);
ram_code = tegra_read_ram_code();
diff --git a/drivers/memory/tegra/tegra124.c b/drivers/memory/tegra/tegra124.c
index 0cede24479bf..e2389573d3c0 100644
--- a/drivers/memory/tegra/tegra124.c
+++ b/drivers/memory/tegra/tegra124.c
@@ -15,6 +15,12 @@ static const struct tegra_mc_client tegra124_mc_clients[] = {
.id = 0x00,
.name = "ptcr",
.swgroup = TEGRA_SWGROUP_PTC,
+ .la = {
+ .reg = 0x34c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x0,
+ },
}, {
.id = 0x01,
.name = "display0a",
diff --git a/drivers/memory/tegra/tegra20-emc.c b/drivers/memory/tegra/tegra20-emc.c
index 027f46287dbf..686aaf477d8a 100644
--- a/drivers/memory/tegra/tegra20-emc.c
+++ b/drivers/memory/tegra/tegra20-emc.c
@@ -8,19 +8,27 @@
#include <linux/clk.h>
#include <linux/clk/tegra.h>
#include <linux/debugfs.h>
+#include <linux/devfreq.h>
#include <linux/err.h>
+#include <linux/interconnect-provider.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/types.h>
+#include <soc/tegra/common.h>
#include <soc/tegra/fuse.h>
+#include "mc.h"
+
#define EMC_INTSTATUS 0x000
#define EMC_INTMASK 0x004
#define EMC_DBG 0x008
@@ -62,6 +70,11 @@
#define EMC_ODT_READ 0x0b4
#define EMC_FBIO_CFG5 0x104
#define EMC_FBIO_CFG6 0x114
+#define EMC_STAT_CONTROL 0x160
+#define EMC_STAT_LLMC_CONTROL 0x178
+#define EMC_STAT_PWR_CLOCK_LIMIT 0x198
+#define EMC_STAT_PWR_CLOCKS 0x19c
+#define EMC_STAT_PWR_COUNT 0x1a0
#define EMC_AUTO_CAL_INTERVAL 0x2a8
#define EMC_CFG_2 0x2b8
#define EMC_CFG_DIG_DLL 0x2bc
@@ -88,6 +101,12 @@
#define EMC_DBG_READ_DQM_CTRL BIT(9)
#define EMC_DBG_CFG_PRIORITY BIT(24)
+#define EMC_FBIO_CFG5_DRAM_WIDTH_X16 BIT(4)
+
+#define EMC_PWR_GATHER_CLEAR (1 << 8)
+#define EMC_PWR_GATHER_DISABLE (2 << 8)
+#define EMC_PWR_GATHER_ENABLE (3 << 8)
+
static const u16 emc_timing_registers[] = {
EMC_RC,
EMC_RFC,
@@ -142,11 +161,26 @@ struct emc_timing {
u32 data[ARRAY_SIZE(emc_timing_registers)];
};
+enum emc_rate_request_type {
+ EMC_RATE_DEVFREQ,
+ EMC_RATE_DEBUG,
+ EMC_RATE_ICC,
+ EMC_RATE_TYPE_MAX,
+};
+
+struct emc_rate_request {
+ unsigned long min_rate;
+ unsigned long max_rate;
+};
+
struct tegra_emc {
struct device *dev;
+ struct tegra_mc *mc;
+ struct icc_provider provider;
struct notifier_block clk_nb;
struct clk *clk;
void __iomem *regs;
+ unsigned int dram_bus_width;
struct emc_timing *timings;
unsigned int num_timings;
@@ -156,6 +190,17 @@ struct tegra_emc {
unsigned long min_rate;
unsigned long max_rate;
} debugfs;
+
+ /*
+ * There are multiple sources in the EMC driver which could request
+ * a min/max clock rate; these rates are contained in this array.
+ */
+ struct emc_rate_request requested_rate[EMC_RATE_TYPE_MAX];
+
+ /* protect shared rate-change code path */
+ struct mutex rate_lock;
+
+ struct devfreq_simple_ondemand_data ondemand_data;
};
static irqreturn_t tegra_emc_isr(int irq, void *data)
@@ -383,6 +428,11 @@ tegra_emc_find_node_by_ram_code(struct device *dev)
u32 value, ram_code;
int err;
+ if (of_get_child_count(dev->of_node) == 0) {
+ dev_info(dev, "device-tree doesn't have memory timings\n");
+ return NULL;
+ }
+
if (!of_property_read_bool(dev->of_node, "nvidia,use-ram-code"))
return of_node_get(dev->of_node);
@@ -408,7 +458,7 @@ tegra_emc_find_node_by_ram_code(struct device *dev)
static int emc_setup_hw(struct tegra_emc *emc)
{
u32 intmask = EMC_REFRESH_OVERFLOW_INT;
- u32 emc_cfg, emc_dbg;
+ u32 emc_cfg, emc_dbg, emc_fbio;
emc_cfg = readl_relaxed(emc->regs + EMC_CFG_2);
@@ -439,6 +489,15 @@ static int emc_setup_hw(struct tegra_emc *emc)
emc_dbg &= ~EMC_DBG_FORCE_UPDATE;
writel_relaxed(emc_dbg, emc->regs + EMC_DBG);
+ emc_fbio = readl_relaxed(emc->regs + EMC_FBIO_CFG5);
+
+ if (emc_fbio & EMC_FBIO_CFG5_DRAM_WIDTH_X16)
+ emc->dram_bus_width = 16;
+ else
+ emc->dram_bus_width = 32;
+
+ dev_info(emc->dev, "%ubit DRAM bus\n", emc->dram_bus_width);
+
return 0;
}
@@ -451,6 +510,9 @@ static long emc_round_rate(unsigned long rate,
struct tegra_emc *emc = arg;
unsigned int i;
+ if (!emc->num_timings)
+ return clk_get_rate(emc->clk);
+
min_rate = min(min_rate, emc->timings[emc->num_timings - 1].rate);
for (i = 0; i < emc->num_timings; i++) {
@@ -480,6 +542,83 @@ static long emc_round_rate(unsigned long rate,
return timing->rate;
}
+static void tegra_emc_rate_requests_init(struct tegra_emc *emc)
+{
+ unsigned int i;
+
+ for (i = 0; i < EMC_RATE_TYPE_MAX; i++) {
+ emc->requested_rate[i].min_rate = 0;
+ emc->requested_rate[i].max_rate = ULONG_MAX;
+ }
+}
+
+static int emc_request_rate(struct tegra_emc *emc,
+ unsigned long new_min_rate,
+ unsigned long new_max_rate,
+ enum emc_rate_request_type type)
+{
+ struct emc_rate_request *req = emc->requested_rate;
+ unsigned long min_rate = 0, max_rate = ULONG_MAX;
+ unsigned int i;
+ int err;
+
+ /* select minimum and maximum rates among the requested rates */
+ for (i = 0; i < EMC_RATE_TYPE_MAX; i++, req++) {
+ if (i == type) {
+ min_rate = max(new_min_rate, min_rate);
+ max_rate = min(new_max_rate, max_rate);
+ } else {
+ min_rate = max(req->min_rate, min_rate);
+ max_rate = min(req->max_rate, max_rate);
+ }
+ }
+
+ if (min_rate > max_rate) {
+ dev_err_ratelimited(emc->dev, "%s: type %u: out of range: %lu %lu\n",
+ __func__, type, min_rate, max_rate);
+ return -ERANGE;
+ }
+
+ /*
+ * EMC rate-changes should go via OPP API because it manages voltage
+ * changes.
+ */
+ err = dev_pm_opp_set_rate(emc->dev, min_rate);
+ if (err)
+ return err;
+
+ emc->requested_rate[type].min_rate = new_min_rate;
+ emc->requested_rate[type].max_rate = new_max_rate;
+
+ return 0;
+}
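The aggregation above is an interval intersection: every requester owns a [min, max] window, the effective window is the intersection of all windows, and the clock is driven to the lowest rate that still satisfies every minimum. A minimal user-space sketch of that logic (illustrative names only, not part of the patch):

	#include <limits.h>
	#include <stdio.h>

	struct rate_request { unsigned long min, max; };

	/* intersect all requested windows; mirrors emc_request_rate() above */
	static int effective_rate(const struct rate_request *req, unsigned int n,
				  unsigned long *rate)
	{
		unsigned long min = 0, max = ULONG_MAX;
		unsigned int i;

		for (i = 0; i < n; i++) {
			if (req[i].min > min)
				min = req[i].min;
			if (req[i].max < max)
				max = req[i].max;
		}

		if (min > max)
			return -1;	/* conflicting requests, like -ERANGE above */

		*rate = min;		/* lowest rate satisfying every minimum */
		return 0;
	}

	int main(void)
	{
		const struct rate_request req[] = {
			{ 204000000, ULONG_MAX },	/* devfreq wants >= 204 MHz */
			{ 0, 600000000 },		/* debugfs caps at 600 MHz */
			{ 300000000, ULONG_MAX },	/* ICC wants >= 300 MHz */
		};
		unsigned long rate;

		if (!effective_rate(req, 3, &rate))
			printf("effective EMC rate: %lu Hz\n", rate); /* 300 MHz */

		return 0;
	}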
+
+static int emc_set_min_rate(struct tegra_emc *emc, unsigned long rate,
+ enum emc_rate_request_type type)
+{
+ struct emc_rate_request *req = &emc->requested_rate[type];
+ int ret;
+
+ mutex_lock(&emc->rate_lock);
+ ret = emc_request_rate(emc, rate, req->max_rate, type);
+ mutex_unlock(&emc->rate_lock);
+
+ return ret;
+}
+
+static int emc_set_max_rate(struct tegra_emc *emc, unsigned long rate,
+ enum emc_rate_request_type type)
+{
+ struct emc_rate_request *req = &emc->requested_rate[type];
+ int ret;
+
+ mutex_lock(&emc->rate_lock);
+ ret = emc_request_rate(emc, req->min_rate, rate, type);
+ mutex_unlock(&emc->rate_lock);
+
+ return ret;
+}
+
/*
* debugfs interface
*
@@ -563,7 +702,7 @@ static int tegra_emc_debug_min_rate_set(void *data, u64 rate)
if (!tegra_emc_validate_rate(emc, rate))
return -EINVAL;
- err = clk_set_min_rate(emc->clk, rate);
+ err = emc_set_min_rate(emc, rate, EMC_RATE_DEBUG);
if (err < 0)
return err;
@@ -593,7 +732,7 @@ static int tegra_emc_debug_max_rate_set(void *data, u64 rate)
if (!tegra_emc_validate_rate(emc, rate))
return -EINVAL;
- err = clk_set_max_rate(emc->clk, rate);
+ err = emc_set_max_rate(emc, rate, EMC_RATE_DEBUG);
if (err < 0)
return err;
@@ -650,47 +789,330 @@ static void tegra_emc_debugfs_init(struct tegra_emc *emc)
emc, &tegra_emc_debug_max_rate_fops);
}
+static inline struct tegra_emc *
+to_tegra_emc_provider(struct icc_provider *provider)
+{
+ return container_of(provider, struct tegra_emc, provider);
+}
+
+static struct icc_node_data *
+emc_of_icc_xlate_extended(struct of_phandle_args *spec, void *data)
+{
+ struct icc_provider *provider = data;
+ struct icc_node_data *ndata;
+ struct icc_node *node;
+
+ /* External Memory is the only possible ICC route */
+ list_for_each_entry(node, &provider->nodes, node_list) {
+ if (node->id != TEGRA_ICC_EMEM)
+ continue;
+
+ ndata = kzalloc(sizeof(*ndata), GFP_KERNEL);
+ if (!ndata)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+ * SRC and DST nodes should have matching TAG in order to have
+ * it set by default for a requested path.
+ */
+ ndata->tag = TEGRA_MC_ICC_TAG_ISO;
+ ndata->node = node;
+
+ return ndata;
+ }
+
+ return ERR_PTR(-EPROBE_DEFER);
+}
+
+static int emc_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ struct tegra_emc *emc = to_tegra_emc_provider(dst->provider);
+ unsigned long long peak_bw = icc_units_to_bps(dst->peak_bw);
+ unsigned long long avg_bw = icc_units_to_bps(dst->avg_bw);
+ unsigned long long rate = max(avg_bw, peak_bw);
+ unsigned int dram_data_bus_width_bytes;
+ int err;
+
+ /*
+ * Tegra20 EMC runs at twice the SDRAM bus clock rate because DDR
+ * data is sampled on both clock edges. This means that the EMC
+ * clock rate equals the peak data rate.
+ */
+ dram_data_bus_width_bytes = emc->dram_bus_width / 8;
+ do_div(rate, dram_data_bus_width_bytes);
+ rate = min_t(u64, rate, U32_MAX);
+
+ err = emc_set_min_rate(emc, rate, EMC_RATE_ICC);
+ if (err)
+ return err;
+
+ return 0;
+}
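A worked example of the conversion above, with a hypothetical request already expressed in bytes per second: a 1 GB/s peak on a 16-bit (2-byte wide) DDR bus requires the EMC clock to run at the transfer rate itself, i.e. 500 MHz. A self-contained sketch:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* hypothetical request: 1 GB/s peak on a 16-bit (2-byte) DDR bus */
		uint64_t peak_bw = 1000000000ULL;	/* bytes/sec */
		unsigned int bus_width_bytes = 16 / 8;
		uint64_t rate = peak_bw / bus_width_bytes;

		/*
		 * DDR moves one bus-width of data per clock edge, and on
		 * Tegra20 the EMC clock equals the transfer rate, hence
		 * 500 MHz here.
		 */
		printf("required EMC rate: %llu Hz\n", (unsigned long long)rate);

		return 0;
	}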
+
+static int tegra_emc_interconnect_init(struct tegra_emc *emc)
+{
+ const struct tegra_mc_soc *soc;
+ struct icc_node *node;
+ int err;
+
+ emc->mc = devm_tegra_memory_controller_get(emc->dev);
+ if (IS_ERR(emc->mc))
+ return PTR_ERR(emc->mc);
+
+ soc = emc->mc->soc;
+
+ emc->provider.dev = emc->dev;
+ emc->provider.set = emc_icc_set;
+ emc->provider.data = &emc->provider;
+ emc->provider.aggregate = soc->icc_ops->aggregate;
+ emc->provider.xlate_extended = emc_of_icc_xlate_extended;
+
+ err = icc_provider_add(&emc->provider);
+ if (err)
+ goto err_msg;
+
+ /* create External Memory Controller node */
+ node = icc_node_create(TEGRA_ICC_EMC);
+ if (IS_ERR(node)) {
+ err = PTR_ERR(node);
+ goto del_provider;
+ }
+
+ node->name = "External Memory Controller";
+ icc_node_add(node, &emc->provider);
+
+ /* link External Memory Controller to External Memory (DRAM) */
+ err = icc_link_create(node, TEGRA_ICC_EMEM);
+ if (err)
+ goto remove_nodes;
+
+ /* create External Memory node */
+ node = icc_node_create(TEGRA_ICC_EMEM);
+ if (IS_ERR(node)) {
+ err = PTR_ERR(node);
+ goto remove_nodes;
+ }
+
+ node->name = "External Memory (DRAM)";
+ icc_node_add(node, &emc->provider);
+
+ return 0;
+
+remove_nodes:
+ icc_nodes_remove(&emc->provider);
+del_provider:
+ icc_provider_del(&emc->provider);
+err_msg:
+ dev_err(emc->dev, "failed to initialize ICC: %d\n", err);
+
+ return err;
+}
+
+static int tegra_emc_opp_table_init(struct tegra_emc *emc)
+{
+ u32 hw_version = BIT(tegra_sku_info.soc_process_id);
+ struct opp_table *clk_opp_table, *hw_opp_table;
+ int err;
+
+ clk_opp_table = dev_pm_opp_set_clkname(emc->dev, NULL);
+ err = PTR_ERR_OR_ZERO(clk_opp_table);
+ if (err) {
+ dev_err(emc->dev, "failed to set OPP clk: %d\n", err);
+ return err;
+ }
+
+ hw_opp_table = dev_pm_opp_set_supported_hw(emc->dev, &hw_version, 1);
+ err = PTR_ERR_OR_ZERO(hw_opp_table);
+ if (err) {
+ dev_err(emc->dev, "failed to set OPP supported HW: %d\n", err);
+ goto put_clk_table;
+ }
+
+ err = dev_pm_opp_of_add_table(emc->dev);
+ if (err) {
+ if (err == -ENODEV)
+ dev_err(emc->dev, "OPP table not found, please update your device tree\n");
+ else
+ dev_err(emc->dev, "failed to add OPP table: %d\n", err);
+
+ goto put_hw_table;
+ }
+
+ dev_info(emc->dev, "OPP HW ver. 0x%x, current clock rate %lu MHz\n",
+ hw_version, clk_get_rate(emc->clk) / 1000000);
+
+ /* first dummy rate-set initializes voltage state */
+ err = dev_pm_opp_set_rate(emc->dev, clk_get_rate(emc->clk));
+ if (err) {
+ dev_err(emc->dev, "failed to initialize OPP clock: %d\n", err);
+ goto remove_table;
+ }
+
+ return 0;
+
+remove_table:
+ dev_pm_opp_of_remove_table(emc->dev);
+put_hw_table:
+ dev_pm_opp_put_supported_hw(hw_opp_table);
+put_clk_table:
+ dev_pm_opp_put_clkname(clk_opp_table);
+
+ return err;
+}
+
+static void devm_tegra_emc_unset_callback(void *data)
+{
+ tegra20_clk_set_emc_round_callback(NULL, NULL);
+}
+
+static void devm_tegra_emc_unreg_clk_notifier(void *data)
+{
+ struct tegra_emc *emc = data;
+
+ clk_notifier_unregister(emc->clk, &emc->clk_nb);
+}
+
+static int tegra_emc_init_clk(struct tegra_emc *emc)
+{
+ int err;
+
+ tegra20_clk_set_emc_round_callback(emc_round_rate, emc);
+
+ err = devm_add_action_or_reset(emc->dev, devm_tegra_emc_unset_callback,
+ NULL);
+ if (err)
+ return err;
+
+ emc->clk = devm_clk_get(emc->dev, NULL);
+ if (IS_ERR(emc->clk)) {
+ dev_err(emc->dev, "failed to get EMC clock: %pe\n", emc->clk);
+ return PTR_ERR(emc->clk);
+ }
+
+ err = clk_notifier_register(emc->clk, &emc->clk_nb);
+ if (err) {
+ dev_err(emc->dev, "failed to register clk notifier: %d\n", err);
+ return err;
+ }
+
+ err = devm_add_action_or_reset(emc->dev,
+ devm_tegra_emc_unreg_clk_notifier, emc);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int tegra_emc_devfreq_target(struct device *dev, unsigned long *freq,
+ u32 flags)
+{
+ struct tegra_emc *emc = dev_get_drvdata(dev);
+ struct dev_pm_opp *opp;
+ unsigned long rate;
+
+ opp = devfreq_recommended_opp(dev, freq, flags);
+ if (IS_ERR(opp)) {
+ dev_err(dev, "failed to find opp for %lu Hz\n", *freq);
+ return PTR_ERR(opp);
+ }
+
+ rate = dev_pm_opp_get_freq(opp);
+ dev_pm_opp_put(opp);
+
+ return emc_set_min_rate(emc, rate, EMC_RATE_DEVFREQ);
+}
+
+static int tegra_emc_devfreq_get_dev_status(struct device *dev,
+ struct devfreq_dev_status *stat)
+{
+ struct tegra_emc *emc = dev_get_drvdata(dev);
+
+ /* freeze counters */
+ writel_relaxed(EMC_PWR_GATHER_DISABLE, emc->regs + EMC_STAT_CONTROL);
+
+ /*
+ * busy_time: number of clock cycles during which an EMC request was accepted
+ * total_time: number of clock cycles during which PWR_GATHER was set to ENABLE
+ */
+ stat->busy_time = readl_relaxed(emc->regs + EMC_STAT_PWR_COUNT);
+ stat->total_time = readl_relaxed(emc->regs + EMC_STAT_PWR_CLOCKS);
+ stat->current_frequency = clk_get_rate(emc->clk);
+
+ /* clear counters and restart */
+ writel_relaxed(EMC_PWR_GATHER_CLEAR, emc->regs + EMC_STAT_CONTROL);
+ writel_relaxed(EMC_PWR_GATHER_ENABLE, emc->regs + EMC_STAT_CONTROL);
+
+ return 0;
+}
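The counters above feed the simple_ondemand governor, which roughly compares busy_time * 100 / total_time against the up-threshold (the real governor also weighs the current frequency). A sketch with made-up counter values:

	#include <stdio.h>

	int main(void)
	{
		/* hypothetical snapshot of EMC_STAT_PWR_COUNT / EMC_STAT_PWR_CLOCKS */
		unsigned long busy_time = 12000000;	/* clocks with a request accepted */
		unsigned long total_time = 100000000;	/* clocks with PWR_GATHER enabled */
		unsigned int upthreshold = 20;		/* as tegra_emc_devfreq_init() sets below */

		unsigned long load = busy_time * 100 / total_time;

		if (load > upthreshold)
			printf("load %lu%% > %u%%: raise the EMC rate\n",
			       load, upthreshold);
		else
			printf("load %lu%% <= %u%%: the EMC rate may be lowered\n",
			       load, upthreshold);

		return 0;
	}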
+
+static struct devfreq_dev_profile tegra_emc_devfreq_profile = {
+ .polling_ms = 30,
+ .target = tegra_emc_devfreq_target,
+ .get_dev_status = tegra_emc_devfreq_get_dev_status,
+};
+
+static int tegra_emc_devfreq_init(struct tegra_emc *emc)
+{
+ struct devfreq *devfreq;
+
+ /*
+ * PWR_COUNT is at most 1/2 of PWR_CLOCKS, and thus the up-threshold
+ * should be less than 50. Secondly, multiple active memory clients
+ * may cause over 20% of clock cycles to be lost to stalls from
+ * competing memory accesses. This means that the threshold should be
+ * set below 30 in order to have a properly working governor.
+ */
+ emc->ondemand_data.upthreshold = 20;
+
+ /*
+ * Reset the statistics-gathering state, select global bandwidth for
+ * the statistics collection mode and set the clocks counter
+ * saturation limit to maximum.
+ */
+ writel_relaxed(0x00000000, emc->regs + EMC_STAT_CONTROL);
+ writel_relaxed(0x00000000, emc->regs + EMC_STAT_LLMC_CONTROL);
+ writel_relaxed(0xffffffff, emc->regs + EMC_STAT_PWR_CLOCK_LIMIT);
+
+ devfreq = devm_devfreq_add_device(emc->dev, &tegra_emc_devfreq_profile,
+ DEVFREQ_GOV_SIMPLE_ONDEMAND,
+ &emc->ondemand_data);
+ if (IS_ERR(devfreq)) {
+ dev_err(emc->dev, "failed to initialize devfreq: %pe", devfreq);
+ return PTR_ERR(devfreq);
+ }
+
+ return 0;
+}
+
static int tegra_emc_probe(struct platform_device *pdev)
{
struct device_node *np;
struct tegra_emc *emc;
- struct resource *res;
int irq, err;
- /* driver has nothing to do in a case of memory timing absence */
- if (of_get_child_count(pdev->dev.of_node) == 0) {
- dev_info(&pdev->dev,
- "EMC device tree node doesn't have memory timings\n");
- return 0;
- }
-
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(&pdev->dev, "interrupt not specified\n");
dev_err(&pdev->dev, "please update your device tree\n");
return irq;
}
- np = tegra_emc_find_node_by_ram_code(&pdev->dev);
- if (!np)
- return -EINVAL;
-
emc = devm_kzalloc(&pdev->dev, sizeof(*emc), GFP_KERNEL);
- if (!emc) {
- of_node_put(np);
+ if (!emc)
return -ENOMEM;
- }
+ mutex_init(&emc->rate_lock);
emc->clk_nb.notifier_call = tegra_emc_clk_change_notify;
emc->dev = &pdev->dev;
- err = tegra_emc_load_timings_from_dt(emc, np);
- of_node_put(np);
- if (err)
- return err;
+ np = tegra_emc_find_node_by_ram_code(&pdev->dev);
+ if (np) {
+ err = tegra_emc_load_timings_from_dt(emc, np);
+ of_node_put(np);
+ if (err)
+ return err;
+ }
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- emc->regs = devm_ioremap_resource(&pdev->dev, res);
+ emc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(emc->regs))
return PTR_ERR(emc->regs);
@@ -701,41 +1123,39 @@ static int tegra_emc_probe(struct platform_device *pdev)
err = devm_request_irq(&pdev->dev, irq, tegra_emc_isr, 0,
dev_name(&pdev->dev), emc);
if (err) {
- dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n", irq, err);
+ dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
return err;
}
- tegra20_clk_set_emc_round_callback(emc_round_rate, emc);
-
- emc->clk = devm_clk_get(&pdev->dev, "emc");
- if (IS_ERR(emc->clk)) {
- err = PTR_ERR(emc->clk);
- dev_err(&pdev->dev, "failed to get emc clock: %d\n", err);
- goto unset_cb;
- }
+ err = tegra_emc_init_clk(emc);
+ if (err)
+ return err;
- err = clk_notifier_register(emc->clk, &emc->clk_nb);
- if (err) {
- dev_err(&pdev->dev, "failed to register clk notifier: %d\n",
- err);
- goto unset_cb;
- }
+ err = tegra_emc_opp_table_init(emc);
+ if (err)
+ return err;
platform_set_drvdata(pdev, emc);
+ tegra_emc_rate_requests_init(emc);
tegra_emc_debugfs_init(emc);
+ tegra_emc_interconnect_init(emc);
+ tegra_emc_devfreq_init(emc);
- return 0;
-
-unset_cb:
- tegra20_clk_set_emc_round_callback(NULL, NULL);
+ /*
+ * Don't allow the kernel module to be unloaded. Unloading adds some
+ * extra complexity which isn't really worth the effort in the case
+ * of this driver.
+ */
+ try_module_get(THIS_MODULE);
- return err;
+ return 0;
}
static const struct of_device_id tegra_emc_of_match[] = {
{ .compatible = "nvidia,tegra20-emc", },
{},
};
+MODULE_DEVICE_TABLE(of, tegra_emc_of_match);
static struct platform_driver tegra_emc_driver = {
.probe = tegra_emc_probe,
@@ -743,11 +1163,11 @@ static struct platform_driver tegra_emc_driver = {
.name = "tegra20-emc",
.of_match_table = tegra_emc_of_match,
.suppress_bind_attrs = true,
+ .sync_state = icc_sync_state,
},
};
+module_platform_driver(tegra_emc_driver);
-static int __init tegra_emc_init(void)
-{
- return platform_driver_register(&tegra_emc_driver);
-}
-subsys_initcall(tegra_emc_init);
+MODULE_AUTHOR("Dmitry Osipenko <digetx@gmail.com>");
+MODULE_DESCRIPTION("NVIDIA Tegra20 EMC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/memory/tegra/tegra20.c b/drivers/memory/tegra/tegra20.c
index a8098bff91d9..29ecf02805a0 100644
--- a/drivers/memory/tegra/tegra20.c
+++ b/drivers/memory/tegra/tegra20.c
@@ -3,6 +3,10 @@
* Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
*/
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
#include <dt-bindings/memory/tegra20-mc.h>
#include "mc.h"
@@ -280,6 +284,78 @@ static const struct tegra_mc_reset_ops tegra20_mc_reset_ops = {
.reset_status = tegra20_mc_reset_status,
};
+static int tegra20_mc_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ /*
+ * It should be possible to tune arbitration knobs here, but the
+ * default values are known to work well on all devices. Hence
+ * nothing to do here so far.
+ */
+ return 0;
+}
+
+static int tegra20_mc_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
+ u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
+{
+ /*
+ * ISO clients need to reserve extra bandwidth up-front because
+ * there could be high bandwidth pressure during initial filling
+ * of the client's FIFO buffers. Secondly, we need to take into
+ * account the imperfections of the memory subsystem.
+ */
+ if (tag & TEGRA_MC_ICC_TAG_ISO)
+ peak_bw = tegra_mc_scale_percents(peak_bw, 300);
+
+ *agg_avg += avg_bw;
+ *agg_peak = max(*agg_peak, peak_bw);
+
+ return 0;
+}
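To make the head-room concrete: assuming tegra_mc_scale_percents() scales its argument to the given percentage, an ISO client's peak request of 500000 units is reserved as 1500000. A self-contained sketch with a stand-in for that helper:

	#include <stdint.h>
	#include <stdio.h>

	/* stand-in for tegra_mc_scale_percents(): scale a value to N percent */
	static uint32_t scale_percents(uint32_t val, unsigned int percents)
	{
		return (uint64_t)val * percents / 100;
	}

	int main(void)
	{
		uint32_t peak_bw = 500000;	/* hypothetical ISO peak request */

		/* 300% up-front reservation for Tegra20 ISO clients, as above */
		printf("reserved peak bandwidth: %u\n",
		       scale_percents(peak_bw, 300));

		return 0;
	}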
+
+static struct icc_node_data *
+tegra20_mc_of_icc_xlate_extended(struct of_phandle_args *spec, void *data)
+{
+ struct tegra_mc *mc = icc_provider_to_tegra_mc(data);
+ unsigned int i, idx = spec->args[0];
+ struct icc_node_data *ndata;
+ struct icc_node *node;
+
+ list_for_each_entry(node, &mc->provider.nodes, node_list) {
+ if (node->id != idx)
+ continue;
+
+ ndata = kzalloc(sizeof(*ndata), GFP_KERNEL);
+ if (!ndata)
+ return ERR_PTR(-ENOMEM);
+
+ ndata->node = node;
+
+ /* these clients are isochronous by default */
+ if (strstarts(node->name, "display") ||
+ strstarts(node->name, "vi"))
+ ndata->tag = TEGRA_MC_ICC_TAG_ISO;
+ else
+ ndata->tag = TEGRA_MC_ICC_TAG_DEFAULT;
+
+ return ndata;
+ }
+
+ for (i = 0; i < mc->soc->num_clients; i++) {
+ if (mc->soc->clients[i].id == idx)
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ dev_err(mc->dev, "invalid ICC client ID %u\n", idx);
+
+ return ERR_PTR(-EINVAL);
+}
+
+static const struct tegra_mc_icc_ops tegra20_mc_icc_ops = {
+ .xlate_extended = tegra20_mc_of_icc_xlate_extended,
+ .aggregate = tegra20_mc_icc_aggregate,
+ .set = tegra20_mc_icc_set,
+};
+
const struct tegra_mc_soc tegra20_mc_soc = {
.clients = tegra20_mc_clients,
.num_clients = ARRAY_SIZE(tegra20_mc_clients),
@@ -290,4 +366,5 @@ const struct tegra_mc_soc tegra20_mc_soc = {
.reset_ops = &tegra20_mc_reset_ops,
.resets = tegra20_mc_resets,
.num_resets = ARRAY_SIZE(tegra20_mc_resets),
+ .icc_ops = &tegra20_mc_icc_ops,
};
diff --git a/drivers/memory/tegra/tegra210-emc-core.c b/drivers/memory/tegra/tegra210-emc-core.c
index cdd663ba4733..5f224796e32e 100644
--- a/drivers/memory/tegra/tegra210-emc-core.c
+++ b/drivers/memory/tegra/tegra210-emc-core.c
@@ -1828,7 +1828,6 @@ static int tegra210_emc_probe(struct platform_device *pdev)
{
struct thermal_cooling_device *cd;
unsigned long current_rate;
- struct platform_device *mc;
struct tegra210_emc *emc;
struct device_node *np;
unsigned int i;
@@ -1846,35 +1845,19 @@ static int tegra210_emc_probe(struct platform_device *pdev)
spin_lock_init(&emc->lock);
emc->dev = &pdev->dev;
- np = of_parse_phandle(pdev->dev.of_node, "nvidia,memory-controller", 0);
- if (!np) {
- dev_err(&pdev->dev, "could not get memory controller\n");
- return -ENOENT;
- }
-
- mc = of_find_device_by_node(np);
- of_node_put(np);
- if (!mc)
- return -ENOENT;
-
- emc->mc = platform_get_drvdata(mc);
- if (!emc->mc) {
- put_device(&mc->dev);
- return -EPROBE_DEFER;
- }
+ emc->mc = devm_tegra_memory_controller_get(&pdev->dev);
+ if (IS_ERR(emc->mc))
+ return PTR_ERR(emc->mc);
emc->regs = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(emc->regs)) {
- err = PTR_ERR(emc->regs);
- goto put_mc;
- }
+ if (IS_ERR(emc->regs))
+ return PTR_ERR(emc->regs);
for (i = 0; i < 2; i++) {
emc->channel[i] = devm_platform_ioremap_resource(pdev, 1 + i);
- if (IS_ERR(emc->channel[i])) {
- err = PTR_ERR(emc->channel[i]);
- goto put_mc;
- }
+ if (IS_ERR(emc->channel[i]))
+ return PTR_ERR(emc->channel[i]);
}
tegra210_emc_detect(emc);
@@ -1884,7 +1867,7 @@ static int tegra210_emc_probe(struct platform_device *pdev)
err = of_reserved_mem_device_init_by_name(emc->dev, np, "nominal");
if (err < 0) {
dev_err(emc->dev, "failed to get nominal EMC table: %d\n", err);
- goto put_mc;
+ return err;
}
err = of_reserved_mem_device_init_by_name(emc->dev, np, "derated");
@@ -2015,8 +1998,7 @@ detach:
tegra210_clk_emc_detach(emc->clk);
release:
of_reserved_mem_device_release(emc->dev);
-put_mc:
- put_device(emc->mc->dev);
+
return err;
}
@@ -2027,7 +2009,6 @@ static int tegra210_emc_remove(struct platform_device *pdev)
debugfs_remove_recursive(emc->debugfs.root);
tegra210_clk_emc_detach(emc->clk);
of_reserved_mem_device_release(emc->dev);
- put_device(emc->mc->dev);
return 0;
}
diff --git a/drivers/memory/tegra/tegra210.c b/drivers/memory/tegra/tegra210.c
index 7fb8b5438bf4..b3bbc5a05ba1 100644
--- a/drivers/memory/tegra/tegra210.c
+++ b/drivers/memory/tegra/tegra210.c
@@ -24,7 +24,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
.reg = 0x2e8,
.shift = 0,
.mask = 0xff,
- .def = 0xc2,
+ .def = 0x1e,
},
}, {
.id = 0x02,
@@ -38,7 +38,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
.reg = 0x2f4,
.shift = 0,
.mask = 0xff,
- .def = 0xc6,
+ .def = 0x1e,
},
}, {
.id = 0x03,
@@ -52,7 +52,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
.reg = 0x2e8,
.shift = 16,
.mask = 0xff,
- .def = 0x50,
+ .def = 0x1e,
},
}, {
.id = 0x04,
@@ -66,7 +66,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
.reg = 0x2f4,
.shift = 16,
.mask = 0xff,
- .def = 0x50,
+ .def = 0x1e,
},
}, {
.id = 0x05,
@@ -80,7 +80,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
.reg = 0x2ec,
.shift = 0,
.mask = 0xff,
- .def = 0x50,
+ .def = 0x1e,
},
}, {
.id = 0x06,
@@ -94,7 +94,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
.reg = 0x2f8,
.shift = 0,
.mask = 0xff,
- .def = 0x50,
+ .def = 0x1e,
},
}, {
.id = 0x0e,
@@ -108,7 +108,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
.reg = 0x2e0,
.shift = 0,
.mask = 0xff,
- .def = 0x13,
+ .def = 0x2e,
},
}, {
.id = 0x0f,
@@ -136,7 +136,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
.reg = 0x2f0,
.shift = 0,
.mask = 0xff,
- .def = 0x50,
+ .def = 0x1e,
},
}, {
.id = 0x11,
@@ -150,7 +150,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
.reg = 0x2fc,
.shift = 0,
.mask = 0xff,
- .def = 0x50,
+ .def = 0x1e,
},
}, {
.id = 0x15,
@@ -380,7 +380,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
.reg = 0x350,
.shift = 16,
.mask = 0xff,
- .def = 0x65,
+ .def = 0x80,
},
}, {
.id = 0x44,
@@ -620,7 +620,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
.reg = 0x2f0,
.shift = 16,
.mask = 0xff,
- .def = 0x50,
+ .def = 0x1e,
},
}, {
.id = 0x60,
@@ -648,7 +648,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
.reg = 0x3bc,
.shift = 0,
.mask = 0xff,
- .def = 0x49,
+ .def = 0x5a,
},
}, {
.id = 0x62,
@@ -676,7 +676,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
.reg = 0x3c4,
.shift = 0,
.mask = 0xff,
- .def = 0x49,
+ .def = 0x5a,
},
}, {
.id = 0x64,
@@ -897,7 +897,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
.bit = 1,
},
.la = {
- .reg = 0xb98,
+ .reg = 0x3e0,
.shift = 16,
.mask = 0xff,
.def = 0x80,
@@ -956,7 +956,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
.reg = 0x3ec,
.shift = 16,
.mask = 0xff,
- .def = 0xff,
+ .def = 0x80,
},
}, {
.id = 0x86,
@@ -1020,35 +1020,45 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
};
static const struct tegra_smmu_swgroup tegra210_swgroups[] = {
- { .name = "dc", .swgroup = TEGRA_SWGROUP_DC, .reg = 0x240 },
- { .name = "dcb", .swgroup = TEGRA_SWGROUP_DCB, .reg = 0x244 },
{ .name = "afi", .swgroup = TEGRA_SWGROUP_AFI, .reg = 0x238 },
{ .name = "avpc", .swgroup = TEGRA_SWGROUP_AVPC, .reg = 0x23c },
- { .name = "hda", .swgroup = TEGRA_SWGROUP_HDA, .reg = 0x254 },
+ { .name = "dc", .swgroup = TEGRA_SWGROUP_DC, .reg = 0x240 },
+ { .name = "dcb", .swgroup = TEGRA_SWGROUP_DCB, .reg = 0x244 },
{ .name = "hc", .swgroup = TEGRA_SWGROUP_HC, .reg = 0x250 },
+ { .name = "hda", .swgroup = TEGRA_SWGROUP_HDA, .reg = 0x254 },
+ { .name = "isp2", .swgroup = TEGRA_SWGROUP_ISP2, .reg = 0x258 },
{ .name = "nvenc", .swgroup = TEGRA_SWGROUP_NVENC, .reg = 0x264 },
+ { .name = "nv", .swgroup = TEGRA_SWGROUP_NV, .reg = 0x268 },
+ { .name = "nv2", .swgroup = TEGRA_SWGROUP_NV2, .reg = 0x26c },
{ .name = "ppcs", .swgroup = TEGRA_SWGROUP_PPCS, .reg = 0x270 },
{ .name = "sata", .swgroup = TEGRA_SWGROUP_SATA, .reg = 0x274 },
- { .name = "isp2", .swgroup = TEGRA_SWGROUP_ISP2, .reg = 0x258 },
+ { .name = "vi", .swgroup = TEGRA_SWGROUP_VI, .reg = 0x280 },
+ { .name = "vic", .swgroup = TEGRA_SWGROUP_VIC, .reg = 0x284 },
{ .name = "xusb_host", .swgroup = TEGRA_SWGROUP_XUSB_HOST, .reg = 0x288 },
{ .name = "xusb_dev", .swgroup = TEGRA_SWGROUP_XUSB_DEV, .reg = 0x28c },
- { .name = "isp2b", .swgroup = TEGRA_SWGROUP_ISP2B, .reg = 0xaa4 },
- { .name = "tsec", .swgroup = TEGRA_SWGROUP_TSEC, .reg = 0x294 },
{ .name = "a9avp", .swgroup = TEGRA_SWGROUP_A9AVP, .reg = 0x290 },
- { .name = "gpu", .swgroup = TEGRA_SWGROUP_GPU, .reg = 0xaac },
+ { .name = "tsec", .swgroup = TEGRA_SWGROUP_TSEC, .reg = 0x294 },
+ { .name = "ppcs1", .swgroup = TEGRA_SWGROUP_PPCS1, .reg = 0x298 },
+ { .name = "dc1", .swgroup = TEGRA_SWGROUP_DC1, .reg = 0xa88 },
{ .name = "sdmmc1a", .swgroup = TEGRA_SWGROUP_SDMMC1A, .reg = 0xa94 },
{ .name = "sdmmc2a", .swgroup = TEGRA_SWGROUP_SDMMC2A, .reg = 0xa98 },
{ .name = "sdmmc3a", .swgroup = TEGRA_SWGROUP_SDMMC3A, .reg = 0xa9c },
{ .name = "sdmmc4a", .swgroup = TEGRA_SWGROUP_SDMMC4A, .reg = 0xaa0 },
- { .name = "vic", .swgroup = TEGRA_SWGROUP_VIC, .reg = 0x284 },
- { .name = "vi", .swgroup = TEGRA_SWGROUP_VI, .reg = 0x280 },
+ { .name = "isp2b", .swgroup = TEGRA_SWGROUP_ISP2B, .reg = 0xaa4 },
+ { .name = "gpu", .swgroup = TEGRA_SWGROUP_GPU, .reg = 0xaac },
+ { .name = "ppcs2", .swgroup = TEGRA_SWGROUP_PPCS2, .reg = 0xab0 },
{ .name = "nvdec", .swgroup = TEGRA_SWGROUP_NVDEC, .reg = 0xab4 },
{ .name = "ape", .swgroup = TEGRA_SWGROUP_APE, .reg = 0xab8 },
- { .name = "nvjpg", .swgroup = TEGRA_SWGROUP_NVJPG, .reg = 0xac0 },
{ .name = "se", .swgroup = TEGRA_SWGROUP_SE, .reg = 0xabc },
+ { .name = "nvjpg", .swgroup = TEGRA_SWGROUP_NVJPG, .reg = 0xac0 },
+ { .name = "hc1", .swgroup = TEGRA_SWGROUP_HC1, .reg = 0xac4 },
+ { .name = "se1", .swgroup = TEGRA_SWGROUP_SE1, .reg = 0xac8 },
{ .name = "axiap", .swgroup = TEGRA_SWGROUP_AXIAP, .reg = 0xacc },
{ .name = "etr", .swgroup = TEGRA_SWGROUP_ETR, .reg = 0xad0 },
{ .name = "tsecb", .swgroup = TEGRA_SWGROUP_TSECB, .reg = 0xad4 },
+ { .name = "tsec1", .swgroup = TEGRA_SWGROUP_TSEC1, .reg = 0xad8 },
+ { .name = "tsecb1", .swgroup = TEGRA_SWGROUP_TSECB1, .reg = 0xadc },
+ { .name = "nvdec1", .swgroup = TEGRA_SWGROUP_NVDEC1, .reg = 0xae0 },
};
static const unsigned int tegra210_group_display[] = {
diff --git a/drivers/memory/tegra/tegra30-emc.c b/drivers/memory/tegra/tegra30-emc.c
index 055af0e08a2e..44ac155936aa 100644
--- a/drivers/memory/tegra/tegra30-emc.c
+++ b/drivers/memory/tegra/tegra30-emc.c
@@ -14,16 +14,21 @@
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/interconnect-provider.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/types.h>
+#include <soc/tegra/common.h>
#include <soc/tegra/fuse.h>
#include "mc.h"
@@ -323,9 +328,21 @@ struct emc_timing {
bool emc_cfg_dyn_self_ref;
};
+enum emc_rate_request_type {
+ EMC_RATE_DEBUG,
+ EMC_RATE_ICC,
+ EMC_RATE_TYPE_MAX,
+};
+
+struct emc_rate_request {
+ unsigned long min_rate;
+ unsigned long max_rate;
+};
+
struct tegra_emc {
struct device *dev;
struct tegra_mc *mc;
+ struct icc_provider provider;
struct notifier_block clk_nb;
struct clk *clk;
void __iomem *regs;
@@ -352,6 +369,15 @@ struct tegra_emc {
unsigned long min_rate;
unsigned long max_rate;
} debugfs;
+
+ /*
+ * There are multiple sources in the EMC driver which could request
+ * a min/max clock rate; these rates are contained in this array.
+ */
+ struct emc_rate_request requested_rate[EMC_RATE_TYPE_MAX];
+
+ /* protect shared rate-change code path */
+ struct mutex rate_lock;
};
static int emc_seq_update_timing(struct tegra_emc *emc)
@@ -988,6 +1014,11 @@ static struct device_node *emc_find_node_by_ram_code(struct device *dev)
u32 value, ram_code;
int err;
+ if (of_get_child_count(dev->of_node) == 0) {
+ dev_info(dev, "device-tree doesn't have memory timings\n");
+ return NULL;
+ }
+
ram_code = tegra_read_ram_code();
for_each_child_of_node(dev->of_node, np) {
@@ -1057,6 +1088,9 @@ static long emc_round_rate(unsigned long rate,
struct tegra_emc *emc = arg;
unsigned int i;
+ if (!emc->num_timings)
+ return clk_get_rate(emc->clk);
+
min_rate = min(min_rate, emc->timings[emc->num_timings - 1].rate);
for (i = 0; i < emc->num_timings; i++) {
@@ -1086,6 +1120,83 @@ static long emc_round_rate(unsigned long rate,
return timing->rate;
}
+static void tegra_emc_rate_requests_init(struct tegra_emc *emc)
+{
+ unsigned int i;
+
+ for (i = 0; i < EMC_RATE_TYPE_MAX; i++) {
+ emc->requested_rate[i].min_rate = 0;
+ emc->requested_rate[i].max_rate = ULONG_MAX;
+ }
+}
+
+static int emc_request_rate(struct tegra_emc *emc,
+ unsigned long new_min_rate,
+ unsigned long new_max_rate,
+ enum emc_rate_request_type type)
+{
+ struct emc_rate_request *req = emc->requested_rate;
+ unsigned long min_rate = 0, max_rate = ULONG_MAX;
+ unsigned int i;
+ int err;
+
+ /* select minimum and maximum rates among the requested rates */
+ for (i = 0; i < EMC_RATE_TYPE_MAX; i++, req++) {
+ if (i == type) {
+ min_rate = max(new_min_rate, min_rate);
+ max_rate = min(new_max_rate, max_rate);
+ } else {
+ min_rate = max(req->min_rate, min_rate);
+ max_rate = min(req->max_rate, max_rate);
+ }
+ }
+
+ if (min_rate > max_rate) {
+ dev_err_ratelimited(emc->dev, "%s: type %u: out of range: %lu %lu\n",
+ __func__, type, min_rate, max_rate);
+ return -ERANGE;
+ }
+
+ /*
+ * EMC rate-changes should go via OPP API because it manages voltage
+ * changes.
+ */
+ err = dev_pm_opp_set_rate(emc->dev, min_rate);
+ if (err)
+ return err;
+
+ emc->requested_rate[type].min_rate = new_min_rate;
+ emc->requested_rate[type].max_rate = new_max_rate;
+
+ return 0;
+}
+
+static int emc_set_min_rate(struct tegra_emc *emc, unsigned long rate,
+ enum emc_rate_request_type type)
+{
+ struct emc_rate_request *req = &emc->requested_rate[type];
+ int ret;
+
+ mutex_lock(&emc->rate_lock);
+ ret = emc_request_rate(emc, rate, req->max_rate, type);
+ mutex_unlock(&emc->rate_lock);
+
+ return ret;
+}
+
+static int emc_set_max_rate(struct tegra_emc *emc, unsigned long rate,
+ enum emc_rate_request_type type)
+{
+ struct emc_rate_request *req = &emc->requested_rate[type];
+ int ret;
+
+ mutex_lock(&emc->rate_lock);
+ ret = emc_request_rate(emc, req->min_rate, rate, type);
+ mutex_unlock(&emc->rate_lock);
+
+ return ret;
+}
+
/*
* debugfs interface
*
@@ -1169,7 +1280,7 @@ static int tegra_emc_debug_min_rate_set(void *data, u64 rate)
if (!tegra_emc_validate_rate(emc, rate))
return -EINVAL;
- err = clk_set_min_rate(emc->clk, rate);
+ err = emc_set_min_rate(emc, rate, EMC_RATE_DEBUG);
if (err < 0)
return err;
@@ -1199,7 +1310,7 @@ static int tegra_emc_debug_max_rate_set(void *data, u64 rate)
if (!tegra_emc_validate_rate(emc, rate))
return -EINVAL;
- err = clk_set_max_rate(emc->clk, rate);
+ err = emc_set_max_rate(emc, rate, EMC_RATE_DEBUG);
if (err < 0)
return err;
@@ -1256,51 +1367,239 @@ static void tegra_emc_debugfs_init(struct tegra_emc *emc)
emc, &tegra_emc_debug_max_rate_fops);
}
-static int tegra_emc_probe(struct platform_device *pdev)
+static inline struct tegra_emc *
+to_tegra_emc_provider(struct icc_provider *provider)
{
- struct platform_device *mc;
- struct device_node *np;
- struct tegra_emc *emc;
+ return container_of(provider, struct tegra_emc, provider);
+}
+
+static struct icc_node_data *
+emc_of_icc_xlate_extended(struct of_phandle_args *spec, void *data)
+{
+ struct icc_provider *provider = data;
+ struct icc_node_data *ndata;
+ struct icc_node *node;
+
+ /* External Memory is the only possible ICC route */
+ list_for_each_entry(node, &provider->nodes, node_list) {
+ if (node->id != TEGRA_ICC_EMEM)
+ continue;
+
+ ndata = kzalloc(sizeof(*ndata), GFP_KERNEL);
+ if (!ndata)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+ * SRC and DST nodes should have matching TAG in order to have
+ * it set by default for a requested path.
+ */
+ ndata->tag = TEGRA_MC_ICC_TAG_ISO;
+ ndata->node = node;
+
+ return ndata;
+ }
+
+ return ERR_PTR(-EPROBE_DEFER);
+}
+
+static int emc_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ struct tegra_emc *emc = to_tegra_emc_provider(dst->provider);
+ unsigned long long peak_bw = icc_units_to_bps(dst->peak_bw);
+ unsigned long long avg_bw = icc_units_to_bps(dst->avg_bw);
+ unsigned long long rate = max(avg_bw, peak_bw);
+ const unsigned int dram_data_bus_width_bytes = 4;
+ const unsigned int ddr = 2;
+ int err;
+
+ /*
+ * Tegra30 EMC runs at the SDRAM bus clock rate. This means that the
+ * EMC clock rate is half the peak data rate because data is sampled
+ * on both EMC clock edges.
+ */
+ do_div(rate, ddr * dram_data_bus_width_bytes);
+ rate = min_t(u64, rate, U32_MAX);
+
+ err = emc_set_min_rate(emc, rate, EMC_RATE_ICC);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int tegra_emc_interconnect_init(struct tegra_emc *emc)
+{
+ const struct tegra_mc_soc *soc = emc->mc->soc;
+ struct icc_node *node;
int err;
- if (of_get_child_count(pdev->dev.of_node) == 0) {
- dev_info(&pdev->dev,
- "device-tree node doesn't have memory timings\n");
- return -ENODEV;
+ emc->provider.dev = emc->dev;
+ emc->provider.set = emc_icc_set;
+ emc->provider.data = &emc->provider;
+ emc->provider.aggregate = soc->icc_ops->aggregate;
+ emc->provider.xlate_extended = emc_of_icc_xlate_extended;
+
+ err = icc_provider_add(&emc->provider);
+ if (err)
+ goto err_msg;
+
+ /* create External Memory Controller node */
+ node = icc_node_create(TEGRA_ICC_EMC);
+ if (IS_ERR(node)) {
+ err = PTR_ERR(node);
+ goto del_provider;
}
- np = of_parse_phandle(pdev->dev.of_node, "nvidia,memory-controller", 0);
- if (!np) {
- dev_err(&pdev->dev, "could not get memory controller node\n");
- return -ENOENT;
+ node->name = "External Memory Controller";
+ icc_node_add(node, &emc->provider);
+
+ /* link External Memory Controller to External Memory (DRAM) */
+ err = icc_link_create(node, TEGRA_ICC_EMEM);
+ if (err)
+ goto remove_nodes;
+
+ /* create External Memory node */
+ node = icc_node_create(TEGRA_ICC_EMEM);
+ if (IS_ERR(node)) {
+ err = PTR_ERR(node);
+ goto remove_nodes;
}
- mc = of_find_device_by_node(np);
- of_node_put(np);
- if (!mc)
- return -ENOENT;
+ node->name = "External Memory (DRAM)";
+ icc_node_add(node, &emc->provider);
- np = emc_find_node_by_ram_code(&pdev->dev);
- if (!np)
- return -EINVAL;
+ return 0;
+
+remove_nodes:
+ icc_nodes_remove(&emc->provider);
+del_provider:
+ icc_provider_del(&emc->provider);
+err_msg:
+ dev_err(emc->dev, "failed to initialize ICC: %d\n", err);
+
+ return err;
+}
+
+static int tegra_emc_opp_table_init(struct tegra_emc *emc)
+{
+ u32 hw_version = BIT(tegra_sku_info.soc_speedo_id);
+ struct opp_table *clk_opp_table, *hw_opp_table;
+ int err;
+
+ clk_opp_table = dev_pm_opp_set_clkname(emc->dev, NULL);
+ err = PTR_ERR_OR_ZERO(clk_opp_table);
+ if (err) {
+ dev_err(emc->dev, "failed to set OPP clk: %d\n", err);
+ return err;
+ }
+
+ hw_opp_table = dev_pm_opp_set_supported_hw(emc->dev, &hw_version, 1);
+ err = PTR_ERR_OR_ZERO(hw_opp_table);
+ if (err) {
+ dev_err(emc->dev, "failed to set OPP supported HW: %d\n", err);
+ goto put_clk_table;
+ }
+
+ err = dev_pm_opp_of_add_table(emc->dev);
+ if (err) {
+ if (err == -ENODEV)
+ dev_err(emc->dev, "OPP table not found, please update your device tree\n");
+ else
+ dev_err(emc->dev, "failed to add OPP table: %d\n", err);
+
+ goto put_hw_table;
+ }
+
+ dev_info(emc->dev, "OPP HW ver. 0x%x, current clock rate %lu MHz\n",
+ hw_version, clk_get_rate(emc->clk) / 1000000);
+
+ /* first dummy rate-set initializes voltage state */
+ err = dev_pm_opp_set_rate(emc->dev, clk_get_rate(emc->clk));
+ if (err) {
+ dev_err(emc->dev, "failed to initialize OPP clock: %d\n", err);
+ goto remove_table;
+ }
+
+ return 0;
+
+remove_table:
+ dev_pm_opp_of_remove_table(emc->dev);
+put_hw_table:
+ dev_pm_opp_put_supported_hw(hw_opp_table);
+put_clk_table:
+ dev_pm_opp_put_clkname(clk_opp_table);
+
+ return err;
+}
+
+static void devm_tegra_emc_unset_callback(void *data)
+{
+ tegra20_clk_set_emc_round_callback(NULL, NULL);
+}
+
+static void devm_tegra_emc_unreg_clk_notifier(void *data)
+{
+ struct tegra_emc *emc = data;
+
+ clk_notifier_unregister(emc->clk, &emc->clk_nb);
+}
+
+static int tegra_emc_init_clk(struct tegra_emc *emc)
+{
+ int err;
+
+ tegra20_clk_set_emc_round_callback(emc_round_rate, emc);
+
+ err = devm_add_action_or_reset(emc->dev, devm_tegra_emc_unset_callback,
+ NULL);
+ if (err)
+ return err;
+
+ emc->clk = devm_clk_get(emc->dev, NULL);
+ if (IS_ERR(emc->clk)) {
+ dev_err(emc->dev, "failed to get EMC clock: %pe\n", emc->clk);
+ return PTR_ERR(emc->clk);
+ }
+
+ err = clk_notifier_register(emc->clk, &emc->clk_nb);
+ if (err) {
+ dev_err(emc->dev, "failed to register clk notifier: %d\n", err);
+ return err;
+ }
+
+ err = devm_add_action_or_reset(emc->dev,
+ devm_tegra_emc_unreg_clk_notifier, emc);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int tegra_emc_probe(struct platform_device *pdev)
+{
+ struct device_node *np;
+ struct tegra_emc *emc;
+ int err;
emc = devm_kzalloc(&pdev->dev, sizeof(*emc), GFP_KERNEL);
- if (!emc) {
- of_node_put(np);
+ if (!emc)
return -ENOMEM;
- }
- emc->mc = platform_get_drvdata(mc);
- if (!emc->mc)
- return -EPROBE_DEFER;
+ emc->mc = devm_tegra_memory_controller_get(&pdev->dev);
+ if (IS_ERR(emc->mc))
+ return PTR_ERR(emc->mc);
+ mutex_init(&emc->rate_lock);
emc->clk_nb.notifier_call = emc_clk_change_notify;
emc->dev = &pdev->dev;
- err = emc_load_timings_from_dt(emc, np);
- of_node_put(np);
- if (err)
- return err;
+ np = emc_find_node_by_ram_code(&pdev->dev);
+ if (np) {
+ err = emc_load_timings_from_dt(emc, np);
+ of_node_put(np);
+ if (err)
+ return err;
+ }
emc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(emc->regs))
@@ -1311,10 +1610,9 @@ static int tegra_emc_probe(struct platform_device *pdev)
return err;
err = platform_get_irq(pdev, 0);
- if (err < 0) {
- dev_err(&pdev->dev, "interrupt not specified: %d\n", err);
+ if (err < 0)
return err;
- }
+
emc->irq = err;
err = devm_request_irq(&pdev->dev, emc->irq, tegra_emc_isr, 0,
@@ -1324,31 +1622,27 @@ static int tegra_emc_probe(struct platform_device *pdev)
return err;
}
- tegra20_clk_set_emc_round_callback(emc_round_rate, emc);
-
- emc->clk = devm_clk_get(&pdev->dev, "emc");
- if (IS_ERR(emc->clk)) {
- err = PTR_ERR(emc->clk);
- dev_err(&pdev->dev, "failed to get emc clock: %d\n", err);
- goto unset_cb;
- }
+ err = tegra_emc_init_clk(emc);
+ if (err)
+ return err;
- err = clk_notifier_register(emc->clk, &emc->clk_nb);
- if (err) {
- dev_err(&pdev->dev, "failed to register clk notifier: %d\n",
- err);
- goto unset_cb;
- }
+ err = tegra_emc_opp_table_init(emc);
+ if (err)
+ return err;
platform_set_drvdata(pdev, emc);
+ tegra_emc_rate_requests_init(emc);
tegra_emc_debugfs_init(emc);
+ tegra_emc_interconnect_init(emc);
- return 0;
-
-unset_cb:
- tegra20_clk_set_emc_round_callback(NULL, NULL);
+ /*
+ * Don't allow the kernel module to be unloaded. Unloading adds some
+ * extra complexity which isn't really worth the effort in the case
+ * of this driver.
+ */
+ try_module_get(THIS_MODULE);
- return err;
+ return 0;
}
static int tegra_emc_suspend(struct device *dev)
@@ -1393,6 +1687,7 @@ static const struct of_device_id tegra_emc_of_match[] = {
{ .compatible = "nvidia,tegra30-emc", },
{},
};
+MODULE_DEVICE_TABLE(of, tegra_emc_of_match);
static struct platform_driver tegra_emc_driver = {
.probe = tegra_emc_probe,
@@ -1401,11 +1696,11 @@ static struct platform_driver tegra_emc_driver = {
.of_match_table = tegra_emc_of_match,
.pm = &tegra_emc_pm_ops,
.suppress_bind_attrs = true,
+ .sync_state = icc_sync_state,
},
};
+module_platform_driver(tegra_emc_driver);
-static int __init tegra_emc_init(void)
-{
- return platform_driver_register(&tegra_emc_driver);
-}
-subsys_initcall(tegra_emc_init);
+MODULE_AUTHOR("Dmitry Osipenko <digetx@gmail.com>");
+MODULE_DESCRIPTION("NVIDIA Tegra30 EMC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/memory/tegra/tegra30.c b/drivers/memory/tegra/tegra30.c
index fcdd812eed80..ea849003014b 100644
--- a/drivers/memory/tegra/tegra30.c
+++ b/drivers/memory/tegra/tegra30.c
@@ -4,7 +4,8 @@
*/
#include <linux/of.h>
-#include <linux/mm.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
#include <dt-bindings/memory/tegra30-mc.h>
@@ -36,6 +37,13 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.id = 0x00,
.name = "ptcr",
.swgroup = TEGRA_SWGROUP_PTC,
+ .la = {
+ .reg = 0x34c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x0,
+ },
+ .fifo_size = 16 * 2,
}, {
.id = 0x01,
.name = "display0a",
@@ -50,6 +58,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x4e,
},
+ .fifo_size = 16 * 128,
}, {
.id = 0x02,
.name = "display0ab",
@@ -64,6 +73,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x4e,
},
+ .fifo_size = 16 * 128,
}, {
.id = 0x03,
.name = "display0b",
@@ -78,6 +88,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x4e,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x04,
.name = "display0bb",
@@ -92,6 +103,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x4e,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x05,
.name = "display0c",
@@ -106,6 +118,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x4e,
},
+ .fifo_size = 16 * 128,
}, {
.id = 0x06,
.name = "display0cb",
@@ -120,6 +133,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x4e,
},
+ .fifo_size = 16 * 128,
}, {
.id = 0x07,
.name = "display1b",
@@ -134,6 +148,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x4e,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x08,
.name = "display1bb",
@@ -148,6 +163,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x4e,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x09,
.name = "eppup",
@@ -162,6 +178,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x17,
},
+ .fifo_size = 16 * 8,
}, {
.id = 0x0a,
.name = "g2pr",
@@ -176,6 +193,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x09,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x0b,
.name = "g2sr",
@@ -190,6 +208,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x09,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x0c,
.name = "mpeunifbr",
@@ -204,6 +223,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x50,
},
+ .fifo_size = 16 * 8,
}, {
.id = 0x0d,
.name = "viruv",
@@ -218,6 +238,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x2c,
},
+ .fifo_size = 16 * 8,
}, {
.id = 0x0e,
.name = "afir",
@@ -232,6 +253,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x10,
},
+ .fifo_size = 16 * 32,
}, {
.id = 0x0f,
.name = "avpcarm7r",
@@ -246,6 +268,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x04,
},
+ .fifo_size = 16 * 2,
}, {
.id = 0x10,
.name = "displayhc",
@@ -260,6 +283,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0xff,
},
+ .fifo_size = 16 * 2,
}, {
.id = 0x11,
.name = "displayhcb",
@@ -274,6 +298,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0xff,
},
+ .fifo_size = 16 * 2,
}, {
.id = 0x12,
.name = "fdcdrd",
@@ -288,6 +313,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x0a,
},
+ .fifo_size = 16 * 48,
}, {
.id = 0x13,
.name = "fdcdrd2",
@@ -302,6 +328,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x0a,
},
+ .fifo_size = 16 * 48,
}, {
.id = 0x14,
.name = "g2dr",
@@ -316,6 +343,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x0a,
},
+ .fifo_size = 16 * 48,
}, {
.id = 0x15,
.name = "hdar",
@@ -330,6 +358,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0xff,
},
+ .fifo_size = 16 * 16,
}, {
.id = 0x16,
.name = "host1xdmar",
@@ -344,6 +373,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x05,
},
+ .fifo_size = 16 * 16,
}, {
.id = 0x17,
.name = "host1xr",
@@ -358,6 +388,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x50,
},
+ .fifo_size = 16 * 8,
}, {
.id = 0x18,
.name = "idxsrd",
@@ -372,6 +403,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x13,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x19,
.name = "idxsrd2",
@@ -386,6 +418,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x13,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x1a,
.name = "mpe_ipred",
@@ -400,6 +433,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x80,
},
+ .fifo_size = 16 * 2,
}, {
.id = 0x1b,
.name = "mpeamemrd",
@@ -414,6 +448,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x42,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x1c,
.name = "mpecsrd",
@@ -428,6 +463,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0xff,
},
+ .fifo_size = 16 * 8,
}, {
.id = 0x1d,
.name = "ppcsahbdmar",
@@ -442,6 +478,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x10,
},
+ .fifo_size = 16 * 2,
}, {
.id = 0x1e,
.name = "ppcsahbslvr",
@@ -456,6 +493,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x12,
},
+ .fifo_size = 16 * 8,
}, {
.id = 0x1f,
.name = "satar",
@@ -470,6 +508,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x33,
},
+ .fifo_size = 16 * 32,
}, {
.id = 0x20,
.name = "texsrd",
@@ -484,6 +523,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x13,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x21,
.name = "texsrd2",
@@ -498,6 +538,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x13,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x22,
.name = "vdebsevr",
@@ -512,6 +553,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0xff,
},
+ .fifo_size = 16 * 8,
}, {
.id = 0x23,
.name = "vdember",
@@ -526,6 +568,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0xd0,
},
+ .fifo_size = 16 * 4,
}, {
.id = 0x24,
.name = "vdemcer",
@@ -540,6 +583,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x2a,
},
+ .fifo_size = 16 * 16,
}, {
.id = 0x25,
.name = "vdetper",
@@ -554,6 +598,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x74,
},
+ .fifo_size = 16 * 16,
}, {
.id = 0x26,
.name = "mpcorelpr",
@@ -564,6 +609,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x04,
},
+ .fifo_size = 16 * 14,
}, {
.id = 0x27,
.name = "mpcorer",
@@ -574,6 +620,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x04,
},
+ .fifo_size = 16 * 14,
}, {
.id = 0x28,
.name = "eppu",
@@ -588,6 +635,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x6c,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x29,
.name = "eppv",
@@ -602,6 +650,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x6c,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x2a,
.name = "eppy",
@@ -616,6 +665,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x6c,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x2b,
.name = "mpeunifbw",
@@ -630,6 +680,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x13,
},
+ .fifo_size = 16 * 8,
}, {
.id = 0x2c,
.name = "viwsb",
@@ -644,6 +695,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x12,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x2d,
.name = "viwu",
@@ -658,6 +710,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0xb2,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x2e,
.name = "viwv",
@@ -672,6 +725,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0xb2,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x2f,
.name = "viwy",
@@ -686,6 +740,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x12,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x30,
.name = "g2dw",
@@ -700,6 +755,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x9,
},
+ .fifo_size = 16 * 128,
}, {
.id = 0x31,
.name = "afiw",
@@ -714,6 +770,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x0c,
},
+ .fifo_size = 16 * 32,
}, {
.id = 0x32,
.name = "avpcarm7w",
@@ -728,6 +785,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x0e,
},
+ .fifo_size = 16 * 2,
}, {
.id = 0x33,
.name = "fdcdwr",
@@ -742,6 +800,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x0a,
},
+ .fifo_size = 16 * 48,
}, {
.id = 0x34,
.name = "fdcdwr2",
@@ -756,6 +815,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x0a,
},
+ .fifo_size = 16 * 48,
}, {
.id = 0x35,
.name = "hdaw",
@@ -770,6 +830,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0xff,
},
+ .fifo_size = 16 * 16,
}, {
.id = 0x36,
.name = "host1xw",
@@ -784,6 +845,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x10,
},
+ .fifo_size = 16 * 32,
}, {
.id = 0x37,
.name = "ispw",
@@ -798,6 +860,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0xff,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x38,
.name = "mpcorelpw",
@@ -808,6 +871,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x0e,
},
+ .fifo_size = 16 * 24,
}, {
.id = 0x39,
.name = "mpcorew",
@@ -818,6 +882,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x0e,
},
+ .fifo_size = 16 * 24,
}, {
.id = 0x3a,
.name = "mpecswr",
@@ -832,6 +897,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0xff,
},
+ .fifo_size = 16 * 8,
}, {
.id = 0x3b,
.name = "ppcsahbdmaw",
@@ -846,6 +912,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x10,
},
+ .fifo_size = 16 * 2,
}, {
.id = 0x3c,
.name = "ppcsahbslvw",
@@ -860,6 +927,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x06,
},
+ .fifo_size = 16 * 4,
}, {
.id = 0x3d,
.name = "sataw",
@@ -874,6 +942,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x33,
},
+ .fifo_size = 16 * 32,
}, {
.id = 0x3e,
.name = "vdebsevw",
@@ -888,6 +957,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0xff,
},
+ .fifo_size = 16 * 4,
}, {
.id = 0x3f,
.name = "vdedbgw",
@@ -902,6 +972,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0xff,
},
+ .fifo_size = 16 * 16,
}, {
.id = 0x40,
.name = "vdembew",
@@ -916,6 +987,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x42,
},
+ .fifo_size = 16 * 2,
}, {
.id = 0x41,
.name = "vdetpmw",
@@ -930,6 +1002,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x2a,
},
+ .fifo_size = 16 * 16,
},
};
@@ -1011,6 +1084,175 @@ static const struct tegra_mc_reset tegra30_mc_resets[] = {
TEGRA30_MC_RESET(VI, 0x200, 0x204, 17),
};
+static void tegra30_mc_tune_client_latency(struct tegra_mc *mc,
+ const struct tegra_mc_client *client,
+ unsigned int bandwidth_mbytes_sec)
+{
+ u32 arb_tolerance_compensation_nsec, arb_tolerance_compensation_div;
+ const struct tegra_mc_la *la = &client->la;
+ unsigned int fifo_size = client->fifo_size;
+ u32 arb_nsec, la_ticks, value;
+
+ /* see 18.4.1 Client Configuration in Tegra3 TRM v03p */
+ if (bandwidth_mbytes_sec)
+ arb_nsec = fifo_size * NSEC_PER_USEC / bandwidth_mbytes_sec;
+ else
+ arb_nsec = U32_MAX;
+
+ /*
+ * Latency allowance should be set with consideration for the module's
+ * latency tolerance and internal buffering capabilities.
+ *
+ * Display memory clients use isochronous transfers and have very low
+ * tolerance for belated transfers. Hence we need to compensate for
+ * memory arbitration imperfections in order to prevent a FIFO
+ * underflow condition when the memory bus is busy.
+ *
+ * VI clients also need stronger compensation.
+ */
+ switch (client->swgroup) {
+ case TEGRA_SWGROUP_MPCORE:
+ case TEGRA_SWGROUP_PTC:
+ /*
+ * We always want lower latency for these clients, hence
+ * don't touch them.
+ */
+ return;
+
+ case TEGRA_SWGROUP_DC:
+ case TEGRA_SWGROUP_DCB:
+ arb_tolerance_compensation_nsec = 1050;
+ arb_tolerance_compensation_div = 2;
+ break;
+
+ case TEGRA_SWGROUP_VI:
+ arb_tolerance_compensation_nsec = 1050;
+ arb_tolerance_compensation_div = 1;
+ break;
+
+ default:
+ arb_tolerance_compensation_nsec = 150;
+ arb_tolerance_compensation_div = 1;
+ break;
+ }
+
+ if (arb_nsec > arb_tolerance_compensation_nsec)
+ arb_nsec -= arb_tolerance_compensation_nsec;
+ else
+ arb_nsec = 0;
+
+ arb_nsec /= arb_tolerance_compensation_div;
+
+ /*
+ * Latency allowance is a number of ticks a request from a particular
+ * client may wait in the EMEM arbiter before it becomes a high-priority
+ * request.
+ */
+ la_ticks = arb_nsec / mc->tick;
+ la_ticks = min(la_ticks, la->mask);
+
+ value = mc_readl(mc, la->reg);
+ value &= ~(la->mask << la->shift);
+ value |= la_ticks << la->shift;
+ mc_writel(mc, value, la->reg);
+}
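Plugging hypothetical numbers into the computation above for a display client: a 16 * 128 byte FIFO at 1000 MB/s covers 2048 ns; the DC compensation (subtract 1050 ns, halve) leaves 499 ns, which with an assumed 30 ns arbitration tick (the real value comes from mc->tick) yields a latency allowance of 16 ticks:

	#include <stdio.h>

	#define NSEC_PER_USEC 1000

	int main(void)
	{
		unsigned int fifo_size = 16 * 128;	/* display0a FIFO, bytes */
		unsigned int bw_mbytes_sec = 1000;	/* requested bandwidth */
		unsigned int tick_ns = 30;		/* assumed mc->tick value */
		unsigned int arb_nsec, la_ticks;

		/* time the FIFO can bridge at the requested bandwidth: 2048 ns */
		arb_nsec = fifo_size * NSEC_PER_USEC / bw_mbytes_sec;

		/* DC/DCB compensation: subtract 1050 ns, then divide by 2 */
		arb_nsec = (arb_nsec - 1050) / 2;	/* 499 ns */

		/* ticks the request may wait before becoming high-priority */
		la_ticks = arb_nsec / tick_ns;		/* 16 ticks */
		if (la_ticks > 0xff)
			la_ticks = 0xff;		/* clamp to the LA field mask */

		printf("latency allowance: %u ticks\n", la_ticks);

		return 0;
	}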
+
+static int tegra30_mc_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ struct tegra_mc *mc = icc_provider_to_tegra_mc(src->provider);
+ const struct tegra_mc_client *client = &mc->soc->clients[src->id];
+ u64 peak_bandwidth = icc_units_to_bps(src->peak_bw);
+
+ /*
+ * Skip pre-initialization that is done by icc_node_add(), which sets
+ * bandwidth to maximum for all clients before drivers are loaded.
+ *
+ * This doesn't make sense for us because we don't have drivers for all
+ * clients and it's okay to keep the configuration left by the
+ * bootloader during boot, at least for today.
+ */
+ if (src == dst)
+ return 0;
+
+ /* convert bytes/sec to megabytes/sec */
+ do_div(peak_bandwidth, 1000000);
+
+ tegra30_mc_tune_client_latency(mc, client, peak_bandwidth);
+
+ return 0;
+}
+
+static int tegra30_mc_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
+ u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
+{
+ /*
+ * ISO clients need to reserve extra bandwidth up-front because
+ * there could be high bandwidth pressure during initial filling
+ * of the client's FIFO buffers. Secondly, we need to take into
+ * account the imperfections of the memory subsystem.
+ */
+ if (tag & TEGRA_MC_ICC_TAG_ISO)
+ peak_bw = tegra_mc_scale_percents(peak_bw, 400);
+
+ *agg_avg += avg_bw;
+ *agg_peak = max(*agg_peak, peak_bw);
+
+ return 0;
+}
+
+static struct icc_node_data *
+tegra30_mc_of_icc_xlate_extended(struct of_phandle_args *spec, void *data)
+{
+ struct tegra_mc *mc = icc_provider_to_tegra_mc(data);
+ const struct tegra_mc_client *client;
+ unsigned int i, idx = spec->args[0];
+ struct icc_node_data *ndata;
+ struct icc_node *node;
+
+ list_for_each_entry(node, &mc->provider.nodes, node_list) {
+ if (node->id != idx)
+ continue;
+
+ ndata = kzalloc(sizeof(*ndata), GFP_KERNEL);
+ if (!ndata)
+ return ERR_PTR(-ENOMEM);
+
+ client = &mc->soc->clients[idx];
+ ndata->node = node;
+
+ switch (client->swgroup) {
+ case TEGRA_SWGROUP_DC:
+ case TEGRA_SWGROUP_DCB:
+ case TEGRA_SWGROUP_PTC:
+ case TEGRA_SWGROUP_VI:
+ /* these clients are isochronous by default */
+ ndata->tag = TEGRA_MC_ICC_TAG_ISO;
+ break;
+
+ default:
+ ndata->tag = TEGRA_MC_ICC_TAG_DEFAULT;
+ break;
+ }
+
+ return ndata;
+ }
+
+ for (i = 0; i < mc->soc->num_clients; i++) {
+ if (mc->soc->clients[i].id == idx)
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ dev_err(mc->dev, "invalid ICC client ID %u\n", idx);
+
+ return ERR_PTR(-EINVAL);
+}
+
+static const struct tegra_mc_icc_ops tegra30_mc_icc_ops = {
+ .xlate_extended = tegra30_mc_of_icc_xlate_extended,
+ .aggregate = tegra30_mc_icc_aggregate,
+ .set = tegra30_mc_icc_set,
+};
+
const struct tegra_mc_soc tegra30_mc_soc = {
.clients = tegra30_mc_clients,
.num_clients = ARRAY_SIZE(tegra30_mc_clients),
@@ -1025,4 +1267,5 @@ const struct tegra_mc_soc tegra30_mc_soc = {
.reset_ops = &tegra_mc_reset_ops_common,
.resets = tegra30_mc_resets,
.num_resets = ARRAY_SIZE(tegra30_mc_resets),
+ .icc_ops = &tegra30_mc_icc_ops,
};
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 9903e9660a38..549797d0301d 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -57,7 +57,7 @@
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
-#include <linux/interrupt.h> /* needed for in_interrupt() proto */
+#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/kthread.h>
#include <scsi/scsi_host.h>
@@ -473,7 +473,6 @@ mpt_turbo_reply(MPT_ADAPTER *ioc, u32 pa)
mpt_free_msg_frame(ioc, mf);
mb();
return;
- break;
}
mr = (MPT_FRAME_HDR *) CAST_U32_TO_PTR(pa);
break;
@@ -6336,7 +6335,6 @@ SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp)
* Page header is updated.
*
* Returns 0 for success
- * -EPERM if not allowed due to ISR context
* -EAGAIN if no msg frames currently available
* -EFAULT for non-successful reply or no reply (timeout)
*/
@@ -6354,19 +6352,10 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
u8 page_type = 0, extend_page;
unsigned long timeleft;
unsigned long flags;
- int in_isr;
u8 issue_hard_reset = 0;
u8 retry_count = 0;
- /* Prevent calling wait_event() (below), if caller happens
- * to be in ISR context, because that is fatal!
- */
- in_isr = in_interrupt();
- if (in_isr) {
- dcprintk(ioc, printk(MYIOC_s_WARN_FMT "Config request not allowed in ISR context!\n",
- ioc->name));
- return -EPERM;
- }
+ might_sleep();
/* don't send a config page during diag reset */
spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index f92b0433f599..0484e9c15c09 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -50,7 +50,7 @@
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/delay.h> /* for mdelay */
-#include <linux/interrupt.h> /* needed for in_interrupt() proto */
+#include <linux/interrupt.h>
#include <linux/reboot.h> /* notifier code */
#include <linux/workqueue.h>
#include <linux/sort.h>
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 18b91ea1a353..5eb0b3361e4e 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -289,6 +289,7 @@ mptsas_add_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
spin_lock_irqsave(&ioc->fw_event_lock, flags);
list_add_tail(&fw_event->list, &ioc->fw_event_list);
+ fw_event->users = 1;
INIT_DELAYED_WORK(&fw_event->work, mptsas_firmware_event_work);
devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: add (fw_event=0x%p)"
"on cpuid %d\n", ioc->name, __func__,
@@ -314,6 +315,15 @@ mptsas_requeue_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
+static void __mptsas_free_fw_event(MPT_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+ devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: kfree (fw_event=0x%p)\n",
+ ioc->name, __func__, fw_event));
+ list_del(&fw_event->list);
+ kfree(fw_event);
+}
+
/* free memory associated to a sas firmware event */
static void
mptsas_free_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event)
@@ -321,10 +331,9 @@ mptsas_free_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event)
unsigned long flags;
spin_lock_irqsave(&ioc->fw_event_lock, flags);
- devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: kfree (fw_event=0x%p)\n",
- ioc->name, __func__, fw_event));
- list_del(&fw_event->list);
- kfree(fw_event);
+ fw_event->users--;
+ if (!fw_event->users)
+ __mptsas_free_fw_event(ioc, fw_event);
spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
@@ -333,9 +342,10 @@ mptsas_free_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event)
static void
mptsas_cleanup_fw_event_q(MPT_ADAPTER *ioc)
{
- struct fw_event_work *fw_event, *next;
+ struct fw_event_work *fw_event;
struct mptsas_target_reset_event *target_reset_list, *n;
MPT_SCSI_HOST *hd = shost_priv(ioc->sh);
+ unsigned long flags;
/* flush the target_reset_list */
if (!list_empty(&hd->target_reset_list)) {
@@ -350,14 +360,29 @@ mptsas_cleanup_fw_event_q(MPT_ADAPTER *ioc)
}
}
- if (list_empty(&ioc->fw_event_list) ||
- !ioc->fw_event_q || in_interrupt())
+ if (list_empty(&ioc->fw_event_list) || !ioc->fw_event_q)
return;
- list_for_each_entry_safe(fw_event, next, &ioc->fw_event_list, list) {
- if (cancel_delayed_work(&fw_event->work))
- mptsas_free_fw_event(ioc, fw_event);
+ spin_lock_irqsave(&ioc->fw_event_lock, flags);
+
+ while (!list_empty(&ioc->fw_event_list)) {
+ bool canceled = false;
+
+ fw_event = list_first_entry(&ioc->fw_event_list,
+ struct fw_event_work, list);
+ fw_event->users++;
+ spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+ if (cancel_delayed_work_sync(&fw_event->work))
+ canceled = true;
+
+ spin_lock_irqsave(&ioc->fw_event_lock, flags);
+ if (canceled)
+ fw_event->users--;
+ fw_event->users--;
+ WARN_ON_ONCE(fw_event->users);
+ __mptsas_free_fw_event(ioc, fw_event);
}
+ spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
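The rework above pins each event with a user count so the spinlock can be dropped around cancel_delayed_work_sync(), which may sleep and must not run under fw_event_lock; without the count, the work handler could free the event while the cleanup path still holds a pointer to it. The pin/cancel/unpin shape, condensed from the patch:

	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	fw_event->users++;			/* pin: handler cannot free it */
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);

	cancel_delayed_work_sync(&fw_event->work);	/* may sleep, lock dropped */

	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	fw_event->users--;			/* unpin */
	if (!fw_event->users)
		__mptsas_free_fw_event(ioc, fw_event);
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);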
diff --git a/drivers/message/fusion/mptsas.h b/drivers/message/fusion/mptsas.h
index e35b13891fe4..71abf3477495 100644
--- a/drivers/message/fusion/mptsas.h
+++ b/drivers/message/fusion/mptsas.h
@@ -107,6 +107,7 @@ struct mptsas_hotplug_event {
struct fw_event_work {
struct list_head list;
struct delayed_work work;
+ int users;
MPT_ADAPTER *ioc;
u32 event;
u8 retries;
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index e7f0d4ae0f96..ce2e5b21978e 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -52,7 +52,7 @@
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/delay.h> /* for mdelay */
-#include <linux/interrupt.h> /* needed for in_interrupt() proto */
+#include <linux/interrupt.h>
#include <linux/reboot.h> /* notifier code */
#include <linux/workqueue.h>
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index eabc4de5816c..af0ce5611e4a 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -52,7 +52,7 @@
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/delay.h> /* for mdelay */
-#include <linux/interrupt.h> /* needed for in_interrupt() proto */
+#include <linux/interrupt.h>
#include <linux/reboot.h> /* notifier code */
#include <linux/workqueue.h>
#include <linux/raid_class.h>
diff --git a/drivers/mfd/88pm800.c b/drivers/mfd/88pm800.c
index 4e8d0d6b9b5c..c7f964996a91 100644
--- a/drivers/mfd/88pm800.c
+++ b/drivers/mfd/88pm800.c
@@ -121,7 +121,7 @@ static const struct i2c_device_id pm80x_id_table[] = {
};
MODULE_DEVICE_TABLE(i2c, pm80x_id_table);
-static struct resource rtc_resources[] = {
+static const struct resource rtc_resources[] = {
{
.name = "88pm80x-rtc",
.start = PM800_IRQ_RTC,
diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c
index c9bae71f643a..b1e829ea909b 100644
--- a/drivers/mfd/88pm860x-core.c
+++ b/drivers/mfd/88pm860x-core.c
@@ -26,99 +26,99 @@
#define INT_STATUS_NUM 3
-static struct resource bk0_resources[] = {
+static const struct resource bk0_resources[] = {
{2, 2, "duty cycle", IORESOURCE_REG, },
{3, 3, "always on", IORESOURCE_REG, },
{3, 3, "current", IORESOURCE_REG, },
};
-static struct resource bk1_resources[] = {
+static const struct resource bk1_resources[] = {
{4, 4, "duty cycle", IORESOURCE_REG, },
{5, 5, "always on", IORESOURCE_REG, },
{5, 5, "current", IORESOURCE_REG, },
};
-static struct resource bk2_resources[] = {
+static const struct resource bk2_resources[] = {
{6, 6, "duty cycle", IORESOURCE_REG, },
{7, 7, "always on", IORESOURCE_REG, },
{5, 5, "current", IORESOURCE_REG, },
};
-static struct resource led0_resources[] = {
+static const struct resource led0_resources[] = {
/* RGB1 Red LED */
{0xd, 0xd, "control", IORESOURCE_REG, },
{0xc, 0xc, "blink", IORESOURCE_REG, },
};
-static struct resource led1_resources[] = {
+static const struct resource led1_resources[] = {
/* RGB1 Green LED */
{0xe, 0xe, "control", IORESOURCE_REG, },
{0xc, 0xc, "blink", IORESOURCE_REG, },
};
-static struct resource led2_resources[] = {
+static const struct resource led2_resources[] = {
/* RGB1 Blue LED */
{0xf, 0xf, "control", IORESOURCE_REG, },
{0xc, 0xc, "blink", IORESOURCE_REG, },
};
-static struct resource led3_resources[] = {
+static const struct resource led3_resources[] = {
/* RGB2 Red LED */
{0x9, 0x9, "control", IORESOURCE_REG, },
{0x8, 0x8, "blink", IORESOURCE_REG, },
};
-static struct resource led4_resources[] = {
+static const struct resource led4_resources[] = {
/* RGB2 Green LED */
{0xa, 0xa, "control", IORESOURCE_REG, },
{0x8, 0x8, "blink", IORESOURCE_REG, },
};
-static struct resource led5_resources[] = {
+static const struct resource led5_resources[] = {
/* RGB2 Blue LED */
{0xb, 0xb, "control", IORESOURCE_REG, },
{0x8, 0x8, "blink", IORESOURCE_REG, },
};
-static struct resource buck1_resources[] = {
+static const struct resource buck1_resources[] = {
{0x24, 0x24, "buck set", IORESOURCE_REG, },
};
-static struct resource buck2_resources[] = {
+static const struct resource buck2_resources[] = {
{0x25, 0x25, "buck set", IORESOURCE_REG, },
};
-static struct resource buck3_resources[] = {
+static const struct resource buck3_resources[] = {
{0x26, 0x26, "buck set", IORESOURCE_REG, },
};
-static struct resource ldo1_resources[] = {
+static const struct resource ldo1_resources[] = {
{0x10, 0x10, "ldo set", IORESOURCE_REG, },
};
-static struct resource ldo2_resources[] = {
+static const struct resource ldo2_resources[] = {
{0x11, 0x11, "ldo set", IORESOURCE_REG, },
};
-static struct resource ldo3_resources[] = {
+static const struct resource ldo3_resources[] = {
{0x12, 0x12, "ldo set", IORESOURCE_REG, },
};
-static struct resource ldo4_resources[] = {
+static const struct resource ldo4_resources[] = {
{0x13, 0x13, "ldo set", IORESOURCE_REG, },
};
-static struct resource ldo5_resources[] = {
+static const struct resource ldo5_resources[] = {
{0x14, 0x14, "ldo set", IORESOURCE_REG, },
};
-static struct resource ldo6_resources[] = {
+static const struct resource ldo6_resources[] = {
{0x15, 0x15, "ldo set", IORESOURCE_REG, },
};
-static struct resource ldo7_resources[] = {
+static const struct resource ldo7_resources[] = {
{0x16, 0x16, "ldo set", IORESOURCE_REG, },
};
-static struct resource ldo8_resources[] = {
+static const struct resource ldo8_resources[] = {
{0x17, 0x17, "ldo set", IORESOURCE_REG, },
};
-static struct resource ldo9_resources[] = {
+static const struct resource ldo9_resources[] = {
{0x18, 0x18, "ldo set", IORESOURCE_REG, },
};
-static struct resource ldo10_resources[] = {
+static const struct resource ldo10_resources[] = {
{0x19, 0x19, "ldo set", IORESOURCE_REG, },
};
-static struct resource ldo12_resources[] = {
+static const struct resource ldo12_resources[] = {
{0x1a, 0x1a, "ldo set", IORESOURCE_REG, },
};
-static struct resource ldo_vibrator_resources[] = {
+static const struct resource ldo_vibrator_resources[] = {
{0x28, 0x28, "ldo set", IORESOURCE_REG, },
};
-static struct resource ldo14_resources[] = {
+static const struct resource ldo14_resources[] = {
{0x1b, 0x1b, "ldo set", IORESOURCE_REG, },
};
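These const conversions (and the many like them below) are safe because the MFD core copies a cell's resource template into the child platform device at registration time; the template itself is never written afterwards, so it can live in rodata. The resulting shape, sketched with hypothetical names:

static const struct resource demo_resources[] = {
	DEFINE_RES_IRQ_NAMED(0, "DEMO"),	/* template only */
};

static const struct mfd_cell demo_cell = {
	.name		= "demo-dev",
	.resources	= demo_resources,	/* copied at mfd_add_devices() time */
	.num_resources	= ARRAY_SIZE(demo_resources),
};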
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index cc0b73280c68..bdfce7b15621 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -730,33 +730,9 @@ config MFD_KEMPLD
select MFD_CORE
help
This is the core driver for the PLD (Programmable Logic Device) found
- on some Kontron ETX and COMexpress (ETXexpress) modules. The PLD
- device may provide functions like watchdog, GPIO, UART and I2C bus.
-
- The following modules are supported:
- * COMe-bBD#
- * COMe-bBL6
- * COMe-bHL6
- * COMe-bSL6
- * COMe-bIP#
- * COMe-bKL6
- * COMe-bPC2 (ETXexpress-PC)
- * COMe-bSC# (ETXexpress-SC T#)
- * COMe-cAL6
- * COMe-cBL6
- * COMe-cBT6
- * COMe-cBW6
- * COMe-cCT6
- * COMe-cDC2 (microETXexpress-DC)
- * COMe-cHL6
- * COMe-cKL6
- * COMe-cPC2 (microETXexpress-PC)
- * COMe-cSL6
- * COMe-mAL10
- * COMe-mBT10
- * COMe-mCT10
- * COMe-mTT10 (nanoETXexpress-TT)
- * ETX-OH
+ on some Kontron ETX and nearly all COMexpress (ETXexpress) modules as
+ well as on some other Kontron products. The PLD device may provide
+ functions like watchdog, GPIO, UART and I2C bus.
This driver can also be built as a module. If so, the module
will be called kempld-core.
@@ -1199,6 +1175,7 @@ config MFD_SIMPLE_MFD_I2C
config MFD_SL28CPLD
tristate "Kontron sl28cpld Board Management Controller"
depends on I2C
+ depends on ARCH_LAYERSCAPE || COMPILE_TEST
select MFD_SIMPLE_MFD_I2C
help
Say yes here to enable support for the Kontron sl28cpld board
@@ -1442,6 +1419,7 @@ config MFD_TI_LMU
config MFD_OMAP_USB_HOST
bool "TI OMAP USBHS core and TLL driver"
depends on USB_EHCI_HCD_OMAP || USB_OHCI_HCD_OMAP3
+ depends on COMMON_CLK
default y
help
 This is the core driver for the OMAP EHCI and OHCI drivers.
diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
index 6d1bf7c3ca3b..e43dea89b094 100644
--- a/drivers/mfd/ab8500-debugfs.c
+++ b/drivers/mfd/ab8500-debugfs.c
@@ -1513,26 +1513,14 @@ static int ab8500_interrupts_show(struct seq_file *s, void *p)
{
int line;
- seq_puts(s, "name: number: number of: wake:\n");
+ seq_puts(s, "name: number: irq: number of: wake:\n");
for (line = 0; line < num_interrupt_lines; line++) {
- struct irq_desc *desc = irq_to_desc(line + irq_first);
-
- seq_printf(s, "%3i: %6i %4i",
+ seq_printf(s, "%3i: %4i %6i %4i\n",
line,
+ line + irq_first,
num_interrupts[line],
num_wake_interrupts[line]);
-
- if (desc && desc->name)
- seq_printf(s, "-%-8s", desc->name);
- if (desc && desc->action) {
- struct irqaction *action = desc->action;
-
- seq_printf(s, " %s", action->name);
- while ((action = action->next) != NULL)
- seq_printf(s, ", %s", action->name);
- }
- seq_putc(s, '\n');
}
return 0;
diff --git a/drivers/mfd/altera-sysmgr.c b/drivers/mfd/altera-sysmgr.c
index 41076d121dd5..193a96c8b1ea 100644
--- a/drivers/mfd/altera-sysmgr.c
+++ b/drivers/mfd/altera-sysmgr.c
@@ -152,7 +152,7 @@ static int sysmgr_probe(struct platform_device *pdev)
if (!base)
return -ENOMEM;
- sysmgr_config.max_register = res->end - res->start - 3;
+ sysmgr_config.max_register = resource_size(res) - 3;
regmap = devm_regmap_init_mmio(dev, base, &sysmgr_config);
}
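resource_size() counts the inclusive end, i.e. end - start + 1, so the open-coded end - start - 3 was one byte short of the intended size - 3. A worked example with illustrative addresses:

/* res spans 0x1000..0x1fff (inclusive)
 *
 *   resource_size(res)        = 0x1fff - 0x1000 + 1 = 0x1000
 *   resource_size(res) - 3    = 0x0ffd   <- intended max_register
 *   res->end - res->start - 3 = 0x0ffc   <- old value, one short
 */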
diff --git a/drivers/mfd/at91-usart.c b/drivers/mfd/at91-usart.c
index 6a8351a4588e..7f08cb60c58b 100644
--- a/drivers/mfd/at91-usart.c
+++ b/drivers/mfd/at91-usart.c
@@ -15,15 +15,11 @@
#include <linux/of.h>
#include <linux/property.h>
-static const struct mfd_cell at91_usart_spi_subdev = {
- .name = "at91_usart_spi",
- .of_compatible = "microchip,at91sam9g45-usart-spi",
-};
+static const struct mfd_cell at91_usart_spi_subdev =
+ MFD_CELL_NAME("at91_usart_spi");
-static const struct mfd_cell at91_usart_serial_subdev = {
- .name = "atmel_usart_serial",
- .of_compatible = "atmel,at91rm9200-usart-serial",
-};
+static const struct mfd_cell at91_usart_serial_subdev =
+ MFD_CELL_NAME("atmel_usart_serial");
static int at91_usart_mode_probe(struct platform_device *pdev)
{
diff --git a/drivers/mfd/axp20x-i2c.c b/drivers/mfd/axp20x-i2c.c
index 068e9c083f13..2cfde81f5fbf 100644
--- a/drivers/mfd/axp20x-i2c.c
+++ b/drivers/mfd/axp20x-i2c.c
@@ -57,6 +57,7 @@ static int axp20x_i2c_remove(struct i2c_client *i2c)
return axp20x_device_remove(axp20x);
}
+#ifdef CONFIG_OF
static const struct of_device_id axp20x_i2c_of_match[] = {
{ .compatible = "x-powers,axp152", .data = (void *)AXP152_ID },
{ .compatible = "x-powers,axp202", .data = (void *)AXP202_ID },
@@ -68,6 +69,7 @@ static const struct of_device_id axp20x_i2c_of_match[] = {
{ },
};
MODULE_DEVICE_TABLE(of, axp20x_i2c_of_match);
+#endif
static const struct i2c_device_id axp20x_i2c_id[] = {
{ "axp152", 0 },
diff --git a/drivers/mfd/bcm590xx.c b/drivers/mfd/bcm590xx.c
index bfac5dc091ca..6ca337cde84c 100644
--- a/drivers/mfd/bcm590xx.c
+++ b/drivers/mfd/bcm590xx.c
@@ -107,7 +107,7 @@ MODULE_DEVICE_TABLE(i2c, bcm590xx_i2c_id);
static struct i2c_driver bcm590xx_i2c_driver = {
.driver = {
.name = "bcm590xx",
- .of_match_table = of_match_ptr(bcm590xx_of_match),
+ .of_match_table = bcm590xx_of_match,
},
.probe = bcm590xx_i2c_probe,
.id_table = bcm590xx_i2c_id,
diff --git a/drivers/mfd/da9055-core.c b/drivers/mfd/da9055-core.c
index 6d0af8486269..d074d213e661 100644
--- a/drivers/mfd/da9055-core.c
+++ b/drivers/mfd/da9055-core.c
@@ -254,14 +254,14 @@ const struct regmap_config da9055_regmap_config = {
};
EXPORT_SYMBOL_GPL(da9055_regmap_config);
-static struct resource da9055_onkey_resource = {
+static const struct resource da9055_onkey_resource = {
.name = "ONKEY",
.start = DA9055_IRQ_NONKEY,
.end = DA9055_IRQ_NONKEY,
.flags = IORESOURCE_IRQ,
};
-static struct resource da9055_rtc_resource[] = {
+static const struct resource da9055_rtc_resource[] = {
{
.name = "ALM",
.start = DA9055_IRQ_ALARM,
@@ -276,14 +276,14 @@ static struct resource da9055_rtc_resource[] = {
},
};
-static struct resource da9055_hwmon_resource = {
+static const struct resource da9055_hwmon_resource = {
.name = "HWMON",
.start = DA9055_IRQ_HWMON,
.end = DA9055_IRQ_HWMON,
.flags = IORESOURCE_IRQ,
};
-static struct resource da9055_ld05_6_resource = {
+static const struct resource da9055_ld05_6_resource = {
.name = "REGULATOR",
.start = DA9055_IRQ_REGULATOR,
.end = DA9055_IRQ_REGULATOR,
diff --git a/drivers/mfd/da9055-i2c.c b/drivers/mfd/da9055-i2c.c
index 950b75ff6b04..bc60433b68db 100644
--- a/drivers/mfd/da9055-i2c.c
+++ b/drivers/mfd/da9055-i2c.c
@@ -74,7 +74,7 @@ static struct i2c_driver da9055_i2c_driver = {
.id_table = da9055_i2c_id,
.driver = {
.name = "da9055-pmic",
- .of_match_table = of_match_ptr(da9055_of_match),
+ .of_match_table = da9055_of_match,
},
};
diff --git a/drivers/mfd/da9062-core.c b/drivers/mfd/da9062-core.c
index fc30726e2e27..8d913375152d 100644
--- a/drivers/mfd/da9062-core.c
+++ b/drivers/mfd/da9062-core.c
@@ -160,23 +160,23 @@ static struct regmap_irq_chip da9062_irq_chip = {
.ack_base = DA9062AA_EVENT_A,
};
-static struct resource da9061_core_resources[] = {
+static const struct resource da9061_core_resources[] = {
DEFINE_RES_IRQ_NAMED(DA9061_IRQ_VDD_WARN, "VDD_WARN"),
};
-static struct resource da9061_regulators_resources[] = {
+static const struct resource da9061_regulators_resources[] = {
DEFINE_RES_IRQ_NAMED(DA9061_IRQ_LDO_LIM, "LDO_LIM"),
};
-static struct resource da9061_thermal_resources[] = {
+static const struct resource da9061_thermal_resources[] = {
DEFINE_RES_IRQ_NAMED(DA9061_IRQ_TEMP, "THERMAL"),
};
-static struct resource da9061_wdt_resources[] = {
+static const struct resource da9061_wdt_resources[] = {
DEFINE_RES_IRQ_NAMED(DA9061_IRQ_WDG_WARN, "WD_WARN"),
};
-static struct resource da9061_onkey_resources[] = {
+static const struct resource da9061_onkey_resources[] = {
DEFINE_RES_IRQ_NAMED(DA9061_IRQ_ONKEY, "ONKEY"),
};
@@ -211,32 +211,32 @@ static const struct mfd_cell da9061_devs[] = {
},
};
-static struct resource da9062_core_resources[] = {
+static const struct resource da9062_core_resources[] = {
DEFINE_RES_NAMED(DA9062_IRQ_VDD_WARN, 1, "VDD_WARN", IORESOURCE_IRQ),
};
-static struct resource da9062_regulators_resources[] = {
+static const struct resource da9062_regulators_resources[] = {
DEFINE_RES_NAMED(DA9062_IRQ_LDO_LIM, 1, "LDO_LIM", IORESOURCE_IRQ),
};
-static struct resource da9062_thermal_resources[] = {
+static const struct resource da9062_thermal_resources[] = {
DEFINE_RES_NAMED(DA9062_IRQ_TEMP, 1, "THERMAL", IORESOURCE_IRQ),
};
-static struct resource da9062_wdt_resources[] = {
+static const struct resource da9062_wdt_resources[] = {
DEFINE_RES_NAMED(DA9062_IRQ_WDG_WARN, 1, "WD_WARN", IORESOURCE_IRQ),
};
-static struct resource da9062_rtc_resources[] = {
+static const struct resource da9062_rtc_resources[] = {
DEFINE_RES_NAMED(DA9062_IRQ_ALARM, 1, "ALARM", IORESOURCE_IRQ),
DEFINE_RES_NAMED(DA9062_IRQ_TICK, 1, "TICK", IORESOURCE_IRQ),
};
-static struct resource da9062_onkey_resources[] = {
+static const struct resource da9062_onkey_resources[] = {
DEFINE_RES_NAMED(DA9062_IRQ_ONKEY, 1, "ONKEY", IORESOURCE_IRQ),
};
-static struct resource da9062_gpio_resources[] = {
+static const struct resource da9062_gpio_resources[] = {
DEFINE_RES_NAMED(DA9062_IRQ_GPI0, 1, "GPI0", IORESOURCE_IRQ),
DEFINE_RES_NAMED(DA9062_IRQ_GPI1, 1, "GPI1", IORESOURCE_IRQ),
DEFINE_RES_NAMED(DA9062_IRQ_GPI2, 1, "GPI2", IORESOURCE_IRQ),
@@ -736,7 +736,7 @@ MODULE_DEVICE_TABLE(i2c, da9062_i2c_id);
static struct i2c_driver da9062_i2c_driver = {
.driver = {
.name = "da9062",
- .of_match_table = of_match_ptr(da9062_dt_ids),
+ .of_match_table = da9062_dt_ids,
},
.probe = da9062_i2c_probe,
.remove = da9062_i2c_remove,
diff --git a/drivers/mfd/da9063-core.c b/drivers/mfd/da9063-core.c
index a353d52210a9..df407c3afce3 100644
--- a/drivers/mfd/da9063-core.c
+++ b/drivers/mfd/da9063-core.c
@@ -29,7 +29,7 @@
#include <linux/uaccess.h>
-static struct resource da9063_regulators_resources[] = {
+static const struct resource da9063_regulators_resources[] = {
{
.name = "LDO_LIM",
.start = DA9063_IRQ_LDO_LIM,
@@ -38,7 +38,7 @@ static struct resource da9063_regulators_resources[] = {
},
};
-static struct resource da9063_rtc_resources[] = {
+static const struct resource da9063_rtc_resources[] = {
{
.name = "ALARM",
.start = DA9063_IRQ_ALARM,
@@ -53,7 +53,7 @@ static struct resource da9063_rtc_resources[] = {
}
};
-static struct resource da9063_onkey_resources[] = {
+static const struct resource da9063_onkey_resources[] = {
{
.name = "ONKEY",
.start = DA9063_IRQ_ONKEY,
@@ -62,7 +62,7 @@ static struct resource da9063_onkey_resources[] = {
},
};
-static struct resource da9063_hwmon_resources[] = {
+static const struct resource da9063_hwmon_resources[] = {
{
.start = DA9063_IRQ_ADC_RDY,
.end = DA9063_IRQ_ADC_RDY,
diff --git a/drivers/mfd/da9063-i2c.c b/drivers/mfd/da9063-i2c.c
index b8217ad303ce..3781d0bb7786 100644
--- a/drivers/mfd/da9063-i2c.c
+++ b/drivers/mfd/da9063-i2c.c
@@ -455,7 +455,7 @@ MODULE_DEVICE_TABLE(i2c, da9063_i2c_id);
static struct i2c_driver da9063_i2c_driver = {
.driver = {
.name = "da9063",
- .of_match_table = of_match_ptr(da9063_dt_ids),
+ .of_match_table = da9063_dt_ids,
},
.probe = da9063_i2c_probe,
.id_table = da9063_i2c_id,
diff --git a/drivers/mfd/da9150-core.c b/drivers/mfd/da9150-core.c
index 7f0aa1e8db96..58009c8cb870 100644
--- a/drivers/mfd/da9150-core.c
+++ b/drivers/mfd/da9150-core.c
@@ -350,18 +350,18 @@ static const struct regmap_irq_chip da9150_regmap_irq_chip = {
.num_irqs = ARRAY_SIZE(da9150_irqs),
};
-static struct resource da9150_gpadc_resources[] = {
+static const struct resource da9150_gpadc_resources[] = {
DEFINE_RES_IRQ_NAMED(DA9150_IRQ_GPADC, "GPADC"),
};
-static struct resource da9150_charger_resources[] = {
+static const struct resource da9150_charger_resources[] = {
DEFINE_RES_IRQ_NAMED(DA9150_IRQ_CHG, "CHG_STATUS"),
DEFINE_RES_IRQ_NAMED(DA9150_IRQ_TJUNC, "CHG_TJUNC"),
DEFINE_RES_IRQ_NAMED(DA9150_IRQ_VFAULT, "CHG_VFAULT"),
DEFINE_RES_IRQ_NAMED(DA9150_IRQ_VBUS, "CHG_VBUS"),
};
-static struct resource da9150_fg_resources[] = {
+static const struct resource da9150_fg_resources[] = {
DEFINE_RES_IRQ_NAMED(DA9150_IRQ_FG, "FG"),
};
@@ -511,7 +511,7 @@ MODULE_DEVICE_TABLE(of, da9150_of_match);
static struct i2c_driver da9150_driver = {
.driver = {
.name = "da9150",
- .of_match_table = of_match_ptr(da9150_of_match),
+ .of_match_table = da9150_of_match,
},
.probe = da9150_probe,
.remove = da9150_remove,
diff --git a/drivers/mfd/ene-kb3930.c b/drivers/mfd/ene-kb3930.c
index 1c32ff586816..83243e668e3f 100644
--- a/drivers/mfd/ene-kb3930.c
+++ b/drivers/mfd/ene-kb3930.c
@@ -202,7 +202,7 @@ static struct i2c_driver kb3930_driver = {
.remove = kb3930_remove,
.driver = {
.name = "ene-kb3930",
- .of_match_table = of_match_ptr(kb3930_dt_ids),
+ .of_match_table = kb3930_dt_ids,
},
};
module_i2c_driver(kb3930_driver);
diff --git a/drivers/mfd/fsl-imx25-tsadc.c b/drivers/mfd/fsl-imx25-tsadc.c
index a016b39fe9b0..5f6f0a83e1c5 100644
--- a/drivers/mfd/fsl-imx25-tsadc.c
+++ b/drivers/mfd/fsl-imx25-tsadc.c
@@ -196,7 +196,7 @@ MODULE_DEVICE_TABLE(of, mx25_tsadc_ids);
static struct platform_driver mx25_tsadc_driver = {
.driver = {
.name = "mx25-tsadc",
- .of_match_table = of_match_ptr(mx25_tsadc_ids),
+ .of_match_table = mx25_tsadc_ids,
},
.probe = mx25_tsadc_probe,
.remove = mx25_tsadc_remove,
diff --git a/drivers/mfd/hi655x-pmic.c b/drivers/mfd/hi655x-pmic.c
index 7e3959aaa285..d3c86a7a3805 100644
--- a/drivers/mfd/hi655x-pmic.c
+++ b/drivers/mfd/hi655x-pmic.c
@@ -49,7 +49,7 @@ static struct regmap_config hi655x_regmap_config = {
.max_register = HI655X_BUS_ADDR(0x400) - HI655X_STRIDE,
};
-static struct resource pwrkey_resources[] = {
+static const struct resource pwrkey_resources[] = {
{
.name = "down",
.start = PWRON_D20R_INT,
diff --git a/drivers/mfd/htc-i2cpld.c b/drivers/mfd/htc-i2cpld.c
index 247f9849e54a..417b0355d904 100644
--- a/drivers/mfd/htc-i2cpld.c
+++ b/drivers/mfd/htc-i2cpld.c
@@ -346,6 +346,7 @@ static int htcpld_register_chip_i2c(
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_READ_BYTE_DATA)) {
dev_warn(dev, "i2c adapter %d non-functional\n",
pdata->i2c_adapter_id);
+ i2c_put_adapter(adapter);
return -EINVAL;
}
@@ -360,6 +361,7 @@ static int htcpld_register_chip_i2c(
 /* I2C device registration failed, continue with the next */
dev_warn(dev, "Unable to add I2C device for 0x%x\n",
plat_chip_data->addr);
+ i2c_put_adapter(adapter);
return PTR_ERR(client);
}
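i2c_get_adapter() takes a reference on the adapter; the fix pairs it with i2c_put_adapter() on both early-error paths so the reference cannot leak. The balanced shape, mirroring the probe path above:

	struct i2c_adapter *adapter = i2c_get_adapter(pdata->i2c_adapter_id);

	if (!adapter)
		return -ENODEV;

	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_READ_BYTE_DATA)) {
		i2c_put_adapter(adapter);	/* drop the reference on error */
		return -EINVAL;
	}
	/* ... on success the reference is kept until teardown ... */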
diff --git a/drivers/mfd/intel_msic.c b/drivers/mfd/intel_msic.c
index bb24c2a07900..daa772f8146b 100644
--- a/drivers/mfd/intel_msic.c
+++ b/drivers/mfd/intel_msic.c
@@ -50,23 +50,23 @@ struct intel_msic {
void __iomem *irq_base;
};
-static struct resource msic_touch_resources[] = {
+static const struct resource msic_touch_resources[] = {
DEFINE_RES_IRQ(0),
};
-static struct resource msic_adc_resources[] = {
+static const struct resource msic_adc_resources[] = {
DEFINE_RES_IRQ(0),
};
-static struct resource msic_battery_resources[] = {
+static const struct resource msic_battery_resources[] = {
DEFINE_RES_IRQ(0),
};
-static struct resource msic_gpio_resources[] = {
+static const struct resource msic_gpio_resources[] = {
DEFINE_RES_IRQ(0),
};
-static struct resource msic_audio_resources[] = {
+static const struct resource msic_audio_resources[] = {
DEFINE_RES_IRQ_NAMED(0, "IRQ"),
/*
* We will pass IRQ_BASE to the driver now but this can be removed
@@ -75,19 +75,19 @@ static struct resource msic_audio_resources[] = {
DEFINE_RES_MEM_NAMED(MSIC_IRQ_STATUS_ACCDET, 1, "IRQ_BASE"),
};
-static struct resource msic_hdmi_resources[] = {
+static const struct resource msic_hdmi_resources[] = {
DEFINE_RES_IRQ(0),
};
-static struct resource msic_thermal_resources[] = {
+static const struct resource msic_thermal_resources[] = {
DEFINE_RES_IRQ(0),
};
-static struct resource msic_power_btn_resources[] = {
+static const struct resource msic_power_btn_resources[] = {
DEFINE_RES_IRQ(0),
};
-static struct resource msic_ocd_resources[] = {
+static const struct resource msic_ocd_resources[] = {
DEFINE_RES_IRQ(0),
};
diff --git a/drivers/mfd/intel_quark_i2c_gpio.c b/drivers/mfd/intel_quark_i2c_gpio.c
index 84ca7902e1df..fe8ca945f367 100644
--- a/drivers/mfd/intel_quark_i2c_gpio.c
+++ b/drivers/mfd/intel_quark_i2c_gpio.c
@@ -72,7 +72,7 @@ static const struct dmi_system_id dmi_platform_info[] = {
{}
};
-static struct resource intel_quark_i2c_res[] = {
+static const struct resource intel_quark_i2c_res[] = {
[INTEL_QUARK_IORES_MEM] = {
.flags = IORESOURCE_MEM,
},
@@ -85,7 +85,7 @@ static struct mfd_cell_acpi_match intel_quark_acpi_match_i2c = {
.adr = MFD_ACPI_MATCH_I2C,
};
-static struct resource intel_quark_gpio_res[] = {
+static const struct resource intel_quark_gpio_res[] = {
[INTEL_QUARK_IORES_MEM] = {
.flags = IORESOURCE_MEM,
},
diff --git a/drivers/mfd/intel_soc_pmic_bxtwc.c b/drivers/mfd/intel_soc_pmic_bxtwc.c
index eba89780dbe7..47d0d3a69a58 100644
--- a/drivers/mfd/intel_soc_pmic_bxtwc.c
+++ b/drivers/mfd/intel_soc_pmic_bxtwc.c
@@ -200,32 +200,32 @@ static struct regmap_irq_chip bxtwc_regmap_irq_chip_crit = {
.num_regs = 1,
};
-static struct resource gpio_resources[] = {
+static const struct resource gpio_resources[] = {
DEFINE_RES_IRQ_NAMED(BXTWC_GPIO_LVL1_IRQ, "GPIO"),
};
-static struct resource adc_resources[] = {
+static const struct resource adc_resources[] = {
DEFINE_RES_IRQ_NAMED(BXTWC_ADC_IRQ, "ADC"),
};
-static struct resource usbc_resources[] = {
+static const struct resource usbc_resources[] = {
DEFINE_RES_IRQ(BXTWC_USBC_IRQ),
};
-static struct resource charger_resources[] = {
+static const struct resource charger_resources[] = {
DEFINE_RES_IRQ_NAMED(BXTWC_CHGR0_IRQ, "CHARGER"),
DEFINE_RES_IRQ_NAMED(BXTWC_CHGR1_IRQ, "CHARGER1"),
};
-static struct resource thermal_resources[] = {
+static const struct resource thermal_resources[] = {
DEFINE_RES_IRQ(BXTWC_THRM_LVL1_IRQ),
};
-static struct resource bcu_resources[] = {
+static const struct resource bcu_resources[] = {
DEFINE_RES_IRQ_NAMED(BXTWC_BCU_IRQ, "BCU"),
};
-static struct resource tmu_resources[] = {
+static const struct resource tmu_resources[] = {
DEFINE_RES_IRQ_NAMED(BXTWC_TMU_IRQ, "TMU"),
};
diff --git a/drivers/mfd/intel_soc_pmic_chtdc_ti.c b/drivers/mfd/intel_soc_pmic_chtdc_ti.c
index 64b5c3cc30e7..1c7577b881ff 100644
--- a/drivers/mfd/intel_soc_pmic_chtdc_ti.c
+++ b/drivers/mfd/intel_soc_pmic_chtdc_ti.c
@@ -32,23 +32,23 @@ enum {
CHTDC_TI_CCEOCAL = 7, /* battery */
};
-static struct resource power_button_resources[] = {
+static const struct resource power_button_resources[] = {
DEFINE_RES_IRQ(CHTDC_TI_PWRBTN),
};
-static struct resource thermal_resources[] = {
+static const struct resource thermal_resources[] = {
DEFINE_RES_IRQ(CHTDC_TI_DIETMPWARN),
};
-static struct resource adc_resources[] = {
+static const struct resource adc_resources[] = {
DEFINE_RES_IRQ(CHTDC_TI_ADCCMPL),
};
-static struct resource pwrsrc_resources[] = {
+static const struct resource pwrsrc_resources[] = {
DEFINE_RES_IRQ(CHTDC_TI_VBUSDET),
};
-static struct resource battery_resources[] = {
+static const struct resource battery_resources[] = {
DEFINE_RES_IRQ(CHTDC_TI_VBATLOW),
DEFINE_RES_IRQ(CHTDC_TI_CCEOCAL),
};
diff --git a/drivers/mfd/intel_soc_pmic_chtwc.c b/drivers/mfd/intel_soc_pmic_chtwc.c
index be84bb2aa837..49c5f71664bc 100644
--- a/drivers/mfd/intel_soc_pmic_chtwc.c
+++ b/drivers/mfd/intel_soc_pmic_chtwc.c
@@ -41,11 +41,11 @@ enum {
CHT_WC_CRIT_IRQ = 7,
};
-static struct resource cht_wc_pwrsrc_resources[] = {
+static const struct resource cht_wc_pwrsrc_resources[] = {
DEFINE_RES_IRQ(CHT_WC_PWRSRC_IRQ),
};
-static struct resource cht_wc_ext_charger_resources[] = {
+static const struct resource cht_wc_ext_charger_resources[] = {
DEFINE_RES_IRQ(CHT_WC_EXT_CHGR_IRQ),
};
diff --git a/drivers/mfd/intel_soc_pmic_crc.c b/drivers/mfd/intel_soc_pmic_crc.c
index 429efa1f8e55..38acb20e2d60 100644
--- a/drivers/mfd/intel_soc_pmic_crc.c
+++ b/drivers/mfd/intel_soc_pmic_crc.c
@@ -28,23 +28,23 @@
#define CRYSTAL_COVE_IRQ_GPIO 5
#define CRYSTAL_COVE_IRQ_VHDMIOCP 6
-static struct resource gpio_resources[] = {
+static const struct resource gpio_resources[] = {
DEFINE_RES_IRQ_NAMED(CRYSTAL_COVE_IRQ_GPIO, "GPIO"),
};
-static struct resource pwrsrc_resources[] = {
+static const struct resource pwrsrc_resources[] = {
DEFINE_RES_IRQ_NAMED(CRYSTAL_COVE_IRQ_PWRSRC, "PWRSRC"),
};
-static struct resource adc_resources[] = {
+static const struct resource adc_resources[] = {
DEFINE_RES_IRQ_NAMED(CRYSTAL_COVE_IRQ_ADC, "ADC"),
};
-static struct resource thermal_resources[] = {
+static const struct resource thermal_resources[] = {
DEFINE_RES_IRQ_NAMED(CRYSTAL_COVE_IRQ_THRM, "THERMAL"),
};
-static struct resource bcu_resources[] = {
+static const struct resource bcu_resources[] = {
DEFINE_RES_IRQ_NAMED(CRYSTAL_COVE_IRQ_BCU, "BCU"),
};
diff --git a/drivers/mfd/ioc3.c b/drivers/mfd/ioc3.c
index d939ccc46509..c73ec78f255b 100644
--- a/drivers/mfd/ioc3.c
+++ b/drivers/mfd/ioc3.c
@@ -158,13 +158,13 @@ err:
return -ENOMEM;
}
-static struct resource ioc3_uarta_resources[] = {
+static const struct resource ioc3_uarta_resources[] = {
DEFINE_RES_MEM(offsetof(struct ioc3, sregs.uarta),
sizeof_field(struct ioc3, sregs.uarta)),
DEFINE_RES_IRQ(IOC3_IRQ_SERIAL_A)
};
-static struct resource ioc3_uartb_resources[] = {
+static const struct resource ioc3_uartb_resources[] = {
DEFINE_RES_MEM(offsetof(struct ioc3, sregs.uartb),
sizeof_field(struct ioc3, sregs.uartb)),
DEFINE_RES_IRQ(IOC3_IRQ_SERIAL_B)
@@ -213,7 +213,7 @@ static int ioc3_serial_setup(struct ioc3_priv_data *ipd)
return 0;
}
-static struct resource ioc3_kbd_resources[] = {
+static const struct resource ioc3_kbd_resources[] = {
DEFINE_RES_MEM(offsetof(struct ioc3, serio),
sizeof_field(struct ioc3, serio)),
DEFINE_RES_IRQ(IOC3_IRQ_KBD)
@@ -242,7 +242,7 @@ static int ioc3_kbd_setup(struct ioc3_priv_data *ipd)
return 0;
}
-static struct resource ioc3_eth_resources[] = {
+static const struct resource ioc3_eth_resources[] = {
DEFINE_RES_MEM(offsetof(struct ioc3, eth),
sizeof_field(struct ioc3, eth)),
DEFINE_RES_MEM(offsetof(struct ioc3, ssram),
@@ -250,7 +250,7 @@ static struct resource ioc3_eth_resources[] = {
DEFINE_RES_IRQ(0)
};
-static struct resource ioc3_w1_resources[] = {
+static const struct resource ioc3_w1_resources[] = {
DEFINE_RES_MEM(offsetof(struct ioc3, mcr),
sizeof_field(struct ioc3, mcr)),
};
@@ -294,7 +294,7 @@ static int ioc3_eth_setup(struct ioc3_priv_data *ipd)
return 0;
}
-static struct resource ioc3_m48t35_resources[] = {
+static const struct resource ioc3_m48t35_resources[] = {
DEFINE_RES_MEM(IOC3_BYTEBUS_DEV0, M48T35_REG_SIZE)
};
@@ -326,7 +326,7 @@ static struct ds1685_rtc_platform_data ip30_rtc_platform_data = {
.access_type = ds1685_reg_indirect,
};
-static struct resource ioc3_rtc_ds1685_resources[] = {
+static const struct resource ioc3_rtc_ds1685_resources[] = {
DEFINE_RES_MEM(IOC3_BYTEBUS_DEV1, 1),
DEFINE_RES_MEM(IOC3_BYTEBUS_DEV2, 1),
DEFINE_RES_IRQ(0)
@@ -359,7 +359,7 @@ static int ioc3_ds1685_setup(struct ioc3_priv_data *ipd)
};
-static struct resource ioc3_leds_resources[] = {
+static const struct resource ioc3_leds_resources[] = {
DEFINE_RES_MEM(offsetof(struct ioc3, gppr[0]),
sizeof_field(struct ioc3, gppr[0])),
DEFINE_RES_MEM(offsetof(struct ioc3, gppr[1]),
diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c
index 2c9295953c11..9166075c1f32 100644
--- a/drivers/mfd/kempld-core.c
+++ b/drivers/mfd/kempld-core.c
@@ -125,7 +125,6 @@ static const struct kempld_platform_data kempld_platform_data_generic = {
};
static struct platform_device *kempld_pdev;
-static bool kempld_acpi_mode;
static int kempld_create_platform_device(const struct dmi_system_id *id)
{
@@ -501,8 +500,6 @@ static int kempld_probe(struct platform_device *pdev)
ret = kempld_get_acpi_data(pdev);
if (ret)
return ret;
-
- kempld_acpi_mode = true;
} else if (kempld_pdev != pdev) {
/*
* The platform device we are probing is not the one we
@@ -555,6 +552,7 @@ static int kempld_remove(struct platform_device *pdev)
#ifdef CONFIG_ACPI
static const struct acpi_device_id kempld_acpi_table[] = {
+ { "KEM0000", (kernel_ulong_t)&kempld_platform_data_generic },
{ "KEM0001", (kernel_ulong_t)&kempld_platform_data_generic },
{}
};
@@ -565,7 +563,6 @@ static struct platform_driver kempld_driver = {
.driver = {
.name = "kempld",
.acpi_match_table = ACPI_PTR(kempld_acpi_table),
- .probe_type = PROBE_FORCE_SYNCHRONOUS,
},
.probe = kempld_probe,
.remove = kempld_remove,
@@ -589,6 +586,14 @@ static const struct dmi_system_id kempld_dmi_table[] __initconst = {
.driver_data = (void *)&kempld_platform_data_generic,
.callback = kempld_create_platform_device,
}, {
+ .ident = "BDV7",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
+ DMI_MATCH(DMI_BOARD_NAME, "COMe-bDV7"),
+ },
+ .driver_data = (void *)&kempld_platform_data_generic,
+ .callback = kempld_create_platform_device,
+ }, {
.ident = "BHL6",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
@@ -653,6 +658,14 @@ static const struct dmi_system_id kempld_dmi_table[] __initconst = {
.driver_data = (void *)&kempld_platform_data_generic,
.callback = kempld_create_platform_device,
}, {
+ .ident = "CDV7",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
+ DMI_MATCH(DMI_BOARD_NAME, "COMe-cDV7"),
+ },
+ .driver_data = (void *)&kempld_platform_data_generic,
+ .callback = kempld_create_platform_device,
+ }, {
.ident = "CHL6",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
@@ -772,6 +785,22 @@ static const struct dmi_system_id kempld_dmi_table[] __initconst = {
.driver_data = (void *)&kempld_platform_data_generic,
.callback = kempld_create_platform_device,
}, {
+ .ident = "A203",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
+ DMI_MATCH(DMI_BOARD_NAME, "KBox A-203"),
+ },
+ .driver_data = (void *)&kempld_platform_data_generic,
+ .callback = kempld_create_platform_device,
+ }, {
+ .ident = "M4A1",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
+ DMI_MATCH(DMI_BOARD_NAME, "COMe-m4AL"),
+ },
+ .driver_data = (void *)&kempld_platform_data_generic,
+ .callback = kempld_create_platform_device,
+ }, {
.ident = "MAL1",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
@@ -780,6 +809,14 @@ static const struct dmi_system_id kempld_dmi_table[] __initconst = {
.driver_data = (void *)&kempld_platform_data_generic,
.callback = kempld_create_platform_device,
}, {
+ .ident = "MAPL",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
+ DMI_MATCH(DMI_BOARD_NAME, "mITX-APL"),
+ },
+ .driver_data = (void *)&kempld_platform_data_generic,
+ .callback = kempld_create_platform_device,
+ }, {
.ident = "MBR1",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
@@ -828,6 +865,30 @@ static const struct dmi_system_id kempld_dmi_table[] __initconst = {
.driver_data = (void *)&kempld_platform_data_generic,
.callback = kempld_create_platform_device,
}, {
+ .ident = "PAPL",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
+ DMI_MATCH(DMI_BOARD_NAME, "pITX-APL"),
+ },
+ .driver_data = (void *)&kempld_platform_data_generic,
+ .callback = kempld_create_platform_device,
+ }, {
+ .ident = "SXAL",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
+ DMI_MATCH(DMI_BOARD_NAME, "SMARC-sXAL"),
+ },
+ .driver_data = (void *)&kempld_platform_data_generic,
+ .callback = kempld_create_platform_device,
+ }, {
+ .ident = "SXAL4",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
+ DMI_MATCH(DMI_BOARD_NAME, "SMARC-sXA4"),
+ },
+ .driver_data = (void *)&kempld_platform_data_generic,
+ .callback = kempld_create_platform_device,
+ }, {
.ident = "UNP1",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
@@ -867,8 +928,7 @@ static const struct dmi_system_id kempld_dmi_table[] __initconst = {
},
.driver_data = (void *)&kempld_platform_data_generic,
.callback = kempld_create_platform_device,
- },
- {
+ }, {
.ident = "UTH6",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
@@ -876,6 +936,14 @@ static const struct dmi_system_id kempld_dmi_table[] __initconst = {
},
.driver_data = (void *)&kempld_platform_data_generic,
.callback = kempld_create_platform_device,
+ }, {
+ .ident = "Q7AL",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
+ DMI_MATCH(DMI_BOARD_NAME, "Qseven-Q7AL"),
+ },
+ .driver_data = (void *)&kempld_platform_data_generic,
+ .callback = kempld_create_platform_device,
},
{}
};
@@ -884,7 +952,6 @@ MODULE_DEVICE_TABLE(dmi, kempld_dmi_table);
static int __init kempld_init(void)
{
const struct dmi_system_id *id;
- int ret;
if (force_device_id[0]) {
for (id = kempld_dmi_table;
@@ -894,24 +961,11 @@ static int __init kempld_init(void)
break;
if (id->matches[0].slot == DMI_NONE)
return -ENODEV;
- }
-
- ret = platform_driver_register(&kempld_driver);
- if (ret)
- return ret;
-
- /*
- * With synchronous probing the device should already be probed now.
- * If no device id is forced and also no ACPI definition for the
- * device was found, scan DMI table as fallback.
- *
- * If drivers_autoprobing is disabled and the device is found here,
- * only that device can be bound manually later.
- */
- if (!kempld_pdev && !kempld_acpi_mode)
+ } else {
dmi_check_system(kempld_dmi_table);
+ }
- return 0;
+ return platform_driver_register(&kempld_driver);
}
static void __exit kempld_exit(void)
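After the rework the init order is: honor a forced device ID if one was given, otherwise scan the DMI table (whose callback may create the platform device), and only then register the driver so binding happens through the normal driver-core path. Condensed control flow, following the patch:

static int __init kempld_init(void)
{
	if (force_device_id[0]) {
		/* look up force_device_id in kempld_dmi_table;
		 * return -ENODEV if nothing matches */
	} else {
		dmi_check_system(kempld_dmi_table); /* may create kempld_pdev */
	}

	return platform_driver_register(&kempld_driver);
}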
diff --git a/drivers/mfd/lp8788.c b/drivers/mfd/lp8788.c
index 768d556b3fe9..c223d2c6a363 100644
--- a/drivers/mfd/lp8788.c
+++ b/drivers/mfd/lp8788.c
@@ -34,7 +34,7 @@
.num_resources = num_resource, \
}
-static struct resource chg_irqs[] = {
+static const struct resource chg_irqs[] = {
/* Charger Interrupts */
{
.start = LP8788_INT_CHG_INPUT_STATE,
@@ -58,7 +58,7 @@ static struct resource chg_irqs[] = {
},
};
-static struct resource rtc_irqs[] = {
+static const struct resource rtc_irqs[] = {
{
.start = LP8788_INT_RTC_ALARM1,
.end = LP8788_INT_RTC_ALARM2,
diff --git a/drivers/mfd/madera-core.c b/drivers/mfd/madera-core.c
index 4ed6ad8ce002..a2abc0094def 100644
--- a/drivers/mfd/madera-core.c
+++ b/drivers/mfd/madera-core.c
@@ -38,6 +38,9 @@
#define MADERA_RESET_MIN_US 2000
#define MADERA_RESET_MAX_US 3000
+#define ERRATA_DCVDD_MIN_US 10000
+#define ERRATA_DCVDD_MAX_US 15000
+
static const char * const madera_core_supplies[] = {
"AVDD",
"DBVDD1",
@@ -291,6 +294,9 @@ static int __maybe_unused madera_runtime_resume(struct device *dev)
dev_dbg(dev, "Leaving sleep mode\n");
+ if (!madera->reset_errata)
+ madera_enable_hard_reset(madera);
+
ret = regulator_enable(madera->dcvdd);
if (ret) {
dev_err(dev, "Failed to enable DCVDD: %d\n", ret);
@@ -300,7 +306,22 @@ static int __maybe_unused madera_runtime_resume(struct device *dev)
regcache_cache_only(madera->regmap, false);
regcache_cache_only(madera->regmap_32bit, false);
- usleep_range(MADERA_RESET_MIN_US, MADERA_RESET_MAX_US);
+ if (madera->reset_errata)
+ usleep_range(ERRATA_DCVDD_MIN_US, ERRATA_DCVDD_MAX_US);
+ else
+ madera_disable_hard_reset(madera);
+
+ if (!madera->pdata.reset || madera->reset_errata) {
+ ret = madera_wait_for_boot(madera);
+ if (ret)
+ goto err;
+
+ ret = madera_soft_reset(madera);
+ if (ret) {
+ dev_err(dev, "Failed to reset: %d\n", ret);
+ goto err;
+ }
+ }
ret = madera_wait_for_boot(madera);
if (ret)
@@ -489,6 +510,8 @@ int madera_dev_init(struct madera *madera)
*/
switch (madera->type) {
case CS47L15:
+ madera->reset_errata = true;
+ break;
case CS47L35:
case CS47L90:
case CS47L91:
@@ -539,13 +562,19 @@ int madera_dev_init(struct madera *madera)
goto err_dcvdd;
}
+ if (madera->reset_errata)
+ madera_disable_hard_reset(madera);
+
ret = regulator_enable(madera->dcvdd);
if (ret) {
dev_err(dev, "Failed to enable DCVDD: %d\n", ret);
goto err_enable;
}
- madera_disable_hard_reset(madera);
+ if (madera->reset_errata)
+ usleep_range(ERRATA_DCVDD_MIN_US, ERRATA_DCVDD_MAX_US);
+ else
+ madera_disable_hard_reset(madera);
regcache_cache_only(madera->regmap, false);
regcache_cache_only(madera->regmap_32bit, false);
@@ -653,7 +682,7 @@ int madera_dev_init(struct madera *madera)
* It looks like a device we support. If we don't have a hard reset
* we can now attempt a soft reset.
*/
- if (!madera->pdata.reset) {
+ if (!madera->pdata.reset || madera->reset_errata) {
ret = madera_soft_reset(madera);
if (ret)
goto err_reset;
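For the CS47L15 errata the power-up order changes: the hard reset is released before DCVDD comes up, a 10-15 ms settle (the new ERRATA_DCVDD_* bounds) replaces the usual post-reset delay, and the part is then soft-reset even when a reset GPIO exists. The dev-init path, condensed:

	if (madera->reset_errata)
		madera_disable_hard_reset(madera); /* release reset before DCVDD */

	ret = regulator_enable(madera->dcvdd);	/* error handling elided */

	if (madera->reset_errata)
		usleep_range(ERRATA_DCVDD_MIN_US, ERRATA_DCVDD_MAX_US);
	else
		madera_disable_hard_reset(madera); /* normal order */

	/* errata parts are additionally soft-reset even with a reset GPIO */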
diff --git a/drivers/mfd/max77650.c b/drivers/mfd/max77650.c
index 60e07aca6ae5..777485a33bc0 100644
--- a/drivers/mfd/max77650.c
+++ b/drivers/mfd/max77650.c
@@ -221,7 +221,7 @@ MODULE_DEVICE_TABLE(of, max77650_of_match);
static struct i2c_driver max77650_i2c_driver = {
.driver = {
.name = "max77650",
- .of_match_table = of_match_ptr(max77650_of_match),
+ .of_match_table = max77650_of_match,
},
.probe_new = max77650_i2c_probe,
};
diff --git a/drivers/mfd/max77686.c b/drivers/mfd/max77686.c
index 71faf503844b..2ad554b921d9 100644
--- a/drivers/mfd/max77686.c
+++ b/drivers/mfd/max77686.c
@@ -270,7 +270,7 @@ static struct i2c_driver max77686_i2c_driver = {
.driver = {
.name = "max77686",
.pm = &max77686_pm,
- .of_match_table = of_match_ptr(max77686_pmic_dt_match),
+ .of_match_table = max77686_pmic_dt_match,
},
.probe_new = max77686_i2c_probe,
};
diff --git a/drivers/mfd/max8925-core.c b/drivers/mfd/max8925-core.c
index 0af6833b4080..eb3f061c8ee6 100644
--- a/drivers/mfd/max8925-core.c
+++ b/drivers/mfd/max8925-core.c
@@ -19,7 +19,7 @@
#include <linux/of.h>
#include <linux/of_platform.h>
-static struct resource bk_resources[] = {
+static const struct resource bk_resources[] = {
{ 0x84, 0x84, "mode control", IORESOURCE_REG, },
{ 0x85, 0x85, "control", IORESOURCE_REG, },
};
@@ -33,7 +33,7 @@ static struct mfd_cell bk_devs[] = {
},
};
-static struct resource touch_resources[] = {
+static const struct resource touch_resources[] = {
{
.name = "max8925-tsc",
.start = MAX8925_TSC_IRQ,
@@ -51,7 +51,7 @@ static const struct mfd_cell touch_devs[] = {
},
};
-static struct resource power_supply_resources[] = {
+static const struct resource power_supply_resources[] = {
{
.name = "max8925-power",
.start = MAX8925_CHG_IRQ1,
@@ -69,7 +69,7 @@ static const struct mfd_cell power_devs[] = {
},
};
-static struct resource rtc_resources[] = {
+static const struct resource rtc_resources[] = {
{
.name = "max8925-rtc",
.start = MAX8925_IRQ_RTC_ALARM0,
@@ -87,7 +87,7 @@ static const struct mfd_cell rtc_devs[] = {
},
};
-static struct resource onkey_resources[] = {
+static const struct resource onkey_resources[] = {
{
.name = "max8925-onkey",
.start = MAX8925_IRQ_GPM_SW_R,
@@ -110,95 +110,95 @@ static const struct mfd_cell onkey_devs[] = {
},
};
-static struct resource sd1_resources[] = {
+static const struct resource sd1_resources[] = {
{0x06, 0x06, "sdv", IORESOURCE_REG, },
};
-static struct resource sd2_resources[] = {
+static const struct resource sd2_resources[] = {
{0x09, 0x09, "sdv", IORESOURCE_REG, },
};
-static struct resource sd3_resources[] = {
+static const struct resource sd3_resources[] = {
{0x0c, 0x0c, "sdv", IORESOURCE_REG, },
};
-static struct resource ldo1_resources[] = {
+static const struct resource ldo1_resources[] = {
{0x1a, 0x1a, "ldov", IORESOURCE_REG, },
};
-static struct resource ldo2_resources[] = {
+static const struct resource ldo2_resources[] = {
{0x1e, 0x1e, "ldov", IORESOURCE_REG, },
};
-static struct resource ldo3_resources[] = {
+static const struct resource ldo3_resources[] = {
{0x22, 0x22, "ldov", IORESOURCE_REG, },
};
-static struct resource ldo4_resources[] = {
+static const struct resource ldo4_resources[] = {
{0x26, 0x26, "ldov", IORESOURCE_REG, },
};
-static struct resource ldo5_resources[] = {
+static const struct resource ldo5_resources[] = {
{0x2a, 0x2a, "ldov", IORESOURCE_REG, },
};
-static struct resource ldo6_resources[] = {
+static const struct resource ldo6_resources[] = {
{0x2e, 0x2e, "ldov", IORESOURCE_REG, },
};
-static struct resource ldo7_resources[] = {
+static const struct resource ldo7_resources[] = {
{0x32, 0x32, "ldov", IORESOURCE_REG, },
};
-static struct resource ldo8_resources[] = {
+static const struct resource ldo8_resources[] = {
{0x36, 0x36, "ldov", IORESOURCE_REG, },
};
-static struct resource ldo9_resources[] = {
+static const struct resource ldo9_resources[] = {
{0x3a, 0x3a, "ldov", IORESOURCE_REG, },
};
-static struct resource ldo10_resources[] = {
+static const struct resource ldo10_resources[] = {
{0x3e, 0x3e, "ldov", IORESOURCE_REG, },
};
-static struct resource ldo11_resources[] = {
+static const struct resource ldo11_resources[] = {
{0x42, 0x42, "ldov", IORESOURCE_REG, },
};
-static struct resource ldo12_resources[] = {
+static const struct resource ldo12_resources[] = {
{0x46, 0x46, "ldov", IORESOURCE_REG, },
};
-static struct resource ldo13_resources[] = {
+static const struct resource ldo13_resources[] = {
{0x4a, 0x4a, "ldov", IORESOURCE_REG, },
};
-static struct resource ldo14_resources[] = {
+static const struct resource ldo14_resources[] = {
{0x4e, 0x4e, "ldov", IORESOURCE_REG, },
};
-static struct resource ldo15_resources[] = {
+static const struct resource ldo15_resources[] = {
{0x52, 0x52, "ldov", IORESOURCE_REG, },
};
-static struct resource ldo16_resources[] = {
+static const struct resource ldo16_resources[] = {
{0x12, 0x12, "ldov", IORESOURCE_REG, },
};
-static struct resource ldo17_resources[] = {
+static const struct resource ldo17_resources[] = {
{0x16, 0x16, "ldov", IORESOURCE_REG, },
};
-static struct resource ldo18_resources[] = {
+static const struct resource ldo18_resources[] = {
{0x74, 0x74, "ldov", IORESOURCE_REG, },
};
-static struct resource ldo19_resources[] = {
+static const struct resource ldo19_resources[] = {
{0x5e, 0x5e, "ldov", IORESOURCE_REG, },
};
-static struct resource ldo20_resources[] = {
+static const struct resource ldo20_resources[] = {
{0x9e, 0x9e, "ldov", IORESOURCE_REG, },
};
diff --git a/drivers/mfd/menelaus.c b/drivers/mfd/menelaus.c
index b64d3315a5e1..07e0ca2e467c 100644
--- a/drivers/mfd/menelaus.c
+++ b/drivers/mfd/menelaus.c
@@ -1119,7 +1119,7 @@ static inline void menelaus_rtc_init(struct menelaus_chip *m)
menelaus_write_reg(MENELAUS_RTC_CTRL, m->rtc_control);
}
- err = rtc_register_device(m->rtc);
+ err = devm_rtc_register_device(m->rtc);
if (err) {
if (alarm) {
menelaus_remove_irq_work(MENELAUS_RTCALM_IRQ);
diff --git a/drivers/mfd/motorola-cpcap.c b/drivers/mfd/motorola-cpcap.c
index 2283d88adcc2..30d82bfe5b02 100644
--- a/drivers/mfd/motorola-cpcap.c
+++ b/drivers/mfd/motorola-cpcap.c
@@ -97,7 +97,7 @@ static struct regmap_irq_chip cpcap_irq_chip[CPCAP_NR_IRQ_CHIPS] = {
.ack_base = CPCAP_REG_MI1,
.mask_base = CPCAP_REG_MIM1,
.use_ack = true,
- .ack_invert = true,
+ .clear_ack = true,
},
{
.name = "cpcap-m2",
@@ -106,7 +106,7 @@ static struct regmap_irq_chip cpcap_irq_chip[CPCAP_NR_IRQ_CHIPS] = {
.ack_base = CPCAP_REG_MI2,
.mask_base = CPCAP_REG_MIM2,
.use_ack = true,
- .ack_invert = true,
+ .clear_ack = true,
},
{
.name = "cpcap1-4",
@@ -115,7 +115,7 @@ static struct regmap_irq_chip cpcap_irq_chip[CPCAP_NR_IRQ_CHIPS] = {
.ack_base = CPCAP_REG_INT1,
.mask_base = CPCAP_REG_INTM1,
.use_ack = true,
- .ack_invert = true,
+ .clear_ack = true,
},
};
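With clear_ack, regmap-irq follows the acknowledge write with a second write that returns the ack register to its idle value, for interrupt blocks whose status bits would otherwise latch the last written ack. Roughly (illustrative only; the real sequence lives in drivers/base/regmap/regmap-irq.c):

	regmap_write(map, ack_reg, handled_bits);	/* acknowledge */
	regmap_write(map, ack_reg, 0);			/* return register to idle */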
diff --git a/drivers/mfd/mt6397-core.c b/drivers/mfd/mt6397-core.c
index f6cd8a660602..7518d74c3b4c 100644
--- a/drivers/mfd/mt6397-core.c
+++ b/drivers/mfd/mt6397-core.c
@@ -237,7 +237,7 @@ static struct platform_driver mt6397_driver = {
.probe = mt6397_probe,
.driver = {
.name = "mt6397",
- .of_match_table = of_match_ptr(mt6397_of_match),
+ .of_match_table = mt6397_of_match,
},
.id_table = mt6397_id,
};
diff --git a/drivers/mfd/qcom-spmi-pmic.c b/drivers/mfd/qcom-spmi-pmic.c
index 1df1a2711328..a35d5cf16faa 100644
--- a/drivers/mfd/qcom-spmi-pmic.c
+++ b/drivers/mfd/qcom-spmi-pmic.c
@@ -36,6 +36,8 @@
#define PM8998_SUBTYPE 0x14
#define PMI8998_SUBTYPE 0x15
#define PM8005_SUBTYPE 0x18
+#define PM660L_SUBTYPE 0x1A
+#define PM660_SUBTYPE 0x1B
static const struct of_device_id pmic_spmi_id_table[] = {
{ .compatible = "qcom,spmi-pmic", .data = (void *)COMMON_SUBTYPE },
@@ -57,6 +59,8 @@ static const struct of_device_id pmic_spmi_id_table[] = {
{ .compatible = "qcom,pm8998", .data = (void *)PM8998_SUBTYPE },
{ .compatible = "qcom,pmi8998", .data = (void *)PMI8998_SUBTYPE },
{ .compatible = "qcom,pm8005", .data = (void *)PM8005_SUBTYPE },
+ { .compatible = "qcom,pm660l", .data = (void *)PM660L_SUBTYPE },
+ { .compatible = "qcom,pm660", .data = (void *)PM660_SUBTYPE },
{ }
};
diff --git a/drivers/mfd/rdc321x-southbridge.c b/drivers/mfd/rdc321x-southbridge.c
index fbb1faf95e27..c44a76285147 100644
--- a/drivers/mfd/rdc321x-southbridge.c
+++ b/drivers/mfd/rdc321x-southbridge.c
@@ -14,7 +14,7 @@
static struct rdc321x_wdt_pdata rdc321x_wdt_pdata;
-static struct resource rdc321x_wdt_resource[] = {
+static const struct resource rdc321x_wdt_resource[] = {
{
.name = "wdt-reg",
.start = RDC321X_WDT_CTRL,
@@ -27,7 +27,7 @@ static struct rdc321x_gpio_pdata rdc321x_gpio_pdata = {
.max_gpios = RDC321X_NUM_GPIO,
};
-static struct resource rdc321x_gpio_resources[] = {
+static const struct resource rdc321x_gpio_resources[] = {
{
.name = "gpio-reg1",
.start = RDC321X_GPIO_CTRL_REG1,
diff --git a/drivers/mfd/retu-mfd.c b/drivers/mfd/retu-mfd.c
index e7d27b7861c1..c748fd29a220 100644
--- a/drivers/mfd/retu-mfd.c
+++ b/drivers/mfd/retu-mfd.c
@@ -45,7 +45,7 @@ struct retu_dev {
struct regmap_irq_chip_data *irq_data;
};
-static struct resource retu_pwrbutton_res[] = {
+static const struct resource retu_pwrbutton_res[] = {
{
.name = "retu-pwrbutton",
.start = RETU_INT_PWR,
@@ -84,7 +84,7 @@ static struct regmap_irq_chip retu_irq_chip = {
/* Retu device registered for the power off. */
static struct retu_dev *retu_pm_power_off;
-static struct resource tahvo_usb_res[] = {
+static const struct resource tahvo_usb_res[] = {
{
.name = "tahvo-usb",
.start = TAHVO_INT_VBUS,
diff --git a/drivers/mfd/rk808.c b/drivers/mfd/rk808.c
index d109b9f14407..ad923dd4e007 100644
--- a/drivers/mfd/rk808.c
+++ b/drivers/mfd/rk808.c
@@ -107,20 +107,20 @@ static const struct regmap_config rk817_regmap_config = {
.volatile_reg = rk817_is_volatile_reg,
};
-static struct resource rtc_resources[] = {
+static const struct resource rtc_resources[] = {
DEFINE_RES_IRQ(RK808_IRQ_RTC_ALARM),
};
-static struct resource rk817_rtc_resources[] = {
+static const struct resource rk817_rtc_resources[] = {
DEFINE_RES_IRQ(RK817_IRQ_RTC_ALARM),
};
-static struct resource rk805_key_resources[] = {
+static const struct resource rk805_key_resources[] = {
DEFINE_RES_IRQ(RK805_IRQ_PWRON_RISE),
DEFINE_RES_IRQ(RK805_IRQ_PWRON_FALL),
};
-static struct resource rk817_pwrkey_resources[] = {
+static const struct resource rk817_pwrkey_resources[] = {
DEFINE_RES_IRQ(RK817_IRQ_PWRON_RISE),
DEFINE_RES_IRQ(RK817_IRQ_PWRON_FALL),
};
diff --git a/drivers/mfd/rt5033.c b/drivers/mfd/rt5033.c
index 48381d9bf740..f1236a9acf30 100644
--- a/drivers/mfd/rt5033.c
+++ b/drivers/mfd/rt5033.c
@@ -122,7 +122,7 @@ MODULE_DEVICE_TABLE(of, rt5033_dt_match);
static struct i2c_driver rt5033_driver = {
.driver = {
.name = "rt5033",
- .of_match_table = of_match_ptr(rt5033_dt_match),
+ .of_match_table = rt5033_dt_match,
},
.probe = rt5033_i2c_probe,
.id_table = rt5033_i2c_id,
diff --git a/drivers/mfd/stmfx.c b/drivers/mfd/stmfx.c
index 5e680bfdf5c9..e095a3930142 100644
--- a/drivers/mfd/stmfx.c
+++ b/drivers/mfd/stmfx.c
@@ -329,11 +329,11 @@ static int stmfx_chip_init(struct i2c_client *client)
stmfx->vdd = devm_regulator_get_optional(&client->dev, "vdd");
ret = PTR_ERR_OR_ZERO(stmfx->vdd);
- if (ret == -ENODEV) {
- stmfx->vdd = NULL;
- } else {
- return dev_err_probe(&client->dev, ret,
- "Failed to get VDD regulator\n");
+ if (ret) {
+ if (ret == -ENODEV)
+ stmfx->vdd = NULL;
+ else
+ return dev_err_probe(&client->dev, ret, "Failed to get VDD regulator\n");
}
if (stmfx->vdd) {
@@ -548,7 +548,7 @@ MODULE_DEVICE_TABLE(of, stmfx_of_match);
static struct i2c_driver stmfx_driver = {
.driver = {
.name = "stmfx-core",
- .of_match_table = of_match_ptr(stmfx_of_match),
+ .of_match_table = stmfx_of_match,
.pm = &stmfx_dev_pm_ops,
},
.probe = stmfx_probe,
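The regulator rework above fixes a real bug: the old branch took the error path even when the lookup succeeded (ret == 0), returning early from chip init. The optional-regulator idiom it restores treats -ENODEV as "supply not wired on this board" and propagates everything else, notably -EPROBE_DEFER:

	vdd = devm_regulator_get_optional(dev, "vdd");	/* names illustrative */
	ret = PTR_ERR_OR_ZERO(vdd);
	if (ret) {					/* only errors enter here */
		if (ret == -ENODEV)
			vdd = NULL;			/* absent supply is fine */
		else
			return dev_err_probe(dev, ret, "Failed to get VDD\n");
	}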
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
index 1aee3b3253fc..90f3292230c9 100644
--- a/drivers/mfd/stmpe.c
+++ b/drivers/mfd/stmpe.c
@@ -312,7 +312,7 @@ EXPORT_SYMBOL_GPL(stmpe_set_altfunc);
* GPIO (all variants)
*/
-static struct resource stmpe_gpio_resources[] = {
+static const struct resource stmpe_gpio_resources[] = {
/* Start and end filled dynamically */
{
.flags = IORESOURCE_IRQ,
@@ -336,7 +336,7 @@ static const struct mfd_cell stmpe_gpio_cell_noirq = {
* Keypad (1601, 2401, 2403)
*/
-static struct resource stmpe_keypad_resources[] = {
+static const struct resource stmpe_keypad_resources[] = {
{
.name = "KEYPAD",
.flags = IORESOURCE_IRQ,
@@ -357,7 +357,7 @@ static const struct mfd_cell stmpe_keypad_cell = {
/*
* PWM (1601, 2401, 2403)
*/
-static struct resource stmpe_pwm_resources[] = {
+static const struct resource stmpe_pwm_resources[] = {
{
.name = "PWM0",
.flags = IORESOURCE_IRQ,
@@ -445,7 +445,7 @@ static struct stmpe_variant_info stmpe801_noirq = {
* Touchscreen (STMPE811 or STMPE610)
*/
-static struct resource stmpe_ts_resources[] = {
+static const struct resource stmpe_ts_resources[] = {
{
.name = "TOUCH_DET",
.flags = IORESOURCE_IRQ,
@@ -467,7 +467,7 @@ static const struct mfd_cell stmpe_ts_cell = {
* ADC (STMPE811)
*/
-static struct resource stmpe_adc_resources[] = {
+static const struct resource stmpe_adc_resources[] = {
{
.name = "STMPE_TEMP_SENS",
.flags = IORESOURCE_IRQ,
diff --git a/drivers/mfd/sun4i-gpadc.c b/drivers/mfd/sun4i-gpadc.c
index b346fbce3c01..cfe14d9bf6dc 100644
--- a/drivers/mfd/sun4i-gpadc.c
+++ b/drivers/mfd/sun4i-gpadc.c
@@ -18,7 +18,7 @@
#define ARCH_SUN5I_A13 1
#define ARCH_SUN6I_A31 2
-static struct resource adc_resources[] = {
+static const struct resource adc_resources[] = {
DEFINE_RES_IRQ_NAMED(SUN4I_GPADC_IRQ_FIFO_DATA, "FIFO_DATA_PENDING"),
DEFINE_RES_IRQ_NAMED(SUN4I_GPADC_IRQ_TEMP_DATA, "TEMP_DATA_PENDING"),
};
@@ -166,7 +166,7 @@ static int sun4i_gpadc_probe(struct platform_device *pdev)
static struct platform_driver sun4i_gpadc_driver = {
.driver = {
.name = "sun4i-gpadc",
- .of_match_table = of_match_ptr(sun4i_gpadc_of_match),
+ .of_match_table = sun4i_gpadc_of_match,
},
.probe = sun4i_gpadc_probe,
};
diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
index ca465794ea9c..c6f139b2e0c0 100644
--- a/drivers/mfd/syscon.c
+++ b/drivers/mfd/syscon.c
@@ -255,6 +255,24 @@ struct regmap *syscon_regmap_lookup_by_phandle_args(struct device_node *np,
}
EXPORT_SYMBOL_GPL(syscon_regmap_lookup_by_phandle_args);
+/*
+ * It behaves the same as syscon_regmap_lookup_by_phandle() except that,
+ * when there is no regmap phandle, it returns NULL instead of -ENODEV.
+ */
+struct regmap *syscon_regmap_lookup_by_phandle_optional(struct device_node *np,
+ const char *property)
+{
+ struct regmap *regmap;
+
+ regmap = syscon_regmap_lookup_by_phandle(np, property);
+ if (IS_ERR(regmap) && PTR_ERR(regmap) == -ENODEV)
+ return NULL;
+
+ return regmap;
+}
+EXPORT_SYMBOL_GPL(syscon_regmap_lookup_by_phandle_optional);
+
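A hypothetical caller (property name and register values illustrative): the optional variant lets a driver treat a missing phandle as "feature not wired" while still propagating real lookup failures:

	regmap = syscon_regmap_lookup_by_phandle_optional(np, "vendor,sysctl");
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);	/* genuine failure, e.g. -EPROBE_DEFER */

	if (regmap)			/* NULL means the phandle is absent */
		regmap_update_bits(regmap, 0x10, 0x1, 0x1);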
static int syscon_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
diff --git a/drivers/mfd/tc3589x.c b/drivers/mfd/tc3589x.c
index 7882a37ffc35..7614f8fe0e91 100644
--- a/drivers/mfd/tc3589x.c
+++ b/drivers/mfd/tc3589x.c
@@ -141,7 +141,7 @@ out:
}
EXPORT_SYMBOL_GPL(tc3589x_set_bits);
-static struct resource gpio_resources[] = {
+static const struct resource gpio_resources[] = {
{
.start = TC3589x_INT_GPIIRQ,
.end = TC3589x_INT_GPIIRQ,
@@ -149,7 +149,7 @@ static struct resource gpio_resources[] = {
},
};
-static struct resource keypad_resources[] = {
+static const struct resource keypad_resources[] = {
{
.start = TC3589x_INT_KBDIRQ,
.end = TC3589x_INT_KBDIRQ,
diff --git a/drivers/mfd/tc6387xb.c b/drivers/mfd/tc6387xb.c
index c66a701ab21c..e846e4d26b6e 100644
--- a/drivers/mfd/tc6387xb.c
+++ b/drivers/mfd/tc6387xb.c
@@ -25,7 +25,7 @@ struct tc6387xb {
struct resource rscr;
};
-static struct resource tc6387xb_mmc_resources[] = {
+static const struct resource tc6387xb_mmc_resources[] = {
{
.start = 0x800,
.end = 0x9ff,
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c
index 05d5059ca203..3d5b14c60e20 100644
--- a/drivers/mfd/tc6393xb.c
+++ b/drivers/mfd/tc6393xb.c
@@ -133,7 +133,7 @@ static int tc6393xb_nand_enable(struct platform_device *nand)
return 0;
}
-static struct resource tc6393xb_nand_resources[] = {
+static const struct resource tc6393xb_nand_resources[] = {
{
.start = 0x1000,
.end = 0x1007,
@@ -151,7 +151,7 @@ static struct resource tc6393xb_nand_resources[] = {
},
};
-static struct resource tc6393xb_mmc_resources[] = {
+static const struct resource tc6393xb_mmc_resources[] = {
{
.start = 0x800,
.end = 0x9ff,
@@ -192,7 +192,7 @@ static const struct resource tc6393xb_ohci_resources[] = {
},
};
-static struct resource tc6393xb_fb_resources[] = {
+static const struct resource tc6393xb_fb_resources[] = {
{
.start = 0x5000,
.end = 0x51ff,
diff --git a/drivers/mfd/tps65090.c b/drivers/mfd/tps65090.c
index 6cdf6c315034..bd6235308c6b 100644
--- a/drivers/mfd/tps65090.c
+++ b/drivers/mfd/tps65090.c
@@ -38,7 +38,7 @@
#define TPS65090_INT2_MASK_OVERLOAD_FET6 6
#define TPS65090_INT2_MASK_OVERLOAD_FET7 7
-static struct resource charger_resources[] = {
+static const struct resource charger_resources[] = {
{
.start = TPS65090_IRQ_VAC_STATUS_CHANGE,
.end = TPS65090_IRQ_VAC_STATUS_CHANGE,
diff --git a/drivers/mfd/tps65217.c b/drivers/mfd/tps65217.c
index 2d9c282ec917..8027b0a9e14f 100644
--- a/drivers/mfd/tps65217.c
+++ b/drivers/mfd/tps65217.c
@@ -33,12 +33,12 @@
#include <linux/mfd/core.h>
#include <linux/mfd/tps65217.h>
-static struct resource charger_resources[] = {
+static const struct resource charger_resources[] = {
DEFINE_RES_IRQ_NAMED(TPS65217_IRQ_AC, "AC"),
DEFINE_RES_IRQ_NAMED(TPS65217_IRQ_USB, "USB"),
};
-static struct resource pb_resources[] = {
+static const struct resource pb_resources[] = {
DEFINE_RES_IRQ_NAMED(TPS65217_IRQ_PB, "PB"),
};
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index c36597797ddd..c9303d3d6602 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -92,7 +92,7 @@ static const struct tps6586x_irq_data tps6586x_irqs[] = {
[TPS6586X_INT_RTC_ALM2] = TPS6586X_IRQ(TPS6586X_INT_MASK4, 1 << 1),
};
-static struct resource tps6586x_rtc_resources[] = {
+static const struct resource tps6586x_rtc_resources[] = {
{
.start = TPS6586X_INT_RTC_ALM1,
.end = TPS6586X_INT_RTC_ALM1,
diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
index 11959021b50a..6e105cca27d4 100644
--- a/drivers/mfd/tps65910.c
+++ b/drivers/mfd/tps65910.c
@@ -21,7 +21,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
-static struct resource rtc_resources[] = {
+static const struct resource rtc_resources[] = {
{
.start = TPS65910_IRQ_RTC_ALARM,
.end = TPS65910_IRQ_RTC_ALARM,
@@ -292,8 +292,8 @@ static int tps65910_ck32k_init(struct tps65910 *tps65910,
if (!pmic_pdata->en_ck32k_xtal)
return 0;
- ret = tps65910_reg_clear_bits(tps65910, TPS65910_DEVCTRL,
- DEVCTRL_CK32K_CTRL_MASK);
+ ret = regmap_clear_bits(tps65910->regmap, TPS65910_DEVCTRL,
+ DEVCTRL_CK32K_CTRL_MASK);
if (ret < 0) {
dev_err(tps65910->dev, "clear ck32k_ctrl failed: %d\n", ret);
return ret;
@@ -314,17 +314,17 @@ static int tps65910_sleepinit(struct tps65910 *tps65910,
dev = tps65910->dev;
/* enabling SLEEP device state */
- ret = tps65910_reg_set_bits(tps65910, TPS65910_DEVCTRL,
- DEVCTRL_DEV_SLP_MASK);
+ ret = regmap_set_bits(tps65910->regmap, TPS65910_DEVCTRL,
+ DEVCTRL_DEV_SLP_MASK);
if (ret < 0) {
dev_err(dev, "set dev_slp failed: %d\n", ret);
goto err_sleep_init;
}
if (pmic_pdata->slp_keepon.therm_keepon) {
- ret = tps65910_reg_set_bits(tps65910,
- TPS65910_SLEEP_KEEP_RES_ON,
- SLEEP_KEEP_RES_ON_THERM_KEEPON_MASK);
+ ret = regmap_set_bits(tps65910->regmap,
+ TPS65910_SLEEP_KEEP_RES_ON,
+ SLEEP_KEEP_RES_ON_THERM_KEEPON_MASK);
if (ret < 0) {
dev_err(dev, "set therm_keepon failed: %d\n", ret);
goto disable_dev_slp;
@@ -332,9 +332,9 @@ static int tps65910_sleepinit(struct tps65910 *tps65910,
}
if (pmic_pdata->slp_keepon.clkout32k_keepon) {
- ret = tps65910_reg_set_bits(tps65910,
- TPS65910_SLEEP_KEEP_RES_ON,
- SLEEP_KEEP_RES_ON_CLKOUT32K_KEEPON_MASK);
+ ret = regmap_set_bits(tps65910->regmap,
+ TPS65910_SLEEP_KEEP_RES_ON,
+ SLEEP_KEEP_RES_ON_CLKOUT32K_KEEPON_MASK);
if (ret < 0) {
dev_err(dev, "set clkout32k_keepon failed: %d\n", ret);
goto disable_dev_slp;
@@ -342,9 +342,9 @@ static int tps65910_sleepinit(struct tps65910 *tps65910,
}
if (pmic_pdata->slp_keepon.i2chs_keepon) {
- ret = tps65910_reg_set_bits(tps65910,
- TPS65910_SLEEP_KEEP_RES_ON,
- SLEEP_KEEP_RES_ON_I2CHS_KEEPON_MASK);
+ ret = regmap_set_bits(tps65910->regmap,
+ TPS65910_SLEEP_KEEP_RES_ON,
+ SLEEP_KEEP_RES_ON_I2CHS_KEEPON_MASK);
if (ret < 0) {
dev_err(dev, "set i2chs_keepon failed: %d\n", ret);
goto disable_dev_slp;
@@ -354,8 +354,8 @@ static int tps65910_sleepinit(struct tps65910 *tps65910,
return 0;
disable_dev_slp:
- tps65910_reg_clear_bits(tps65910, TPS65910_DEVCTRL,
- DEVCTRL_DEV_SLP_MASK);
+ regmap_clear_bits(tps65910->regmap, TPS65910_DEVCTRL,
+ DEVCTRL_DEV_SLP_MASK);
err_sleep_init:
return ret;
@@ -436,12 +436,18 @@ static void tps65910_power_off(void)
tps65910 = dev_get_drvdata(&tps65910_i2c_client->dev);
- if (tps65910_reg_set_bits(tps65910, TPS65910_DEVCTRL,
- DEVCTRL_PWR_OFF_MASK) < 0)
+ /*
+ * The PWR_OFF bit needs to be set separately, before transitioning
+ * to the OFF state. It enables the "sequential" power-off mode on
+ * TPS65911; it's a NO-OP on TPS65910.
+ */
+ if (regmap_set_bits(tps65910->regmap, TPS65910_DEVCTRL,
+ DEVCTRL_PWR_OFF_MASK) < 0)
return;
- tps65910_reg_clear_bits(tps65910, TPS65910_DEVCTRL,
- DEVCTRL_DEV_ON_MASK);
+ regmap_update_bits(tps65910->regmap, TPS65910_DEVCTRL,
+ DEVCTRL_DEV_OFF_MASK | DEVCTRL_DEV_ON_MASK,
+ DEVCTRL_DEV_OFF_MASK);
}
static int tps65910_i2c_probe(struct i2c_client *i2c,
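These conversions lean on the regmap helpers being thin wrappers around the generic read-modify-write primitive; in terms of the core API, the calls above are equivalent to:

/* set/clear helpers expressed via regmap_update_bits() */
regmap_set_bits(map, reg, bits);	/* == regmap_update_bits(map, reg, bits, bits) */
regmap_clear_bits(map, reg, bits);	/* == regmap_update_bits(map, reg, bits, 0) */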
diff --git a/drivers/mfd/tps65911-comparator.c b/drivers/mfd/tps65911-comparator.c
index 2334cd61c98d..8f4210075913 100644
--- a/drivers/mfd/tps65911-comparator.c
+++ b/drivers/mfd/tps65911-comparator.c
@@ -69,7 +69,7 @@ static int comp_threshold_set(struct tps65910 *tps65910, int id, int voltage)
return -EINVAL;
val = index << 1;
- ret = tps65910_reg_write(tps65910, tps_comp.reg, val);
+ ret = regmap_write(tps65910->regmap, tps_comp.reg, val);
return ret;
}
@@ -80,7 +80,7 @@ static int comp_threshold_get(struct tps65910 *tps65910, int id)
unsigned int val;
int ret;
- ret = tps65910_reg_read(tps65910, tps_comp.reg, &val);
+ ret = regmap_read(tps65910->regmap, tps_comp.reg, &val);
if (ret < 0)
return ret;
diff --git a/drivers/mfd/tps80031.c b/drivers/mfd/tps80031.c
index 907452b86e32..902e33548dd0 100644
--- a/drivers/mfd/tps80031.c
+++ b/drivers/mfd/tps80031.c
@@ -34,7 +34,7 @@
#include <linux/regmap.h>
#include <linux/slab.h>
-static struct resource tps80031_rtc_resources[] = {
+static const struct resource tps80031_rtc_resources[] = {
{
.start = TPS80031_INT_RTC_ALARM,
.end = TPS80031_INT_RTC_ALARM,
diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
index aff2dfbf3bf9..97af6c2a6007 100644
--- a/drivers/mfd/twl6030-irq.c
+++ b/drivers/mfd/twl6030-irq.c
@@ -356,7 +356,7 @@ static const struct irq_domain_ops twl6030_irq_domain_ops = {
.xlate = irq_domain_xlate_onetwocell,
};
-static const struct of_device_id twl6030_of_match[] = {
+static const struct of_device_id twl6030_of_match[] __maybe_unused = {
{.compatible = "ti,twl6030", &twl6030_interrupt_mapping},
{.compatible = "ti,twl6032", &twl6032_interrupt_mapping},
{ },
diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c
index b0344e5353e4..bcef08f58fb3 100644
--- a/drivers/mfd/wm831x-core.c
+++ b/drivers/mfd/wm831x-core.c
@@ -616,7 +616,7 @@ int wm831x_set_bits(struct wm831x *wm831x, unsigned short reg,
}
EXPORT_SYMBOL_GPL(wm831x_set_bits);
-static struct resource wm831x_dcdc1_resources[] = {
+static const struct resource wm831x_dcdc1_resources[] = {
{
.start = WM831X_DC1_CONTROL_1,
.end = WM831X_DC1_DVS_CONTROL,
@@ -637,7 +637,7 @@ static struct resource wm831x_dcdc1_resources[] = {
};
-static struct resource wm831x_dcdc2_resources[] = {
+static const struct resource wm831x_dcdc2_resources[] = {
{
.start = WM831X_DC2_CONTROL_1,
.end = WM831X_DC2_DVS_CONTROL,
@@ -657,7 +657,7 @@ static struct resource wm831x_dcdc2_resources[] = {
},
};
-static struct resource wm831x_dcdc3_resources[] = {
+static const struct resource wm831x_dcdc3_resources[] = {
{
.start = WM831X_DC3_CONTROL_1,
.end = WM831X_DC3_SLEEP_CONTROL,
@@ -671,7 +671,7 @@ static struct resource wm831x_dcdc3_resources[] = {
},
};
-static struct resource wm831x_dcdc4_resources[] = {
+static const struct resource wm831x_dcdc4_resources[] = {
{
.start = WM831X_DC4_CONTROL,
.end = WM831X_DC4_SLEEP_CONTROL,
@@ -685,7 +685,7 @@ static struct resource wm831x_dcdc4_resources[] = {
},
};
-static struct resource wm8320_dcdc4_buck_resources[] = {
+static const struct resource wm8320_dcdc4_buck_resources[] = {
{
.start = WM831X_DC4_CONTROL,
.end = WM832X_DC4_SLEEP_CONTROL,
@@ -699,7 +699,7 @@ static struct resource wm8320_dcdc4_buck_resources[] = {
},
};
-static struct resource wm831x_gpio_resources[] = {
+static const struct resource wm831x_gpio_resources[] = {
{
.start = WM831X_IRQ_GPIO_1,
.end = WM831X_IRQ_GPIO_16,
@@ -707,7 +707,7 @@ static struct resource wm831x_gpio_resources[] = {
},
};
-static struct resource wm831x_isink1_resources[] = {
+static const struct resource wm831x_isink1_resources[] = {
{
.start = WM831X_CURRENT_SINK_1,
.end = WM831X_CURRENT_SINK_1,
@@ -720,7 +720,7 @@ static struct resource wm831x_isink1_resources[] = {
},
};
-static struct resource wm831x_isink2_resources[] = {
+static const struct resource wm831x_isink2_resources[] = {
{
.start = WM831X_CURRENT_SINK_2,
.end = WM831X_CURRENT_SINK_2,
@@ -733,7 +733,7 @@ static struct resource wm831x_isink2_resources[] = {
},
};
-static struct resource wm831x_ldo1_resources[] = {
+static const struct resource wm831x_ldo1_resources[] = {
{
.start = WM831X_LDO1_CONTROL,
.end = WM831X_LDO1_SLEEP_CONTROL,
@@ -747,7 +747,7 @@ static struct resource wm831x_ldo1_resources[] = {
},
};
-static struct resource wm831x_ldo2_resources[] = {
+static const struct resource wm831x_ldo2_resources[] = {
{
.start = WM831X_LDO2_CONTROL,
.end = WM831X_LDO2_SLEEP_CONTROL,
@@ -761,7 +761,7 @@ static struct resource wm831x_ldo2_resources[] = {
},
};
-static struct resource wm831x_ldo3_resources[] = {
+static const struct resource wm831x_ldo3_resources[] = {
{
.start = WM831X_LDO3_CONTROL,
.end = WM831X_LDO3_SLEEP_CONTROL,
@@ -775,7 +775,7 @@ static struct resource wm831x_ldo3_resources[] = {
},
};
-static struct resource wm831x_ldo4_resources[] = {
+static const struct resource wm831x_ldo4_resources[] = {
{
.start = WM831X_LDO4_CONTROL,
.end = WM831X_LDO4_SLEEP_CONTROL,
@@ -789,7 +789,7 @@ static struct resource wm831x_ldo4_resources[] = {
},
};
-static struct resource wm831x_ldo5_resources[] = {
+static const struct resource wm831x_ldo5_resources[] = {
{
.start = WM831X_LDO5_CONTROL,
.end = WM831X_LDO5_SLEEP_CONTROL,
@@ -803,7 +803,7 @@ static struct resource wm831x_ldo5_resources[] = {
},
};
-static struct resource wm831x_ldo6_resources[] = {
+static const struct resource wm831x_ldo6_resources[] = {
{
.start = WM831X_LDO6_CONTROL,
.end = WM831X_LDO6_SLEEP_CONTROL,
@@ -817,7 +817,7 @@ static struct resource wm831x_ldo6_resources[] = {
},
};
-static struct resource wm831x_ldo7_resources[] = {
+static const struct resource wm831x_ldo7_resources[] = {
{
.start = WM831X_LDO7_CONTROL,
.end = WM831X_LDO7_SLEEP_CONTROL,
@@ -831,7 +831,7 @@ static struct resource wm831x_ldo7_resources[] = {
},
};
-static struct resource wm831x_ldo8_resources[] = {
+static const struct resource wm831x_ldo8_resources[] = {
{
.start = WM831X_LDO8_CONTROL,
.end = WM831X_LDO8_SLEEP_CONTROL,
@@ -845,7 +845,7 @@ static struct resource wm831x_ldo8_resources[] = {
},
};
-static struct resource wm831x_ldo9_resources[] = {
+static const struct resource wm831x_ldo9_resources[] = {
{
.start = WM831X_LDO9_CONTROL,
.end = WM831X_LDO9_SLEEP_CONTROL,
@@ -859,7 +859,7 @@ static struct resource wm831x_ldo9_resources[] = {
},
};
-static struct resource wm831x_ldo10_resources[] = {
+static const struct resource wm831x_ldo10_resources[] = {
{
.start = WM831X_LDO10_CONTROL,
.end = WM831X_LDO10_SLEEP_CONTROL,
@@ -873,7 +873,7 @@ static struct resource wm831x_ldo10_resources[] = {
},
};
-static struct resource wm831x_ldo11_resources[] = {
+static const struct resource wm831x_ldo11_resources[] = {
{
.start = WM831X_LDO11_ON_CONTROL,
.end = WM831X_LDO11_SLEEP_CONTROL,
@@ -881,7 +881,7 @@ static struct resource wm831x_ldo11_resources[] = {
},
};
-static struct resource wm831x_on_resources[] = {
+static const struct resource wm831x_on_resources[] = {
{
.start = WM831X_IRQ_ON,
.end = WM831X_IRQ_ON,
@@ -890,7 +890,7 @@ static struct resource wm831x_on_resources[] = {
};
-static struct resource wm831x_power_resources[] = {
+static const struct resource wm831x_power_resources[] = {
{
.name = "SYSLO",
.start = WM831X_IRQ_PPM_SYSLO,
@@ -959,7 +959,7 @@ static struct resource wm831x_power_resources[] = {
},
};
-static struct resource wm831x_rtc_resources[] = {
+static const struct resource wm831x_rtc_resources[] = {
{
.name = "PER",
.start = WM831X_IRQ_RTC_PER,
@@ -974,7 +974,7 @@ static struct resource wm831x_rtc_resources[] = {
},
};
-static struct resource wm831x_status1_resources[] = {
+static const struct resource wm831x_status1_resources[] = {
{
.start = WM831X_STATUS_LED_1,
.end = WM831X_STATUS_LED_1,
@@ -982,7 +982,7 @@ static struct resource wm831x_status1_resources[] = {
},
};
-static struct resource wm831x_status2_resources[] = {
+static const struct resource wm831x_status2_resources[] = {
{
.start = WM831X_STATUS_LED_2,
.end = WM831X_STATUS_LED_2,
@@ -990,7 +990,7 @@ static struct resource wm831x_status2_resources[] = {
},
};
-static struct resource wm831x_touch_resources[] = {
+static const struct resource wm831x_touch_resources[] = {
{
.name = "TCHPD",
.start = WM831X_IRQ_TCHPD,
@@ -1005,7 +1005,7 @@ static struct resource wm831x_touch_resources[] = {
},
};
-static struct resource wm831x_wdt_resources[] = {
+static const struct resource wm831x_wdt_resources[] = {
{
.start = WM831X_IRQ_WDOG_TO,
.end = WM831X_IRQ_WDOG_TO,
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index 3b2b93c5bbcb..7b1d270722ba 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -40,7 +40,7 @@ static const struct mfd_cell wm8994_regulator_devs[] = {
},
};
-static struct resource wm8994_codec_resources[] = {
+static const struct resource wm8994_codec_resources[] = {
{
.start = WM8994_IRQ_TEMP_SHUT,
.end = WM8994_IRQ_TEMP_WARN,
@@ -48,7 +48,7 @@ static struct resource wm8994_codec_resources[] = {
},
};
-static struct resource wm8994_gpio_resources[] = {
+static const struct resource wm8994_gpio_resources[] = {
{
.start = WM8994_IRQ_GPIO(1),
.end = WM8994_IRQ_GPIO(11),
@@ -683,7 +683,7 @@ static struct i2c_driver wm8994_i2c_driver = {
.driver = {
.name = "wm8994",
.pm = &wm8994_pm_ops,
- .of_match_table = of_match_ptr(wm8994_of_match),
+ .of_match_table = wm8994_of_match,
},
.probe = wm8994_i2c_probe,
.remove = wm8994_i2c_remove,
diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
index 2aa6648fa41f..5a491d2cd1ae 100644
--- a/drivers/misc/cardreader/rtsx_pcr.c
+++ b/drivers/misc/cardreader/rtsx_pcr.c
@@ -1512,6 +1512,7 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
struct pcr_handle *handle;
u32 base, len;
int ret, i, bar = 0;
+ u8 val;
dev_dbg(&(pcidev->dev),
": Realtek PCI-E Card Reader found at %s [%04x:%04x] (rev %x)\n",
@@ -1577,7 +1578,11 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
pcr->host_cmds_addr = pcr->rtsx_resv_buf_addr;
pcr->host_sg_tbl_ptr = pcr->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
pcr->host_sg_tbl_addr = pcr->rtsx_resv_buf_addr + HOST_CMDS_BUF_LEN;
-
+ rtsx_pci_read_register(pcr, ASPM_FORCE_CTL, &val);
+ if (val & FORCE_ASPM_CTL0 && val & FORCE_ASPM_CTL1)
+ pcr->aspm_enabled = false;
+ else
+ pcr->aspm_enabled = true;
pcr->card_inserted = 0;
pcr->card_removed = 0;
INIT_DELAYED_WORK(&pcr->carddet_work, rtsx_pci_card_detect);
diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c
index beb482310a58..b2b3d2b0f808 100644
--- a/drivers/misc/habanalabs/common/command_submission.c
+++ b/drivers/misc/habanalabs/common/command_submission.c
@@ -472,8 +472,11 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
cntr = &hdev->aggregated_cs_counters;
cs = kzalloc(sizeof(*cs), GFP_ATOMIC);
- if (!cs)
+ if (!cs) {
+ atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
+ atomic64_inc(&cntr->out_of_mem_drop_cnt);
return -ENOMEM;
+ }
cs->ctx = ctx;
cs->submitted = false;
@@ -486,6 +489,8 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
cs_cmpl = kmalloc(sizeof(*cs_cmpl), GFP_ATOMIC);
if (!cs_cmpl) {
+ atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
+ atomic64_inc(&cntr->out_of_mem_drop_cnt);
rc = -ENOMEM;
goto free_cs;
}
@@ -513,6 +518,8 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues,
sizeof(*cs->jobs_in_queue_cnt), GFP_ATOMIC);
if (!cs->jobs_in_queue_cnt) {
+ atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
+ atomic64_inc(&cntr->out_of_mem_drop_cnt);
rc = -ENOMEM;
goto free_fence;
}
@@ -562,7 +569,7 @@ void hl_cs_rollback_all(struct hl_device *hdev)
for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
flush_workqueue(hdev->cq_wq[i]);
- /* Make sure we don't have leftovers in the H/W queues mirror list */
+ /* Make sure we don't have leftovers in the CS mirror list */
list_for_each_entry_safe(cs, tmp, &hdev->cs_mirror_list, mirror_node) {
cs_get(cs);
cs->aborted = true;
@@ -764,11 +771,14 @@ static int hl_cs_sanity_checks(struct hl_fpriv *hpriv, union hl_cs_args *args)
static int hl_cs_copy_chunk_array(struct hl_device *hdev,
struct hl_cs_chunk **cs_chunk_array,
- void __user *chunks, u32 num_chunks)
+ void __user *chunks, u32 num_chunks,
+ struct hl_ctx *ctx)
{
u32 size_to_copy;
if (num_chunks > HL_MAX_JOBS_PER_CS) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
dev_err(hdev->dev,
"Number of chunks can NOT be larger than %d\n",
HL_MAX_JOBS_PER_CS);
@@ -777,11 +787,16 @@ static int hl_cs_copy_chunk_array(struct hl_device *hdev,
*cs_chunk_array = kmalloc_array(num_chunks, sizeof(**cs_chunk_array),
GFP_ATOMIC);
- if (!*cs_chunk_array)
+ if (!*cs_chunk_array) {
+ atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
+ atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt);
return -ENOMEM;
+ }
size_to_copy = num_chunks * sizeof(struct hl_cs_chunk);
if (copy_from_user(*cs_chunk_array, chunks, size_to_copy)) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
dev_err(hdev->dev, "Failed to copy cs chunk array from user\n");
kfree(*cs_chunk_array);
return -EFAULT;
@@ -797,6 +812,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
struct hl_device *hdev = hpriv->hdev;
struct hl_cs_chunk *cs_chunk_array;
struct hl_cs_counters_atomic *cntr;
+ struct hl_ctx *ctx = hpriv->ctx;
struct hl_cs_job *job;
struct hl_cs *cs;
struct hl_cb *cb;
@@ -805,7 +821,8 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
cntr = &hdev->aggregated_cs_counters;
*cs_seq = ULLONG_MAX;
- rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks);
+ rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks,
+ hpriv->ctx);
if (rc)
goto out;
@@ -832,8 +849,8 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
rc = validate_queue_index(hdev, chunk, &queue_type,
&is_kernel_allocated_cb);
if (rc) {
- atomic64_inc(&hpriv->ctx->cs_counters.parsing_drop_cnt);
- atomic64_inc(&cntr->parsing_drop_cnt);
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
goto free_cs_object;
}
@@ -841,8 +858,8 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
cb = get_cb_from_cs_chunk(hdev, &hpriv->cb_mgr, chunk);
if (!cb) {
atomic64_inc(
- &hpriv->ctx->cs_counters.parsing_drop_cnt);
- atomic64_inc(&cntr->parsing_drop_cnt);
+ &ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
rc = -EINVAL;
goto free_cs_object;
}
@@ -856,8 +873,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
job = hl_cs_allocate_job(hdev, queue_type,
is_kernel_allocated_cb);
if (!job) {
- atomic64_inc(
- &hpriv->ctx->cs_counters.out_of_mem_drop_cnt);
+ atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
atomic64_inc(&cntr->out_of_mem_drop_cnt);
dev_err(hdev->dev, "Failed to allocate a new job\n");
rc = -ENOMEM;
@@ -891,7 +907,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
rc = cs_parser(hpriv, job);
if (rc) {
- atomic64_inc(&hpriv->ctx->cs_counters.parsing_drop_cnt);
+ atomic64_inc(&ctx->cs_counters.parsing_drop_cnt);
atomic64_inc(&cntr->parsing_drop_cnt);
dev_err(hdev->dev,
"Failed to parse JOB %d.%llu.%d, err %d, rejecting the CS\n",
@@ -901,8 +917,8 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
}
if (int_queues_only) {
- atomic64_inc(&hpriv->ctx->cs_counters.parsing_drop_cnt);
- atomic64_inc(&cntr->parsing_drop_cnt);
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
dev_err(hdev->dev,
"Reject CS %d.%llu because only internal queues jobs are present\n",
cs->ctx->asid, cs->sequence);
@@ -1042,7 +1058,7 @@ out:
}
static int cs_ioctl_extract_signal_seq(struct hl_device *hdev,
- struct hl_cs_chunk *chunk, u64 *signal_seq)
+ struct hl_cs_chunk *chunk, u64 *signal_seq, struct hl_ctx *ctx)
{
u64 *signal_seq_arr = NULL;
u32 size_to_copy, signal_seq_arr_len;
@@ -1052,6 +1068,8 @@ static int cs_ioctl_extract_signal_seq(struct hl_device *hdev,
/* currently only one signal seq is supported */
if (signal_seq_arr_len != 1) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
dev_err(hdev->dev,
"Wait for signal CS supports only one signal CS seq\n");
return -EINVAL;
@@ -1060,13 +1078,18 @@ static int cs_ioctl_extract_signal_seq(struct hl_device *hdev,
signal_seq_arr = kmalloc_array(signal_seq_arr_len,
sizeof(*signal_seq_arr),
GFP_ATOMIC);
- if (!signal_seq_arr)
+ if (!signal_seq_arr) {
+ atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
+ atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt);
return -ENOMEM;
+ }
size_to_copy = chunk->num_signal_seq_arr * sizeof(*signal_seq_arr);
if (copy_from_user(signal_seq_arr,
u64_to_user_ptr(chunk->signal_seq_arr),
size_to_copy)) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
dev_err(hdev->dev,
"Failed to copy signal seq array from user\n");
rc = -EFAULT;
@@ -1153,6 +1176,7 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
struct hl_device *hdev = hpriv->hdev;
struct hl_cs_compl *sig_waitcs_cmpl;
u32 q_idx, collective_engine_id = 0;
+ struct hl_cs_counters_atomic *cntr;
struct hl_fence *sig_fence = NULL;
struct hl_ctx *ctx = hpriv->ctx;
enum hl_queue_type q_type;
@@ -1160,9 +1184,11 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
u64 signal_seq;
int rc;
+ cntr = &hdev->aggregated_cs_counters;
*cs_seq = ULLONG_MAX;
- rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks);
+ rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks,
+ ctx);
if (rc)
goto out;
@@ -1170,6 +1196,8 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
chunk = &cs_chunk_array[0];
if (chunk->queue_index >= hdev->asic_prop.max_queues) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
dev_err(hdev->dev, "Queue index %d is invalid\n",
chunk->queue_index);
rc = -EINVAL;
@@ -1181,6 +1209,8 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
q_type = hw_queue_prop->type;
if (!hw_queue_prop->supports_sync_stream) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
dev_err(hdev->dev,
"Queue index %d does not support sync stream operations\n",
q_idx);
@@ -1190,6 +1220,8 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
if (cs_type == CS_TYPE_COLLECTIVE_WAIT) {
if (!(hw_queue_prop->collective_mode == HL_COLLECTIVE_MASTER)) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
dev_err(hdev->dev,
"Queue index %d is invalid\n", q_idx);
rc = -EINVAL;
@@ -1200,12 +1232,14 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
}
if (cs_type == CS_TYPE_WAIT || cs_type == CS_TYPE_COLLECTIVE_WAIT) {
- rc = cs_ioctl_extract_signal_seq(hdev, chunk, &signal_seq);
+ rc = cs_ioctl_extract_signal_seq(hdev, chunk, &signal_seq, ctx);
if (rc)
goto free_cs_chunk_array;
sig_fence = hl_ctx_get_fence(ctx, signal_seq);
if (IS_ERR(sig_fence)) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
dev_err(hdev->dev,
"Failed to get signal CS with seq 0x%llx\n",
signal_seq);
@@ -1223,6 +1257,8 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
container_of(sig_fence, struct hl_cs_compl, base_fence);
if (sig_waitcs_cmpl->type != CS_TYPE_SIGNAL) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
dev_err(hdev->dev,
"CS seq 0x%llx is not of a signal CS\n",
signal_seq);
@@ -1270,8 +1306,11 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
else if (cs_type == CS_TYPE_COLLECTIVE_WAIT)
rc = hdev->asic_funcs->collective_wait_create_jobs(hdev, ctx,
cs, q_idx, collective_engine_id);
- else
+ else {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
rc = -EINVAL;
+ }
if (rc)
goto free_cs_object;
diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c
index 5871162a8442..69d04eca767f 100644
--- a/drivers/misc/habanalabs/common/device.c
+++ b/drivers/misc/habanalabs/common/device.c
@@ -17,12 +17,12 @@ enum hl_device_status hl_device_status(struct hl_device *hdev)
{
enum hl_device_status status;
- if (hdev->disabled)
- status = HL_DEVICE_STATUS_MALFUNCTION;
- else if (atomic_read(&hdev->in_reset))
+ if (atomic_read(&hdev->in_reset))
status = HL_DEVICE_STATUS_IN_RESET;
else if (hdev->needs_reset)
status = HL_DEVICE_STATUS_NEEDS_RESET;
+ else if (hdev->disabled)
+ status = HL_DEVICE_STATUS_MALFUNCTION;
else
status = HL_DEVICE_STATUS_OPERATIONAL;
@@ -1037,7 +1037,7 @@ kill_processes:
if (hard_reset) {
/* Release kernel context */
- if (hl_ctx_put(hdev->kernel_ctx) == 1)
+ if (hdev->kernel_ctx && hl_ctx_put(hdev->kernel_ctx) == 1)
hdev->kernel_ctx = NULL;
hl_vm_fini(hdev);
hl_mmu_fini(hdev);
@@ -1092,6 +1092,7 @@ kill_processes:
GFP_KERNEL);
if (!hdev->kernel_ctx) {
rc = -ENOMEM;
+ hl_mmu_fini(hdev);
goto out_err;
}
@@ -1103,6 +1104,7 @@ kill_processes:
"failed to init kernel ctx in hard reset\n");
kfree(hdev->kernel_ctx);
hdev->kernel_ctx = NULL;
+ hl_mmu_fini(hdev);
goto out_err;
}
}
@@ -1485,6 +1487,15 @@ void hl_device_fini(struct hl_device *hdev)
}
}
+ /* Disable PCI access from device F/W so it won't send us additional
+ * interrupts. We disable MSI/MSI-X in the halt_engines function and we
+ * can't have the F/W sending us interrupts after that. We need to
+ * disable the access here because if the device is marked disabled, the
+ * message won't be sent. Also, in case of a heartbeat failure, the
+ * device CPU is marked as disabled, so this message won't be sent either.
+ */
+ hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS);
+
/* Mark device as disabled */
hdev->disabled = true;
diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c
index 0e1c629e9800..c9a12980218a 100644
--- a/drivers/misc/habanalabs/common/firmware_if.c
+++ b/drivers/misc/habanalabs/common/firmware_if.c
@@ -402,6 +402,10 @@ int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
}
counters->rx_throughput = result;
+ memset(&pkt, 0, sizeof(pkt));
+ pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_THROUGHPUT_GET <<
+ CPUCP_PKT_CTL_OPCODE_SHIFT);
+
/* Fetch PCI tx counter */
pkt.index = cpu_to_le32(cpucp_pcie_throughput_tx);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
@@ -414,6 +418,7 @@ int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
counters->tx_throughput = result;
/* Fetch PCI replay counter */
+ memset(&pkt, 0, sizeof(pkt));
pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_REPLAY_CNT_GET <<
CPUCP_PKT_CTL_OPCODE_SHIFT);
@@ -627,25 +632,38 @@ int hl_fw_read_preboot_status(struct hl_device *hdev, u32 cpu_boot_status_reg,
security_status = RREG32(cpu_security_boot_status_reg);
/* We read security status multiple times during boot:
- * 1. preboot - we check if fw security feature is supported
- * 2. boot cpu - we get boot cpu security status
- * 3. FW application - we get FW application security status
+ * 1. preboot - a. Check whether the security status bits are valid
+ * b. Check whether fw security is enabled
+ * c. Check whether hard reset is done by preboot
+ * 2. boot cpu - a. Fetch boot cpu security status
+ * b. Check whether hard reset is done by boot cpu
+ * 3. FW application - a. Fetch fw application security status
+ * b. Check whether hard reset is done by fw app
*
* Preboot:
* Check security status bit (CPU_BOOT_DEV_STS0_ENABLED), if it is set
* check security enabled bit (CPU_BOOT_DEV_STS0_SECURITY_EN)
*/
if (security_status & CPU_BOOT_DEV_STS0_ENABLED) {
- hdev->asic_prop.fw_security_status_valid = 1;
- prop->fw_security_disabled =
- !(security_status & CPU_BOOT_DEV_STS0_SECURITY_EN);
+ prop->fw_security_status_valid = 1;
+
+ if (security_status & CPU_BOOT_DEV_STS0_SECURITY_EN)
+ prop->fw_security_disabled = false;
+ else
+ prop->fw_security_disabled = true;
+
+ if (security_status & CPU_BOOT_DEV_STS0_FW_HARD_RST_EN)
+ prop->hard_reset_done_by_fw = true;
} else {
- hdev->asic_prop.fw_security_status_valid = 0;
+ prop->fw_security_status_valid = 0;
prop->fw_security_disabled = true;
}
+ dev_dbg(hdev->dev, "Firmware preboot hard-reset is %s\n",
+ prop->hard_reset_done_by_fw ? "enabled" : "disabled");
+
dev_info(hdev->dev, "firmware-level security is %s\n",
- prop->fw_security_disabled ? "disabled" : "enabled");
+ prop->fw_security_disabled ? "disabled" : "enabled");
return 0;
}
@@ -655,6 +673,7 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
u32 cpu_security_boot_status_reg, u32 boot_err0_reg,
bool skip_bmc, u32 cpu_timeout, u32 boot_fit_timeout)
{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
u32 status;
int rc;
@@ -723,11 +742,22 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
/* Read U-Boot version now in case we will later fail */
hdev->asic_funcs->read_device_fw_version(hdev, FW_COMP_UBOOT);
+ /* Clear reset status since we need to read it again from boot CPU */
+ prop->hard_reset_done_by_fw = false;
+
/* Read boot_cpu security bits */
- if (hdev->asic_prop.fw_security_status_valid)
- hdev->asic_prop.fw_boot_cpu_security_map =
+ if (prop->fw_security_status_valid) {
+ prop->fw_boot_cpu_security_map =
RREG32(cpu_security_boot_status_reg);
+ if (prop->fw_boot_cpu_security_map &
+ CPU_BOOT_DEV_STS0_FW_HARD_RST_EN)
+ prop->hard_reset_done_by_fw = true;
+ }
+
+ dev_dbg(hdev->dev, "Firmware boot CPU hard-reset is %s\n",
+ prop->hard_reset_done_by_fw ? "enabled" : "disabled");
+
if (rc) {
detect_cpu_boot_status(hdev, status);
rc = -EIO;
@@ -796,18 +826,21 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
goto out;
}
+ /* Clear reset status since we need to read again from app */
+ prop->hard_reset_done_by_fw = false;
+
/* Read FW application security bits */
- if (hdev->asic_prop.fw_security_status_valid) {
- hdev->asic_prop.fw_app_security_map =
+ if (prop->fw_security_status_valid) {
+ prop->fw_app_security_map =
RREG32(cpu_security_boot_status_reg);
- if (hdev->asic_prop.fw_app_security_map &
+ if (prop->fw_app_security_map &
CPU_BOOT_DEV_STS0_FW_HARD_RST_EN)
- hdev->asic_prop.hard_reset_done_by_fw = true;
+ prop->hard_reset_done_by_fw = true;
}
- dev_dbg(hdev->dev, "Firmware hard-reset is %s\n",
- hdev->asic_prop.hard_reset_done_by_fw ? "enabled" : "disabled");
+ dev_dbg(hdev->dev, "Firmware application CPU hard-reset is %s\n",
+ prop->hard_reset_done_by_fw ? "enabled" : "disabled");
dev_info(hdev->dev, "Successfully loaded firmware to device\n");
diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h
index 571eda6ef5ab..60e16dc4bcac 100644
--- a/drivers/misc/habanalabs/common/habanalabs.h
+++ b/drivers/misc/habanalabs/common/habanalabs.h
@@ -944,7 +944,7 @@ struct hl_asic_funcs {
u32 (*get_signal_cb_size)(struct hl_device *hdev);
u32 (*get_wait_cb_size)(struct hl_device *hdev);
u32 (*gen_signal_cb)(struct hl_device *hdev, void *data, u16 sob_id,
- u32 size);
+ u32 size, bool eb);
u32 (*gen_wait_cb)(struct hl_device *hdev,
struct hl_gen_wait_properties *prop);
void (*reset_sob)(struct hl_device *hdev, void *data);
@@ -1000,6 +1000,7 @@ struct hl_va_range {
* @queue_full_drop_cnt: dropped due to queue full
* @device_in_reset_drop_cnt: dropped due to device in reset
* @max_cs_in_flight_drop_cnt: dropped due to maximum CS in-flight
+ * @validation_drop_cnt: dropped due to error in validation
*/
struct hl_cs_counters_atomic {
atomic64_t out_of_mem_drop_cnt;
@@ -1007,6 +1008,7 @@ struct hl_cs_counters_atomic {
atomic64_t queue_full_drop_cnt;
atomic64_t device_in_reset_drop_cnt;
atomic64_t max_cs_in_flight_drop_cnt;
+ atomic64_t validation_drop_cnt;
};
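Every rejection path that uses the new counter bumps it twice, once on the submitting context and once device-wide, so the per-process and aggregated statistics stay in step:

atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);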
/**
@@ -2180,6 +2182,7 @@ void hl_mmu_v1_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu);
int hl_mmu_va_to_pa(struct hl_ctx *ctx, u64 virt_addr, u64 *phys_addr);
int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
struct hl_mmu_hop_info *hops);
+bool hl_is_dram_va(struct hl_device *hdev, u64 virt_addr);
int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name,
void __iomem *dst, u32 src_offset, u32 size);
diff --git a/drivers/misc/habanalabs/common/habanalabs_drv.c b/drivers/misc/habanalabs/common/habanalabs_drv.c
index 6bbb6bca6860..032d114f01ea 100644
--- a/drivers/misc/habanalabs/common/habanalabs_drv.c
+++ b/drivers/misc/habanalabs/common/habanalabs_drv.c
@@ -544,6 +544,7 @@ static struct pci_driver hl_pci_driver = {
.id_table = ids,
.probe = hl_pci_probe,
.remove = hl_pci_remove,
+ .shutdown = hl_pci_remove,
.driver.pm = &hl_pm_ops,
.err_handler = &hl_pci_err_handler,
};
diff --git a/drivers/misc/habanalabs/common/habanalabs_ioctl.c b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
index 32e6af1db4e3..d25892d61ec9 100644
--- a/drivers/misc/habanalabs/common/habanalabs_ioctl.c
+++ b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
@@ -133,6 +133,8 @@ static int hw_idle(struct hl_device *hdev, struct hl_info_args *args)
hw_idle.is_idle = hdev->asic_funcs->is_device_idle(hdev,
&hw_idle.busy_engines_mask_ext, NULL);
+ hw_idle.busy_engines_mask =
+ lower_32_bits(hw_idle.busy_engines_mask_ext);
return copy_to_user(out, &hw_idle,
min((size_t) max_size, sizeof(hw_idle))) ? -EFAULT : 0;
@@ -335,6 +337,8 @@ static int cs_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
atomic64_read(&cntr->device_in_reset_drop_cnt);
cs_counters.total_max_cs_in_flight_drop_cnt =
atomic64_read(&cntr->max_cs_in_flight_drop_cnt);
+ cs_counters.total_validation_drop_cnt =
+ atomic64_read(&cntr->validation_drop_cnt);
if (hpriv->ctx) {
cs_counters.ctx_out_of_mem_drop_cnt =
@@ -352,6 +356,9 @@ static int cs_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
cs_counters.ctx_max_cs_in_flight_drop_cnt =
atomic64_read(
&hpriv->ctx->cs_counters.max_cs_in_flight_drop_cnt);
+ cs_counters.ctx_validation_drop_cnt =
+ atomic64_read(
+ &hpriv->ctx->cs_counters.validation_drop_cnt);
}
return copy_to_user(out, &cs_counters,
@@ -406,7 +413,7 @@ static int total_energy_consumption_info(struct hl_fpriv *hpriv,
static int pll_frequency_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
struct hl_device *hdev = hpriv->hdev;
- struct hl_pll_frequency_info freq_info = {0};
+ struct hl_pll_frequency_info freq_info = { {0} };
u32 max_size = args->return_size;
void __user *out = (void __user *) (uintptr_t) args->return_pointer;
int rc;
diff --git a/drivers/misc/habanalabs/common/hw_queue.c b/drivers/misc/habanalabs/common/hw_queue.c
index 7caf868d1585..76217258780a 100644
--- a/drivers/misc/habanalabs/common/hw_queue.c
+++ b/drivers/misc/habanalabs/common/hw_queue.c
@@ -418,8 +418,11 @@ static void init_signal_cs(struct hl_device *hdev,
"generate signal CB, sob_id: %d, sob val: 0x%x, q_idx: %d\n",
cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val, q_idx);
+ /* We set an EB (engine barrier) since we must make sure all operations
+ * are done when sending the signal
+ */
hdev->asic_funcs->gen_signal_cb(hdev, job->patched_cb,
- cs_cmpl->hw_sob->sob_id, 0);
+ cs_cmpl->hw_sob->sob_id, 0, true);
kref_get(&hw_sob->kref);
diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c
index cbe9da4e0211..5d4fbdcaefe3 100644
--- a/drivers/misc/habanalabs/common/memory.c
+++ b/drivers/misc/habanalabs/common/memory.c
@@ -886,8 +886,10 @@ static void unmap_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
{
struct hl_device *hdev = ctx->hdev;
u64 next_vaddr, i;
+ bool is_host_addr;
u32 page_size;
+ is_host_addr = !hl_is_dram_va(hdev, vaddr);
page_size = phys_pg_pack->page_size;
next_vaddr = vaddr;
@@ -900,9 +902,13 @@ static void unmap_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
/*
* unmapping on Palladium can be really long, so avoid a CPU
* soft lockup bug by sleeping a little between unmapping pages
+ *
+ * In addition, when unmapping host memory we pass through
+ * the Linux kernel to unpin the pages and that takes a long
+ * time. Therefore, sleep every 32K pages to avoid a soft lockup
*/
- if (hdev->pldm)
- usleep_range(500, 1000);
+ if (hdev->pldm || (is_host_addr && (i & 0x7FFF) == 0))
+ usleep_range(50, 200);
}
}
diff --git a/drivers/misc/habanalabs/common/mmu.c b/drivers/misc/habanalabs/common/mmu.c
index 33ae953d3a36..28a4638741d8 100644
--- a/drivers/misc/habanalabs/common/mmu.c
+++ b/drivers/misc/habanalabs/common/mmu.c
@@ -9,7 +9,7 @@
#include "habanalabs.h"
-static bool is_dram_va(struct hl_device *hdev, u64 virt_addr)
+bool hl_is_dram_va(struct hl_device *hdev, u64 virt_addr)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
@@ -156,7 +156,7 @@ int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
if (!hdev->mmu_enable)
return 0;
- is_dram_addr = is_dram_va(hdev, virt_addr);
+ is_dram_addr = hl_is_dram_va(hdev, virt_addr);
if (is_dram_addr)
mmu_prop = &prop->dmmu;
@@ -236,7 +236,7 @@ int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
if (!hdev->mmu_enable)
return 0;
- is_dram_addr = is_dram_va(hdev, virt_addr);
+ is_dram_addr = hl_is_dram_va(hdev, virt_addr);
if (is_dram_addr)
mmu_prop = &prop->dmmu;
diff --git a/drivers/misc/habanalabs/common/mmu_v1.c b/drivers/misc/habanalabs/common/mmu_v1.c
index 2ce6ea89d4fa..06d8a44dd5d4 100644
--- a/drivers/misc/habanalabs/common/mmu_v1.c
+++ b/drivers/misc/habanalabs/common/mmu_v1.c
@@ -467,8 +467,16 @@ static void hl_mmu_v1_fini(struct hl_device *hdev)
{
/* MMU H/W fini was already done in device hw_fini() */
- kvfree(hdev->mmu_priv.dr.mmu_shadow_hop0);
- gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);
+ if (!ZERO_OR_NULL_PTR(hdev->mmu_priv.dr.mmu_shadow_hop0)) {
+ kvfree(hdev->mmu_priv.dr.mmu_shadow_hop0);
+ gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);
+ }
+
+ /* Make sure that if we arrive here again without init having been
+ * called, we won't cause a kernel panic. This can happen, for example,
+ * if we fail at certain points during the hard reset code
+ */
+ hdev->mmu_priv.dr.mmu_shadow_hop0 = NULL;
}
/**
diff --git a/drivers/misc/habanalabs/common/pci.c b/drivers/misc/habanalabs/common/pci.c
index 923b2606e29f..b4725e6101f6 100644
--- a/drivers/misc/habanalabs/common/pci.c
+++ b/drivers/misc/habanalabs/common/pci.c
@@ -130,10 +130,8 @@ static int hl_pci_elbi_write(struct hl_device *hdev, u64 addr, u32 data)
if ((val & PCI_CONFIG_ELBI_STS_MASK) == PCI_CONFIG_ELBI_STS_DONE)
return 0;
- if (val & PCI_CONFIG_ELBI_STS_ERR) {
- dev_err(hdev->dev, "Error writing to ELBI\n");
+ if (val & PCI_CONFIG_ELBI_STS_ERR)
return -EIO;
- }
if (!(val & PCI_CONFIG_ELBI_STS_MASK)) {
dev_err(hdev->dev, "ELBI write didn't finish in time\n");
@@ -160,8 +158,12 @@ int hl_pci_iatu_write(struct hl_device *hdev, u32 addr, u32 data)
dbi_offset = addr & 0xFFF;
- rc = hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0x00300000);
- rc |= hl_pci_elbi_write(hdev, prop->pcie_dbi_base_address + dbi_offset,
+ /* Ignore the result of writing to pcie_aux_dbi_reg_addr, as it can
+ * fail when firmware security is enabled
+ */
+ hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0x00300000);
+
+ rc = hl_pci_elbi_write(hdev, prop->pcie_dbi_base_address + dbi_offset,
data);
if (rc)
@@ -244,9 +246,11 @@ int hl_pci_set_inbound_region(struct hl_device *hdev, u8 region,
rc |= hl_pci_iatu_write(hdev, offset + 0x4, ctrl_reg_val);
- /* Return the DBI window to the default location */
- rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
- rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr + 4, 0);
+ /* Return the DBI window to the default location.
+ * Ignore the result of writing to pcie_aux_dbi_reg_addr, as it can
+ * fail when firmware security is enabled
+ */
+ hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
if (rc)
dev_err(hdev->dev, "failed to map bar %u to 0x%08llx\n",
@@ -294,9 +298,11 @@ int hl_pci_set_outbound_region(struct hl_device *hdev,
/* Enable */
rc |= hl_pci_iatu_write(hdev, 0x004, 0x80000000);
- /* Return the DBI window to the default location */
- rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
- rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr + 4, 0);
+ /* Return the DBI window to the default location.
+ * Ignore the result of writing to pcie_aux_dbi_reg_addr, as it can
+ * fail when firmware security is enabled
+ */
+ hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
return rc;
}
diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
index 1f1926607c5e..b328ddaa64ee 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi.c
@@ -151,19 +151,6 @@ static const u16 gaudi_packet_sizes[MAX_PACKET_ID] = {
[PACKET_LOAD_AND_EXE] = sizeof(struct packet_load_and_exe)
};
-static const u32 gaudi_pll_base_addresses[GAUDI_PLL_MAX] = {
- [CPU_PLL] = mmPSOC_CPU_PLL_NR,
- [PCI_PLL] = mmPSOC_PCI_PLL_NR,
- [SRAM_PLL] = mmSRAM_W_PLL_NR,
- [HBM_PLL] = mmPSOC_HBM_PLL_NR,
- [NIC_PLL] = mmNIC0_PLL_NR,
- [DMA_PLL] = mmDMA_W_PLL_NR,
- [MESH_PLL] = mmMESH_W_PLL_NR,
- [MME_PLL] = mmPSOC_MME_PLL_NR,
- [TPC_PLL] = mmPSOC_TPC_PLL_NR,
- [IF_PLL] = mmIF_W_PLL_NR
-};
-
static inline bool validate_packet_id(enum packet_id id)
{
switch (id) {
@@ -374,7 +361,7 @@ static int gaudi_cpucp_info_get(struct hl_device *hdev);
static void gaudi_disable_clock_gating(struct hl_device *hdev);
static void gaudi_mmu_prepare(struct hl_device *hdev, u32 asid);
static u32 gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id,
- u32 size);
+ u32 size, bool eb);
static u32 gaudi_gen_wait_cb(struct hl_device *hdev,
struct hl_gen_wait_properties *prop);
@@ -667,12 +654,6 @@ static int gaudi_early_init(struct hl_device *hdev)
if (rc)
goto free_queue_props;
- if (gaudi_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
- dev_info(hdev->dev,
- "H/W state is dirty, must reset before initializing\n");
- hdev->asic_funcs->hw_fini(hdev, true);
- }
-
/* Before continuing in the initialization, we need to read the preboot
* version to determine whether we run with a security-enabled firmware
*/
@@ -685,6 +666,12 @@ static int gaudi_early_init(struct hl_device *hdev)
goto pci_fini;
}
+ if (gaudi_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
+ dev_info(hdev->dev,
+ "H/W state is dirty, must reset before initializing\n");
+ hdev->asic_funcs->hw_fini(hdev, true);
+ }
+
return 0;
pci_fini:
@@ -703,93 +690,60 @@ static int gaudi_early_fini(struct hl_device *hdev)
}
/**
- * gaudi_fetch_pll_frequency - Fetch PLL frequency values
+ * gaudi_fetch_psoc_frequency - Fetch PSOC frequency values
*
* @hdev: pointer to hl_device structure
- * @pll_index: index of the pll to fetch frequency from
- * @pll_freq: pointer to store the pll frequency in MHz in each of the available
- * outputs. if a certain output is not available a 0 will be set
*
*/
-static int gaudi_fetch_pll_frequency(struct hl_device *hdev,
- enum gaudi_pll_index pll_index,
- u16 *pll_freq_arr)
+static int gaudi_fetch_psoc_frequency(struct hl_device *hdev)
{
- u32 nr = 0, nf = 0, od = 0, pll_clk = 0, div_fctr, div_sel,
- pll_base_addr = gaudi_pll_base_addresses[pll_index];
- u16 freq = 0;
- int i, rc;
-
- if (hdev->asic_prop.fw_security_status_valid &&
- (hdev->asic_prop.fw_app_security_map &
- CPU_BOOT_DEV_STS0_PLL_INFO_EN)) {
- rc = hl_fw_cpucp_pll_info_get(hdev, pll_index, pll_freq_arr);
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u32 nr = 0, nf = 0, od = 0, div_fctr = 0, pll_clk, div_sel;
+ u16 pll_freq_arr[HL_PLL_NUM_OUTPUTS], freq;
+ int rc;
- if (rc)
- return rc;
- } else if (hdev->asic_prop.fw_security_disabled) {
+ if (hdev->asic_prop.fw_security_disabled) {
/* Backward compatibility */
- nr = RREG32(pll_base_addr + PLL_NR_OFFSET);
- nf = RREG32(pll_base_addr + PLL_NF_OFFSET);
- od = RREG32(pll_base_addr + PLL_OD_OFFSET);
-
- for (i = 0; i < HL_PLL_NUM_OUTPUTS; i++) {
- div_fctr = RREG32(pll_base_addr +
- PLL_DIV_FACTOR_0_OFFSET + i * 4);
- div_sel = RREG32(pll_base_addr +
- PLL_DIV_SEL_0_OFFSET + i * 4);
+ div_fctr = RREG32(mmPSOC_CPU_PLL_DIV_FACTOR_2);
+ div_sel = RREG32(mmPSOC_CPU_PLL_DIV_SEL_2);
+ nr = RREG32(mmPSOC_CPU_PLL_NR);
+ nf = RREG32(mmPSOC_CPU_PLL_NF);
+ od = RREG32(mmPSOC_CPU_PLL_OD);
- if (div_sel == DIV_SEL_REF_CLK ||
+ if (div_sel == DIV_SEL_REF_CLK ||
div_sel == DIV_SEL_DIVIDED_REF) {
- if (div_sel == DIV_SEL_REF_CLK)
- freq = PLL_REF_CLK;
- else
- freq = PLL_REF_CLK / (div_fctr + 1);
- } else if (div_sel == DIV_SEL_PLL_CLK ||
- div_sel == DIV_SEL_DIVIDED_PLL) {
- pll_clk = PLL_REF_CLK * (nf + 1) /
- ((nr + 1) * (od + 1));
- if (div_sel == DIV_SEL_PLL_CLK)
- freq = pll_clk;
- else
- freq = pll_clk / (div_fctr + 1);
- } else {
- dev_warn(hdev->dev,
- "Received invalid div select value: %d",
- div_sel);
- }
-
- pll_freq_arr[i] = freq;
+ if (div_sel == DIV_SEL_REF_CLK)
+ freq = PLL_REF_CLK;
+ else
+ freq = PLL_REF_CLK / (div_fctr + 1);
+ } else if (div_sel == DIV_SEL_PLL_CLK ||
+ div_sel == DIV_SEL_DIVIDED_PLL) {
+ pll_clk = PLL_REF_CLK * (nf + 1) /
+ ((nr + 1) * (od + 1));
+ if (div_sel == DIV_SEL_PLL_CLK)
+ freq = pll_clk;
+ else
+ freq = pll_clk / (div_fctr + 1);
+ } else {
+ dev_warn(hdev->dev,
+ "Received invalid div select value: %d",
+ div_sel);
+ freq = 0;
}
} else {
- dev_err(hdev->dev, "Failed to fetch PLL frequency values\n");
- return -EIO;
- }
+ rc = hl_fw_cpucp_pll_info_get(hdev, CPU_PLL, pll_freq_arr);
- return 0;
-}
-
-/**
- * gaudi_fetch_psoc_frequency - Fetch PSOC frequency values
- *
- * @hdev: pointer to hl_device structure
- *
- */
-static int gaudi_fetch_psoc_frequency(struct hl_device *hdev)
-{
- struct asic_fixed_properties *prop = &hdev->asic_prop;
- u16 pll_freq[HL_PLL_NUM_OUTPUTS];
- int rc;
+ if (rc)
+ return rc;
- rc = gaudi_fetch_pll_frequency(hdev, CPU_PLL, pll_freq);
- if (rc)
- return rc;
+ freq = pll_freq_arr[2];
+ }
- prop->psoc_timestamp_frequency = pll_freq[2];
- prop->psoc_pci_pll_nr = 0;
- prop->psoc_pci_pll_nf = 0;
- prop->psoc_pci_pll_od = 0;
- prop->psoc_pci_pll_div_factor = 0;
+ prop->psoc_timestamp_frequency = freq;
+ prop->psoc_pci_pll_nr = nr;
+ prop->psoc_pci_pll_nf = nf;
+ prop->psoc_pci_pll_od = od;
+ prop->psoc_pci_pll_div_factor = div_fctr;
return 0;
}
@@ -884,11 +838,17 @@ static int gaudi_init_tpc_mem(struct hl_device *hdev)
size_t fw_size;
void *cpu_addr;
dma_addr_t dma_handle;
- int rc;
+ int rc, count = 5;
+again:
rc = request_firmware(&fw, GAUDI_TPC_FW_FILE, hdev->dev);
+ if (rc == -EINTR && count-- > 0) {
+ msleep(50);
+ goto again;
+ }
+
if (rc) {
- dev_err(hdev->dev, "Firmware file %s is not found!\n",
+ dev_err(hdev->dev, "Failed to load firmware file %s\n",
GAUDI_TPC_FW_FILE);
goto out;
}
@@ -1110,7 +1070,7 @@ static void gaudi_collective_slave_init_job(struct hl_device *hdev,
prop->collective_sob_id, queue_id);
cb_size += gaudi_gen_signal_cb(hdev, job->user_cb,
- prop->collective_sob_id, cb_size);
+ prop->collective_sob_id, cb_size, false);
}
static void gaudi_collective_wait_init_cs(struct hl_cs *cs)
@@ -2449,8 +2409,6 @@ static void gaudi_init_golden_registers(struct hl_device *hdev)
gaudi_init_e2e(hdev);
gaudi_init_hbm_cred(hdev);
- hdev->asic_funcs->disable_clock_gating(hdev);
-
for (tpc_id = 0, tpc_offset = 0;
tpc_id < TPC_NUMBER_OF_ENGINES;
tpc_id++, tpc_offset += TPC_CFG_OFFSET) {
@@ -3462,6 +3420,9 @@ static void gaudi_set_clock_gating(struct hl_device *hdev)
if (hdev->in_debug)
return;
+ if (!hdev->asic_prop.fw_security_disabled)
+ return;
+
for (i = GAUDI_PCI_DMA_1, qman_offset = 0 ; i < GAUDI_HBM_DMA_1 ; i++) {
enable = !!(hdev->clock_gating_mask &
(BIT_ULL(gaudi_dma_assignment[i])));
@@ -3513,7 +3474,7 @@ static void gaudi_disable_clock_gating(struct hl_device *hdev)
u32 qman_offset;
int i;
- if (!(gaudi->hw_cap_initialized & HW_CAP_CLK_GATE))
+ if (!hdev->asic_prop.fw_security_disabled)
return;
for (i = 0, qman_offset = 0 ; i < DMA_NUMBER_OF_CHANNELS ; i++) {
@@ -3806,7 +3767,7 @@ static int gaudi_init_cpu_queues(struct hl_device *hdev, u32 cpu_timeout)
static void gaudi_pre_hw_init(struct hl_device *hdev)
{
/* Perform read from the device to make sure device is up */
- RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
+ RREG32(mmHW_STATE);
if (hdev->asic_prop.fw_security_disabled) {
/* Set the access through PCI bars (Linux driver only) as
@@ -3847,6 +3808,13 @@ static int gaudi_hw_init(struct hl_device *hdev)
return rc;
}
+ /* In case clock gating was enabled in preboot, we need to disable it
+ * here before touching the MME/TPC registers.
+ * There is no need to take the clock gating mutex because when this
+ * function runs, no other relevant code can run
+ */
+ hdev->asic_funcs->disable_clock_gating(hdev);
+
/* SRAM scrambler must be initialized after CPU is running from HBM */
gaudi_init_scrambler_sram(hdev);
@@ -3885,7 +3853,7 @@ static int gaudi_hw_init(struct hl_device *hdev)
}
/* Perform read from the device to flush all configuration */
- RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
+ RREG32(mmHW_STATE);
return 0;
@@ -3927,7 +3895,10 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset)
/* I don't know what the state of the CPU is, so make sure it is
* stopped by any means necessary
*/
- WREG32(mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU, KMD_MSG_GOTO_WFE);
+ if (hdev->asic_prop.hard_reset_done_by_fw)
+ WREG32(mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU, KMD_MSG_RST_DEV);
+ else
+ WREG32(mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU, KMD_MSG_GOTO_WFE);
WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, GAUDI_EVENT_HALT_MACHINE);
@@ -3971,11 +3942,15 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset)
WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST,
1 << PSOC_GLOBAL_CONF_SW_ALL_RST_IND_SHIFT);
- }
- dev_info(hdev->dev,
- "Issued HARD reset command, going to wait %dms\n",
- reset_timeout_ms);
+ dev_info(hdev->dev,
+ "Issued HARD reset command, going to wait %dms\n",
+ reset_timeout_ms);
+ } else {
+ dev_info(hdev->dev,
+ "Firmware performs HARD reset, going to wait %dms\n",
+ reset_timeout_ms);
+ }
/*
* After hard reset, we can't poll the BTM_FSM register because the PSOC
@@ -4027,7 +4002,8 @@ static int gaudi_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
VM_DONTCOPY | VM_NORESERVE;
- rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr, dma_addr, size);
+ rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr,
+ (dma_addr - HOST_PHYS_BASE), size);
if (rc)
dev_err(hdev->dev, "dma_mmap_coherent error %d", rc);
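The subtraction is needed because the Gaudi allocation path tags host memory by adding HOST_PHYS_BASE to the bus address it hands out, so the offset has to be stripped again before the address goes back into the generic DMA API. Schematically, simplified from the driver's alloc/mmap pair:

/* alloc side: the device sees host memory at bus address + HOST_PHYS_BASE */
kernel_addr = dma_alloc_coherent(&hdev->pdev->dev, size, &dma_handle, flags);
dma_handle += HOST_PHYS_BASE;

/* mmap side: undo the tag before calling the generic helper */
rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr,
		       dma_handle - HOST_PHYS_BASE, size);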
@@ -7936,7 +7912,7 @@ static u32 gaudi_get_wait_cb_size(struct hl_device *hdev)
}
static u32 gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id,
- u32 size)
+ u32 size, bool eb)
{
struct hl_cb *cb = (struct hl_cb *) data;
struct packet_msg_short *pkt;
@@ -7953,7 +7929,7 @@ static u32 gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id,
ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OP_MASK, 0); /* write the value */
ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_BASE_MASK, 3); /* W_S SOB base */
ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OPCODE_MASK, PACKET_MSG_SHORT);
- ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_EB_MASK, 1);
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_EB_MASK, eb);
ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_RB_MASK, 1);
ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_MB_MASK, 1);
diff --git a/drivers/misc/habanalabs/gaudi/gaudiP.h b/drivers/misc/habanalabs/gaudi/gaudiP.h
index f2d91f4fcffe..a7ab2d7e57d4 100644
--- a/drivers/misc/habanalabs/gaudi/gaudiP.h
+++ b/drivers/misc/habanalabs/gaudi/gaudiP.h
@@ -105,13 +105,6 @@
#define MME_ACC_OFFSET (mmMME1_ACC_BASE - mmMME0_ACC_BASE)
#define SRAM_BANK_OFFSET (mmSRAM_Y0_X1_RTR_BASE - mmSRAM_Y0_X0_RTR_BASE)
-#define PLL_NR_OFFSET 0
-#define PLL_NF_OFFSET (mmPSOC_CPU_PLL_NF - mmPSOC_CPU_PLL_NR)
-#define PLL_OD_OFFSET (mmPSOC_CPU_PLL_OD - mmPSOC_CPU_PLL_NR)
-#define PLL_DIV_FACTOR_0_OFFSET (mmPSOC_CPU_PLL_DIV_FACTOR_0 - \
- mmPSOC_CPU_PLL_NR)
-#define PLL_DIV_SEL_0_OFFSET (mmPSOC_CPU_PLL_DIV_SEL_0 - mmPSOC_CPU_PLL_NR)
-
#define NUM_OF_SOB_IN_BLOCK \
(((mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_2047 - \
mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0) + 4) >> 2)
diff --git a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
index 2e3612e1ee28..88a09d42e111 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
@@ -9,6 +9,7 @@
#include "../include/gaudi/gaudi_coresight.h"
#include "../include/gaudi/asic_reg/gaudi_regs.h"
#include "../include/gaudi/gaudi_masks.h"
+#include "../include/gaudi/gaudi_reg_map.h"
#include <uapi/misc/habanalabs.h>
#define SPMU_SECTION_SIZE MME0_ACC_SPMU_MAX_OFFSET
@@ -874,7 +875,7 @@ int gaudi_debug_coresight(struct hl_device *hdev, void *data)
}
/* Perform read from the device to flush all configuration */
- RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
+ RREG32(mmHW_STATE);
return rc;
}
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index 3e5eb9e3d7bd..63679a747d2c 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -613,12 +613,6 @@ static int goya_early_init(struct hl_device *hdev)
if (rc)
goto free_queue_props;
- if (goya_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
- dev_info(hdev->dev,
- "H/W state is dirty, must reset before initializing\n");
- hdev->asic_funcs->hw_fini(hdev, true);
- }
-
/* Before continuing in the initialization, we need to read the preboot
* version to determine whether we run with a security-enabled firmware
*/
@@ -631,6 +625,12 @@ static int goya_early_init(struct hl_device *hdev)
goto pci_fini;
}
+ if (goya_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
+ dev_info(hdev->dev,
+ "H/W state is dirty, must reset before initializing\n");
+ hdev->asic_funcs->hw_fini(hdev, true);
+ }
+
if (!hdev->pldm) {
val = RREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS);
if (val & PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_SRIOV_EN_MASK)
@@ -694,32 +694,47 @@ static void goya_qman0_set_security(struct hl_device *hdev, bool secure)
static void goya_fetch_psoc_frequency(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
- u32 trace_freq = 0;
- u32 pll_clk = 0;
- u32 div_fctr = RREG32(mmPSOC_PCI_PLL_DIV_FACTOR_1);
- u32 div_sel = RREG32(mmPSOC_PCI_PLL_DIV_SEL_1);
- u32 nr = RREG32(mmPSOC_PCI_PLL_NR);
- u32 nf = RREG32(mmPSOC_PCI_PLL_NF);
- u32 od = RREG32(mmPSOC_PCI_PLL_OD);
-
- if (div_sel == DIV_SEL_REF_CLK || div_sel == DIV_SEL_DIVIDED_REF) {
- if (div_sel == DIV_SEL_REF_CLK)
- trace_freq = PLL_REF_CLK;
- else
- trace_freq = PLL_REF_CLK / (div_fctr + 1);
- } else if (div_sel == DIV_SEL_PLL_CLK ||
- div_sel == DIV_SEL_DIVIDED_PLL) {
- pll_clk = PLL_REF_CLK * (nf + 1) / ((nr + 1) * (od + 1));
- if (div_sel == DIV_SEL_PLL_CLK)
- trace_freq = pll_clk;
- else
- trace_freq = pll_clk / (div_fctr + 1);
+ u32 nr = 0, nf = 0, od = 0, div_fctr = 0, pll_clk, div_sel;
+ u16 pll_freq_arr[HL_PLL_NUM_OUTPUTS], freq;
+ int rc;
+
+ if (hdev->asic_prop.fw_security_disabled) {
+ div_fctr = RREG32(mmPSOC_PCI_PLL_DIV_FACTOR_1);
+ div_sel = RREG32(mmPSOC_PCI_PLL_DIV_SEL_1);
+ nr = RREG32(mmPSOC_PCI_PLL_NR);
+ nf = RREG32(mmPSOC_PCI_PLL_NF);
+ od = RREG32(mmPSOC_PCI_PLL_OD);
+
+ if (div_sel == DIV_SEL_REF_CLK ||
+ div_sel == DIV_SEL_DIVIDED_REF) {
+ if (div_sel == DIV_SEL_REF_CLK)
+ freq = PLL_REF_CLK;
+ else
+ freq = PLL_REF_CLK / (div_fctr + 1);
+ } else if (div_sel == DIV_SEL_PLL_CLK ||
+ div_sel == DIV_SEL_DIVIDED_PLL) {
+ pll_clk = PLL_REF_CLK * (nf + 1) /
+ ((nr + 1) * (od + 1));
+ if (div_sel == DIV_SEL_PLL_CLK)
+ freq = pll_clk;
+ else
+ freq = pll_clk / (div_fctr + 1);
+ } else {
+ dev_warn(hdev->dev,
+ "Received invalid div select value: %d",
+ div_sel);
+ freq = 0;
+ }
} else {
- dev_warn(hdev->dev,
- "Received invalid div select value: %d", div_sel);
+ rc = hl_fw_cpucp_pll_info_get(hdev, PCI_PLL, pll_freq_arr);
+
+ if (rc)
+ return;
+
+ freq = pll_freq_arr[1];
}
- prop->psoc_timestamp_frequency = trace_freq;
+ prop->psoc_timestamp_frequency = freq;
prop->psoc_pci_pll_nr = nr;
prop->psoc_pci_pll_nf = nf;
prop->psoc_pci_pll_od = od;
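
The branch structure above reduces to one relation: the PLL output is
PLL_REF_CLK * (nf + 1) / ((nr + 1) * (od + 1)), optionally post-divided by
(div_fctr + 1) when a divided source is selected. A standalone sketch with
hypothetical register values (the 50 MHz reference clock is an assumption):

#include <stdio.h>

#define PLL_REF_CLK 50 /* MHz, assumed reference clock */

int main(void)
{
	/* hypothetical values read from the NR/NF/OD/DIV_FACTOR registers */
	unsigned int nr = 0, nf = 31, od = 1, div_fctr = 3;
	unsigned int pll_clk, freq;

	/* PLL output frequency */
	pll_clk = PLL_REF_CLK * (nf + 1) / ((nr + 1) * (od + 1)); /* 800 */

	/* DIV_SEL_DIVIDED_PLL case: post-divide the PLL output */
	freq = pll_clk / (div_fctr + 1); /* 200 */

	printf("pll_clk=%u MHz, timestamp freq=%u MHz\n", pll_clk, freq);
	return 0;
}
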
@@ -2704,7 +2719,8 @@ static int goya_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
VM_DONTCOPY | VM_NORESERVE;
- rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr, dma_addr, size);
+ rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr,
+ (dma_addr - HOST_PHYS_BASE), size);
if (rc)
dev_err(hdev->dev, "dma_mmap_coherent error %d", rc);
@@ -5324,7 +5340,7 @@ static u32 goya_get_wait_cb_size(struct hl_device *hdev)
}
static u32 goya_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id,
- u32 size)
+ u32 size, bool eb)
{
return 0;
}
diff --git a/drivers/misc/habanalabs/include/common/hl_boot_if.h b/drivers/misc/habanalabs/include/common/hl_boot_if.h
index e5801ecf0cb2..b637dfd69f6e 100644
--- a/drivers/misc/habanalabs/include/common/hl_boot_if.h
+++ b/drivers/misc/habanalabs/include/common/hl_boot_if.h
@@ -145,11 +145,15 @@
* implemented. This means that FW will
* perform hard reset procedure on
* receiving the halt-machine event.
- * Initialized in: linux
+ * Initialized in: preboot, u-boot, linux
*
* CPU_BOOT_DEV_STS0_PLL_INFO_EN FW retrieval of PLL info is enabled.
* Initialized in: linux
*
+ * CPU_BOOT_DEV_STS0_CLK_GATE_EN Clock Gating enabled.
+ * FW initialized Clock Gating.
+ * Initialized in: preboot
+ *
* CPU_BOOT_DEV_STS0_ENABLED Device status register enabled.
* This is a main indication that the
* running FW populates the device status
@@ -171,6 +175,7 @@
#define CPU_BOOT_DEV_STS0_DRAM_SCR_EN (1 << 9)
#define CPU_BOOT_DEV_STS0_FW_HARD_RST_EN (1 << 10)
#define CPU_BOOT_DEV_STS0_PLL_INFO_EN (1 << 11)
+#define CPU_BOOT_DEV_STS0_CLK_GATE_EN (1 << 13)
#define CPU_BOOT_DEV_STS0_ENABLED (1 << 31)
enum cpu_boot_status {
@@ -204,6 +209,8 @@ enum kmd_msg {
KMD_MSG_GOTO_WFE,
KMD_MSG_FIT_RDY,
KMD_MSG_SKIP_BMC,
+ RESERVED,
+ KMD_MSG_RST_DEV,
};
enum cpu_msg_status {
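
Since STS0 is a plain bitmask and the comment above names
CPU_BOOT_DEV_STS0_ENABLED as the main validity indication, a consumer would
typically gate every other test on that bit. A minimal sketch (the helper
name is hypothetical):

#include <linux/types.h>

/* Sketch only; relies on the CPU_BOOT_DEV_STS0_* masks defined above. */
static bool fw_initialized_clock_gating(u32 sts0)
{
	/* The individual bits are only meaningful once the register is valid */
	if (!(sts0 & CPU_BOOT_DEV_STS0_ENABLED))
		return false;

	return !!(sts0 & CPU_BOOT_DEV_STS0_CLK_GATE_EN);
}
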
diff --git a/drivers/misc/lkdtm/Makefile b/drivers/misc/lkdtm/Makefile
index 1ef7888a12b5..6b888d04392d 100644
--- a/drivers/misc/lkdtm/Makefile
+++ b/drivers/misc/lkdtm/Makefile
@@ -10,6 +10,8 @@ lkdtm-$(CONFIG_LKDTM) += rodata_objcopy.o
lkdtm-$(CONFIG_LKDTM) += usercopy.o
lkdtm-$(CONFIG_LKDTM) += stackleak.o
lkdtm-$(CONFIG_LKDTM) += cfi.o
+lkdtm-$(CONFIG_LKDTM) += fortify.o
+lkdtm-$(CONFIG_PPC_BOOK3S_64) += powerpc.o
KASAN_SANITIZE_rodata.o := n
KASAN_SANITIZE_stackleak.o := n
diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
index a0675d4154d2..110f5a8538e9 100644
--- a/drivers/misc/lkdtm/bugs.c
+++ b/drivers/misc/lkdtm/bugs.c
@@ -482,3 +482,53 @@ noinline void lkdtm_CORRUPT_PAC(void)
pr_err("XFAIL: this test is arm64-only\n");
#endif
}
+
+void lkdtm_FORTIFY_OBJECT(void)
+{
+ struct target {
+ char a[10];
+ } target[2] = {};
+ int result;
+
+ /*
+ * Using volatile prevents the compiler from determining the value of
+ * 'size' at compile time. Without that, we would get a compile error
+ * rather than a runtime error.
+ */
+ volatile int size = 11;
+
+ pr_info("trying to read past the end of a struct\n");
+
+ result = memcmp(&target[0], &target[1], size);
+
+ /* Print result to prevent the code from being eliminated */
+ pr_err("FAIL: fortify did not catch an object overread!\n"
+ "\"%d\" was the memcmp result.\n", result);
+}
+
+void lkdtm_FORTIFY_SUBOBJECT(void)
+{
+ struct target {
+ char a[10];
+ char b[10];
+ } target;
+ char *src;
+
+ src = kmalloc(20, GFP_KERNEL);
+ strscpy(src, "over ten bytes", 20);
+
+ pr_info("trying to strcpy past the end of a member of a struct\n");
+
+ /*
+ * strncpy(target.a, src, 20); will hit a compile error because the
+ * compiler knows at build time that sizeof(target.a) < 20 bytes. Use strcpy()
+ * to force a runtime error.
+ */
+ strcpy(target.a, src);
+
+ /* Use target.a to prevent the code from being eliminated */
+ pr_err("FAIL: fortify did not catch an sub-object overrun!\n"
+ "\"%s\" was copied.\n", target.a);
+
+ kfree(src);
+}
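
Both tests above hinge on when the length becomes known: a compile-time
constant lets FORTIFY_SOURCE reject the call at build time, while a value the
compiler cannot see defers the check to the fortified runtime helper. A
standalone sketch of that distinction (not part of lkdtm):

#include <string.h>

void fortify_demo(const char *src)
{
	char buf[10];
	volatile size_t n = 11; /* volatile hides the value from the compiler */

	/*
	 * memcpy(buf, src, 11) would fail to build under FORTIFY_SOURCE,
	 * because sizeof(buf) and the length are both constants. With 'n',
	 * the overflow is only caught by the runtime check.
	 */
	memcpy(buf, src, n);
}
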
diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c
index 97803f213d9d..b2aff4d87c01 100644
--- a/drivers/misc/lkdtm/core.c
+++ b/drivers/misc/lkdtm/core.c
@@ -117,6 +117,8 @@ static const struct crashtype crashtypes[] = {
CRASHTYPE(UNSET_SMEP),
CRASHTYPE(CORRUPT_PAC),
CRASHTYPE(UNALIGNED_LOAD_STORE_WRITE),
+ CRASHTYPE(FORTIFY_OBJECT),
+ CRASHTYPE(FORTIFY_SUBOBJECT),
CRASHTYPE(OVERWRITE_ALLOCATION),
CRASHTYPE(WRITE_AFTER_FREE),
CRASHTYPE(READ_AFTER_FREE),
@@ -173,9 +175,13 @@ static const struct crashtype crashtypes[] = {
CRASHTYPE(USERCOPY_KERNEL),
CRASHTYPE(STACKLEAK_ERASING),
CRASHTYPE(CFI_FORWARD_PROTO),
+ CRASHTYPE(FORTIFIED_STRSCPY),
#ifdef CONFIG_X86_32
CRASHTYPE(DOUBLE_FAULT),
#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+ CRASHTYPE(PPC_SLB_MULTIHIT),
+#endif
};
diff --git a/drivers/misc/lkdtm/fortify.c b/drivers/misc/lkdtm/fortify.c
new file mode 100644
index 000000000000..faf29cf04baa
--- /dev/null
+++ b/drivers/misc/lkdtm/fortify.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 Francis Laniel <laniel_francis@privacyrequired.com>
+ *
+ * Add tests related to fortified functions in this file.
+ */
+#include "lkdtm.h"
+#include <linux/string.h>
+#include <linux/slab.h>
+
+
+/*
+ * Calls fortified strscpy to test that it returns the same result as vanilla
+ * strscpy and generates a panic because there is a write overflow (i.e. src
+ * length is greater than dst length).
+ */
+void lkdtm_FORTIFIED_STRSCPY(void)
+{
+ char *src;
+ char dst[5];
+
+ struct {
+ union {
+ char big[10];
+ char src[5];
+ };
+ } weird = { .big = "hello!" };
+ char weird_dst[sizeof(weird.src) + 1];
+
+ src = kstrdup("foobar", GFP_KERNEL);
+
+ if (src == NULL)
+ return;
+
+ /* Vanilla strscpy returns -E2BIG if size is 0. */
+ if (strscpy(dst, src, 0) != -E2BIG)
+ pr_warn("FAIL: strscpy() of 0 length did not return -E2BIG\n");
+
+ /* Vanilla strscpy returns -E2BIG if src is truncated. */
+ if (strscpy(dst, src, sizeof(dst)) != -E2BIG)
+ pr_warn("FAIL: strscpy() did not return -E2BIG while src is truncated\n");
+
+ /* After above call, dst must contain "foob" because src was truncated. */
+ if (strncmp(dst, "foob", sizeof(dst)) != 0)
+ pr_warn("FAIL: after strscpy() dst does not contain \"foob\" but \"%s\"\n",
+ dst);
+
+ /* Shrink src so the strscpy() below succeeds. */
+ src[3] = '\0';
+
+ /*
+ * Vanilla strscpy returns the number of characters copied if everything
+ * goes well.
+ */
+ if (strscpy(dst, src, sizeof(dst)) != 3)
+ pr_warn("FAIL: strscpy() did not return 3 while src was copied entirely\n");
+
+ /* After above call, dst must contain "foo" because src was copied. */
+ if (strncmp(dst, "foo", sizeof(dst)) != 0)
+ pr_warn("FAIL: after strscpy() dst does not contain \"foo\" but \"%s\"\n",
+ dst);
+
+ /* Test when src is embedded inside a union. */
+ strscpy(weird_dst, weird.src, sizeof(weird_dst));
+
+ if (strcmp(weird_dst, "hello") != 0)
+ pr_warn("FAIL: after strscpy() weird_dst does not contain \"hello\" but \"%s\"\n",
+ weird_dst);
+
+ /* Restore src to its initial value. */
+ src[3] = 'b';
+
+ /*
+ * Use strlen here so size cannot be known at compile time and there is
+ * a runtime write overflow.
+ */
+ strscpy(dst, src, strlen(src));
+
+ pr_warn("FAIL: No overflow in above strscpy()\n");
+
+ kfree(src);
+}
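
For reference, the vanilla strscpy() contract exercised above, condensed into
one sketch: -E2BIG on zero size or truncation, otherwise the number of
characters copied (excluding the trailing NUL):

#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/string.h>

static void strscpy_contract(void)
{
	char dst[5];

	/* size == 0: nothing is copied, -E2BIG is returned */
	WARN_ON(strscpy(dst, "foobar", 0) != -E2BIG);

	/* source too long: truncated copy ("foob"), -E2BIG is returned */
	WARN_ON(strscpy(dst, "foobar", sizeof(dst)) != -E2BIG);

	/* source fits: returns strlen(src) */
	WARN_ON(strscpy(dst, "foo", sizeof(dst)) != 3);
}
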
diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h
index 6dec4c9b442f..5ae48c64df24 100644
--- a/drivers/misc/lkdtm/lkdtm.h
+++ b/drivers/misc/lkdtm/lkdtm.h
@@ -6,7 +6,7 @@
#include <linux/kernel.h>
-/* lkdtm_bugs.c */
+/* bugs.c */
void __init lkdtm_bugs_init(int *recur_param);
void lkdtm_PANIC(void);
void lkdtm_BUG(void);
@@ -32,8 +32,10 @@ void lkdtm_STACK_GUARD_PAGE_TRAILING(void);
void lkdtm_UNSET_SMEP(void);
void lkdtm_DOUBLE_FAULT(void);
void lkdtm_CORRUPT_PAC(void);
+void lkdtm_FORTIFY_OBJECT(void);
+void lkdtm_FORTIFY_SUBOBJECT(void);
-/* lkdtm_heap.c */
+/* heap.c */
void __init lkdtm_heap_init(void);
void __exit lkdtm_heap_exit(void);
void lkdtm_OVERWRITE_ALLOCATION(void);
@@ -45,7 +47,7 @@ void lkdtm_SLAB_FREE_DOUBLE(void);
void lkdtm_SLAB_FREE_CROSS(void);
void lkdtm_SLAB_FREE_PAGE(void);
-/* lkdtm_perms.c */
+/* perms.c */
void __init lkdtm_perms_init(void);
void lkdtm_WRITE_RO(void);
void lkdtm_WRITE_RO_AFTER_INIT(void);
@@ -60,7 +62,7 @@ void lkdtm_EXEC_NULL(void);
void lkdtm_ACCESS_USERSPACE(void);
void lkdtm_ACCESS_NULL(void);
-/* lkdtm_refcount.c */
+/* refcount.c */
void lkdtm_REFCOUNT_INC_OVERFLOW(void);
void lkdtm_REFCOUNT_ADD_OVERFLOW(void);
void lkdtm_REFCOUNT_INC_NOT_ZERO_OVERFLOW(void);
@@ -81,10 +83,10 @@ void lkdtm_REFCOUNT_SUB_AND_TEST_SATURATED(void);
void lkdtm_REFCOUNT_TIMING(void);
void lkdtm_ATOMIC_TIMING(void);
-/* lkdtm_rodata.c */
+/* rodata.c */
void lkdtm_rodata_do_nothing(void);
-/* lkdtm_usercopy.c */
+/* usercopy.c */
void __init lkdtm_usercopy_init(void);
void __exit lkdtm_usercopy_exit(void);
void lkdtm_USERCOPY_HEAP_SIZE_TO(void);
@@ -96,10 +98,16 @@ void lkdtm_USERCOPY_STACK_FRAME_FROM(void);
void lkdtm_USERCOPY_STACK_BEYOND(void);
void lkdtm_USERCOPY_KERNEL(void);
-/* lkdtm_stackleak.c */
+/* stackleak.c */
void lkdtm_STACKLEAK_ERASING(void);
/* cfi.c */
void lkdtm_CFI_FORWARD_PROTO(void);
+/* fortify.c */
+void lkdtm_FORTIFIED_STRSCPY(void);
+
+/* powerpc.c */
+void lkdtm_PPC_SLB_MULTIHIT(void);
+
#endif
diff --git a/drivers/misc/lkdtm/powerpc.c b/drivers/misc/lkdtm/powerpc.c
new file mode 100644
index 000000000000..077c9f9ed8d0
--- /dev/null
+++ b/drivers/misc/lkdtm/powerpc.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "lkdtm.h"
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <asm/mmu.h>
+
+/* Inserts new slb entries */
+static void insert_slb_entry(unsigned long p, int ssize, int page_size)
+{
+ unsigned long flags;
+
+ flags = SLB_VSID_KERNEL | mmu_psize_defs[page_size].sllp;
+ preempt_disable();
+
+ asm volatile("slbmte %0,%1" :
+ : "r" (mk_vsid_data(p, ssize, flags)),
+ "r" (mk_esid_data(p, ssize, SLB_NUM_BOLTED))
+ : "memory");
+
+ asm volatile("slbmte %0,%1" :
+ : "r" (mk_vsid_data(p, ssize, flags)),
+ "r" (mk_esid_data(p, ssize, SLB_NUM_BOLTED + 1))
+ : "memory");
+ preempt_enable();
+}
+
+/* Inject SLB multihit on a vmalloc-ed address, i.e. 0xD00... */
+static int inject_vmalloc_slb_multihit(void)
+{
+ char *p;
+
+ p = vmalloc(PAGE_SIZE);
+ if (!p)
+ return -ENOMEM;
+
+ insert_slb_entry((unsigned long)p, MMU_SEGSIZE_1T, mmu_vmalloc_psize);
+ /*
+ * This triggers an exception. If it is handled correctly, we must
+ * recover from this error.
+ */
+ p[0] = '!';
+ vfree(p);
+ return 0;
+}
+
+/* Inject SLB multihit on a kmalloc-ed address, i.e. 0xC00... */
+static int inject_kmalloc_slb_multihit(void)
+{
+ char *p;
+
+ p = kmalloc(2048, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ insert_slb_entry((unsigned long)p, MMU_SEGSIZE_1T, mmu_linear_psize);
+ /*
+ * This triggers an exception. If it is handled correctly, we must
+ * recover from this error.
+ */
+ p[0] = '!';
+ kfree(p);
+ return 0;
+}
+
+/*
+ * A few initial SLB entries are bolted. Add a test to inject
+ * multihit in bolted entry 0.
+ */
+static void insert_dup_slb_entry_0(void)
+{
+ unsigned long test_address = PAGE_OFFSET, *test_ptr;
+ unsigned long esid, vsid;
+ unsigned long i = 0;
+
+ test_ptr = (unsigned long *)test_address;
+ preempt_disable();
+
+ asm volatile("slbmfee %0,%1" : "=r" (esid) : "r" (i));
+ asm volatile("slbmfev %0,%1" : "=r" (vsid) : "r" (i));
+
+ /* for i != 0 we would need to mask out the old entry number */
+ asm volatile("slbmte %0,%1" :
+ : "r" (vsid),
+ "r" (esid | SLB_NUM_BOLTED)
+ : "memory");
+
+ asm volatile("slbmfee %0,%1" : "=r" (esid) : "r" (i));
+ asm volatile("slbmfev %0,%1" : "=r" (vsid) : "r" (i));
+
+ /* for i != 0 we would need to mask out the old entry number */
+ asm volatile("slbmte %0,%1" :
+ : "r" (vsid),
+ "r" (esid | (SLB_NUM_BOLTED + 1))
+ : "memory");
+
+ pr_info("%s accessing test address 0x%lx: 0x%lx\n",
+ __func__, test_address, *test_ptr);
+
+ preempt_enable();
+}
+
+void lkdtm_PPC_SLB_MULTIHIT(void)
+{
+ if (!radix_enabled()) {
+ pr_info("Injecting SLB multihit errors\n");
+ /*
+ * These need not be separate tests; they do pretty much the
+ * same thing. In any case we must recover from the errors
+ * introduced by these functions; the machine would not
+ * survive these tests if the errors were not handled.
+ */
+ inject_vmalloc_slb_multihit();
+ inject_kmalloc_slb_multihit();
+ insert_dup_slb_entry_0();
+ pr_info("Recovered from SLB multihit errors\n");
+ } else {
+ pr_err("XFAIL: This test is for ppc64 and with hash mode MMU only\n");
+ }
+}
diff --git a/drivers/misc/ocxl/context.c b/drivers/misc/ocxl/context.c
index c21f65a5c762..9eb0d93b01c6 100644
--- a/drivers/misc/ocxl/context.c
+++ b/drivers/misc/ocxl/context.c
@@ -70,6 +70,7 @@ int ocxl_context_attach(struct ocxl_context *ctx, u64 amr, struct mm_struct *mm)
{
int rc;
unsigned long pidr = 0;
+ struct pci_dev *dev;
// Locks both status & tidr
mutex_lock(&ctx->status_mutex);
@@ -81,8 +82,9 @@ int ocxl_context_attach(struct ocxl_context *ctx, u64 amr, struct mm_struct *mm)
if (mm)
pidr = mm->context.id;
+ dev = to_pci_dev(ctx->afu->fn->dev.parent);
rc = ocxl_link_add_pe(ctx->afu->fn->link, ctx->pasid, pidr, ctx->tidr,
- amr, mm, xsl_fault_error, ctx);
+ amr, pci_dev_id(dev), mm, xsl_fault_error, ctx);
if (rc)
goto out;
diff --git a/drivers/misc/ocxl/link.c b/drivers/misc/ocxl/link.c
index fd73d3bc0eb6..ab039c115381 100644
--- a/drivers/misc/ocxl/link.c
+++ b/drivers/misc/ocxl/link.c
@@ -2,8 +2,10 @@
// Copyright 2017 IBM Corp.
#include <linux/sched/mm.h>
#include <linux/mutex.h>
+#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/mmu_context.h>
+#include <linux/mmu_notifier.h>
#include <asm/copro.h>
#include <asm/pnv-ocxl.h>
#include <asm/xive.h>
@@ -33,6 +35,7 @@
#define SPA_PE_VALID 0x80000000
+struct ocxl_link;
struct pe_data {
struct mm_struct *mm;
@@ -41,6 +44,8 @@ struct pe_data {
/* opaque pointer to be passed to the above callback */
void *xsl_err_data;
struct rcu_head rcu;
+ struct ocxl_link *link;
+ struct mmu_notifier mmu_notifier;
};
struct spa {
@@ -83,6 +88,8 @@ struct ocxl_link {
int domain;
int bus;
int dev;
+ void __iomem *arva; /* ATSD register virtual address */
+ spinlock_t atsd_lock; /* to serialize shootdowns */
atomic_t irq_available;
struct spa *spa;
void *platform_data;
@@ -388,6 +395,7 @@ static int alloc_link(struct pci_dev *dev, int PE_mask, struct ocxl_link **out_l
link->bus = dev->bus->number;
link->dev = PCI_SLOT(dev->devfn);
atomic_set(&link->irq_available, MAX_IRQ_PER_LINK);
+ spin_lock_init(&link->atsd_lock);
rc = alloc_spa(dev, link);
if (rc)
@@ -403,6 +411,13 @@ static int alloc_link(struct pci_dev *dev, int PE_mask, struct ocxl_link **out_l
if (rc)
goto err_xsl_irq;
+ /* If link->arva is not defined, MMIO registers are not used to
+ * generate TLB invalidates and PowerBus snooping is enabled.
+ * Otherwise, PowerBus snooping is disabled and TLB invalidates are
+ * initiated using MMIO registers.
+ */
+ pnv_ocxl_map_lpar(dev, mfspr(SPRN_LPID), 0, &link->arva);
+
*out_link = link;
return 0;
@@ -454,6 +469,11 @@ static void release_xsl(struct kref *ref)
{
struct ocxl_link *link = container_of(ref, struct ocxl_link, ref);
+ if (link->arva) {
+ pnv_ocxl_unmap_lpar(link->arva);
+ link->arva = NULL;
+ }
+
list_del(&link->list);
/* call platform code before releasing data */
pnv_ocxl_spa_release(link->platform_data);
@@ -470,6 +490,27 @@ void ocxl_link_release(struct pci_dev *dev, void *link_handle)
}
EXPORT_SYMBOL_GPL(ocxl_link_release);
+static void invalidate_range(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ struct pe_data *pe_data = container_of(mn, struct pe_data, mmu_notifier);
+ struct ocxl_link *link = pe_data->link;
+ unsigned long addr, pid, page_size = PAGE_SIZE;
+
+ pid = mm->context.id;
+ trace_ocxl_mmu_notifier_range(start, end, pid);
+
+ spin_lock(&link->atsd_lock);
+ for (addr = start; addr < end; addr += page_size)
+ pnv_ocxl_tlb_invalidate(link->arva, pid, addr, page_size);
+ spin_unlock(&link->atsd_lock);
+}
+
+static const struct mmu_notifier_ops ocxl_mmu_notifier_ops = {
+ .invalidate_range = invalidate_range,
+};
+
static u64 calculate_cfg_state(bool kernel)
{
u64 state;
@@ -494,7 +535,7 @@ static u64 calculate_cfg_state(bool kernel)
}
int ocxl_link_add_pe(void *link_handle, int pasid, u32 pidr, u32 tidr,
- u64 amr, struct mm_struct *mm,
+ u64 amr, u16 bdf, struct mm_struct *mm,
void (*xsl_err_cb)(void *data, u64 addr, u64 dsisr),
void *xsl_err_data)
{
@@ -526,9 +567,13 @@ int ocxl_link_add_pe(void *link_handle, int pasid, u32 pidr, u32 tidr,
pe_data->mm = mm;
pe_data->xsl_err_cb = xsl_err_cb;
pe_data->xsl_err_data = xsl_err_data;
+ pe_data->link = link;
+ pe_data->mmu_notifier.ops = &ocxl_mmu_notifier_ops;
memset(pe, 0, sizeof(struct ocxl_process_element));
pe->config_state = cpu_to_be64(calculate_cfg_state(pidr == 0));
+ pe->pasid = cpu_to_be32(pasid << (31 - 19));
+ pe->bdf = cpu_to_be16(bdf);
pe->lpid = cpu_to_be32(mfspr(SPRN_LPID));
pe->pid = cpu_to_be32(pidr);
pe->tid = cpu_to_be32(tidr);
@@ -540,8 +585,17 @@ int ocxl_link_add_pe(void *link_handle, int pasid, u32 pidr, u32 tidr,
* by the nest MMU. If we have a kernel context, TLBIs are
* already global.
*/
- if (mm)
+ if (mm) {
mm_context_add_copro(mm);
+ if (link->arva) {
+ /* Use MMIO registers for the TLB Invalidate
+ * operations.
+ */
+ trace_ocxl_init_mmu_notifier(pasid, mm->context.id);
+ mmu_notifier_register(&pe_data->mmu_notifier, mm);
+ }
+ }
+
/*
* Barrier is to make sure PE is visible in the SPA before it
* is used by the device. It also helps with the global TLBI
@@ -672,6 +726,18 @@ int ocxl_link_remove_pe(void *link_handle, int pasid)
WARN(1, "Couldn't find pe data when removing PE\n");
} else {
if (pe_data->mm) {
+ if (link->arva) {
+ trace_ocxl_release_mmu_notifier(pasid,
+ pe_data->mm->context.id);
+ mmu_notifier_unregister(&pe_data->mmu_notifier,
+ pe_data->mm);
+ spin_lock(&link->atsd_lock);
+ pnv_ocxl_tlb_invalidate(link->arva,
+ pe_data->mm->context.id,
+ 0ull,
+ PAGE_SIZE);
+ spin_unlock(&link->atsd_lock);
+ }
mm_context_remove_copro(pe_data->mm);
mmdrop(pe_data->mm);
}
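
The notifier wiring above works because struct mmu_notifier is embedded in
pe_data, so the callback can recover its context with container_of(). The
pattern in isolation, as a sketch with hypothetical names:

#include <linux/kernel.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>

struct my_ctx {
	int id;
	struct mmu_notifier mn; /* embedded, not a pointer */
};

static void my_invalidate(struct mmu_notifier *mn, struct mm_struct *mm,
			  unsigned long start, unsigned long end)
{
	/* Recover the enclosing structure from the embedded member */
	struct my_ctx *ctx = container_of(mn, struct my_ctx, mn);

	pr_debug("ctx %d: invalidate [%lx, %lx)\n", ctx->id, start, end);
}
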
diff --git a/drivers/misc/ocxl/ocxl_internal.h b/drivers/misc/ocxl/ocxl_internal.h
index 0bad0a123af6..10125a22d5a5 100644
--- a/drivers/misc/ocxl/ocxl_internal.h
+++ b/drivers/misc/ocxl/ocxl_internal.h
@@ -84,13 +84,16 @@ struct ocxl_context {
struct ocxl_process_element {
__be64 config_state;
- __be32 reserved1[11];
+ __be32 pasid;
+ __be16 bdf;
+ __be16 reserved1;
+ __be32 reserved2[9];
__be32 lpid;
__be32 tid;
__be32 pid;
- __be32 reserved2[10];
+ __be32 reserved3[10];
__be64 amr;
- __be32 reserved3[3];
+ __be32 reserved4[3];
__be32 software_state;
};
diff --git a/drivers/misc/ocxl/trace.h b/drivers/misc/ocxl/trace.h
index 17e21cb2addd..a33a5094ff6c 100644
--- a/drivers/misc/ocxl/trace.h
+++ b/drivers/misc/ocxl/trace.h
@@ -8,6 +8,70 @@
#include <linux/tracepoint.h>
+
+TRACE_EVENT(ocxl_mmu_notifier_range,
+ TP_PROTO(unsigned long start, unsigned long end, unsigned long pidr),
+ TP_ARGS(start, end, pidr),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, start)
+ __field(unsigned long, end)
+ __field(unsigned long, pidr)
+ ),
+
+ TP_fast_assign(
+ __entry->start = start;
+ __entry->end = end;
+ __entry->pidr = pidr;
+ ),
+
+ TP_printk("start=0x%lx end=0x%lx pidr=0x%lx",
+ __entry->start,
+ __entry->end,
+ __entry->pidr
+ )
+);
+
+TRACE_EVENT(ocxl_init_mmu_notifier,
+ TP_PROTO(int pasid, unsigned long pidr),
+ TP_ARGS(pasid, pidr),
+
+ TP_STRUCT__entry(
+ __field(int, pasid)
+ __field(unsigned long, pidr)
+ ),
+
+ TP_fast_assign(
+ __entry->pasid = pasid;
+ __entry->pidr = pidr;
+ ),
+
+ TP_printk("pasid=%d, pidr=0x%lx",
+ __entry->pasid,
+ __entry->pidr
+ )
+);
+
+TRACE_EVENT(ocxl_release_mmu_notifier,
+ TP_PROTO(int pasid, unsigned long pidr),
+ TP_ARGS(pasid, pidr),
+
+ TP_STRUCT__entry(
+ __field(int, pasid)
+ __field(unsigned long, pidr)
+ ),
+
+ TP_fast_assign(
+ __entry->pasid = pasid;
+ __entry->pidr = pidr;
+ ),
+
+ TP_printk("pasid=%d, pidr=0x%lx",
+ __entry->pasid,
+ __entry->pidr
+ )
+);
+
DECLARE_EVENT_CLASS(ocxl_context,
TP_PROTO(pid_t pid, void *spa, int pasid, u32 pidr, u32 tidr),
TP_ARGS(pid, spa, pasid, pidr, tidr),
diff --git a/drivers/misc/pvpanic.c b/drivers/misc/pvpanic.c
index 951b37da5e3c..41cab297d66e 100644
--- a/drivers/misc/pvpanic.c
+++ b/drivers/misc/pvpanic.c
@@ -55,12 +55,23 @@ static int pvpanic_mmio_probe(struct platform_device *pdev)
struct resource *res;
res = platform_get_mem_or_io(pdev, 0);
- if (res && resource_type(res) == IORESOURCE_IO)
+ if (!res)
+ return -EINVAL;
+
+ switch (resource_type(res)) {
+ case IORESOURCE_IO:
base = devm_ioport_map(dev, res->start, resource_size(res));
- else
+ if (!base)
+ return -ENOMEM;
+ break;
+ case IORESOURCE_MEM:
base = devm_ioremap_resource(dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+ break;
+ default:
+ return -EINVAL;
+ }
atomic_notifier_chain_register(&panic_notifier_list,
&pvpanic_panic_nb);
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index de7cb0369c30..002426e3cf76 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -384,8 +384,10 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
"merging was advertised but not possible");
blk_queue_max_segments(mq->queue, mmc_get_max_segments(host));
- if (mmc_card_mmc(card))
+ if (mmc_card_mmc(card) && card->ext_csd.data_sector_size) {
block_size = card->ext_csd.data_sector_size;
+ WARN_ON(block_size != 512 && block_size != 4096);
+ }
blk_queue_logical_block_size(mq->queue, block_size);
/*
diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c
index 44bea5e4aeda..b23773583179 100644
--- a/drivers/mmc/core/sdio_cis.c
+++ b/drivers/mmc/core/sdio_cis.c
@@ -20,6 +20,8 @@
#include "sdio_cis.h"
#include "sdio_ops.h"
+#define SDIO_READ_CIS_TIMEOUT_MS (10 * 1000) /* 10s */
+
static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func,
const unsigned char *buf, unsigned size)
{
@@ -274,6 +276,8 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
do {
unsigned char tpl_code, tpl_link;
+ unsigned long timeout = jiffies +
+ msecs_to_jiffies(SDIO_READ_CIS_TIMEOUT_MS);
ret = mmc_io_rw_direct(card, 0, 0, ptr++, 0, &tpl_code);
if (ret)
@@ -326,6 +330,8 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
prev = &this->next;
if (ret == -ENOENT) {
+ if (time_after(jiffies, timeout))
+ break;
/* warn about unknown tuples */
pr_warn_ratelimited("%s: queuing unknown"
" CIS tuple 0x%02x (%u bytes)\n",
diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c
index bbf3496f4495..f9780c65ebe9 100644
--- a/drivers/mmc/host/sdhci-brcmstb.c
+++ b/drivers/mmc/host/sdhci-brcmstb.c
@@ -314,11 +314,7 @@ err_clk:
static void sdhci_brcmstb_shutdown(struct platform_device *pdev)
{
- int ret;
-
- ret = sdhci_pltfm_unregister(pdev);
- if (ret)
- dev_err(&pdev->dev, "failed to shutdown\n");
+ sdhci_pltfm_suspend(&pdev->dev);
}
MODULE_DEVICE_TABLE(of, sdhci_brcm_of_match);
diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c
index 4b673792b5a4..d90020ed3622 100644
--- a/drivers/mmc/host/sdhci-of-dwcmshc.c
+++ b/drivers/mmc/host/sdhci-of-dwcmshc.c
@@ -16,6 +16,8 @@
#include "sdhci-pltfm.h"
+#define SDHCI_DWCMSHC_ARG2_STUFF GENMASK(31, 16)
+
/* DWCMSHC specific Mode Select value */
#define DWCMSHC_CTRL_HS400 0x7
@@ -49,6 +51,29 @@ static void dwcmshc_adma_write_desc(struct sdhci_host *host, void **desc,
sdhci_adma_write_desc(host, desc, addr, len, cmd);
}
+static void dwcmshc_check_auto_cmd23(struct mmc_host *mmc,
+ struct mmc_request *mrq)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ /*
+ * Whether V4 is enabled or not, the ARGUMENT2 register is a 32-bit
+ * block count register which doesn't support the stuff bits of the
+ * CMD23 argument on the dwcmshc host controller.
+ */
+ if (mrq->sbc && (mrq->sbc->arg & SDHCI_DWCMSHC_ARG2_STUFF))
+ host->flags &= ~SDHCI_AUTO_CMD23;
+ else
+ host->flags |= SDHCI_AUTO_CMD23;
+}
+
+static void dwcmshc_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ dwcmshc_check_auto_cmd23(mmc, mrq);
+
+ sdhci_request(mmc, mrq);
+}
+
static void dwcmshc_set_uhs_signaling(struct sdhci_host *host,
unsigned int timing)
{
@@ -133,6 +158,8 @@ static int dwcmshc_probe(struct platform_device *pdev)
sdhci_get_of_property(pdev);
+ host->mmc_host_ops.request = dwcmshc_request;
+
err = sdhci_add_host(host);
if (err)
goto err_clk;
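
The fix above keys off the upper half of the CMD23 argument: ARGUMENT2 can
only hold the block count itself, so any SET_BLOCK_COUNT argument that uses
bits 31:16 must fall back to a manually issued CMD23. The decision, condensed
into a sketch:

#include <linux/bits.h>
#include <linux/types.h>

#define ARG2_STUFF GENMASK(31, 16) /* bits ARGUMENT2 cannot forward */

/* Sketch: true if the controller may auto-issue CMD23 for this argument */
static bool auto_cmd23_ok(u32 sbc_arg)
{
	return !(sbc_arg & ARG2_STUFF);
}
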
diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h
index 6301b81cf573..9bd717ff784b 100644
--- a/drivers/mmc/host/sdhci-pltfm.h
+++ b/drivers/mmc/host/sdhci-pltfm.h
@@ -111,8 +111,13 @@ static inline void *sdhci_pltfm_priv(struct sdhci_pltfm_host *host)
return host->private;
}
+extern const struct dev_pm_ops sdhci_pltfm_pmops;
+#ifdef CONFIG_PM_SLEEP
int sdhci_pltfm_suspend(struct device *dev);
int sdhci_pltfm_resume(struct device *dev);
-extern const struct dev_pm_ops sdhci_pltfm_pmops;
+#else
+static inline int sdhci_pltfm_suspend(struct device *dev) { return 0; }
+static inline int sdhci_pltfm_resume(struct device *dev) { return 0; }
+#endif
#endif /* _DRIVERS_MMC_SDHCI_PLTFM_H */
diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c
index c67611fdaa8a..d19eef5f725f 100644
--- a/drivers/mmc/host/sdhci-xenon.c
+++ b/drivers/mmc/host/sdhci-xenon.c
@@ -168,7 +168,12 @@ static void xenon_reset_exit(struct sdhci_host *host,
/* Disable tuning request and auto-retuning again */
xenon_retune_setup(host);
- xenon_set_acg(host, true);
+ /*
+ * The ACG should be turned off at early init time in order
+ * to solve a possible issue with the 1.8V regulator stabilization.
+ * The feature is re-enabled at a later stage.
+ */
+ xenon_set_acg(host, false);
xenon_set_sdclk_off_idle(host, sdhc_id, false);
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 6ddab796216d..8bab6f8718a9 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -152,6 +152,7 @@ config SM_FTL
tristate "SmartMedia/xD new translation layer"
depends on BLOCK
select MTD_BLKDEVS
+ select MTD_NAND_CORE
select MTD_NAND_ECC_SW_HAMMING
help
This enables EXPERIMENTAL R/W support for SmartMedia/xD
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index a030792115bc..5b0ae5ddad74 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -816,7 +816,7 @@ static void doc_read_page_finish(struct docg3 *docg3)
/**
* calc_block_sector - Calculate blocks, pages and ofs.
-
+ *
* @from: offset in flash
* @block0: first plane block index calculated
* @block1: second plane block index calculated
@@ -1783,10 +1783,9 @@ static int __init doc_set_driver_info(int chip_id, struct mtd_info *mtd)
/**
* doc_probe_device - Check if a device is available
- * @base: the io space where the device is probed
+ * @cascade: the cascade of chips this devices will belong to
* @floor: the floor of the probed device
* @dev: the device
- * @cascade: the cascade of chips this devices will belong to
*
* Checks whether a device at the specified IO range and floor is available.
*
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index 087b5e86d1bf..cfd170946ba4 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -1,19 +1,19 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* Copyright (c) ???? Jochen Schäuble <psionic@psionic.de>
* Copyright (c) 2003-2004 Joern Engel <joern@wh.fh-wedel.de>
*
* Usage:
*
* one command line parameter per device, each in the form:
- * phram=<name>,<start>,<len>
+ * phram=<name>,<start>,<len>[,<erasesize>]
* <name> may be up to 63 characters.
- * <start> and <len> can be octal, decimal or hexadecimal. If followed
+ * <start>, <len>, and <erasesize> can be octal, decimal or hexadecimal. If followed
* by "ki", "Mi" or "Gi", the numbers will be interpreted as kilo, mega or
- * gigabytes.
+ * gigabytes. <erasesize> is optional and defaults to PAGE_SIZE.
*
* Example:
- * phram=swap,64Mi,128Mi phram=test,900Mi,1Mi
+ * phram=swap,64Mi,128Mi phram=test,900Mi,1Mi,64Ki
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -26,6 +26,7 @@
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
+#include <asm/div64.h>
struct phram_mtd_list {
struct mtd_info mtd;
@@ -88,7 +89,7 @@ static void unregister_devices(void)
}
}
-static int register_device(char *name, phys_addr_t start, size_t len)
+static int register_device(char *name, phys_addr_t start, size_t len, uint32_t erasesize)
{
struct phram_mtd_list *new;
int ret = -ENOMEM;
@@ -115,7 +116,7 @@ static int register_device(char *name, phys_addr_t start, size_t len)
new->mtd._write = phram_write;
new->mtd.owner = THIS_MODULE;
new->mtd.type = MTD_RAM;
- new->mtd.erasesize = PAGE_SIZE;
+ new->mtd.erasesize = erasesize;
new->mtd.writesize = 1;
ret = -EAGAIN;
@@ -204,22 +205,23 @@ static inline void kill_final_newline(char *str)
static int phram_init_called;
/*
* This shall contain the module parameter if any. It is of the form:
- * - phram=<device>,<address>,<size> for module case
- * - phram.phram=<device>,<address>,<size> for built-in case
- * We leave 64 bytes for the device name, 20 for the address and 20 for the
- * size.
- * Example: phram.phram=rootfs,0xa0000000,512Mi
+ * - phram=<device>,<address>,<size>[,<erasesize>] for module case
+ * - phram.phram=<device>,<address>,<size>[,<erasesize>] for built-in case
+ * We leave 64 bytes for the device name, 20 for the address, 20 for the
+ * size and 20 for the erasesize.
+ * Example: phram.phram=rootfs,0xa0000000,512Mi,65536
*/
-static char phram_paramline[64 + 20 + 20];
+static char phram_paramline[64 + 20 + 20 + 20];
#endif
static int phram_setup(const char *val)
{
- char buf[64 + 20 + 20], *str = buf;
- char *token[3];
+ char buf[64 + 20 + 20 + 20], *str = buf;
+ char *token[4];
char *name;
uint64_t start;
uint64_t len;
+ uint64_t erasesize = PAGE_SIZE;
+ uint64_t div;
int i, ret;
if (strnlen(val, sizeof(buf)) >= sizeof(buf))
@@ -228,7 +230,7 @@ static int phram_setup(const char *val)
strcpy(str, val);
kill_final_newline(str);
- for (i = 0; i < 3; i++)
+ for (i = 0; i < 4; i++)
token[i] = strsep(&str, ",");
if (str)
@@ -253,11 +255,25 @@ static int phram_setup(const char *val)
goto error;
}
- ret = register_device(name, start, len);
+ if (token[3]) {
+ ret = parse_num64(&erasesize, token[3]);
+ if (ret) {
+ parse_err("illegal erasesize\n");
+ goto error;
+ }
+ }
+
+ /* do_div() divides its first argument in place, so divide a copy */
+ div = len;
+ if (len == 0 || erasesize == 0 || erasesize > len
+ || erasesize > UINT_MAX || do_div(div, (uint32_t)erasesize) != 0) {
+ parse_err("illegal erasesize or len\n");
+ goto error;
+ }
+
+ ret = register_device(name, start, len, (uint32_t)erasesize);
if (ret)
goto error;
- pr_info("%s device: %#llx at %#llx\n", name, len, start);
+ pr_info("%s device: %#llx at %#llx for erasesize %#llx\n", name, len, start, erasesize);
return 0;
error:
@@ -298,7 +314,7 @@ static int phram_param_call(const char *val, const struct kernel_param *kp)
}
module_param_call(phram, phram_param_call, NULL, NULL, 0200);
-MODULE_PARM_DESC(phram, "Memory region to map. \"phram=<name>,<start>,<length>\"");
+MODULE_PARM_DESC(phram, "Memory region to map. \"phram=<name>,<start>,<length>[,<erasesize>]\"");
static int __init init_phram(void)
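
do_div() is the subtle part of the validation above: it divides its 64-bit
first argument in place and returns the remainder, which is why the check is
performed on a copy rather than on len itself. A standalone sketch of the
semantics:

#include <asm/div64.h>
#include <linux/types.h>

static void do_div_semantics(void)
{
	u64 n = 10;
	u32 rem;

	rem = do_div(n, 3); /* n is replaced by the quotient */

	/* here n == 3 and rem == 1 */
}
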
diff --git a/drivers/mtd/devices/powernv_flash.c b/drivers/mtd/devices/powernv_flash.c
index 0b757d9ba2f6..6950a8764815 100644
--- a/drivers/mtd/devices/powernv_flash.c
+++ b/drivers/mtd/devices/powernv_flash.c
@@ -126,6 +126,7 @@ out:
}
/**
+ * powernv_flash_read
* @mtd: the device
* @from: the offset to read from
* @len: the number of bytes to read
@@ -142,6 +143,7 @@ static int powernv_flash_read(struct mtd_info *mtd, loff_t from, size_t len,
}
/**
+ * powernv_flash_write
* @mtd: the device
* @to: the offset to write to
* @len: the number of bytes to write
@@ -158,6 +160,7 @@ static int powernv_flash_write(struct mtd_info *mtd, loff_t to, size_t len,
}
/**
+ * powernv_flash_erase
* @mtd: the device
* @erase: the erase info
* Returns 0 if erase successful or -ERRNO if an error occurred
@@ -176,7 +179,7 @@ static int powernv_flash_erase(struct mtd_info *mtd, struct erase_info *erase)
/**
* powernv_flash_set_driver_info - Fill the mtd_info structure and docg3
- * structure @pdev: The platform device
+ * @dev: The device structure
* @mtd: The structure to fill
*/
static int powernv_flash_set_driver_info(struct device *dev,
diff --git a/drivers/mtd/maps/physmap-bt1-rom.c b/drivers/mtd/maps/physmap-bt1-rom.c
index 27cfe1c63a2e..a35450002284 100644
--- a/drivers/mtd/maps/physmap-bt1-rom.c
+++ b/drivers/mtd/maps/physmap-bt1-rom.c
@@ -31,12 +31,12 @@ static map_word __xipram bt1_rom_map_read(struct map_info *map,
unsigned long ofs)
{
void __iomem *src = map->virt + ofs;
- unsigned long shift;
+ unsigned int shift;
map_word ret;
u32 data;
/* Read data within offset dword. */
- shift = (unsigned long)src & 0x3;
+ shift = (uintptr_t)src & 0x3;
data = readl_relaxed(src - shift);
if (!shift) {
ret.x[0] = data;
@@ -60,7 +60,7 @@ static void __xipram bt1_rom_map_copy_from(struct map_info *map,
ssize_t len)
{
void __iomem *src = map->virt + from;
- ssize_t shift, chunk;
+ unsigned int shift, chunk;
u32 data;
if (len <= 0 || from >= map->size)
@@ -75,7 +75,7 @@ static void __xipram bt1_rom_map_copy_from(struct map_info *map,
* up into the next three stages: unaligned head, aligned body,
* unaligned tail.
*/
- shift = (ssize_t)src & 0x3;
+ shift = (uintptr_t)src & 0x3;
if (shift) {
chunk = min_t(ssize_t, 4 - shift, len);
data = readl_relaxed(src - shift);
diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c
index 311742c78155..0bec7c791d17 100644
--- a/drivers/mtd/maps/plat-ram.c
+++ b/drivers/mtd/maps/plat-ram.c
@@ -177,8 +177,12 @@ static int platram_probe(struct platform_device *pdev)
err = mtd_device_parse_register(info->mtd, pdata->probes, NULL,
pdata->partitions,
pdata->nr_partitions);
- if (!err)
- dev_info(&pdev->dev, "registered mtd device\n");
+ if (err) {
+ dev_err(&pdev->dev, "failed to register mtd device\n");
+ goto exit_free;
+ }
+
+ dev_info(&pdev->dev, "registered mtd device\n");
if (pdata->nr_partitions) {
/* add the whole device. */
@@ -186,10 +190,11 @@ static int platram_probe(struct platform_device *pdev)
if (err) {
dev_err(&pdev->dev,
"failed to register the entire device\n");
+ goto exit_free;
}
}
- return err;
+ return 0;
exit_free:
platram_remove(pdev);
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 0c05f77f9b21..fb8e12d590a1 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -298,38 +298,10 @@ unlock:
return ret;
}
-static int blktrans_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
-{
- struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
- int ret = -ENXIO;
-
- if (!dev)
- return ret;
-
- mutex_lock(&dev->lock);
-
- if (!dev->mtd)
- goto unlock;
-
- switch (cmd) {
- case BLKFLSBUF:
- ret = dev->tr->flush ? dev->tr->flush(dev) : 0;
- break;
- default:
- ret = -ENOTTY;
- }
-unlock:
- mutex_unlock(&dev->lock);
- blktrans_dev_put(dev);
- return ret;
-}
-
static const struct block_device_operations mtd_block_ops = {
.owner = THIS_MODULE,
.open = blktrans_open,
.release = blktrans_release,
- .ioctl = blktrans_ioctl,
.getgeo = blktrans_getgeo,
};
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index b40f46a43fc6..323035d4f2d0 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -881,7 +881,6 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
if (copy_from_user(&offs, argp, sizeof(loff_t)))
return -EFAULT;
return mtd_block_isbad(mtd, offs);
- break;
}
case MEMSETBADBLOCK:
@@ -891,7 +890,6 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
if (copy_from_user(&offs, argp, sizeof(loff_t)))
return -EFAULT;
return mtd_block_markbad(mtd, offs);
- break;
}
case OTPSELECT:
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index e9e163ae9d86..2d6423d89a17 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -993,6 +993,8 @@ int __get_mtd_device(struct mtd_info *mtd)
}
}
+ master->usecount++;
+
while (mtd->parent) {
mtd->usecount++;
mtd = mtd->parent;
@@ -1059,6 +1061,8 @@ void __put_mtd_device(struct mtd_info *mtd)
mtd = mtd->parent;
}
+ master->usecount--;
+
if (master->_put_device)
master->_put_device(master);
@@ -1578,7 +1582,7 @@ static int mtd_ooblayout_find_region(struct mtd_info *mtd, int byte,
* ECC byte
* @mtd: mtd info structure
* @eccbyte: the byte we are searching for
- * @sectionp: pointer where the section id will be stored
+ * @section: pointer where the section id will be stored
* @oobregion: OOB region information
*
* Works like mtd_ooblayout_find_region() except it searches for a specific ECC
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index c3575b686f79..12ca4f19cb14 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -292,7 +292,7 @@ EXPORT_SYMBOL_GPL(mtd_add_partition);
/**
* __mtd_del_partition - delete MTD partition
*
- * @priv: MTD structure to be deleted
+ * @mtd: MTD structure to be deleted
*
* This function must be called with the partitions mutex locked.
*/
diff --git a/drivers/mtd/mtdsuper.c b/drivers/mtd/mtdsuper.c
index c3e2098372f2..38b6aa849c63 100644
--- a/drivers/mtd/mtdsuper.c
+++ b/drivers/mtd/mtdsuper.c
@@ -120,8 +120,8 @@ int get_tree_mtd(struct fs_context *fc,
struct fs_context *fc))
{
#ifdef CONFIG_BLOCK
- struct block_device *bdev;
- int ret, major;
+ dev_t dev;
+ int ret;
#endif
int mtdnr;
@@ -169,20 +169,15 @@ int get_tree_mtd(struct fs_context *fc,
/* try the old way - the hack where we allowed users to mount
* /dev/mtdblock$(n) but didn't actually _use_ the blockdev
*/
- bdev = lookup_bdev(fc->source);
- if (IS_ERR(bdev)) {
- ret = PTR_ERR(bdev);
+ ret = lookup_bdev(fc->source, &dev);
+ if (ret) {
errorf(fc, "MTD: Couldn't look up '%s': %d", fc->source, ret);
return ret;
}
pr_debug("MTDSB: lookup_bdev() returned 0\n");
- major = MAJOR(bdev->bd_dev);
- mtdnr = MINOR(bdev->bd_dev);
- bdput(bdev);
-
- if (major == MTD_BLOCK_MAJOR)
- return mtd_get_sb_by_nr(fc, mtdnr, fill_super);
+ if (MAJOR(dev) == MTD_BLOCK_MAJOR)
+ return mtd_get_sb_by_nr(fc, MINOR(dev), fill_super);
#endif /* CONFIG_BLOCK */
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 4a9aed4f0104..b40455234cbd 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -13,7 +13,38 @@ menu "ECC engine support"
config MTD_NAND_ECC
bool
- depends on MTD_NAND_CORE
+ select MTD_NAND_CORE
+
+config MTD_NAND_ECC_SW_HAMMING
+ bool "Software Hamming ECC engine"
+ default y if MTD_RAW_NAND
+ select MTD_NAND_ECC
+ help
+ This enables support for software Hamming error
+ correction. This correction can correct up to 1 bit error
+ per chunk and detect up to 2 bit errors. While it used to be
+ widely used with old parts, newer NAND chips usually require
+ stronger correction, in which case BCH or RS will be
+ preferred.
+
+config MTD_NAND_ECC_SW_HAMMING_SMC
+ bool "NAND ECC Smart Media byte order"
+ depends on MTD_NAND_ECC_SW_HAMMING
+ default n
+ help
+ Software ECC according to the Smart Media Specification.
+ The original Linux implementation had byte 0 and 1 swapped.
+
+config MTD_NAND_ECC_SW_BCH
+ bool "Software BCH ECC engine"
+ select BCH
+ select MTD_NAND_ECC
+ default n
+ help
+ This enables support for software BCH error correction. Binary BCH
+ codes are more powerful and CPU intensive than traditional Hamming
+ ECC codes. They are used with NAND devices requiring more than 1 bit
+ of error correction.
endmenu
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 981372953b56..1c0b46960eb1 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -8,3 +8,5 @@ obj-y += raw/
obj-y += spi/
nandcore-$(CONFIG_MTD_NAND_ECC) += ecc.o
+nandcore-$(CONFIG_MTD_NAND_ECC_SW_HAMMING) += ecc-sw-hamming.o
+nandcore-$(CONFIG_MTD_NAND_ECC_SW_BCH) += ecc-sw-bch.o
diff --git a/drivers/mtd/nand/core.c b/drivers/mtd/nand/core.c
index b6de955ac8bf..5e13a03d2b32 100644
--- a/drivers/mtd/nand/core.c
+++ b/drivers/mtd/nand/core.c
@@ -208,6 +208,130 @@ int nanddev_mtd_max_bad_blocks(struct mtd_info *mtd, loff_t offs, size_t len)
EXPORT_SYMBOL_GPL(nanddev_mtd_max_bad_blocks);
/**
+ * nanddev_get_ecc_engine() - Find and get a suitable ECC engine
+ * @nand: NAND device
+ */
+static int nanddev_get_ecc_engine(struct nand_device *nand)
+{
+ int engine_type;
+
+ /* Read the user's ECC engine/configuration preferences */
+ of_get_nand_ecc_user_config(nand);
+
+ engine_type = nand->ecc.user_conf.engine_type;
+ if (engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
+ engine_type = nand->ecc.defaults.engine_type;
+
+ switch (engine_type) {
+ case NAND_ECC_ENGINE_TYPE_NONE:
+ return 0;
+ case NAND_ECC_ENGINE_TYPE_SOFT:
+ nand->ecc.engine = nand_ecc_get_sw_engine(nand);
+ break;
+ case NAND_ECC_ENGINE_TYPE_ON_DIE:
+ nand->ecc.engine = nand_ecc_get_on_die_hw_engine(nand);
+ break;
+ case NAND_ECC_ENGINE_TYPE_ON_HOST:
+ pr_err("On-host hardware ECC engines not supported yet\n");
+ break;
+ default:
+ pr_err("Missing ECC engine type\n");
+ }
+
+ if (!nand->ecc.engine)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * nanddev_put_ecc_engine() - Detach and put the in-use ECC engine
+ * @nand: NAND device
+ */
+static int nanddev_put_ecc_engine(struct nand_device *nand)
+{
+ switch (nand->ecc.ctx.conf.engine_type) {
+ case NAND_ECC_ENGINE_TYPE_ON_HOST:
+ pr_err("On-host hardware ECC engines not supported yet\n");
+ break;
+ case NAND_ECC_ENGINE_TYPE_NONE:
+ case NAND_ECC_ENGINE_TYPE_SOFT:
+ case NAND_ECC_ENGINE_TYPE_ON_DIE:
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * nanddev_find_ecc_configuration() - Find a suitable ECC configuration
+ * @nand: NAND device
+ */
+static int nanddev_find_ecc_configuration(struct nand_device *nand)
+{
+ int ret;
+
+ if (!nand->ecc.engine)
+ return -ENOTSUPP;
+
+ ret = nand_ecc_init_ctx(nand);
+ if (ret)
+ return ret;
+
+ if (!nand_ecc_is_strong_enough(nand))
+ pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
+ nand->mtd.name);
+
+ return 0;
+}
+
+/**
+ * nanddev_ecc_engine_init() - Initialize an ECC engine for the chip
+ * @nand: NAND device
+ */
+int nanddev_ecc_engine_init(struct nand_device *nand)
+{
+ int ret;
+
+ /* Look for the ECC engine to use */
+ ret = nanddev_get_ecc_engine(nand);
+ if (ret) {
+ pr_err("No ECC engine found\n");
+ return ret;
+ }
+
+ /* No ECC engine requested */
+ if (!nand->ecc.engine)
+ return 0;
+
+ /* Configure the engine: balance user input and chip requirements */
+ ret = nanddev_find_ecc_configuration(nand);
+ if (ret) {
+ pr_err("No suitable ECC configuration\n");
+ nanddev_put_ecc_engine(nand);
+
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nanddev_ecc_engine_init);
+
+/**
+ * nanddev_ecc_engine_cleanup() - Cleanup ECC engine initializations
+ * @nand: NAND device
+ */
+void nanddev_ecc_engine_cleanup(struct nand_device *nand)
+{
+ if (nand->ecc.engine)
+ nand_ecc_cleanup_ctx(nand);
+
+ nanddev_put_ecc_engine(nand);
+}
+EXPORT_SYMBOL_GPL(nanddev_ecc_engine_cleanup);
+
+/**
* nanddev_init() - Initialize a NAND device
* @nand: NAND device
* @ops: NAND device operations
diff --git a/drivers/mtd/nand/ecc-sw-bch.c b/drivers/mtd/nand/ecc-sw-bch.c
new file mode 100644
index 000000000000..0a0ac11d5725
--- /dev/null
+++ b/drivers/mtd/nand/ecc-sw-bch.c
@@ -0,0 +1,406 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * This file provides ECC correction for more than 1 bit per block of data,
+ * using binary BCH codes. It relies on the generic BCH library lib/bch.c.
+ *
+ * Copyright © 2011 Ivan Djelic <ivan.djelic@parrot.com>
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand-ecc-sw-bch.h>
+
+/**
+ * nand_ecc_sw_bch_calculate - Calculate the ECC corresponding to a data block
+ * @nand: NAND device
+ * @buf: Input buffer with raw data
+ * @code: Output buffer with ECC
+ */
+int nand_ecc_sw_bch_calculate(struct nand_device *nand,
+ const unsigned char *buf, unsigned char *code)
+{
+ struct nand_ecc_sw_bch_conf *engine_conf = nand->ecc.ctx.priv;
+ unsigned int i;
+
+ memset(code, 0, engine_conf->code_size);
+ bch_encode(engine_conf->bch, buf, nand->ecc.ctx.conf.step_size, code);
+
+ /* apply mask so that an erased page is a valid codeword */
+ for (i = 0; i < engine_conf->code_size; i++)
+ code[i] ^= engine_conf->eccmask[i];
+
+ return 0;
+}
+EXPORT_SYMBOL(nand_ecc_sw_bch_calculate);
+
+/**
+ * nand_ecc_sw_bch_correct - Detect, correct and report bit error(s)
+ * @nand: NAND device
+ * @buf: Raw data read from the chip
+ * @read_ecc: ECC bytes from the chip
+ * @calc_ecc: ECC calculated from the raw data
+ *
+ * Detect and correct bit errors for a data block.
+ */
+int nand_ecc_sw_bch_correct(struct nand_device *nand, unsigned char *buf,
+ unsigned char *read_ecc, unsigned char *calc_ecc)
+{
+ struct nand_ecc_sw_bch_conf *engine_conf = nand->ecc.ctx.priv;
+ unsigned int step_size = nand->ecc.ctx.conf.step_size;
+ unsigned int *errloc = engine_conf->errloc;
+ int i, count;
+
+ count = bch_decode(engine_conf->bch, NULL, step_size, read_ecc,
+ calc_ecc, NULL, errloc);
+ if (count > 0) {
+ for (i = 0; i < count; i++) {
+ if (errloc[i] < (step_size * 8))
+ /* The error is in the data area: correct it */
+ buf[errloc[i] >> 3] ^= (1 << (errloc[i] & 7));
+
+ /* Otherwise the error is in the ECC area: nothing to do */
+ pr_debug("%s: corrected bitflip %u\n", __func__,
+ errloc[i]);
+ }
+ } else if (count < 0) {
+ pr_err("ECC unrecoverable error\n");
+ count = -EBADMSG;
+ }
+
+ return count;
+}
+EXPORT_SYMBOL(nand_ecc_sw_bch_correct);
+
+/**
+ * nand_ecc_sw_bch_cleanup - Cleanup software BCH ECC resources
+ * @nand: NAND device
+ */
+static void nand_ecc_sw_bch_cleanup(struct nand_device *nand)
+{
+ struct nand_ecc_sw_bch_conf *engine_conf = nand->ecc.ctx.priv;
+
+ bch_free(engine_conf->bch);
+ kfree(engine_conf->errloc);
+ kfree(engine_conf->eccmask);
+}
+
+/**
+ * nand_ecc_sw_bch_init - Initialize software BCH ECC engine
+ * @nand: NAND device
+ *
+ * Returns: 0 on success, a negative error code otherwise
+ *
+ * Initialize NAND BCH error correction. @nand.ecc parameters 'step_size' and
+ * 'bytes' are used to compute the following BCH parameters:
+ * m, the Galois field order
+ * t, the error correction capability
+ * 'bytes' should be equal to the number of bytes required to store m * t
+ * bits, where m is such that 2^m - 1 > step_size * 8.
+ *
+ * Example: to configure 4 bit correction per 512 bytes, you should pass
+ * step_size = 512 (thus, m = 13 is the smallest integer such that 2^m - 1 > 512 * 8)
+ * bytes = 7 (7 bytes are required to store m * t = 13 * 4 = 52 bits)
+ */
+static int nand_ecc_sw_bch_init(struct nand_device *nand)
+{
+ struct nand_ecc_sw_bch_conf *engine_conf = nand->ecc.ctx.priv;
+ unsigned int eccsize = nand->ecc.ctx.conf.step_size;
+ unsigned int eccbytes = engine_conf->code_size;
+ unsigned int m, t, i;
+ unsigned char *erased_page;
+ int ret;
+
+ m = fls(1 + (8 * eccsize));
+ t = (eccbytes * 8) / m;
+
+ engine_conf->bch = bch_init(m, t, 0, false);
+ if (!engine_conf->bch)
+ return -EINVAL;
+
+ engine_conf->eccmask = kzalloc(eccbytes, GFP_KERNEL);
+ engine_conf->errloc = kmalloc_array(t, sizeof(*engine_conf->errloc),
+ GFP_KERNEL);
+ if (!engine_conf->eccmask || !engine_conf->errloc) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ /* Compute and store the inverted ECC of an erased step */
+ erased_page = kmalloc(eccsize, GFP_KERNEL);
+ if (!erased_page) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ memset(erased_page, 0xff, eccsize);
+ bch_encode(engine_conf->bch, erased_page, eccsize,
+ engine_conf->eccmask);
+ kfree(erased_page);
+
+ for (i = 0; i < eccbytes; i++)
+ engine_conf->eccmask[i] ^= 0xff;
+
+ /* Verify that the number of code bytes has the expected value */
+ if (engine_conf->bch->ecc_bytes != eccbytes) {
+ pr_err("Invalid number of ECC bytes: %u, expected: %u\n",
+ eccbytes, engine_conf->bch->ecc_bytes);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ /* Sanity checks */
+ if (8 * (eccsize + eccbytes) >= (1 << m)) {
+ pr_err("ECC step size is too large (%u)\n", eccsize);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ return 0;
+
+cleanup:
+ nand_ecc_sw_bch_cleanup(nand);
+
+ return ret;
+}
+
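
Working the kerneldoc example through the two expressions above: with
step_size = 512 and 7 ECC bytes per step, m = fls(1 + 8 * 512) = fls(4097) =
13 and t = 56 / 13 = 4 correctable bits per step. A standalone sketch of the
arithmetic:

#include <stdio.h>

/* Minimal stand-in for the kernel's fls(): 1-based index of the top bit */
static unsigned int fls_demo(unsigned int x)
{
	unsigned int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned int eccsize = 512;                 /* bytes per ECC step */
	unsigned int eccbytes = 7;                  /* ECC bytes per step */
	unsigned int m = fls_demo(1 + 8 * eccsize); /* 13: GF(2^13) */
	unsigned int t = (eccbytes * 8) / m;        /* 4 correctable bits */

	printf("m=%u t=%u\n", m, t);
	return 0;
}
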
+int nand_ecc_sw_bch_init_ctx(struct nand_device *nand)
+{
+ struct nand_ecc_props *conf = &nand->ecc.ctx.conf;
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+ struct nand_ecc_sw_bch_conf *engine_conf;
+ unsigned int code_size = 0, nsteps;
+ int ret;
+
+ /* Only large page NAND chips may use BCH */
+ if (mtd->oobsize < 64) {
+ pr_err("BCH cannot be used with small page NAND chips\n");
+ return -EINVAL;
+ }
+
+ if (!mtd->ooblayout)
+ mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());
+
+ conf->engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ conf->algo = NAND_ECC_ALGO_BCH;
+ conf->step_size = nand->ecc.user_conf.step_size;
+ conf->strength = nand->ecc.user_conf.strength;
+
+ /*
+ * The board driver should supply ECC size and ECC strength
+ * values to select how many bits are correctable.
+ * Otherwise, default to 512 bytes for large page devices and 256 for
+ * small page devices.
+ */
+ if (!conf->step_size) {
+ if (mtd->oobsize >= 64)
+ conf->step_size = 512;
+ else
+ conf->step_size = 256;
+
+ conf->strength = 4;
+ }
+
+ nsteps = mtd->writesize / conf->step_size;
+
+ /* Maximize */
+ if (nand->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH) {
+ conf->step_size = 1024;
+ nsteps = mtd->writesize / conf->step_size;
+ /* Reserve 2 bytes for the BBM */
+ code_size = (mtd->oobsize - 2) / nsteps;
+ conf->strength = code_size * 8 / fls(8 * conf->step_size);
+ }
+
+ if (!code_size)
+ code_size = DIV_ROUND_UP(conf->strength *
+ fls(8 * conf->step_size), 8);
+
+ if (!conf->strength)
+ conf->strength = (code_size * 8) / fls(8 * conf->step_size);
+
+ if (!code_size && !conf->strength) {
+ pr_err("Missing ECC parameters\n");
+ return -EINVAL;
+ }
+
+ engine_conf = kzalloc(sizeof(*engine_conf), GFP_KERNEL);
+ if (!engine_conf)
+ return -ENOMEM;
+
+ ret = nand_ecc_init_req_tweaking(&engine_conf->req_ctx, nand);
+ if (ret)
+ goto free_engine_conf;
+
+ engine_conf->code_size = code_size;
+ engine_conf->nsteps = nsteps;
+ engine_conf->calc_buf = kzalloc(mtd->oobsize, GFP_KERNEL);
+ engine_conf->code_buf = kzalloc(mtd->oobsize, GFP_KERNEL);
+ if (!engine_conf->calc_buf || !engine_conf->code_buf) {
+ ret = -ENOMEM;
+ goto free_bufs;
+ }
+
+ nand->ecc.ctx.priv = engine_conf;
+ nand->ecc.ctx.total = nsteps * code_size;
+
+ ret = nand_ecc_sw_bch_init(nand);
+ if (ret)
+ goto free_bufs;
+
+ /* Verify the layout validity */
+ if (mtd_ooblayout_count_eccbytes(mtd) !=
+ engine_conf->nsteps * engine_conf->code_size) {
+ pr_err("Invalid ECC layout\n");
+ ret = -EINVAL;
+ goto cleanup_bch_ctx;
+ }
+
+ return 0;
+
+cleanup_bch_ctx:
+ nand_ecc_sw_bch_cleanup(nand);
+free_bufs:
+ nand_ecc_cleanup_req_tweaking(&engine_conf->req_ctx);
+ kfree(engine_conf->calc_buf);
+ kfree(engine_conf->code_buf);
+free_engine_conf:
+ kfree(engine_conf);
+
+ return ret;
+}
+EXPORT_SYMBOL(nand_ecc_sw_bch_init_ctx);
+
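
A worked instance of the NAND_ECC_MAXIMIZE_STRENGTH branch above, assuming a
hypothetical chip with a 4096-byte page and a 224-byte OOB area:

#include <stdio.h>

int main(void)
{
	unsigned int writesize = 4096, oobsize = 224; /* hypothetical chip */
	unsigned int step_size = 1024;
	unsigned int nsteps = writesize / step_size;     /* 4 */
	unsigned int code_size = (oobsize - 2) / nsteps; /* 55, 2 for BBM */
	unsigned int strength = code_size * 8 / 14;      /* fls(8192) == 14 */

	printf("nsteps=%u code_size=%u strength=%u\n",
	       nsteps, code_size, strength); /* 4 55 31 */
	return 0;
}
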
+void nand_ecc_sw_bch_cleanup_ctx(struct nand_device *nand)
+{
+ struct nand_ecc_sw_bch_conf *engine_conf = nand->ecc.ctx.priv;
+
+ if (engine_conf) {
+ nand_ecc_sw_bch_cleanup(nand);
+ nand_ecc_cleanup_req_tweaking(&engine_conf->req_ctx);
+ kfree(engine_conf->calc_buf);
+ kfree(engine_conf->code_buf);
+ kfree(engine_conf);
+ }
+}
+EXPORT_SYMBOL(nand_ecc_sw_bch_cleanup_ctx);
+
+static int nand_ecc_sw_bch_prepare_io_req(struct nand_device *nand,
+ struct nand_page_io_req *req)
+{
+ struct nand_ecc_sw_bch_conf *engine_conf = nand->ecc.ctx.priv;
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+ int eccsize = nand->ecc.ctx.conf.step_size;
+ int eccbytes = engine_conf->code_size;
+ int eccsteps = engine_conf->nsteps;
+ int total = nand->ecc.ctx.total;
+ u8 *ecccalc = engine_conf->calc_buf;
+ const u8 *data;
+ int i;
+
+ /* Nothing to do for a raw operation */
+ if (req->mode == MTD_OPS_RAW)
+ return 0;
+
+ /* This engine does not provide BBM/free OOB bytes protection */
+ if (!req->datalen)
+ return 0;
+
+ nand_ecc_tweak_req(&engine_conf->req_ctx, req);
+
+ /* No more preparation for page read */
+ if (req->type == NAND_PAGE_READ)
+ return 0;
+
+ /* Preparation for page write: derive the ECC bytes and place them */
+ for (i = 0, data = req->databuf.out;
+ eccsteps;
+ eccsteps--, i += eccbytes, data += eccsize)
+ nand_ecc_sw_bch_calculate(nand, data, &ecccalc[i]);
+
+ return mtd_ooblayout_set_eccbytes(mtd, ecccalc, (void *)req->oobbuf.out,
+ 0, total);
+}
+
+static int nand_ecc_sw_bch_finish_io_req(struct nand_device *nand,
+ struct nand_page_io_req *req)
+{
+ struct nand_ecc_sw_bch_conf *engine_conf = nand->ecc.ctx.priv;
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+ int eccsize = nand->ecc.ctx.conf.step_size;
+ int total = nand->ecc.ctx.total;
+ int eccbytes = engine_conf->code_size;
+ int eccsteps = engine_conf->nsteps;
+ u8 *ecccalc = engine_conf->calc_buf;
+ u8 *ecccode = engine_conf->code_buf;
+ unsigned int max_bitflips = 0;
+ u8 *data = req->databuf.in;
+ int i, ret;
+
+ /* Nothing to do for a raw operation */
+ if (req->mode == MTD_OPS_RAW)
+ return 0;
+
+ /* This engine does not provide BBM/free OOB bytes protection */
+ if (!req->datalen)
+ return 0;
+
+ /* No more preparation for page write */
+ if (req->type == NAND_PAGE_WRITE) {
+ nand_ecc_restore_req(&engine_conf->req_ctx, req);
+ return 0;
+ }
+
+ /* Finish a page read: retrieve the (raw) ECC bytes */
+ ret = mtd_ooblayout_get_eccbytes(mtd, ecccode, req->oobbuf.in, 0,
+ total);
+ if (ret)
+ return ret;
+
+ /* Calculate the ECC bytes */
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, data += eccsize)
+ nand_ecc_sw_bch_calculate(nand, data, &ecccalc[i]);
+
+ /* Finish a page read: compare and correct */
+ for (eccsteps = engine_conf->nsteps, i = 0, data = req->databuf.in;
+ eccsteps;
+ eccsteps--, i += eccbytes, data += eccsize) {
+ int stat = nand_ecc_sw_bch_correct(nand, data,
+ &ecccode[i],
+ &ecccalc[i]);
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+ }
+
+ nand_ecc_restore_req(&engine_conf->req_ctx, req);
+
+ return max_bitflips;
+}
+
+static struct nand_ecc_engine_ops nand_ecc_sw_bch_engine_ops = {
+ .init_ctx = nand_ecc_sw_bch_init_ctx,
+ .cleanup_ctx = nand_ecc_sw_bch_cleanup_ctx,
+ .prepare_io_req = nand_ecc_sw_bch_prepare_io_req,
+ .finish_io_req = nand_ecc_sw_bch_finish_io_req,
+};
+
+static struct nand_ecc_engine nand_ecc_sw_bch_engine = {
+ .ops = &nand_ecc_sw_bch_engine_ops,
+};
+
+struct nand_ecc_engine *nand_ecc_sw_bch_get_engine(void)
+{
+ return &nand_ecc_sw_bch_engine;
+}
+EXPORT_SYMBOL(nand_ecc_sw_bch_get_engine);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ivan Djelic <ivan.djelic@parrot.com>");
+MODULE_DESCRIPTION("NAND software BCH ECC support");
diff --git a/drivers/mtd/nand/raw/nand_ecc.c b/drivers/mtd/nand/ecc-sw-hamming.c
index b6a46b1b7781..6334d1d7735d 100644
--- a/drivers/mtd/nand/raw/nand_ecc.c
+++ b/drivers/mtd/nand/ecc-sw-hamming.c
@@ -17,9 +17,9 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand-ecc-sw-hamming.h>
+#include <linux/slab.h>
#include <asm/byteorder.h>
/*
@@ -75,7 +75,7 @@ static const char bitsperbyte[256] = {
* addressbits is a lookup table to filter out the bits from the xor-ed
* ECC data that identify the faulty location.
* this is only used for repairing parity
- * see the comments in nand_correct_data for more details
+ * see the comments in nand_ecc_sw_hamming_correct for more details
*/
static const char addressbits[256] = {
0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
@@ -112,30 +112,21 @@ static const char addressbits[256] = {
0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f
};
-/**
- * __nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte
- * block
- * @buf: input buffer with raw data
- * @eccsize: data bytes per ECC step (256 or 512)
- * @code: output buffer with ECC
- * @sm_order: Smart Media byte ordering
- */
-void __nand_calculate_ecc(const unsigned char *buf, unsigned int eccsize,
- unsigned char *code, bool sm_order)
+int ecc_sw_hamming_calculate(const unsigned char *buf, unsigned int step_size,
+ unsigned char *code, bool sm_order)
{
+ const u32 *bp = (const u32 *)buf;
+ const u32 eccsize_mult = (step_size == 256) ? 1 : 2;
+ /* current value in buffer */
+ u32 cur;
+ /* rp0..rp17 are the various accumulated parities (per byte) */
+ u32 rp0, rp1, rp2, rp3, rp4, rp5, rp6, rp7, rp8, rp9, rp10, rp11, rp12,
+ rp13, rp14, rp15, rp16, rp17;
+ /* Cumulative parity for all data */
+ u32 par;
+ /* Cumulative parity at the end of the loop (rp12, rp14, rp16) */
+ u32 tmppar;
int i;
- const uint32_t *bp = (uint32_t *)buf;
- /* 256 or 512 bytes/ecc */
- const uint32_t eccsize_mult = eccsize >> 8;
- uint32_t cur; /* current value in buffer */
- /* rp0..rp15..rp17 are the various accumulated parities (per byte) */
- uint32_t rp0, rp1, rp2, rp3, rp4, rp5, rp6, rp7;
- uint32_t rp8, rp9, rp10, rp11, rp12, rp13, rp14, rp15, rp16;
- uint32_t rp17;
- uint32_t par; /* the cumulative parity for all data */
- uint32_t tmppar; /* the cumulative parity for this iteration;
- for rp12, rp14 and rp16 at the end of the
- loop */
par = 0;
rp4 = 0;
@@ -145,6 +136,7 @@ void __nand_calculate_ecc(const unsigned char *buf, unsigned int eccsize,
rp12 = 0;
rp14 = 0;
rp16 = 0;
+ rp17 = 0;
/*
* The loop is unrolled a number of times;
@@ -356,45 +348,35 @@ void __nand_calculate_ecc(const unsigned char *buf, unsigned int eccsize,
(invparity[par & 0x55] << 2) |
(invparity[rp17] << 1) |
(invparity[rp16] << 0);
+
+ return 0;
}
-EXPORT_SYMBOL(__nand_calculate_ecc);
+EXPORT_SYMBOL(ecc_sw_hamming_calculate);
/**
- * nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte
- * block
- * @chip: NAND chip object
- * @buf: input buffer with raw data
- * @code: output buffer with ECC
+ * nand_ecc_sw_hamming_calculate - Calculate 3-byte ECC for 256/512-byte block
+ * @nand: NAND device
+ * @buf: Input buffer with raw data
+ * @code: Output buffer with ECC
*/
-int nand_calculate_ecc(struct nand_chip *chip, const unsigned char *buf,
- unsigned char *code)
+int nand_ecc_sw_hamming_calculate(struct nand_device *nand,
+ const unsigned char *buf, unsigned char *code)
{
- bool sm_order = chip->ecc.options & NAND_ECC_SOFT_HAMMING_SM_ORDER;
+ struct nand_ecc_sw_hamming_conf *engine_conf = nand->ecc.ctx.priv;
+ unsigned int step_size = nand->ecc.ctx.conf.step_size;
- __nand_calculate_ecc(buf, chip->ecc.size, code, sm_order);
-
- return 0;
+ return ecc_sw_hamming_calculate(buf, step_size, code,
+ engine_conf->sm_order);
}
-EXPORT_SYMBOL(nand_calculate_ecc);
+EXPORT_SYMBOL(nand_ecc_sw_hamming_calculate);
-/**
- * __nand_correct_data - [NAND Interface] Detect and correct bit error(s)
- * @buf: raw data read from the chip
- * @read_ecc: ECC from the chip
- * @calc_ecc: the ECC calculated from raw data
- * @eccsize: data bytes per ECC step (256 or 512)
- * @sm_order: Smart Media byte order
- *
- * Detect and correct a 1 bit error for eccsize byte block
- */
-int __nand_correct_data(unsigned char *buf,
- unsigned char *read_ecc, unsigned char *calc_ecc,
- unsigned int eccsize, bool sm_order)
+int ecc_sw_hamming_correct(unsigned char *buf, unsigned char *read_ecc,
+ unsigned char *calc_ecc, unsigned int step_size,
+ bool sm_order)
{
+ const u32 eccsize_mult = step_size >> 8;
unsigned char b0, b1, b2, bit_addr;
unsigned int byte_addr;
- /* 256 or 512 bytes/ecc */
- const uint32_t eccsize_mult = eccsize >> 8;
/*
* b0 to b2 indicate which bit is faulty (if any)
@@ -458,27 +440,220 @@ int __nand_correct_data(unsigned char *buf,
pr_err("%s: uncorrectable ECC error\n", __func__);
return -EBADMSG;
}
-EXPORT_SYMBOL(__nand_correct_data);
+EXPORT_SYMBOL(ecc_sw_hamming_correct);
/**
- * nand_correct_data - [NAND Interface] Detect and correct bit error(s)
- * @chip: NAND chip object
- * @buf: raw data read from the chip
- * @read_ecc: ECC from the chip
- * @calc_ecc: the ECC calculated from raw data
+ * nand_ecc_sw_hamming_correct - Detect and correct bit error(s)
+ * @nand: NAND device
+ * @buf: Raw data read from the chip
+ * @read_ecc: ECC bytes read from the chip
+ * @calc_ecc: ECC calculated from the raw data
*
- * Detect and correct a 1 bit error for 256/512 byte block
+ * Detect and correct up to 1 bit error per 256/512-byte block.
*/
-int nand_correct_data(struct nand_chip *chip, unsigned char *buf,
- unsigned char *read_ecc, unsigned char *calc_ecc)
+int nand_ecc_sw_hamming_correct(struct nand_device *nand, unsigned char *buf,
+ unsigned char *read_ecc,
+ unsigned char *calc_ecc)
+{
+ struct nand_ecc_sw_hamming_conf *engine_conf = nand->ecc.ctx.priv;
+ unsigned int step_size = nand->ecc.ctx.conf.step_size;
+
+ return ecc_sw_hamming_correct(buf, read_ecc, calc_ecc, step_size,
+ engine_conf->sm_order);
+}
+EXPORT_SYMBOL(nand_ecc_sw_hamming_correct);
+
+int nand_ecc_sw_hamming_init_ctx(struct nand_device *nand)
+{
+ struct nand_ecc_props *conf = &nand->ecc.ctx.conf;
+ struct nand_ecc_sw_hamming_conf *engine_conf;
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+ int ret;
+
+ if (!mtd->ooblayout) {
+ switch (mtd->oobsize) {
+ case 8:
+ case 16:
+ mtd_set_ooblayout(mtd, nand_get_small_page_ooblayout());
+ break;
+ case 64:
+ case 128:
+ mtd_set_ooblayout(mtd,
+ nand_get_large_page_hamming_ooblayout());
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+ }
+
+ conf->engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ conf->algo = NAND_ECC_ALGO_HAMMING;
+ conf->step_size = nand->ecc.user_conf.step_size;
+ conf->strength = 1;
+
+ /* Use the strongest configuration by default */
+ if (conf->step_size != 256 && conf->step_size != 512)
+ conf->step_size = 256;
+
+ engine_conf = kzalloc(sizeof(*engine_conf), GFP_KERNEL);
+ if (!engine_conf)
+ return -ENOMEM;
+
+ ret = nand_ecc_init_req_tweaking(&engine_conf->req_ctx, nand);
+ if (ret)
+ goto free_engine_conf;
+
+ engine_conf->code_size = 3;
+ engine_conf->nsteps = mtd->writesize / conf->step_size;
+ engine_conf->calc_buf = kzalloc(mtd->oobsize, GFP_KERNEL);
+ engine_conf->code_buf = kzalloc(mtd->oobsize, GFP_KERNEL);
+ if (!engine_conf->calc_buf || !engine_conf->code_buf) {
+ ret = -ENOMEM;
+ goto free_bufs;
+ }
+
+ nand->ecc.ctx.priv = engine_conf;
+ nand->ecc.ctx.total = engine_conf->nsteps * engine_conf->code_size;
+
+ return 0;
+
+free_bufs:
+ nand_ecc_cleanup_req_tweaking(&engine_conf->req_ctx);
+ kfree(engine_conf->calc_buf);
+ kfree(engine_conf->code_buf);
+free_engine_conf:
+ kfree(engine_conf);
+
+ return ret;
+}
+EXPORT_SYMBOL(nand_ecc_sw_hamming_init_ctx);
+
+void nand_ecc_sw_hamming_cleanup_ctx(struct nand_device *nand)
+{
+ struct nand_ecc_sw_hamming_conf *engine_conf = nand->ecc.ctx.priv;
+
+ if (engine_conf) {
+ nand_ecc_cleanup_req_tweaking(&engine_conf->req_ctx);
+ kfree(engine_conf->calc_buf);
+ kfree(engine_conf->code_buf);
+ kfree(engine_conf);
+ }
+}
+EXPORT_SYMBOL(nand_ecc_sw_hamming_cleanup_ctx);
+
+static int nand_ecc_sw_hamming_prepare_io_req(struct nand_device *nand,
+ struct nand_page_io_req *req)
{
- bool sm_order = chip->ecc.options & NAND_ECC_SOFT_HAMMING_SM_ORDER;
+ struct nand_ecc_sw_hamming_conf *engine_conf = nand->ecc.ctx.priv;
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+ int eccsize = nand->ecc.ctx.conf.step_size;
+ int eccbytes = engine_conf->code_size;
+ int eccsteps = engine_conf->nsteps;
+ int total = nand->ecc.ctx.total;
+ u8 *ecccalc = engine_conf->calc_buf;
+ const u8 *data;
+ int i;
+
+ /* Nothing to do for a raw operation */
+ if (req->mode == MTD_OPS_RAW)
+ return 0;
+
+ /* This engine does not provide BBM/free OOB bytes protection */
+ if (!req->datalen)
+ return 0;
- return __nand_correct_data(buf, read_ecc, calc_ecc, chip->ecc.size,
- sm_order);
+ nand_ecc_tweak_req(&engine_conf->req_ctx, req);
+
+ /* No more preparation for page read */
+ if (req->type == NAND_PAGE_READ)
+ return 0;
+
+ /* Preparation for page write: derive the ECC bytes and place them */
+ for (i = 0, data = req->databuf.out;
+ eccsteps;
+ eccsteps--, i += eccbytes, data += eccsize)
+ nand_ecc_sw_hamming_calculate(nand, data, &ecccalc[i]);
+
+ return mtd_ooblayout_set_eccbytes(mtd, ecccalc, (void *)req->oobbuf.out,
+ 0, total);
+}
+
+static int nand_ecc_sw_hamming_finish_io_req(struct nand_device *nand,
+ struct nand_page_io_req *req)
+{
+ struct nand_ecc_sw_hamming_conf *engine_conf = nand->ecc.ctx.priv;
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+ int eccsize = nand->ecc.ctx.conf.step_size;
+ int total = nand->ecc.ctx.total;
+ int eccbytes = engine_conf->code_size;
+ int eccsteps = engine_conf->nsteps;
+ u8 *ecccalc = engine_conf->calc_buf;
+ u8 *ecccode = engine_conf->code_buf;
+ unsigned int max_bitflips = 0;
+ u8 *data = req->databuf.in;
+ int i, ret;
+
+ /* Nothing to do for a raw operation */
+ if (req->mode == MTD_OPS_RAW)
+ return 0;
+
+ /* This engine does not provide BBM/free OOB bytes protection */
+ if (!req->datalen)
+ return 0;
+
+ /* Nothing more to do for a page write */
+ if (req->type == NAND_PAGE_WRITE) {
+ nand_ecc_restore_req(&engine_conf->req_ctx, req);
+ return 0;
+ }
+
+ /* Finish a page read: retrieve the (raw) ECC bytes */
+ ret = mtd_ooblayout_get_eccbytes(mtd, ecccode, req->oobbuf.in, 0,
+ total);
+ if (ret)
+ return ret;
+
+ /* Calculate the ECC bytes */
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, data += eccsize)
+ nand_ecc_sw_hamming_calculate(nand, data, &ecccalc[i]);
+
+ /* Finish a page read: compare and correct */
+ for (eccsteps = engine_conf->nsteps, i = 0, data = req->databuf.in;
+ eccsteps;
+ eccsteps--, i += eccbytes, data += eccsize) {
+ int stat = nand_ecc_sw_hamming_correct(nand, data,
+ &ecccode[i],
+ &ecccalc[i]);
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+ }
+
+ nand_ecc_restore_req(&engine_conf->req_ctx, req);
+
+ return max_bitflips;
+}
+
+static struct nand_ecc_engine_ops nand_ecc_sw_hamming_engine_ops = {
+ .init_ctx = nand_ecc_sw_hamming_init_ctx,
+ .cleanup_ctx = nand_ecc_sw_hamming_cleanup_ctx,
+ .prepare_io_req = nand_ecc_sw_hamming_prepare_io_req,
+ .finish_io_req = nand_ecc_sw_hamming_finish_io_req,
+};
+
+static struct nand_ecc_engine nand_ecc_sw_hamming_engine = {
+ .ops = &nand_ecc_sw_hamming_engine_ops,
+};
+
+struct nand_ecc_engine *nand_ecc_sw_hamming_get_engine(void)
+{
+ return &nand_ecc_sw_hamming_engine;
}
-EXPORT_SYMBOL(nand_correct_data);
+EXPORT_SYMBOL(nand_ecc_sw_hamming_get_engine);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Frans Meulenbroeks <fransmeulenbroeks@gmail.com>");
-MODULE_DESCRIPTION("Generic NAND ECC support");
+MODULE_DESCRIPTION("NAND software Hamming ECC support");
diff --git a/drivers/mtd/nand/ecc.c b/drivers/mtd/nand/ecc.c
index 4a56e6c0da67..6c43dfda01d4 100644
--- a/drivers/mtd/nand/ecc.c
+++ b/drivers/mtd/nand/ecc.c
@@ -95,6 +95,7 @@
#include <linux/module.h>
#include <linux/mtd/nand.h>
+#include <linux/slab.h>
/**
* nand_ecc_init_ctx - Init the ECC engine context
@@ -104,7 +105,7 @@
*/
int nand_ecc_init_ctx(struct nand_device *nand)
{
- if (!nand->ecc.engine->ops->init_ctx)
+ if (!nand->ecc.engine || !nand->ecc.engine->ops->init_ctx)
return 0;
return nand->ecc.engine->ops->init_ctx(nand);
@@ -117,7 +118,7 @@ EXPORT_SYMBOL(nand_ecc_init_ctx);
*/
void nand_ecc_cleanup_ctx(struct nand_device *nand)
{
- if (nand->ecc.engine->ops->cleanup_ctx)
+ if (nand->ecc.engine && nand->ecc.engine->ops->cleanup_ctx)
nand->ecc.engine->ops->cleanup_ctx(nand);
}
EXPORT_SYMBOL(nand_ecc_cleanup_ctx);
@@ -130,7 +131,7 @@ EXPORT_SYMBOL(nand_ecc_cleanup_ctx);
int nand_ecc_prepare_io_req(struct nand_device *nand,
struct nand_page_io_req *req)
{
- if (!nand->ecc.engine->ops->prepare_io_req)
+ if (!nand->ecc.engine || !nand->ecc.engine->ops->prepare_io_req)
return 0;
return nand->ecc.engine->ops->prepare_io_req(nand, req);
@@ -145,7 +146,7 @@ EXPORT_SYMBOL(nand_ecc_prepare_io_req);
int nand_ecc_finish_io_req(struct nand_device *nand,
struct nand_page_io_req *req)
{
- if (!nand->ecc.engine->ops->finish_io_req)
+ if (!nand->ecc.engine || !nand->ecc.engine->ops->finish_io_req)
return 0;
return nand->ecc.engine->ops->finish_io_req(nand, req);
@@ -479,6 +480,137 @@ bool nand_ecc_is_strong_enough(struct nand_device *nand)
}
EXPORT_SYMBOL(nand_ecc_is_strong_enough);
+/* ECC engine driver internal helpers */
+int nand_ecc_init_req_tweaking(struct nand_ecc_req_tweak_ctx *ctx,
+ struct nand_device *nand)
+{
+ unsigned int total_buffer_size;
+
+ ctx->nand = nand;
+
+ /* Let the user decide the exact length of each buffer */
+ if (!ctx->page_buffer_size)
+ ctx->page_buffer_size = nanddev_page_size(nand);
+ if (!ctx->oob_buffer_size)
+ ctx->oob_buffer_size = nanddev_per_page_oobsize(nand);
+
+ total_buffer_size = ctx->page_buffer_size + ctx->oob_buffer_size;
+
+ ctx->spare_databuf = kzalloc(total_buffer_size, GFP_KERNEL);
+ if (!ctx->spare_databuf)
+ return -ENOMEM;
+
+ ctx->spare_oobbuf = ctx->spare_databuf + ctx->page_buffer_size;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_ecc_init_req_tweaking);
+
+void nand_ecc_cleanup_req_tweaking(struct nand_ecc_req_tweak_ctx *ctx)
+{
+ kfree(ctx->spare_databuf);
+}
+EXPORT_SYMBOL_GPL(nand_ecc_cleanup_req_tweaking);
+
+/*
+ * Ensure the data and OOB areas are fully read/written, otherwise the
+ * correction might not work as expected.
+ */
+void nand_ecc_tweak_req(struct nand_ecc_req_tweak_ctx *ctx,
+ struct nand_page_io_req *req)
+{
+ struct nand_device *nand = ctx->nand;
+ struct nand_page_io_req *orig, *tweak;
+
+ /* Save the original request */
+ ctx->orig_req = *req;
+ ctx->bounce_data = false;
+ ctx->bounce_oob = false;
+ orig = &ctx->orig_req;
+ tweak = req;
+
+ /* Ensure the request covers the entire page */
+ if (orig->datalen < nanddev_page_size(nand)) {
+ ctx->bounce_data = true;
+ tweak->dataoffs = 0;
+ tweak->datalen = nanddev_page_size(nand);
+ tweak->databuf.in = ctx->spare_databuf;
+ memset(tweak->databuf.in, 0xFF, ctx->page_buffer_size);
+ }
+
+ if (orig->ooblen < nanddev_per_page_oobsize(nand)) {
+ ctx->bounce_oob = true;
+ tweak->ooboffs = 0;
+ tweak->ooblen = nanddev_per_page_oobsize(nand);
+ tweak->oobbuf.in = ctx->spare_oobbuf;
+ memset(tweak->oobbuf.in, 0xFF, ctx->oob_buffer_size);
+ }
+
+ /* Copy the data that must be written into the bounce buffers, if needed */
+ if (orig->type == NAND_PAGE_WRITE) {
+ if (ctx->bounce_data)
+ memcpy((void *)tweak->databuf.out + orig->dataoffs,
+ orig->databuf.out, orig->datalen);
+
+ if (ctx->bounce_oob)
+ memcpy((void *)tweak->oobbuf.out + orig->ooboffs,
+ orig->oobbuf.out, orig->ooblen);
+ }
+}
+EXPORT_SYMBOL_GPL(nand_ecc_tweak_req);
+
+void nand_ecc_restore_req(struct nand_ecc_req_tweak_ctx *ctx,
+ struct nand_page_io_req *req)
+{
+ struct nand_page_io_req *orig, *tweak;
+
+ orig = &ctx->orig_req;
+ tweak = req;
+
+ /* Restore the data read from the bounce buffers, if needed */
+ if (orig->type == NAND_PAGE_READ) {
+ if (ctx->bounce_data)
+ memcpy(orig->databuf.in,
+ tweak->databuf.in + orig->dataoffs,
+ orig->datalen);
+
+ if (ctx->bounce_oob)
+ memcpy(orig->oobbuf.in,
+ tweak->oobbuf.in + orig->ooboffs,
+ orig->ooblen);
+ }
+
+ /* Ensure the original request is restored */
+ *req = *orig;
+}
+EXPORT_SYMBOL_GPL(nand_ecc_restore_req);
+
+struct nand_ecc_engine *nand_ecc_get_sw_engine(struct nand_device *nand)
+{
+ unsigned int algo = nand->ecc.user_conf.algo;
+
+ if (algo == NAND_ECC_ALGO_UNKNOWN)
+ algo = nand->ecc.defaults.algo;
+
+ switch (algo) {
+ case NAND_ECC_ALGO_HAMMING:
+ return nand_ecc_sw_hamming_get_engine();
+ case NAND_ECC_ALGO_BCH:
+ return nand_ecc_sw_bch_get_engine();
+ default:
+ break;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(nand_ecc_get_sw_engine);
+
+struct nand_ecc_engine *nand_ecc_get_on_die_hw_engine(struct nand_device *nand)
+{
+ return nand->ecc.ondie_engine;
+}
+EXPORT_SYMBOL(nand_ecc_get_on_die_hw_engine);
+
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
MODULE_DESCRIPTION("Generic ECC engine");
diff --git a/drivers/mtd/nand/onenand/onenand_base.c b/drivers/mtd/nand/onenand/onenand_base.c
index 188b8061e1f7..a9fdea26ea46 100644
--- a/drivers/mtd/nand/onenand/onenand_base.c
+++ b/drivers/mtd/nand/onenand/onenand_base.c
@@ -132,7 +132,7 @@ static const struct mtd_ooblayout_ops onenand_oob_128_ooblayout_ops = {
.free = onenand_ooblayout_128_free,
};
-/**
+/*
* onenand_oob_32_64 - oob info for large (2KB) page
*/
static int onenand_ooblayout_32_64_ecc(struct mtd_info *mtd, int section,
@@ -192,7 +192,7 @@ static const unsigned char ffchars[] = {
/**
* onenand_readw - [OneNAND Interface] Read OneNAND register
- * @param addr address to read
+ * @addr: address to read
*
* Read OneNAND register
*/
@@ -203,8 +203,8 @@ static unsigned short onenand_readw(void __iomem *addr)
/**
* onenand_writew - [OneNAND Interface] Write OneNAND register with value
- * @param value value to write
- * @param addr address to write
+ * @value: value to write
+ * @addr: address to write
*
* Write OneNAND register with value
*/
@@ -215,8 +215,8 @@ static void onenand_writew(unsigned short value, void __iomem *addr)
/**
* onenand_block_address - [DEFAULT] Get block address
- * @param this onenand chip data structure
- * @param block the block
+ * @this: onenand chip data structure
+ * @block: the block
* @return translated block address if DDP, otherwise same
*
* Setup Start Address 1 Register (F100h)
@@ -232,8 +232,8 @@ static int onenand_block_address(struct onenand_chip *this, int block)
/**
* onenand_bufferram_address - [DEFAULT] Get bufferram address
- * @param this onenand chip data structure
- * @param block the block
+ * @this: onenand chip data structure
+ * @block: the block
* @return set DBS value if DDP, otherwise 0
*
* Setup Start Address 2 Register (F101h) for DDP
@@ -249,8 +249,8 @@ static int onenand_bufferram_address(struct onenand_chip *this, int block)
/**
* onenand_page_address - [DEFAULT] Get page address
- * @param page the page address
- * @param sector the sector address
+ * @page: the page address
+ * @sector: the sector address
* @return combined page and sector address
*
* Setup Start Address 8 Register (F107h)
@@ -268,10 +268,10 @@ static int onenand_page_address(int page, int sector)
/**
* onenand_buffer_address - [DEFAULT] Get buffer address
- * @param dataram1 DataRAM index
- * @param sectors the sector address
- * @param count the number of sectors
- * @return the start buffer value
+ * @dataram1: DataRAM index
+ * @sectors: the sector address
+ * @count: the number of sectors
+ * Return: the start buffer value
*
* Setup Start Buffer Register (F200h)
*/
@@ -295,8 +295,8 @@ static int onenand_buffer_address(int dataram1, int sectors, int count)
/**
* flexonenand_block- For given address return block number
- * @param this - OneNAND device structure
- * @param addr - Address for which block number is needed
+ * @this: OneNAND device structure
+ * @addr: Address for which block number is needed
*/
static unsigned flexonenand_block(struct onenand_chip *this, loff_t addr)
{
@@ -359,7 +359,7 @@ EXPORT_SYMBOL(onenand_addr);
/**
* onenand_get_density - [DEFAULT] Get OneNAND density
- * @param dev_id OneNAND device ID
+ * @dev_id: OneNAND device ID
*
* Get OneNAND density from device ID
*/
@@ -371,8 +371,8 @@ static inline int onenand_get_density(int dev_id)
/**
* flexonenand_region - [Flex-OneNAND] Return erase region of addr
- * @param mtd MTD device structure
- * @param addr address whose erase region needs to be identified
+ * @mtd: MTD device structure
+ * @addr: address whose erase region needs to be identified
*/
int flexonenand_region(struct mtd_info *mtd, loff_t addr)
{
@@ -387,10 +387,10 @@ EXPORT_SYMBOL(flexonenand_region);
/**
* onenand_command - [DEFAULT] Send command to OneNAND device
- * @param mtd MTD device structure
- * @param cmd the command to be sent
- * @param addr offset to read from or write to
- * @param len number of bytes to read or write
+ * @mtd: MTD device structure
+ * @cmd: the command to be sent
+ * @addr: offset to read from or write to
+ * @len: number of bytes to read or write
*
* Send command to OneNAND device. This function is used for middle/large page
* devices (1KB/2KB Bytes per page)
@@ -519,7 +519,7 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
/**
* onenand_read_ecc - return ecc status
- * @param this onenand chip structure
+ * @this: onenand chip structure
*/
static inline int onenand_read_ecc(struct onenand_chip *this)
{
@@ -543,8 +543,8 @@ static inline int onenand_read_ecc(struct onenand_chip *this)
/**
* onenand_wait - [DEFAULT] wait until the command is done
- * @param mtd MTD device structure
- * @param state state to select the max. timeout value
+ * @mtd: MTD device structure
+ * @state: state to select the max. timeout value
*
* Wait for command done. This applies to all OneNAND command
* Read can take up to 30us, erase up to 2ms and program up to 350us
@@ -625,8 +625,8 @@ static int onenand_wait(struct mtd_info *mtd, int state)
/*
* onenand_interrupt - [DEFAULT] onenand interrupt handler
- * @param irq onenand interrupt number
- * @param dev_id interrupt data
+ * @irq: onenand interrupt number
+ * @dev_id: interrupt data
*
* complete the work
*/
@@ -643,8 +643,8 @@ static irqreturn_t onenand_interrupt(int irq, void *data)
/*
* onenand_interrupt_wait - [DEFAULT] wait until the command is done
- * @param mtd MTD device structure
- * @param state state to select the max. timeout value
+ * @mtd: MTD device structure
+ * @state: state to select the max. timeout value
*
* Wait for command done.
*/
@@ -659,8 +659,8 @@ static int onenand_interrupt_wait(struct mtd_info *mtd, int state)
/*
* onenand_try_interrupt_wait - [DEFAULT] try interrupt wait
- * @param mtd MTD device structure
- * @param state state to select the max. timeout value
+ * @mtd: MTD device structure
+ * @state: state to select the max. timeout value
*
* Try interrupt based wait (It is used one-time)
*/
@@ -689,7 +689,7 @@ static int onenand_try_interrupt_wait(struct mtd_info *mtd, int state)
/*
* onenand_setup_wait - [OneNAND Interface] setup onenand wait method
- * @param mtd MTD device structure
+ * @mtd: MTD device structure
*
* There's two method to wait onenand work
* 1. polling - read interrupt status register
@@ -724,8 +724,8 @@ static void onenand_setup_wait(struct mtd_info *mtd)
/**
* onenand_bufferram_offset - [DEFAULT] BufferRAM offset
- * @param mtd MTD data structure
- * @param area BufferRAM area
+ * @mtd: MTD data structure
+ * @area: BufferRAM area
* @return offset given area
*
* Return BufferRAM offset given area
@@ -747,11 +747,11 @@ static inline int onenand_bufferram_offset(struct mtd_info *mtd, int area)
/**
* onenand_read_bufferram - [OneNAND Interface] Read the bufferram area
- * @param mtd MTD data structure
- * @param area BufferRAM area
- * @param buffer the databuffer to put/get data
- * @param offset offset to read from or write to
- * @param count number of bytes to read/write
+ * @mtd: MTD data structure
+ * @area: BufferRAM area
+ * @buffer: the databuffer to put/get data
+ * @offset: offset to read from or write to
+ * @count: number of bytes to read/write
*
* Read the BufferRAM area
*/
@@ -783,11 +783,11 @@ static int onenand_read_bufferram(struct mtd_info *mtd, int area,
/**
* onenand_sync_read_bufferram - [OneNAND Interface] Read the bufferram area with Sync. Burst mode
- * @param mtd MTD data structure
- * @param area BufferRAM area
- * @param buffer the databuffer to put/get data
- * @param offset offset to read from or write to
- * @param count number of bytes to read/write
+ * @mtd: MTD data structure
+ * @area: BufferRAM area
+ * @buffer: the databuffer to put/get data
+ * @offset: offset to read from or write to
+ * @count: number of bytes to read/write
*
* Read the BufferRAM area with Sync. Burst Mode
*/
@@ -823,11 +823,11 @@ static int onenand_sync_read_bufferram(struct mtd_info *mtd, int area,
/**
* onenand_write_bufferram - [OneNAND Interface] Write the bufferram area
- * @param mtd MTD data structure
- * @param area BufferRAM area
- * @param buffer the databuffer to put/get data
- * @param offset offset to read from or write to
- * @param count number of bytes to read/write
+ * @mtd: MTD data structure
+ * @area: BufferRAM area
+ * @buffer: the databuffer to put/get data
+ * @offset: offset to read from or write to
+ * @count: number of bytes to read/write
*
* Write the BufferRAM area
*/
@@ -864,8 +864,8 @@ static int onenand_write_bufferram(struct mtd_info *mtd, int area,
/**
* onenand_get_2x_blockpage - [GENERIC] Get blockpage at 2x program mode
- * @param mtd MTD data structure
- * @param addr address to check
+ * @mtd: MTD data structure
+ * @addr: address to check
* @return blockpage address
*
* Get blockpage address at 2x program mode
@@ -888,8 +888,8 @@ static int onenand_get_2x_blockpage(struct mtd_info *mtd, loff_t addr)
/**
* onenand_check_bufferram - [GENERIC] Check BufferRAM information
- * @param mtd MTD data structure
- * @param addr address to check
+ * @mtd: MTD data structure
+ * @addr: address to check
* @return 1 if there are valid data, otherwise 0
*
* Check bufferram if there is data we required
@@ -930,9 +930,9 @@ static int onenand_check_bufferram(struct mtd_info *mtd, loff_t addr)
/**
* onenand_update_bufferram - [GENERIC] Update BufferRAM information
- * @param mtd MTD data structure
- * @param addr address to update
- * @param valid valid flag
+ * @mtd: MTD data structure
+ * @addr: address to update
+ * @valid: valid flag
*
* Update BufferRAM information
*/
@@ -963,9 +963,9 @@ static void onenand_update_bufferram(struct mtd_info *mtd, loff_t addr,
/**
* onenand_invalidate_bufferram - [GENERIC] Invalidate BufferRAM information
- * @param mtd MTD data structure
- * @param addr start address to invalidate
- * @param len length to invalidate
+ * @mtd: MTD data structure
+ * @addr: start address to invalidate
+ * @len: length to invalidate
*
* Invalidate BufferRAM information
*/
@@ -986,8 +986,8 @@ static void onenand_invalidate_bufferram(struct mtd_info *mtd, loff_t addr,
/**
* onenand_get_device - [GENERIC] Get chip for selected access
- * @param mtd MTD device structure
- * @param new_state the state which is requested
+ * @mtd: MTD device structure
+ * @new_state: the state which is requested
*
* Get the device and lock it for exclusive access
*/
@@ -1024,7 +1024,7 @@ static int onenand_get_device(struct mtd_info *mtd, int new_state)
/**
* onenand_release_device - [GENERIC] release chip
- * @param mtd MTD device structure
+ * @mtd: MTD device structure
*
* Deselect, release chip lock and wake up anyone waiting on the device
*/
@@ -1043,10 +1043,10 @@ static void onenand_release_device(struct mtd_info *mtd)
/**
* onenand_transfer_auto_oob - [INTERN] oob auto-placement transfer
- * @param mtd MTD device structure
- * @param buf destination address
- * @param column oob offset to read from
- * @param thislen oob length to read
+ * @mtd: MTD device structure
+ * @buf: destination address
+ * @column: oob offset to read from
+ * @thislen: oob length to read
*/
static int onenand_transfer_auto_oob(struct mtd_info *mtd, uint8_t *buf, int column,
int thislen)
@@ -1061,9 +1061,9 @@ static int onenand_transfer_auto_oob(struct mtd_info *mtd, uint8_t *buf, int col
/**
* onenand_recover_lsb - [Flex-OneNAND] Recover LSB page data
- * @param mtd MTD device structure
- * @param addr address to recover
- * @param status return value from onenand_wait / onenand_bbt_wait
+ * @mtd: MTD device structure
+ * @addr: address to recover
+ * @status: return value from onenand_wait / onenand_bbt_wait
*
* MLC NAND Flash cell has paired pages - LSB page and MSB page. LSB page has
* lower page address and MSB page has higher page address in paired pages.
@@ -1104,9 +1104,9 @@ static int onenand_recover_lsb(struct mtd_info *mtd, loff_t addr, int status)
/**
* onenand_mlc_read_ops_nolock - MLC OneNAND read main and/or out-of-band
- * @param mtd MTD device structure
- * @param from offset to read from
- * @param ops: oob operation description structure
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @ops: oob operation description structure
*
* MLC OneNAND / Flex-OneNAND has 4KB page size and 4KB dataram.
* So, read-while-load is not present.
@@ -1206,9 +1206,9 @@ static int onenand_mlc_read_ops_nolock(struct mtd_info *mtd, loff_t from,
/**
* onenand_read_ops_nolock - [OneNAND Interface] OneNAND read main and/or out-of-band
- * @param mtd MTD device structure
- * @param from offset to read from
- * @param ops: oob operation description structure
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @ops: oob operation description structure
*
* OneNAND read main and/or out-of-band data
*/
@@ -1335,9 +1335,9 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
/**
* onenand_read_oob_nolock - [MTD Interface] OneNAND read out-of-band
- * @param mtd MTD device structure
- * @param from offset to read from
- * @param ops: oob operation description structure
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @ops: oob operation description structure
*
* OneNAND read out-of-band data from the spare area
*/
@@ -1430,10 +1430,10 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
/**
* onenand_read_oob - [MTD Interface] Read main and/or out-of-band
- * @param mtd: MTD device structure
- * @param from: offset to read from
- * @param ops: oob operation description structure
-
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @ops: oob operation description structure
+ *
* Read main and/or out-of-band
*/
static int onenand_read_oob(struct mtd_info *mtd, loff_t from,
@@ -1466,8 +1466,8 @@ static int onenand_read_oob(struct mtd_info *mtd, loff_t from,
/**
* onenand_bbt_wait - [DEFAULT] wait until the command is done
- * @param mtd MTD device structure
- * @param state state to select the max. timeout value
+ * @mtd: MTD device structure
+ * @state: state to select the max. timeout value
*
* Wait for command done.
*/
@@ -1517,9 +1517,9 @@ static int onenand_bbt_wait(struct mtd_info *mtd, int state)
/**
* onenand_bbt_read_oob - [MTD Interface] OneNAND read out-of-band for bbt scan
- * @param mtd MTD device structure
- * @param from offset to read from
- * @param ops oob operation description structure
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @ops: oob operation description structure
*
* OneNAND read out-of-band data from the spare area for bbt scan
*/
@@ -1594,9 +1594,9 @@ int onenand_bbt_read_oob(struct mtd_info *mtd, loff_t from,
#ifdef CONFIG_MTD_ONENAND_VERIFY_WRITE
/**
* onenand_verify_oob - [GENERIC] verify the oob contents after a write
- * @param mtd MTD device structure
- * @param buf the databuffer to verify
- * @param to offset to read from
+ * @mtd: MTD device structure
+ * @buf: the databuffer to verify
+ * @to: offset to read from
*/
static int onenand_verify_oob(struct mtd_info *mtd, const u_char *buf, loff_t to)
{
@@ -1622,10 +1622,10 @@ static int onenand_verify_oob(struct mtd_info *mtd, const u_char *buf, loff_t to
/**
* onenand_verify - [GENERIC] verify the chip contents after a write
- * @param mtd MTD device structure
- * @param buf the databuffer to verify
- * @param addr offset to read from
- * @param len number of bytes to read and compare
+ * @mtd: MTD device structure
+ * @buf: the databuffer to verify
+ * @addr: offset to read from
+ * @len: number of bytes to read and compare
*/
static int onenand_verify(struct mtd_info *mtd, const u_char *buf, loff_t addr, size_t len)
{
@@ -1684,11 +1684,11 @@ static void onenand_panic_wait(struct mtd_info *mtd)
/**
* onenand_panic_write - [MTD Interface] write buffer to FLASH in a panic context
- * @param mtd MTD device structure
- * @param to offset to write to
- * @param len number of bytes to write
- * @param retlen pointer to variable to store the number of written bytes
- * @param buf the data to write
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @len: number of bytes to write
+ * @retlen: pointer to variable to store the number of written bytes
+ * @buf: the data to write
*
* Write with ECC
*/
@@ -1762,11 +1762,11 @@ static int onenand_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
/**
* onenand_fill_auto_oob - [INTERN] oob auto-placement transfer
- * @param mtd MTD device structure
- * @param oob_buf oob buffer
- * @param buf source address
- * @param column oob offset to write to
- * @param thislen oob length to write
+ * @mtd: MTD device structure
+ * @oob_buf: oob buffer
+ * @buf: source address
+ * @column: oob offset to write to
+ * @thislen: oob length to write
*/
static int onenand_fill_auto_oob(struct mtd_info *mtd, u_char *oob_buf,
const u_char *buf, int column, int thislen)
@@ -1776,9 +1776,9 @@ static int onenand_fill_auto_oob(struct mtd_info *mtd, u_char *oob_buf,
/**
* onenand_write_ops_nolock - [OneNAND Interface] write main and/or out-of-band
- * @param mtd MTD device structure
- * @param to offset to write to
- * @param ops oob operation description structure
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @ops: oob operation description structure
*
* Write main and/or oob with ECC
*/
@@ -1957,12 +1957,9 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
/**
* onenand_write_oob_nolock - [INTERN] OneNAND write out-of-band
- * @param mtd MTD device structure
- * @param to offset to write to
- * @param len number of bytes to write
- * @param retlen pointer to variable to store the number of written bytes
- * @param buf the data to write
- * @param mode operation mode
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @ops: oob operation description structure
*
* OneNAND write out-of-band
*/
@@ -2070,9 +2067,9 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
/**
* onenand_write_oob - [MTD Interface] NAND write data and/or out-of-band
- * @param mtd: MTD device structure
- * @param to: offset to write
- * @param ops: oob operation description structure
+ * @mtd: MTD device structure
+ * @to: offset to write
+ * @ops: oob operation description structure
*/
static int onenand_write_oob(struct mtd_info *mtd, loff_t to,
struct mtd_oob_ops *ops)
@@ -2101,9 +2098,9 @@ static int onenand_write_oob(struct mtd_info *mtd, loff_t to,
/**
* onenand_block_isbad_nolock - [GENERIC] Check if a block is marked bad
- * @param mtd MTD device structure
- * @param ofs offset from device start
- * @param allowbbt 1, if its allowed to access the bbt area
+ * @mtd: MTD device structure
+ * @ofs: offset from device start
+ * @allowbbt: 1, if it is allowed to access the bbt area
*
* Check, if the block is bad. Either by reading the bad block table or
* calling of the scan function.
@@ -2144,9 +2141,9 @@ static int onenand_multiblock_erase_verify(struct mtd_info *mtd,
/**
* onenand_multiblock_erase - [INTERN] erase block(s) using multiblock erase
- * @param mtd MTD device structure
- * @param instr erase instruction
- * @param region erase region
+ * @mtd: MTD device structure
+ * @instr: erase instruction
+ * @block_size: block size
*
* Erase one or more blocks up to 64 block at a time
*/
@@ -2254,10 +2251,10 @@ static int onenand_multiblock_erase(struct mtd_info *mtd,
/**
* onenand_block_by_block_erase - [INTERN] erase block(s) using regular erase
- * @param mtd MTD device structure
- * @param instr erase instruction
- * @param region erase region
- * @param block_size erase block size
+ * @mtd: MTD device structure
+ * @instr: erase instruction
+ * @region: erase region
+ * @block_size: erase block size
*
* Erase one or more blocks one block at a time
*/
@@ -2326,8 +2323,8 @@ static int onenand_block_by_block_erase(struct mtd_info *mtd,
/**
* onenand_erase - [MTD Interface] erase block(s)
- * @param mtd MTD device structure
- * @param instr erase instruction
+ * @mtd: MTD device structure
+ * @instr: erase instruction
*
* Erase one or more blocks
*/
@@ -2391,7 +2388,7 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
/**
* onenand_sync - [MTD Interface] sync
- * @param mtd MTD device structure
+ * @mtd: MTD device structure
*
* Sync is actually a wait for chip ready function
*/
@@ -2408,8 +2405,8 @@ static void onenand_sync(struct mtd_info *mtd)
/**
* onenand_block_isbad - [MTD Interface] Check whether the block at the given offset is bad
- * @param mtd MTD device structure
- * @param ofs offset relative to mtd start
+ * @mtd: MTD device structure
+ * @ofs: offset relative to mtd start
*
* Check whether the block is bad
*/
@@ -2425,8 +2422,8 @@ static int onenand_block_isbad(struct mtd_info *mtd, loff_t ofs)
/**
* onenand_default_block_markbad - [DEFAULT] mark a block bad
- * @param mtd MTD device structure
- * @param ofs offset from device start
+ * @mtd: MTD device structure
+ * @ofs: offset from device start
*
* This is the default implementation, which can be overridden by
* a hardware specific driver.
@@ -2460,8 +2457,8 @@ static int onenand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
/**
* onenand_block_markbad - [MTD Interface] Mark the block at the given offset as bad
- * @param mtd MTD device structure
- * @param ofs offset relative to mtd start
+ * @mtd: MTD device structure
+ * @ofs: offset relative to mtd start
*
* Mark the block as bad
*/
@@ -2486,10 +2483,10 @@ static int onenand_block_markbad(struct mtd_info *mtd, loff_t ofs)
/**
* onenand_do_lock_cmd - [OneNAND Interface] Lock or unlock block(s)
- * @param mtd MTD device structure
- * @param ofs offset relative to mtd start
- * @param len number of bytes to lock or unlock
- * @param cmd lock or unlock command
+ * @mtd: MTD device structure
+ * @ofs: offset relative to mtd start
+ * @len: number of bytes to lock or unlock
+ * @cmd: lock or unlock command
*
* Lock or unlock one or more blocks
*/
@@ -2566,9 +2563,9 @@ static int onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs, size_t len, int
/**
* onenand_lock - [MTD Interface] Lock block(s)
- * @param mtd MTD device structure
- * @param ofs offset relative to mtd start
- * @param len number of bytes to unlock
+ * @mtd: MTD device structure
+ * @ofs: offset relative to mtd start
+ * @len: number of bytes to unlock
*
* Lock one or more blocks
*/
@@ -2584,9 +2581,9 @@ static int onenand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
/**
* onenand_unlock - [MTD Interface] Unlock block(s)
- * @param mtd MTD device structure
- * @param ofs offset relative to mtd start
- * @param len number of bytes to unlock
+ * @mtd: MTD device structure
+ * @ofs: offset relative to mtd start
+ * @len: number of bytes to unlock
*
* Unlock one or more blocks
*/
@@ -2602,7 +2599,7 @@ static int onenand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
/**
* onenand_check_lock_status - [OneNAND Interface] Check lock status
- * @param this onenand chip data structure
+ * @this: onenand chip data structure
*
* Check lock status
*/
@@ -2636,7 +2633,7 @@ static int onenand_check_lock_status(struct onenand_chip *this)
/**
* onenand_unlock_all - [OneNAND Interface] unlock all blocks
- * @param mtd MTD device structure
+ * @mtd: MTD device structure
*
* Unlock all blocks
*/
@@ -2683,10 +2680,10 @@ static void onenand_unlock_all(struct mtd_info *mtd)
/**
* onenand_otp_command - Send OTP specific command to OneNAND device
- * @param mtd MTD device structure
- * @param cmd the command to be sent
- * @param addr offset to read from or write to
- * @param len number of bytes to read or write
+ * @mtd: MTD device structure
+ * @cmd: the command to be sent
+ * @addr: offset to read from or write to
+ * @len: number of bytes to read or write
*/
static int onenand_otp_command(struct mtd_info *mtd, int cmd, loff_t addr,
size_t len)
@@ -2758,11 +2755,9 @@ static int onenand_otp_command(struct mtd_info *mtd, int cmd, loff_t addr,
/**
* onenand_otp_write_oob_nolock - [INTERN] OneNAND write out-of-band, specific to OTP
- * @param mtd MTD device structure
- * @param to offset to write to
- * @param len number of bytes to write
- * @param retlen pointer to variable to store the number of written bytes
- * @param buf the data to write
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @ops: oob operation description structure
*
* OneNAND write out-of-band only for OTP
*/
@@ -2889,11 +2884,11 @@ typedef int (*otp_op_t)(struct mtd_info *mtd, loff_t form, size_t len,
/**
* do_otp_read - [DEFAULT] Read OTP block area
- * @param mtd MTD device structure
- * @param from The offset to read
- * @param len number of bytes to read
- * @param retlen pointer to variable to store the number of readbytes
- * @param buf the databuffer to put/get data
+ * @mtd: MTD device structure
+ * @from: The offset to read
+ * @len: number of bytes to read
+ * @retlen: pointer to variable to store the number of readbytes
+ * @buf: the databuffer to put/get data
*
* Read OTP block area.
*/
@@ -2926,11 +2921,11 @@ static int do_otp_read(struct mtd_info *mtd, loff_t from, size_t len,
/**
* do_otp_write - [DEFAULT] Write OTP block area
- * @param mtd MTD device structure
- * @param to The offset to write
- * @param len number of bytes to write
- * @param retlen pointer to variable to store the number of write bytes
- * @param buf the databuffer to put/get data
+ * @mtd: MTD device structure
+ * @to: The offset to write
+ * @len: number of bytes to write
+ * @retlen: pointer to variable to store the number of write bytes
+ * @buf: the databuffer to put/get data
*
* Write OTP block area.
*/
@@ -2970,11 +2965,11 @@ static int do_otp_write(struct mtd_info *mtd, loff_t to, size_t len,
/**
* do_otp_lock - [DEFAULT] Lock OTP block area
- * @param mtd MTD device structure
- * @param from The offset to lock
- * @param len number of bytes to lock
- * @param retlen pointer to variable to store the number of lock bytes
- * @param buf the databuffer to put/get data
+ * @mtd: MTD device structure
+ * @from: The offset to lock
+ * @len: number of bytes to lock
+ * @retlen: pointer to variable to store the number of lock bytes
+ * @buf: the databuffer to put/get data
*
* Lock OTP block area.
*/
@@ -3018,13 +3013,13 @@ static int do_otp_lock(struct mtd_info *mtd, loff_t from, size_t len,
/**
* onenand_otp_walk - [DEFAULT] Handle OTP operation
- * @param mtd MTD device structure
- * @param from The offset to read/write
- * @param len number of bytes to read/write
- * @param retlen pointer to variable to store the number of read bytes
- * @param buf the databuffer to put/get data
- * @param action do given action
- * @param mode specify user and factory
+ * @mtd: MTD device structure
+ * @from: The offset to read/write
+ * @len: number of bytes to read/write
+ * @retlen: pointer to variable to store the number of read bytes
+ * @buf: the databuffer to put/get data
+ * @action: do given action
+ * @mode: specify user and factory
*
* Handle OTP operation.
*/
@@ -3099,10 +3094,10 @@ static int onenand_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
/**
* onenand_get_fact_prot_info - [MTD Interface] Read factory OTP info
- * @param mtd MTD device structure
- * @param len number of bytes to read
- * @param retlen pointer to variable to store the number of read bytes
- * @param buf the databuffer to put/get data
+ * @mtd: MTD device structure
+ * @len: number of bytes to read
+ * @retlen: pointer to variable to store the number of read bytes
+ * @buf: the databuffer to put/get data
*
* Read factory OTP info.
*/
@@ -3115,11 +3110,11 @@ static int onenand_get_fact_prot_info(struct mtd_info *mtd, size_t len,
/**
* onenand_read_fact_prot_reg - [MTD Interface] Read factory OTP area
- * @param mtd MTD device structure
- * @param from The offset to read
- * @param len number of bytes to read
- * @param retlen pointer to variable to store the number of read bytes
- * @param buf the databuffer to put/get data
+ * @mtd: MTD device structure
+ * @from: The offset to read
+ * @len: number of bytes to read
+ * @retlen: pointer to variable to store the number of read bytes
+ * @buf: the databuffer to put/get data
*
* Read factory OTP area.
*/
@@ -3131,10 +3126,10 @@ static int onenand_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
/**
* onenand_get_user_prot_info - [MTD Interface] Read user OTP info
- * @param mtd MTD device structure
- * @param retlen pointer to variable to store the number of read bytes
- * @param len number of bytes to read
- * @param buf the databuffer to put/get data
+ * @mtd: MTD device structure
+ * @retlen: pointer to variable to store the number of read bytes
+ * @len: number of bytes to read
+ * @buf: the databuffer to put/get data
*
* Read user OTP info.
*/
@@ -3147,11 +3142,11 @@ static int onenand_get_user_prot_info(struct mtd_info *mtd, size_t len,
/**
* onenand_read_user_prot_reg - [MTD Interface] Read user OTP area
- * @param mtd MTD device structure
- * @param from The offset to read
- * @param len number of bytes to read
- * @param retlen pointer to variable to store the number of read bytes
- * @param buf the databuffer to put/get data
+ * @mtd: MTD device structure
+ * @from: The offset to read
+ * @len: number of bytes to read
+ * @retlen: pointer to variable to store the number of read bytes
+ * @buf: the databuffer to put/get data
*
* Read user OTP area.
*/
@@ -3163,11 +3158,11 @@ static int onenand_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
/**
* onenand_write_user_prot_reg - [MTD Interface] Write user OTP area
- * @param mtd MTD device structure
- * @param from The offset to write
- * @param len number of bytes to write
- * @param retlen pointer to variable to store the number of write bytes
- * @param buf the databuffer to put/get data
+ * @mtd: MTD device structure
+ * @from: The offset to write
+ * @len: number of bytes to write
+ * @retlen: pointer to variable to store the number of write bytes
+ * @buf: the databuffer to put/get data
*
* Write user OTP area.
*/
@@ -3179,9 +3174,9 @@ static int onenand_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
/**
* onenand_lock_user_prot_reg - [MTD Interface] Lock user OTP area
- * @param mtd MTD device structure
- * @param from The offset to lock
- * @param len number of bytes to unlock
+ * @mtd: MTD device structure
+ * @from: The offset to lock
+ * @len: number of bytes to unlock
*
* Write lock mark on spare area in page 0 in OTP block
*/
@@ -3234,7 +3229,7 @@ static int onenand_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
/**
* onenand_check_features - Check and set OneNAND features
- * @param mtd MTD data structure
+ * @mtd: MTD data structure
*
* Check and set OneNAND features
* - lock scheme
@@ -3324,8 +3319,8 @@ static void onenand_check_features(struct mtd_info *mtd)
/**
* onenand_print_device_info - Print device & version ID
- * @param device device ID
- * @param version version ID
+ * @device: device ID
+ * @version: version ID
*
* Print device & version ID
*/
@@ -3355,7 +3350,7 @@ static const struct onenand_manufacturers onenand_manuf_ids[] = {
/**
* onenand_check_maf - Check manufacturer ID
- * @param manuf manufacturer ID
+ * @manuf: manufacturer ID
*
* Check manufacturer ID
*/
@@ -3380,9 +3375,9 @@ static int onenand_check_maf(int manuf)
}
/**
-* flexonenand_get_boundary - Reads the SLC boundary
-* @param onenand_info - onenand info structure
-**/
+ * flexonenand_get_boundary - Reads the SLC boundary
+ * @mtd: MTD data structure
+ */
static int flexonenand_get_boundary(struct mtd_info *mtd)
{
struct onenand_chip *this = mtd->priv;
@@ -3422,7 +3417,7 @@ static int flexonenand_get_boundary(struct mtd_info *mtd)
/**
* flexonenand_get_size - Fill up fields in onenand_chip and mtd_info
* boundary[], diesize[], mtd->size, mtd->erasesize
- * @param mtd - MTD device structure
+ * @mtd: MTD device structure
*/
static void flexonenand_get_size(struct mtd_info *mtd)
{
@@ -3493,9 +3488,9 @@ static void flexonenand_get_size(struct mtd_info *mtd)
/**
* flexonenand_check_blocks_erased - Check if blocks are erased
- * @param mtd_info - mtd info structure
- * @param start - first erase block to check
- * @param end - last erase block to check
+ * @mtd: mtd info structure
+ * @start: first erase block to check
+ * @end: last erase block to check
*
* Converting an unerased block from MLC to SLC
* causes byte values to change. Since both data and its ECC
@@ -3548,9 +3543,8 @@ static int flexonenand_check_blocks_erased(struct mtd_info *mtd, int start, int
return 0;
}
-/**
+/*
* flexonenand_set_boundary - Writes the SLC boundary
- * @param mtd - mtd info structure
*/
static int flexonenand_set_boundary(struct mtd_info *mtd, int die,
int boundary, int lock)
@@ -3640,7 +3634,7 @@ out:
/**
* onenand_chip_probe - [OneNAND Interface] The generic chip probe
- * @param mtd MTD device structure
+ * @mtd: MTD device structure
*
* OneNAND detection method:
* Compare the values from command with ones from register
@@ -3688,7 +3682,7 @@ static int onenand_chip_probe(struct mtd_info *mtd)
/**
* onenand_probe - [OneNAND Interface] Probe the OneNAND device
- * @param mtd MTD device structure
+ * @mtd: MTD device structure
*/
static int onenand_probe(struct mtd_info *mtd)
{
@@ -3783,7 +3777,7 @@ static int onenand_probe(struct mtd_info *mtd)
/**
* onenand_suspend - [MTD Interface] Suspend the OneNAND flash
- * @param mtd MTD device structure
+ * @mtd: MTD device structure
*/
static int onenand_suspend(struct mtd_info *mtd)
{
@@ -3792,7 +3786,7 @@ static int onenand_suspend(struct mtd_info *mtd)
/**
* onenand_resume - [MTD Interface] Resume the OneNAND flash
- * @param mtd MTD device structure
+ * @mtd: MTD device structure
*/
static void onenand_resume(struct mtd_info *mtd)
{
@@ -3807,8 +3801,8 @@ static void onenand_resume(struct mtd_info *mtd)
/**
* onenand_scan - [OneNAND Interface] Scan for the OneNAND device
- * @param mtd MTD device structure
- * @param maxchips Number of chips to scan for
+ * @mtd: MTD device structure
+ * @maxchips: Number of chips to scan for
*
* This fills out all the not initialized function pointers
* with the defaults.
@@ -3985,7 +3979,7 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
/**
* onenand_release - [OneNAND Interface] Free resources held by the OneNAND device
- * @param mtd MTD device structure
+ * @mtd: MTD device structure
*/
void onenand_release(struct mtd_info *mtd)
{
diff --git a/drivers/mtd/nand/onenand/onenand_bbt.c b/drivers/mtd/nand/onenand/onenand_bbt.c
index 57c31c81be18..def89f108007 100644
--- a/drivers/mtd/nand/onenand/onenand_bbt.c
+++ b/drivers/mtd/nand/onenand/onenand_bbt.c
@@ -18,10 +18,10 @@
/**
* check_short_pattern - [GENERIC] check if a pattern is in the buffer
- * @param buf the buffer to search
- * @param len the length of buffer to search
- * @param paglen the pagelength
- * @param td search pattern descriptor
+ * @buf: the buffer to search
+ * @len: the length of buffer to search
+ * @paglen: the pagelength
+ * @td: search pattern descriptor
*
* Check for a pattern at the given place. Used to search bad block
* tables and good / bad block identifiers. Same as check_pattern, but
@@ -44,10 +44,10 @@ static int check_short_pattern(uint8_t *buf, int len, int paglen, struct nand_bb
/**
* create_bbt - [GENERIC] Create a bad block table by scanning the device
- * @param mtd MTD device structure
- * @param buf temporary buffer
- * @param bd descriptor for the good/bad block search pattern
- * @param chip create the table for a specific chip, -1 read all chips.
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @bd: descriptor for the good/bad block search pattern
+ * @chip: create the table for a specific chip, -1 read all chips.
* Applies only if NAND_BBT_PERCHIP option is set
*
* Create a bad block table by scanning the device
@@ -122,8 +122,8 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
/**
* onenand_memory_bbt - [GENERIC] create a memory based bad block table
- * @param mtd MTD device structure
- * @param bd descriptor for the good/bad block search pattern
+ * @mtd: MTD device structure
+ * @bd: descriptor for the good/bad block search pattern
*
* The function creates a memory based bbt by scanning the device
* for manufacturer / software marked good / bad blocks
@@ -137,9 +137,9 @@ static inline int onenand_memory_bbt (struct mtd_info *mtd, struct nand_bbt_desc
/**
* onenand_isbad_bbt - [OneNAND Interface] Check if a block is bad
- * @param mtd MTD device structure
- * @param offs offset in the device
- * @param allowbbt allow access to bad block table region
+ * @mtd: MTD device structure
+ * @offs: offset in the device
+ * @allowbbt: allow access to bad block table region
*/
static int onenand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
{
@@ -166,8 +166,8 @@ static int onenand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
/**
* onenand_scan_bbt - [OneNAND Interface] scan, find, read and maybe create bad block table(s)
- * @param mtd MTD device structure
- * @param bd descriptor for the good/bad block search pattern
+ * @mtd: MTD device structure
+ * @bd: descriptor for the good/bad block search pattern
*
* The function checks, if a bad block table(s) is/are already
* available. If not it scans the device for manufacturer
@@ -221,7 +221,7 @@ static struct nand_bbt_descr largepage_memorybased = {
/**
* onenand_default_bbt - [OneNAND Interface] Select a default bad block table for the device
- * @param mtd MTD device structure
+ * @mtd: MTD device structure
*
* This function selects the default bad block table
* support for the device and calls the onenand_scan_bbt function
diff --git a/drivers/mtd/nand/onenand/onenand_omap2.c b/drivers/mtd/nand/onenand/onenand_omap2.c
index d8c0bd002c2b..12825eb97938 100644
--- a/drivers/mtd/nand/onenand/onenand_omap2.c
+++ b/drivers/mtd/nand/onenand/onenand_omap2.c
@@ -371,12 +371,12 @@ static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
/*
- * If the buffer address is not DMA-able, len is not long enough to make
- * DMA transfers profitable or panic_write() may be in an interrupt
- * context fallback to PIO mode.
+ * If the buffer address is not DMA-able, len is not long enough to
+ * make DMA transfers profitable, or we are invoked from panic_write(),
+ * fall back to PIO mode.
*/
if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 ||
- count < 384 || in_interrupt() || oops_in_progress)
+ count < 384 || mtd->oops_panic_write)
goto out_copy;
xtra = count & 3;
@@ -418,12 +418,12 @@ static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
/*
- * If the buffer address is not DMA-able, len is not long enough to make
- * DMA transfers profitable or panic_write() may be in an interrupt
- * context fallback to PIO mode.
+ * If the buffer address is not DMA-able, len is not long enough to
+ * make DMA transfers profitable, or we are invoked from panic_write(),
+ * fall back to PIO mode.
*/
if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 ||
- count < 384 || in_interrupt() || oops_in_progress)
+ count < 384 || mtd->oops_panic_write)
goto out_copy;
dma_src = dma_map_single(dev, buf, count, DMA_TO_DEVICE);
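The two hunks above apply the same fallback rule; read as one predicate (a hypothetical condensation, not code from the patch), the driver prefers PIO whenever:

static bool example_prefer_pio(struct mtd_info *mtd, const void *buf,
			       unsigned long bram_offset, size_t count)
{
	/* Unmappable or misaligned buffer, short transfer, or panic write */
	return !virt_addr_valid(buf) || (bram_offset & 3) ||
	       ((size_t)buf & 3) || count < 384 || mtd->oops_panic_write;
}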
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index 6c46f25b57e2..442a039b92f3 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -1,20 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
-config MTD_NAND_ECC_SW_HAMMING
- tristate
-
-config MTD_NAND_ECC_SW_HAMMING_SMC
- bool "NAND ECC Smart Media byte order"
- depends on MTD_NAND_ECC_SW_HAMMING
- default n
- help
- Software ECC according to the Smart Media Specification.
- The original Linux implementation had byte 0 and 1 swapped.
-
menuconfig MTD_RAW_NAND
tristate "Raw/Parallel NAND Device Support"
select MTD_NAND_CORE
select MTD_NAND_ECC
- select MTD_NAND_ECC_SW_HAMMING
help
This enables support for accessing all type of raw/parallel
NAND flash devices. For further information see
@@ -22,16 +10,6 @@ menuconfig MTD_RAW_NAND
if MTD_RAW_NAND
-config MTD_NAND_ECC_SW_BCH
- bool "Support software BCH ECC"
- select BCH
- default n
- help
- This enables support for software BCH error correction. Binary BCH
- codes are more powerful and cpu intensive than traditional Hamming
- ECC codes. They are used with NAND devices requiring more than 1 bit
- of error correction.
-
comment "Raw/parallel NAND flash controllers"
config MTD_NAND_DENALI
@@ -93,6 +71,7 @@ config MTD_NAND_AU1550
config MTD_NAND_NDFC
tristate "IBM/MCC 4xx NAND controller"
depends on 4xx
+ select MTD_NAND_ECC_SW_HAMMING
select MTD_NAND_ECC_SW_HAMMING_SMC
help
NDFC Nand Flash Controllers are integrated in IBM/AMCC's 4xx SoCs
@@ -313,7 +292,7 @@ config MTD_NAND_VF610_NFC
config MTD_NAND_MXC
tristate "Freescale MXC NAND controller"
depends on ARCH_MXC || COMPILE_TEST
- depends on HAS_IOMEM
+ depends on HAS_IOMEM && OF
help
This enables the driver for the NAND flash controller on the
MXC processors.
@@ -462,6 +441,26 @@ config MTD_NAND_ARASAN
Enables the driver for the Arasan NAND flash controller on
Zynq Ultrascale+ MPSoC.
+config MTD_NAND_INTEL_LGM
+ tristate "Support for NAND controller on Intel LGM SoC"
+ depends on OF || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ Enables support for NAND flash chips on Intel's LGM SoC.
+ The NAND flash controller is interfaced through the External Bus Unit.
+
+config MTD_NAND_ROCKCHIP
+ tristate "Rockchip NAND controller"
+ depends on ARCH_ROCKCHIP && HAS_IOMEM
+ help
+ Enables support for the NAND controller on Rockchip SoCs.
+ There are four different versions of the NAND flash controller,
+ including:
+ NFC v600: RK2928, RK3066, RK3188
+ NFC v622: RK3036, RK3128
+ NFC v800: RK3308, RV1108
+ NFC v900: PX30, RK3326
+
comment "Misc"
config MTD_SM_COMMON
diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile
index 2930f5b9015d..32475a28d8f8 100644
--- a/drivers/mtd/nand/raw/Makefile
+++ b/drivers/mtd/nand/raw/Makefile
@@ -1,8 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_MTD_RAW_NAND) += nand.o
-obj-$(CONFIG_MTD_NAND_ECC_SW_HAMMING) += nand_ecc.o
-nand-$(CONFIG_MTD_NAND_ECC_SW_BCH) += nand_bch.o
obj-$(CONFIG_MTD_SM_COMMON) += sm_common.o
obj-$(CONFIG_MTD_NAND_CAFE) += cafe_nand.o
@@ -58,6 +56,8 @@ obj-$(CONFIG_MTD_NAND_STM32_FMC2) += stm32_fmc2_nand.o
obj-$(CONFIG_MTD_NAND_MESON) += meson_nand.o
obj-$(CONFIG_MTD_NAND_CADENCE) += cadence-nand-controller.o
obj-$(CONFIG_MTD_NAND_ARASAN) += arasan-nand-controller.o
+obj-$(CONFIG_MTD_NAND_INTEL_LGM) += intel-nand-controller.o
+obj-$(CONFIG_MTD_NAND_ROCKCHIP) += rockchip-nand-controller.o
nand-objs := nand_base.o nand_legacy.o nand_bbt.o nand_timings.o nand_ids.o
nand-objs += nand_onfi.o
diff --git a/drivers/mtd/nand/raw/arasan-nand-controller.c b/drivers/mtd/nand/raw/arasan-nand-controller.c
index fbb4ea751be8..549aac00228e 100644
--- a/drivers/mtd/nand/raw/arasan-nand-controller.c
+++ b/drivers/mtd/nand/raw/arasan-nand-controller.c
@@ -118,6 +118,7 @@
* @rdy_timeout_ms: Timeout for waits on Ready/Busy pin
* @len: Data transfer length
* @read: Data transfer direction from the controller point of view
+ * @buf: Data buffer
*/
struct anfc_op {
u32 pkt_reg;
diff --git a/drivers/mtd/nand/raw/au1550nd.c b/drivers/mtd/nand/raw/au1550nd.c
index 7b6b354f2d39..99116896cfd6 100644
--- a/drivers/mtd/nand/raw/au1550nd.c
+++ b/drivers/mtd/nand/raw/au1550nd.c
@@ -3,6 +3,7 @@
* Copyright (C) 2004 Embedded Edge, LLC
*/
+#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
index 2da39ab89286..659eaa6f0980 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
@@ -1846,7 +1846,7 @@ static void brcmnand_write_buf(struct nand_chip *chip, const uint8_t *buf,
}
}
-/**
+/*
* Kick EDU engine
*/
static int brcmnand_edu_trans(struct brcmnand_host *host, u64 addr, u32 *buf,
@@ -1937,7 +1937,7 @@ static int brcmnand_edu_trans(struct brcmnand_host *host, u64 addr, u32 *buf,
return ret;
}
-/**
+/*
* Construct a FLASH_DMA descriptor as part of a linked list. You must know the
* following ahead of time:
* - Is this descriptor the beginning or end of a linked list?
@@ -1970,7 +1970,7 @@ static int brcmnand_fill_dma_desc(struct brcmnand_host *host,
return 0;
}
-/**
+/*
* Kick the FLASH_DMA engine, with a given DMA descriptor
*/
static void brcmnand_dma_run(struct brcmnand_host *host, dma_addr_t desc)
diff --git a/drivers/mtd/nand/raw/cafe_nand.c b/drivers/mtd/nand/raw/cafe_nand.c
index 2b94f385a1a8..d0e8ffd55c22 100644
--- a/drivers/mtd/nand/raw/cafe_nand.c
+++ b/drivers/mtd/nand/raw/cafe_nand.c
@@ -359,10 +359,10 @@ static int cafe_nand_read_oob(struct nand_chip *chip, int page)
}
/**
* cafe_nand_read_page_syndrome - [REPLACEABLE] hardware ecc syndrome based page read
- * @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: buffer to store read data
* @oob_required: caller expects OOB data read to chip->oob_poi
+ * @page: page number to read
*
* The hw generator calculates the error syndrome automatically. Therefore
* we need a special oob layout and handling.
diff --git a/drivers/mtd/nand/raw/cs553x_nand.c b/drivers/mtd/nand/raw/cs553x_nand.c
index 282203debd0c..6edf78c16fc8 100644
--- a/drivers/mtd/nand/raw/cs553x_nand.c
+++ b/drivers/mtd/nand/raw/cs553x_nand.c
@@ -19,7 +19,6 @@
#include <linux/delay.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
-#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/partitions.h>
#include <linux/iopoll.h>
@@ -252,7 +251,7 @@ static int cs553x_attach_chip(struct nand_chip *chip)
chip->ecc.bytes = 3;
chip->ecc.hwctl = cs_enable_hwecc;
chip->ecc.calculate = cs_calculate_ecc;
- chip->ecc.correct = nand_correct_data;
+ chip->ecc.correct = rawnand_sw_hamming_correct;
chip->ecc.strength = 1;
return 0;
diff --git a/drivers/mtd/nand/raw/davinci_nand.c b/drivers/mtd/nand/raw/davinci_nand.c
index f8c36d19ab47..118da9944e3b 100644
--- a/drivers/mtd/nand/raw/davinci_nand.c
+++ b/drivers/mtd/nand/raw/davinci_nand.c
@@ -586,10 +586,10 @@ static int davinci_nand_attach_chip(struct nand_chip *chip)
return PTR_ERR(pdata);
/* Use board-specific ECC config */
- info->chip.ecc.engine_type = pdata->engine_type;
- info->chip.ecc.placement = pdata->ecc_placement;
+ chip->ecc.engine_type = pdata->engine_type;
+ chip->ecc.placement = pdata->ecc_placement;
- switch (info->chip.ecc.engine_type) {
+ switch (chip->ecc.engine_type) {
case NAND_ECC_ENGINE_TYPE_NONE:
pdata->ecc_bits = 0;
break;
@@ -601,7 +601,7 @@ static int davinci_nand_attach_chip(struct nand_chip *chip)
* NAND_ECC_ALGO_HAMMING to avoid adding an extra ->ecc_algo
* field to davinci_nand_pdata.
*/
- info->chip.ecc.algo = NAND_ECC_ALGO_HAMMING;
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
break;
case NAND_ECC_ENGINE_TYPE_ON_HOST:
if (pdata->ecc_bits == 4) {
@@ -628,12 +628,12 @@ static int davinci_nand_attach_chip(struct nand_chip *chip)
if (ret == -EBUSY)
return ret;
- info->chip.ecc.calculate = nand_davinci_calculate_4bit;
- info->chip.ecc.correct = nand_davinci_correct_4bit;
- info->chip.ecc.hwctl = nand_davinci_hwctl_4bit;
- info->chip.ecc.bytes = 10;
- info->chip.ecc.options = NAND_ECC_GENERIC_ERASED_CHECK;
- info->chip.ecc.algo = NAND_ECC_ALGO_BCH;
+ chip->ecc.calculate = nand_davinci_calculate_4bit;
+ chip->ecc.correct = nand_davinci_correct_4bit;
+ chip->ecc.hwctl = nand_davinci_hwctl_4bit;
+ chip->ecc.bytes = 10;
+ chip->ecc.options = NAND_ECC_GENERIC_ERASED_CHECK;
+ chip->ecc.algo = NAND_ECC_ALGO_BCH;
/*
* Update ECC layout if needed ... for 1-bit HW ECC, the
@@ -651,20 +651,20 @@ static int davinci_nand_attach_chip(struct nand_chip *chip)
} else if (chunks == 4 || chunks == 8) {
mtd_set_ooblayout(mtd,
nand_get_large_page_ooblayout());
- info->chip.ecc.read_page = nand_davinci_read_page_hwecc_oob_first;
+ chip->ecc.read_page = nand_davinci_read_page_hwecc_oob_first;
} else {
return -EIO;
}
} else {
/* 1bit ecc hamming */
- info->chip.ecc.calculate = nand_davinci_calculate_1bit;
- info->chip.ecc.correct = nand_davinci_correct_1bit;
- info->chip.ecc.hwctl = nand_davinci_hwctl_1bit;
- info->chip.ecc.bytes = 3;
- info->chip.ecc.algo = NAND_ECC_ALGO_HAMMING;
+ chip->ecc.calculate = nand_davinci_calculate_1bit;
+ chip->ecc.correct = nand_davinci_correct_1bit;
+ chip->ecc.hwctl = nand_davinci_hwctl_1bit;
+ chip->ecc.bytes = 3;
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
}
- info->chip.ecc.size = 512;
- info->chip.ecc.strength = pdata->ecc_bits;
+ chip->ecc.size = 512;
+ chip->ecc.strength = pdata->ecc_bits;
break;
default:
return -EINVAL;
@@ -899,7 +899,7 @@ static int nand_davinci_remove(struct platform_device *pdev)
int ret;
spin_lock_irq(&davinci_nand_lock);
- if (info->chip.ecc.placement == NAND_ECC_PLACEMENT_INTERLEAVED)
+ if (chip->ecc.placement == NAND_ECC_PLACEMENT_INTERLEAVED)
ecc4_busy = false;
spin_unlock_irq(&davinci_nand_lock);
diff --git a/drivers/mtd/nand/raw/diskonchip.c b/drivers/mtd/nand/raw/diskonchip.c
index 26b265e4384a..5d2ddb037a9a 100644
--- a/drivers/mtd/nand/raw/diskonchip.c
+++ b/drivers/mtd/nand/raw/diskonchip.c
@@ -216,7 +216,7 @@ static int doc_ecc_decode(struct rs_control *rs, uint8_t *data, uint8_t *ecc)
static void DoC_Delay(struct doc_priv *doc, unsigned short cycles)
{
- volatile char dummy;
+ volatile char __always_unused dummy;
int i;
for (i = 0; i < cycles; i++) {
@@ -703,7 +703,7 @@ static int doc200x_calculate_ecc(struct nand_chip *this, const u_char *dat,
struct doc_priv *doc = nand_get_controller_data(this);
void __iomem *docptr = doc->virtadr;
int i;
- int emptymatch = 1;
+ int __always_unused emptymatch = 1;
/* flush the pipeline */
if (DoC_is_2000(doc)) {
diff --git a/drivers/mtd/nand/raw/fsl_elbc_nand.c b/drivers/mtd/nand/raw/fsl_elbc_nand.c
index b2af7f81fdf8..aab93b9e6052 100644
--- a/drivers/mtd/nand/raw/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/raw/fsl_elbc_nand.c
@@ -22,7 +22,6 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
-#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/partitions.h>
#include <asm/io.h>
diff --git a/drivers/mtd/nand/raw/fsl_ifc_nand.c b/drivers/mtd/nand/raw/fsl_ifc_nand.c
index e345f9d9f8e8..02d500176838 100644
--- a/drivers/mtd/nand/raw/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/raw/fsl_ifc_nand.c
@@ -15,7 +15,6 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
-#include <linux/mtd/nand_ecc.h>
#include <linux/fsl_ifc.h>
#include <linux/iopoll.h>
diff --git a/drivers/mtd/nand/raw/fsl_upm.c b/drivers/mtd/nand/raw/fsl_upm.c
index d5813b9abc8e..b3cc427100a2 100644
--- a/drivers/mtd/nand/raw/fsl_upm.c
+++ b/drivers/mtd/nand/raw/fsl_upm.c
@@ -11,7 +11,6 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/mtd/rawnand.h>
-#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/mtd.h>
#include <linux/of_platform.h>
diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c
index c88421a1c078..0101c0fab50a 100644
--- a/drivers/mtd/nand/raw/fsmc_nand.c
+++ b/drivers/mtd/nand/raw/fsmc_nand.c
@@ -26,7 +26,6 @@
#include <linux/types.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
-#include <linux/mtd/nand_ecc.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/mtd/partitions.h>
@@ -918,7 +917,7 @@ static int fsmc_nand_attach_chip(struct nand_chip *nand)
case NAND_ECC_ENGINE_TYPE_ON_HOST:
dev_info(host->dev, "Using 1-bit HW ECC scheme\n");
nand->ecc.calculate = fsmc_read_hwecc_ecc1;
- nand->ecc.correct = nand_correct_data;
+ nand->ecc.correct = rawnand_sw_hamming_correct;
nand->ecc.hwctl = fsmc_enable_hwecc;
nand->ecc.bytes = 3;
nand->ecc.strength = 1;
@@ -942,7 +941,7 @@ static int fsmc_nand_attach_chip(struct nand_chip *nand)
/*
* Don't set layout for BCH4 SW ECC. This will be
- * generated later in nand_bch_init() later.
+ * generated later during BCH initialization.
*/
if (nand->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST) {
switch (mtd->oobsize) {
diff --git a/drivers/mtd/nand/raw/gpmi-nand/Makefile b/drivers/mtd/nand/raw/gpmi-nand/Makefile
index 9bd81a31e02e..247cbfceaa19 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/Makefile
+++ b/drivers/mtd/nand/raw/gpmi-nand/Makefile
@@ -1,3 +1,2 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi_nand.o
-gpmi_nand-objs += gpmi-nand.o
+obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi-nand.o
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
index dc8104e67506..3fa8c22d3f36 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
@@ -149,8 +149,10 @@ static int gpmi_init(struct gpmi_nand_data *this)
int ret;
ret = pm_runtime_get_sync(this->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(this->dev);
return ret;
+ }
ret = gpmi_reset_block(r->gpmi_regs, false);
if (ret)
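pm_runtime_get_sync() bumps the device's usage counter even on failure, so returning without a matching put leaks the count and can pin the device active forever; the added pm_runtime_put_noidle() restores the balance. A sketch of the full idiom (do_transfer() is a hypothetical stand-in for the guarded work):

#include <linux/pm_runtime.h>

static int example_guarded_op(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		/* The counter was raised anyway; drop it before bailing. */
		pm_runtime_put_noidle(dev);
		return ret;
	}

	ret = do_transfer(dev);	/* hypothetical payload */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return ret;
}

Newer kernels package exactly this get-or-put_noidle pair as pm_runtime_resume_and_get().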
@@ -179,9 +181,11 @@ static int gpmi_init(struct gpmi_nand_data *this)
/*
* Decouple the chip select from dma channel. We use dma0 for all
- * the chips.
+ * the chips; force all NAND RDY_BUSY inputs to be sourced from
+ * RDY_BUSY0.
*/
- writel(BM_GPMI_CTRL1_DECOUPLE_CS, r->gpmi_regs + HW_GPMI_CTRL1_SET);
+ writel(BM_GPMI_CTRL1_DECOUPLE_CS | BM_GPMI_CTRL1_GANGED_RDYBUSY,
+ r->gpmi_regs + HW_GPMI_CTRL1_SET);
err_out:
pm_runtime_mark_last_busy(this->dev);
@@ -1611,7 +1615,7 @@ static int gpmi_ecc_read_page_raw(struct nand_chip *chip, uint8_t *buf,
/* Extract interleaved payload data and ECC bits */
for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
if (buf)
- nand_extract_bits(buf, step * eccsize, tmp_buf,
+ nand_extract_bits(buf, step * eccsize * 8, tmp_buf,
src_bit_off, eccsize * 8);
src_bit_off += eccsize * 8;
@@ -2252,7 +2256,7 @@ static int gpmi_nfc_exec_op(struct nand_chip *chip,
void *buf_read = NULL;
const void *buf_write = NULL;
bool direct = false;
- struct completion *completion;
+ struct completion *dma_completion, *bch_completion;
unsigned long to;
if (check_only)
@@ -2263,8 +2267,10 @@ static int gpmi_nfc_exec_op(struct nand_chip *chip,
this->transfers[i].direction = DMA_NONE;
ret = pm_runtime_get_sync(this->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(this->dev);
return ret;
+ }
/*
* This driver currently supports only one NAND chip. Plus, dies share
@@ -2347,22 +2353,24 @@ static int gpmi_nfc_exec_op(struct nand_chip *chip,
this->resources.bch_regs + HW_BCH_FLASH0LAYOUT1);
}
+ desc->callback = dma_irq_callback;
+ desc->callback_param = this;
+ dma_completion = &this->dma_done;
+ bch_completion = NULL;
+
+ init_completion(dma_completion);
+
if (this->bch && buf_read) {
writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
this->resources.bch_regs + HW_BCH_CTRL_SET);
- completion = &this->bch_done;
- } else {
- desc->callback = dma_irq_callback;
- desc->callback_param = this;
- completion = &this->dma_done;
+ bch_completion = &this->bch_done;
+ init_completion(bch_completion);
}
- init_completion(completion);
-
dmaengine_submit(desc);
dma_async_issue_pending(get_dma_chan(this));
- to = wait_for_completion_timeout(completion, msecs_to_jiffies(1000));
+ to = wait_for_completion_timeout(dma_completion, msecs_to_jiffies(1000));
if (!to) {
dev_err(this->dev, "DMA timeout, last DMA\n");
gpmi_dump_info(this);
@@ -2370,6 +2378,16 @@ static int gpmi_nfc_exec_op(struct nand_chip *chip,
goto unmap;
}
+ if (this->bch && buf_read) {
+ to = wait_for_completion_timeout(bch_completion, msecs_to_jiffies(1000));
+ if (!to) {
+ dev_err(this->dev, "BCH timeout, last DMA\n");
+ gpmi_dump_info(this);
+ ret = -ETIMEDOUT;
+ goto unmap;
+ }
+ }
+
writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
this->resources.bch_regs + HW_BCH_CTRL_CLR);
gpmi_clear_bch(this);
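This rework replaces the single reused completion with two, because a read through the BCH engine is only finished once both the DMA descriptor callback and the BCH interrupt have fired; waiting on just one of them can return while the other unit is still active. Condensed, the wait sequence now looks like this (a sketch with error handling trimmed, using the fields from the hunk):

	/* The DMA callback always signals dma_done first ... */
	if (!wait_for_completion_timeout(&this->dma_done,
					 msecs_to_jiffies(1000)))
		return -ETIMEDOUT;

	/* ... and ECC-corrected reads must also wait for the BCH IRQ. */
	if (this->bch && buf_read &&
	    !wait_for_completion_timeout(&this->bch_done,
					 msecs_to_jiffies(1000)))
		return -ETIMEDOUT;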
@@ -2461,43 +2479,25 @@ err_out:
}
static const struct of_device_id gpmi_nand_id_table[] = {
- {
- .compatible = "fsl,imx23-gpmi-nand",
- .data = &gpmi_devdata_imx23,
- }, {
- .compatible = "fsl,imx28-gpmi-nand",
- .data = &gpmi_devdata_imx28,
- }, {
- .compatible = "fsl,imx6q-gpmi-nand",
- .data = &gpmi_devdata_imx6q,
- }, {
- .compatible = "fsl,imx6sx-gpmi-nand",
- .data = &gpmi_devdata_imx6sx,
- }, {
- .compatible = "fsl,imx7d-gpmi-nand",
- .data = &gpmi_devdata_imx7d,
- }, {}
+ { .compatible = "fsl,imx23-gpmi-nand", .data = &gpmi_devdata_imx23, },
+ { .compatible = "fsl,imx28-gpmi-nand", .data = &gpmi_devdata_imx28, },
+ { .compatible = "fsl,imx6q-gpmi-nand", .data = &gpmi_devdata_imx6q, },
+ { .compatible = "fsl,imx6sx-gpmi-nand", .data = &gpmi_devdata_imx6sx, },
+ { .compatible = "fsl,imx7d-gpmi-nand", .data = &gpmi_devdata_imx7d,},
+ {}
};
MODULE_DEVICE_TABLE(of, gpmi_nand_id_table);
static int gpmi_nand_probe(struct platform_device *pdev)
{
struct gpmi_nand_data *this;
- const struct of_device_id *of_id;
int ret;
this = devm_kzalloc(&pdev->dev, sizeof(*this), GFP_KERNEL);
if (!this)
return -ENOMEM;
- of_id = of_match_device(gpmi_nand_id_table, &pdev->dev);
- if (of_id) {
- this->devdata = of_id->data;
- } else {
- dev_err(&pdev->dev, "Failed to find the right device id.\n");
- return -ENODEV;
- }
-
+ this->devdata = of_device_get_match_data(&pdev->dev);
platform_set_drvdata(pdev, this);
this->pdev = pdev;
this->dev = &pdev->dev;
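of_device_get_match_data() collapses the of_match_device() lookup plus the ->data dereference into one call. Because this driver can only bind through its OF match table, the old -ENODEV branch was unreachable and is dropped. The general shape of the idiom, with a defensive NULL check kept for drivers that may also bind by name (a sketch; the struct and probe names are hypothetical):

#include <linux/of_device.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	const struct example_devdata *data;

	data = of_device_get_match_data(&pdev->dev);
	if (!data)	/* only reachable on non-OF binding paths */
		return -ENODEV;

	/* ... continue probing with the per-SoC data ... */
	return 0;
}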
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-regs.h b/drivers/mtd/nand/raw/gpmi-nand/gpmi-regs.h
index f5e4f26c34da..fc31fd084dcf 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-regs.h
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-regs.h
@@ -107,6 +107,7 @@
#define BV_GPMI_CTRL1_WRN_DLY_SEL_7_TO_12NS 0x2
#define BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY 0x3
+#define BM_GPMI_CTRL1_GANGED_RDYBUSY (1 << 19)
#define BM_GPMI_CTRL1_BCH_MODE (1 << 18)
#define BP_GPMI_CTRL1_DLL_ENABLE 17
diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
index 8e22cd6ec71f..efe0ffe4f1ab 100644
--- a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
+++ b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
@@ -71,8 +71,6 @@ static struct ingenic_ecc *ingenic_ecc_get(struct device_node *np)
if (!pdev || !platform_get_drvdata(pdev))
return ERR_PTR(-EPROBE_DEFER);
- get_device(&pdev->dev);
-
ecc = platform_get_drvdata(pdev);
clk_prepare_enable(ecc->clk);
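The dropped get_device() fixes a reference imbalance: platform_get_drvdata() takes no reference of its own, and nothing on the release side ever called put_device(), so the ECC device's refcount could only grow. When a temporary reference genuinely is needed, it must be paired (a generic sketch):

#include <linux/device.h>

static void example_borrow_device(struct device *dev)
{
	get_device(dev);
	/* ... dev is guaranteed to stay alive in this window ... */
	put_device(dev);	/* every get_device() needs a put */
}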
diff --git a/drivers/mtd/nand/raw/intel-nand-controller.c b/drivers/mtd/nand/raw/intel-nand-controller.c
new file mode 100644
index 000000000000..a304fda5d1fa
--- /dev/null
+++ b/drivers/mtd/nand/raw/intel-nand-controller.c
@@ -0,0 +1,722 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2020 Intel Corporation. */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/nand.h>
+
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <asm/unaligned.h>
+
+#define EBU_CLC 0x000
+#define EBU_CLC_RST 0x00000000u
+
+#define EBU_ADDR_SEL(n) (0x020 + (n) * 4)
+/* Bits 26:22 (5 bits) are included for comparison in the ADDR_SELx */
+#define EBU_ADDR_MASK(x) ((x) << 4)
+#define EBU_ADDR_SEL_REGEN 0x1
+
+#define EBU_BUSCON(n) (0x060 + (n) * 4)
+#define EBU_BUSCON_CMULT_V4 0x1
+#define EBU_BUSCON_RECOVC(n) ((n) << 2)
+#define EBU_BUSCON_HOLDC(n) ((n) << 4)
+#define EBU_BUSCON_WAITRDC(n) ((n) << 6)
+#define EBU_BUSCON_WAITWRC(n) ((n) << 8)
+#define EBU_BUSCON_BCGEN_CS 0x0
+#define EBU_BUSCON_SETUP_EN BIT(22)
+#define EBU_BUSCON_ALEC 0xC000
+
+#define EBU_CON 0x0B0
+#define EBU_CON_NANDM_EN BIT(0)
+#define EBU_CON_NANDM_DIS 0x0
+#define EBU_CON_CSMUX_E_EN BIT(1)
+#define EBU_CON_ALE_P_LOW BIT(2)
+#define EBU_CON_CLE_P_LOW BIT(3)
+#define EBU_CON_CS_P_LOW BIT(4)
+#define EBU_CON_SE_P_LOW BIT(5)
+#define EBU_CON_WP_P_LOW BIT(6)
+#define EBU_CON_PRE_P_LOW BIT(7)
+#define EBU_CON_IN_CS_S(n) ((n) << 8)
+#define EBU_CON_OUT_CS_S(n) ((n) << 10)
+#define EBU_CON_LAT_EN_CS_P ((0x3D) << 18)
+
+#define EBU_WAIT 0x0B4
+#define EBU_WAIT_RDBY BIT(0)
+#define EBU_WAIT_WR_C BIT(3)
+
+#define HSNAND_CTL1 0x110
+#define HSNAND_CTL1_ADDR_SHIFT 24
+
+#define HSNAND_CTL2 0x114
+#define HSNAND_CTL2_ADDR_SHIFT 8
+#define HSNAND_CTL2_CYC_N_V5 (0x2 << 16)
+
+#define HSNAND_INT_MSK_CTL 0x124
+#define HSNAND_INT_MSK_CTL_WR_C BIT(4)
+
+#define HSNAND_INT_STA 0x128
+#define HSNAND_INT_STA_WR_C BIT(4)
+
+#define HSNAND_CTL 0x130
+#define HSNAND_CTL_ENABLE_ECC BIT(0)
+#define HSNAND_CTL_GO BIT(2)
+#define HSNAND_CTL_CE_SEL_CS(n) BIT(3 + (n))
+#define HSNAND_CTL_RW_READ 0x0
+#define HSNAND_CTL_RW_WRITE BIT(10)
+#define HSNAND_CTL_ECC_OFF_V8TH BIT(11)
+#define HSNAND_CTL_CKFF_EN 0x0
+#define HSNAND_CTL_MSG_EN BIT(17)
+
+#define HSNAND_PARA0 0x13c
+#define HSNAND_PARA0_PAGE_V8192 0x3
+#define HSNAND_PARA0_PIB_V256 (0x3 << 4)
+#define HSNAND_PARA0_BYP_EN_NP 0x0
+#define HSNAND_PARA0_BYP_DEC_NP 0x0
+#define HSNAND_PARA0_TYPE_ONFI BIT(18)
+#define HSNAND_PARA0_ADEP_EN BIT(21)
+
+#define HSNAND_CMSG_0 0x150
+#define HSNAND_CMSG_1 0x154
+
+#define HSNAND_ALE_OFFS BIT(2)
+#define HSNAND_CLE_OFFS BIT(3)
+#define HSNAND_CS_OFFS BIT(4)
+
+#define HSNAND_ECC_OFFSET 0x008
+
+#define NAND_DATA_IFACE_CHECK_ONLY -1
+
+#define MAX_CS 2
+
+#define HZ_PER_MHZ 1000000L
+#define USEC_PER_SEC 1000000L
+
+struct ebu_nand_cs {
+ void __iomem *chipaddr;
+ dma_addr_t nand_pa;
+ u32 addr_sel;
+};
+
+struct ebu_nand_controller {
+ struct nand_controller controller;
+ struct nand_chip chip;
+ struct device *dev;
+ void __iomem *ebu;
+ void __iomem *hsnand;
+ struct dma_chan *dma_tx;
+ struct dma_chan *dma_rx;
+ struct completion dma_access_complete;
+ unsigned long clk_rate;
+ struct clk *clk;
+ u32 nd_para0;
+ u8 cs_num;
+ struct ebu_nand_cs cs[MAX_CS];
+};
+
+static inline struct ebu_nand_controller *nand_to_ebu(struct nand_chip *chip)
+{
+ return container_of(chip, struct ebu_nand_controller, chip);
+}
+
+static int ebu_nand_waitrdy(struct nand_chip *chip, int timeout_ms)
+{
+ struct ebu_nand_controller *ctrl = nand_to_ebu(chip);
+ u32 status;
+
+ return readl_poll_timeout(ctrl->ebu + EBU_WAIT, status,
+ (status & EBU_WAIT_RDBY) ||
+ (status & EBU_WAIT_WR_C), 20, timeout_ms);
+}
+
+static u8 ebu_nand_readb(struct nand_chip *chip)
+{
+ struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip);
+ u8 cs_num = ebu_host->cs_num;
+ u8 val;
+
+ val = readb(ebu_host->cs[cs_num].chipaddr + HSNAND_CS_OFFS);
+ ebu_nand_waitrdy(chip, 1000);
+ return val;
+}
+
+static void ebu_nand_writeb(struct nand_chip *chip, u32 offset, u8 value)
+{
+ struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip);
+ u8 cs_num = ebu_host->cs_num;
+
+ writeb(value, ebu_host->cs[cs_num].chipaddr + offset);
+ ebu_nand_waitrdy(chip, 1000);
+}
+
+static void ebu_read_buf(struct nand_chip *chip, u_char *buf, unsigned int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ buf[i] = ebu_nand_readb(chip);
+}
+
+static void ebu_write_buf(struct nand_chip *chip, const u_char *buf, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ ebu_nand_writeb(chip, HSNAND_CS_OFFS, buf[i]);
+}
+
+static void ebu_nand_disable(struct nand_chip *chip)
+{
+ struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip);
+
+ writel(0, ebu_host->ebu + EBU_CON);
+}
+
+static void ebu_select_chip(struct nand_chip *chip)
+{
+ struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip);
+ void __iomem *nand_con = ebu_host->ebu + EBU_CON;
+ u32 cs = ebu_host->cs_num;
+
+ writel(EBU_CON_NANDM_EN | EBU_CON_CSMUX_E_EN | EBU_CON_CS_P_LOW |
+ EBU_CON_SE_P_LOW | EBU_CON_WP_P_LOW | EBU_CON_PRE_P_LOW |
+ EBU_CON_IN_CS_S(cs) | EBU_CON_OUT_CS_S(cs) |
+ EBU_CON_LAT_EN_CS_P, nand_con);
+}
+
+static int ebu_nand_set_timings(struct nand_chip *chip, int csline,
+ const struct nand_interface_config *conf)
+{
+ struct ebu_nand_controller *ctrl = nand_to_ebu(chip);
+ unsigned int rate = clk_get_rate(ctrl->clk) / HZ_PER_MHZ;
+ unsigned int period = DIV_ROUND_UP(USEC_PER_SEC, rate);
+ const struct nand_sdr_timings *timings;
+ u32 trecov, thold, twrwait, trdwait;
+ u32 reg = 0;
+
+ timings = nand_get_sdr_timings(conf);
+ if (IS_ERR(timings))
+ return PTR_ERR(timings);
+
+ if (csline == NAND_DATA_IFACE_CHECK_ONLY)
+ return 0;
+
+ trecov = DIV_ROUND_UP(max(timings->tREA_max, timings->tREH_min),
+ period);
+ reg |= EBU_BUSCON_RECOVC(trecov);
+
+ thold = DIV_ROUND_UP(max(timings->tDH_min, timings->tDS_min), period);
+ reg |= EBU_BUSCON_HOLDC(thold);
+
+ trdwait = DIV_ROUND_UP(max(timings->tRC_min, timings->tREH_min),
+ period);
+ reg |= EBU_BUSCON_WAITRDC(trdwait);
+
+ twrwait = DIV_ROUND_UP(max(timings->tWC_min, timings->tWH_min), period);
+ reg |= EBU_BUSCON_WAITWRC(twrwait);
+
+ reg |= EBU_BUSCON_CMULT_V4 | EBU_BUSCON_BCGEN_CS | EBU_BUSCON_ALEC |
+ EBU_BUSCON_SETUP_EN;
+
+ writel(reg, ctrl->ebu + EBU_BUSCON(ctrl->cs_num));
+
+ return 0;
+}
+
+static int ebu_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = HSNAND_ECC_OFFSET;
+ oobregion->length = chip->ecc.total;
+
+ return 0;
+}
+
+static int ebu_nand_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = chip->ecc.total + HSNAND_ECC_OFFSET;
+ oobregion->length = mtd->oobsize - oobregion->offset;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops ebu_nand_ooblayout_ops = {
+ .ecc = ebu_nand_ooblayout_ecc,
+ .free = ebu_nand_ooblayout_free,
+};
+
+static void ebu_dma_rx_callback(void *cookie)
+{
+ struct ebu_nand_controller *ebu_host = cookie;
+
+ dmaengine_terminate_async(ebu_host->dma_rx);
+
+ complete(&ebu_host->dma_access_complete);
+}
+
+static void ebu_dma_tx_callback(void *cookie)
+{
+ struct ebu_nand_controller *ebu_host = cookie;
+
+ dmaengine_terminate_async(ebu_host->dma_tx);
+
+ complete(&ebu_host->dma_access_complete);
+}
+
+static int ebu_dma_start(struct ebu_nand_controller *ebu_host, u32 dir,
+ const u8 *buf, u32 len)
+{
+ struct dma_async_tx_descriptor *tx;
+ struct completion *dma_completion;
+ dma_async_tx_callback callback;
+ struct dma_chan *chan;
+ dma_cookie_t cookie;
+ unsigned long flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+ dma_addr_t buf_dma;
+ int ret;
+ u32 timeout;
+
+ if (dir == DMA_DEV_TO_MEM) {
+ chan = ebu_host->dma_rx;
+ dma_completion = &ebu_host->dma_access_complete;
+ callback = ebu_dma_rx_callback;
+ } else {
+ chan = ebu_host->dma_tx;
+ dma_completion = &ebu_host->dma_access_complete;
+ callback = ebu_dma_tx_callback;
+ }
+
+ buf_dma = dma_map_single(chan->device->dev, (void *)buf, len, dir);
+ if (dma_mapping_error(chan->device->dev, buf_dma)) {
+ dev_err(ebu_host->dev, "Failed to map DMA buffer\n");
+ ret = -EIO;
+ goto err_unmap;
+ }
+
+ tx = dmaengine_prep_slave_single(chan, buf_dma, len, dir, flags);
+ if (!tx)
+ return -ENXIO;
+
+ tx->callback = callback;
+ tx->callback_param = ebu_host;
+ cookie = tx->tx_submit(tx);
+
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dev_err(ebu_host->dev, "dma_submit_error %d\n", cookie);
+ ret = -EIO;
+ goto err_unmap;
+ }
+
+ init_completion(dma_completion);
+ dma_async_issue_pending(chan);
+
+ /* Wait for DMA to finish the data transfer. */
+ timeout = wait_for_completion_timeout(dma_completion, msecs_to_jiffies(1000));
+ if (!timeout) {
+ dev_err(ebu_host->dev, "I/O Error in DMA RX (status %d)\n",
+ dmaengine_tx_status(chan, cookie, NULL));
+ dmaengine_terminate_sync(chan);
+ ret = -ETIMEDOUT;
+ goto err_unmap;
+ }
+
+ return 0;
+
+err_unmap:
+ dma_unmap_single(ebu_host->dev, buf_dma, len, dir);
+
+ return ret;
+}
+
+static void ebu_nand_trigger(struct ebu_nand_controller *ebu_host,
+ int page, u32 cmd)
+{
+ unsigned int val;
+
+ val = cmd | (page & 0xFF) << HSNAND_CTL1_ADDR_SHIFT;
+ writel(val, ebu_host->hsnand + HSNAND_CTL1);
+ val = (page & 0xFFFF00) >> 8 | HSNAND_CTL2_CYC_N_V5;
+ writel(val, ebu_host->hsnand + HSNAND_CTL2);
+
+ writel(ebu_host->nd_para0, ebu_host->hsnand + HSNAND_PARA0);
+
+ /* clear first, will update later */
+ writel(0xFFFFFFFF, ebu_host->hsnand + HSNAND_CMSG_0);
+ writel(0xFFFFFFFF, ebu_host->hsnand + HSNAND_CMSG_1);
+
+ writel(HSNAND_INT_MSK_CTL_WR_C,
+ ebu_host->hsnand + HSNAND_INT_MSK_CTL);
+
+ if (!cmd)
+ val = HSNAND_CTL_RW_READ;
+ else
+ val = HSNAND_CTL_RW_WRITE;
+
+ writel(HSNAND_CTL_MSG_EN | HSNAND_CTL_CKFF_EN |
+ HSNAND_CTL_ECC_OFF_V8TH | HSNAND_CTL_CE_SEL_CS(ebu_host->cs_num) |
+ HSNAND_CTL_ENABLE_ECC | HSNAND_CTL_GO | val,
+ ebu_host->hsnand + HSNAND_CTL);
+}
+
+static int ebu_nand_read_page_hwecc(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip);
+ int ret, reg_data;
+
+ ebu_nand_trigger(ebu_host, page, NAND_CMD_READ0);
+
+ ret = ebu_dma_start(ebu_host, DMA_DEV_TO_MEM, buf, mtd->writesize);
+ if (ret)
+ return ret;
+
+ if (oob_required)
+ chip->ecc.read_oob(chip, page);
+
+ reg_data = readl(ebu_host->hsnand + HSNAND_CTL);
+ reg_data &= ~HSNAND_CTL_GO;
+ writel(reg_data, ebu_host->hsnand + HSNAND_CTL);
+
+ return 0;
+}
+
+static int ebu_nand_write_page_hwecc(struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip);
+ void __iomem *int_sta = ebu_host->hsnand + HSNAND_INT_STA;
+ int reg_data, ret, val;
+ u32 reg;
+
+ ebu_nand_trigger(ebu_host, page, NAND_CMD_SEQIN);
+
+ ret = ebu_dma_start(ebu_host, DMA_MEM_TO_DEV, buf, mtd->writesize);
+ if (ret)
+ return ret;
+
+ if (oob_required) {
+ reg = get_unaligned_le32(chip->oob_poi);
+ writel(reg, ebu_host->hsnand + HSNAND_CMSG_0);
+
+ reg = get_unaligned_le32(chip->oob_poi + 4);
+ writel(reg, ebu_host->hsnand + HSNAND_CMSG_1);
+ }
+
+ ret = readl_poll_timeout_atomic(int_sta, val, !(val & HSNAND_INT_STA_WR_C),
+ 10, 1000);
+ if (ret)
+ return ret;
+
+ reg_data = readl(ebu_host->hsnand + HSNAND_CTL);
+ reg_data &= ~HSNAND_CTL_GO;
+ writel(reg_data, ebu_host->hsnand + HSNAND_CTL);
+
+ return 0;
+}
+
+static const u8 ecc_strength[] = { 1, 1, 4, 8, 24, 32, 40, 60, };
+
+static int ebu_nand_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip);
+ u32 ecc_steps, ecc_bytes, ecc_total, pagesize, pg_per_blk;
+ u32 ecc_strength_ds = chip->ecc.strength;
+ u32 ecc_size = chip->ecc.size;
+ u32 writesize = mtd->writesize;
+ u32 blocksize = mtd->erasesize;
+ int bch_algo, start, val;
+
+ /* Default to an ECC size of 512 */
+ if (!chip->ecc.size)
+ chip->ecc.size = 512;
+
+ switch (ecc_size) {
+ case 512:
+ start = 1;
+ if (!ecc_strength_ds)
+ ecc_strength_ds = 4;
+ break;
+ case 1024:
+ start = 4;
+ if (!ecc_strength_ds)
+ ecc_strength_ds = 32;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* BCH ECC algorithm settings for the number of bits per 512B/1024B */
+ bch_algo = round_up(start + 1, 4);
+ for (val = start; val < bch_algo; val++) {
+ if (ecc_strength_ds == ecc_strength[val])
+ break;
+ }
+ if (val == bch_algo)
+ return -EINVAL;
+
+ if (ecc_strength_ds == 8)
+ ecc_bytes = 14;
+ else
+ ecc_bytes = DIV_ROUND_UP(ecc_strength_ds * fls(8 * ecc_size), 8);
+
+ ecc_steps = writesize / ecc_size;
+ ecc_total = ecc_steps * ecc_bytes;
+ if ((ecc_total + 8) > mtd->oobsize)
+ return -ERANGE;
+
+ chip->ecc.total = ecc_total;
+ pagesize = fls(writesize >> 11);
+ if (pagesize > HSNAND_PARA0_PAGE_V8192)
+ return -ERANGE;
+
+ pg_per_blk = fls((blocksize / writesize) >> 6) / 8;
+ if (pg_per_blk > HSNAND_PARA0_PIB_V256)
+ return -ERANGE;
+
+ ebu_host->nd_para0 = pagesize | pg_per_blk | HSNAND_PARA0_BYP_EN_NP |
+ HSNAND_PARA0_BYP_DEC_NP | HSNAND_PARA0_ADEP_EN |
+ HSNAND_PARA0_TYPE_ONFI | (val << 29);
+
+ mtd_set_ooblayout(mtd, &ebu_nand_ooblayout_ops);
+ chip->ecc.read_page = ebu_nand_read_page_hwecc;
+ chip->ecc.write_page = ebu_nand_write_page_hwecc;
+
+ return 0;
+}
+
+static int ebu_nand_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op, bool check_only)
+{
+ const struct nand_op_instr *instr = NULL;
+ unsigned int op_id;
+ int i, timeout_ms, ret = 0;
+
+ if (check_only)
+ return 0;
+
+ ebu_select_chip(chip);
+ for (op_id = 0; op_id < op->ninstrs; op_id++) {
+ instr = &op->instrs[op_id];
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ ebu_nand_writeb(chip, HSNAND_CLE_OFFS | HSNAND_CS_OFFS,
+ instr->ctx.cmd.opcode);
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ for (i = 0; i < instr->ctx.addr.naddrs; i++)
+ ebu_nand_writeb(chip,
+ HSNAND_ALE_OFFS | HSNAND_CS_OFFS,
+ instr->ctx.addr.addrs[i]);
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ ebu_read_buf(chip, instr->ctx.data.buf.in,
+ instr->ctx.data.len);
+ break;
+
+ case NAND_OP_DATA_OUT_INSTR:
+ ebu_write_buf(chip, instr->ctx.data.buf.out,
+ instr->ctx.data.len);
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ timeout_ms = instr->ctx.waitrdy.timeout_ms * 1000;
+ ret = ebu_nand_waitrdy(chip, timeout_ms);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static const struct nand_controller_ops ebu_nand_controller_ops = {
+ .attach_chip = ebu_nand_attach_chip,
+ .setup_interface = ebu_nand_set_timings,
+ .exec_op = ebu_nand_exec_op,
+};
+
+static void ebu_dma_cleanup(struct ebu_nand_controller *ebu_host)
+{
+ if (ebu_host->dma_rx)
+ dma_release_channel(ebu_host->dma_rx);
+
+ if (ebu_host->dma_tx)
+ dma_release_channel(ebu_host->dma_tx);
+}
+
+static int ebu_nand_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ebu_nand_controller *ebu_host;
+ struct nand_chip *nand;
+ struct mtd_info *mtd;
+ struct resource *res;
+ char *resname;
+ int ret;
+ u32 cs;
+
+ ebu_host = devm_kzalloc(dev, sizeof(*ebu_host), GFP_KERNEL);
+ if (!ebu_host)
+ return -ENOMEM;
+
+ ebu_host->dev = dev;
+ nand_controller_init(&ebu_host->controller);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ebunand");
+ ebu_host->ebu = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ebu_host->ebu))
+ return PTR_ERR(ebu_host->ebu);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hsnand");
+ ebu_host->hsnand = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ebu_host->hsnand))
+ return PTR_ERR(ebu_host->hsnand);
+
+ ret = device_property_read_u32(dev, "reg", &cs);
+ if (ret) {
+ dev_err(dev, "failed to get chip select: %d\n", ret);
+ return ret;
+ }
+ ebu_host->cs_num = cs;
+
+ resname = devm_kasprintf(dev, GFP_KERNEL, "nand_cs%d", cs);
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, resname);
+ ebu_host->cs[cs].chipaddr = devm_ioremap_resource(dev, res);
+ ebu_host->cs[cs].nand_pa = res->start;
+ if (IS_ERR(ebu_host->cs[cs].chipaddr))
+ return PTR_ERR(ebu_host->cs[cs].chipaddr);
+
+ ebu_host->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(ebu_host->clk))
+ return dev_err_probe(dev, PTR_ERR(ebu_host->clk),
+ "failed to get clock\n");
+
+ ret = clk_prepare_enable(ebu_host->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable clock: %d\n", ret);
+ return ret;
+ }
+ ebu_host->clk_rate = clk_get_rate(ebu_host->clk);
+
+ ebu_host->dma_tx = dma_request_chan(dev, "tx");
+ if (IS_ERR(ebu_host->dma_tx))
+ return dev_err_probe(dev, PTR_ERR(ebu_host->dma_tx),
+ "failed to request DMA tx chan!.\n");
+
+ ebu_host->dma_rx = dma_request_chan(dev, "rx");
+ if (IS_ERR(ebu_host->dma_rx))
+ return dev_err_probe(dev, PTR_ERR(ebu_host->dma_rx),
+ "failed to request DMA rx chan!.\n");
+
+ resname = devm_kasprintf(dev, GFP_KERNEL, "addr_sel%d", cs);
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, resname);
+ if (!res)
+ return -EINVAL;
+ ebu_host->cs[cs].addr_sel = res->start;
+ writel(ebu_host->cs[cs].addr_sel | EBU_ADDR_MASK(5) | EBU_ADDR_SEL_REGEN,
+ ebu_host->ebu + EBU_ADDR_SEL(cs));
+
+ nand_set_flash_node(&ebu_host->chip, dev->of_node);
+
+ mtd = nand_to_mtd(&ebu_host->chip);
+ if (!mtd->name) {
+ dev_err(ebu_host->dev, "NAND label property is mandatory\n");
+ return -EINVAL;
+ }
+
+ mtd->dev.parent = dev;
+ ebu_host->dev = dev;
+
+ platform_set_drvdata(pdev, ebu_host);
+ nand_set_controller_data(&ebu_host->chip, ebu_host);
+
+ nand = &ebu_host->chip;
+ nand->controller = &ebu_host->controller;
+ nand->controller->ops = &ebu_nand_controller_ops;
+
+ /* Scan to find existence of the device */
+ ret = nand_scan(&ebu_host->chip, 1);
+ if (ret)
+ goto err_cleanup_dma;
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret)
+ goto err_clean_nand;
+
+ return 0;
+
+err_clean_nand:
+ nand_cleanup(&ebu_host->chip);
+err_cleanup_dma:
+ ebu_dma_cleanup(ebu_host);
+ clk_disable_unprepare(ebu_host->clk);
+
+ return ret;
+}
+
+static int ebu_nand_remove(struct platform_device *pdev)
+{
+ struct ebu_nand_controller *ebu_host = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = mtd_device_unregister(nand_to_mtd(&ebu_host->chip));
+ WARN_ON(ret);
+ nand_cleanup(&ebu_host->chip);
+ ebu_nand_disable(&ebu_host->chip);
+ ebu_dma_cleanup(ebu_host);
+ clk_disable_unprepare(ebu_host->clk);
+
+ return 0;
+}
+
+static const struct of_device_id ebu_nand_match[] = {
+ { .compatible = "intel,nand-controller" },
+ { .compatible = "intel,lgm-ebunand" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ebu_nand_match);
+
+static struct platform_driver ebu_nand_driver = {
+ .probe = ebu_nand_probe,
+ .remove = ebu_nand_remove,
+ .driver = {
+ .name = "intel-nand-controller",
+ .of_match_table = ebu_nand_match,
+ },
+};
+module_platform_driver(ebu_nand_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Vadivel Murugan R <vadivel.muruganx.ramuthevar@intel.com>");
+MODULE_DESCRIPTION("Intel's LGM External Bus NAND Controller driver");
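In the new driver, ebu_nand_attach_chip() sizes per-step ECC with DIV_ROUND_UP(strength * fls(8 * step), 8), the same formula the soon-to-be-deleted nand_bch.c documents, where fls(8 * step) is the Galois-field order m. A standalone sanity check of the arithmetic (a sketch, not driver code):

#include <linux/bitops.h>	/* fls() */
#include <linux/kernel.h>	/* DIV_ROUND_UP() */

/* Bytes of BCH ECC per step; mirrors the attach-time computation. */
static unsigned int bch_ecc_bytes(unsigned int strength, unsigned int step)
{
	return DIV_ROUND_UP(strength * fls(8 * step), 8);
}

/*
 * For step = 512: fls(4096) = 13, since 2^13 - 1 = 8191 > 4096.
 *   bch_ecc_bytes(4, 512)  ==  7	(52 bits of ECC)
 *   bch_ecc_bytes(8, 512)  == 13	(the driver rounds this up to 14)
 *   bch_ecc_bytes(24, 512) == 39
 */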
diff --git a/drivers/mtd/nand/raw/lpc32xx_mlc.c b/drivers/mtd/nand/raw/lpc32xx_mlc.c
index 9e728c731795..452ecaf7775a 100644
--- a/drivers/mtd/nand/raw/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/raw/lpc32xx_mlc.c
@@ -31,7 +31,6 @@
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
-#include <linux/mtd/nand_ecc.h>
#define DRV_NAME "lpc32xx_mlc"
diff --git a/drivers/mtd/nand/raw/lpc32xx_slc.c b/drivers/mtd/nand/raw/lpc32xx_slc.c
index dc7785e30d2f..6b7269cfb7d8 100644
--- a/drivers/mtd/nand/raw/lpc32xx_slc.c
+++ b/drivers/mtd/nand/raw/lpc32xx_slc.c
@@ -23,7 +23,6 @@
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
-#include <linux/mtd/nand_ecc.h>
#include <linux/gpio.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
@@ -803,7 +802,7 @@ static int lpc32xx_nand_attach_chip(struct nand_chip *chip)
chip->ecc.write_oob = lpc32xx_nand_write_oob_syndrome;
chip->ecc.read_oob = lpc32xx_nand_read_oob_syndrome;
chip->ecc.calculate = lpc32xx_nand_ecc_calculate;
- chip->ecc.correct = nand_correct_data;
+ chip->ecc.correct = rawnand_sw_hamming_correct;
chip->ecc.hwctl = lpc32xx_nand_ecc_enable;
/*
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index f5ca2002d08e..42d4881d598d 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -2679,12 +2679,6 @@ static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc,
mtd->dev.parent = dev;
/*
- * Default to HW ECC engine mode. If the nand-ecc-mode property is given
- * in the DT node, this entry will be overwritten in nand_scan_ident().
- */
- chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
-
- /*
* Save a reference value for timing registers before
* ->setup_interface() is called.
*/
diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c
index 48e6dac96be6..817bddccb775 100644
--- a/drivers/mtd/nand/raw/meson_nand.c
+++ b/drivers/mtd/nand/raw/meson_nand.c
@@ -510,7 +510,7 @@ static int meson_nfc_dma_buffer_setup(struct nand_chip *nand, void *databuf,
}
static void meson_nfc_dma_buffer_release(struct nand_chip *nand,
- int infolen, int datalen,
+ int datalen, int infolen,
enum dma_data_direction dir)
{
struct meson_nfc *nfc = nand_get_controller_data(nand);
@@ -1044,9 +1044,12 @@ static int meson_nfc_clk_init(struct meson_nfc *nfc)
ret = clk_set_rate(nfc->device_clk, 24000000);
if (ret)
- goto err_phase_rx;
+ goto err_disable_rx;
return 0;
+
+err_disable_rx:
+ clk_disable_unprepare(nfc->phase_rx);
err_phase_rx:
clk_disable_unprepare(nfc->phase_tx);
err_phase_tx:
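The meson fix follows the standard goto-ladder rule: every error label undoes exactly the steps that succeeded before the jump, so once clk_prepare_enable(nfc->phase_rx) has succeeded, a later failure needs a new label that disables phase_rx before falling through to the earlier unwinds. The shape of the ladder in general (a sketch; the struct and clock names are invented):

#include <linux/clk.h>

static int example_enable_clocks(struct example_nfc *nfc)
{
	int ret;

	ret = clk_prepare_enable(nfc->clk_a);
	if (ret)
		return ret;

	ret = clk_prepare_enable(nfc->clk_b);
	if (ret)
		goto err_disable_a;

	ret = clk_set_rate(nfc->clk_b, 24000000);
	if (ret)
		goto err_disable_b;	/* clk_b is on now: undo it too */

	return 0;

err_disable_b:
	clk_disable_unprepare(nfc->clk_b);
err_disable_a:
	clk_disable_unprepare(nfc->clk_a);
	return ret;
}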
diff --git a/drivers/mtd/nand/raw/mxc_nand.c b/drivers/mtd/nand/raw/mxc_nand.c
index 684c51e5e60d..fd705dd1768d 100644
--- a/drivers/mtd/nand/raw/mxc_nand.c
+++ b/drivers/mtd/nand/raw/mxc_nand.c
@@ -21,7 +21,6 @@
#include <linux/completion.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/platform_data/mtd-mxc_nand.h>
#define DRIVER_NAME "mxc_nand"
@@ -184,7 +183,6 @@ struct mxc_nand_host {
unsigned int buf_start;
const struct mxc_nand_devtype_data *devtype_data;
- struct mxc_nand_platform_data pdata;
};
static const char * const part_probes[] = {
@@ -1611,70 +1609,16 @@ static inline int is_imx53_nfc(struct mxc_nand_host *host)
return host->devtype_data == &imx53_nand_devtype_data;
}
-static const struct platform_device_id mxcnd_devtype[] = {
- {
- .name = "imx21-nand",
- .driver_data = (kernel_ulong_t) &imx21_nand_devtype_data,
- }, {
- .name = "imx27-nand",
- .driver_data = (kernel_ulong_t) &imx27_nand_devtype_data,
- }, {
- .name = "imx25-nand",
- .driver_data = (kernel_ulong_t) &imx25_nand_devtype_data,
- }, {
- .name = "imx51-nand",
- .driver_data = (kernel_ulong_t) &imx51_nand_devtype_data,
- }, {
- .name = "imx53-nand",
- .driver_data = (kernel_ulong_t) &imx53_nand_devtype_data,
- }, {
- /* sentinel */
- }
-};
-MODULE_DEVICE_TABLE(platform, mxcnd_devtype);
-
-#ifdef CONFIG_OF
static const struct of_device_id mxcnd_dt_ids[] = {
- {
- .compatible = "fsl,imx21-nand",
- .data = &imx21_nand_devtype_data,
- }, {
- .compatible = "fsl,imx27-nand",
- .data = &imx27_nand_devtype_data,
- }, {
- .compatible = "fsl,imx25-nand",
- .data = &imx25_nand_devtype_data,
- }, {
- .compatible = "fsl,imx51-nand",
- .data = &imx51_nand_devtype_data,
- }, {
- .compatible = "fsl,imx53-nand",
- .data = &imx53_nand_devtype_data,
- },
+ { .compatible = "fsl,imx21-nand", .data = &imx21_nand_devtype_data, },
+ { .compatible = "fsl,imx27-nand", .data = &imx27_nand_devtype_data, },
+ { .compatible = "fsl,imx25-nand", .data = &imx25_nand_devtype_data, },
+ { .compatible = "fsl,imx51-nand", .data = &imx51_nand_devtype_data, },
+ { .compatible = "fsl,imx53-nand", .data = &imx53_nand_devtype_data, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mxcnd_dt_ids);
-static int mxcnd_probe_dt(struct mxc_nand_host *host)
-{
- struct device_node *np = host->dev->of_node;
- const struct of_device_id *of_id =
- of_match_device(mxcnd_dt_ids, host->dev);
-
- if (!np)
- return 1;
-
- host->devtype_data = of_id->data;
-
- return 0;
-}
-#else
-static int mxcnd_probe_dt(struct mxc_nand_host *host)
-{
- return 1;
-}
-#endif
-
static int mxcnd_attach_chip(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
@@ -1800,20 +1744,7 @@ static int mxcnd_probe(struct platform_device *pdev)
if (IS_ERR(host->clk))
return PTR_ERR(host->clk);
- err = mxcnd_probe_dt(host);
- if (err > 0) {
- struct mxc_nand_platform_data *pdata =
- dev_get_platdata(&pdev->dev);
- if (pdata) {
- host->pdata = *pdata;
- host->devtype_data = (struct mxc_nand_devtype_data *)
- pdev->id_entry->driver_data;
- } else {
- err = -ENODEV;
- }
- }
- if (err < 0)
- return err;
+ host->devtype_data = device_get_match_data(&pdev->dev);
if (!host->devtype_data->setup_interface)
this->options |= NAND_KEEP_TIMINGS;
@@ -1843,14 +1774,6 @@ static int mxcnd_probe(struct platform_device *pdev)
this->legacy.select_chip = host->devtype_data->select_chip;
- /* NAND bus width determines access functions used by upper layer */
- if (host->pdata.width == 2)
- this->options |= NAND_BUSWIDTH_16;
-
- /* update flash based bbt */
- if (host->pdata.flash_bbt)
- this->bbt_options |= NAND_BBT_USE_FLASH;
-
init_completion(&host->op_completion);
host->irq = platform_get_irq(pdev, 0);
@@ -1891,9 +1814,7 @@ static int mxcnd_probe(struct platform_device *pdev)
goto escan;
/* Register the partitions */
- err = mtd_device_parse_register(mtd, part_probes, NULL,
- host->pdata.parts,
- host->pdata.nr_parts);
+ err = mtd_device_parse_register(mtd, part_probes, NULL, NULL, 0);
if (err)
goto cleanup_nand;
@@ -1930,7 +1851,6 @@ static struct platform_driver mxcnd_driver = {
.name = DRIVER_NAME,
.of_match_table = of_match_ptr(mxcnd_dt_ids),
},
- .id_table = mxcnd_devtype,
.probe = mxcnd_probe,
.remove = mxcnd_remove,
};
diff --git a/drivers/mtd/nand/raw/mxic_nand.c b/drivers/mtd/nand/raw/mxic_nand.c
index d66b5b0971fa..da1070993994 100644
--- a/drivers/mtd/nand/raw/mxic_nand.c
+++ b/drivers/mtd/nand/raw/mxic_nand.c
@@ -12,8 +12,8 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand-ecc-sw-hamming.h>
#include <linux/mtd/rawnand.h>
-#include <linux/mtd/nand_ecc.h>
#include <linux/platform_device.h>
#include "internals.h"
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index 1f0d542d5923..c33fa1b1847f 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -35,8 +35,8 @@
#include <linux/types.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
-#include <linux/mtd/nand_ecc.h>
-#include <linux/mtd/nand_bch.h>
+#include <linux/mtd/nand-ecc-sw-hamming.h>
+#include <linux/mtd/nand-ecc-sw-bch.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/io.h>
@@ -5139,6 +5139,118 @@ static void nand_scan_ident_cleanup(struct nand_chip *chip)
kfree(chip->parameters.onfi);
}
+int rawnand_sw_hamming_init(struct nand_chip *chip)
+{
+ struct nand_ecc_sw_hamming_conf *engine_conf;
+ struct nand_device *base = &chip->base;
+ int ret;
+
+ base->ecc.user_conf.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ base->ecc.user_conf.algo = NAND_ECC_ALGO_HAMMING;
+ base->ecc.user_conf.strength = chip->ecc.strength;
+ base->ecc.user_conf.step_size = chip->ecc.size;
+
+ ret = nand_ecc_sw_hamming_init_ctx(base);
+ if (ret)
+ return ret;
+
+ engine_conf = base->ecc.ctx.priv;
+
+ if (chip->ecc.options & NAND_ECC_SOFT_HAMMING_SM_ORDER)
+ engine_conf->sm_order = true;
+
+ chip->ecc.size = base->ecc.ctx.conf.step_size;
+ chip->ecc.strength = base->ecc.ctx.conf.strength;
+ chip->ecc.total = base->ecc.ctx.total;
+ chip->ecc.steps = engine_conf->nsteps;
+ chip->ecc.bytes = engine_conf->code_size;
+
+ return 0;
+}
+EXPORT_SYMBOL(rawnand_sw_hamming_init);
+
+int rawnand_sw_hamming_calculate(struct nand_chip *chip,
+ const unsigned char *buf,
+ unsigned char *code)
+{
+ struct nand_device *base = &chip->base;
+
+ return nand_ecc_sw_hamming_calculate(base, buf, code);
+}
+EXPORT_SYMBOL(rawnand_sw_hamming_calculate);
+
+int rawnand_sw_hamming_correct(struct nand_chip *chip,
+ unsigned char *buf,
+ unsigned char *read_ecc,
+ unsigned char *calc_ecc)
+{
+ struct nand_device *base = &chip->base;
+
+ return nand_ecc_sw_hamming_correct(base, buf, read_ecc, calc_ecc);
+}
+EXPORT_SYMBOL(rawnand_sw_hamming_correct);
+
+void rawnand_sw_hamming_cleanup(struct nand_chip *chip)
+{
+ struct nand_device *base = &chip->base;
+
+ nand_ecc_sw_hamming_cleanup_ctx(base);
+}
+EXPORT_SYMBOL(rawnand_sw_hamming_cleanup);
+
+int rawnand_sw_bch_init(struct nand_chip *chip)
+{
+ struct nand_device *base = &chip->base;
+ struct nand_ecc_sw_bch_conf *engine_conf;
+ int ret;
+
+ base->ecc.user_conf.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ base->ecc.user_conf.algo = NAND_ECC_ALGO_BCH;
+ base->ecc.user_conf.step_size = chip->ecc.size;
+ base->ecc.user_conf.strength = chip->ecc.strength;
+
+ ret = nand_ecc_sw_bch_init_ctx(base);
+ if (ret)
+ return ret;
+
+ engine_conf = base->ecc.ctx.priv;
+
+ chip->ecc.size = base->ecc.ctx.conf.step_size;
+ chip->ecc.strength = base->ecc.ctx.conf.strength;
+ chip->ecc.total = base->ecc.ctx.total;
+ chip->ecc.steps = engine_conf->nsteps;
+ chip->ecc.bytes = engine_conf->code_size;
+
+ return 0;
+}
+EXPORT_SYMBOL(rawnand_sw_bch_init);
+
+static int rawnand_sw_bch_calculate(struct nand_chip *chip,
+ const unsigned char *buf,
+ unsigned char *code)
+{
+ struct nand_device *base = &chip->base;
+
+ return nand_ecc_sw_bch_calculate(base, buf, code);
+}
+
+int rawnand_sw_bch_correct(struct nand_chip *chip, unsigned char *buf,
+ unsigned char *read_ecc, unsigned char *calc_ecc)
+{
+ struct nand_device *base = &chip->base;
+
+ return nand_ecc_sw_bch_correct(base, buf, read_ecc, calc_ecc);
+}
+EXPORT_SYMBOL(rawnand_sw_bch_correct);
+
+void rawnand_sw_bch_cleanup(struct nand_chip *chip)
+{
+ struct nand_device *base = &chip->base;
+
+ nand_ecc_sw_bch_cleanup_ctx(base);
+}
+EXPORT_SYMBOL(rawnand_sw_bch_cleanup);
+
static int nand_set_ecc_on_host_ops(struct nand_chip *chip)
{
struct nand_ecc_ctrl *ecc = &chip->ecc;
@@ -5203,14 +5315,15 @@ static int nand_set_ecc_soft_ops(struct nand_chip *chip)
struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_device *nanddev = mtd_to_nanddev(mtd);
struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int ret;
if (WARN_ON(ecc->engine_type != NAND_ECC_ENGINE_TYPE_SOFT))
return -EINVAL;
switch (ecc->algo) {
case NAND_ECC_ALGO_HAMMING:
- ecc->calculate = nand_calculate_ecc;
- ecc->correct = nand_correct_data;
+ ecc->calculate = rawnand_sw_hamming_calculate;
+ ecc->correct = rawnand_sw_hamming_correct;
ecc->read_page = nand_read_page_swecc;
ecc->read_subpage = nand_read_subpage;
ecc->write_page = nand_write_page_swecc;
@@ -5228,14 +5341,20 @@ static int nand_set_ecc_soft_ops(struct nand_chip *chip)
if (IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC))
ecc->options |= NAND_ECC_SOFT_HAMMING_SM_ORDER;
+ ret = rawnand_sw_hamming_init(chip);
+ if (ret) {
+ WARN(1, "Hamming ECC initialization failed!\n");
+ return ret;
+ }
+
return 0;
case NAND_ECC_ALGO_BCH:
- if (!mtd_nand_has_bch()) {
+ if (!IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH)) {
WARN(1, "CONFIG_MTD_NAND_ECC_SW_BCH not enabled\n");
return -EINVAL;
}
- ecc->calculate = nand_bch_calculate_ecc;
- ecc->correct = nand_bch_correct_data;
+ ecc->calculate = rawnand_sw_bch_calculate;
+ ecc->correct = rawnand_sw_bch_correct;
ecc->read_page = nand_read_page_swecc;
ecc->read_subpage = nand_read_subpage;
ecc->write_page = nand_write_page_swecc;
@@ -5247,55 +5366,20 @@ static int nand_set_ecc_soft_ops(struct nand_chip *chip)
ecc->write_oob = nand_write_oob_std;
/*
- * Board driver should supply ecc.size and ecc.strength
- * values to select how many bits are correctable.
- * Otherwise, default to 4 bits for large page devices.
- */
- if (!ecc->size && (mtd->oobsize >= 64)) {
- ecc->size = 512;
- ecc->strength = 4;
- }
-
- /*
- * if no ecc placement scheme was provided pickup the default
- * large page one.
- */
- if (!mtd->ooblayout) {
- /* handle large page devices only */
- if (mtd->oobsize < 64) {
- WARN(1, "OOB layout is required when using software BCH on small pages\n");
- return -EINVAL;
- }
-
- mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());
-
- }
-
- /*
* We can only maximize ECC config when the default layout is
* used, otherwise we don't know how many bytes can really be
* used.
*/
- if (mtd->ooblayout == nand_get_large_page_ooblayout() &&
- nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH) {
- int steps, bytes;
-
- /* Always prefer 1k blocks over 512bytes ones */
- ecc->size = 1024;
- steps = mtd->writesize / ecc->size;
+ if (nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH &&
+ mtd->ooblayout != nand_get_large_page_ooblayout())
+ nanddev->ecc.user_conf.flags &= ~NAND_ECC_MAXIMIZE_STRENGTH;
- /* Reserve 2 bytes for the BBM */
- bytes = (mtd->oobsize - 2) / steps;
- ecc->strength = bytes * 8 / fls(8 * ecc->size);
- }
-
- /* See nand_bch_init() for details. */
- ecc->bytes = 0;
- ecc->priv = nand_bch_init(mtd);
- if (!ecc->priv) {
+ ret = rawnand_sw_bch_init(chip);
+ if (ret) {
WARN(1, "BCH ECC initialization failed!\n");
- return -EINVAL;
+ return ret;
}
+
return 0;
default:
WARN(1, "Unsupported ECC algorithm!\n");
@@ -5639,7 +5723,9 @@ static int nand_scan_tail(struct nand_chip *chip)
*/
if (!mtd->ooblayout &&
!(ecc->engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
- ecc->algo == NAND_ECC_ALGO_BCH)) {
+ ecc->algo == NAND_ECC_ALGO_BCH) &&
+ !(ecc->engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
+ ecc->algo == NAND_ECC_ALGO_HAMMING)) {
switch (mtd->oobsize) {
case 8:
case 16:
@@ -5756,15 +5842,18 @@ static int nand_scan_tail(struct nand_chip *chip)
* Set the number of read / write steps for one page depending on ECC
* mode.
*/
- ecc->steps = mtd->writesize / ecc->size;
+ if (!ecc->steps)
+ ecc->steps = mtd->writesize / ecc->size;
if (ecc->steps * ecc->size != mtd->writesize) {
WARN(1, "Invalid ECC parameters\n");
ret = -EINVAL;
goto err_nand_manuf_cleanup;
}
- ecc->total = ecc->steps * ecc->bytes;
- chip->base.ecc.ctx.total = ecc->total;
+ if (!ecc->total) {
+ ecc->total = ecc->steps * ecc->bytes;
+ chip->base.ecc.ctx.total = ecc->total;
+ }
if (ecc->total > mtd->oobsize) {
WARN(1, "Total number of ECC bytes exceeded oobsize\n");
@@ -5953,9 +6042,12 @@ EXPORT_SYMBOL(nand_scan_with_ids);
*/
void nand_cleanup(struct nand_chip *chip)
{
- if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
- chip->ecc.algo == NAND_ECC_ALGO_BCH)
- nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT) {
+ if (chip->ecc.algo == NAND_ECC_ALGO_HAMMING)
+ rawnand_sw_hamming_cleanup(chip);
+ else if (chip->ecc.algo == NAND_ECC_ALGO_BCH)
+ rawnand_sw_bch_cleanup(chip);
+ }
nanddev_cleanup(&chip->base);
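The rawnand_sw_* wrappers added above give raw NAND a single lifecycle around the generic software ECC engines: the init calls negotiate the context and back-fill chip->ecc.size/strength/total/steps/bytes, calculate/correct operate on that context, and the cleanup calls release it from nand_cleanup(). Condensed into one hypothetical setup path (a sketch of what nand_set_ecc_soft_ops() now effectively does for Hamming, not a drop-in driver hook):

static int example_soft_hamming_setup(struct nand_chip *chip)
{
	chip->ecc.calculate = rawnand_sw_hamming_calculate;
	chip->ecc.correct = rawnand_sw_hamming_correct;

	/* Fills ecc.size/strength/total/steps/bytes from the context. */
	return rawnand_sw_hamming_init(chip);
}

static void example_soft_hamming_teardown(struct nand_chip *chip)
{
	rawnand_sw_hamming_cleanup(chip);
}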
diff --git a/drivers/mtd/nand/raw/nand_bbt.c b/drivers/mtd/nand/raw/nand_bbt.c
index 344a24fd2ca8..dced32a126d9 100644
--- a/drivers/mtd/nand/raw/nand_bbt.c
+++ b/drivers/mtd/nand/raw/nand_bbt.c
@@ -1087,7 +1087,7 @@ static int nand_update_bbt(struct nand_chip *this, loff_t offs)
}
/**
- * mark_bbt_regions - [GENERIC] mark the bad block table regions
+ * mark_bbt_region - [GENERIC] mark the bad block table regions
* @this: the NAND device
* @td: bad block table descriptor
*
diff --git a/drivers/mtd/nand/raw/nand_bch.c b/drivers/mtd/nand/raw/nand_bch.c
deleted file mode 100644
index 9d19ac14c196..000000000000
--- a/drivers/mtd/nand/raw/nand_bch.c
+++ /dev/null
@@ -1,219 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * This file provides ECC correction for more than 1 bit per block of data,
- * using binary BCH codes. It relies on the generic BCH library lib/bch.c.
- *
- * Copyright © 2011 Ivan Djelic <ivan.djelic@parrot.com>
- */
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/bitops.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/nand_bch.h>
-#include <linux/bch.h>
-
-/**
- * struct nand_bch_control - private NAND BCH control structure
- * @bch: BCH control structure
- * @errloc: error location array
- * @eccmask: XOR ecc mask, allows erased pages to be decoded as valid
- */
-struct nand_bch_control {
- struct bch_control *bch;
- unsigned int *errloc;
- unsigned char *eccmask;
-};
-
-/**
- * nand_bch_calculate_ecc - [NAND Interface] Calculate ECC for data block
- * @chip: NAND chip object
- * @buf: input buffer with raw data
- * @code: output buffer with ECC
- */
-int nand_bch_calculate_ecc(struct nand_chip *chip, const unsigned char *buf,
- unsigned char *code)
-{
- struct nand_bch_control *nbc = chip->ecc.priv;
- unsigned int i;
-
- memset(code, 0, chip->ecc.bytes);
- bch_encode(nbc->bch, buf, chip->ecc.size, code);
-
- /* apply mask so that an erased page is a valid codeword */
- for (i = 0; i < chip->ecc.bytes; i++)
- code[i] ^= nbc->eccmask[i];
-
- return 0;
-}
-EXPORT_SYMBOL(nand_bch_calculate_ecc);
-
-/**
- * nand_bch_correct_data - [NAND Interface] Detect and correct bit error(s)
- * @chip: NAND chip object
- * @buf: raw data read from the chip
- * @read_ecc: ECC from the chip
- * @calc_ecc: the ECC calculated from raw data
- *
- * Detect and correct bit errors for a data byte block
- */
-int nand_bch_correct_data(struct nand_chip *chip, unsigned char *buf,
- unsigned char *read_ecc, unsigned char *calc_ecc)
-{
- struct nand_bch_control *nbc = chip->ecc.priv;
- unsigned int *errloc = nbc->errloc;
- int i, count;
-
- count = bch_decode(nbc->bch, NULL, chip->ecc.size, read_ecc, calc_ecc,
- NULL, errloc);
- if (count > 0) {
- for (i = 0; i < count; i++) {
- if (errloc[i] < (chip->ecc.size*8))
- /* error is located in data, correct it */
- buf[errloc[i] >> 3] ^= (1 << (errloc[i] & 7));
- /* else error in ecc, no action needed */
-
- pr_debug("%s: corrected bitflip %u\n", __func__,
- errloc[i]);
- }
- } else if (count < 0) {
- pr_err("ecc unrecoverable error\n");
- count = -EBADMSG;
- }
- return count;
-}
-EXPORT_SYMBOL(nand_bch_correct_data);
-
-/**
- * nand_bch_init - [NAND Interface] Initialize NAND BCH error correction
- * @mtd: MTD block structure
- *
- * Returns:
- * a pointer to a new NAND BCH control structure, or NULL upon failure
- *
- * Initialize NAND BCH error correction. Parameters @eccsize and @eccbytes
- * are used to compute BCH parameters m (Galois field order) and t (error
- * correction capability). @eccbytes should be equal to the number of bytes
- * required to store m*t bits, where m is such that 2^m-1 > @eccsize*8.
- *
- * Example: to configure 4 bit correction per 512 bytes, you should pass
- * @eccsize = 512 (thus, m=13 is the smallest integer such that 2^m-1 > 512*8)
- * @eccbytes = 7 (7 bytes are required to store m*t = 13*4 = 52 bits)
- */
-struct nand_bch_control *nand_bch_init(struct mtd_info *mtd)
-{
- struct nand_chip *nand = mtd_to_nand(mtd);
- unsigned int m, t, eccsteps, i;
- struct nand_bch_control *nbc = NULL;
- unsigned char *erased_page;
- unsigned int eccsize = nand->ecc.size;
- unsigned int eccbytes = nand->ecc.bytes;
- unsigned int eccstrength = nand->ecc.strength;
-
- if (!eccbytes && eccstrength) {
- eccbytes = DIV_ROUND_UP(eccstrength * fls(8 * eccsize), 8);
- nand->ecc.bytes = eccbytes;
- }
-
- if (!eccsize || !eccbytes) {
- pr_warn("ecc parameters not supplied\n");
- goto fail;
- }
-
- m = fls(1+8*eccsize);
- t = (eccbytes*8)/m;
-
- nbc = kzalloc(sizeof(*nbc), GFP_KERNEL);
- if (!nbc)
- goto fail;
-
- nbc->bch = bch_init(m, t, 0, false);
- if (!nbc->bch)
- goto fail;
-
- /* verify that eccbytes has the expected value */
- if (nbc->bch->ecc_bytes != eccbytes) {
- pr_warn("invalid eccbytes %u, should be %u\n",
- eccbytes, nbc->bch->ecc_bytes);
- goto fail;
- }
-
- eccsteps = mtd->writesize/eccsize;
-
- /* Check that we have an oob layout description. */
- if (!mtd->ooblayout) {
- pr_warn("missing oob scheme");
- goto fail;
- }
-
- /* sanity checks */
- if (8*(eccsize+eccbytes) >= (1 << m)) {
- pr_warn("eccsize %u is too large\n", eccsize);
- goto fail;
- }
-
- /*
- * ecc->steps and ecc->total might be used by mtd->ooblayout->ecc(),
- * which is called by mtd_ooblayout_count_eccbytes().
- * Make sure they are properly initialized before calling
- * mtd_ooblayout_count_eccbytes().
- * FIXME: we should probably rework the sequencing in nand_scan_tail()
- * to avoid setting those fields twice.
- */
- nand->ecc.steps = eccsteps;
- nand->ecc.total = eccsteps * eccbytes;
- nand->base.ecc.ctx.total = nand->ecc.total;
- if (mtd_ooblayout_count_eccbytes(mtd) != (eccsteps*eccbytes)) {
- pr_warn("invalid ecc layout\n");
- goto fail;
- }
-
- nbc->eccmask = kzalloc(eccbytes, GFP_KERNEL);
- nbc->errloc = kmalloc_array(t, sizeof(*nbc->errloc), GFP_KERNEL);
- if (!nbc->eccmask || !nbc->errloc)
- goto fail;
- /*
- * compute and store the inverted ecc of an erased ecc block
- */
- erased_page = kmalloc(eccsize, GFP_KERNEL);
- if (!erased_page)
- goto fail;
-
- memset(erased_page, 0xff, eccsize);
- bch_encode(nbc->bch, erased_page, eccsize, nbc->eccmask);
- kfree(erased_page);
-
- for (i = 0; i < eccbytes; i++)
- nbc->eccmask[i] ^= 0xff;
-
- if (!eccstrength)
- nand->ecc.strength = (eccbytes * 8) / fls(8 * eccsize);
-
- return nbc;
-fail:
- nand_bch_free(nbc);
- return NULL;
-}
-EXPORT_SYMBOL(nand_bch_init);
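The eccsize/eccbytes example in the kernel-doc above can be checked against the arithmetic the function performs; a minimal sketch with the documented values:

	unsigned int eccsize = 512, eccbytes = 7;
	unsigned int m = fls(1 + 8 * eccsize);	/* fls(4097) = 13 */
	unsigned int t = (eccbytes * 8) / m;	/* 56 / 13 = 4 bits of correction */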
-
-/**
- * nand_bch_free - [NAND Interface] Release NAND BCH ECC resources
- * @nbc: NAND BCH control structure
- */
-void nand_bch_free(struct nand_bch_control *nbc)
-{
- if (nbc) {
- bch_free(nbc->bch);
- kfree(nbc->errloc);
- kfree(nbc->eccmask);
- kfree(nbc);
- }
-}
-EXPORT_SYMBOL(nand_bch_free);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Ivan Djelic <ivan.djelic@parrot.com>");
-MODULE_DESCRIPTION("NAND software BCH ECC support");
diff --git a/drivers/mtd/nand/raw/nand_legacy.c b/drivers/mtd/nand/raw/nand_legacy.c
index 2bcc03714432..eccc18b266d5 100644
--- a/drivers/mtd/nand/raw/nand_legacy.c
+++ b/drivers/mtd/nand/raw/nand_legacy.c
@@ -192,9 +192,10 @@ static void panic_nand_wait_ready(struct nand_chip *chip, unsigned long timeo)
*/
void nand_wait_ready(struct nand_chip *chip)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
unsigned long timeo = 400;
- if (in_interrupt() || oops_in_progress)
+ if (mtd->oops_panic_write)
return panic_nand_wait_ready(chip, timeo);
/* Wait until command is processed or timeout occurs */
@@ -531,7 +532,7 @@ EXPORT_SYMBOL(nand_get_set_features_notsupp);
*/
static int nand_wait(struct nand_chip *chip)
{
-
+ struct mtd_info *mtd = nand_to_mtd(chip);
unsigned long timeo = 400;
u8 status;
int ret;
@@ -546,9 +547,9 @@ static int nand_wait(struct nand_chip *chip)
if (ret)
return ret;
- if (in_interrupt() || oops_in_progress)
+ if (mtd->oops_panic_write) {
panic_nand_wait(chip, timeo);
- else {
+ } else {
timeo = jiffies + msecs_to_jiffies(timeo);
do {
if (chip->legacy.dev_ready) {
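The two hunks above replace the in_interrupt()/oops_in_progress heuristic with an explicit per-MTD flag. A sketch of how the MTD core is expected to raise that flag on the panic-write path (assumed shape of mtd_panic_write(), shown for context only):

	static int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
				   size_t *retlen, const u_char *buf)
	{
		struct mtd_info *master = mtd_get_master(mtd);

		/* Mark the device so low-level drivers take their panic paths. */
		if (!master->oops_panic_write)
			master->oops_panic_write = true;

		return master->_panic_write(master, mtd_get_master_ofs(mtd, to),
					    len, retlen, buf);
	}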
diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c
index a8048cb8d220..0750121ac371 100644
--- a/drivers/mtd/nand/raw/nandsim.c
+++ b/drivers/mtd/nand/raw/nandsim.c
@@ -23,7 +23,6 @@
#include <linux/string.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
-#include <linux/mtd/nand_bch.h>
#include <linux/mtd/partitions.h>
#include <linux/delay.h>
#include <linux/list.h>
@@ -2211,10 +2210,13 @@ static int ns_attach_chip(struct nand_chip *chip)
{
unsigned int eccsteps, eccbytes;
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ chip->ecc.algo = bch ? NAND_ECC_ALGO_BCH : NAND_ECC_ALGO_HAMMING;
+
if (!bch)
return 0;
- if (!mtd_nand_has_bch()) {
+ if (!IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH)) {
NS_ERR("BCH ECC support is disabled\n");
return -EINVAL;
}
@@ -2234,8 +2236,6 @@ static int ns_attach_chip(struct nand_chip *chip)
return -EINVAL;
}
- chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
- chip->ecc.algo = NAND_ECC_ALGO_BCH;
chip->ecc.size = 512;
chip->ecc.strength = bch;
chip->ecc.bytes = eccbytes;
@@ -2274,8 +2274,6 @@ static int __init ns_init_module(void)
nsmtd = nand_to_mtd(chip);
nand_set_controller_data(chip, (void *)ns);
- chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
- chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
/* The NAND_SKIP_BBTSCAN option is necessary for 'overridesize' */
/* and 'badblocks' parameters to work */
chip->options |= NAND_SKIP_BBTSCAN;
diff --git a/drivers/mtd/nand/raw/ndfc.c b/drivers/mtd/nand/raw/ndfc.c
index 0fb4ba93c41e..338d6b1a189e 100644
--- a/drivers/mtd/nand/raw/ndfc.c
+++ b/drivers/mtd/nand/raw/ndfc.c
@@ -18,7 +18,6 @@
*/
#include <linux/module.h>
#include <linux/mtd/rawnand.h>
-#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/ndfc.h>
#include <linux/slab.h>
@@ -146,7 +145,7 @@ static int ndfc_chip_init(struct ndfc_controller *ndfc,
chip->controller = &ndfc->ndfc_control;
chip->legacy.read_buf = ndfc_read_buf;
chip->legacy.write_buf = ndfc_write_buf;
- chip->ecc.correct = nand_correct_data;
+ chip->ecc.correct = rawnand_sw_hamming_correct;
chip->ecc.hwctl = ndfc_enable_hwecc;
chip->ecc.calculate = ndfc_calculate_ecc;
chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c
index 512f60780a50..2c3e65cb68f3 100644
--- a/drivers/mtd/nand/raw/omap2.c
+++ b/drivers/mtd/nand/raw/omap2.c
@@ -15,6 +15,7 @@
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand-ecc-sw-bch.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/omap-dma.h>
@@ -23,7 +24,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/mtd/nand_bch.h>
#include <linux/platform_data/elm.h>
#include <linux/omap-gpmc.h>
@@ -185,6 +185,7 @@ static inline struct omap_nand_info *mtd_to_omap(struct mtd_info *mtd)
* @dma_mode: dma mode enable (1) or disable (0)
* @u32_count: number of bytes to be transferred
* @is_write: prefetch read(0) or write post(1) mode
+ * @info: NAND device structure containing platform data
*/
static int omap_prefetch_enable(int cs, int fifo_th, int dma_mode,
unsigned int u32_count, int is_write, struct omap_nand_info *info)
@@ -214,7 +215,7 @@ static int omap_prefetch_enable(int cs, int fifo_th, int dma_mode,
return 0;
}
-/**
+/*
* omap_prefetch_reset - disables and stops the prefetch engine
*/
static int omap_prefetch_reset(int cs, struct omap_nand_info *info)
@@ -939,7 +940,7 @@ static int omap_calculate_ecc(struct nand_chip *chip, const u_char *dat,
/**
* omap_enable_hwecc - This function enables the hardware ecc functionality
- * @mtd: MTD device structure
+ * @chip: NAND chip object
* @mode: Read/Write mode
*/
static void omap_enable_hwecc(struct nand_chip *chip, int mode)
@@ -1009,7 +1010,7 @@ static int omap_wait(struct nand_chip *this)
/**
* omap_dev_ready - checks the NAND Ready GPIO line
- * @mtd: MTD device structure
+ * @chip: NAND chip object
*
* Returns true if ready and false if busy.
*/
@@ -1022,7 +1023,7 @@ static int omap_dev_ready(struct nand_chip *chip)
/**
* omap_enable_hwecc_bch - Program GPMC to perform BCH ECC calculation
- * @mtd: MTD device structure
+ * @chip: NAND chip object
* @mode: Read/Write mode
*
* When using BCH with SW correction (i.e. no ELM), sector size is set
@@ -1131,7 +1132,7 @@ static u8 bch8_polynomial[] = {0xef, 0x51, 0x2e, 0x09, 0xed, 0x93, 0x9a, 0xc2,
* _omap_calculate_ecc_bch - Generate ECC bytes for one sector
* @mtd: MTD device structure
* @dat: The pointer to data on which ecc is computed
- * @ecc_code: The ecc_code buffer
+ * @ecc_calc: The ecc_code buffer
* @i: The sector number (for a multi sector page)
*
* Support calculating of BCH4/8/16 ECC vectors for one sector
@@ -1259,7 +1260,7 @@ static int _omap_calculate_ecc_bch(struct mtd_info *mtd,
* omap_calculate_ecc_bch_sw - ECC generator for sector for SW based correction
* @chip: NAND chip object
* @dat: The pointer to data on which ecc is computed
- * @ecc_code: The ecc_code buffer
+ * @ecc_calc: Buffer storing the calculated ECC bytes
*
* Support calculating of BCH4/8/16 ECC vectors for one sector. This is used
* when SW based correction is required as ECC is required for one sector
@@ -1275,7 +1276,7 @@ static int omap_calculate_ecc_bch_sw(struct nand_chip *chip,
* omap_calculate_ecc_bch_multi - Generate ECC for multiple sectors
* @mtd: MTD device structure
* @dat: The pointer to data on which ecc is computed
- * @ecc_code: The ecc_code buffer
+ * @ecc_calc: Buffer storing the calculated ECC bytes
*
* Support calculating of BCH4/8/16 ecc vectors for the entire page in one go.
*/
@@ -1674,7 +1675,8 @@ static int omap_read_page_bch(struct nand_chip *chip, uint8_t *buf,
/**
* is_elm_present - checks for presence of ELM module by scanning DT nodes
- * @omap_nand_info: NAND device structure containing platform data
+ * @info: NAND device structure containing platform data
+ * @elm_node: ELM's DT node
*/
static bool is_elm_present(struct omap_nand_info *info,
struct device_node *elm_node)
@@ -1865,18 +1867,19 @@ static const struct mtd_ooblayout_ops omap_ooblayout_ops = {
static int omap_sw_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ const struct nand_ecc_sw_bch_conf *engine_conf = nand->ecc.ctx.priv;
int off = BADBLOCK_MARKER_LENGTH;
- if (section >= chip->ecc.steps)
+ if (section >= engine_conf->nsteps)
return -ERANGE;
/*
* When SW correction is employed, one OMAP specific marker byte is
* reserved after each ECC step.
*/
- oobregion->offset = off + (section * (chip->ecc.bytes + 1));
- oobregion->length = chip->ecc.bytes;
+ oobregion->offset = off + (section * (engine_conf->code_size + 1));
+ oobregion->length = engine_conf->code_size;
return 0;
}
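With code_size and nsteps now coming from the software BCH engine context instead of chip->ecc, the resulting layout is easy to check by hand; a worked sketch, assuming BCH8 (code_size = 13) and BADBLOCK_MARKER_LENGTH = 2:

	int off = 2;		/* BADBLOCK_MARKER_LENGTH */
	int code_size = 13;	/* BCH8 */
	int section = 1;
	int offset = off + section * (code_size + 1);	/* = 16 */
	int length = code_size;				/* = 13 */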
@@ -1884,7 +1887,8 @@ static int omap_sw_ooblayout_ecc(struct mtd_info *mtd, int section,
static int omap_sw_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ const struct nand_ecc_sw_bch_conf *engine_conf = nand->ecc.ctx.priv;
int off = BADBLOCK_MARKER_LENGTH;
if (section)
@@ -1894,7 +1898,7 @@ static int omap_sw_ooblayout_free(struct mtd_info *mtd, int section,
* When SW correction is employed, one OMAP specific marker byte is
* reserved after each ECC step.
*/
- off += ((chip->ecc.bytes + 1) * chip->ecc.steps);
+ off += ((engine_conf->code_size + 1) * engine_conf->nsteps);
if (off >= mtd->oobsize)
return -ERANGE;
@@ -2041,16 +2045,16 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
chip->ecc.bytes = 7;
chip->ecc.strength = 4;
chip->ecc.hwctl = omap_enable_hwecc_bch;
- chip->ecc.correct = nand_bch_correct_data;
+ chip->ecc.correct = rawnand_sw_bch_correct;
chip->ecc.calculate = omap_calculate_ecc_bch_sw;
mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
/* Reserve one byte for the OMAP marker */
oobbytes_per_step = chip->ecc.bytes + 1;
/* Software BCH library is used for locating errors */
- chip->ecc.priv = nand_bch_init(mtd);
- if (!chip->ecc.priv) {
+ err = rawnand_sw_bch_init(chip);
+ if (err) {
dev_err(dev, "Unable to use BCH library\n");
- return -EINVAL;
+ return err;
}
break;
@@ -2083,16 +2087,16 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
chip->ecc.bytes = 13;
chip->ecc.strength = 8;
chip->ecc.hwctl = omap_enable_hwecc_bch;
- chip->ecc.correct = nand_bch_correct_data;
+ chip->ecc.correct = rawnand_sw_bch_correct;
chip->ecc.calculate = omap_calculate_ecc_bch_sw;
mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
/* Reserve one byte for the OMAP marker */
oobbytes_per_step = chip->ecc.bytes + 1;
/* Software BCH library is used for locating errors */
- chip->ecc.priv = nand_bch_init(mtd);
- if (!chip->ecc.priv) {
+ err = rawnand_sw_bch_init(chip);
+ if (err) {
dev_err(dev, "unable to use BCH library\n");
- return -EINVAL;
+ return err;
}
break;
@@ -2195,7 +2199,6 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip = &info->nand;
mtd = nand_to_mtd(nand_chip);
mtd->dev.parent = &pdev->dev;
- nand_chip->ecc.priv = NULL;
nand_set_flash_node(nand_chip, dev->of_node);
if (!mtd->name) {
@@ -2271,10 +2274,9 @@ cleanup_nand:
return_error:
if (!IS_ERR_OR_NULL(info->dma))
dma_release_channel(info->dma);
- if (nand_chip->ecc.priv) {
- nand_bch_free(nand_chip->ecc.priv);
- nand_chip->ecc.priv = NULL;
- }
+
+ rawnand_sw_bch_cleanup(nand_chip);
+
return err;
}
@@ -2285,10 +2287,8 @@ static int omap_nand_remove(struct platform_device *pdev)
struct omap_nand_info *info = mtd_to_omap(mtd);
int ret;
- if (nand_chip->ecc.priv) {
- nand_bch_free(nand_chip->ecc.priv);
- nand_chip->ecc.priv = NULL;
- }
+ rawnand_sw_bch_cleanup(nand_chip);
+
if (info->dma)
dma_release_channel(info->dma);
ret = mtd_device_unregister(mtd);
diff --git a/drivers/mtd/nand/raw/omap_elm.c b/drivers/mtd/nand/raw/omap_elm.c
index 4b799521a427..550695a4c1ab 100644
--- a/drivers/mtd/nand/raw/omap_elm.c
+++ b/drivers/mtd/nand/raw/omap_elm.c
@@ -96,6 +96,9 @@ static u32 elm_read_reg(struct elm_info *info, int offset)
* elm_config - Configure ELM module
* @dev: ELM device
* @bch_type: Type of BCH ecc
+ * @ecc_steps: ECC steps to assign to config
+ * @ecc_step_size: ECC step size to assign to config
+ * @ecc_syndrome_size: ECC syndrome size to assign to config
*/
int elm_config(struct device *dev, enum bch_ecc bch_type,
int ecc_steps, int ecc_step_size, int ecc_syndrome_size)
@@ -432,7 +435,7 @@ static int elm_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM_SLEEP
-/**
+/*
* elm_context_save
 * saves ELM configurations to preserve them across hardware power-down
*/
@@ -480,7 +483,7 @@ static int elm_context_save(struct elm_info *info)
return 0;
}
-/**
+/*
* elm_context_restore
 * writes configurations saved during power-down back into ELM registers
*/
diff --git a/drivers/mtd/nand/raw/pasemi_nand.c b/drivers/mtd/nand/raw/pasemi_nand.c
index 68c08772d7c2..789f33312c15 100644
--- a/drivers/mtd/nand/raw/pasemi_nand.c
+++ b/drivers/mtd/nand/raw/pasemi_nand.c
@@ -14,7 +14,6 @@
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
-#include <linux/mtd/nand_ecc.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index 777fb0de0680..667e4bfe369f 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -145,6 +145,7 @@
#define OP_PAGE_READ 0x2
#define OP_PAGE_READ_WITH_ECC 0x3
#define OP_PAGE_READ_WITH_ECC_SPARE 0x4
+#define OP_PAGE_READ_ONFI_READ 0x5
#define OP_PROGRAM_PAGE 0x6
#define OP_PAGE_PROGRAM_WITH_ECC 0x7
#define OP_PROGRAM_PAGE_SPARE 0x9
@@ -460,12 +461,14 @@ struct qcom_nand_host {
* @ecc_modes - ecc mode for NAND
* @is_bam - whether NAND controller is using BAM
* @is_qpic - whether NAND CTRL is part of qpic IP
+ * @qpic_v2 - flag to indicate QPIC IP version 2
* @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
*/
struct qcom_nandc_props {
u32 ecc_modes;
bool is_bam;
bool is_qpic;
+ bool qpic_v2;
u32 dev_cmd_reg_start;
};
@@ -1164,7 +1167,13 @@ static int nandc_param(struct qcom_nand_host *host)
 * in use. We configure the controller to perform a raw read of 512
 * bytes to read the ONFI params.
*/
- nandc_set_reg(nandc, NAND_FLASH_CMD, OP_PAGE_READ | PAGE_ACC | LAST_PAGE);
+ if (nandc->props->qpic_v2)
+ nandc_set_reg(nandc, NAND_FLASH_CMD, OP_PAGE_READ_ONFI_READ |
+ PAGE_ACC | LAST_PAGE);
+ else
+ nandc_set_reg(nandc, NAND_FLASH_CMD, OP_PAGE_READ |
+ PAGE_ACC | LAST_PAGE);
+
nandc_set_reg(nandc, NAND_ADDR0, 0);
nandc_set_reg(nandc, NAND_ADDR1, 0);
nandc_set_reg(nandc, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
@@ -1180,21 +1189,28 @@ static int nandc_param(struct qcom_nand_host *host)
| 1 << DEV0_CFG1_ECC_DISABLE);
nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE);
- /* configure CMD1 and VLD for ONFI param probing */
- nandc_set_reg(nandc, NAND_DEV_CMD_VLD,
- (nandc->vld & ~READ_START_VLD));
- nandc_set_reg(nandc, NAND_DEV_CMD1,
- (nandc->cmd1 & ~(0xFF << READ_ADDR))
- | NAND_CMD_PARAM << READ_ADDR);
+ /* configure CMD1 and VLD for ONFI param probing in QPIC v1 */
+ if (!nandc->props->qpic_v2) {
+ nandc_set_reg(nandc, NAND_DEV_CMD_VLD,
+ (nandc->vld & ~READ_START_VLD));
+ nandc_set_reg(nandc, NAND_DEV_CMD1,
+ (nandc->cmd1 & ~(0xFF << READ_ADDR))
+ | NAND_CMD_PARAM << READ_ADDR);
+ }
nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
- nandc_set_reg(nandc, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
- nandc_set_reg(nandc, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);
+ if (!nandc->props->qpic_v2) {
+ nandc_set_reg(nandc, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
+ nandc_set_reg(nandc, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);
+ }
+
nandc_set_read_loc(nandc, 0, 0, 512, 1);
- write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0);
- write_reg_dma(nandc, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
+ if (!nandc->props->qpic_v2) {
+ write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0);
+ write_reg_dma(nandc, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
+ }
nandc->buf_count = 512;
memset(nandc->data_buffer, 0xff, nandc->buf_count);
@@ -1205,8 +1221,10 @@ static int nandc_param(struct qcom_nand_host *host)
nandc->buf_count, 0);
/* restore CMD1 and VLD regs */
- write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0);
- write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1, NAND_BAM_NEXT_SGL);
+ if (!nandc->props->qpic_v2) {
+ write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0);
+ write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1, NAND_BAM_NEXT_SGL);
+ }
return 0;
}
@@ -1570,6 +1588,8 @@ static int check_flash_errors(struct qcom_nand_host *host, int cw_cnt)
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
int i;
+ nandc_read_buffer_sync(nandc, true);
+
for (i = 0; i < cw_cnt; i++) {
u32 flash = le32_to_cpu(nandc->reg_read_buf[i]);
@@ -2770,8 +2790,10 @@ static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
/* kill onenand */
if (!nandc->props->is_qpic)
nandc_write(nandc, SFLASHC_BURST_CFG, 0);
- nandc_write(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD),
- NAND_DEV_CMD_VLD_VAL);
+
+ if (!nandc->props->qpic_v2)
+ nandc_write(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD),
+ NAND_DEV_CMD_VLD_VAL);
/* enable ADM or BAM DMA */
if (nandc->props->is_bam) {
@@ -2791,8 +2813,10 @@ static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
}
/* save the original values of these registers */
- nandc->cmd1 = nandc_read(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD1));
- nandc->vld = NAND_DEV_CMD_VLD_VAL;
+ if (!nandc->props->qpic_v2) {
+ nandc->cmd1 = nandc_read(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD1));
+ nandc->vld = NAND_DEV_CMD_VLD_VAL;
+ }
return 0;
}
@@ -3050,6 +3074,14 @@ static const struct qcom_nandc_props ipq8074_nandc_props = {
.dev_cmd_reg_start = 0x7000,
};
+static const struct qcom_nandc_props sdx55_nandc_props = {
+ .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
+ .is_bam = true,
+ .is_qpic = true,
+ .qpic_v2 = true,
+ .dev_cmd_reg_start = 0x7000,
+};
+
/*
* data will hold a struct pointer containing more differences once we support
* more controller variants
@@ -3064,9 +3096,17 @@ static const struct of_device_id qcom_nandc_of_match[] = {
.data = &ipq4019_nandc_props,
},
{
+ .compatible = "qcom,ipq6018-nand",
+ .data = &ipq8074_nandc_props,
+ },
+ {
.compatible = "qcom,ipq8074-nand",
.data = &ipq8074_nandc_props,
},
+ {
+ .compatible = "qcom,sdx55-nand",
+ .data = &sdx55_nandc_props,
+ },
{}
};
MODULE_DEVICE_TABLE(of, qcom_nandc_of_match);
diff --git a/drivers/mtd/nand/raw/rockchip-nand-controller.c b/drivers/mtd/nand/raw/rockchip-nand-controller.c
new file mode 100644
index 000000000000..796b678cb108
--- /dev/null
+++ b/drivers/mtd/nand/raw/rockchip-nand-controller.c
@@ -0,0 +1,1495 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Rockchip NAND Flash controller driver.
+ * Copyright (C) 2020 Rockchip Inc.
+ * Author: Yifeng Zhao <yifeng.zhao@rock-chips.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/*
+ * NFC Page Data Layout:
+ * 1024 bytes data + 4 bytes sys data + 28 to 124 bytes ECC data +
+ * 1024 bytes data + 4 bytes sys data + 28 to 124 bytes ECC data +
+ * ......
+ * NAND Page Data Layout:
+ * 1024 * n data + m bytes OOB
+ * Original Bad Block Mask Location:
+ * First byte of OOB (spare).
+ * nand_chip->oob_poi data layout:
+ * 4 bytes sys data + .... + 4 bytes sys data + ECC data.
+ */
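A worked example of this layout, assuming a 2 KiB page with 16-bit/1024 ECC (28 ECC bytes per step): every 1024-byte step occupies 1024 + 4 + 28 = 1056 bytes in the controller buffer, which is exactly the arithmetic rk_nfc_data_len() and rk_nfc_data_ptr() implement further down:

	int ecc_size = 1024, ecc_bytes = 28;	/* strength 16 -> 28 ECC bytes */
	int step_len = ecc_size + ecc_bytes + 4;	/* + NFC_SYS_DATA_SIZE = 1056 */
	int data_off = 1 * step_len;		/* data of step 1 in page_buf */
	int oob_off = data_off + ecc_size;	/* sys + ECC bytes of step 1 */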
+
+/* NAND controller register definition */
+#define NFC_READ (0)
+#define NFC_WRITE (1)
+
+#define NFC_FMCTL (0x00)
+#define FMCTL_CE_SEL_M 0xFF
+#define FMCTL_CE_SEL(x) (1 << (x))
+#define FMCTL_WP BIT(8)
+#define FMCTL_RDY BIT(9)
+
+#define NFC_FMWAIT (0x04)
+#define FLCTL_RST BIT(0)
+#define FLCTL_WR (1) /* 0: read, 1: write */
+#define FLCTL_XFER_ST BIT(2)
+#define FLCTL_XFER_EN BIT(3)
+#define FLCTL_ACORRECT BIT(10) /* Auto correct error bits. */
+#define FLCTL_XFER_READY BIT(20)
+#define FLCTL_XFER_SECTOR (22)
+#define FLCTL_TOG_FIX BIT(29)
+
+#define BCHCTL_BANK_M (7 << 5)
+#define BCHCTL_BANK (5)
+
+#define DMA_ST BIT(0)
+#define DMA_WR (1) /* 0: write, 1: read */
+#define DMA_EN BIT(2)
+#define DMA_AHB_SIZE (3) /* 0: 1, 1: 2, 2: 4 */
+#define DMA_BURST_SIZE (6) /* 0: 1, 3: 4, 5: 8, 7: 16 */
+#define DMA_INC_NUM (9) /* 1 - 16 */
+
+#define ECC_ERR_CNT(x, e) ((((x) >> (e).low) & (e).low_mask) |\
+ (((x) >> (e).high) & (e).high_mask) << (e).low_bn)
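The macro stitches an error count back together from two disjoint register fields. A worked decode, assuming the v6/v8 ecc0 parameters defined near the end of the file (low = 3, low_mask = 0x1F, low_bn = 5, high = 27, high_mask = 0x1):

	u32 bch_st = BIT(27) | (0x12 << 3);	/* high bit set, low field = 18 */
	u32 cnt = ((bch_st >> 3) & 0x1F) |
		  (((bch_st >> 27) & 0x1) << 5);	/* 18 + 32 = 50 bitflips */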
+#define INT_DMA BIT(0)
+#define NFC_BANK (0x800)
+#define NFC_BANK_STEP (0x100)
+#define BANK_DATA (0x00)
+#define BANK_ADDR (0x04)
+#define BANK_CMD (0x08)
+#define NFC_SRAM0 (0x1000)
+#define NFC_SRAM1 (0x1400)
+#define NFC_SRAM_SIZE (0x400)
+#define NFC_TIMEOUT (500000)
+#define NFC_MAX_OOB_PER_STEP 128
+#define NFC_MIN_OOB_PER_STEP 64
+#define MAX_DATA_SIZE 0xFFFC
+#define MAX_ADDRESS_CYC 6
+#define NFC_ECC_MAX_MODES 4
+#define NFC_MAX_NSELS (8) /* Some SoCs only have 1 or 2 CSs. */
+#define NFC_SYS_DATA_SIZE (4) /* 4 bytes sys data in OOB per 1024 bytes of data. */
+#define RK_DEFAULT_CLOCK_RATE (150 * 1000 * 1000) /* 150 MHz */
+#define ACCTIMING(csrw, rwpw, rwcs) ((csrw) << 12 | (rwpw) << 5 | (rwcs))
+
+enum nfc_type {
+ NFC_V6,
+ NFC_V8,
+ NFC_V9,
+};
+
+/**
+ * struct ecc_cnt_status - describes how an ECC error count is encoded in a status register.
+ * @err_flag_bit: error flag bit index in the register.
+ * @low: ECC count low bit index in the register.
+ * @low_mask: mask for the low bits.
+ * @low_bn: number of low bits in the count.
+ * @high: ECC count high bit index in the register.
+ * @high_mask: mask for the high bits.
+ */
+struct ecc_cnt_status {
+ u8 err_flag_bit;
+ u8 low;
+ u8 low_mask;
+ u8 low_bn;
+ u8 high;
+ u8 high_mask;
+};
+
+/**
+ * struct nfc_cfg - NFC controller configuration
+ * @type: NFC version
+ * @ecc_strengths: ECC strengths
+ * @ecc_cfgs: ECC config values
+ * @flctl_off: FLCTL register offset
+ * @bchctl_off: BCHCTL register offset
+ * @dma_data_buf_off: DMA_DATA_BUF register offset
+ * @dma_oob_buf_off: DMA_OOB_BUF register offset
+ * @dma_cfg_off: DMA_CFG register offset
+ * @dma_st_off: DMA_ST register offset
+ * @bch_st_off: BCH_ST register offset
+ * @randmz_off: RANDMZ register offset
+ * @int_en_off: interrupt enable register offset
+ * @int_clr_off: interrupt clear register offset
+ * @int_st_off: interrupt status register offset
+ * @oob0_off: oob0 register offset
+ * @oob1_off: oob1 register offset
+ * @ecc0: represents the ECC0 status data
+ * @ecc1: represents the ECC1 status data
+ */
+struct nfc_cfg {
+ enum nfc_type type;
+ u8 ecc_strengths[NFC_ECC_MAX_MODES];
+ u32 ecc_cfgs[NFC_ECC_MAX_MODES];
+ u32 flctl_off;
+ u32 bchctl_off;
+ u32 dma_cfg_off;
+ u32 dma_data_buf_off;
+ u32 dma_oob_buf_off;
+ u32 dma_st_off;
+ u32 bch_st_off;
+ u32 randmz_off;
+ u32 int_en_off;
+ u32 int_clr_off;
+ u32 int_st_off;
+ u32 oob0_off;
+ u32 oob1_off;
+ struct ecc_cnt_status ecc0;
+ struct ecc_cnt_status ecc1;
+};
+
+struct rk_nfc_nand_chip {
+ struct list_head node;
+ struct nand_chip chip;
+
+ u16 boot_blks;
+ u16 metadata_size;
+ u32 boot_ecc;
+ u32 timing;
+
+ u8 nsels;
+ u8 sels[0];
+ /* Nothing after this field. */
+};
+
+struct rk_nfc {
+ struct nand_controller controller;
+ const struct nfc_cfg *cfg;
+ struct device *dev;
+
+ struct clk *nfc_clk;
+ struct clk *ahb_clk;
+ void __iomem *regs;
+
+ u32 selected_bank;
+ u32 band_offset;
+ u32 cur_ecc;
+ u32 cur_timing;
+
+ struct completion done;
+ struct list_head chips;
+
+ u8 *page_buf;
+ u32 *oob_buf;
+ u32 page_buf_size;
+ u32 oob_buf_size;
+
+ unsigned long assigned_cs;
+};
+
+static inline struct rk_nfc_nand_chip *rk_nfc_to_rknand(struct nand_chip *chip)
+{
+ return container_of(chip, struct rk_nfc_nand_chip, chip);
+}
+
+static inline u8 *rk_nfc_buf_to_data_ptr(struct nand_chip *chip, const u8 *p, int i)
+{
+ return (u8 *)p + i * chip->ecc.size;
+}
+
+static inline u8 *rk_nfc_buf_to_oob_ptr(struct nand_chip *chip, int i)
+{
+ u8 *poi;
+
+ poi = chip->oob_poi + i * NFC_SYS_DATA_SIZE;
+
+ return poi;
+}
+
+static inline u8 *rk_nfc_buf_to_oob_ecc_ptr(struct nand_chip *chip, int i)
+{
+ struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip);
+ u8 *poi;
+
+ poi = chip->oob_poi + rknand->metadata_size + chip->ecc.bytes * i;
+
+ return poi;
+}
+
+static inline int rk_nfc_data_len(struct nand_chip *chip)
+{
+ return chip->ecc.size + chip->ecc.bytes + NFC_SYS_DATA_SIZE;
+}
+
+static inline u8 *rk_nfc_data_ptr(struct nand_chip *chip, int i)
+{
+ struct rk_nfc *nfc = nand_get_controller_data(chip);
+
+ return nfc->page_buf + i * rk_nfc_data_len(chip);
+}
+
+static inline u8 *rk_nfc_oob_ptr(struct nand_chip *chip, int i)
+{
+ struct rk_nfc *nfc = nand_get_controller_data(chip);
+
+ return nfc->page_buf + i * rk_nfc_data_len(chip) + chip->ecc.size;
+}
+
+static int rk_nfc_hw_ecc_setup(struct nand_chip *chip, u32 strength)
+{
+ struct rk_nfc *nfc = nand_get_controller_data(chip);
+ u32 reg, i;
+
+ for (i = 0; i < NFC_ECC_MAX_MODES; i++) {
+ if (strength == nfc->cfg->ecc_strengths[i]) {
+ reg = nfc->cfg->ecc_cfgs[i];
+ break;
+ }
+ }
+
+ if (i >= NFC_ECC_MAX_MODES)
+ return -EINVAL;
+
+ writel(reg, nfc->regs + nfc->cfg->bchctl_off);
+
+ /* Save chip ECC setting */
+ nfc->cur_ecc = strength;
+
+ return 0;
+}
+
+static void rk_nfc_select_chip(struct nand_chip *chip, int cs)
+{
+ struct rk_nfc *nfc = nand_get_controller_data(chip);
+ struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ u32 val;
+
+ if (cs < 0) {
+ nfc->selected_bank = -1;
+ /* Deselect the currently selected target. */
+ val = readl_relaxed(nfc->regs + NFC_FMCTL);
+ val &= ~FMCTL_CE_SEL_M;
+ writel(val, nfc->regs + NFC_FMCTL);
+ return;
+ }
+
+ nfc->selected_bank = rknand->sels[cs];
+ nfc->band_offset = NFC_BANK + nfc->selected_bank * NFC_BANK_STEP;
+
+ val = readl_relaxed(nfc->regs + NFC_FMCTL);
+ val &= ~FMCTL_CE_SEL_M;
+ val |= FMCTL_CE_SEL(nfc->selected_bank);
+
+ writel(val, nfc->regs + NFC_FMCTL);
+
+ /*
+ * Compare current chip timing with selected chip timing and
+ * change if needed.
+ */
+ if (nfc->cur_timing != rknand->timing) {
+ writel(rknand->timing, nfc->regs + NFC_FMWAIT);
+ nfc->cur_timing = rknand->timing;
+ }
+
+ /*
+ * Compare current chip ECC setting with selected chip ECC setting and
+ * change if needed.
+ */
+ if (nfc->cur_ecc != ecc->strength)
+ rk_nfc_hw_ecc_setup(chip, ecc->strength);
+}
+
+static inline int rk_nfc_wait_ioready(struct rk_nfc *nfc)
+{
+ int rc;
+ u32 val;
+
+ rc = readl_relaxed_poll_timeout(nfc->regs + NFC_FMCTL, val,
+ val & FMCTL_RDY, 10, NFC_TIMEOUT);
+
+ return rc;
+}
+
+static void rk_nfc_read_buf(struct rk_nfc *nfc, u8 *buf, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ buf[i] = readb_relaxed(nfc->regs + nfc->band_offset +
+ BANK_DATA);
+}
+
+static void rk_nfc_write_buf(struct rk_nfc *nfc, const u8 *buf, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ writeb(buf[i], nfc->regs + nfc->band_offset + BANK_DATA);
+}
+
+static int rk_nfc_cmd(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct rk_nfc *nfc = nand_get_controller_data(chip);
+ unsigned int i, j, remaining, start;
+ int reg_offset = nfc->band_offset;
+ u8 *inbuf = NULL;
+ const u8 *outbuf;
+ u32 cnt = 0;
+ int ret = 0;
+
+ for (i = 0; i < subop->ninstrs; i++) {
+ const struct nand_op_instr *instr = &subop->instrs[i];
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ writeb(instr->ctx.cmd.opcode,
+ nfc->regs + reg_offset + BANK_CMD);
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ remaining = nand_subop_get_num_addr_cyc(subop, i);
+ start = nand_subop_get_addr_start_off(subop, i);
+
+ for (j = 0; j < 8 && j + start < remaining; j++)
+ writeb(instr->ctx.addr.addrs[j + start],
+ nfc->regs + reg_offset + BANK_ADDR);
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ case NAND_OP_DATA_OUT_INSTR:
+ start = nand_subop_get_data_start_off(subop, i);
+ cnt = nand_subop_get_data_len(subop, i);
+
+ if (instr->type == NAND_OP_DATA_OUT_INSTR) {
+ outbuf = instr->ctx.data.buf.out + start;
+ rk_nfc_write_buf(nfc, outbuf, cnt);
+ } else {
+ inbuf = instr->ctx.data.buf.in + start;
+ rk_nfc_read_buf(nfc, inbuf, cnt);
+ }
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ if (rk_nfc_wait_ioready(nfc) < 0) {
+ ret = -ETIMEDOUT;
+ dev_err(nfc->dev, "IO not ready\n");
+ }
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static const struct nand_op_parser rk_nfc_op_parser = NAND_OP_PARSER(
+ NAND_OP_PARSER_PATTERN(
+ rk_nfc_cmd,
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, MAX_ADDRESS_CYC),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, MAX_DATA_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ rk_nfc_cmd,
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, MAX_ADDRESS_CYC),
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(true, MAX_DATA_SIZE),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
+);
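The two patterns above accept CMD/ADDR/WAITRDY/DATA_IN and CMD/ADDR/DATA_OUT/CMD/WAITRDY sequences respectively. A minimal sketch of an operation the first pattern matches (hypothetical helper, assuming the generic rawnand op macros):

	static int rk_nfc_read_id_sketch(struct nand_chip *chip, u8 *id,
					 unsigned int len)
	{
		u8 addr = 0;
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_READID, 0),
			NAND_OP_ADDR(1, &addr, 0),
			NAND_OP_8BIT_DATA_IN(len, id, 0),
		};
		struct nand_operation op = NAND_OPERATION(0, instrs);

		return nand_op_parser_exec_op(chip, &rk_nfc_op_parser, &op,
					      false);
	}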
+
+static int rk_nfc_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ if (!check_only)
+ rk_nfc_select_chip(chip, op->cs);
+
+ return nand_op_parser_exec_op(chip, &rk_nfc_op_parser, op,
+ check_only);
+}
+
+static int rk_nfc_setup_interface(struct nand_chip *chip, int target,
+ const struct nand_interface_config *conf)
+{
+ struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip);
+ struct rk_nfc *nfc = nand_get_controller_data(chip);
+ const struct nand_sdr_timings *timings;
+ u32 rate, tc2rw, trwpw, trw2c;
+ u32 temp;
+
+ if (target < 0)
+ return 0;
+
+ timings = nand_get_sdr_timings(conf);
+ if (IS_ERR(timings))
+ return -EOPNOTSUPP;
+
+ if (IS_ERR(nfc->nfc_clk))
+ rate = clk_get_rate(nfc->ahb_clk);
+ else
+ rate = clk_get_rate(nfc->nfc_clk);
+
+ /* Turn clock rate into kHz. */
+ rate /= 1000;
+
+ tc2rw = 1;
+ trw2c = 1;
+
+ trwpw = max(timings->tWC_min, timings->tRC_min) / 1000;
+ trwpw = DIV_ROUND_UP(trwpw * rate, 1000000);
+
+ temp = timings->tREA_max / 1000;
+ temp = DIV_ROUND_UP(temp * rate, 1000000);
+
+ if (trwpw < temp)
+ trwpw = temp;
+
+ /*
+ * ACCON: access timing control register
+ * -------------------------------------
+ * 31:18: reserved
+ * 17:12: csrw, clock cycles from the falling edge of CSn to the
+ * falling edge of RDn or WRn
+ * 11:11: reserved
+ * 10:05: rwpw, the width of RDn or WRn in processor clock cycles
+ * 04:00: rwcs, clock cycles from the rising edge of RDn or WRn to the
+ * rising edge of CSn
+ */
+
+ /* Save chip timing */
+ rknand->timing = ACCTIMING(tc2rw, trwpw, trw2c);
+
+ return 0;
+}
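The pulse-width conversion above rounds nanoseconds up to whole controller clock cycles. A worked example, assuming the default 150 MHz NFC clock (rate = 150000 kHz) and tWC_min = tRC_min = 25000 ps:

	u32 rate = 150000;			/* kHz */
	u32 trwpw = 25000 / 1000;		/* 25 ns RD/WR pulse width */
	trwpw = DIV_ROUND_UP(trwpw * rate, 1000000);	/* 4 clock cycles */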
+
+static void rk_nfc_xfer_start(struct rk_nfc *nfc, u8 rw, u8 n_KB,
+ dma_addr_t dma_data, dma_addr_t dma_oob)
+{
+ u32 dma_reg, fl_reg, bch_reg;
+
+ dma_reg = DMA_ST | ((!rw) << DMA_WR) | DMA_EN | (2 << DMA_AHB_SIZE) |
+ (7 << DMA_BURST_SIZE) | (16 << DMA_INC_NUM);
+
+ fl_reg = (rw << FLCTL_WR) | FLCTL_XFER_EN | FLCTL_ACORRECT |
+ (n_KB << FLCTL_XFER_SECTOR) | FLCTL_TOG_FIX;
+
+ if (nfc->cfg->type == NFC_V6 || nfc->cfg->type == NFC_V8) {
+ bch_reg = readl_relaxed(nfc->regs + nfc->cfg->bchctl_off);
+ bch_reg = (bch_reg & (~BCHCTL_BANK_M)) |
+ (nfc->selected_bank << BCHCTL_BANK);
+ writel(bch_reg, nfc->regs + nfc->cfg->bchctl_off);
+ }
+
+ writel(dma_reg, nfc->regs + nfc->cfg->dma_cfg_off);
+ writel((u32)dma_data, nfc->regs + nfc->cfg->dma_data_buf_off);
+ writel((u32)dma_oob, nfc->regs + nfc->cfg->dma_oob_buf_off);
+ writel(fl_reg, nfc->regs + nfc->cfg->flctl_off);
+ fl_reg |= FLCTL_XFER_ST;
+ writel(fl_reg, nfc->regs + nfc->cfg->flctl_off);
+}
+
+static int rk_nfc_wait_for_xfer_done(struct rk_nfc *nfc)
+{
+ void __iomem *ptr;
+ u32 reg;
+
+ ptr = nfc->regs + nfc->cfg->flctl_off;
+
+ return readl_relaxed_poll_timeout(ptr, reg,
+ reg & FLCTL_XFER_READY,
+ 10, NFC_TIMEOUT);
+}
+
+static int rk_nfc_write_page_raw(struct nand_chip *chip, const u8 *buf,
+ int oob_on, int page)
+{
+ struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip);
+ struct rk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int i, pages_per_blk;
+
+ pages_per_blk = mtd->erasesize / mtd->writesize;
+ if ((chip->options & NAND_IS_BOOT_MEDIUM) &&
+ (page < (pages_per_blk * rknand->boot_blks)) &&
+ rknand->boot_ecc != ecc->strength) {
+ /*
+ * There's currently no method to notify the MTD framework that
+ * a different ECC strength is in use for the boot blocks.
+ */
+ return -EIO;
+ }
+
+ if (!buf)
+ memset(nfc->page_buf, 0xff, mtd->writesize + mtd->oobsize);
+
+ for (i = 0; i < ecc->steps; i++) {
+ /* Copy data to the NFC buffer. */
+ if (buf)
+ memcpy(rk_nfc_data_ptr(chip, i),
+ rk_nfc_buf_to_data_ptr(chip, buf, i),
+ ecc->size);
+ /*
+ * The first four bytes of OOB are reserved for the
+ * boot ROM. In some debugging cases, such as with a
+ * read, erase and write back test these 4 bytes stored
+ * in OOB also need to be written back.
+ *
+ * The function nand_block_bad detects bad blocks like:
+ *
+ * bad = chip->oob_poi[chip->badblockpos];
+ *
+ * chip->badblockpos == 0 for a large page NAND Flash,
+ * so chip->oob_poi[0] is the bad block mask (BBM).
+ *
+ * The OOB data layout on the NFC is:
+ *
+ * PA0 PA1 PA2 PA3 | BBM OOB1 OOB2 OOB3 | ...
+ *
+ * or
+ *
+ * 0xFF 0xFF 0xFF 0xFF | BBM OOB1 OOB2 OOB3 | ...
+ *
+ * The code here just swaps the first 4 bytes with the last
+ * 4 bytes without losing any data.
+ *
+ * The chip->oob_poi data layout:
+ *
+ * BBM OOB1 OOB2 OOB3 |......| PA0 PA1 PA2 PA3
+ *
+ * The rk_nfc_ooblayout_free() function already has reserved
+ * these 4 bytes with:
+ *
+ * oob_region->offset = NFC_SYS_DATA_SIZE + 2;
+ */
+ if (!i)
+ memcpy(rk_nfc_oob_ptr(chip, i),
+ rk_nfc_buf_to_oob_ptr(chip, ecc->steps - 1),
+ NFC_SYS_DATA_SIZE);
+ else
+ memcpy(rk_nfc_oob_ptr(chip, i),
+ rk_nfc_buf_to_oob_ptr(chip, i - 1),
+ NFC_SYS_DATA_SIZE);
+ /* Copy ECC data to the NFC buffer. */
+ memcpy(rk_nfc_oob_ptr(chip, i) + NFC_SYS_DATA_SIZE,
+ rk_nfc_buf_to_oob_ecc_ptr(chip, i),
+ ecc->bytes);
+ }
+
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ rk_nfc_write_buf(nfc, buf, mtd->writesize + mtd->oobsize);
+ return nand_prog_page_end_op(chip);
+}
+
+static int rk_nfc_write_page_hwecc(struct nand_chip *chip, const u8 *buf,
+ int oob_on, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct rk_nfc *nfc = nand_get_controller_data(chip);
+ struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int oob_step = (ecc->bytes > 60) ? NFC_MAX_OOB_PER_STEP :
+ NFC_MIN_OOB_PER_STEP;
+ int pages_per_blk = mtd->erasesize / mtd->writesize;
+ int ret = 0, i, boot_rom_mode = 0;
+ dma_addr_t dma_data, dma_oob;
+ u32 reg;
+ u8 *oob;
+
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
+ if (buf)
+ memcpy(nfc->page_buf, buf, mtd->writesize);
+ else
+ memset(nfc->page_buf, 0xFF, mtd->writesize);
+
+ /*
+ * The first blocks (4, 8 or 16 depending on the device) are used
+ * by the boot ROM and the first 32 bits of OOB need to link to
+ * the next page address in the same block. We can't directly copy
+ * OOB data from the MTD framework, because this page address
+ * conflicts with, for example, the bad block marker (BBM),
+ * so we shift all OOB data, including the BBM, by 4 byte positions.
+ * As a consequence, the OOB size available to the MTD framework is
+ * also reduced by 4 bytes.
+ *
+ * PA0 PA1 PA2 PA3 | BBM OOB1 OOB2 OOB3 | ...
+ *
+ * If a NAND is not a boot medium or the page is not a boot block,
+ * the first 4 bytes are left untouched by writing 0xFF to them.
+ *
+ * 0xFF 0xFF 0xFF 0xFF | BBM OOB1 OOB2 OOB3 | ...
+ *
+ * Configure the ECC algorithm supported by the boot ROM.
+ */
+ if ((page < (pages_per_blk * rknand->boot_blks)) &&
+ (chip->options & NAND_IS_BOOT_MEDIUM)) {
+ boot_rom_mode = 1;
+ if (rknand->boot_ecc != ecc->strength)
+ rk_nfc_hw_ecc_setup(chip, rknand->boot_ecc);
+ }
+
+ for (i = 0; i < ecc->steps; i++) {
+ if (!i) {
+ reg = 0xFFFFFFFF;
+ } else {
+ oob = chip->oob_poi + (i - 1) * NFC_SYS_DATA_SIZE;
+ reg = oob[0] | oob[1] << 8 | oob[2] << 16 |
+ oob[3] << 24;
+ }
+
+ if (!i && boot_rom_mode)
+ reg = (page & (pages_per_blk - 1)) * 4;
+
+ if (nfc->cfg->type == NFC_V9)
+ nfc->oob_buf[i] = reg;
+ else
+ nfc->oob_buf[i * (oob_step / 4)] = reg;
+ }
+
+ dma_data = dma_map_single(nfc->dev, (void *)nfc->page_buf,
+ mtd->writesize, DMA_TO_DEVICE);
+ dma_oob = dma_map_single(nfc->dev, nfc->oob_buf,
+ ecc->steps * oob_step,
+ DMA_TO_DEVICE);
+
+ reinit_completion(&nfc->done);
+ writel(INT_DMA, nfc->regs + nfc->cfg->int_en_off);
+
+ rk_nfc_xfer_start(nfc, NFC_WRITE, ecc->steps, dma_data,
+ dma_oob);
+ ret = wait_for_completion_timeout(&nfc->done,
+ msecs_to_jiffies(100));
+ if (!ret)
+ dev_warn(nfc->dev, "write: wait dma done timeout.\n");
+ /*
+ * Whether or not the DMA interrupt fired, the driver still
+ * needs to check the NFC's status register to see if the data
+ * transfer was completed.
+ */
+ ret = rk_nfc_wait_for_xfer_done(nfc);
+
+ dma_unmap_single(nfc->dev, dma_data, mtd->writesize,
+ DMA_TO_DEVICE);
+ dma_unmap_single(nfc->dev, dma_oob, ecc->steps * oob_step,
+ DMA_TO_DEVICE);
+
+ if (boot_rom_mode && rknand->boot_ecc != ecc->strength)
+ rk_nfc_hw_ecc_setup(chip, ecc->strength);
+
+ if (ret) {
+ dev_err(nfc->dev, "write: wait transfer done timeout.\n");
+ return -ETIMEDOUT;
+ }
+
+ return nand_prog_page_end_op(chip);
+}
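For boot blocks, the first OOB word written is not user data but the boot ROM's page link, computed from the page offset inside the block. A worked example, assuming 64 pages per block:

	int pages_per_blk = 64;
	int page = 261;					/* page 5 of its block */
	u32 reg = (page & (pages_per_blk - 1)) * 4;	/* link word = 20 */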
+
+static int rk_nfc_write_oob(struct nand_chip *chip, int page)
+{
+ return rk_nfc_write_page_hwecc(chip, NULL, 1, page);
+}
+
+static int rk_nfc_read_page_raw(struct nand_chip *chip, u8 *buf, int oob_on,
+ int page)
+{
+ struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip);
+ struct rk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int i, pages_per_blk;
+
+ pages_per_blk = mtd->erasesize / mtd->writesize;
+ if ((chip->options & NAND_IS_BOOT_MEDIUM) &&
+ (page < (pages_per_blk * rknand->boot_blks)) &&
+ rknand->boot_ecc != ecc->strength) {
+ /*
+ * There's currently no method to notify the MTD framework that
+ * a different ECC strength is in use for the boot blocks.
+ */
+ return -EIO;
+ }
+
+ nand_read_page_op(chip, page, 0, NULL, 0);
+ rk_nfc_read_buf(nfc, nfc->page_buf, mtd->writesize + mtd->oobsize);
+ for (i = 0; i < ecc->steps; i++) {
+ /*
+ * The first four bytes of OOB are reserved for the
+ * boot ROM. In some debugging cases, such as with a read,
+ * erase and write back test, these 4 bytes also must be
+ * saved somewhere, otherwise this information will be
+ * lost during a write back.
+ */
+ if (!i)
+ memcpy(rk_nfc_buf_to_oob_ptr(chip, ecc->steps - 1),
+ rk_nfc_oob_ptr(chip, i),
+ NFC_SYS_DATA_SIZE);
+ else
+ memcpy(rk_nfc_buf_to_oob_ptr(chip, i - 1),
+ rk_nfc_oob_ptr(chip, i),
+ NFC_SYS_DATA_SIZE);
+
+ /* Copy ECC data from the NFC buffer. */
+ memcpy(rk_nfc_buf_to_oob_ecc_ptr(chip, i),
+ rk_nfc_oob_ptr(chip, i) + NFC_SYS_DATA_SIZE,
+ ecc->bytes);
+
+ /* Copy data from the NFC buffer. */
+ if (buf)
+ memcpy(rk_nfc_buf_to_data_ptr(chip, buf, i),
+ rk_nfc_data_ptr(chip, i),
+ ecc->size);
+ }
+
+ return 0;
+}
+
+static int rk_nfc_read_page_hwecc(struct nand_chip *chip, u8 *buf, int oob_on,
+ int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct rk_nfc *nfc = nand_get_controller_data(chip);
+ struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int oob_step = (ecc->bytes > 60) ? NFC_MAX_OOB_PER_STEP :
+ NFC_MIN_OOB_PER_STEP;
+ int pages_per_blk = mtd->erasesize / mtd->writesize;
+ dma_addr_t dma_data, dma_oob;
+ int ret = 0, i, cnt, boot_rom_mode = 0;
+ int max_bitflips = 0, bch_st, ecc_fail = 0;
+ u8 *oob;
+ u32 tmp;
+
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
+ dma_data = dma_map_single(nfc->dev, nfc->page_buf,
+ mtd->writesize,
+ DMA_FROM_DEVICE);
+ dma_oob = dma_map_single(nfc->dev, nfc->oob_buf,
+ ecc->steps * oob_step,
+ DMA_FROM_DEVICE);
+
+ /*
+ * The first blocks (4, 8 or 16 depending on the device)
+ * are used by the boot ROM.
+ * Configure the ECC algorithm supported by the boot ROM.
+ */
+ if ((page < (pages_per_blk * rknand->boot_blks)) &&
+ (chip->options & NAND_IS_BOOT_MEDIUM)) {
+ boot_rom_mode = 1;
+ if (rknand->boot_ecc != ecc->strength)
+ rk_nfc_hw_ecc_setup(chip, rknand->boot_ecc);
+ }
+
+ reinit_completion(&nfc->done);
+ writel(INT_DMA, nfc->regs + nfc->cfg->int_en_off);
+ rk_nfc_xfer_start(nfc, NFC_READ, ecc->steps, dma_data,
+ dma_oob);
+ ret = wait_for_completion_timeout(&nfc->done,
+ msecs_to_jiffies(100));
+ if (!ret)
+ dev_warn(nfc->dev, "read: wait dma done timeout.\n");
+ /*
+ * Whether or not the DMA interrupt fired, the driver still
+ * needs to check the NFC's status register to see if the data
+ * transfer was completed.
+ */
+ ret = rk_nfc_wait_for_xfer_done(nfc);
+
+ dma_unmap_single(nfc->dev, dma_data, mtd->writesize,
+ DMA_FROM_DEVICE);
+ dma_unmap_single(nfc->dev, dma_oob, ecc->steps * oob_step,
+ DMA_FROM_DEVICE);
+
+ if (ret) {
+ ret = -ETIMEDOUT;
+ dev_err(nfc->dev, "read: wait transfer done timeout.\n");
+ goto timeout_err;
+ }
+
+ for (i = 1; i < ecc->steps; i++) {
+ oob = chip->oob_poi + (i - 1) * NFC_SYS_DATA_SIZE;
+ if (nfc->cfg->type == NFC_V9)
+ tmp = nfc->oob_buf[i];
+ else
+ tmp = nfc->oob_buf[i * (oob_step / 4)];
+ *oob++ = (u8)tmp;
+ *oob++ = (u8)(tmp >> 8);
+ *oob++ = (u8)(tmp >> 16);
+ *oob++ = (u8)(tmp >> 24);
+ }
+
+ for (i = 0; i < (ecc->steps / 2); i++) {
+ bch_st = readl_relaxed(nfc->regs +
+ nfc->cfg->bch_st_off + i * 4);
+ if (bch_st & BIT(nfc->cfg->ecc0.err_flag_bit) ||
+ bch_st & BIT(nfc->cfg->ecc1.err_flag_bit)) {
+ mtd->ecc_stats.failed++;
+ ecc_fail = 1;
+ } else {
+ cnt = ECC_ERR_CNT(bch_st, nfc->cfg->ecc0);
+ mtd->ecc_stats.corrected += cnt;
+ max_bitflips = max_t(u32, max_bitflips, cnt);
+
+ cnt = ECC_ERR_CNT(bch_st, nfc->cfg->ecc1);
+ mtd->ecc_stats.corrected += cnt;
+ max_bitflips = max_t(u32, max_bitflips, cnt);
+ }
+ }
+
+ if (buf)
+ memcpy(buf, nfc->page_buf, mtd->writesize);
+
+timeout_err:
+ if (boot_rom_mode && rknand->boot_ecc != ecc->strength)
+ rk_nfc_hw_ecc_setup(chip, ecc->strength);
+
+ if (ret)
+ return ret;
+
+ if (ecc_fail) {
+ dev_err(nfc->dev, "read page: %x ecc error!\n", page);
+ return 0;
+ }
+
+ return max_bitflips;
+}
+
+static int rk_nfc_read_oob(struct nand_chip *chip, int page)
+{
+ return rk_nfc_read_page_hwecc(chip, NULL, 1, page);
+}
+
+static inline void rk_nfc_hw_init(struct rk_nfc *nfc)
+{
+ /* Disable flash wp. */
+ writel(FMCTL_WP, nfc->regs + NFC_FMCTL);
+ /* Configure the default timing: 40 ns at a 150 MHz NFC clock. */
+ writel(0x1081, nfc->regs + NFC_FMWAIT);
+ nfc->cur_timing = 0x1081;
+ /* Disable randomizer and DMA. */
+ writel(0, nfc->regs + nfc->cfg->randmz_off);
+ writel(0, nfc->regs + nfc->cfg->dma_cfg_off);
+ writel(FLCTL_RST, nfc->regs + nfc->cfg->flctl_off);
+}
+
+static irqreturn_t rk_nfc_irq(int irq, void *id)
+{
+ struct rk_nfc *nfc = id;
+ u32 sta, ien;
+
+ sta = readl_relaxed(nfc->regs + nfc->cfg->int_st_off);
+ ien = readl_relaxed(nfc->regs + nfc->cfg->int_en_off);
+
+ if (!(sta & ien))
+ return IRQ_NONE;
+
+ writel(sta, nfc->regs + nfc->cfg->int_clr_off);
+ writel(~sta & ien, nfc->regs + nfc->cfg->int_en_off);
+
+ complete(&nfc->done);
+
+ return IRQ_HANDLED;
+}
+
+static int rk_nfc_enable_clks(struct device *dev, struct rk_nfc *nfc)
+{
+ int ret;
+
+ if (!IS_ERR(nfc->nfc_clk)) {
+ ret = clk_prepare_enable(nfc->nfc_clk);
+ if (ret) {
+ dev_err(dev, "failed to enable NFC clk\n");
+ return ret;
+ }
+ }
+
+ ret = clk_prepare_enable(nfc->ahb_clk);
+ if (ret) {
+ dev_err(dev, "failed to enable ahb clk\n");
+ if (!IS_ERR(nfc->nfc_clk))
+ clk_disable_unprepare(nfc->nfc_clk);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void rk_nfc_disable_clks(struct rk_nfc *nfc)
+{
+ if (!IS_ERR(nfc->nfc_clk))
+ clk_disable_unprepare(nfc->nfc_clk);
+ clk_disable_unprepare(nfc->ahb_clk);
+}
+
+static int rk_nfc_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oob_region)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip);
+
+ if (section)
+ return -ERANGE;
+
+ /*
+ * The beginning of the OOB area stores the data reserved for the
+ * NFC; this reserved area is NFC_SYS_DATA_SIZE bytes long.
+ */
+ oob_region->length = rknand->metadata_size - NFC_SYS_DATA_SIZE - 2;
+ oob_region->offset = NFC_SYS_DATA_SIZE + 2;
+
+ return 0;
+}
+
+static int rk_nfc_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oob_region)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip);
+
+ if (section)
+ return -ERANGE;
+
+ oob_region->length = mtd->oobsize - rknand->metadata_size;
+ oob_region->offset = rknand->metadata_size;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops rk_nfc_ooblayout_ops = {
+ .free = rk_nfc_ooblayout_free,
+ .ecc = rk_nfc_ooblayout_ecc,
+};
+
+static int rk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct rk_nfc *nfc = nand_get_controller_data(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ const u8 *strengths = nfc->cfg->ecc_strengths;
+ u8 max_strength, nfc_max_strength;
+ int i;
+
+ nfc_max_strength = nfc->cfg->ecc_strengths[0];
+ /* Fall back to defaults if the optional DT settings are absent or invalid. */
+ if (!ecc->size || !ecc->strength ||
+ ecc->strength > nfc_max_strength) {
+ chip->ecc.size = 1024;
+ ecc->steps = mtd->writesize / ecc->size;
+
+ /*
+ * The HW ECC engine always works in 1024 byte steps, so its
+ * strength is expressed per 1024 byte block. The first 4 OOB
+ * bytes of each step are reserved for sys data.
+ */
+ max_strength = ((mtd->oobsize / ecc->steps) - 4) * 8 /
+ fls(8 * 1024);
+ if (max_strength > nfc_max_strength)
+ max_strength = nfc_max_strength;
+
+ for (i = 0; i < 4; i++) {
+ if (max_strength >= strengths[i])
+ break;
+ }
+
+ if (i >= 4) {
+ dev_err(nfc->dev, "unsupported ECC strength\n");
+ return -EOPNOTSUPP;
+ }
+
+ ecc->strength = strengths[i];
+ }
+ ecc->steps = mtd->writesize / ecc->size;
+ ecc->bytes = DIV_ROUND_UP(ecc->strength * fls(8 * chip->ecc.size), 8);
+
+ return 0;
+}
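A worked example of the fallback above, assuming a 2 KiB page with 64 bytes of OOB on the v6 controller (strengths {60, 40, 24, 16}):

	int steps = 2048 / 1024;			/* 2 ECC steps */
	int max_strength = ((64 / steps) - 4) * 8 /
			   fls(8 * 1024);		/* 224 / 14 = 16 */
	/* The strengths[] scan picks 16; ecc->bytes = DIV_ROUND_UP(16 * 14, 8) = 28. */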
+
+static int rk_nfc_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct device *dev = mtd->dev.parent;
+ struct rk_nfc *nfc = nand_get_controller_data(chip);
+ struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int new_page_len, new_oob_len;
+ void *buf;
+ int ret;
+
+ if (chip->options & NAND_BUSWIDTH_16) {
+ dev_err(dev, "16 bits bus width not supported");
+ return -EINVAL;
+ }
+
+ if (ecc->engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
+ return 0;
+
+ ret = rk_nfc_ecc_init(dev, mtd);
+ if (ret)
+ return ret;
+
+ rknand->metadata_size = NFC_SYS_DATA_SIZE * ecc->steps;
+
+ if (rknand->metadata_size < NFC_SYS_DATA_SIZE + 2) {
+ dev_err(dev,
+ "driver needs at least %d bytes of meta data\n",
+ NFC_SYS_DATA_SIZE + 2);
+ return -EIO;
+ }
+
+ /* Check the existing buffers first to avoid allocating them twice. */
+ new_page_len = mtd->writesize + mtd->oobsize;
+ if (nfc->page_buf && new_page_len > nfc->page_buf_size) {
+ buf = krealloc(nfc->page_buf, new_page_len,
+ GFP_KERNEL | GFP_DMA);
+ if (!buf)
+ return -ENOMEM;
+ nfc->page_buf = buf;
+ nfc->page_buf_size = new_page_len;
+ }
+
+ new_oob_len = ecc->steps * NFC_MAX_OOB_PER_STEP;
+ if (nfc->oob_buf && new_oob_len > nfc->oob_buf_size) {
+ buf = krealloc(nfc->oob_buf, new_oob_len,
+ GFP_KERNEL | GFP_DMA);
+ if (!buf) {
+ kfree(nfc->page_buf);
+ nfc->page_buf = NULL;
+ return -ENOMEM;
+ }
+ nfc->oob_buf = buf;
+ nfc->oob_buf_size = new_oob_len;
+ }
+
+ if (!nfc->page_buf) {
+ nfc->page_buf = kzalloc(new_page_len, GFP_KERNEL | GFP_DMA);
+ if (!nfc->page_buf)
+ return -ENOMEM;
+ nfc->page_buf_size = new_page_len;
+ }
+
+ if (!nfc->oob_buf) {
+ nfc->oob_buf = kzalloc(new_oob_len, GFP_KERNEL | GFP_DMA);
+ if (!nfc->oob_buf) {
+ kfree(nfc->page_buf);
+ nfc->page_buf = NULL;
+ return -ENOMEM;
+ }
+ nfc->oob_buf_size = new_oob_len;
+ }
+
+ chip->ecc.write_page_raw = rk_nfc_write_page_raw;
+ chip->ecc.write_page = rk_nfc_write_page_hwecc;
+ chip->ecc.write_oob = rk_nfc_write_oob;
+
+ chip->ecc.read_page_raw = rk_nfc_read_page_raw;
+ chip->ecc.read_page = rk_nfc_read_page_hwecc;
+ chip->ecc.read_oob = rk_nfc_read_oob;
+
+ return 0;
+}
+
+static const struct nand_controller_ops rk_nfc_controller_ops = {
+ .attach_chip = rk_nfc_attach_chip,
+ .exec_op = rk_nfc_exec_op,
+ .setup_interface = rk_nfc_setup_interface,
+};
+
+static int rk_nfc_nand_chip_init(struct device *dev, struct rk_nfc *nfc,
+ struct device_node *np)
+{
+ struct rk_nfc_nand_chip *rknand;
+ struct nand_chip *chip;
+ struct mtd_info *mtd;
+ int nsels;
+ u32 tmp;
+ int ret;
+ int i;
+
+ if (!of_get_property(np, "reg", &nsels))
+ return -ENODEV;
+ nsels /= sizeof(u32);
+ if (!nsels || nsels > NFC_MAX_NSELS) {
+ dev_err(dev, "invalid reg property size %d\n", nsels);
+ return -EINVAL;
+ }
+
+ rknand = devm_kzalloc(dev, sizeof(*rknand) + nsels * sizeof(u8),
+ GFP_KERNEL);
+ if (!rknand)
+ return -ENOMEM;
+
+ rknand->nsels = nsels;
+ for (i = 0; i < nsels; i++) {
+ ret = of_property_read_u32_index(np, "reg", i, &tmp);
+ if (ret) {
+ dev_err(dev, "reg property failure : %d\n", ret);
+ return ret;
+ }
+
+ if (tmp >= NFC_MAX_NSELS) {
+ dev_err(dev, "invalid CS: %u\n", tmp);
+ return -EINVAL;
+ }
+
+ if (test_and_set_bit(tmp, &nfc->assigned_cs)) {
+ dev_err(dev, "CS %u already assigned\n", tmp);
+ return -EINVAL;
+ }
+
+ rknand->sels[i] = tmp;
+ }
+
+ chip = &rknand->chip;
+ chip->controller = &nfc->controller;
+
+ nand_set_flash_node(chip, np);
+
+ nand_set_controller_data(chip, nfc);
+
+ chip->options |= NAND_USES_DMA | NAND_NO_SUBPAGE_WRITE;
+ chip->bbt_options = NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
+
+ /* Set the default ECC engine type in case the DT entry is missing. */
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+
+ mtd = nand_to_mtd(chip);
+ mtd->owner = THIS_MODULE;
+ mtd->dev.parent = dev;
+
+ if (!mtd->name) {
+ dev_err(nfc->dev, "NAND label property is mandatory\n");
+ return -EINVAL;
+ }
+
+ mtd_set_ooblayout(mtd, &rk_nfc_ooblayout_ops);
+ rk_nfc_hw_init(nfc);
+ ret = nand_scan(chip, nsels);
+ if (ret)
+ return ret;
+
+ if (chip->options & NAND_IS_BOOT_MEDIUM) {
+ ret = of_property_read_u32(np, "rockchip,boot-blks", &tmp);
+ rknand->boot_blks = ret ? 0 : tmp;
+
+ ret = of_property_read_u32(np, "rockchip,boot-ecc-strength",
+ &tmp);
+ rknand->boot_ecc = ret ? chip->ecc.strength : tmp;
+ }
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ dev_err(dev, "MTD parse partition error\n");
+ nand_cleanup(chip);
+ return ret;
+ }
+
+ list_add_tail(&rknand->node, &nfc->chips);
+
+ return 0;
+}
+
+static void rk_nfc_chips_cleanup(struct rk_nfc *nfc)
+{
+ struct rk_nfc_nand_chip *rknand, *tmp;
+ struct nand_chip *chip;
+ int ret;
+
+ list_for_each_entry_safe(rknand, tmp, &nfc->chips, node) {
+ chip = &rknand->chip;
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ list_del(&rknand->node);
+ }
+}
+
+static int rk_nfc_nand_chips_init(struct device *dev, struct rk_nfc *nfc)
+{
+ struct device_node *np = dev->of_node, *nand_np;
+ int nchips = of_get_child_count(np);
+ int ret;
+
+ if (!nchips || nchips > NFC_MAX_NSELS) {
+ dev_err(nfc->dev, "incorrect number of NAND chips (%d)\n",
+ nchips);
+ return -EINVAL;
+ }
+
+ for_each_child_of_node(np, nand_np) {
+ ret = rk_nfc_nand_chip_init(dev, nfc, nand_np);
+ if (ret) {
+ of_node_put(nand_np);
+ rk_nfc_chips_cleanup(nfc);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static struct nfc_cfg nfc_v6_cfg = {
+ .type = NFC_V6,
+ .ecc_strengths = {60, 40, 24, 16},
+ .ecc_cfgs = {
+ 0x00040011, 0x00040001, 0x00000011, 0x00000001,
+ },
+ .flctl_off = 0x08,
+ .bchctl_off = 0x0C,
+ .dma_cfg_off = 0x10,
+ .dma_data_buf_off = 0x14,
+ .dma_oob_buf_off = 0x18,
+ .dma_st_off = 0x1C,
+ .bch_st_off = 0x20,
+ .randmz_off = 0x150,
+ .int_en_off = 0x16C,
+ .int_clr_off = 0x170,
+ .int_st_off = 0x174,
+ .oob0_off = 0x200,
+ .oob1_off = 0x230,
+ .ecc0 = {
+ .err_flag_bit = 2,
+ .low = 3,
+ .low_mask = 0x1F,
+ .low_bn = 5,
+ .high = 27,
+ .high_mask = 0x1,
+ },
+ .ecc1 = {
+ .err_flag_bit = 15,
+ .low = 16,
+ .low_mask = 0x1F,
+ .low_bn = 5,
+ .high = 29,
+ .high_mask = 0x1,
+ },
+};
+
+static struct nfc_cfg nfc_v8_cfg = {
+ .type = NFC_V8,
+ .ecc_strengths = {16, 16, 16, 16},
+ .ecc_cfgs = {
+ 0x00000001, 0x00000001, 0x00000001, 0x00000001,
+ },
+ .flctl_off = 0x08,
+ .bchctl_off = 0x0C,
+ .dma_cfg_off = 0x10,
+ .dma_data_buf_off = 0x14,
+ .dma_oob_buf_off = 0x18,
+ .dma_st_off = 0x1C,
+ .bch_st_off = 0x20,
+ .randmz_off = 0x150,
+ .int_en_off = 0x16C,
+ .int_clr_off = 0x170,
+ .int_st_off = 0x174,
+ .oob0_off = 0x200,
+ .oob1_off = 0x230,
+ .ecc0 = {
+ .err_flag_bit = 2,
+ .low = 3,
+ .low_mask = 0x1F,
+ .low_bn = 5,
+ .high = 27,
+ .high_mask = 0x1,
+ },
+ .ecc1 = {
+ .err_flag_bit = 15,
+ .low = 16,
+ .low_mask = 0x1F,
+ .low_bn = 5,
+ .high = 29,
+ .high_mask = 0x1,
+ },
+};
+
+static struct nfc_cfg nfc_v9_cfg = {
+ .type = NFC_V9,
+ .ecc_strengths = {70, 60, 40, 16},
+ .ecc_cfgs = {
+ 0x00000001, 0x06000001, 0x04000001, 0x02000001,
+ },
+ .flctl_off = 0x10,
+ .bchctl_off = 0x20,
+ .dma_cfg_off = 0x30,
+ .dma_data_buf_off = 0x34,
+ .dma_oob_buf_off = 0x38,
+ .dma_st_off = 0x3C,
+ .bch_st_off = 0x150,
+ .randmz_off = 0x208,
+ .int_en_off = 0x120,
+ .int_clr_off = 0x124,
+ .int_st_off = 0x128,
+ .oob0_off = 0x200,
+ .oob1_off = 0x204,
+ .ecc0 = {
+ .err_flag_bit = 2,
+ .low = 3,
+ .low_mask = 0x7F,
+ .low_bn = 7,
+ .high = 0,
+ .high_mask = 0x0,
+ },
+ .ecc1 = {
+ .err_flag_bit = 18,
+ .low = 19,
+ .low_mask = 0x7F,
+ .low_bn = 7,
+ .high = 0,
+ .high_mask = 0x0,
+ },
+};
+
+static const struct of_device_id rk_nfc_id_table[] = {
+ {
+ .compatible = "rockchip,px30-nfc",
+ .data = &nfc_v9_cfg
+ },
+ {
+ .compatible = "rockchip,rk2928-nfc",
+ .data = &nfc_v6_cfg
+ },
+ {
+ .compatible = "rockchip,rv1108-nfc",
+ .data = &nfc_v8_cfg
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rk_nfc_id_table);
+
+static int rk_nfc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rk_nfc *nfc;
+ int ret, irq;
+
+ nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
+ if (!nfc)
+ return -ENOMEM;
+
+ nand_controller_init(&nfc->controller);
+ INIT_LIST_HEAD(&nfc->chips);
+ nfc->controller.ops = &rk_nfc_controller_ops;
+
+ nfc->cfg = of_device_get_match_data(dev);
+ nfc->dev = dev;
+
+ init_completion(&nfc->done);
+
+ nfc->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(nfc->regs)) {
+ ret = PTR_ERR(nfc->regs);
+ goto release_nfc;
+ }
+
+ nfc->nfc_clk = devm_clk_get(dev, "nfc");
+ if (IS_ERR(nfc->nfc_clk)) {
+ dev_dbg(dev, "no NFC clk\n");
+ /* Some earlier models, such as rk3066, have no NFC clk. */
+ }
+
+ nfc->ahb_clk = devm_clk_get(dev, "ahb");
+ if (IS_ERR(nfc->ahb_clk)) {
+ dev_err(dev, "no ahb clk\n");
+ ret = PTR_ERR(nfc->ahb_clk);
+ goto release_nfc;
+ }
+
+ ret = rk_nfc_enable_clks(dev, nfc);
+ if (ret)
+ goto release_nfc;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "no NFC irq resource\n");
+ ret = -EINVAL;
+ goto clk_disable;
+ }
+
+ writel(0, nfc->regs + nfc->cfg->int_en_off);
+ ret = devm_request_irq(dev, irq, rk_nfc_irq, 0x0, "rk-nand", nfc);
+ if (ret) {
+ dev_err(dev, "failed to request NFC irq\n");
+ goto clk_disable;
+ }
+
+ platform_set_drvdata(pdev, nfc);
+
+ ret = rk_nfc_nand_chips_init(dev, nfc);
+ if (ret) {
+ dev_err(dev, "failed to init NAND chips\n");
+ goto clk_disable;
+ }
+ return 0;
+
+clk_disable:
+ rk_nfc_disable_clks(nfc);
+release_nfc:
+ return ret;
+}
+
+static int rk_nfc_remove(struct platform_device *pdev)
+{
+ struct rk_nfc *nfc = platform_get_drvdata(pdev);
+
+ kfree(nfc->page_buf);
+ kfree(nfc->oob_buf);
+ rk_nfc_chips_cleanup(nfc);
+ rk_nfc_disable_clks(nfc);
+
+ return 0;
+}
+
+static int __maybe_unused rk_nfc_suspend(struct device *dev)
+{
+ struct rk_nfc *nfc = dev_get_drvdata(dev);
+
+ rk_nfc_disable_clks(nfc);
+
+ return 0;
+}
+
+static int __maybe_unused rk_nfc_resume(struct device *dev)
+{
+ struct rk_nfc *nfc = dev_get_drvdata(dev);
+ struct rk_nfc_nand_chip *rknand;
+ struct nand_chip *chip;
+ int ret;
+ u32 i;
+
+ ret = rk_nfc_enable_clks(dev, nfc);
+ if (ret)
+ return ret;
+
+ /* Reset NAND chip if VCC was powered off. */
+ list_for_each_entry(rknand, &nfc->chips, node) {
+ chip = &rknand->chip;
+ for (i = 0; i < rknand->nsels; i++)
+ nand_reset(chip, i);
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops rk_nfc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(rk_nfc_suspend, rk_nfc_resume)
+};
+
+static struct platform_driver rk_nfc_driver = {
+ .probe = rk_nfc_probe,
+ .remove = rk_nfc_remove,
+ .driver = {
+ .name = "rockchip-nfc",
+ .of_match_table = rk_nfc_id_table,
+ .pm = &rk_nfc_pm_ops,
+ },
+};
+
+module_platform_driver(rk_nfc_driver);
+
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_AUTHOR("Yifeng Zhao <yifeng.zhao@rock-chips.com>");
+MODULE_DESCRIPTION("Rockchip NAND Flash Controller Driver");
+MODULE_ALIAS("platform:rockchip-nand-controller");
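The ecc0/ecc1 descriptors in the nfc_cfg tables above encode where each sector's corrected-bitflip count sits in the BCH status word: low_bn bits at bit position low, optionally extended by a high field (v6/v8 split a 6-bit count across two fields, v9 keeps all 7 bits in one). A minimal decode sketch, using a hypothetical struct and helper name since the driver's type definitions are not part of this hunk:

/* Hypothetical mirror of the ecc0/ecc1 descriptors used above. */
struct nfc_ecc_desc {
	u32 err_flag_bit;		/* bit set when the sector had errors */
	u32 low, low_mask, low_bn;	/* low part of the bitflip count */
	u32 high, high_mask;		/* optional high part (v6/v8 only) */
};

static u32 nfc_ecc_err_count(const struct nfc_ecc_desc *e, u32 st)
{
	u32 cnt = (st >> e->low) & e->low_mask;

	/* v6/v8 carry one extra count bit at 'high'; v9 has high_mask = 0 */
	if (e->high_mask)
		cnt |= ((st >> e->high) & e->high_mask) << e->low_bn;

	return cnt;	/* e.g. v6 ecc0: 5 bits at bit 3 plus 1 bit at bit 27 */
}

With the v6 values (low_bn = 5, high_mask = 0x1) this yields counts up to 63, covering the maximum strength of 60; the v9 layout (7 bits, no high field) covers its maximum of 70.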
diff --git a/drivers/mtd/nand/raw/s3c2410.c b/drivers/mtd/nand/raw/s3c2410.c
index fbd0fa48e063..f0a4535c812a 100644
--- a/drivers/mtd/nand/raw/s3c2410.c
+++ b/drivers/mtd/nand/raw/s3c2410.c
@@ -30,7 +30,6 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
-#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/partitions.h>
#include <linux/platform_data/mtd-nand-s3c2410.h>
@@ -134,7 +133,8 @@ enum s3c_nand_clk_state {
/**
* struct s3c2410_nand_info - NAND controller state.
- * @mtds: An array of MTD instances on this controoler.
+ * @controller: Base controller structure.
+ * @mtds: An array of MTD instances on this controller.
* @platform: The platform data for this board.
* @device: The platform device we bound to.
* @clk: The clock resource for this controller.
@@ -146,6 +146,7 @@ enum s3c_nand_clk_state {
* @clk_rate: The clock rate from @clk.
* @clk_state: The current clock state.
* @cpu_type: The exact type of this controller.
+ * @freq_transition: CPUFreq notifier block
*/
struct s3c2410_nand_info {
/* mtd info */
diff --git a/drivers/mtd/nand/raw/sharpsl.c b/drivers/mtd/nand/raw/sharpsl.c
index af98bcc9d689..5612ee628425 100644
--- a/drivers/mtd/nand/raw/sharpsl.c
+++ b/drivers/mtd/nand/raw/sharpsl.c
@@ -12,7 +12,6 @@
#include <linux/delay.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
-#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/sharpsl.h>
#include <linux/interrupt.h>
@@ -107,7 +106,7 @@ static int sharpsl_attach_chip(struct nand_chip *chip)
chip->ecc.strength = 1;
chip->ecc.hwctl = sharpsl_nand_enable_hwecc;
chip->ecc.calculate = sharpsl_nand_calculate_ecc;
- chip->ecc.correct = nand_correct_data;
+ chip->ecc.correct = rawnand_sw_hamming_correct;
return 0;
}
diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c
index 2a7ca3072f35..923a9e236fcf 100644
--- a/drivers/mtd/nand/raw/sunxi_nand.c
+++ b/drivers/mtd/nand/raw/sunxi_nand.c
@@ -51,6 +51,7 @@
#define NFC_REG_USER_DATA(x) (0x0050 + ((x) * 4))
#define NFC_REG_SPARE_AREA 0x00A0
#define NFC_REG_PAT_ID 0x00A4
+#define NFC_REG_MDMA_ADDR 0x00C0
#define NFC_REG_MDMA_CNT 0x00C4
#define NFC_RAM0_BASE 0x0400
#define NFC_RAM1_BASE 0x0800
@@ -182,6 +183,7 @@ struct sunxi_nand_hw_ecc {
*
* @node: used to store NAND chips into a list
* @nand: base NAND chip structure
+ * @ecc: ECC controller structure
* @clk_rate: clk_rate required for this NAND chip
* @timing_cfg: TIMING_CFG register value for this NAND chip
* @timing_ctl: TIMING_CTL register value for this NAND chip
@@ -191,6 +193,7 @@ struct sunxi_nand_hw_ecc {
struct sunxi_nand_chip {
struct list_head node;
struct nand_chip nand;
+ struct sunxi_nand_hw_ecc *ecc;
unsigned long clk_rate;
u32 timing_cfg;
u32 timing_ctl;
@@ -207,13 +210,13 @@ static inline struct sunxi_nand_chip *to_sunxi_nand(struct nand_chip *nand)
* NAND Controller capabilities structure: stores NAND controller capabilities
* for distinction between compatible strings.
*
- * @extra_mbus_conf: Contrary to A10, A10s and A13, accessing internal RAM
+ * @has_mdma: Use MBUS DMA mode, as accessing internal RAM
* through MBUS on A23/A33 needs extra configuration.
* @reg_io_data: I/O data register
* @dma_maxburst: DMA maxburst
*/
struct sunxi_nfc_caps {
- bool extra_mbus_conf;
+ bool has_mdma;
unsigned int reg_io_data;
unsigned int dma_maxburst;
};
@@ -233,6 +236,7 @@ struct sunxi_nfc_caps {
* controller
* @complete: a completion object used to wait for NAND controller events
* @dmac: the DMA channel attached to the NAND controller
+ * @caps: NAND Controller capabilities
*/
struct sunxi_nfc {
struct nand_controller controller;
@@ -363,24 +367,31 @@ static int sunxi_nfc_dma_op_prepare(struct sunxi_nfc *nfc, const void *buf,
if (!ret)
return -ENOMEM;
- dmad = dmaengine_prep_slave_sg(nfc->dmac, sg, 1, tdir, DMA_CTRL_ACK);
- if (!dmad) {
- ret = -EINVAL;
- goto err_unmap_buf;
+ if (!nfc->caps->has_mdma) {
+ dmad = dmaengine_prep_slave_sg(nfc->dmac, sg, 1, tdir, DMA_CTRL_ACK);
+ if (!dmad) {
+ ret = -EINVAL;
+ goto err_unmap_buf;
+ }
}
writel(readl(nfc->regs + NFC_REG_CTL) | NFC_RAM_METHOD,
nfc->regs + NFC_REG_CTL);
writel(nchunks, nfc->regs + NFC_REG_SECTOR_NUM);
writel(chunksize, nfc->regs + NFC_REG_CNT);
- if (nfc->caps->extra_mbus_conf)
- writel(chunksize * nchunks, nfc->regs + NFC_REG_MDMA_CNT);
- dmat = dmaengine_submit(dmad);
+ if (nfc->caps->has_mdma) {
+ writel(readl(nfc->regs + NFC_REG_CTL) & ~NFC_DMA_TYPE_NORMAL,
+ nfc->regs + NFC_REG_CTL);
+ writel(chunksize * nchunks, nfc->regs + NFC_REG_MDMA_CNT);
+ writel(sg_dma_address(sg), nfc->regs + NFC_REG_MDMA_ADDR);
+ } else {
+ dmat = dmaengine_submit(dmad);
- ret = dma_submit_error(dmat);
- if (ret)
- goto err_clr_dma_flag;
+ ret = dma_submit_error(dmat);
+ if (ret)
+ goto err_clr_dma_flag;
+ }
return 0;
@@ -676,15 +687,15 @@ static void sunxi_nfc_randomizer_read_buf(struct nand_chip *nand, uint8_t *buf,
static void sunxi_nfc_hw_ecc_enable(struct nand_chip *nand)
{
+ struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
- struct sunxi_nand_hw_ecc *data = nand->ecc.priv;
u32 ecc_ctl;
ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL);
ecc_ctl &= ~(NFC_ECC_MODE_MSK | NFC_ECC_PIPELINE |
NFC_ECC_BLOCK_SIZE_MSK);
- ecc_ctl |= NFC_ECC_EN | NFC_ECC_MODE(data->mode) | NFC_ECC_EXCEPTION |
- NFC_ECC_PIPELINE;
+ ecc_ctl |= NFC_ECC_EN | NFC_ECC_MODE(sunxi_nand->ecc->mode) |
+ NFC_ECC_EXCEPTION | NFC_ECC_PIPELINE;
if (nand->ecc.size == 512)
ecc_ctl |= NFC_ECC_BLOCK_512;
@@ -911,7 +922,7 @@ static int sunxi_nfc_hw_ecc_read_chunks_dma(struct nand_chip *nand, uint8_t *buf
unsigned int max_bitflips = 0;
int ret, i, raw_mode = 0;
struct scatterlist sg;
- u32 status;
+ u32 status, wait;
ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
if (ret)
@@ -929,13 +940,18 @@ static int sunxi_nfc_hw_ecc_read_chunks_dma(struct nand_chip *nand, uint8_t *buf
writel((NAND_CMD_RNDOUTSTART << 16) | (NAND_CMD_RNDOUT << 8) |
NAND_CMD_READSTART, nfc->regs + NFC_REG_RCMD_SET);
- dma_async_issue_pending(nfc->dmac);
+ wait = NFC_CMD_INT_FLAG;
+
+ if (nfc->caps->has_mdma)
+ wait |= NFC_DMA_INT_FLAG;
+ else
+ dma_async_issue_pending(nfc->dmac);
writel(NFC_PAGE_OP | NFC_DATA_SWAP_METHOD | NFC_DATA_TRANS,
nfc->regs + NFC_REG_CMD);
- ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, false, 0);
- if (ret)
+ ret = sunxi_nfc_wait_events(nfc, wait, false, 0);
+ if (ret && !nfc->caps->has_mdma)
dmaengine_terminate_all(nfc->dmac);
sunxi_nfc_randomizer_disable(nand);
@@ -1276,6 +1292,7 @@ static int sunxi_nfc_hw_ecc_write_page_dma(struct nand_chip *nand,
struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
struct nand_ecc_ctrl *ecc = &nand->ecc;
struct scatterlist sg;
+ u32 wait;
int ret, i;
sunxi_nfc_select_chip(nand, nand->cur_cs);
@@ -1304,14 +1321,19 @@ static int sunxi_nfc_hw_ecc_write_page_dma(struct nand_chip *nand,
writel((NAND_CMD_RNDIN << 8) | NAND_CMD_PAGEPROG,
nfc->regs + NFC_REG_WCMD_SET);
- dma_async_issue_pending(nfc->dmac);
+ wait = NFC_CMD_INT_FLAG;
+
+ if (nfc->caps->has_mdma)
+ wait |= NFC_DMA_INT_FLAG;
+ else
+ dma_async_issue_pending(nfc->dmac);
writel(NFC_PAGE_OP | NFC_DATA_SWAP_METHOD |
NFC_DATA_TRANS | NFC_ACCESS_DIR,
nfc->regs + NFC_REG_CMD);
- ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, false, 0);
- if (ret)
+ ret = sunxi_nfc_wait_events(nfc, wait, false, 0);
+ if (ret && !nfc->caps->has_mdma)
dmaengine_terminate_all(nfc->dmac);
sunxi_nfc_randomizer_disable(nand);
@@ -1597,9 +1619,9 @@ static const struct mtd_ooblayout_ops sunxi_nand_ooblayout_ops = {
.free = sunxi_nand_ooblayout_free,
};
-static void sunxi_nand_hw_ecc_ctrl_cleanup(struct nand_ecc_ctrl *ecc)
+static void sunxi_nand_hw_ecc_ctrl_cleanup(struct sunxi_nand_chip *sunxi_nand)
{
- kfree(ecc->priv);
+ kfree(sunxi_nand->ecc);
}
static int sunxi_nand_hw_ecc_ctrl_init(struct nand_chip *nand,
@@ -1607,10 +1629,10 @@ static int sunxi_nand_hw_ecc_ctrl_init(struct nand_chip *nand,
struct device_node *np)
{
static const u8 strengths[] = { 16, 24, 28, 32, 40, 48, 56, 60, 64 };
+ struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
struct mtd_info *mtd = nand_to_mtd(nand);
struct nand_device *nanddev = mtd_to_nanddev(mtd);
- struct sunxi_nand_hw_ecc *data;
int nsectors;
int ret;
int i;
@@ -1647,8 +1669,8 @@ static int sunxi_nand_hw_ecc_ctrl_init(struct nand_chip *nand,
if (ecc->size != 512 && ecc->size != 1024)
return -EINVAL;
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
+ sunxi_nand->ecc = kzalloc(sizeof(*sunxi_nand->ecc), GFP_KERNEL);
+ if (!sunxi_nand->ecc)
return -ENOMEM;
/* Prefer 1k ECC chunk over 512 ones */
@@ -1675,7 +1697,7 @@ static int sunxi_nand_hw_ecc_ctrl_init(struct nand_chip *nand,
goto err;
}
- data->mode = i;
+ sunxi_nand->ecc->mode = i;
/* HW ECC always requests ECC bytes for 1024-byte blocks */
ecc->bytes = DIV_ROUND_UP(ecc->strength * fls(8 * 1024), 8);
@@ -1693,9 +1715,8 @@ static int sunxi_nand_hw_ecc_ctrl_init(struct nand_chip *nand,
ecc->read_oob = sunxi_nfc_hw_ecc_read_oob;
ecc->write_oob = sunxi_nfc_hw_ecc_write_oob;
mtd_set_ooblayout(mtd, &sunxi_nand_ooblayout_ops);
- ecc->priv = data;
- if (nfc->dmac) {
+ if (nfc->dmac || nfc->caps->has_mdma) {
ecc->read_page = sunxi_nfc_hw_ecc_read_page_dma;
ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage_dma;
ecc->write_page = sunxi_nfc_hw_ecc_write_page_dma;
@@ -1714,16 +1735,18 @@ static int sunxi_nand_hw_ecc_ctrl_init(struct nand_chip *nand,
return 0;
err:
- kfree(data);
+ kfree(sunxi_nand->ecc);
return ret;
}
-static void sunxi_nand_ecc_cleanup(struct nand_ecc_ctrl *ecc)
+static void sunxi_nand_ecc_cleanup(struct sunxi_nand_chip *sunxi_nand)
{
+ struct nand_ecc_ctrl *ecc = &sunxi_nand->nand.ecc;
+
switch (ecc->engine_type) {
case NAND_ECC_ENGINE_TYPE_ON_HOST:
- sunxi_nand_hw_ecc_ctrl_cleanup(ecc);
+ sunxi_nand_hw_ecc_ctrl_cleanup(sunxi_nand);
break;
case NAND_ECC_ENGINE_TYPE_NONE:
default:
@@ -2053,11 +2076,41 @@ static void sunxi_nand_chips_cleanup(struct sunxi_nfc *nfc)
ret = mtd_device_unregister(nand_to_mtd(chip));
WARN_ON(ret);
nand_cleanup(chip);
- sunxi_nand_ecc_cleanup(&chip->ecc);
+ sunxi_nand_ecc_cleanup(sunxi_nand);
list_del(&sunxi_nand->node);
}
}
+static int sunxi_nfc_dma_init(struct sunxi_nfc *nfc, struct resource *r)
+{
+ int ret;
+
+ if (nfc->caps->has_mdma)
+ return 0;
+
+ nfc->dmac = dma_request_chan(nfc->dev, "rxtx");
+ if (IS_ERR(nfc->dmac)) {
+ ret = PTR_ERR(nfc->dmac);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+
+ /* Ignore errors to fall back to PIO mode */
+ dev_warn(nfc->dev, "failed to request rxtx DMA channel: %d\n", ret);
+ nfc->dmac = NULL;
+ } else {
+ struct dma_slave_config dmac_cfg = { };
+
+ dmac_cfg.src_addr = r->start + nfc->caps->reg_io_data;
+ dmac_cfg.dst_addr = dmac_cfg.src_addr;
+ dmac_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dmac_cfg.dst_addr_width = dmac_cfg.src_addr_width;
+ dmac_cfg.src_maxburst = nfc->caps->dma_maxburst;
+ dmac_cfg.dst_maxburst = nfc->caps->dma_maxburst;
+ dmaengine_slave_config(nfc->dmac, &dmac_cfg);
+ }
+ return 0;
+}
+
static int sunxi_nfc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -2132,30 +2185,10 @@ static int sunxi_nfc_probe(struct platform_device *pdev)
if (ret)
goto out_ahb_reset_reassert;
- nfc->dmac = dma_request_chan(dev, "rxtx");
- if (IS_ERR(nfc->dmac)) {
- ret = PTR_ERR(nfc->dmac);
- if (ret == -EPROBE_DEFER)
- goto out_ahb_reset_reassert;
-
- /* Ignore errors to fall back to PIO mode */
- dev_warn(dev, "failed to request rxtx DMA channel: %d\n", ret);
- nfc->dmac = NULL;
- } else {
- struct dma_slave_config dmac_cfg = { };
-
- dmac_cfg.src_addr = r->start + nfc->caps->reg_io_data;
- dmac_cfg.dst_addr = dmac_cfg.src_addr;
- dmac_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- dmac_cfg.dst_addr_width = dmac_cfg.src_addr_width;
- dmac_cfg.src_maxburst = nfc->caps->dma_maxburst;
- dmac_cfg.dst_maxburst = nfc->caps->dma_maxburst;
- dmaengine_slave_config(nfc->dmac, &dmac_cfg);
+ ret = sunxi_nfc_dma_init(nfc, r);
- if (nfc->caps->extra_mbus_conf)
- writel(readl(nfc->regs + NFC_REG_CTL) |
- NFC_DMA_TYPE_NORMAL, nfc->regs + NFC_REG_CTL);
- }
+ if (ret)
+ goto out_ahb_reset_reassert;
platform_set_drvdata(pdev, nfc);
@@ -2202,7 +2235,7 @@ static const struct sunxi_nfc_caps sunxi_nfc_a10_caps = {
};
static const struct sunxi_nfc_caps sunxi_nfc_a23_caps = {
- .extra_mbus_conf = true,
+ .has_mdma = true,
.reg_io_data = NFC_REG_A23_IO_DATA,
.dma_maxburst = 8,
};
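To summarize the has_mdma path introduced above: controllers with the MBUS DMA engine are programmed directly through NFC_REG_MDMA_ADDR/NFC_REG_MDMA_CNT (with NFC_DMA_TYPE_NORMAL cleared in NFC_REG_CTL) and completion is awaited via NFC_DMA_INT_FLAG, so no external dmaengine channel is requested at all; all other controllers keep using the "rxtx" dmaengine channel and fall back to PIO when it is unavailable.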
diff --git a/drivers/mtd/nand/raw/tmio_nand.c b/drivers/mtd/nand/raw/tmio_nand.c
index aa6c7e7bbf1b..de8e919d0ebe 100644
--- a/drivers/mtd/nand/raw/tmio_nand.c
+++ b/drivers/mtd/nand/raw/tmio_nand.c
@@ -35,7 +35,6 @@
#include <linux/ioport.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
-#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/partitions.h>
#include <linux/slab.h>
@@ -293,11 +292,11 @@ static int tmio_nand_correct_data(struct nand_chip *chip, unsigned char *buf,
int r0, r1;
/* assume ecc.size = 512 and ecc.bytes = 6 */
- r0 = __nand_correct_data(buf, read_ecc, calc_ecc, 256, false);
+ r0 = rawnand_sw_hamming_correct(chip, buf, read_ecc, calc_ecc);
if (r0 < 0)
return r0;
- r1 = __nand_correct_data(buf + 256, read_ecc + 3, calc_ecc + 3, 256,
- false);
+ r1 = rawnand_sw_hamming_correct(chip, buf + 256, read_ecc + 3,
+ calc_ecc + 3);
if (r1 < 0)
return r1;
return r0 + r1;
diff --git a/drivers/mtd/nand/raw/txx9ndfmc.c b/drivers/mtd/nand/raw/txx9ndfmc.c
index fe8ed2441588..1a9449e53bf9 100644
--- a/drivers/mtd/nand/raw/txx9ndfmc.c
+++ b/drivers/mtd/nand/raw/txx9ndfmc.c
@@ -14,7 +14,6 @@
#include <linux/delay.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
-#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/partitions.h>
#include <linux/io.h>
#include <linux/platform_data/txx9/ndfmc.h>
@@ -194,8 +193,8 @@ static int txx9ndfmc_correct_data(struct nand_chip *chip, unsigned char *buf,
int stat;
for (eccsize = chip->ecc.size; eccsize > 0; eccsize -= 256) {
- stat = __nand_correct_data(buf, read_ecc, calc_ecc, 256,
- false);
+ stat = rawnand_sw_hamming_correct(chip, buf, read_ecc,
+ calc_ecc);
if (stat < 0)
return stat;
corrected += stat;
diff --git a/drivers/mtd/nand/spi/Kconfig b/drivers/mtd/nand/spi/Kconfig
index da89b250df7c..3d7649a2dd72 100644
--- a/drivers/mtd/nand/spi/Kconfig
+++ b/drivers/mtd/nand/spi/Kconfig
@@ -2,6 +2,7 @@
menuconfig MTD_SPI_NAND
tristate "SPI NAND device Support"
select MTD_NAND_CORE
+ select MTD_NAND_ECC
depends on SPI_MASTER
select SPI_MEM
help
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index c35221794645..61d932c1b718 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -193,6 +193,135 @@ static int spinand_ecc_enable(struct spinand_device *spinand,
enable ? CFG_ECC_ENABLE : 0);
}
+static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+
+ if (spinand->eccinfo.get_status)
+ return spinand->eccinfo.get_status(spinand, status);
+
+ switch (status & STATUS_ECC_MASK) {
+ case STATUS_ECC_NO_BITFLIPS:
+ return 0;
+
+ case STATUS_ECC_HAS_BITFLIPS:
+ /*
+ * We have no way to know exactly how many bitflips have been
+ * fixed, so let's return the maximum possible value so that
+ * wear-leveling layers move the data immediately.
+ */
+ return nanddev_get_ecc_conf(nand)->strength;
+
+ case STATUS_ECC_UNCOR_ERROR:
+ return -EBADMSG;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
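With no vendor-specific ->get_status() hook, a part declaring NAND_ECCREQ(8, 512) that reports STATUS_ECC_HAS_BITFLIPS therefore returns 8 here, its maximum strength, which prompts wear-leveling layers such as UBI to scrub the data promptly; only a vendor hook can report a finer-grained bitflip count.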
+static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ return -ERANGE;
+}
+
+static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section)
+ return -ERANGE;
+
+ /* Reserve 2 bytes for the BBM. */
+ region->offset = 2;
+ region->length = 62;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
+ .ecc = spinand_noecc_ooblayout_ecc,
+ .free = spinand_noecc_ooblayout_free,
+};
+
+static int spinand_ondie_ecc_init_ctx(struct nand_device *nand)
+{
+ struct spinand_device *spinand = nand_to_spinand(nand);
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+ struct spinand_ondie_ecc_conf *engine_conf;
+
+ nand->ecc.ctx.conf.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
+ nand->ecc.ctx.conf.step_size = nand->ecc.requirements.step_size;
+ nand->ecc.ctx.conf.strength = nand->ecc.requirements.strength;
+
+ engine_conf = kzalloc(sizeof(*engine_conf), GFP_KERNEL);
+ if (!engine_conf)
+ return -ENOMEM;
+
+ nand->ecc.ctx.priv = engine_conf;
+
+ if (spinand->eccinfo.ooblayout)
+ mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
+ else
+ mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);
+
+ return 0;
+}
+
+static void spinand_ondie_ecc_cleanup_ctx(struct nand_device *nand)
+{
+ kfree(nand->ecc.ctx.priv);
+}
+
+static int spinand_ondie_ecc_prepare_io_req(struct nand_device *nand,
+ struct nand_page_io_req *req)
+{
+ struct spinand_device *spinand = nand_to_spinand(nand);
+ bool enable = (req->mode != MTD_OPS_RAW);
+
+ /* Only enable or disable the engine */
+ return spinand_ecc_enable(spinand, enable);
+}
+
+static int spinand_ondie_ecc_finish_io_req(struct nand_device *nand,
+ struct nand_page_io_req *req)
+{
+ struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv;
+ struct spinand_device *spinand = nand_to_spinand(nand);
+
+ if (req->mode == MTD_OPS_RAW)
+ return 0;
+
+ /* Nothing to do when finishing a page write */
+ if (req->type == NAND_PAGE_WRITE)
+ return 0;
+
+ /* Finish a page read: check the status, report errors/bitflips */
+ return spinand_check_ecc_status(spinand, engine_conf->status);
+}
+
+static struct nand_ecc_engine_ops spinand_ondie_ecc_engine_ops = {
+ .init_ctx = spinand_ondie_ecc_init_ctx,
+ .cleanup_ctx = spinand_ondie_ecc_cleanup_ctx,
+ .prepare_io_req = spinand_ondie_ecc_prepare_io_req,
+ .finish_io_req = spinand_ondie_ecc_finish_io_req,
+};
+
+static struct nand_ecc_engine spinand_ondie_ecc_engine = {
+ .ops = &spinand_ondie_ecc_engine_ops,
+};
+
+static void spinand_ondie_ecc_save_status(struct nand_device *nand, u8 status)
+{
+ struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv;
+
+ if (nand->ecc.ctx.conf.engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE &&
+ engine_conf)
+ engine_conf->status = status;
+}
+
static int spinand_write_enable_op(struct spinand_device *spinand)
{
struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);
@@ -214,7 +343,7 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
const struct nand_page_io_req *req)
{
struct nand_device *nand = spinand_to_nand(spinand);
- struct mtd_info *mtd = nanddev_to_mtd(nand);
+ struct mtd_info *mtd = spinand_to_mtd(spinand);
struct spi_mem_dirmap_desc *rdesc;
unsigned int nbytes = 0;
void *buf = NULL;
@@ -272,7 +401,7 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
const struct nand_page_io_req *req)
{
struct nand_device *nand = spinand_to_nand(spinand);
- struct mtd_info *mtd = nanddev_to_mtd(nand);
+ struct mtd_info *mtd = spinand_to_mtd(spinand);
struct spi_mem_dirmap_desc *wdesc;
unsigned int nbytes, column = 0;
void *buf = spinand->databuf;
@@ -284,9 +413,12 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
* must fill the page cache entirely even if we only want to program
* the data portion of the page, otherwise we might corrupt the BBM or
* user data previously programmed in OOB area.
+ *
+ * Only reset the data buffer manually; the OOB buffer is prepared by
+ * the ECC engine's ->prepare_io_req() callback.
*/
nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
- memset(spinand->databuf, 0xff, nbytes);
+ memset(spinand->databuf, 0xff, nanddev_page_size(nand));
if (req->datalen)
memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
@@ -402,42 +534,17 @@ static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
}
-static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
-{
- struct nand_device *nand = spinand_to_nand(spinand);
-
- if (spinand->eccinfo.get_status)
- return spinand->eccinfo.get_status(spinand, status);
-
- switch (status & STATUS_ECC_MASK) {
- case STATUS_ECC_NO_BITFLIPS:
- return 0;
-
- case STATUS_ECC_HAS_BITFLIPS:
- /*
- * We have no way to know exactly how many bitflips have been
- * fixed, so let's return the maximum possible value so that
- * wear-leveling layers move the data immediately.
- */
- return nanddev_get_ecc_conf(nand)->strength;
-
- case STATUS_ECC_UNCOR_ERROR:
- return -EBADMSG;
-
- default:
- break;
- }
-
- return -EINVAL;
-}
-
static int spinand_read_page(struct spinand_device *spinand,
- const struct nand_page_io_req *req,
- bool ecc_enabled)
+ const struct nand_page_io_req *req)
{
+ struct nand_device *nand = spinand_to_nand(spinand);
u8 status;
int ret;
+ ret = nand_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req);
+ if (ret)
+ return ret;
+
ret = spinand_load_page_op(spinand, req);
if (ret)
return ret;
@@ -446,22 +553,26 @@ static int spinand_read_page(struct spinand_device *spinand,
if (ret < 0)
return ret;
+ spinand_ondie_ecc_save_status(nand, status);
+
ret = spinand_read_from_cache_op(spinand, req);
if (ret)
return ret;
- if (!ecc_enabled)
- return 0;
-
- return spinand_check_ecc_status(spinand, status);
+ return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
}
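With the on-die engine wired up, a non-raw page read thus flows through nand_ecc_prepare_io_req() (which only enables the ECC engine), spinand_load_page_op(), spinand_wait() whose status byte is stashed by spinand_ondie_ecc_save_status(), spinand_read_from_cache_op(), and finally nand_ecc_finish_io_req(), which hands the saved status to spinand_check_ecc_status() and returns the bitflip count or -EBADMSG.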
static int spinand_write_page(struct spinand_device *spinand,
const struct nand_page_io_req *req)
{
+ struct nand_device *nand = spinand_to_nand(spinand);
u8 status;
int ret;
+ ret = nand_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req);
+ if (ret)
+ return ret;
+
ret = spinand_write_enable_op(spinand);
if (ret)
return ret;
@@ -476,9 +587,9 @@ static int spinand_write_page(struct spinand_device *spinand,
ret = spinand_wait(spinand, &status);
if (!ret && (status & STATUS_PROG_FAILED))
- ret = -EIO;
+ return -EIO;
- return ret;
+ return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
}
static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
@@ -488,25 +599,24 @@ static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
struct nand_device *nand = mtd_to_nanddev(mtd);
unsigned int max_bitflips = 0;
struct nand_io_iter iter;
- bool enable_ecc = false;
+ bool disable_ecc = false;
bool ecc_failed = false;
int ret = 0;
- if (ops->mode != MTD_OPS_RAW && spinand->eccinfo.ooblayout)
- enable_ecc = true;
+ if (ops->mode == MTD_OPS_RAW || !spinand->eccinfo.ooblayout)
+ disable_ecc = true;
mutex_lock(&spinand->lock);
nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) {
- ret = spinand_select_target(spinand, iter.req.pos.target);
- if (ret)
- break;
+ if (disable_ecc)
+ iter.req.mode = MTD_OPS_RAW;
- ret = spinand_ecc_enable(spinand, enable_ecc);
+ ret = spinand_select_target(spinand, iter.req.pos.target);
if (ret)
break;
- ret = spinand_read_page(spinand, &iter.req, enable_ecc);
+ ret = spinand_read_page(spinand, &iter.req);
if (ret < 0 && ret != -EBADMSG)
break;
@@ -537,20 +647,19 @@ static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
struct spinand_device *spinand = mtd_to_spinand(mtd);
struct nand_device *nand = mtd_to_nanddev(mtd);
struct nand_io_iter iter;
- bool enable_ecc = false;
+ bool disable_ecc = false;
int ret = 0;
- if (ops->mode != MTD_OPS_RAW && mtd->ooblayout)
- enable_ecc = true;
+ if (ops->mode == MTD_OPS_RAW || !mtd->ooblayout)
+ disable_ecc = true;
mutex_lock(&spinand->lock);
nanddev_io_for_each_page(nand, NAND_PAGE_WRITE, to, ops, &iter) {
- ret = spinand_select_target(spinand, iter.req.pos.target);
- if (ret)
- break;
+ if (disable_ecc)
+ iter.req.mode = MTD_OPS_RAW;
- ret = spinand_ecc_enable(spinand, enable_ecc);
+ ret = spinand_select_target(spinand, iter.req.pos.target);
if (ret)
break;
@@ -580,7 +689,7 @@ static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
};
spinand_select_target(spinand, pos->target);
- spinand_read_page(spinand, &req, false);
+ spinand_read_page(spinand, &req);
if (marker[0] != 0xff || marker[1] != 0xff)
return true;
@@ -965,30 +1074,6 @@ static int spinand_detect(struct spinand_device *spinand)
return 0;
}
-static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
- struct mtd_oob_region *region)
-{
- return -ERANGE;
-}
-
-static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
- struct mtd_oob_region *region)
-{
- if (section)
- return -ERANGE;
-
- /* Reserve 2 bytes for the BBM. */
- region->offset = 2;
- region->length = 62;
-
- return 0;
-}
-
-static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
- .ecc = spinand_noecc_ooblayout_ecc,
- .free = spinand_noecc_ooblayout_free,
-};
-
static int spinand_init(struct spinand_device *spinand)
{
struct device *dev = &spinand->spimem->spi->dev;
@@ -1066,10 +1151,15 @@ static int spinand_init(struct spinand_device *spinand)
if (ret)
goto err_manuf_cleanup;
- /*
- * Right now, we don't support ECC, so let the whole oob
- * area is available for user.
- */
+ /* SPI-NAND default ECC engine is on-die */
+ nand->ecc.defaults.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
+ nand->ecc.ondie_engine = &spinand_ondie_ecc_engine;
+
+ spinand_ecc_enable(spinand, false);
+ ret = nanddev_ecc_engine_init(nand);
+ if (ret)
+ goto err_cleanup_nanddev;
+
mtd->_read_oob = spinand_mtd_read;
mtd->_write_oob = spinand_mtd_write;
mtd->_block_isbad = spinand_mtd_block_isbad;
@@ -1078,14 +1168,11 @@ static int spinand_init(struct spinand_device *spinand)
mtd->_erase = spinand_mtd_erase;
mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;
- if (spinand->eccinfo.ooblayout)
- mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
- else
- mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);
-
- ret = mtd_ooblayout_count_freebytes(mtd);
- if (ret < 0)
- goto err_cleanup_nanddev;
+ if (nand->ecc.engine) {
+ ret = mtd_ooblayout_count_freebytes(mtd);
+ if (ret < 0)
+ goto err_cleanup_ecc_engine;
+ }
mtd->oobavail = ret;
@@ -1095,6 +1182,9 @@ static int spinand_init(struct spinand_device *spinand)
return 0;
+err_cleanup_ecc_engine:
+ nanddev_ecc_engine_cleanup(nand);
+
err_cleanup_nanddev:
nanddev_cleanup(nand);
diff --git a/drivers/mtd/nand/spi/macronix.c b/drivers/mtd/nand/spi/macronix.c
index 8e801e4c3a00..6701aaa21a49 100644
--- a/drivers/mtd/nand/spi/macronix.c
+++ b/drivers/mtd/nand/spi/macronix.c
@@ -119,6 +119,53 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
+ SPINAND_INFO("MX35LF2GE4AD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x26),
+ NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
+ mx35lf1ge4ab_ecc_get_status)),
+ SPINAND_INFO("MX35LF4GE4AD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x37),
+ NAND_MEMORG(1, 4096, 128, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
+ mx35lf1ge4ab_ecc_get_status)),
+ SPINAND_INFO("MX35LF1G24AD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x14),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
+ SPINAND_INFO("MX35LF2G24AD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x24),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
+ SPINAND_INFO("MX35LF4G24AD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x35),
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 2, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
SPINAND_INFO("MX31LF1GE4BC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x1e),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
diff --git a/drivers/mtd/nand/spi/micron.c b/drivers/mtd/nand/spi/micron.c
index 5d370cfcdaaa..50b7295bc922 100644
--- a/drivers/mtd/nand/spi/micron.c
+++ b/drivers/mtd/nand/spi/micron.c
@@ -28,7 +28,7 @@
#define MICRON_SELECT_DIE(x) ((x) << 6)
-static SPINAND_OP_VARIANTS(read_cache_variants,
+static SPINAND_OP_VARIANTS(quadio_read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
@@ -36,14 +36,27 @@ static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
-static SPINAND_OP_VARIANTS(write_cache_variants,
+static SPINAND_OP_VARIANTS(x4_write_cache_variants,
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
SPINAND_PROG_LOAD(true, 0, NULL, 0));
-static SPINAND_OP_VARIANTS(update_cache_variants,
+static SPINAND_OP_VARIANTS(x4_update_cache_variants,
SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
SPINAND_PROG_LOAD(false, 0, NULL, 0));
+/* Micron MT29F2G01AAAED Device */
+static SPINAND_OP_VARIANTS(x4_read_cache_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+
+static SPINAND_OP_VARIANTS(x1_write_cache_variants,
+ SPINAND_PROG_LOAD(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(x1_update_cache_variants,
+ SPINAND_PROG_LOAD(false, 0, NULL, 0));
+
static int micron_8_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
@@ -74,6 +87,47 @@ static const struct mtd_ooblayout_ops micron_8_ooblayout = {
.free = micron_8_ooblayout_free,
};
+static int micron_4_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+
+ if (section >= spinand->base.memorg.pagesize /
+ mtd->ecc_step_size)
+ return -ERANGE;
+
+ region->offset = (section * 16) + 8;
+ region->length = 8;
+
+ return 0;
+}
+
+static int micron_4_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+
+ if (section >= spinand->base.memorg.pagesize /
+ mtd->ecc_step_size)
+ return -ERANGE;
+
+ if (section) {
+ region->offset = 16 * section;
+ region->length = 8;
+ } else {
+ /* section 0 has two bytes reserved for the BBM */
+ region->offset = 2;
+ region->length = 6;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops micron_4_ooblayout = {
+ .ecc = micron_4_ooblayout_ecc,
+ .free = micron_4_ooblayout_free,
+};
+
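Worked example, assuming mtd->ecc_step_size is 512: the 2048-byte page of the MT29F2G01AAAED added below gives four OOB sections, with ECC bytes at offsets 8-15, 24-31, 40-47 and 56-63, free bytes at 2-7, 16-23, 32-39 and 48-55, and bytes 0-1 reserved for the BBM (32 + 30 + 2 = 64, matching the part's 64-byte OOB area).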
static int micron_select_target(struct spinand_device *spinand,
unsigned int target)
{
@@ -120,9 +174,9 @@ static const struct spinand_info micron_spinand_table[] = {
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x24),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1),
NAND_ECCREQ(8, 512),
- SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
- &write_cache_variants,
- &update_cache_variants),
+ SPINAND_INFO_OP_VARIANTS(&quadio_read_cache_variants,
+ &x4_write_cache_variants,
+ &x4_update_cache_variants),
0,
SPINAND_ECCINFO(&micron_8_ooblayout,
micron_8_ecc_get_status)),
@@ -131,9 +185,9 @@ static const struct spinand_info micron_spinand_table[] = {
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x25),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1),
NAND_ECCREQ(8, 512),
- SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
- &write_cache_variants,
- &update_cache_variants),
+ SPINAND_INFO_OP_VARIANTS(&quadio_read_cache_variants,
+ &x4_write_cache_variants,
+ &x4_update_cache_variants),
0,
SPINAND_ECCINFO(&micron_8_ooblayout,
micron_8_ecc_get_status)),
@@ -142,9 +196,9 @@ static const struct spinand_info micron_spinand_table[] = {
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x14),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(8, 512),
- SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
- &write_cache_variants,
- &update_cache_variants),
+ SPINAND_INFO_OP_VARIANTS(&quadio_read_cache_variants,
+ &x4_write_cache_variants,
+ &x4_update_cache_variants),
0,
SPINAND_ECCINFO(&micron_8_ooblayout,
micron_8_ecc_get_status)),
@@ -153,9 +207,9 @@ static const struct spinand_info micron_spinand_table[] = {
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x15),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(8, 512),
- SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
- &write_cache_variants,
- &update_cache_variants),
+ SPINAND_INFO_OP_VARIANTS(&quadio_read_cache_variants,
+ &x4_write_cache_variants,
+ &x4_update_cache_variants),
0,
SPINAND_ECCINFO(&micron_8_ooblayout,
micron_8_ecc_get_status)),
@@ -164,9 +218,9 @@ static const struct spinand_info micron_spinand_table[] = {
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x36),
NAND_MEMORG(1, 2048, 128, 64, 2048, 80, 2, 1, 2),
NAND_ECCREQ(8, 512),
- SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
- &write_cache_variants,
- &update_cache_variants),
+ SPINAND_INFO_OP_VARIANTS(&quadio_read_cache_variants,
+ &x4_write_cache_variants,
+ &x4_update_cache_variants),
0,
SPINAND_ECCINFO(&micron_8_ooblayout,
micron_8_ecc_get_status),
@@ -176,9 +230,9 @@ static const struct spinand_info micron_spinand_table[] = {
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x34),
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
- SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
- &write_cache_variants,
- &update_cache_variants),
+ SPINAND_INFO_OP_VARIANTS(&quadio_read_cache_variants,
+ &x4_write_cache_variants,
+ &x4_update_cache_variants),
SPINAND_HAS_CR_FEAT_BIT,
SPINAND_ECCINFO(&micron_8_ooblayout,
micron_8_ecc_get_status)),
@@ -187,9 +241,9 @@ static const struct spinand_info micron_spinand_table[] = {
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x35),
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
- SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
- &write_cache_variants,
- &update_cache_variants),
+ SPINAND_INFO_OP_VARIANTS(&quadio_read_cache_variants,
+ &x4_write_cache_variants,
+ &x4_update_cache_variants),
SPINAND_HAS_CR_FEAT_BIT,
SPINAND_ECCINFO(&micron_8_ooblayout,
micron_8_ecc_get_status)),
@@ -198,9 +252,9 @@ static const struct spinand_info micron_spinand_table[] = {
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x46),
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 2),
NAND_ECCREQ(8, 512),
- SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
- &write_cache_variants,
- &update_cache_variants),
+ SPINAND_INFO_OP_VARIANTS(&quadio_read_cache_variants,
+ &x4_write_cache_variants,
+ &x4_update_cache_variants),
SPINAND_HAS_CR_FEAT_BIT,
SPINAND_ECCINFO(&micron_8_ooblayout,
micron_8_ecc_get_status),
@@ -210,13 +264,23 @@ static const struct spinand_info micron_spinand_table[] = {
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x47),
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 2),
NAND_ECCREQ(8, 512),
- SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
- &write_cache_variants,
- &update_cache_variants),
+ SPINAND_INFO_OP_VARIANTS(&quadio_read_cache_variants,
+ &x4_write_cache_variants,
+ &x4_update_cache_variants),
SPINAND_HAS_CR_FEAT_BIT,
SPINAND_ECCINFO(&micron_8_ooblayout,
micron_8_ecc_get_status),
SPINAND_SELECT_TARGET(micron_select_target)),
+ /* M69A 2Gb 3.3V */
+ SPINAND_INFO("MT29F2G01AAAED",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x9F),
+ NAND_MEMORG(1, 2048, 64, 64, 2048, 80, 2, 1, 1),
+ NAND_ECCREQ(4, 512),
+ SPINAND_INFO_OP_VARIANTS(&x4_read_cache_variants,
+ &x1_write_cache_variants,
+ &x1_update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&micron_4_ooblayout, NULL)),
};
static int micron_spinand_init(struct spinand_device *spinand)
diff --git a/drivers/mtd/nand/spi/toshiba.c b/drivers/mtd/nand/spi/toshiba.c
index 21fde2875674..7380b1ebaccd 100644
--- a/drivers/mtd/nand/spi/toshiba.c
+++ b/drivers/mtd/nand/spi/toshiba.c
@@ -28,7 +28,7 @@ static SPINAND_OP_VARIANTS(update_cache_x4_variants,
SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
SPINAND_PROG_LOAD(false, 0, NULL, 0));
-/**
+/*
* Backward compatibility for 1st generation Serial NAND devices
* which don't support Quad Program Load operation.
*/
diff --git a/drivers/mtd/parsers/cmdlinepart.c b/drivers/mtd/parsers/cmdlinepart.c
index a79e4d866b08..0ddff1a4b51f 100644
--- a/drivers/mtd/parsers/cmdlinepart.c
+++ b/drivers/mtd/parsers/cmdlinepart.c
@@ -226,7 +226,7 @@ static int mtdpart_setup_real(char *s)
struct cmdline_mtd_partition *this_mtd;
struct mtd_partition *parts;
int mtd_id_len, num_parts;
- char *p, *mtd_id, *semicol;
+ char *p, *mtd_id, *semicol, *open_parenth;
/*
* Replace the first ';' by a NULL char so strrchr can work
@@ -236,6 +236,14 @@ static int mtdpart_setup_real(char *s)
if (semicol)
*semicol = '\0';
+ /*
+ * Make sure that part names containing ':' are not mistaken for
+ * part of the mtd-id.
+ */
+ open_parenth = strchr(s, '(');
+ if (open_parenth)
+ *open_parenth = '\0';
+
mtd_id = s;
/*
@@ -245,6 +253,10 @@ static int mtdpart_setup_real(char *s)
*/
p = strrchr(s, ':');
+ /* Restore the '(' now. */
+ if (open_parenth)
+ *open_parenth = '(';
+
/* Restore the ';' now. */
if (semicol)
*semicol = ';';
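As an illustrative example, for mtdparts=spi0.0:1M(u-boot),-(user:data) the last ':' of the string sits inside the partition name "user:data"; temporarily terminating the string at the first '(' confines strrchr() to the "spi0.0:1M" prefix, so the colon it finds is the real mtd-id separator.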
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index b9f272408c4d..4d1ae25507ab 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -13,7 +13,7 @@
#include <linux/sysfs.h>
#include <linux/bitops.h>
#include <linux/slab.h>
-#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/nand-ecc-sw-hamming.h>
#include "nand/raw/sm_common.h"
#include "sm_ftl.h"
@@ -216,20 +216,19 @@ static void sm_break_offset(struct sm_ftl *ftl, loff_t loffset,
static int sm_correct_sector(uint8_t *buffer, struct sm_oob *oob)
{
+ bool sm_order = IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC);
uint8_t ecc[3];
- __nand_calculate_ecc(buffer, SM_SMALL_PAGE, ecc,
- IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC));
- if (__nand_correct_data(buffer, ecc, oob->ecc1, SM_SMALL_PAGE,
- IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC)) < 0)
+ ecc_sw_hamming_calculate(buffer, SM_SMALL_PAGE, ecc, sm_order);
+ if (ecc_sw_hamming_correct(buffer, ecc, oob->ecc1, SM_SMALL_PAGE,
+ sm_order) < 0)
return -EIO;
buffer += SM_SMALL_PAGE;
- __nand_calculate_ecc(buffer, SM_SMALL_PAGE, ecc,
- IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC));
- if (__nand_correct_data(buffer, ecc, oob->ecc2, SM_SMALL_PAGE,
- IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC)) < 0)
+ ecc_sw_hamming_calculate(buffer, SM_SMALL_PAGE, ecc, sm_order);
+ if (ecc_sw_hamming_correct(buffer, ecc, oob->ecc2, SM_SMALL_PAGE,
+ sm_order) < 0)
return -EIO;
return 0;
}
@@ -369,6 +368,7 @@ static int sm_write_block(struct sm_ftl *ftl, uint8_t *buf,
int zone, int block, int lba,
unsigned long invalid_bitmap)
{
+ bool sm_order = IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC);
struct sm_oob oob;
int boffset;
int retry = 0;
@@ -395,13 +395,13 @@ restart:
}
if (ftl->smallpagenand) {
- __nand_calculate_ecc(buf + boffset, SM_SMALL_PAGE,
- oob.ecc1,
- IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC));
+ ecc_sw_hamming_calculate(buf + boffset,
+ SM_SMALL_PAGE, oob.ecc1,
+ sm_order);
- __nand_calculate_ecc(buf + boffset + SM_SMALL_PAGE,
- SM_SMALL_PAGE, oob.ecc2,
- IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC));
+ ecc_sw_hamming_calculate(buf + boffset + SM_SMALL_PAGE,
+ SM_SMALL_PAGE, oob.ecc2,
+ sm_order);
}
if (!sm_write_sector(ftl, zone, block, boffset,
buf + boffset, &oob))
diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig
index ffc4b380f2b1..24cd25de2b8b 100644
--- a/drivers/mtd/spi-nor/Kconfig
+++ b/drivers/mtd/spi-nor/Kconfig
@@ -24,6 +24,50 @@ config MTD_SPI_NOR_USE_4K_SECTORS
Please note that some tools/drivers/filesystems may not work with
4096 B erase size (e.g. UBIFS requires 15 KiB as a minimum).
+choice
+ prompt "Software write protection at boot"
+ default MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE
+
+config MTD_SPI_NOR_SWP_DISABLE
+ bool "Disable SWP on any flashes (legacy behavior)"
+ help
+ This option disables the software write protection on all SPI
+ flashes at boot-up.
+
+ Depending on the flash chip this either clears the block protection
+ bits or does a "Global Unprotect" command.
+
+ Don't use this if you intend to use the software write protection
+ of your SPI flash. This option exists only to keep backward
+ compatibility.
+
+config MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE
+ bool "Disable SWP on flashes w/ volatile protection bits"
+ help
+ Some SPI flashes have volatile block protection bits, i.e. after a
+ power-up or a reset the flash is software write protected by
+ default.
+
+ This option disables the software write protection for this kind
+ of flash while keeping it enabled for any other SPI flash
+ which has non-volatile write protection bits.
+
+ When the software write protection is disabled, depending on
+ the flash either the block protection bits are cleared or a
+ "Global Unprotect" command is issued.
+
+ If you are unsure, select this option.
+
+config MTD_SPI_NOR_SWP_KEEP
+ bool "Keep software write protection as is"
+ help
+ If you select this option, the software write protection of any
+ SPI flash will not be changed. If your flash is software write
+ protected, or becomes so automatically after power-up, you have
+ to unlock it manually before you are able to write to it.
+
+endchoice
+
source "drivers/mtd/spi-nor/controllers/Kconfig"
endif # MTD_SPI_NOR
diff --git a/drivers/mtd/spi-nor/atmel.c b/drivers/mtd/spi-nor/atmel.c
index 3f5f21a473a6..1fea5cab492c 100644
--- a/drivers/mtd/spi-nor/atmel.c
+++ b/drivers/mtd/spi-nor/atmel.c
@@ -8,39 +8,192 @@
#include "core.h"
+#define ATMEL_SR_GLOBAL_PROTECT_MASK GENMASK(5, 2)
+
+/*
+ * The Atmel AT25FS010/AT25FS040 parts have some weird configuration for the
+ * block protection bits. We don't support them, but the legacy behavior in
+ * Linux is to unlock the whole flash array on startup. Therefore, we have to
+ * support exactly this operation.
+ */
+static int atmel_at25fs_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+{
+ return -EOPNOTSUPP;
+}
+
+static int atmel_at25fs_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+{
+ int ret;
+
+ /* We only support unlocking the whole flash array */
+ if (ofs || len != nor->params->size)
+ return -EINVAL;
+
+ /* Write 0x00 to the status register to disable write protection */
+ ret = spi_nor_write_sr_and_check(nor, 0);
+ if (ret)
+ dev_dbg(nor->dev, "unable to clear BP bits, WP# asserted?\n");
+
+ return ret;
+}
+
+static int atmel_at25fs_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
+{
+ return -EOPNOTSUPP;
+}
+
+static const struct spi_nor_locking_ops atmel_at25fs_locking_ops = {
+ .lock = atmel_at25fs_lock,
+ .unlock = atmel_at25fs_unlock,
+ .is_locked = atmel_at25fs_is_locked,
+};
+
+static void atmel_at25fs_default_init(struct spi_nor *nor)
+{
+ nor->params->locking_ops = &atmel_at25fs_locking_ops;
+}
+
+static const struct spi_nor_fixups atmel_at25fs_fixups = {
+ .default_init = atmel_at25fs_default_init,
+};
+
+/**
+ * atmel_set_global_protection - Do a Global Protect or Unprotect command
+ * @nor: pointer to 'struct spi_nor'
+ * @ofs: offset in bytes
+ * @len: length in bytes
+ * @is_protect: if true do a Global Protect otherwise it is a Global Unprotect
+ *
+ * Return: 0 on success, -error otherwise.
+ */
+static int atmel_set_global_protection(struct spi_nor *nor, loff_t ofs,
+ uint64_t len, bool is_protect)
+{
+ int ret;
+ u8 sr;
+
+ /* We only support locking the whole flash array */
+ if (ofs || len != nor->params->size)
+ return -EINVAL;
+
+ ret = spi_nor_read_sr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
+
+ sr = nor->bouncebuf[0];
+
+ /* SRWD bit needs to be cleared, otherwise the protection doesn't change */
+ if (sr & SR_SRWD) {
+ sr &= ~SR_SRWD;
+ ret = spi_nor_write_sr_and_check(nor, sr);
+ if (ret) {
+ dev_dbg(nor->dev, "unable to clear SRWD bit, WP# asserted?\n");
+ return ret;
+ }
+ }
+
+ if (is_protect) {
+ sr |= ATMEL_SR_GLOBAL_PROTECT_MASK;
+ /*
+ * Set the SRWD bit again as soon as we are protecting
+ * anything. This will ensure that the WP# pin is working
+ * correctly. By doing this we also behave the same as
+ * spi_nor_sr_lock(), which sets SRWD if any block protection
+ * is active.
+ */
+ sr |= SR_SRWD;
+ } else {
+ sr &= ~ATMEL_SR_GLOBAL_PROTECT_MASK;
+ }
+
+ nor->bouncebuf[0] = sr;
+
+ /*
+ * We cannot use spi_nor_write_sr_and_check() because this command
+ * isn't really setting any bits; instead it is a pseudo command for
+ * "Global Unprotect" or "Global Protect".
+ */
+ return spi_nor_write_sr(nor, nor->bouncebuf, 1);
+}
+
+static int atmel_global_protect(struct spi_nor *nor, loff_t ofs, uint64_t len)
+{
+ return atmel_set_global_protection(nor, ofs, len, true);
+}
+
+static int atmel_global_unprotect(struct spi_nor *nor, loff_t ofs, uint64_t len)
+{
+ return atmel_set_global_protection(nor, ofs, len, false);
+}
+
+static int atmel_is_global_protected(struct spi_nor *nor, loff_t ofs, uint64_t len)
+{
+ int ret;
+
+ if (ofs >= nor->params->size || (ofs + len) > nor->params->size)
+ return -EINVAL;
+
+ ret = spi_nor_read_sr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
+
+ return ((nor->bouncebuf[0] & ATMEL_SR_GLOBAL_PROTECT_MASK) == ATMEL_SR_GLOBAL_PROTECT_MASK);
+}
+
+static const struct spi_nor_locking_ops atmel_global_protection_ops = {
+ .lock = atmel_global_protect,
+ .unlock = atmel_global_unprotect,
+ .is_locked = atmel_is_global_protected,
+};
+
+static void atmel_global_protection_default_init(struct spi_nor *nor)
+{
+ nor->params->locking_ops = &atmel_global_protection_ops;
+}
+
+static const struct spi_nor_fixups atmel_global_protection_fixups = {
+ .default_init = atmel_global_protection_default_init,
+};
+
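Concretely, ATMEL_SR_GLOBAL_PROTECT_MASK = GENMASK(5, 2) = 0x3c, so a "Global Protect" writes the status register with all four BP bits plus SR_SRWD set, a "Global Unprotect" clears the BP field, and atmel_is_global_protected() reports the array locked only when all four bits read back set.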
static const struct flash_info atmel_parts[] = {
/* Atmel -- some are (confusingly) marketed as "DataFlash" */
- { "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4, SECT_4K) },
- { "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K) },
+ { "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4, SECT_4K | SPI_NOR_HAS_LOCK)
+ .fixups = &atmel_at25fs_fixups },
+ { "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_HAS_LOCK)
+ .fixups = &atmel_at25fs_fixups },
- { "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8, SECT_4K) },
- { "at25df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
- { "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) },
- { "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) },
+ { "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8,
+ SECT_4K | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ .fixups = &atmel_global_protection_fixups },
+ { "at25df321", INFO(0x1f4700, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ .fixups = &atmel_global_protection_fixups },
+ { "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ .fixups = &atmel_global_protection_fixups },
+ { "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ .fixups = &atmel_global_protection_fixups },
{ "at25sl321", INFO(0x1f4216, 0, 64 * 1024, 64,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) },
- { "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) },
- { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) },
- { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
+ { "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16,
+ SECT_4K | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ .fixups = &atmel_global_protection_fixups },
+ { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32,
+ SECT_4K | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ .fixups = &atmel_global_protection_fixups },
+ { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ .fixups = &atmel_global_protection_fixups },
{ "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16, SECT_4K) },
};
-static void atmel_default_init(struct spi_nor *nor)
-{
- nor->flags |= SNOR_F_HAS_LOCK;
-}
-
-static const struct spi_nor_fixups atmel_fixups = {
- .default_init = atmel_default_init,
-};
-
const struct spi_nor_manufacturer spi_nor_atmel = {
.name = "atmel",
.parts = atmel_parts,
.nparts = ARRAY_SIZE(atmel_parts),
- .fixups = &atmel_fixups,
};
diff --git a/drivers/mtd/spi-nor/controllers/hisi-sfc.c b/drivers/mtd/spi-nor/controllers/hisi-sfc.c
index 95c502173cbd..7c26f8f565cb 100644
--- a/drivers/mtd/spi-nor/controllers/hisi-sfc.c
+++ b/drivers/mtd/spi-nor/controllers/hisi-sfc.c
@@ -320,7 +320,7 @@ static const struct spi_nor_controller_ops hisi_controller_ops = {
.write = hisi_spi_nor_write,
};
-/**
+/*
* Get spi flash device information and register it as a mtd device.
*/
static int hisi_spi_nor_register(struct device_node *np,
diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
index f0ae7a01703a..20df44b753da 100644
--- a/drivers/mtd/spi-nor/core.c
+++ b/drivers/mtd/spi-nor/core.c
@@ -40,6 +40,81 @@
#define SPI_NOR_MAX_ADDR_WIDTH 4
+#define SPI_NOR_SRST_SLEEP_MIN 200
+#define SPI_NOR_SRST_SLEEP_MAX 400
+
+/**
+ * spi_nor_get_cmd_ext() - Get the command opcode extension based on the
+ * extension type.
+ * @nor: pointer to a 'struct spi_nor'
+ * @op: pointer to the 'struct spi_mem_op' whose properties
+ * need to be initialized.
+ *
+ * Right now, only "repeat" and "invert" are supported.
+ *
+ * Return: The opcode extension.
+ */
+static u8 spi_nor_get_cmd_ext(const struct spi_nor *nor,
+ const struct spi_mem_op *op)
+{
+ switch (nor->cmd_ext_type) {
+ case SPI_NOR_EXT_INVERT:
+ return ~op->cmd.opcode;
+
+ case SPI_NOR_EXT_REPEAT:
+ return op->cmd.opcode;
+
+ default:
+ dev_err(nor->dev, "Unknown command extension type\n");
+ return 0;
+ }
+}
+
+/**
+ * spi_nor_spimem_setup_op() - Set up common properties of a spi-mem op.
+ * @nor: pointer to a 'struct spi_nor'
+ * @op: pointer to the 'struct spi_mem_op' whose properties
+ * need to be initialized.
+ * @proto: the protocol from which the properties need to be set.
+ */
+void spi_nor_spimem_setup_op(const struct spi_nor *nor,
+ struct spi_mem_op *op,
+ const enum spi_nor_protocol proto)
+{
+ u8 ext;
+
+ op->cmd.buswidth = spi_nor_get_protocol_inst_nbits(proto);
+
+ if (op->addr.nbytes)
+ op->addr.buswidth = spi_nor_get_protocol_addr_nbits(proto);
+
+ if (op->dummy.nbytes)
+ op->dummy.buswidth = spi_nor_get_protocol_addr_nbits(proto);
+
+ if (op->data.nbytes)
+ op->data.buswidth = spi_nor_get_protocol_data_nbits(proto);
+
+ if (spi_nor_protocol_is_dtr(proto)) {
+ /*
+ * SPIMEM supports mixed DTR modes, but right now we can only
+ * have all phases either DTR or STR. IOW, SPIMEM can have
+ * something like 4S-4D-4D, but SPI NOR can't. So, set all 4
+ * phases to either DTR or STR.
+ */
+ op->cmd.dtr = true;
+ op->addr.dtr = true;
+ op->dummy.dtr = true;
+ op->data.dtr = true;
+
+ /* 2 bytes per clock cycle in DTR mode. */
+ op->dummy.nbytes *= 2;
+
+ ext = spi_nor_get_cmd_ext(nor, op);
+ op->cmd.opcode = (op->cmd.opcode << 8) | ext;
+ op->cmd.nbytes = 2;
+ }
+}
+
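For example (0x0B is only an illustrative opcode): with the "invert" extension a Fast Read opcode 0x0B is sent as the two-byte DTR command 0x0BF4, since (u8)~0x0B == 0xF4, while "repeat" would send 0x0B0B; the dummy byte count is likewise doubled because DTR transfers two bytes per clock cycle.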
/**
* spi_nor_spimem_bounce() - check if a bounce buffer is needed for the data
* transfer
@@ -82,6 +157,32 @@ static int spi_nor_spimem_exec_op(struct spi_nor *nor, struct spi_mem_op *op)
return spi_mem_exec_op(nor->spimem, op);
}
+static int spi_nor_controller_ops_read_reg(struct spi_nor *nor, u8 opcode,
+ u8 *buf, size_t len)
+{
+ if (spi_nor_protocol_is_dtr(nor->reg_proto))
+ return -EOPNOTSUPP;
+
+ return nor->controller_ops->read_reg(nor, opcode, buf, len);
+}
+
+static int spi_nor_controller_ops_write_reg(struct spi_nor *nor, u8 opcode,
+ const u8 *buf, size_t len)
+{
+ if (spi_nor_protocol_is_dtr(nor->reg_proto))
+ return -EOPNOTSUPP;
+
+ return nor->controller_ops->write_reg(nor, opcode, buf, len);
+}
+
+static int spi_nor_controller_ops_erase(struct spi_nor *nor, loff_t offs)
+{
+ if (spi_nor_protocol_is_dtr(nor->write_proto))
+ return -EOPNOTSUPP;
+
+ return nor->controller_ops->erase(nor, offs);
+}
+
/**
* spi_nor_spimem_read_data() - read data from flash's memory region via
* spi-mem
@@ -96,22 +197,20 @@ static ssize_t spi_nor_spimem_read_data(struct spi_nor *nor, loff_t from,
size_t len, u8 *buf)
{
struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 1),
- SPI_MEM_OP_ADDR(nor->addr_width, from, 1),
- SPI_MEM_OP_DUMMY(nor->read_dummy, 1),
- SPI_MEM_OP_DATA_IN(len, buf, 1));
+ SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 0),
+ SPI_MEM_OP_ADDR(nor->addr_width, from, 0),
+ SPI_MEM_OP_DUMMY(nor->read_dummy, 0),
+ SPI_MEM_OP_DATA_IN(len, buf, 0));
bool usebouncebuf;
ssize_t nbytes;
int error;
- /* get transfer protocols. */
- op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->read_proto);
- op.addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->read_proto);
- op.dummy.buswidth = op.addr.buswidth;
- op.data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto);
+ spi_nor_spimem_setup_op(nor, &op, nor->read_proto);
/* convert the dummy cycles to the number of bytes */
op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8;
+ if (spi_nor_protocol_is_dtr(nor->read_proto))
+ op.dummy.nbytes *= 2;
usebouncebuf = spi_nor_spimem_bounce(nor, &op);
@@ -162,20 +261,18 @@ static ssize_t spi_nor_spimem_write_data(struct spi_nor *nor, loff_t to,
size_t len, const u8 *buf)
{
struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 1),
- SPI_MEM_OP_ADDR(nor->addr_width, to, 1),
+ SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 0),
+ SPI_MEM_OP_ADDR(nor->addr_width, to, 0),
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_OUT(len, buf, 1));
+ SPI_MEM_OP_DATA_OUT(len, buf, 0));
ssize_t nbytes;
int error;
- op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->write_proto);
- op.addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->write_proto);
- op.data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto);
-
if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
op.addr.nbytes = 0;
+ spi_nor_spimem_setup_op(nor, &op, nor->write_proto);
+
if (spi_nor_spimem_bounce(nor, &op))
memcpy(nor->bouncebuf, buf, op.data.nbytes);
@@ -222,15 +319,17 @@ int spi_nor_write_enable(struct spi_nor *nor)
if (nor->spimem) {
struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREN, 1),
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREN, 0),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_NO_DATA);
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
+
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
- ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WREN,
- NULL, 0);
+ ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WREN,
+ NULL, 0);
}
if (ret)
@@ -251,15 +350,17 @@ int spi_nor_write_disable(struct spi_nor *nor)
if (nor->spimem) {
struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRDI, 1),
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRDI, 0),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_NO_DATA);
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
+
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
- ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRDI,
- NULL, 0);
+ ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRDI,
+ NULL, 0);
}
if (ret)
@@ -272,25 +373,37 @@ int spi_nor_write_disable(struct spi_nor *nor)
* spi_nor_read_sr() - Read the Status Register.
* @nor: pointer to 'struct spi_nor'.
* @sr: pointer to a DMA-able buffer where the value of the
- * Status Register will be written.
+ * Status Register will be written. Should be at least 2 bytes.
*
* Return: 0 on success, -errno otherwise.
*/
-static int spi_nor_read_sr(struct spi_nor *nor, u8 *sr)
+int spi_nor_read_sr(struct spi_nor *nor, u8 *sr)
{
int ret;
if (nor->spimem) {
struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 1),
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 0),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_IN(1, sr, 1));
+ SPI_MEM_OP_DATA_IN(1, sr, 0));
+
+ if (nor->reg_proto == SNOR_PROTO_8_8_8_DTR) {
+ op.addr.nbytes = nor->params->rdsr_addr_nbytes;
+ op.dummy.nbytes = nor->params->rdsr_dummy;
+ /*
+ * We don't want to read only one byte in DTR mode. So,
+ * read 2 and then discard the second byte.
+ */
+ op.data.nbytes = 2;
+ }
+
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
- ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDSR,
- sr, 1);
+ ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDSR, sr,
+ 1);
}
if (ret)
@@ -303,7 +416,8 @@ static int spi_nor_read_sr(struct spi_nor *nor, u8 *sr)
* spi_nor_read_fsr() - Read the Flag Status Register.
* @nor: pointer to 'struct spi_nor'
* @fsr: pointer to a DMA-able buffer where the value of the
- * Flag Status Register will be written.
+ * Flag Status Register will be written. Should be at least 2
+ * bytes.
*
* Return: 0 on success, -errno otherwise.
*/
@@ -313,15 +427,27 @@ static int spi_nor_read_fsr(struct spi_nor *nor, u8 *fsr)
if (nor->spimem) {
struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDFSR, 1),
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDFSR, 0),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_IN(1, fsr, 1));
+ SPI_MEM_OP_DATA_IN(1, fsr, 0));
+
+ if (nor->reg_proto == SNOR_PROTO_8_8_8_DTR) {
+ op.addr.nbytes = nor->params->rdsr_addr_nbytes;
+ op.dummy.nbytes = nor->params->rdsr_dummy;
+ /*
+ * We don't want to read only one byte in DTR mode. So,
+ * read 2 and then discard the second byte.
+ */
+ op.data.nbytes = 2;
+ }
+
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
- ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDFSR,
- fsr, 1);
+ ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDFSR, fsr,
+ 1);
}
if (ret)
@@ -345,14 +471,17 @@ static int spi_nor_read_cr(struct spi_nor *nor, u8 *cr)
if (nor->spimem) {
struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDCR, 1),
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDCR, 0),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_IN(1, cr, 1));
+ SPI_MEM_OP_DATA_IN(1, cr, 0));
+
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
- ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDCR, cr, 1);
+ ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDCR, cr,
+ 1);
}
if (ret)
@@ -378,17 +507,19 @@ int spi_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
SPI_MEM_OP(SPI_MEM_OP_CMD(enable ?
SPINOR_OP_EN4B :
SPINOR_OP_EX4B,
- 1),
+ 0),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_NO_DATA);
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
+
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
- ret = nor->controller_ops->write_reg(nor,
- enable ? SPINOR_OP_EN4B :
- SPINOR_OP_EX4B,
- NULL, 0);
+ ret = spi_nor_controller_ops_write_reg(nor,
+ enable ? SPINOR_OP_EN4B :
+ SPINOR_OP_EX4B,
+ NULL, 0);
}
if (ret)
@@ -414,15 +545,17 @@ static int spansion_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
if (nor->spimem) {
struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_BRWR, 1),
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_BRWR, 0),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1));
+ SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 0));
+
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
- ret = nor->controller_ops->write_reg(nor, SPINOR_OP_BRWR,
- nor->bouncebuf, 1);
+ ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_BRWR,
+ nor->bouncebuf, 1);
}
if (ret)
@@ -446,15 +579,17 @@ int spi_nor_write_ear(struct spi_nor *nor, u8 ear)
if (nor->spimem) {
struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREAR, 1),
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREAR, 0),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1));
+ SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 0));
+
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
- ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WREAR,
- nor->bouncebuf, 1);
+ ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WREAR,
+ nor->bouncebuf, 1);
}
if (ret)
@@ -477,15 +612,17 @@ int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr)
if (nor->spimem) {
struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_XRDSR, 1),
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_XRDSR, 0),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_IN(1, sr, 1));
+ SPI_MEM_OP_DATA_IN(1, sr, 0));
+
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
- ret = nor->controller_ops->read_reg(nor, SPINOR_OP_XRDSR,
- sr, 1);
+ ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_XRDSR, sr,
+ 1);
}
if (ret)
@@ -522,15 +659,17 @@ static void spi_nor_clear_sr(struct spi_nor *nor)
if (nor->spimem) {
struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLSR, 1),
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLSR, 0),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_NO_DATA);
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
+
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
- ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CLSR,
- NULL, 0);
+ ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_CLSR,
+ NULL, 0);
}
if (ret)
@@ -586,15 +725,17 @@ static void spi_nor_clear_fsr(struct spi_nor *nor)
if (nor->spimem) {
struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLFSR, 1),
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLFSR, 0),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_NO_DATA);
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
+
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
- ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CLFSR,
- NULL, 0);
+ ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_CLFSR,
+ NULL, 0);
}
if (ret)
@@ -720,7 +861,7 @@ int spi_nor_wait_till_ready(struct spi_nor *nor)
*
* Return: 0 on success, -errno otherwise.
*/
-static int spi_nor_write_sr(struct spi_nor *nor, const u8 *sr, size_t len)
+int spi_nor_write_sr(struct spi_nor *nor, const u8 *sr, size_t len)
{
int ret;
@@ -730,15 +871,17 @@ static int spi_nor_write_sr(struct spi_nor *nor, const u8 *sr, size_t len)
if (nor->spimem) {
struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 1),
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 0),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_OUT(len, sr, 1));
+ SPI_MEM_OP_DATA_OUT(len, sr, 0));
+
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
- ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRSR,
- sr, len);
+ ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRSR, sr,
+ len);
}
if (ret) {
@@ -906,7 +1049,7 @@ static int spi_nor_write_16bit_cr_and_check(struct spi_nor *nor, u8 cr)
*
* Return: 0 on success, -errno otherwise.
*/
-static int spi_nor_write_sr_and_check(struct spi_nor *nor, u8 sr1)
+int spi_nor_write_sr_and_check(struct spi_nor *nor, u8 sr1)
{
if (nor->flags & SNOR_F_HAS_16BIT_SR)
return spi_nor_write_16bit_sr_and_check(nor, sr1);
@@ -932,15 +1075,17 @@ static int spi_nor_write_sr2(struct spi_nor *nor, const u8 *sr2)
if (nor->spimem) {
struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR2, 1),
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR2, 0),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_OUT(1, sr2, 1));
+ SPI_MEM_OP_DATA_OUT(1, sr2, 0));
+
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
- ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRSR2,
- sr2, 1);
+ ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRSR2,
+ sr2, 1);
}
if (ret) {
@@ -966,15 +1111,17 @@ static int spi_nor_read_sr2(struct spi_nor *nor, u8 *sr2)
if (nor->spimem) {
struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR2, 1),
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR2, 0),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_IN(1, sr2, 1));
+ SPI_MEM_OP_DATA_IN(1, sr2, 0));
+
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
- ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDSR2,
- sr2, 1);
+ ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDSR2, sr2,
+ 1);
}
if (ret)
@@ -997,15 +1144,18 @@ static int spi_nor_erase_chip(struct spi_nor *nor)
if (nor->spimem) {
struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CHIP_ERASE, 1),
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CHIP_ERASE, 0),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_NO_DATA);
+ spi_nor_spimem_setup_op(nor, &op, nor->write_proto);
+
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
- ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CHIP_ERASE,
- NULL, 0);
+ ret = spi_nor_controller_ops_write_reg(nor,
+ SPINOR_OP_CHIP_ERASE,
+ NULL, 0);
}
if (ret)
@@ -1139,14 +1289,16 @@ static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
if (nor->spimem) {
struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(nor->erase_opcode, 1),
- SPI_MEM_OP_ADDR(nor->addr_width, addr, 1),
+ SPI_MEM_OP(SPI_MEM_OP_CMD(nor->erase_opcode, 0),
+ SPI_MEM_OP_ADDR(nor->addr_width, addr, 0),
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_NO_DATA);
+ spi_nor_spimem_setup_op(nor, &op, nor->write_proto);
+
return spi_mem_exec_op(nor->spimem, &op);
} else if (nor->controller_ops->erase) {
- return nor->controller_ops->erase(nor, addr);
+ return spi_nor_controller_ops_erase(nor, addr);
}
/*
@@ -1158,8 +1310,8 @@ static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
addr >>= 8;
}
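	/*
	 * Editorial example: with addr_width == 3 and addr == 0x123456, the
	 * loop above (largely elided by this hunk) leaves bouncebuf holding
	 * { 0x12, 0x34, 0x56 }, i.e. the address most-significant byte first.
	 */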
- return nor->controller_ops->write_reg(nor, nor->erase_opcode,
- nor->bouncebuf, nor->addr_width);
+ return spi_nor_controller_ops_write_reg(nor, nor->erase_opcode,
+ nor->bouncebuf, nor->addr_width);
}
/**
@@ -1447,7 +1599,7 @@ destroy_erase_cmd_list:
/*
* Erase an address range on the nor chip. The address range may extend
- * one or more erase sectors. Return an error is there is a problem erasing.
+ * one or more erase sectors. Return an error if there is a problem erasing.
*/
static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
{
@@ -2204,7 +2356,7 @@ static int spi_nor_check(struct spi_nor *nor)
return 0;
}
-static void
+void
spi_nor_set_read_settings(struct spi_nor_read_command *read,
u8 num_mode_clocks,
u8 num_wait_states,
@@ -2253,6 +2405,7 @@ int spi_nor_hwcaps_read2cmd(u32 hwcaps)
{ SNOR_HWCAPS_READ_1_8_8, SNOR_CMD_READ_1_8_8 },
{ SNOR_HWCAPS_READ_8_8_8, SNOR_CMD_READ_8_8_8 },
{ SNOR_HWCAPS_READ_1_8_8_DTR, SNOR_CMD_READ_1_8_8_DTR },
+ { SNOR_HWCAPS_READ_8_8_8_DTR, SNOR_CMD_READ_8_8_8_DTR },
};
return spi_nor_hwcaps2cmd(hwcaps, hwcaps_read2cmd,
@@ -2269,6 +2422,7 @@ static int spi_nor_hwcaps_pp2cmd(u32 hwcaps)
{ SNOR_HWCAPS_PP_1_1_8, SNOR_CMD_PP_1_1_8 },
{ SNOR_HWCAPS_PP_1_8_8, SNOR_CMD_PP_1_8_8 },
{ SNOR_HWCAPS_PP_8_8_8, SNOR_CMD_PP_8_8_8 },
+ { SNOR_HWCAPS_PP_8_8_8_DTR, SNOR_CMD_PP_8_8_8_DTR },
};
return spi_nor_hwcaps2cmd(hwcaps, hwcaps_pp2cmd,
@@ -2281,7 +2435,7 @@ static int spi_nor_hwcaps_pp2cmd(u32 hwcaps)
*@nor: pointer to a 'struct spi_nor'
*@op: pointer to op template to be checked
*
- * Returns 0 if operation is supported, -ENOTSUPP otherwise.
+ * Returns 0 if operation is supported, -EOPNOTSUPP otherwise.
*/
static int spi_nor_spimem_check_op(struct spi_nor *nor,
struct spi_mem_op *op)
@@ -2295,12 +2449,12 @@ static int spi_nor_spimem_check_op(struct spi_nor *nor,
op->addr.nbytes = 4;
if (!spi_mem_supports_op(nor->spimem, op)) {
if (nor->mtd.size > SZ_16M)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
/* If flash size <= 16MB, 3 address bytes are sufficient */
op->addr.nbytes = 3;
if (!spi_mem_supports_op(nor->spimem, op))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
return 0;
@@ -2312,22 +2466,22 @@ static int spi_nor_spimem_check_op(struct spi_nor *nor,
*@nor: pointer to a 'struct spi_nor'
*@read: pointer to op template to be checked
*
- * Returns 0 if operation is supported, -ENOTSUPP otherwise.
+ * Returns 0 if operation is supported, -EOPNOTSUPP otherwise.
*/
static int spi_nor_spimem_check_readop(struct spi_nor *nor,
const struct spi_nor_read_command *read)
{
- struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(read->opcode, 1),
- SPI_MEM_OP_ADDR(3, 0, 1),
- SPI_MEM_OP_DUMMY(0, 1),
- SPI_MEM_OP_DATA_IN(0, NULL, 1));
+ struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(read->opcode, 0),
+ SPI_MEM_OP_ADDR(3, 0, 0),
+ SPI_MEM_OP_DUMMY(1, 0),
+ SPI_MEM_OP_DATA_IN(1, NULL, 0));
- op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(read->proto);
- op.addr.buswidth = spi_nor_get_protocol_addr_nbits(read->proto);
- op.data.buswidth = spi_nor_get_protocol_data_nbits(read->proto);
- op.dummy.buswidth = op.addr.buswidth;
- op.dummy.nbytes = (read->num_mode_clocks + read->num_wait_states) *
- op.dummy.buswidth / 8;
+ spi_nor_spimem_setup_op(nor, &op, read->proto);
+
+ /* convert the dummy cycles to the number of bytes */
+ op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8;
+ if (spi_nor_protocol_is_dtr(nor->read_proto))
+ op.dummy.nbytes *= 2;
return spi_nor_spimem_check_op(nor, &op);
}
@@ -2338,19 +2492,17 @@ static int spi_nor_spimem_check_readop(struct spi_nor *nor,
*@nor: pointer to a 'struct spi_nor'
*@pp: pointer to op template to be checked
*
- * Returns 0 if operation is supported, -ENOTSUPP otherwise.
+ * Returns 0 if operation is supported, -EOPNOTSUPP otherwise.
*/
static int spi_nor_spimem_check_pp(struct spi_nor *nor,
const struct spi_nor_pp_command *pp)
{
- struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(pp->opcode, 1),
- SPI_MEM_OP_ADDR(3, 0, 1),
+ struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(pp->opcode, 0),
+ SPI_MEM_OP_ADDR(3, 0, 0),
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_OUT(0, NULL, 1));
+ SPI_MEM_OP_DATA_OUT(1, NULL, 0));
- op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(pp->proto);
- op.addr.buswidth = spi_nor_get_protocol_addr_nbits(pp->proto);
- op.data.buswidth = spi_nor_get_protocol_data_nbits(pp->proto);
+ spi_nor_spimem_setup_op(nor, &op, pp->proto);
return spi_nor_spimem_check_op(nor, &op);
}
@@ -2368,12 +2520,16 @@ spi_nor_spimem_adjust_hwcaps(struct spi_nor *nor, u32 *hwcaps)
struct spi_nor_flash_parameter *params = nor->params;
unsigned int cap;
- /* DTR modes are not supported yet, mask them all. */
- *hwcaps &= ~SNOR_HWCAPS_DTR;
-
/* X-X-X modes are not supported yet, mask them all. */
*hwcaps &= ~SNOR_HWCAPS_X_X_X;
+ /*
+ * If the reset line is broken, we do not want to enter a stateful
+ * mode.
+ */
+ if (nor->flags & SNOR_F_BROKEN_RESET)
+ *hwcaps &= ~(SNOR_HWCAPS_X_X_X | SNOR_HWCAPS_X_X_X_DTR);
+
for (cap = 0; cap < sizeof(*hwcaps) * BITS_PER_BYTE; cap++) {
int rdidx, ppidx;
@@ -2537,7 +2693,7 @@ spi_nor_select_uniform_erase(struct spi_nor_erase_map *map,
}
/*
- * Otherwise, the current erase size is still a valid canditate.
+ * Otherwise, the current erase size is still a valid candidate.
* Select the biggest valid candidate.
*/
if (!erase && tested_erase->size)
@@ -2628,7 +2784,7 @@ static int spi_nor_default_setup(struct spi_nor *nor,
* controller directly implements the spi_nor interface.
* Yet another reason to switch to spi-mem.
*/
- ignored_mask = SNOR_HWCAPS_X_X_X;
+ ignored_mask = SNOR_HWCAPS_X_X_X | SNOR_HWCAPS_X_X_X_DTR;
if (shared_mask & ignored_mask) {
dev_dbg(nor->dev,
"SPI n-n-n protocols are not supported.\n");
@@ -2729,6 +2885,7 @@ static void spi_nor_info_init_params(struct spi_nor *nor)
nor->flags |= SNOR_F_HAS_16BIT_SR;
/* Set SPI NOR sizes. */
+ params->writesize = 1;
params->size = (u64)info->sector_size * info->n_sectors;
params->page_size = info->page_size;
@@ -2773,11 +2930,28 @@ static void spi_nor_info_init_params(struct spi_nor *nor)
SNOR_PROTO_1_1_8);
}
+ if (info->flags & SPI_NOR_OCTAL_DTR_READ) {
+ params->hwcaps.mask |= SNOR_HWCAPS_READ_8_8_8_DTR;
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_8_8_8_DTR],
+ 0, 20, SPINOR_OP_READ_FAST,
+ SNOR_PROTO_8_8_8_DTR);
+ }
+
/* Page Program settings. */
params->hwcaps.mask |= SNOR_HWCAPS_PP;
spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
SPINOR_OP_PP, SNOR_PROTO_1_1_1);
+ if (info->flags & SPI_NOR_OCTAL_DTR_PP) {
+ params->hwcaps.mask |= SNOR_HWCAPS_PP_8_8_8_DTR;
+ /*
+ * Since xSPI Page Program opcode is backward compatible with
+ * Legacy SPI, use Legacy SPI opcode there as well.
+ */
+ spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_8_8_8_DTR],
+ SPINOR_OP_PP, SNOR_PROTO_8_8_8_DTR);
+ }
+
/*
* Sector Erase settings. Sort Erase Types in ascending order, with the
* smallest erase size starting at BIT(0).
@@ -2885,7 +3059,8 @@ static int spi_nor_init_params(struct spi_nor *nor)
spi_nor_manufacturer_init_params(nor);
- if ((nor->info->flags & (SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)) &&
+ if ((nor->info->flags & (SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_OCTAL_READ | SPI_NOR_OCTAL_DTR_READ)) &&
!(nor->info->flags & SPI_NOR_SKIP_SFDP))
spi_nor_sfdp_init_params(nor);
@@ -2896,6 +3071,38 @@ static int spi_nor_init_params(struct spi_nor *nor)
return 0;
}
+/**
+ * spi_nor_octal_dtr_enable() - enable Octal DTR I/O if needed
+ * @nor: pointer to a 'struct spi_nor'
+ * @enable: whether to enable or disable Octal DTR
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_octal_dtr_enable(struct spi_nor *nor, bool enable)
+{
+ int ret;
+
+ if (!nor->params->octal_dtr_enable)
+ return 0;
+
+ if (!(nor->read_proto == SNOR_PROTO_8_8_8_DTR &&
+ nor->write_proto == SNOR_PROTO_8_8_8_DTR))
+ return 0;
+
+ if (!(nor->flags & SNOR_F_IO_MODE_EN_VOLATILE))
+ return 0;
+
+ ret = nor->params->octal_dtr_enable(nor, enable);
+ if (ret)
+ return ret;
+
+ if (enable)
+ nor->reg_proto = SNOR_PROTO_8_8_8_DTR;
+ else
+ nor->reg_proto = SNOR_PROTO_1_1_1;
+
+ return 0;
+}
+
/**
* spi_nor_quad_enable() - enable Quad I/O if needed.
* @nor: pointer to a 'struct spi_nor'
@@ -2915,39 +3122,65 @@ static int spi_nor_quad_enable(struct spi_nor *nor)
}
/**
- * spi_nor_unlock_all() - Unlocks the entire flash memory array.
+ * spi_nor_try_unlock_all() - Tries to unlock the entire flash memory array.
* @nor: pointer to a 'struct spi_nor'.
*
* Some SPI NOR flashes are write protected by default after a power-on reset
* cycle, in order to avoid inadvertent writes during power-up. Backward
* compatibility requires us to unlock the entire flash memory array at power-up
* by default.
+ *
+ * Unprotecting the entire flash array will fail for boards which are hardware
+ * write-protected. Thus any errors are ignored.
*/
-static int spi_nor_unlock_all(struct spi_nor *nor)
+static void spi_nor_try_unlock_all(struct spi_nor *nor)
{
- if (nor->flags & SNOR_F_HAS_LOCK)
- return spi_nor_unlock(&nor->mtd, 0, nor->params->size);
+ int ret;
- return 0;
+ if (!(nor->flags & SNOR_F_HAS_LOCK))
+ return;
+
+ dev_dbg(nor->dev, "Unprotecting entire flash array\n");
+
+ ret = spi_nor_unlock(&nor->mtd, 0, nor->params->size);
+ if (ret)
+ dev_dbg(nor->dev, "Failed to unlock the entire flash memory array\n");
}
static int spi_nor_init(struct spi_nor *nor)
{
int err;
- err = spi_nor_quad_enable(nor);
+ err = spi_nor_octal_dtr_enable(nor, true);
if (err) {
- dev_dbg(nor->dev, "quad mode not supported\n");
+ dev_dbg(nor->dev, "octal mode not supported\n");
return err;
}
- err = spi_nor_unlock_all(nor);
+ err = spi_nor_quad_enable(nor);
if (err) {
- dev_dbg(nor->dev, "Failed to unlock the entire flash memory array\n");
+ dev_dbg(nor->dev, "quad mode not supported\n");
return err;
}
- if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES)) {
+ /*
+ * Some SPI NOR flashes are write protected by default after a power-on
+ * reset cycle, in order to avoid inadvertent writes during power-up.
+ * Backward compatibility requires us to unlock the entire flash memory
+ * array at power-up by default. Depending on the kernel configuration
+ * (1) do nothing, (2) always unlock the entire flash array or (3)
+ * unlock the entire flash array only when the software write
+ * protection bits are volatile. The latter is indicated by
+ * SNOR_F_SWP_IS_VOLATILE.
+ */
+ if (IS_ENABLED(CONFIG_MTD_SPI_NOR_SWP_DISABLE) ||
+ (IS_ENABLED(CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE) &&
+ nor->flags & SNOR_F_SWP_IS_VOLATILE))
+ spi_nor_try_unlock_all(nor);
+
+ if (nor->addr_width == 4 &&
+ nor->read_proto != SNOR_PROTO_8_8_8_DTR &&
+ !(nor->flags & SNOR_F_4B_OPCODES)) {
/*
* If the RESET# pin isn't hooked up properly, or the system
* otherwise doesn't perform a reset command in the boot
@@ -2963,6 +3196,59 @@ static int spi_nor_init(struct spi_nor *nor)
return 0;
}
+static void spi_nor_soft_reset(struct spi_nor *nor)
+{
+ struct spi_mem_op op;
+ int ret;
+
+ op = (struct spi_mem_op)SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_SRSTEN, 0),
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DATA);
+
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ if (ret) {
+ dev_warn(nor->dev, "Software reset failed: %d\n", ret);
+ return;
+ }
+
+ op = (struct spi_mem_op)SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_SRST, 0),
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DATA);
+
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ if (ret) {
+ dev_warn(nor->dev, "Software reset failed: %d\n", ret);
+ return;
+ }
+
+ /*
+ * Software Reset is not instant, and the delay varies from flash to
+ * flash. Looking at a few flashes, most range somewhere below 100
+ * microseconds. So, sleep for a range of 200-400 us.
+ */
+ usleep_range(SPI_NOR_SRST_SLEEP_MIN, SPI_NOR_SRST_SLEEP_MAX);
+}
+
+/* mtd suspend handler */
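+/*
+ * Editorial note: dropping out of Octal DTR on suspend returns the flash to
+ * extended SPI mode, so that spi_nor_resume() -> spi_nor_init() re-enables
+ * it from the same known state the driver originally probed in.
+ */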
+static int spi_nor_suspend(struct mtd_info *mtd)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ int ret;
+
+ /* Disable octal DTR mode if we enabled it. */
+ ret = spi_nor_octal_dtr_enable(nor, false);
+ if (ret)
+ dev_err(nor->dev, "suspend() failed\n");
+
+ return ret;
+}
+
/* mtd resume handler */
static void spi_nor_resume(struct mtd_info *mtd)
{
@@ -2982,6 +3268,9 @@ void spi_nor_restore(struct spi_nor *nor)
if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES) &&
nor->flags & SNOR_F_BROKEN_RESET)
nor->params->set_4byte_addr_mode(nor, false);
+
+ if (nor->flags & SNOR_F_SOFT_RESET)
+ spi_nor_soft_reset(nor);
}
EXPORT_SYMBOL_GPL(spi_nor_restore);
@@ -3006,6 +3295,20 @@ static int spi_nor_set_addr_width(struct spi_nor *nor)
{
if (nor->addr_width) {
/* already configured from SFDP */
+ } else if (nor->read_proto == SNOR_PROTO_8_8_8_DTR) {
+ /*
+ * In 8D-8D-8D mode, one byte takes half a cycle to transfer. So
+ * in this protocol an odd address width cannot be used because
+ * then the address phase would only span a cycle and a half.
+ * Half a cycle would be left over. We would then have to start
+ * the dummy phase in the middle of a cycle and so too the data
+ * phase, and we will end the transaction with half a cycle left
+ * over.
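+ * For example, a 3-byte address would span just one and a half
+ * cycles (editorial illustration).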
+ *
+ * Force all 8D-8D-8D flashes to use an address width of 4 to
+ * avoid this situation.
+ */
+ nor->addr_width = 4;
} else if (nor->info->addr_width) {
nor->addr_width = nor->info->addr_width;
} else {
@@ -3146,11 +3449,12 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
mtd->name = dev_name(dev);
mtd->priv = nor;
mtd->type = MTD_NORFLASH;
- mtd->writesize = 1;
+ mtd->writesize = nor->params->writesize;
mtd->flags = MTD_CAP_NORFLASH;
mtd->size = nor->params->size;
mtd->_erase = spi_nor_erase;
mtd->_read = spi_nor_read;
+ mtd->_suspend = spi_nor_suspend;
mtd->_resume = spi_nor_resume;
if (nor->params->locking_ops) {
@@ -3171,6 +3475,8 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
if (info->flags & USE_CLSR)
nor->flags |= SNOR_F_USE_CLSR;
+ if (info->flags & SPI_NOR_SWP_IS_VOLATILE)
+ nor->flags |= SNOR_F_SWP_IS_VOLATILE;
if (info->flags & SPI_NOR_4BIT_BP) {
nor->flags |= SNOR_F_HAS_4BIT_BP;
@@ -3201,6 +3507,9 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
if (info->flags & SPI_NOR_4B_OPCODES)
nor->flags |= SNOR_F_4B_OPCODES;
+ if (info->flags & SPI_NOR_IO_MODE_EN_VOLATILE)
+ nor->flags |= SNOR_F_IO_MODE_EN_VOLATILE;
+
ret = spi_nor_set_addr_width(nor);
if (ret)
return ret;
@@ -3236,23 +3545,28 @@ EXPORT_SYMBOL_GPL(spi_nor_scan);
static int spi_nor_create_read_dirmap(struct spi_nor *nor)
{
struct spi_mem_dirmap_info info = {
- .op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 1),
- SPI_MEM_OP_ADDR(nor->addr_width, 0, 1),
- SPI_MEM_OP_DUMMY(nor->read_dummy, 1),
- SPI_MEM_OP_DATA_IN(0, NULL, 1)),
+ .op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 0),
+ SPI_MEM_OP_ADDR(nor->addr_width, 0, 0),
+ SPI_MEM_OP_DUMMY(nor->read_dummy, 0),
+ SPI_MEM_OP_DATA_IN(0, NULL, 0)),
.offset = 0,
.length = nor->mtd.size,
};
struct spi_mem_op *op = &info.op_tmpl;
- /* get transfer protocols. */
- op->cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->read_proto);
- op->addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->read_proto);
- op->dummy.buswidth = op->addr.buswidth;
- op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto);
+ spi_nor_spimem_setup_op(nor, op, nor->read_proto);
/* convert the dummy cycles to the number of bytes */
op->dummy.nbytes = (nor->read_dummy * op->dummy.buswidth) / 8;
+ if (spi_nor_protocol_is_dtr(nor->read_proto))
+ op->dummy.nbytes *= 2;
+
+ /*
+ * Since spi_nor_spimem_setup_op() only sets buswidth when the number
+ * of data bytes is non-zero, the data buswidth won't be set here. So,
+ * do it explicitly.
+ */
+ op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto);
nor->dirmap.rdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem,
&info);
@@ -3262,24 +3576,27 @@ static int spi_nor_create_read_dirmap(struct spi_nor *nor)
static int spi_nor_create_write_dirmap(struct spi_nor *nor)
{
struct spi_mem_dirmap_info info = {
- .op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 1),
- SPI_MEM_OP_ADDR(nor->addr_width, 0, 1),
+ .op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 0),
+ SPI_MEM_OP_ADDR(nor->addr_width, 0, 0),
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_OUT(0, NULL, 1)),
+ SPI_MEM_OP_DATA_OUT(0, NULL, 0)),
.offset = 0,
.length = nor->mtd.size,
};
struct spi_mem_op *op = &info.op_tmpl;
- /* get transfer protocols. */
- op->cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->write_proto);
- op->addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->write_proto);
- op->dummy.buswidth = op->addr.buswidth;
- op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto);
-
if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
op->addr.nbytes = 0;
+ spi_nor_spimem_setup_op(nor, op, nor->write_proto);
+
+ /*
+ * Since spi_nor_spimem_setup_op() only sets buswidth when the number
+ * of data bytes is non-zero, the data buswidth won't be set here. So,
+ * do it explicitly.
+ */
+ op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto);
+
nor->dirmap.wdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem,
&info);
return PTR_ERR_OR_ZERO(nor->dirmap.wdesc);
diff --git a/drivers/mtd/spi-nor/core.h b/drivers/mtd/spi-nor/core.h
index 6f2f6b27173f..d631ee299de3 100644
--- a/drivers/mtd/spi-nor/core.h
+++ b/drivers/mtd/spi-nor/core.h
@@ -26,6 +26,9 @@ enum spi_nor_option_flags {
SNOR_F_HAS_SR_TB_BIT6 = BIT(11),
SNOR_F_HAS_4BIT_BP = BIT(12),
SNOR_F_HAS_SR_BP3_BIT6 = BIT(13),
+ SNOR_F_IO_MODE_EN_VOLATILE = BIT(14),
+ SNOR_F_SOFT_RESET = BIT(15),
+ SNOR_F_SWP_IS_VOLATILE = BIT(16),
};
struct spi_nor_read_command {
@@ -62,6 +65,7 @@ enum spi_nor_read_command_index {
SNOR_CMD_READ_1_8_8,
SNOR_CMD_READ_8_8_8,
SNOR_CMD_READ_1_8_8_DTR,
+ SNOR_CMD_READ_8_8_8_DTR,
SNOR_CMD_READ_MAX
};
@@ -78,6 +82,7 @@ enum spi_nor_pp_command_index {
SNOR_CMD_PP_1_1_8,
SNOR_CMD_PP_1_8_8,
SNOR_CMD_PP_8_8_8,
+ SNOR_CMD_PP_8_8_8_DTR,
SNOR_CMD_PP_MAX
};
@@ -189,7 +194,12 @@ struct spi_nor_locking_ops {
* Serial Flash Discoverable Parameters (SFDP) tables.
*
* @size: the flash memory density in bytes.
+ * @writesize: Minimal writable flash unit size. Defaults to 1. Set to
+ * ECC unit size for ECC-ed flashes.
* @page_size: the page size of the SPI NOR flash memory.
+ * @rdsr_dummy: dummy cycles needed for Read Status Register command.
+ * @rdsr_addr_nbytes: dummy address bytes needed for Read Status Register
+ * command.
* @hwcaps: describes the read and page program hardware
* capabilities.
* @reads: read capabilities ordered by priority: the higher index
@@ -198,6 +208,7 @@ struct spi_nor_locking_ops {
* higher index in the array, the higher priority.
* @erase_map: the erase map parsed from the SFDP Sector Map Parameter
* Table.
+ * @octal_dtr_enable: enables SPI NOR octal DTR mode.
* @quad_enable: enables SPI NOR quad mode.
* @set_4byte_addr_mode: puts the SPI NOR in 4 byte addressing mode.
* @convert_addr: converts an absolute address into something the flash
@@ -211,7 +222,10 @@ struct spi_nor_locking_ops {
*/
struct spi_nor_flash_parameter {
u64 size;
+ u32 writesize;
u32 page_size;
+ u8 rdsr_dummy;
+ u8 rdsr_addr_nbytes;
struct spi_nor_hwcaps hwcaps;
struct spi_nor_read_command reads[SNOR_CMD_READ_MAX];
@@ -219,6 +233,7 @@ struct spi_nor_flash_parameter {
struct spi_nor_erase_map erase_map;
+ int (*octal_dtr_enable)(struct spi_nor *nor, bool enable);
int (*quad_enable)(struct spi_nor *nor);
int (*set_4byte_addr_mode)(struct spi_nor *nor, bool enable);
u32 (*convert_addr)(struct spi_nor *nor, u32 addr);
@@ -311,6 +326,18 @@ struct flash_info {
* BP3 is bit 6 of status register.
* Must be used with SPI_NOR_4BIT_BP.
*/
+#define SPI_NOR_OCTAL_DTR_READ BIT(19) /* Flash supports octal DTR Read. */
+#define SPI_NOR_OCTAL_DTR_PP BIT(20) /* Flash supports Octal DTR Page Program */
+#define SPI_NOR_IO_MODE_EN_VOLATILE BIT(21) /*
+ * Flash enables the best
+ * available I/O mode via a
+ * volatile bit.
+ */
+#define SPI_NOR_SWP_IS_VOLATILE BIT(22) /*
+ * Flash has volatile software write
+ * protection bits. Usually these will
+ * power-up in a write-protected state.
+ */
/* Part specific fixup hooks. */
const struct spi_nor_fixups *fixups;
@@ -399,6 +426,9 @@ extern const struct spi_nor_manufacturer spi_nor_winbond;
extern const struct spi_nor_manufacturer spi_nor_xilinx;
extern const struct spi_nor_manufacturer spi_nor_xmc;
+void spi_nor_spimem_setup_op(const struct spi_nor *nor,
+ struct spi_mem_op *op,
+ const enum spi_nor_protocol proto);
int spi_nor_write_enable(struct spi_nor *nor);
int spi_nor_write_disable(struct spi_nor *nor);
int spi_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable);
@@ -409,6 +439,9 @@ void spi_nor_unlock_and_unprep(struct spi_nor *nor);
int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor);
int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor);
int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor);
+int spi_nor_read_sr(struct spi_nor *nor, u8 *sr);
+int spi_nor_write_sr(struct spi_nor *nor, const u8 *sr, size_t len);
+int spi_nor_write_sr_and_check(struct spi_nor *nor, u8 sr1);
int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr);
ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len,
@@ -418,6 +451,11 @@ ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
int spi_nor_hwcaps_read2cmd(u32 hwcaps);
u8 spi_nor_convert_3to4_read(u8 opcode);
+void spi_nor_set_read_settings(struct spi_nor_read_command *read,
+ u8 num_mode_clocks,
+ u8 num_wait_states,
+ u8 opcode,
+ enum spi_nor_protocol proto);
void spi_nor_set_pp_settings(struct spi_nor_pp_command *pp, u8 opcode,
enum spi_nor_protocol proto);
diff --git a/drivers/mtd/spi-nor/esmt.c b/drivers/mtd/spi-nor/esmt.c
index c93170008118..cfc9218c1053 100644
--- a/drivers/mtd/spi-nor/esmt.c
+++ b/drivers/mtd/spi-nor/esmt.c
@@ -11,7 +11,7 @@
static const struct flash_info esmt_parts[] = {
/* ESMT */
{ "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_HAS_LOCK) },
+ SECT_4K | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
{ "f25l32qa", INFO(0x8c4116, 0, 64 * 1024, 64,
SECT_4K | SPI_NOR_HAS_LOCK) },
{ "f25l64qa", INFO(0x8c4117, 0, 64 * 1024, 128,
diff --git a/drivers/mtd/spi-nor/intel.c b/drivers/mtd/spi-nor/intel.c
index d8196f101368..8ece9cceb3cf 100644
--- a/drivers/mtd/spi-nor/intel.c
+++ b/drivers/mtd/spi-nor/intel.c
@@ -10,23 +10,16 @@
static const struct flash_info intel_parts[] = {
/* Intel/Numonyx -- xxxs33b */
- { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) },
- { "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) },
- { "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) },
-};
-
-static void intel_default_init(struct spi_nor *nor)
-{
- nor->flags |= SNOR_F_HAS_LOCK;
-}
-
-static const struct spi_nor_fixups intel_fixups = {
- .default_init = intel_default_init,
+ { "160s33b", INFO(0x898911, 0, 64 * 1024, 32,
+ SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
+ { "320s33b", INFO(0x898912, 0, 64 * 1024, 64,
+ SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
+ { "640s33b", INFO(0x898913, 0, 64 * 1024, 128,
+ SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
};
const struct spi_nor_manufacturer spi_nor_intel = {
.name = "intel",
.parts = intel_parts,
.nparts = ARRAY_SIZE(intel_parts),
- .fixups = &intel_fixups,
};
diff --git a/drivers/mtd/spi-nor/micron-st.c b/drivers/mtd/spi-nor/micron-st.c
index ef3695080710..c224e59820a1 100644
--- a/drivers/mtd/spi-nor/micron-st.c
+++ b/drivers/mtd/spi-nor/micron-st.c
@@ -8,10 +8,123 @@
#include "core.h"
+#define SPINOR_OP_MT_DTR_RD 0xfd /* Fast Read opcode in DTR mode */
+#define SPINOR_OP_MT_RD_ANY_REG 0x85 /* Read volatile register */
+#define SPINOR_OP_MT_WR_ANY_REG 0x81 /* Write volatile register */
+#define SPINOR_REG_MT_CFR0V 0x00 /* For setting octal DTR mode */
+#define SPINOR_REG_MT_CFR1V 0x01 /* For setting dummy cycles */
+#define SPINOR_MT_OCT_DTR 0xe7 /* Enable Octal DTR. */
+#define SPINOR_MT_EXSPI 0xff /* Enable Extended SPI (default) */
+
+static int spi_nor_micron_octal_dtr_enable(struct spi_nor *nor, bool enable)
+{
+ struct spi_mem_op op;
+ u8 *buf = nor->bouncebuf;
+ int ret;
+
+ if (enable) {
+ /* Use 20 dummy cycles for memory array reads. */
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ return ret;
+
+ *buf = 20;
+ op = (struct spi_mem_op)
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_MT_WR_ANY_REG, 1),
+ SPI_MEM_OP_ADDR(3, SPINOR_REG_MT_CFR1V, 1),
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_OUT(1, buf, 1));
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ if (ret)
+ return ret;
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ return ret;
+ }
+
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ return ret;
+
+ if (enable)
+ *buf = SPINOR_MT_OCT_DTR;
+ else
+ *buf = SPINOR_MT_EXSPI;
+
+ op = (struct spi_mem_op)
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_MT_WR_ANY_REG, 1),
+ SPI_MEM_OP_ADDR(enable ? 3 : 4,
+ SPINOR_REG_MT_CFR0V, 1),
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_OUT(1, buf, 1));
+
+ if (!enable)
+ spi_nor_spimem_setup_op(nor, &op, SNOR_PROTO_8_8_8_DTR);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ if (ret)
+ return ret;
+
+ /* Read flash ID to make sure the switch was successful. */
+ op = (struct spi_mem_op)
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDID, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_DUMMY(enable ? 8 : 0, 1),
+ SPI_MEM_OP_DATA_IN(round_up(nor->info->id_len, 2),
+ buf, 1));
+
+ if (enable)
+ spi_nor_spimem_setup_op(nor, &op, SNOR_PROTO_8_8_8_DTR);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ if (ret)
+ return ret;
+
+ if (memcmp(buf, nor->info->id, nor->info->id_len))
+ return -EINVAL;
+
+ return 0;
+}
+
+static void mt35xu512aba_default_init(struct spi_nor *nor)
+{
+ nor->params->octal_dtr_enable = spi_nor_micron_octal_dtr_enable;
+}
+
+static void mt35xu512aba_post_sfdp_fixup(struct spi_nor *nor)
+{
+ /* Set the Fast Read settings. */
+ nor->params->hwcaps.mask |= SNOR_HWCAPS_READ_8_8_8_DTR;
+ spi_nor_set_read_settings(&nor->params->reads[SNOR_CMD_READ_8_8_8_DTR],
+ 0, 20, SPINOR_OP_MT_DTR_RD,
+ SNOR_PROTO_8_8_8_DTR);
+
+ nor->cmd_ext_type = SPI_NOR_EXT_REPEAT;
+ nor->params->rdsr_dummy = 8;
+ nor->params->rdsr_addr_nbytes = 0;
+
+ /*
+ * The BFPT quad enable field is set to a reserved value so the quad
+ * enable function is ignored by spi_nor_parse_bfpt(). Make sure we
+ * disable it.
+ */
+ nor->params->quad_enable = NULL;
+}
+
+static struct spi_nor_fixups mt35xu512aba_fixups = {
+ .default_init = mt35xu512aba_default_init,
+ .post_sfdp = mt35xu512aba_post_sfdp_fixup,
+};
+
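+/*
+ * Editorial sketch of the hook ordering the fixups above rely on, as
+ * simplified from spi_nor_init_params() in core.c: .default_init runs from
+ * spi_nor_manufacturer_init_params(), .post_bfpt runs while the BFPT is
+ * being parsed, and .post_sfdp runs once SFDP parsing is done, so it can
+ * override whatever the tables advertised.
+ */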
static const struct flash_info micron_parts[] = {
{ "mt35xu512aba", INFO(0x2c5b1a, 0, 128 * 1024, 512,
SECT_4K | USE_FSR | SPI_NOR_OCTAL_READ |
- SPI_NOR_4B_OPCODES) },
+ SPI_NOR_4B_OPCODES | SPI_NOR_OCTAL_DTR_READ |
+ SPI_NOR_OCTAL_DTR_PP |
+ SPI_NOR_IO_MODE_EN_VOLATILE)
+ .fixups = &mt35xu512aba_fixups},
{ "mt35xu02g", INFO(0x2c5b1c, 0, 128 * 1024, 2048,
SECT_4K | USE_FSR | SPI_NOR_OCTAL_READ |
SPI_NOR_4B_OPCODES) },
diff --git a/drivers/mtd/spi-nor/sfdp.c b/drivers/mtd/spi-nor/sfdp.c
index e2a43d39eb5f..6ee7719e5903 100644
--- a/drivers/mtd/spi-nor/sfdp.c
+++ b/drivers/mtd/spi-nor/sfdp.c
@@ -4,6 +4,7 @@
* Copyright (C) 2014, Freescale Semiconductor, Inc.
*/
+#include <linux/bitfield.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/mtd/spi-nor.h>
@@ -19,6 +20,11 @@
#define SFDP_BFPT_ID 0xff00 /* Basic Flash Parameter Table */
#define SFDP_SECTOR_MAP_ID 0xff81 /* Sector Map Table */
#define SFDP_4BAIT_ID 0xff84 /* 4-byte Address Instruction Table */
+#define SFDP_PROFILE1_ID 0xff05 /* xSPI Profile 1.0 table. */
+#define SFDP_SCCR_MAP_ID 0xff87 /*
+ * Status, Control and Configuration
+ * Register Map.
+ */
#define SFDP_SIGNATURE 0x50444653U
@@ -59,7 +65,7 @@ struct sfdp_bfpt_read {
struct sfdp_bfpt_erase {
/*
- * The half-word at offset <shift> in DWORD <dwoard> encodes the
+ * The half-word at offset <shift> in DWORD <dword> encodes the
* op code and erase sector size to be used by Sector Erase commands.
*/
u32 dword;
@@ -602,10 +608,32 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor,
break;
}
+ /* Soft Reset support. */
+ if (bfpt.dwords[BFPT_DWORD(16)] & BFPT_DWORD16_SWRST_EN_RST)
+ nor->flags |= SNOR_F_SOFT_RESET;
+
/* Stop here if not JESD216 rev C or later. */
if (bfpt_header->length == BFPT_DWORD_MAX_JESD216B)
return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt,
params);
+ /* 8D-8D-8D command extension. */
+ switch (bfpt.dwords[BFPT_DWORD(18)] & BFPT_DWORD18_CMD_EXT_MASK) {
+ case BFPT_DWORD18_CMD_EXT_REP:
+ nor->cmd_ext_type = SPI_NOR_EXT_REPEAT;
+ break;
+
+ case BFPT_DWORD18_CMD_EXT_INV:
+ nor->cmd_ext_type = SPI_NOR_EXT_INVERT;
+ break;
+
+ case BFPT_DWORD18_CMD_EXT_RES:
+ dev_dbg(nor->dev, "Reserved command extension used\n");
+ break;
+
+ case BFPT_DWORD18_CMD_EXT_16B:
+ dev_dbg(nor->dev, "16-bit opcodes not supported\n");
+ return -EOPNOTSUPP;
+ }
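+	/*
+	 * Worked example (editorial): with BFPT_DWORD18_CMD_EXT_INV an
+	 * opcode of 0xee is followed by its complement 0x11 on the bus;
+	 * with BFPT_DWORD18_CMD_EXT_REP it is simply sent twice (0xee 0xee).
+	 */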
return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt, params);
}
@@ -1047,9 +1075,16 @@ static int spi_nor_parse_4bait(struct spi_nor *nor,
}
/* 4BAIT is the only SFDP table that indicates page program support. */
- if (pp_hwcaps & SNOR_HWCAPS_PP)
+ if (pp_hwcaps & SNOR_HWCAPS_PP) {
spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP],
SPINOR_OP_PP_4B, SNOR_PROTO_1_1_1);
+ /*
+ * Since xSPI Page Program opcode is backward compatible with
+ * Legacy SPI, use Legacy SPI opcode there as well.
+ */
+ spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP_8_8_8_DTR],
+ SPINOR_OP_PP_4B, SNOR_PROTO_8_8_8_DTR);
+ }
if (pp_hwcaps & SNOR_HWCAPS_PP_1_1_4)
spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP_1_1_4],
SPINOR_OP_PP_1_1_4_4B,
@@ -1083,6 +1118,131 @@ out:
return ret;
}
+#define PROFILE1_DWORD1_RDSR_ADDR_BYTES BIT(29)
+#define PROFILE1_DWORD1_RDSR_DUMMY BIT(28)
+#define PROFILE1_DWORD1_RD_FAST_CMD GENMASK(15, 8)
+#define PROFILE1_DWORD4_DUMMY_200MHZ GENMASK(11, 7)
+#define PROFILE1_DWORD5_DUMMY_166MHZ GENMASK(31, 27)
+#define PROFILE1_DWORD5_DUMMY_133MHZ GENMASK(21, 17)
+#define PROFILE1_DWORD5_DUMMY_100MHZ GENMASK(11, 7)
+
+/**
+ * spi_nor_parse_profile1() - parse the xSPI Profile 1.0 table
+ * @nor: pointer to a 'struct spi_nor'
+ * @profile1_header: pointer to the 'struct sfdp_parameter_header' describing
+ * the Profile 1.0 Table length and version.
+ * @params: pointer to the 'struct spi_nor_flash_parameter' to be filled.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_parse_profile1(struct spi_nor *nor,
+ const struct sfdp_parameter_header *profile1_header,
+ struct spi_nor_flash_parameter *params)
+{
+ u32 *dwords, addr;
+ size_t len;
+ int ret;
+ u8 dummy, opcode;
+
+ len = profile1_header->length * sizeof(*dwords);
+ dwords = kmalloc(len, GFP_KERNEL);
+ if (!dwords)
+ return -ENOMEM;
+
+ addr = SFDP_PARAM_HEADER_PTP(profile1_header);
+ ret = spi_nor_read_sfdp(nor, addr, len, dwords);
+ if (ret)
+ goto out;
+
+ le32_to_cpu_array(dwords, profile1_header->length);
+
+ /* Get 8D-8D-8D fast read opcode and dummy cycles. */
+ opcode = FIELD_GET(PROFILE1_DWORD1_RD_FAST_CMD, dwords[0]);
+
+ /* Set the Read Status Register dummy cycles and dummy address bytes. */
+ if (dwords[0] & PROFILE1_DWORD1_RDSR_DUMMY)
+ params->rdsr_dummy = 8;
+ else
+ params->rdsr_dummy = 4;
+
+ if (dwords[0] & PROFILE1_DWORD1_RDSR_ADDR_BYTES)
+ params->rdsr_addr_nbytes = 4;
+ else
+ params->rdsr_addr_nbytes = 0;
+
+ /*
+ * We don't know what speed the controller is running at. Find the
+ * dummy cycles for the fastest frequency the flash can run at to be
+ * sure we are never short of dummy cycles. A value of 0 means the
+ * frequency is not supported.
+ *
+ * If we don't find anything, just log it and let flashes set the correct
+ * value if needed in their fixup hooks.
+ */
+ dummy = FIELD_GET(PROFILE1_DWORD4_DUMMY_200MHZ, dwords[3]);
+ if (!dummy)
+ dummy = FIELD_GET(PROFILE1_DWORD5_DUMMY_166MHZ, dwords[4]);
+ if (!dummy)
+ dummy = FIELD_GET(PROFILE1_DWORD5_DUMMY_133MHZ, dwords[4]);
+ if (!dummy)
+ dummy = FIELD_GET(PROFILE1_DWORD5_DUMMY_100MHZ, dwords[4]);
+ if (!dummy)
+ dev_dbg(nor->dev,
+ "Can't find dummy cycles from Profile 1.0 table\n");
+
+ /* Round up to an even value to avoid tripping controllers up. */
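+	/* e.g. an odd 7 becomes 8, while an even 20 is kept (editorial). */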
+ dummy = round_up(dummy, 2);
+
+ /* Update the fast read settings. */
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_8_8_8_DTR],
+ 0, dummy, opcode,
+ SNOR_PROTO_8_8_8_DTR);
+
+out:
+ kfree(dwords);
+ return ret;
+}
+
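+/*
+ * Worked example (editorial) of the DWORD1 decode above: with
+ * dwords[0] == 0x3000ee00, FIELD_GET(PROFILE1_DWORD1_RD_FAST_CMD, dwords[0])
+ * yields opcode 0xee, and BIT(28)/BIT(29) both being set selects
+ * rdsr_dummy = 8 and rdsr_addr_nbytes = 4.
+ */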
+#define SCCR_DWORD22_OCTAL_DTR_EN_VOLATILE BIT(31)
+
+/**
+ * spi_nor_parse_sccr() - Parse the Status, Control and Configuration Register
+ * Map.
+ * @nor: pointer to a 'struct spi_nor'
+ * @sccr_header: pointer to the 'struct sfdp_parameter_header' describing
+ * the SCCR Map table length and version.
+ * @params: pointer to the 'struct spi_nor_flash_parameter' to be filled.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_parse_sccr(struct spi_nor *nor,
+ const struct sfdp_parameter_header *sccr_header,
+ struct spi_nor_flash_parameter *params)
+{
+ u32 *dwords, addr;
+ size_t len;
+ int ret;
+
+ len = sccr_header->length * sizeof(*dwords);
+ dwords = kmalloc(len, GFP_KERNEL);
+ if (!dwords)
+ return -ENOMEM;
+
+ addr = SFDP_PARAM_HEADER_PTP(sccr_header);
+ ret = spi_nor_read_sfdp(nor, addr, len, dwords);
+ if (ret)
+ goto out;
+
+ le32_to_cpu_array(dwords, sccr_header->length);
+
+ if (FIELD_GET(SCCR_DWORD22_OCTAL_DTR_EN_VOLATILE, dwords[22]))
+ nor->flags |= SNOR_F_IO_MODE_EN_VOLATILE;
+
+out:
+ kfree(dwords);
+ return ret;
+}
+
/**
* spi_nor_parse_sfdp() - parse the Serial Flash Discoverable Parameters.
* @nor: pointer to a 'struct spi_nor'
@@ -1184,6 +1344,14 @@ int spi_nor_parse_sfdp(struct spi_nor *nor,
err = spi_nor_parse_4bait(nor, param_header, params);
break;
+ case SFDP_PROFILE1_ID:
+ err = spi_nor_parse_profile1(nor, param_header, params);
+ break;
+
+ case SFDP_SCCR_MAP_ID:
+ err = spi_nor_parse_sccr(nor, param_header, params);
+ break;
+
default:
break;
}
diff --git a/drivers/mtd/spi-nor/sfdp.h b/drivers/mtd/spi-nor/sfdp.h
index 7f9846b3a1ad..89152ae1cf3e 100644
--- a/drivers/mtd/spi-nor/sfdp.h
+++ b/drivers/mtd/spi-nor/sfdp.h
@@ -90,6 +90,14 @@ struct sfdp_bfpt {
#define BFPT_DWORD15_QER_SR2_BIT1_NO_RD (0x4UL << 20)
#define BFPT_DWORD15_QER_SR2_BIT1 (0x5UL << 20) /* Spansion */
+#define BFPT_DWORD16_SWRST_EN_RST BIT(12)
+
+#define BFPT_DWORD18_CMD_EXT_MASK GENMASK(30, 29)
+#define BFPT_DWORD18_CMD_EXT_REP (0x0UL << 29) /* Repeat */
+#define BFPT_DWORD18_CMD_EXT_INV (0x1UL << 29) /* Invert */
+#define BFPT_DWORD18_CMD_EXT_RES (0x2UL << 29) /* Reserved */
+#define BFPT_DWORD18_CMD_EXT_16B (0x3UL << 29) /* 16-bit opcode */
+
struct sfdp_parameter_header {
u8 id_lsb;
u8 minor;
diff --git a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c
index 8429b4af999a..b0c5521c1e27 100644
--- a/drivers/mtd/spi-nor/spansion.c
+++ b/drivers/mtd/spi-nor/spansion.c
@@ -8,6 +8,173 @@
#include "core.h"
+#define SPINOR_OP_RD_ANY_REG 0x65 /* Read any register */
+#define SPINOR_OP_WR_ANY_REG 0x71 /* Write any register */
+#define SPINOR_REG_CYPRESS_CFR2V 0x00800003
+#define SPINOR_REG_CYPRESS_CFR2V_MEMLAT_11_24 0xb
+#define SPINOR_REG_CYPRESS_CFR3V 0x00800004
+#define SPINOR_REG_CYPRESS_CFR3V_PGSZ BIT(4) /* Page size. */
+#define SPINOR_REG_CYPRESS_CFR5V 0x00800006
+#define SPINOR_REG_CYPRESS_CFR5V_OCT_DTR_EN 0x3
+#define SPINOR_REG_CYPRESS_CFR5V_OCT_DTR_DS 0
+#define SPINOR_OP_CYPRESS_RD_FAST 0xee
+
+/**
+ * spi_nor_cypress_octal_dtr_enable() - Enable octal DTR on Cypress flashes.
+ * @nor: pointer to a 'struct spi_nor'
+ * @enable: whether to enable or disable Octal DTR
+ *
+ * This also sets the memory access latency cycles to 24 to allow the flash to
+ * run at up to 200MHz.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_cypress_octal_dtr_enable(struct spi_nor *nor, bool enable)
+{
+ struct spi_mem_op op;
+ u8 *buf = nor->bouncebuf;
+ int ret;
+
+ if (enable) {
+ /* Use 24 dummy cycles for memory array reads. */
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ return ret;
+
+ *buf = SPINOR_REG_CYPRESS_CFR2V_MEMLAT_11_24;
+ op = (struct spi_mem_op)
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WR_ANY_REG, 1),
+ SPI_MEM_OP_ADDR(3, SPINOR_REG_CYPRESS_CFR2V,
+ 1),
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_OUT(1, buf, 1));
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ if (ret)
+ return ret;
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ return ret;
+
+ nor->read_dummy = 24;
+ }
+
+ /* Set/unset the octal and DTR enable bits. */
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ return ret;
+
+ if (enable)
+ *buf = SPINOR_REG_CYPRESS_CFR5V_OCT_DTR_EN;
+ else
+ *buf = SPINOR_REG_CYPRESS_CFR5V_OCT_DTR_DS;
+
+ op = (struct spi_mem_op)
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WR_ANY_REG, 1),
+ SPI_MEM_OP_ADDR(enable ? 3 : 4,
+ SPINOR_REG_CYPRESS_CFR5V,
+ 1),
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_OUT(1, buf, 1));
+
+ if (!enable)
+ spi_nor_spimem_setup_op(nor, &op, SNOR_PROTO_8_8_8_DTR);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ if (ret)
+ return ret;
+
+ /* Read flash ID to make sure the switch was successful. */
+ op = (struct spi_mem_op)
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDID, 1),
+ SPI_MEM_OP_ADDR(enable ? 4 : 0, 0, 1),
+ SPI_MEM_OP_DUMMY(enable ? 3 : 0, 1),
+ SPI_MEM_OP_DATA_IN(round_up(nor->info->id_len, 2),
+ buf, 1));
+
+ if (enable)
+ spi_nor_spimem_setup_op(nor, &op, SNOR_PROTO_8_8_8_DTR);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ if (ret)
+ return ret;
+
+ if (memcmp(buf, nor->info->id, nor->info->id_len))
+ return -EINVAL;
+
+ return 0;
+}
+
+static void s28hs512t_default_init(struct spi_nor *nor)
+{
+ nor->params->octal_dtr_enable = spi_nor_cypress_octal_dtr_enable;
+ nor->params->writesize = 16;
+}
+
+static void s28hs512t_post_sfdp_fixup(struct spi_nor *nor)
+{
+ /*
+ * On older versions of the flash the xSPI Profile 1.0 table has the
+ * 8D-8D-8D Fast Read opcode as 0x00. But it actually should be 0xEE.
+ */
+ if (nor->params->reads[SNOR_CMD_READ_8_8_8_DTR].opcode == 0)
+ nor->params->reads[SNOR_CMD_READ_8_8_8_DTR].opcode =
+ SPINOR_OP_CYPRESS_RD_FAST;
+
+ /* This flash is also missing the 4-byte Page Program opcode bit. */
+ spi_nor_set_pp_settings(&nor->params->page_programs[SNOR_CMD_PP],
+ SPINOR_OP_PP_4B, SNOR_PROTO_1_1_1);
+ /*
+ * Since xSPI Page Program opcode is backward compatible with
+ * Legacy SPI, use Legacy SPI opcode there as well.
+ */
+ spi_nor_set_pp_settings(&nor->params->page_programs[SNOR_CMD_PP_8_8_8_DTR],
+ SPINOR_OP_PP_4B, SNOR_PROTO_8_8_8_DTR);
+
+ /*
+ * The xSPI Profile 1.0 table advertises the number of additional
+ * address bytes needed for Read Status Register command as 0 but the
+ * actual value for that is 4.
+ */
+ nor->params->rdsr_addr_nbytes = 4;
+}
+
+static int s28hs512t_post_bfpt_fixup(struct spi_nor *nor,
+ const struct sfdp_parameter_header *bfpt_header,
+ const struct sfdp_bfpt *bfpt,
+ struct spi_nor_flash_parameter *params)
+{
+ /*
+ * The BFPT table advertises a 512B page size but the page size is
+ * actually configurable (with the default being 256B). Read from
+ * CFR3V[4] and set the correct size.
+ */
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RD_ANY_REG, 1),
+ SPI_MEM_OP_ADDR(3, SPINOR_REG_CYPRESS_CFR3V, 1),
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_IN(1, nor->bouncebuf, 1));
+ int ret;
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ if (ret)
+ return ret;
+
+ if (nor->bouncebuf[0] & SPINOR_REG_CYPRESS_CFR3V_PGSZ)
+ params->page_size = 512;
+ else
+ params->page_size = 256;
+
+ return 0;
+}
+
+static struct spi_nor_fixups s28hs512t_fixups = {
+ .default_init = s28hs512t_default_init,
+ .post_sfdp = s28hs512t_post_sfdp_fixup,
+ .post_bfpt = s28hs512t_post_bfpt_fixup,
+};
+
static int
s25fs_s_post_bfpt_fixups(struct spi_nor *nor,
const struct sfdp_parameter_header *bfpt_header,
@@ -104,6 +271,11 @@ static const struct flash_info spansion_parts[] = {
SPI_NOR_4B_OPCODES) },
{ "cy15x104q", INFO6(0x042cc2, 0x7f7f7f, 512 * 1024, 1,
SPI_NOR_NO_ERASE) },
+ { "s28hs512t", INFO(0x345b1a, 0, 256 * 1024, 256,
+ SECT_4K | SPI_NOR_OCTAL_DTR_READ |
+ SPI_NOR_OCTAL_DTR_PP)
+ .fixups = &s28hs512t_fixups,
+ },
};
static void spansion_post_sfdp_fixups(struct spi_nor *nor)
diff --git a/drivers/mtd/spi-nor/sst.c b/drivers/mtd/spi-nor/sst.c
index e0af6d25d573..00e48da0744a 100644
--- a/drivers/mtd/spi-nor/sst.c
+++ b/drivers/mtd/spi-nor/sst.c
@@ -11,26 +11,28 @@
static const struct flash_info sst_parts[] = {
/* SST -- large erase sizes are "overlays", "sectors" are 4K */
{ "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8,
- SECT_4K | SST_WRITE) },
+ SECT_4K | SST_WRITE | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
{ "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16,
- SECT_4K | SST_WRITE) },
+ SECT_4K | SST_WRITE | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
{ "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32,
- SECT_4K | SST_WRITE) },
+ SECT_4K | SST_WRITE | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
{ "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64,
- SECT_4K | SST_WRITE) },
- { "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128, SECT_4K) },
+ SECT_4K | SST_WRITE | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
+ { "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_4BIT_BP | SPI_NOR_HAS_LOCK |
+ SPI_NOR_SWP_IS_VOLATILE) },
{ "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1,
- SECT_4K | SST_WRITE) },
+ SECT_4K | SST_WRITE | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
{ "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2,
- SECT_4K | SST_WRITE) },
+ SECT_4K | SST_WRITE | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
{ "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4,
- SECT_4K | SST_WRITE) },
- { "sst25wf020a", INFO(0x621612, 0, 64 * 1024, 4, SECT_4K) },
- { "sst25wf040b", INFO(0x621613, 0, 64 * 1024, 8, SECT_4K) },
+ SECT_4K | SST_WRITE | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
+ { "sst25wf020a", INFO(0x621612, 0, 64 * 1024, 4, SECT_4K | SPI_NOR_HAS_LOCK) },
+ { "sst25wf040b", INFO(0x621613, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_HAS_LOCK) },
{ "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8,
- SECT_4K | SST_WRITE) },
+ SECT_4K | SST_WRITE | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
{ "sst25wf080", INFO(0xbf2505, 0, 64 * 1024, 16,
- SECT_4K | SST_WRITE) },
+ SECT_4K | SST_WRITE | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
{ "sst26wf016b", INFO(0xbf2651, 0, 64 * 1024, 32,
SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
@@ -127,11 +129,6 @@ out:
return ret;
}
-static void sst_default_init(struct spi_nor *nor)
-{
- nor->flags |= SNOR_F_HAS_LOCK;
-}
-
static void sst_post_sfdp_fixups(struct spi_nor *nor)
{
if (nor->info->flags & SST_WRITE)
@@ -139,7 +136,6 @@ static void sst_post_sfdp_fixups(struct spi_nor *nor)
}
static const struct spi_nor_fixups sst_fixups = {
- .default_init = sst_default_init,
.post_sfdp = sst_post_sfdp_fixups,
};
diff --git a/drivers/mtd/tests/mtd_nandecctest.c b/drivers/mtd/tests/mtd_nandecctest.c
index 13bca9ea0cae..c4f271314f52 100644
--- a/drivers/mtd/tests/mtd_nandecctest.c
+++ b/drivers/mtd/tests/mtd_nandecctest.c
@@ -8,7 +8,7 @@
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/slab.h>
-#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/nand-ecc-sw-hamming.h>
#include "mtd_test.h"
@@ -119,13 +119,13 @@ static void no_bit_error(void *error_data, void *error_ecc,
static int no_bit_error_verify(void *error_data, void *error_ecc,
void *correct_data, const size_t size)
{
+ bool sm_order = IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC);
unsigned char calc_ecc[3];
int ret;
- __nand_calculate_ecc(error_data, size, calc_ecc,
- IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC));
- ret = __nand_correct_data(error_data, error_ecc, calc_ecc, size,
- IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC));
+ ecc_sw_hamming_calculate(error_data, size, calc_ecc, sm_order);
+ ret = ecc_sw_hamming_correct(error_data, error_ecc, calc_ecc, size,
+ sm_order);
if (ret == 0 && !memcmp(correct_data, error_data, size))
return 0;
@@ -149,13 +149,13 @@ static void single_bit_error_in_ecc(void *error_data, void *error_ecc,
static int single_bit_error_correct(void *error_data, void *error_ecc,
void *correct_data, const size_t size)
{
+ bool sm_order = IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC);
unsigned char calc_ecc[3];
int ret;
- __nand_calculate_ecc(error_data, size, calc_ecc,
- IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC));
- ret = __nand_correct_data(error_data, error_ecc, calc_ecc, size,
- IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC));
+ ecc_sw_hamming_calculate(error_data, size, calc_ecc, sm_order);
+ ret = ecc_sw_hamming_correct(error_data, error_ecc, calc_ecc, size,
+ sm_order);
if (ret == 1 && !memcmp(correct_data, error_data, size))
return 0;
@@ -186,13 +186,13 @@ static void double_bit_error_in_ecc(void *error_data, void *error_ecc,
static int double_bit_error_detect(void *error_data, void *error_ecc,
void *correct_data, const size_t size)
{
+ bool sm_order = IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC);
unsigned char calc_ecc[3];
int ret;
- __nand_calculate_ecc(error_data, size, calc_ecc,
- IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC));
- ret = __nand_correct_data(error_data, error_ecc, calc_ecc, size,
- IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC));
+ ecc_sw_hamming_calculate(error_data, size, calc_ecc, sm_order);
+ ret = ecc_sw_hamming_correct(error_data, error_ecc, calc_ecc, size,
+ sm_order);
return (ret == -EBADMSG) ? 0 : -EINVAL;
}
@@ -248,6 +248,7 @@ static void dump_data_ecc(void *error_data, void *error_ecc, void *correct_data,
static int nand_ecc_test_run(const size_t size)
{
+ bool sm_order = IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC);
int i;
int err = 0;
void *error_data;
@@ -266,9 +267,7 @@ static int nand_ecc_test_run(const size_t size)
}
prandom_bytes(correct_data, size);
- __nand_calculate_ecc(correct_data, size, correct_ecc,
- IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC));
-
+ ecc_sw_hamming_calculate(correct_data, size, correct_ecc, sm_order);
for (i = 0; i < ARRAY_SIZE(nand_ecc_test); i++) {
nand_ecc_test[i].prepare(error_data, error_ecc,
correct_data, correct_ecc, size);
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index e85b04e9716b..f399edc82191 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -50,6 +50,7 @@
* struct mtd_dev_param - MTD device parameter description data structure.
* @name: MTD character device node path, MTD device name, or MTD device number
* string
+ * @ubi_num: UBI number
* @vid_hdr_offs: VID header offset
* @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
*/
@@ -628,10 +629,8 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024)
ubi->bad_peb_limit = get_bad_peb_limit(ubi, max_beb_per1024);
}
- if (ubi->mtd->type == MTD_NORFLASH) {
- ubi_assert(ubi->mtd->writesize == 1);
+ if (ubi->mtd->type == MTD_NORFLASH)
ubi->nor_flash = 1;
- }
ubi->min_io_size = ubi->mtd->writesize;
ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft;
@@ -1351,8 +1350,6 @@ static int bytes_str_to_int(const char *str)
fallthrough;
case 'K':
result *= 1024;
- if (endp[1] == 'i' && endp[2] == 'B')
- endp += 2;
case '\0':
break;
default:
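The @ubi_num kernel-doc addition matches the fourth field of the driver's mtd= module parameter, mtd=<name|num|path>[,<vid_hdr_offs>[,<max_beb_per1024>[,<ubi_num>]]] - e.g. (hypothetical values) ubi.mtd=1,0,20,3 to attach mtd1 as ubi3. In bytes_str_to_int(), nothing reads endp after the switch, so the removed "iB"-suffix consumption was apparently dead code; "4KiB" parses the same before and after.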
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 0edecfdbd01f..892494c8cb7c 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -1290,7 +1290,7 @@ static int is_error_sane(int err)
* @ubi: UBI device description object
* @from: physical eraseblock number from where to copy
* @to: physical eraseblock number where to copy
- * @vid_hdr: VID header of the @from physical eraseblock
+ * @vidb: data structure from which the VID header is derived
*
* This function copies logical eraseblock from physical eraseblock @from to
* physical eraseblock @to. The @vid_hdr buffer may be changed by this
@@ -1463,6 +1463,7 @@ out_unlock_leb:
/**
* print_rsvd_warning - warn about not having enough reserved PEBs.
* @ubi: UBI device description object
+ * @ai: UBI attach info object
*
* This is a helper function for 'ubi_eba_init()' which is called when UBI
* cannot reserve enough PEBs for bad block handling. This function makes a
diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c
index cc547b37cace..1b980d15d9fb 100644
--- a/drivers/mtd/ubi/gluebi.c
+++ b/drivers/mtd/ubi/gluebi.c
@@ -439,7 +439,7 @@ static int gluebi_resized(struct ubi_volume_info *vi)
* gluebi_notify - UBI notification handler.
* @nb: registered notifier block
* @l: notification type
- * @ptr: pointer to the &struct ubi_notification object
+ * @ns_ptr: pointer to the &struct ubi_notification object
*/
static int gluebi_notify(struct notifier_block *nb, unsigned long l,
void *ns_ptr)
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 14d890b00d2c..2f3312c31e51 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -535,7 +535,14 @@ int ubi_io_sync_erase(struct ubi_device *ubi, int pnum, int torture)
return -EROFS;
}
- if (ubi->nor_flash) {
+ /*
+ * If the flash is ECC-ed then we have to erase the ECC block before we
+ * can write to it. But the write is in preparation for an erase in the
+ * first place. This means we cannot zero out EC and VID before the
+ * erase and we just have to hope the flash starts erasing from the
+ * start of the page.
+ */
+ if (ubi->nor_flash && ubi->mtd->writesize == 1) {
err = nor_erase_prepare(ubi, pnum);
if (err)
return err;
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index 9718f5aaaf69..0fce99ff29b5 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -450,7 +450,7 @@ EXPORT_SYMBOL_GPL(ubi_leb_read);
* ubi_leb_read_sg - read data into a scatter gather list.
* @desc: volume descriptor
* @lnum: logical eraseblock number to read from
- * @buf: buffer where to store the read data
+ * @sgl: UBI scatter gather list to store the read data
* @offset: offset within the logical eraseblock to read from
* @len: how many bytes to read
* @check: whether UBI has to check the read data's CRC or not.
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 7847de75a74c..8455f1d47f3c 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -575,6 +575,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
* @vol_id: the volume ID that last used this PEB
* @lnum: the last used logical eraseblock number for the PEB
* @torture: if the physical eraseblock has to be tortured
+ * @nested: denotes whether the work_sem is already held in read mode
*
* This function returns zero in case of success and a %-ENOMEM in case of
* failure.
@@ -1063,8 +1064,6 @@ out_unlock:
* __erase_worker - physical eraseblock erase worker function.
* @ubi: UBI device description object
* @wl_wrk: the work object
- * @shutdown: non-zero if the worker has to free memory and exit
- * because the WL sub-system is shutting down
*
* This function erases a physical eraseblock and performs torture testing if
* needed. It also takes care of marking the physical eraseblock bad if
diff --git a/drivers/net/arcnet/arc-rimi.c b/drivers/net/arcnet/arc-rimi.c
index 98df38fe553c..12d085405bd0 100644
--- a/drivers/net/arcnet/arc-rimi.c
+++ b/drivers/net/arcnet/arc-rimi.c
@@ -332,7 +332,7 @@ static int __init arc_rimi_init(void)
dev->irq = 9;
if (arcrimi_probe(dev)) {
- free_netdev(dev);
+ free_arcdev(dev);
return -EIO;
}
@@ -349,7 +349,7 @@ static void __exit arc_rimi_exit(void)
iounmap(lp->mem_start);
release_mem_region(dev->mem_start, dev->mem_end - dev->mem_start + 1);
free_irq(dev->irq, dev);
- free_netdev(dev);
+ free_arcdev(dev);
}
#ifndef MODULE
diff --git a/drivers/net/arcnet/arcdevice.h b/drivers/net/arcnet/arcdevice.h
index 22a49c6d7ae6..5d4a4c7efbbf 100644
--- a/drivers/net/arcnet/arcdevice.h
+++ b/drivers/net/arcnet/arcdevice.h
@@ -298,6 +298,10 @@ struct arcnet_local {
int excnak_pending; /* We just got an excessive nak interrupt */
+ /* RESET flag handling */
+ int reset_in_progress;
+ struct work_struct reset_work;
+
struct {
uint16_t sequence; /* sequence number (incs with each packet) */
__be16 aborted_seq;
@@ -350,7 +354,9 @@ void arcnet_dump_skb(struct net_device *dev, struct sk_buff *skb, char *desc)
void arcnet_unregister_proto(struct ArcProto *proto);
irqreturn_t arcnet_interrupt(int irq, void *dev_id);
+
struct net_device *alloc_arcdev(const char *name);
+void free_arcdev(struct net_device *dev);
int arcnet_open(struct net_device *dev);
int arcnet_close(struct net_device *dev);
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index e04efc0a5c97..d76dd7d14299 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -387,10 +387,44 @@ static void arcnet_timer(struct timer_list *t)
struct arcnet_local *lp = from_timer(lp, t, timer);
struct net_device *dev = lp->dev;
- if (!netif_carrier_ok(dev)) {
+ spin_lock_irq(&lp->lock);
+
+ if (!lp->reset_in_progress && !netif_carrier_ok(dev)) {
netif_carrier_on(dev);
netdev_info(dev, "link up\n");
}
+
+ spin_unlock_irq(&lp->lock);
+}
+
+static void reset_device_work(struct work_struct *work)
+{
+ struct arcnet_local *lp;
+ struct net_device *dev;
+
+ lp = container_of(work, struct arcnet_local, reset_work);
+ dev = lp->dev;
+
+ /* Do not bring the network interface back up if an ifdown
+ * was already done.
+ */
+ if (!netif_running(dev) || !lp->reset_in_progress)
+ return;
+
+ rtnl_lock();
+
+ /* Do another check, in case of an ifdown that was triggered in
+ * the small race window between the exit condition above and
+ * acquiring RTNL.
+ */
+ if (!netif_running(dev) || !lp->reset_in_progress)
+ goto out;
+
+ dev_close(dev);
+ dev_open(dev, NULL);
+
+out:
+ rtnl_unlock();
}
static void arcnet_reply_tasklet(unsigned long data)
@@ -452,12 +486,25 @@ struct net_device *alloc_arcdev(const char *name)
lp->dev = dev;
spin_lock_init(&lp->lock);
timer_setup(&lp->timer, arcnet_timer, 0);
+ INIT_WORK(&lp->reset_work, reset_device_work);
}
return dev;
}
EXPORT_SYMBOL(alloc_arcdev);
+void free_arcdev(struct net_device *dev)
+{
+ struct arcnet_local *lp = netdev_priv(dev);
+
+ /* Do not cancel this at ->ndo_close(), as the workqueue itself
+ * indirectly calls the ifdown path through dev_close().
+ */
+ cancel_work_sync(&lp->reset_work);
+ free_netdev(dev);
+}
+EXPORT_SYMBOL(free_arcdev);
+
/* Open/initialize the board. This is called sometime after booting when
* the 'ifconfig' program is run.
*
@@ -587,6 +634,10 @@ int arcnet_close(struct net_device *dev)
/* shut down the card */
lp->hw.close(dev);
+
+ /* clear the deferred-reset flag */
+ lp->reset_in_progress = 0;
+
module_put(lp->hw.owner);
return 0;
}
@@ -820,6 +871,9 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
spin_lock_irqsave(&lp->lock, flags);
+ if (lp->reset_in_progress)
+ goto out;
+
/* RESET flag was enabled - if device is not running, we must
* clear it right away (but nothing else).
*/
@@ -852,11 +906,14 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
if (status & RESETflag) {
arc_printk(D_NORMAL, dev, "spurious reset (status=%Xh)\n",
status);
- arcnet_close(dev);
- arcnet_open(dev);
+
+ lp->reset_in_progress = 1;
+ netif_stop_queue(dev);
+ netif_carrier_off(dev);
+ schedule_work(&lp->reset_work);
/* get out of the interrupt handler! */
- break;
+ goto out;
}
/* RX is inhibited - we must have received something.
* Prepare to receive into the next buffer.
@@ -1052,6 +1109,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
udelay(1);
lp->hw.intmask(dev, lp->intmask);
+out:
spin_unlock_irqrestore(&lp->lock, flags);
return retval;
}
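The arcnet.c hunks implement the standard defer-to-workqueue reset pattern: dev_close()/dev_open() sleep and take RTNL, so they must never run from the interrupt handler. A minimal sketch of the pattern, with hypothetical mydev_* names (struct mydev_priv carrying dev, reset_in_progress and reset_work is assumed, mirroring the arcnet_local fields added above):

	static void mydev_handle_reset_irq(struct net_device *dev)
	{
		struct mydev_priv *p = netdev_priv(dev);

		/* IRQ context: no sleeping, so only flag and defer */
		p->reset_in_progress = 1;
		netif_stop_queue(dev);
		netif_carrier_off(dev);
		schedule_work(&p->reset_work);
	}

	static void mydev_reset_work(struct work_struct *work)
	{
		struct mydev_priv *p = container_of(work, struct mydev_priv,
						    reset_work);

		rtnl_lock();
		/* re-check under RTNL: an ifdown may have won the race */
		if (netif_running(p->dev) && p->reset_in_progress) {
			dev_close(p->dev);
			dev_open(p->dev, NULL);
		}
		rtnl_unlock();
	}

The matching teardown rule is why free_arcdev() exists: cancel_work_sync() cannot live in ->ndo_close(), because the work item itself reaches the ifdown path via dev_close(), so it is cancelled just before free_netdev() instead - hence every free_netdev() caller in the probe/exit paths below is converted.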
diff --git a/drivers/net/arcnet/com20020-isa.c b/drivers/net/arcnet/com20020-isa.c
index f983c4ce6b07..be618e4b9ed5 100644
--- a/drivers/net/arcnet/com20020-isa.c
+++ b/drivers/net/arcnet/com20020-isa.c
@@ -169,7 +169,7 @@ static int __init com20020_init(void)
dev->irq = 9;
if (com20020isa_probe(dev)) {
- free_netdev(dev);
+ free_arcdev(dev);
return -EIO;
}
@@ -182,7 +182,7 @@ static void __exit com20020_exit(void)
unregister_netdev(my_dev);
free_irq(my_dev->irq, my_dev);
release_region(my_dev->base_addr, ARCNET_TOTAL_SIZE);
- free_netdev(my_dev);
+ free_arcdev(my_dev);
}
#ifndef MODULE
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index eb7f76753c9c..8bdc44b7e09a 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -291,7 +291,7 @@ static void com20020pci_remove(struct pci_dev *pdev)
unregister_netdev(dev);
free_irq(dev->irq, dev);
- free_netdev(dev);
+ free_arcdev(dev);
}
}
diff --git a/drivers/net/arcnet/com20020_cs.c b/drivers/net/arcnet/com20020_cs.c
index cf607ffcf358..9cc5eb6a8e90 100644
--- a/drivers/net/arcnet/com20020_cs.c
+++ b/drivers/net/arcnet/com20020_cs.c
@@ -177,7 +177,7 @@ static void com20020_detach(struct pcmcia_device *link)
dev = info->dev;
if (dev) {
dev_dbg(&link->dev, "kfree...\n");
- free_netdev(dev);
+ free_arcdev(dev);
}
dev_dbg(&link->dev, "kfree2...\n");
kfree(info);
diff --git a/drivers/net/arcnet/com90io.c b/drivers/net/arcnet/com90io.c
index cf214b730671..3856b447d38e 100644
--- a/drivers/net/arcnet/com90io.c
+++ b/drivers/net/arcnet/com90io.c
@@ -396,7 +396,7 @@ static int __init com90io_init(void)
err = com90io_probe(dev);
if (err) {
- free_netdev(dev);
+ free_arcdev(dev);
return err;
}
@@ -419,7 +419,7 @@ static void __exit com90io_exit(void)
free_irq(dev->irq, dev);
release_region(dev->base_addr, ARCNET_TOTAL_SIZE);
- free_netdev(dev);
+ free_arcdev(dev);
}
module_init(com90io_init)
diff --git a/drivers/net/arcnet/com90xx.c b/drivers/net/arcnet/com90xx.c
index 3dc3d533cb19..d8dfb9ea0de8 100644
--- a/drivers/net/arcnet/com90xx.c
+++ b/drivers/net/arcnet/com90xx.c
@@ -554,7 +554,7 @@ err_free_irq:
err_release_mem:
release_mem_region(dev->mem_start, dev->mem_end - dev->mem_start + 1);
err_free_dev:
- free_netdev(dev);
+ free_arcdev(dev);
return -EIO;
}
@@ -672,7 +672,7 @@ static void __exit com90xx_exit(void)
release_region(dev->base_addr, ARCNET_TOTAL_SIZE);
release_mem_region(dev->mem_start,
dev->mem_end - dev->mem_start + 1);
- free_netdev(dev);
+ free_arcdev(dev);
}
}
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
index 85ebd2b7e446..85de5f96c02b 100644
--- a/drivers/net/bareudp.c
+++ b/drivers/net/bareudp.c
@@ -380,7 +380,7 @@ static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
goto free_dst;
min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len +
- BAREUDP_BASE_HLEN + info->options_len + sizeof(struct iphdr);
+ BAREUDP_BASE_HLEN + info->options_len + sizeof(struct ipv6hdr);
err = skb_cow_head(skb, min_headroom);
if (unlikely(err))
@@ -534,6 +534,7 @@ static void bareudp_setup(struct net_device *dev)
SET_NETDEV_DEVTYPE(dev, &bareudp_type);
dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
dev->features |= NETIF_F_RXCSUM;
+ dev->features |= NETIF_F_LLTX;
dev->features |= NETIF_F_GSO_SOFTWARE;
dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
@@ -644,11 +645,20 @@ static int bareudp_link_config(struct net_device *dev,
return 0;
}
+static void bareudp_dellink(struct net_device *dev, struct list_head *head)
+{
+ struct bareudp_dev *bareudp = netdev_priv(dev);
+
+ list_del(&bareudp->next);
+ unregister_netdevice_queue(dev, head);
+}
+
static int bareudp_newlink(struct net *net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
struct bareudp_conf conf;
+ LIST_HEAD(list_kill);
int err;
err = bareudp2info(data, &conf, extack);
@@ -661,17 +671,14 @@ static int bareudp_newlink(struct net *net, struct net_device *dev,
err = bareudp_link_config(dev, tb);
if (err)
- return err;
+ goto err_unconfig;
return 0;
-}
-
-static void bareudp_dellink(struct net_device *dev, struct list_head *head)
-{
- struct bareudp_dev *bareudp = netdev_priv(dev);
- list_del(&bareudp->next);
- unregister_netdevice_queue(dev, head);
+err_unconfig:
+ bareudp_dellink(dev, &list_kill);
+ unregister_netdevice_many(&list_kill);
+ return err;
}
static size_t bareudp_get_size(const struct net_device *dev)
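Two distinct fixes in the bareudp hunks: the IPv6 transmit path was reserving sizeof(struct iphdr) (20 bytes) of headroom where an IPv6 header needs sizeof(struct ipv6hdr) (40 bytes), under-reserving by 20 bytes; and bareudp_newlink() now unwinds a half-created device through the moved-up bareudp_dellink() plus unregister_netdevice_many() when bareudp_link_config() fails, instead of returning with the netdev still registered.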
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 424970939fd4..1c28eade6bec 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -123,6 +123,7 @@ config CAN_JANZ_ICAN3
config CAN_KVASER_PCIEFD
depends on PCI
tristate "Kvaser PCIe FD cards"
+ select CRC32
help
This is a driver for the Kvaser PCI Express CAN FD family.
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 3486704c8a95..c73e2a65c904 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -592,11 +592,11 @@ static void can_restart(struct net_device *dev)
cf->can_id |= CAN_ERR_RESTARTED;
- netif_rx_ni(skb);
-
stats->rx_packets++;
stats->rx_bytes += cf->len;
+ netif_rx_ni(skb);
+
restart:
netdev_dbg(dev, "restarted\n");
priv->can_stats.restarts++;
@@ -1163,7 +1163,7 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
struct can_ctrlmode cm = {.flags = priv->ctrlmode};
- struct can_berr_counter bec;
+ struct can_berr_counter bec = { };
enum can_state state = priv->state;
if (priv->do_get_state)
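The reordering in can_restart() matters because netif_rx_ni() hands the skb to the stack, which may free it immediately; cf points into skb->data, so everything needed from the frame must be read first. A sketch of the rule, common to this hunk and the peak_usb and vxcan hunks below (cf/stats/skb are stand-ins for the local variables in each driver):

	unsigned int len = cf->len;	/* cf points into skb->data */

	stats->rx_packets++;
	stats->rx_bytes += len;
	netif_rx_ni(skb);		/* skb (and cf) may be gone after this */

The bec = { } change is separate: zero-initialising the counter struct avoids copying stack garbage to userspace when no callback fills it in.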
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 2c9f12401276..da551fd0f502 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -1852,8 +1852,6 @@ EXPORT_SYMBOL_GPL(m_can_class_register);
void m_can_class_unregister(struct m_can_classdev *cdev)
{
unregister_candev(cdev->net);
-
- m_can_clk_stop(cdev);
}
EXPORT_SYMBOL_GPL(m_can_class_unregister);
diff --git a/drivers/net/can/m_can/tcan4x5x.c b/drivers/net/can/m_can/tcan4x5x.c
index 24c737c4fc44..970f0e9d19bf 100644
--- a/drivers/net/can/m_can/tcan4x5x.c
+++ b/drivers/net/can/m_can/tcan4x5x.c
@@ -131,30 +131,6 @@ static inline struct tcan4x5x_priv *cdev_to_priv(struct m_can_classdev *cdev)
}
-static struct can_bittiming_const tcan4x5x_bittiming_const = {
- .name = DEVICE_NAME,
- .tseg1_min = 2,
- .tseg1_max = 31,
- .tseg2_min = 2,
- .tseg2_max = 16,
- .sjw_max = 16,
- .brp_min = 1,
- .brp_max = 32,
- .brp_inc = 1,
-};
-
-static struct can_bittiming_const tcan4x5x_data_bittiming_const = {
- .name = DEVICE_NAME,
- .tseg1_min = 1,
- .tseg1_max = 32,
- .tseg2_min = 1,
- .tseg2_max = 16,
- .sjw_max = 16,
- .brp_min = 1,
- .brp_max = 32,
- .brp_inc = 1,
-};
-
static void tcan4x5x_check_wake(struct tcan4x5x_priv *priv)
{
int wake_state = 0;
@@ -469,8 +445,6 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
mcan_class->dev = &spi->dev;
mcan_class->ops = &tcan4x5x_ops;
mcan_class->is_peripheral = true;
- mcan_class->bit_timing = &tcan4x5x_bittiming_const;
- mcan_class->data_timing = &tcan4x5x_data_bittiming_const;
mcan_class->net->irq = spi->irq;
spi_set_drvdata(spi, priv);
diff --git a/drivers/net/can/rcar/Kconfig b/drivers/net/can/rcar/Kconfig
index 8d36101b78e3..29cabc20109e 100644
--- a/drivers/net/can/rcar/Kconfig
+++ b/drivers/net/can/rcar/Kconfig
@@ -1,10 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
config CAN_RCAR
- tristate "Renesas R-Car CAN controller"
+ tristate "Renesas R-Car and RZ/G CAN controller"
depends on ARCH_RENESAS || ARM
help
Say Y here if you want to use CAN controller found on Renesas R-Car
- SoCs.
+ or RZ/G SoCs.
To compile this driver as a module, choose M here: the module will
be called rcar_can.
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
index 77129d5f410b..f07e8b737d31 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
@@ -1368,13 +1368,10 @@ static int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
struct mcp251xfd_tx_ring *tx_ring = priv->tx;
struct spi_transfer *last_xfer;
- tx_ring->tail += len;
-
/* Increment the TEF FIFO tail pointer 'len' times in
* a single SPI message.
- */
-
- /* Note:
+ *
+ * Note:
*
* "cs_change == 1" on the last transfer results in an
* active chip select after the complete SPI
@@ -1391,6 +1388,8 @@ static int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
if (err)
return err;
+ tx_ring->tail += len;
+
err = mcp251xfd_check_tef_tail(priv);
if (err)
return err;
@@ -1492,7 +1491,7 @@ mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv,
else
skb = alloc_can_skb(priv->ndev, (struct can_frame **)&cfd);
- if (!cfd) {
+ if (!skb) {
stats->rx_dropped++;
return 0;
}
@@ -1553,10 +1552,8 @@ mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv,
/* Increment the RX FIFO tail pointer 'len' times in a
* single SPI message.
- */
- ring->tail += len;
-
- /* Note:
+ *
+ * Note:
*
* "cs_change == 1" on the last transfer results in an
* active chip select after the complete SPI
@@ -1572,6 +1569,8 @@ mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv,
last_xfer->cs_change = 1;
if (err)
return err;
+
+ ring->tail += len;
}
return 0;
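Beyond merging the split comments, the functional change in both mcp251xfd hunks is ordering: tx_ring->tail and ring->tail now advance only after the SPI transaction that increments the corresponding hardware FIFO tail pointer has succeeded, so an SPI error can no longer leave the driver's shadow tail ahead of the controller's.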
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index 61631f4fd92a..f347ecc79aef 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -514,11 +514,11 @@ static int pcan_usb_fd_decode_canmsg(struct pcan_usb_fd_if *usb_if,
else
memcpy(cfd->data, rm->d, cfd->len);
- peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(rm->ts_low));
-
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += cfd->len;
+ peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(rm->ts_low));
+
return 0;
}
@@ -580,11 +580,11 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if,
if (!skb)
return -ENOMEM;
- peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(sm->ts_low));
-
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += cf->len;
+ peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(sm->ts_low));
+
return 0;
}
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index fa47bab510bb..f9a524c5f6d6 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -39,6 +39,7 @@ static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev)
struct net_device *peer;
struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
struct net_device_stats *peerstats, *srcstats = &dev->stats;
+ u8 len;
if (can_dropped_invalid_skb(dev, skb))
return NETDEV_TX_OK;
@@ -61,12 +62,13 @@ static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev)
skb->dev = peer;
skb->ip_summed = CHECKSUM_UNNECESSARY;
+ len = cfd->len;
if (netif_rx_ni(skb) == NET_RX_SUCCESS) {
srcstats->tx_packets++;
- srcstats->tx_bytes += cfd->len;
+ srcstats->tx_bytes += len;
peerstats = &peer->stats;
peerstats->rx_packets++;
- peerstats->rx_bytes += cfd->len;
+ peerstats->rx_bytes += len;
}
out_unlock:
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 288b5a5c3e0d..95c7fa171e35 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -1404,7 +1404,7 @@ int b53_vlan_prepare(struct dsa_switch *ds, int port,
!(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED))
return -EINVAL;
- if (vlan->vid_end > dev->num_vlans)
+ if (vlan->vid_end >= dev->num_vlans)
return -ERANGE;
b53_enable_vlan(dev, true, ds->vlan_filtering);
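Valid VIDs run from 0 to dev->num_vlans - 1 (4096 entries mean VIDs 0..4095), so a vid_end equal to num_vlans is already out of range; the old '>' check was off by one.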
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 1e9a0adda2d6..445226720ff2 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -509,15 +509,19 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
/* Find our integrated MDIO bus node */
dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio");
priv->master_mii_bus = of_mdio_find_bus(dn);
- if (!priv->master_mii_bus)
+ if (!priv->master_mii_bus) {
+ of_node_put(dn);
return -EPROBE_DEFER;
+ }
get_device(&priv->master_mii_bus->dev);
priv->master_mii_dn = dn;
priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
- if (!priv->slave_mii_bus)
+ if (!priv->slave_mii_bus) {
+ of_node_put(dn);
return -ENOMEM;
+ }
priv->slave_mii_bus->priv = priv;
priv->slave_mii_bus->name = "sf2 slave mii";
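of_find_compatible_node() returns dn with its refcount elevated; the success path stores it in priv->master_mii_dn for a later of_node_put() at teardown, but the two error returns previously leaked that reference. The fix follows the usual OF rule: every exit path after a successful lookup must eventually drop the node.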
diff --git a/drivers/net/dsa/hirschmann/Kconfig b/drivers/net/dsa/hirschmann/Kconfig
index 222dd35e2c9d..e01191107a4b 100644
--- a/drivers/net/dsa/hirschmann/Kconfig
+++ b/drivers/net/dsa/hirschmann/Kconfig
@@ -4,6 +4,7 @@ config NET_DSA_HIRSCHMANN_HELLCREEK
depends on HAS_IOMEM
depends on NET_DSA
depends on PTP_1588_CLOCK
+ depends on LEDS_CLASS
select NET_DSA_TAG_HELLCREEK
help
This driver adds support for Hirschmann Hellcreek TSN switches.
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index 09701c17f3f6..662e68a0e7e6 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -92,9 +92,7 @@
GSWIP_MDIO_PHY_FDUP_MASK)
/* GSWIP MII Registers */
-#define GSWIP_MII_CFG0 0x00
-#define GSWIP_MII_CFG1 0x02
-#define GSWIP_MII_CFG5 0x04
+#define GSWIP_MII_CFGp(p) (0x2 * (p))
#define GSWIP_MII_CFG_EN BIT(14)
#define GSWIP_MII_CFG_LDCLKDIS BIT(12)
#define GSWIP_MII_CFG_MODE_MIIP 0x0
@@ -392,17 +390,9 @@ static void gswip_mii_mask(struct gswip_priv *priv, u32 clear, u32 set,
static void gswip_mii_mask_cfg(struct gswip_priv *priv, u32 clear, u32 set,
int port)
{
- switch (port) {
- case 0:
- gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG0);
- break;
- case 1:
- gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG1);
- break;
- case 5:
- gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG5);
- break;
- }
+ /* There's no MII_CFG register for the CPU port */
+ if (!dsa_is_cpu_port(priv->ds, port))
+ gswip_mii_mask(priv, clear, set, GSWIP_MII_CFGp(port));
}
static void gswip_mii_mask_pcdu(struct gswip_priv *priv, u32 clear, u32 set,
@@ -822,9 +812,8 @@ static int gswip_setup(struct dsa_switch *ds)
gswip_mdio_mask(priv, 0xff, 0x09, GSWIP_MDIO_MDC_CFG1);
/* Disable the xMII link */
- gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 0);
- gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 1);
- gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 5);
+ for (i = 0; i < priv->hw_info->max_ports; i++)
+ gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, i);
/* enable special tag insertion on cpu port */
gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_STEN,
@@ -1447,11 +1436,12 @@ static void gswip_phylink_validate(struct dsa_switch *ds, int port,
phylink_set(mask, Pause);
phylink_set(mask, Asym_Pause);
- /* With the exclusion of MII and Reverse MII, we support Gigabit,
- * including Half duplex
+ /* With the exclusion of MII, Reverse MII and Reduced MII, we
+ * support Gigabit, including Half duplex
*/
if (state->interface != PHY_INTERFACE_MODE_MII &&
- state->interface != PHY_INTERFACE_MODE_REVMII) {
+ state->interface != PHY_INTERFACE_MODE_REVMII &&
+ state->interface != PHY_INTERFACE_MODE_RMII) {
phylink_set(mask, 1000baseT_Full);
phylink_set(mask, 1000baseT_Half);
}
@@ -1541,9 +1531,7 @@ static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port,
{
struct gswip_priv *priv = ds->priv;
- /* Enable the xMII interface only for the external PHY */
- if (interface != PHY_INTERFACE_MODE_INTERNAL)
- gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port);
+ gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port);
}
static void gswip_get_strings(struct dsa_switch *ds, int port, u32 stringset,
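GSWIP_MII_CFGp() encodes the per-port register layout: one MII_CFG register every 0x2 bytes, so ports 0..5 map to offsets 0x0, 0x2, 0x4, 0x6, 0x8 and 0xA. The old fixed defines covered only ports 0, 1 and 5 (and placed port 5 at 0x4), which is presumably why xMII configuration was limited to those ports; the CPU port has no MII_CFG register at all, hence the dsa_is_cpu_port() guard, and gswip_setup() can now simply loop over max_ports.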
diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
index c973db101b72..a4570ba29c83 100644
--- a/drivers/net/dsa/microchip/ksz8795.c
+++ b/drivers/net/dsa/microchip/ksz8795.c
@@ -1187,6 +1187,20 @@ static const struct ksz_chip_data ksz8795_switch_chips[] = {
.port_cnt = 5, /* total cpu and user ports */
},
{
+ /*
+ * WARNING
+ * =======
+ * KSZ8794 is similar to KSZ8795, except the port map
+ * contains a gap between external and CPU ports, the
+ * port map is NOT continuous. The per-port register
+ * map is shifted accordingly too, i.e. registers at
+ * offset 0x40 are NOT used on KSZ8794 and they ARE
+ * used on KSZ8795 for external port 3.
+ * external cpu
+ * KSZ8794 0,1,2 4
+ * KSZ8795 0,1,2,3 4
+ * KSZ8765 0,1,2,3 4
+ */
.chip_id = 0x8794,
.dev_name = "KSZ8794",
.num_vlans = 4096,
@@ -1220,9 +1234,13 @@ static int ksz8795_switch_init(struct ksz_device *dev)
dev->num_vlans = chip->num_vlans;
dev->num_alus = chip->num_alus;
dev->num_statics = chip->num_statics;
- dev->port_cnt = chip->port_cnt;
+ dev->port_cnt = fls(chip->cpu_ports);
+ dev->cpu_port = fls(chip->cpu_ports) - 1;
+ dev->phy_port_cnt = dev->port_cnt - 1;
dev->cpu_ports = chip->cpu_ports;
-
+ dev->host_mask = chip->cpu_ports;
+ dev->port_mask = (BIT(dev->phy_port_cnt) - 1) |
+ chip->cpu_ports;
break;
}
}
@@ -1231,17 +1249,9 @@ static int ksz8795_switch_init(struct ksz_device *dev)
if (!dev->cpu_ports)
return -ENODEV;
- dev->port_mask = BIT(dev->port_cnt) - 1;
- dev->port_mask |= dev->host_mask;
-
dev->reg_mib_cnt = KSZ8795_COUNTER_NUM;
dev->mib_cnt = ARRAY_SIZE(mib_names);
- dev->phy_port_cnt = dev->port_cnt - 1;
-
- dev->cpu_port = dev->port_cnt - 1;
- dev->host_mask = BIT(dev->cpu_port);
-
dev->ports = devm_kzalloc(dev->dev,
dev->port_cnt * sizeof(struct ksz_port),
GFP_KERNEL);
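Assuming .cpu_ports stays 0x10 (BIT(4)) for this family, the derivation works out as: port_cnt = fls(0x10) = 5, cpu_port = 4, phy_port_cnt = 4, host_mask = 0x10, and port_mask = (BIT(4) - 1) | 0x10 = 0x1f. Deriving these from the CPU-port bit rather than from a contiguous chip->port_cnt keeps the CPU port at index 4 even on KSZ8794, whose external ports stop at 2 (see the WARNING above).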
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index cf743133b0b9..389abfd27770 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -400,7 +400,7 @@ int ksz_switch_register(struct ksz_device *dev,
gpiod_set_value_cansleep(dev->reset_gpio, 1);
usleep_range(10000, 12000);
gpiod_set_value_cansleep(dev->reset_gpio, 0);
- usleep_range(100, 1000);
+ msleep(100);
}
mutex_init(&dev->dev_mutex);
@@ -434,7 +434,7 @@ int ksz_switch_register(struct ksz_device *dev,
if (of_property_read_u32(port, "reg",
&port_num))
continue;
- if (port_num >= dev->port_cnt)
+ if (!(dev->port_mask & BIT(port_num)))
return -EINVAL;
of_get_phy_mode(port,
&dev->ports[port_num].interface);
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index eafe6bedc692..54aa942eedaa 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -1676,7 +1676,11 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
if (!entry.portvec)
entry.state = 0;
} else {
- entry.portvec |= BIT(port);
+ if (state == MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC)
+ entry.portvec = BIT(port);
+ else
+ entry.portvec |= BIT(port);
+
entry.state = state;
}
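For a static unicast entry the ATU port vector is now overwritten rather than OR-ed, so re-adding a static MAC for a different port moves the entry there instead of accumulating a multi-port destination mask; the other entry states keep the OR behaviour.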
diff --git a/drivers/net/dsa/mv88e6xxx/global1_vtu.c b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
index 66ddf67b8737..7b96396be609 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_vtu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
@@ -351,6 +351,10 @@ int mv88e6250_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
if (err)
return err;
+ err = mv88e6185_g1_stu_data_read(chip, entry);
+ if (err)
+ return err;
+
/* VTU DBNum[3:0] are located in VTU Operation 3:0
* VTU DBNum[5:4] are located in VTU Operation 9:8
*/
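The comment describes a split bitfield; a hypothetical helper showing the decode it implies (the driver open-codes this):

	static u16 mv88e6250_vtu_fid(u16 op)
	{
		/* DBNum[3:0] are VTU Op bits 3:0; DBNum[5:4] are bits 9:8 */
		return (op & 0x000f) | ((op >> 4) & 0x0030);
	}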
diff --git a/drivers/net/dsa/qca/ar9331.c b/drivers/net/dsa/qca/ar9331.c
index e24a99031b80..4d49c5f2b790 100644
--- a/drivers/net/dsa/qca/ar9331.c
+++ b/drivers/net/dsa/qca/ar9331.c
@@ -159,6 +159,8 @@ struct ar9331_sw_priv {
struct dsa_switch ds;
struct dsa_switch_ops ops;
struct irq_domain *irqdomain;
+ u32 irq_mask;
+ struct mutex lock_irq;
struct mii_bus *mbus; /* mdio master */
struct mii_bus *sbus; /* mdio slave */
struct regmap *regmap;
@@ -520,32 +522,44 @@ static irqreturn_t ar9331_sw_irq(int irq, void *data)
static void ar9331_sw_mask_irq(struct irq_data *d)
{
struct ar9331_sw_priv *priv = irq_data_get_irq_chip_data(d);
- struct regmap *regmap = priv->regmap;
- int ret;
- ret = regmap_update_bits(regmap, AR9331_SW_REG_GINT_MASK,
- AR9331_SW_GINT_PHY_INT, 0);
- if (ret)
- dev_err(priv->dev, "could not mask IRQ\n");
+ priv->irq_mask = 0;
}
static void ar9331_sw_unmask_irq(struct irq_data *d)
{
struct ar9331_sw_priv *priv = irq_data_get_irq_chip_data(d);
+
+ priv->irq_mask = AR9331_SW_GINT_PHY_INT;
+}
+
+static void ar9331_sw_irq_bus_lock(struct irq_data *d)
+{
+ struct ar9331_sw_priv *priv = irq_data_get_irq_chip_data(d);
+
+ mutex_lock(&priv->lock_irq);
+}
+
+static void ar9331_sw_irq_bus_sync_unlock(struct irq_data *d)
+{
+ struct ar9331_sw_priv *priv = irq_data_get_irq_chip_data(d);
struct regmap *regmap = priv->regmap;
int ret;
ret = regmap_update_bits(regmap, AR9331_SW_REG_GINT_MASK,
- AR9331_SW_GINT_PHY_INT,
- AR9331_SW_GINT_PHY_INT);
+ AR9331_SW_GINT_PHY_INT, priv->irq_mask);
if (ret)
- dev_err(priv->dev, "could not unmask IRQ\n");
+ dev_err(priv->dev, "failed to change IRQ mask\n");
+
+ mutex_unlock(&priv->lock_irq);
}
static struct irq_chip ar9331_sw_irq_chip = {
.name = AR9331_SW_NAME,
.irq_mask = ar9331_sw_mask_irq,
.irq_unmask = ar9331_sw_unmask_irq,
+ .irq_bus_lock = ar9331_sw_irq_bus_lock,
+ .irq_bus_sync_unlock = ar9331_sw_irq_bus_sync_unlock,
};
static int ar9331_sw_irq_map(struct irq_domain *domain, unsigned int irq,
@@ -584,6 +598,7 @@ static int ar9331_sw_irq_init(struct ar9331_sw_priv *priv)
return irq ? irq : -EINVAL;
}
+ mutex_init(&priv->lock_irq);
ret = devm_request_threaded_irq(dev, irq, NULL, ar9331_sw_irq,
IRQF_ONESHOT, AR9331_SW_NAME, priv);
if (ret) {
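irq_mask/irq_unmask callbacks run in atomic context, but this irqchip sits behind a regmap on a slow MDIO bus whose accessors can sleep. The irq_bus_lock()/irq_bus_sync_unlock() pair is the genirq convention for such chips: mask changes are only cached in priv->irq_mask under the mutex, and the single sleeping register write happens in sync_unlock after the core has finished calling the atomic callbacks.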
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 862ea44beea7..5ed80d9a6b9f 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -828,13 +828,13 @@ static int emac_probe(struct platform_device *pdev)
db->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(db->clk)) {
ret = PTR_ERR(db->clk);
- goto out_iounmap;
+ goto out_dispose_mapping;
}
ret = clk_prepare_enable(db->clk);
if (ret) {
dev_err(&pdev->dev, "Error couldn't enable clock (%d)\n", ret);
- goto out_iounmap;
+ goto out_dispose_mapping;
}
ret = sunxi_sram_claim(&pdev->dev);
@@ -893,6 +893,8 @@ out_release_sram:
sunxi_sram_release(&pdev->dev);
out_clk_disable_unprepare:
clk_disable_unprepare(db->clk);
+out_dispose_mapping:
+ irq_dispose_mapping(ndev->irq);
out_iounmap:
iounmap(db->membase);
out:
@@ -911,6 +913,7 @@ static int emac_remove(struct platform_device *pdev)
unregister_netdev(ndev);
sunxi_sram_release(&pdev->dev);
clk_disable_unprepare(db->clk);
+ irq_dispose_mapping(ndev->irq);
iounmap(db->membase);
free_netdev(ndev);
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index db7d956a9c9b..d0b0609bbe23 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -8,7 +8,7 @@ config NET_VENDOR_AMD
default y
depends on DIO || MACH_DECSTATION || MVME147 || ATARI || SUN3 || \
SUN3X || SBUS || PCI || ZORRO || (ISA && ISA_DMA_API) || \
- (ARM && ARCH_EBSA110) || ISA || EISA || PCMCIA || ARM64
+ ISA || EISA || PCMCIA || ARM64
help
If you have a network (Ethernet) chipset belonging to this class,
say Y.
@@ -75,14 +75,6 @@ config ARIADNE
To compile this driver as a module, choose M here: the module
will be called ariadne.
-config ARM_AM79C961A
- bool "ARM EBSA110 AM79C961A support"
- depends on ARM && ARCH_EBSA110
- select CRC32
- help
- If you wish to compile a kernel for the EBSA-110, then you should
- always answer Y to this.
-
config ATARILANCE
tristate "Atari LANCE support"
depends on ATARI
diff --git a/drivers/net/ethernet/amd/Makefile b/drivers/net/ethernet/amd/Makefile
index 45f86822a5f7..0d2f478b49a5 100644
--- a/drivers/net/ethernet/amd/Makefile
+++ b/drivers/net/ethernet/amd/Makefile
@@ -5,7 +5,6 @@
obj-$(CONFIG_A2065) += a2065.o
obj-$(CONFIG_AMD8111_ETH) += amd8111e.o
-obj-$(CONFIG_ARM_AM79C961A) += am79c961a.o
obj-$(CONFIG_ARIADNE) += ariadne.o
obj-$(CONFIG_ATARILANCE) += atarilance.o
obj-$(CONFIG_DECLANCE) += declance.o
diff --git a/drivers/net/ethernet/amd/am79c961a.c b/drivers/net/ethernet/amd/am79c961a.c
deleted file mode 100644
index 1c53408f5d47..000000000000
--- a/drivers/net/ethernet/amd/am79c961a.c
+++ /dev/null
@@ -1,763 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/drivers/net/ethernet/amd/am79c961a.c
- *
- * by Russell King <rmk@arm.linux.org.uk> 1995-2001.
- *
- * Derived from various things including skeleton.c
- *
- * This is a special driver for the am79c961A Lance chip used in the
- * Intel (formally Digital Equipment Corp) EBSA110 platform. Please
- * note that this can not be built as a module (it doesn't make sense).
- */
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/crc32.h>
-#include <linux/bitops.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-
-#include <mach/hardware.h>
-
-#define TX_BUFFERS 15
-#define RX_BUFFERS 25
-
-#include "am79c961a.h"
-
-static irqreturn_t
-am79c961_interrupt (int irq, void *dev_id);
-
-static unsigned int net_debug = NET_DEBUG;
-
-static const char version[] =
- "am79c961 ethernet driver (C) 1995-2001 Russell King v0.04\n";
-
-/* --------------------------------------------------------------------------- */
-
-#ifdef __arm__
-static void write_rreg(u_long base, u_int reg, u_int val)
-{
- asm volatile(
- "strh %1, [%2] @ NET_RAP\n\t"
- "strh %0, [%2, #-4] @ NET_RDP"
- :
- : "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464));
-}
-
-static inline unsigned short read_rreg(u_long base_addr, u_int reg)
-{
- unsigned short v;
- asm volatile(
- "strh %1, [%2] @ NET_RAP\n\t"
- "ldrh %0, [%2, #-4] @ NET_RDP"
- : "=r" (v)
- : "r" (reg), "r" (ISAIO_BASE + 0x0464));
- return v;
-}
-
-static inline void write_ireg(u_long base, u_int reg, u_int val)
-{
- asm volatile(
- "strh %1, [%2] @ NET_RAP\n\t"
- "strh %0, [%2, #8] @ NET_IDP"
- :
- : "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464));
-}
-
-static inline unsigned short read_ireg(u_long base_addr, u_int reg)
-{
- u_short v;
- asm volatile(
- "strh %1, [%2] @ NAT_RAP\n\t"
- "ldrh %0, [%2, #8] @ NET_IDP\n\t"
- : "=r" (v)
- : "r" (reg), "r" (ISAIO_BASE + 0x0464));
- return v;
-}
-
-#define am_writeword(dev,off,val) __raw_writew(val, ISAMEM_BASE + ((off) << 1))
-#define am_readword(dev,off) __raw_readw(ISAMEM_BASE + ((off) << 1))
-
-static void
-am_writebuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned int length)
-{
- offset = ISAMEM_BASE + (offset << 1);
- length = (length + 1) & ~1;
- if ((int)buf & 2) {
- asm volatile("strh %2, [%0], #4"
- : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8)));
- buf += 2;
- length -= 2;
- }
- while (length > 8) {
- register unsigned int tmp asm("r2"), tmp2 asm("r3");
- asm volatile(
- "ldmia %0!, {%1, %2}"
- : "+r" (buf), "=&r" (tmp), "=&r" (tmp2));
- length -= 8;
- asm volatile(
- "strh %1, [%0], #4\n\t"
- "mov %1, %1, lsr #16\n\t"
- "strh %1, [%0], #4\n\t"
- "strh %2, [%0], #4\n\t"
- "mov %2, %2, lsr #16\n\t"
- "strh %2, [%0], #4"
- : "+r" (offset), "=&r" (tmp), "=&r" (tmp2));
- }
- while (length > 0) {
- asm volatile("strh %2, [%0], #4"
- : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8)));
- buf += 2;
- length -= 2;
- }
-}
-
-static void
-am_readbuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned int length)
-{
- offset = ISAMEM_BASE + (offset << 1);
- length = (length + 1) & ~1;
- if ((int)buf & 2) {
- unsigned int tmp;
- asm volatile(
- "ldrh %2, [%0], #4\n\t"
- "strb %2, [%1], #1\n\t"
- "mov %2, %2, lsr #8\n\t"
- "strb %2, [%1], #1"
- : "=&r" (offset), "=&r" (buf), "=r" (tmp): "0" (offset), "1" (buf));
- length -= 2;
- }
- while (length > 8) {
- register unsigned int tmp asm("r2"), tmp2 asm("r3"), tmp3;
- asm volatile(
- "ldrh %2, [%0], #4\n\t"
- "ldrh %4, [%0], #4\n\t"
- "ldrh %3, [%0], #4\n\t"
- "orr %2, %2, %4, lsl #16\n\t"
- "ldrh %4, [%0], #4\n\t"
- "orr %3, %3, %4, lsl #16\n\t"
- "stmia %1!, {%2, %3}"
- : "=&r" (offset), "=&r" (buf), "=r" (tmp), "=r" (tmp2), "=r" (tmp3)
- : "0" (offset), "1" (buf));
- length -= 8;
- }
- while (length > 0) {
- unsigned int tmp;
- asm volatile(
- "ldrh %2, [%0], #4\n\t"
- "strb %2, [%1], #1\n\t"
- "mov %2, %2, lsr #8\n\t"
- "strb %2, [%1], #1"
- : "=&r" (offset), "=&r" (buf), "=r" (tmp) : "0" (offset), "1" (buf));
- length -= 2;
- }
-}
-#else
-#error Not compatible
-#endif
-
-static int
-am79c961_ramtest(struct net_device *dev, unsigned int val)
-{
- unsigned char *buffer = kmalloc (65536, GFP_KERNEL);
- int i, error = 0, errorcount = 0;
-
- if (!buffer)
- return 0;
- memset (buffer, val, 65536);
- am_writebuffer(dev, 0, buffer, 65536);
- memset (buffer, val ^ 255, 65536);
- am_readbuffer(dev, 0, buffer, 65536);
- for (i = 0; i < 65536; i++) {
- if (buffer[i] != val && !error) {
- printk ("%s: buffer error (%02X %02X) %05X - ", dev->name, val, buffer[i], i);
- error = 1;
- errorcount ++;
- } else if (error && buffer[i] == val) {
- printk ("%05X\n", i);
- error = 0;
- }
- }
- if (error)
- printk ("10000\n");
- kfree (buffer);
- return errorcount;
-}
-
-static void am79c961_mc_hash(char *addr, u16 *hash)
-{
- int idx, bit;
- u32 crc;
-
- crc = ether_crc_le(ETH_ALEN, addr);
-
- idx = crc >> 30;
- bit = (crc >> 26) & 15;
-
- hash[idx] |= 1 << bit;
-}
-
-static unsigned int am79c961_get_rx_mode(struct net_device *dev, u16 *hash)
-{
- unsigned int mode = MODE_PORT_10BT;
-
- if (dev->flags & IFF_PROMISC) {
- mode |= MODE_PROMISC;
- memset(hash, 0xff, 4 * sizeof(*hash));
- } else if (dev->flags & IFF_ALLMULTI) {
- memset(hash, 0xff, 4 * sizeof(*hash));
- } else {
- struct netdev_hw_addr *ha;
-
- memset(hash, 0, 4 * sizeof(*hash));
-
- netdev_for_each_mc_addr(ha, dev)
- am79c961_mc_hash(ha->addr, hash);
- }
-
- return mode;
-}
-
-static void
-am79c961_init_for_open(struct net_device *dev)
-{
- struct dev_priv *priv = netdev_priv(dev);
- unsigned long flags;
- unsigned char *p;
- u_int hdr_addr, first_free_addr;
- u16 multi_hash[4], mode = am79c961_get_rx_mode(dev, multi_hash);
- int i;
-
- /*
- * Stop the chip.
- */
- spin_lock_irqsave(&priv->chip_lock, flags);
- write_rreg (dev->base_addr, CSR0, CSR0_BABL|CSR0_CERR|CSR0_MISS|CSR0_MERR|CSR0_TINT|CSR0_RINT|CSR0_STOP);
- spin_unlock_irqrestore(&priv->chip_lock, flags);
-
- write_ireg (dev->base_addr, 5, 0x00a0); /* Receive address LED */
- write_ireg (dev->base_addr, 6, 0x0081); /* Collision LED */
- write_ireg (dev->base_addr, 7, 0x0090); /* XMIT LED */
- write_ireg (dev->base_addr, 2, 0x0000); /* MODE register selects media */
-
- for (i = LADRL; i <= LADRH; i++)
- write_rreg (dev->base_addr, i, multi_hash[i - LADRL]);
-
- for (i = PADRL, p = dev->dev_addr; i <= PADRH; i++, p += 2)
- write_rreg (dev->base_addr, i, p[0] | (p[1] << 8));
-
- write_rreg (dev->base_addr, MODE, mode);
- write_rreg (dev->base_addr, POLLINT, 0);
- write_rreg (dev->base_addr, SIZERXR, -RX_BUFFERS);
- write_rreg (dev->base_addr, SIZETXR, -TX_BUFFERS);
-
- first_free_addr = RX_BUFFERS * 8 + TX_BUFFERS * 8 + 16;
- hdr_addr = 0;
-
- priv->rxhead = 0;
- priv->rxtail = 0;
- priv->rxhdr = hdr_addr;
-
- for (i = 0; i < RX_BUFFERS; i++) {
- priv->rxbuffer[i] = first_free_addr;
- am_writeword (dev, hdr_addr, first_free_addr);
- am_writeword (dev, hdr_addr + 2, RMD_OWN);
- am_writeword (dev, hdr_addr + 4, (-1600));
- am_writeword (dev, hdr_addr + 6, 0);
- first_free_addr += 1600;
- hdr_addr += 8;
- }
- priv->txhead = 0;
- priv->txtail = 0;
- priv->txhdr = hdr_addr;
- for (i = 0; i < TX_BUFFERS; i++) {
- priv->txbuffer[i] = first_free_addr;
- am_writeword (dev, hdr_addr, first_free_addr);
- am_writeword (dev, hdr_addr + 2, TMD_STP|TMD_ENP);
- am_writeword (dev, hdr_addr + 4, 0xf000);
- am_writeword (dev, hdr_addr + 6, 0);
- first_free_addr += 1600;
- hdr_addr += 8;
- }
-
- write_rreg (dev->base_addr, BASERXL, priv->rxhdr);
- write_rreg (dev->base_addr, BASERXH, 0);
- write_rreg (dev->base_addr, BASETXL, priv->txhdr);
- write_rreg (dev->base_addr, BASERXH, 0);
- write_rreg (dev->base_addr, CSR0, CSR0_STOP);
- write_rreg (dev->base_addr, CSR3, CSR3_IDONM|CSR3_BABLM|CSR3_DXSUFLO);
- write_rreg (dev->base_addr, CSR4, CSR4_APAD_XMIT|CSR4_MFCOM|CSR4_RCVCCOM|CSR4_TXSTRTM|CSR4_JABM);
- write_rreg (dev->base_addr, CSR0, CSR0_IENA|CSR0_STRT);
-}
-
-static void am79c961_timer(struct timer_list *t)
-{
- struct dev_priv *priv = from_timer(priv, t, timer);
- struct net_device *dev = priv->dev;
- unsigned int lnkstat, carrier;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->chip_lock, flags);
- lnkstat = read_ireg(dev->base_addr, ISALED0) & ISALED0_LNKST;
- spin_unlock_irqrestore(&priv->chip_lock, flags);
- carrier = netif_carrier_ok(dev);
-
- if (lnkstat && !carrier) {
- netif_carrier_on(dev);
- printk("%s: link up\n", dev->name);
- } else if (!lnkstat && carrier) {
- netif_carrier_off(dev);
- printk("%s: link down\n", dev->name);
- }
-
- mod_timer(&priv->timer, jiffies + msecs_to_jiffies(500));
-}
-
-/*
- * Open/initialize the board.
- */
-static int
-am79c961_open(struct net_device *dev)
-{
- struct dev_priv *priv = netdev_priv(dev);
- int ret;
-
- ret = request_irq(dev->irq, am79c961_interrupt, 0, dev->name, dev);
- if (ret)
- return ret;
-
- am79c961_init_for_open(dev);
-
- netif_carrier_off(dev);
-
- priv->timer.expires = jiffies;
- add_timer(&priv->timer);
-
- netif_start_queue(dev);
-
- return 0;
-}
-
-/*
- * The inverse routine to am79c961_open().
- */
-static int
-am79c961_close(struct net_device *dev)
-{
- struct dev_priv *priv = netdev_priv(dev);
- unsigned long flags;
-
- del_timer_sync(&priv->timer);
-
- netif_stop_queue(dev);
- netif_carrier_off(dev);
-
- spin_lock_irqsave(&priv->chip_lock, flags);
- write_rreg (dev->base_addr, CSR0, CSR0_STOP);
- write_rreg (dev->base_addr, CSR3, CSR3_MASKALL);
- spin_unlock_irqrestore(&priv->chip_lock, flags);
-
- free_irq (dev->irq, dev);
-
- return 0;
-}
-
-/*
- * Set or clear promiscuous/multicast mode filter for this adapter.
- */
-static void am79c961_setmulticastlist (struct net_device *dev)
-{
- struct dev_priv *priv = netdev_priv(dev);
- unsigned long flags;
- u16 multi_hash[4], mode = am79c961_get_rx_mode(dev, multi_hash);
- int i, stopped;
-
- spin_lock_irqsave(&priv->chip_lock, flags);
-
- stopped = read_rreg(dev->base_addr, CSR0) & CSR0_STOP;
-
- if (!stopped) {
- /*
- * Put the chip into suspend mode
- */
- write_rreg(dev->base_addr, CTRL1, CTRL1_SPND);
-
- /*
- * Spin waiting for chip to report suspend mode
- */
- while ((read_rreg(dev->base_addr, CTRL1) & CTRL1_SPND) == 0) {
- spin_unlock_irqrestore(&priv->chip_lock, flags);
- nop();
- spin_lock_irqsave(&priv->chip_lock, flags);
- }
- }
-
- /*
- * Update the multicast hash table
- */
- for (i = 0; i < ARRAY_SIZE(multi_hash); i++)
- write_rreg(dev->base_addr, i + LADRL, multi_hash[i]);
-
- /*
- * Write the mode register
- */
- write_rreg(dev->base_addr, MODE, mode);
-
- if (!stopped) {
- /*
- * Put the chip back into running mode
- */
- write_rreg(dev->base_addr, CTRL1, 0);
- }
-
- spin_unlock_irqrestore(&priv->chip_lock, flags);
-}
-
-static void am79c961_timeout(struct net_device *dev, unsigned int txqueue)
-{
- printk(KERN_WARNING "%s: transmit timed out, network cable problem?\n",
- dev->name);
-
- /*
- * ought to do some setup of the tx side here
- */
-
- netif_wake_queue(dev);
-}
-
-/*
- * Transmit a packet
- */
-static netdev_tx_t
-am79c961_sendpacket(struct sk_buff *skb, struct net_device *dev)
-{
- struct dev_priv *priv = netdev_priv(dev);
- unsigned int hdraddr, bufaddr;
- unsigned int head;
- unsigned long flags;
-
- head = priv->txhead;
- hdraddr = priv->txhdr + (head << 3);
- bufaddr = priv->txbuffer[head];
- head += 1;
- if (head >= TX_BUFFERS)
- head = 0;
-
- am_writebuffer (dev, bufaddr, skb->data, skb->len);
- am_writeword (dev, hdraddr + 4, -skb->len);
- am_writeword (dev, hdraddr + 2, TMD_OWN|TMD_STP|TMD_ENP);
- priv->txhead = head;
-
- spin_lock_irqsave(&priv->chip_lock, flags);
- write_rreg (dev->base_addr, CSR0, CSR0_TDMD|CSR0_IENA);
- spin_unlock_irqrestore(&priv->chip_lock, flags);
-
- /*
- * If the next packet is owned by the ethernet device,
- * then the tx ring is full and we can't add another
- * packet.
- */
- if (am_readword(dev, priv->txhdr + (priv->txhead << 3) + 2) & TMD_OWN)
- netif_stop_queue(dev);
-
- dev_consume_skb_any(skb);
-
- return NETDEV_TX_OK;
-}
-
-/*
- * If we have a good packet(s), get it/them out of the buffers.
- */
-static void
-am79c961_rx(struct net_device *dev, struct dev_priv *priv)
-{
- do {
- struct sk_buff *skb;
- u_int hdraddr;
- u_int pktaddr;
- u_int status;
- int len;
-
- hdraddr = priv->rxhdr + (priv->rxtail << 3);
- pktaddr = priv->rxbuffer[priv->rxtail];
-
- status = am_readword (dev, hdraddr + 2);
- if (status & RMD_OWN) /* do we own it? */
- break;
-
- priv->rxtail ++;
- if (priv->rxtail >= RX_BUFFERS)
- priv->rxtail = 0;
-
- if ((status & (RMD_ERR|RMD_STP|RMD_ENP)) != (RMD_STP|RMD_ENP)) {
- am_writeword (dev, hdraddr + 2, RMD_OWN);
- dev->stats.rx_errors++;
- if (status & RMD_ERR) {
- if (status & RMD_FRAM)
- dev->stats.rx_frame_errors++;
- if (status & RMD_CRC)
- dev->stats.rx_crc_errors++;
- } else if (status & RMD_STP)
- dev->stats.rx_length_errors++;
- continue;
- }
-
- len = am_readword(dev, hdraddr + 6);
- skb = netdev_alloc_skb(dev, len + 2);
-
- if (skb) {
- skb_reserve(skb, 2);
-
- am_readbuffer(dev, pktaddr, skb_put(skb, len), len);
- am_writeword(dev, hdraddr + 2, RMD_OWN);
- skb->protocol = eth_type_trans(skb, dev);
- netif_rx(skb);
- dev->stats.rx_bytes += len;
- dev->stats.rx_packets++;
- } else {
- am_writeword (dev, hdraddr + 2, RMD_OWN);
- dev->stats.rx_dropped++;
- break;
- }
- } while (1);
-}
-
-/*
- * Update stats for the transmitted packet
- */
-static void
-am79c961_tx(struct net_device *dev, struct dev_priv *priv)
-{
- do {
- short len;
- u_int hdraddr;
- u_int status;
-
- hdraddr = priv->txhdr + (priv->txtail << 3);
- status = am_readword (dev, hdraddr + 2);
- if (status & TMD_OWN)
- break;
-
- priv->txtail ++;
- if (priv->txtail >= TX_BUFFERS)
- priv->txtail = 0;
-
- if (status & TMD_ERR) {
- u_int status2;
-
- dev->stats.tx_errors++;
-
- status2 = am_readword (dev, hdraddr + 6);
-
- /*
- * Clear the error byte
- */
- am_writeword (dev, hdraddr + 6, 0);
-
- if (status2 & TST_RTRY)
- dev->stats.collisions += 16;
- if (status2 & TST_LCOL)
- dev->stats.tx_window_errors++;
- if (status2 & TST_LCAR)
- dev->stats.tx_carrier_errors++;
- if (status2 & TST_UFLO)
- dev->stats.tx_fifo_errors++;
- continue;
- }
- dev->stats.tx_packets++;
- len = am_readword (dev, hdraddr + 4);
- dev->stats.tx_bytes += -len;
- } while (priv->txtail != priv->txhead);
-
- netif_wake_queue(dev);
-}
-
-static irqreturn_t
-am79c961_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = (struct net_device *)dev_id;
- struct dev_priv *priv = netdev_priv(dev);
- u_int status, n = 100;
- int handled = 0;
-
- do {
- status = read_rreg(dev->base_addr, CSR0);
- write_rreg(dev->base_addr, CSR0, status &
- (CSR0_IENA|CSR0_TINT|CSR0_RINT|
- CSR0_MERR|CSR0_MISS|CSR0_CERR|CSR0_BABL));
-
- if (status & CSR0_RINT) {
- handled = 1;
- am79c961_rx(dev, priv);
- }
- if (status & CSR0_TINT) {
- handled = 1;
- am79c961_tx(dev, priv);
- }
- if (status & CSR0_MISS) {
- handled = 1;
- dev->stats.rx_dropped++;
- }
- if (status & CSR0_CERR) {
- handled = 1;
- mod_timer(&priv->timer, jiffies);
- }
- } while (--n && status & (CSR0_RINT | CSR0_TINT));
-
- return IRQ_RETVAL(handled);
-}
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void am79c961_poll_controller(struct net_device *dev)
-{
- unsigned long flags;
- local_irq_save(flags);
- am79c961_interrupt(dev->irq, dev);
- local_irq_restore(flags);
-}
-#endif
-
-/*
- * Initialise the chip. Note that we always expect
- * to be entered with interrupts enabled.
- */
-static int
-am79c961_hw_init(struct net_device *dev)
-{
- struct dev_priv *priv = netdev_priv(dev);
-
- spin_lock_irq(&priv->chip_lock);
- write_rreg (dev->base_addr, CSR0, CSR0_STOP);
- write_rreg (dev->base_addr, CSR3, CSR3_MASKALL);
- spin_unlock_irq(&priv->chip_lock);
-
- am79c961_ramtest(dev, 0x66);
- am79c961_ramtest(dev, 0x99);
-
- return 0;
-}
-
-static void __init am79c961_banner(void)
-{
- static unsigned version_printed;
-
- if (net_debug && version_printed++ == 0)
- printk(KERN_INFO "%s", version);
-}
-static const struct net_device_ops am79c961_netdev_ops = {
- .ndo_open = am79c961_open,
- .ndo_stop = am79c961_close,
- .ndo_start_xmit = am79c961_sendpacket,
- .ndo_set_rx_mode = am79c961_setmulticastlist,
- .ndo_tx_timeout = am79c961_timeout,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = am79c961_poll_controller,
-#endif
-};
-
-static int am79c961_probe(struct platform_device *pdev)
-{
- struct resource *res;
- struct net_device *dev;
- struct dev_priv *priv;
- int i, ret;
-
- res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- if (!res)
- return -ENODEV;
-
- dev = alloc_etherdev(sizeof(struct dev_priv));
- ret = -ENOMEM;
- if (!dev)
- goto out;
-
- SET_NETDEV_DEV(dev, &pdev->dev);
-
- priv = netdev_priv(dev);
-
- /*
- * Fixed address and IRQ lines here.
- * The PNP initialisation should have been
- * done by the ether bootp loader.
- */
- dev->base_addr = res->start;
- ret = platform_get_irq(pdev, 0);
-
- if (ret < 0) {
- ret = -ENODEV;
- goto nodev;
- }
- dev->irq = ret;
-
- ret = -ENODEV;
- if (!request_region(dev->base_addr, 0x18, dev->name))
- goto nodev;
-
- /*
- * Reset the device.
- */
- inb(dev->base_addr + NET_RESET);
- udelay(5);
-
- /*
- * Check the manufacturer part of the
- * ether address.
- */
- if (inb(dev->base_addr) != 0x08 ||
- inb(dev->base_addr + 2) != 0x00 ||
- inb(dev->base_addr + 4) != 0x2b)
- goto release;
-
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = inb(dev->base_addr + i * 2) & 0xff;
-
- am79c961_banner();
-
- spin_lock_init(&priv->chip_lock);
- priv->dev = dev;
- timer_setup(&priv->timer, am79c961_timer, 0);
-
- if (am79c961_hw_init(dev))
- goto release;
-
- dev->netdev_ops = &am79c961_netdev_ops;
-
- ret = register_netdev(dev);
- if (ret == 0) {
- printk(KERN_INFO "%s: ether address %pM\n",
- dev->name, dev->dev_addr);
- return 0;
- }
-
-release:
- release_region(dev->base_addr, 0x18);
-nodev:
- free_netdev(dev);
-out:
- return ret;
-}
-
-static struct platform_driver am79c961_driver = {
- .probe = am79c961_probe,
- .driver = {
- .name = "am79c961",
- },
-};
-
-static int __init am79c961_init(void)
-{
- return platform_driver_register(&am79c961_driver);
-}
-
-__initcall(am79c961_init);
diff --git a/drivers/net/ethernet/amd/am79c961a.h b/drivers/net/ethernet/amd/am79c961a.h
deleted file mode 100644
index 73679e053ceb..000000000000
--- a/drivers/net/ethernet/amd/am79c961a.h
+++ /dev/null
@@ -1,143 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * linux/drivers/net/ethernet/amd/am79c961a.h
- */
-
-#ifndef _LINUX_am79c961a_H
-#define _LINUX_am79c961a_H
-
-/* use 0 for production, 1 for verification, >2 for debug. debug flags: */
-#define DEBUG_TX 2
-#define DEBUG_RX 4
-#define DEBUG_INT 8
-#define DEBUG_IC 16
-#ifndef NET_DEBUG
-#define NET_DEBUG 0
-#endif
-
-#define NET_UID 0
-#define NET_RDP 0x10
-#define NET_RAP 0x12
-#define NET_RESET 0x14
-#define NET_IDP 0x16
-
-/*
- * RAP registers
- */
-#define CSR0 0
-#define CSR0_INIT 0x0001
-#define CSR0_STRT 0x0002
-#define CSR0_STOP 0x0004
-#define CSR0_TDMD 0x0008
-#define CSR0_TXON 0x0010
-#define CSR0_RXON 0x0020
-#define CSR0_IENA 0x0040
-#define CSR0_INTR 0x0080
-#define CSR0_IDON 0x0100
-#define CSR0_TINT 0x0200
-#define CSR0_RINT 0x0400
-#define CSR0_MERR 0x0800
-#define CSR0_MISS 0x1000
-#define CSR0_CERR 0x2000
-#define CSR0_BABL 0x4000
-#define CSR0_ERR 0x8000
-
-#define CSR3 3
-#define CSR3_EMBA 0x0008
-#define CSR3_DXMT2PD 0x0010
-#define CSR3_LAPPEN 0x0020
-#define CSR3_DXSUFLO 0x0040
-#define CSR3_IDONM 0x0100
-#define CSR3_TINTM 0x0200
-#define CSR3_RINTM 0x0400
-#define CSR3_MERRM 0x0800
-#define CSR3_MISSM 0x1000
-#define CSR3_BABLM 0x4000
-#define CSR3_MASKALL 0x5F00
-
-#define CSR4 4
-#define CSR4_JABM 0x0001
-#define CSR4_JAB 0x0002
-#define CSR4_TXSTRTM 0x0004
-#define CSR4_TXSTRT 0x0008
-#define CSR4_RCVCCOM 0x0010
-#define CSR4_RCVCCO 0x0020
-#define CSR4_MFCOM 0x0100
-#define CSR4_MFCO 0x0200
-#define CSR4_ASTRP_RCV 0x0400
-#define CSR4_APAD_XMIT 0x0800
-
-#define CTRL1 5
-#define CTRL1_SPND 0x0001
-
-#define LADRL 8
-#define LADRM1 9
-#define LADRM2 10
-#define LADRH 11
-#define PADRL 12
-#define PADRM 13
-#define PADRH 14
-
-#define MODE 15
-#define MODE_DISRX 0x0001
-#define MODE_DISTX 0x0002
-#define MODE_LOOP 0x0004
-#define MODE_DTCRC 0x0008
-#define MODE_COLL 0x0010
-#define MODE_DRETRY 0x0020
-#define MODE_INTLOOP 0x0040
-#define MODE_PORT_AUI 0x0000
-#define MODE_PORT_10BT 0x0080
-#define MODE_DRXPA 0x2000
-#define MODE_DRXBA 0x4000
-#define MODE_PROMISC 0x8000
-
-#define BASERXL 24
-#define BASERXH 25
-#define BASETXL 30
-#define BASETXH 31
-
-#define POLLINT 47
-
-#define SIZERXR 76
-#define SIZETXR 78
-
-#define CSR_MFC 112
-
-#define RMD_ENP 0x0100
-#define RMD_STP 0x0200
-#define RMD_CRC 0x0800
-#define RMD_FRAM 0x2000
-#define RMD_ERR 0x4000
-#define RMD_OWN 0x8000
-
-#define TMD_ENP 0x0100
-#define TMD_STP 0x0200
-#define TMD_MORE 0x1000
-#define TMD_ERR 0x4000
-#define TMD_OWN 0x8000
-
-#define TST_RTRY 0x0400
-#define TST_LCAR 0x0800
-#define TST_LCOL 0x1000
-#define TST_UFLO 0x4000
-#define TST_BUFF 0x8000
-
-#define ISALED0 0x0004
-#define ISALED0_LNKST 0x8000
-
-struct dev_priv {
- unsigned long rxbuffer[RX_BUFFERS];
- unsigned long txbuffer[TX_BUFFERS];
- unsigned char txhead;
- unsigned char txtail;
- unsigned char rxhead;
- unsigned char rxtail;
- unsigned long rxhdr;
- unsigned long txhdr;
- spinlock_t chip_lock;
- struct timer_list timer;
- struct net_device *dev;
-};
-
-#endif
diff --git a/drivers/net/ethernet/aquantia/Kconfig b/drivers/net/ethernet/aquantia/Kconfig
index efb33c078a3c..cec2018c84a9 100644
--- a/drivers/net/ethernet/aquantia/Kconfig
+++ b/drivers/net/ethernet/aquantia/Kconfig
@@ -19,7 +19,6 @@ if NET_VENDOR_AQUANTIA
config AQTION
tristate "aQuantia AQtion(tm) Support"
depends on PCI
- depends on X86_64 || ARM64 || COMPILE_TEST
depends on MACSEC || MACSEC=n
help
This enables the support for the aQuantia AQtion(tm) Ethernet card.
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 0fdd19d99d99..0404aafd5ce5 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -2503,8 +2503,10 @@ static int bcm_sysport_probe(struct platform_device *pdev)
priv = netdev_priv(dev);
priv->clk = devm_clk_get_optional(&pdev->dev, "sw_sysport");
- if (IS_ERR(priv->clk))
- return PTR_ERR(priv->clk);
+ if (IS_ERR(priv->clk)) {
+ ret = PTR_ERR(priv->clk);
+ goto err_free_netdev;
+ }
/* Allocate number of TX rings */
priv->tx_rings = devm_kcalloc(&pdev->dev, txq,
@@ -2577,6 +2579,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)
NETIF_F_HW_VLAN_CTAG_TX;
dev->hw_features |= dev->features;
dev->vlan_features |= dev->features;
+ dev->max_mtu = UMAC_MAX_MTU_SIZE;
/* Request the WOL interrupt and advertise suspend if available */
priv->wol_irq_disabled = 1;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 4edd6f8e017e..d10e4f85dd11 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -6790,8 +6790,10 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
if (!ctx->tqm_fp_rings_count)
ctx->tqm_fp_rings_count = bp->max_q;
+ else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
+ ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
- tqm_rings = ctx->tqm_fp_rings_count + 1;
+ tqm_rings = ctx->tqm_fp_rings_count + BNXT_MAX_TQM_SP_RINGS;
ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL);
if (!ctx_pg) {
kfree(ctx);
@@ -6925,7 +6927,8 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
pg_dir = &req.tqm_sp_page_dir,
ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
- i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
+ i < BNXT_MAX_TQM_RINGS;
+ i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
if (!(enables & ena))
continue;
@@ -12887,10 +12890,10 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
*/
static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
{
+ pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
struct net_device *netdev = pci_get_drvdata(pdev);
struct bnxt *bp = netdev_priv(netdev);
int err = 0, off;
- pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
netdev_info(bp->dev, "PCI Slot Reset\n");
@@ -12919,22 +12922,8 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
pci_save_state(pdev);
err = bnxt_hwrm_func_reset(bp);
- if (!err) {
- err = bnxt_hwrm_func_qcaps(bp);
- if (!err && netif_running(netdev))
- err = bnxt_open(netdev);
- }
- bnxt_ulp_start(bp, err);
- if (!err) {
- bnxt_reenable_sriov(bp);
+ if (!err)
result = PCI_ERS_RESULT_RECOVERED;
- }
- }
-
- if (result != PCI_ERS_RESULT_RECOVERED) {
- if (netif_running(netdev))
- dev_close(netdev);
- pci_disable_device(pdev);
}
rtnl_unlock();
@@ -12952,10 +12941,21 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
static void bnxt_io_resume(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
+ struct bnxt *bp = netdev_priv(netdev);
+ int err;
+ netdev_info(bp->dev, "PCI Slot Resume\n");
rtnl_lock();
- netif_device_attach(netdev);
+ err = bnxt_hwrm_func_qcaps(bp);
+ if (!err && netif_running(netdev))
+ err = bnxt_open(netdev);
+
+ bnxt_ulp_start(bp, err);
+ if (!err) {
+ bnxt_reenable_sriov(bp);
+ netif_device_attach(netdev);
+ }
rtnl_unlock();
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 950ea26ae0d2..51996c85547e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1436,6 +1436,11 @@ struct bnxt_ctx_pg_info {
struct bnxt_ctx_pg_info **ctx_pg_tbl;
};
+#define BNXT_MAX_TQM_SP_RINGS 1
+#define BNXT_MAX_TQM_FP_RINGS 8
+#define BNXT_MAX_TQM_RINGS \
+ (BNXT_MAX_TQM_SP_RINGS + BNXT_MAX_TQM_FP_RINGS)
+
struct bnxt_ctx_mem_info {
u32 qp_max_entries;
u16 qp_min_qp1_entries;
@@ -1474,7 +1479,7 @@ struct bnxt_ctx_mem_info {
struct bnxt_ctx_pg_info stat_mem;
struct bnxt_ctx_pg_info mrav_mem;
struct bnxt_ctx_pg_info tim_mem;
- struct bnxt_ctx_pg_info *tqm_mem[9];
+ struct bnxt_ctx_pg_info *tqm_mem[BNXT_MAX_TQM_RINGS];
};
struct bnxt_fw_health {
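
The two bnxt hunks above stop trusting a firmware-reported ring count past the size of the now fixed-size tqm_mem[] table. A hedged sketch of that clamp, with an illustrative constant in place of BNXT_MAX_TQM_FP_RINGS:

	#include <stdio.h>

	#define MAX_FP_RINGS 8	/* illustrative stand-in for BNXT_MAX_TQM_FP_RINGS */

	static unsigned int sanitize_ring_count(unsigned int fw_count,
						unsigned int fallback)
	{
		if (!fw_count)
			return fallback;	/* zero: firmware reported nothing */
		if (fw_count > MAX_FP_RINGS)
			return MAX_FP_RINGS;	/* clamp before sizing/indexing the table */
		return fw_count;
	}

	int main(void)
	{
		printf("%u\n", sanitize_ring_count(0, 4));	/* prints 4 */
		printf("%u\n", sanitize_ring_count(64, 4));	/* prints 8 */
		return 0;
	}
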
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 9ff79d5d14c4..2f8b193a772d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -2532,7 +2532,7 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware
if (rc && ((struct hwrm_err_output *)&resp)->cmd_err ==
NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
- install.flags |=
+ install.flags =
cpu_to_le16(NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
rc = _hwrm_send_message_silent(bp, &install,
@@ -2546,6 +2546,7 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware
* UPDATE directory and try the flash again
*/
defrag_attempted = true;
+ install.flags = 0;
rc = __bnxt_flash_nvram(bp->dev,
BNX_DIR_TYPE_UPDATE,
BNX_DIR_ORDINAL_FIRST,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 8c8368c2f335..64dbbb04b043 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -222,8 +222,12 @@ int bnxt_get_ulp_msix_base(struct bnxt *bp)
int bnxt_get_ulp_stat_ctxs(struct bnxt *bp)
{
- if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
- return BNXT_MIN_ROCE_STAT_CTXS;
+ if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
+ struct bnxt_en_dev *edev = bp->edev;
+
+ if (edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested)
+ return BNXT_MIN_ROCE_STAT_CTXS;
+ }
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index be85dad2e3bc..fcca023f22e5 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -4069,8 +4069,10 @@ static int bcmgenet_probe(struct platform_device *pdev)
clk_disable_unprepare(priv->clk);
err = register_netdev(dev);
- if (err)
+ if (err) {
+ bcmgenet_mii_exit(dev);
goto err;
+ }
return err;
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index d5d910916c2e..814a5b10141d 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -467,7 +467,7 @@ static void macb_set_tx_clk(struct macb *bp, int speed)
{
long ferr, rate, rate_rounded;
- if (!bp->tx_clk || !(bp->caps & MACB_CAPS_CLK_HW_CHG))
+ if (!bp->tx_clk || (bp->caps & MACB_CAPS_CLK_HW_CHG))
return;
switch (speed) {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h b/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
index 92473dda55d9..22a0220123ad 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
@@ -40,6 +40,13 @@
#define TCB_L2T_IX_M 0xfffULL
#define TCB_L2T_IX_V(x) ((x) << TCB_L2T_IX_S)
+#define TCB_T_FLAGS_W 1
+#define TCB_T_FLAGS_S 0
+#define TCB_T_FLAGS_M 0xffffffffffffffffULL
+#define TCB_T_FLAGS_V(x) ((__u64)(x) << TCB_T_FLAGS_S)
+
+#define TCB_FIELD_COOKIE_TFLAG 1
+
#define TCB_SMAC_SEL_W 0
#define TCB_SMAC_SEL_S 24
#define TCB_SMAC_SEL_M 0xffULL
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
index 72bb123d53db..9e2378013642 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
@@ -575,7 +575,11 @@ int send_tx_flowc_wr(struct sock *sk, int compl,
void chtls_tcp_push(struct sock *sk, int flags);
int chtls_push_frames(struct chtls_sock *csk, int comp);
int chtls_set_tcb_tflag(struct sock *sk, unsigned int bit_pos, int val);
+void chtls_set_tcb_field_rpl_skb(struct sock *sk, u16 word,
+ u64 mask, u64 val, u8 cookie,
+ int through_l2t);
int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 mode, int cipher_type);
+void chtls_set_quiesce_ctrl(struct sock *sk, int val);
void skb_entail(struct sock *sk, struct sk_buff *skb, int flags);
unsigned int keyid_to_addr(int start_addr, int keyid);
void free_tls_keyid(struct sock *sk);
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
index 95ab871d8d59..19dc7dc054a2 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
@@ -32,6 +32,7 @@
#include "chtls.h"
#include "chtls_cm.h"
#include "clip_tbl.h"
+#include "t4_tcb.h"
/*
* State transitions and actions for close. Note that if we are in SYN_SENT
@@ -267,7 +268,9 @@ static void chtls_send_reset(struct sock *sk, int mode, struct sk_buff *skb)
if (sk->sk_state != TCP_SYN_RECV)
chtls_send_abort(sk, mode, skb);
else
- goto out;
+ chtls_set_tcb_field_rpl_skb(sk, TCB_T_FLAGS_W,
+ TCB_T_FLAGS_V(TCB_T_FLAGS_M), 0,
+ TCB_FIELD_COOKIE_TFLAG, 1);
return;
out:
@@ -621,7 +624,7 @@ static void chtls_reset_synq(struct listen_ctx *listen_ctx)
while (!skb_queue_empty(&listen_ctx->synq)) {
struct chtls_sock *csk =
- container_of((struct synq *)__skb_dequeue
+ container_of((struct synq *)skb_peek
(&listen_ctx->synq), struct chtls_sock, synq);
struct sock *child = csk->sk;
@@ -1109,6 +1112,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
const struct cpl_pass_accept_req *req,
struct chtls_dev *cdev)
{
+ struct adapter *adap = pci_get_drvdata(cdev->pdev);
struct neighbour *n = NULL;
struct inet_sock *newinet;
const struct iphdr *iph;
@@ -1118,9 +1122,10 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
struct dst_entry *dst;
struct tcp_sock *tp;
struct sock *newsk;
+ bool found = false;
u16 port_id;
int rxq_idx;
- int step;
+ int step, i;
iph = (const struct iphdr *)network_hdr;
newsk = tcp_create_openreq_child(lsk, oreq, cdev->askb);
@@ -1145,22 +1150,27 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
fl6.daddr = ip6h->saddr;
fl6.fl6_dport = inet_rsk(oreq)->ir_rmt_port;
fl6.fl6_sport = htons(inet_rsk(oreq)->ir_num);
- security_req_classify_flow(oreq, flowi6_to_flowi(&fl6));
+ security_req_classify_flow(oreq, flowi6_to_flowi_common(&fl6));
dst = ip6_dst_lookup_flow(sock_net(lsk), lsk, &fl6, NULL);
if (IS_ERR(dst))
goto free_sk;
n = dst_neigh_lookup(dst, &ip6h->saddr);
#endif
}
- if (!n)
- goto free_sk;
+ if (!n || !n->dev)
+ goto free_dst;
ndev = n->dev;
- if (!ndev)
- goto free_dst;
if (is_vlan_dev(ndev))
ndev = vlan_dev_real_dev(ndev);
+ for_each_port(adap, i)
+ if (cdev->ports[i] == ndev)
+ found = true;
+
+ if (!found)
+ goto free_dst;
+
port_id = cxgb4_port_idx(ndev);
csk = chtls_sock_create(cdev);
@@ -1238,6 +1248,8 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
free_csk:
chtls_sock_release(&csk->kref);
free_dst:
+ if (n)
+ neigh_release(n);
dst_release(dst);
free_sk:
inet_csk_prepare_forced_close(newsk);
@@ -1387,7 +1399,7 @@ static void chtls_pass_accept_request(struct sock *sk,
newsk = chtls_recv_sock(sk, oreq, network_hdr, req, cdev);
if (!newsk)
- goto free_oreq;
+ goto reject;
if (chtls_get_module(newsk))
goto reject;
@@ -1403,8 +1415,6 @@ static void chtls_pass_accept_request(struct sock *sk,
kfree_skb(skb);
return;
-free_oreq:
- chtls_reqsk_free(oreq);
reject:
mk_tid_release(reply_skb, 0, tid);
cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
@@ -1589,6 +1599,11 @@ static int chtls_pass_establish(struct chtls_dev *cdev, struct sk_buff *skb)
sk_wake_async(sk, 0, POLL_OUT);
data = lookup_stid(cdev->tids, stid);
+ if (!data) {
+ /* listening server close */
+ kfree_skb(skb);
+ goto unlock;
+ }
lsk = ((struct listen_ctx *)data)->lsk;
bh_lock_sock(lsk);
@@ -1936,6 +1951,8 @@ static void chtls_close_con_rpl(struct sock *sk, struct sk_buff *skb)
else if (tcp_sk(sk)->linger2 < 0 &&
!csk_flag_nochk(csk, CSK_ABORT_SHUTDOWN))
chtls_abort_conn(sk, skb);
+ else if (csk_flag_nochk(csk, CSK_TX_DATA_SENT))
+ chtls_set_quiesce_ctrl(sk, 0);
break;
default:
pr_info("close_con_rpl in bad state %d\n", sk->sk_state);
@@ -1997,39 +2014,6 @@ static void t4_defer_reply(struct sk_buff *skb, struct chtls_dev *cdev,
spin_unlock_bh(&cdev->deferq.lock);
}
-static void send_abort_rpl(struct sock *sk, struct sk_buff *skb,
- struct chtls_dev *cdev, int status, int queue)
-{
- struct cpl_abort_req_rss *req = cplhdr(skb);
- struct sk_buff *reply_skb;
- struct chtls_sock *csk;
-
- csk = rcu_dereference_sk_user_data(sk);
-
- reply_skb = alloc_skb(sizeof(struct cpl_abort_rpl),
- GFP_KERNEL);
-
- if (!reply_skb) {
- req->status = (queue << 1);
- t4_defer_reply(skb, cdev, send_defer_abort_rpl);
- return;
- }
-
- set_abort_rpl_wr(reply_skb, GET_TID(req), status);
- kfree_skb(skb);
-
- set_wr_txq(reply_skb, CPL_PRIORITY_DATA, queue);
- if (csk_conn_inline(csk)) {
- struct l2t_entry *e = csk->l2t_entry;
-
- if (e && sk->sk_state != TCP_SYN_RECV) {
- cxgb4_l2t_send(csk->egress_dev, reply_skb, e);
- return;
- }
- }
- cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
-}
-
static void chtls_send_abort_rpl(struct sock *sk, struct sk_buff *skb,
struct chtls_dev *cdev,
int status, int queue)
@@ -2078,9 +2062,9 @@ static void bl_abort_syn_rcv(struct sock *lsk, struct sk_buff *skb)
queue = csk->txq_idx;
skb->sk = NULL;
+ chtls_send_abort_rpl(child, skb, BLOG_SKB_CB(skb)->cdev,
+ CPL_ABORT_NO_RST, queue);
do_abort_syn_rcv(child, lsk);
- send_abort_rpl(child, skb, BLOG_SKB_CB(skb)->cdev,
- CPL_ABORT_NO_RST, queue);
}
static int abort_syn_rcv(struct sock *sk, struct sk_buff *skb)
@@ -2110,8 +2094,8 @@ static int abort_syn_rcv(struct sock *sk, struct sk_buff *skb)
if (!sock_owned_by_user(psk)) {
int queue = csk->txq_idx;
+ chtls_send_abort_rpl(sk, skb, cdev, CPL_ABORT_NO_RST, queue);
do_abort_syn_rcv(sk, psk);
- send_abort_rpl(sk, skb, cdev, CPL_ABORT_NO_RST, queue);
} else {
skb->sk = sk;
BLOG_SKB_CB(skb)->backlog_rcv = bl_abort_syn_rcv;
@@ -2129,9 +2113,6 @@ static void chtls_abort_req_rss(struct sock *sk, struct sk_buff *skb)
int queue = csk->txq_idx;
if (is_neg_adv(req->status)) {
- if (sk->sk_state == TCP_SYN_RECV)
- chtls_set_tcb_tflag(sk, 0, 0);
-
kfree_skb(skb);
return;
}
@@ -2158,12 +2139,12 @@ static void chtls_abort_req_rss(struct sock *sk, struct sk_buff *skb)
if (sk->sk_state == TCP_SYN_RECV && !abort_syn_rcv(sk, skb))
return;
- chtls_release_resources(sk);
- chtls_conn_done(sk);
}
chtls_send_abort_rpl(sk, skb, BLOG_SKB_CB(skb)->cdev,
rst_status, queue);
+ chtls_release_resources(sk);
+ chtls_conn_done(sk);
}
static void chtls_abort_rpl_rss(struct sock *sk, struct sk_buff *skb)
@@ -2315,6 +2296,28 @@ static int chtls_wr_ack(struct chtls_dev *cdev, struct sk_buff *skb)
return 0;
}
+static int chtls_set_tcb_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
+{
+ struct cpl_set_tcb_rpl *rpl = cplhdr(skb) + RSS_HDR;
+ unsigned int hwtid = GET_TID(rpl);
+ struct sock *sk;
+
+ sk = lookup_tid(cdev->tids, hwtid);
+
+ /* return EINVAL if socket doesn't exist */
+ if (!sk)
+ return -EINVAL;
+
+ /* Reuse the skb, since struct cpl_set_tcb_field is larger
+ * than struct cpl_abort_req
+ */
+ if (TCB_COOKIE_G(rpl->cookie) == TCB_FIELD_COOKIE_TFLAG)
+ chtls_send_abort(sk, CPL_ABORT_SEND_RST, NULL);
+
+ kfree_skb(skb);
+ return 0;
+}
+
chtls_handler_func chtls_handlers[NUM_CPL_CMDS] = {
[CPL_PASS_OPEN_RPL] = chtls_pass_open_rpl,
[CPL_CLOSE_LISTSRV_RPL] = chtls_close_listsrv_rpl,
@@ -2327,5 +2330,6 @@ chtls_handler_func chtls_handlers[NUM_CPL_CMDS] = {
[CPL_CLOSE_CON_RPL] = chtls_conn_cpl,
[CPL_ABORT_REQ_RSS] = chtls_conn_cpl,
[CPL_ABORT_RPL_RSS] = chtls_conn_cpl,
- [CPL_FW4_ACK] = chtls_wr_ack,
+ [CPL_FW4_ACK] = chtls_wr_ack,
+ [CPL_SET_TCB_RPL] = chtls_set_tcb_rpl,
};
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c
index a4fb463af22a..1e67140b0f80 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c
@@ -88,6 +88,24 @@ static int chtls_set_tcb_field(struct sock *sk, u16 word, u64 mask, u64 val)
return ret < 0 ? ret : 0;
}
+void chtls_set_tcb_field_rpl_skb(struct sock *sk, u16 word,
+ u64 mask, u64 val, u8 cookie,
+ int through_l2t)
+{
+ struct sk_buff *skb;
+ unsigned int wrlen;
+
+ wrlen = sizeof(struct cpl_set_tcb_field) + sizeof(struct ulptx_idata);
+ wrlen = roundup(wrlen, 16);
+
+ skb = alloc_skb(wrlen, GFP_KERNEL | __GFP_NOFAIL);
+ if (!skb)
+ return;
+
+ __set_tcb_field(sk, skb, word, mask, val, cookie, 0);
+ send_or_defer(sk, tcp_sk(sk), skb, through_l2t);
+}
+
/*
* Set one of the t_flags bits in the TCB.
*/
@@ -113,6 +131,29 @@ static int chtls_set_tcb_quiesce(struct sock *sk, int val)
TF_RX_QUIESCE_V(val));
}
+void chtls_set_quiesce_ctrl(struct sock *sk, int val)
+{
+ struct chtls_sock *csk;
+ struct sk_buff *skb;
+ unsigned int wrlen;
+ int ret;
+
+ wrlen = sizeof(struct cpl_set_tcb_field) + sizeof(struct ulptx_idata);
+ wrlen = roundup(wrlen, 16);
+
+ skb = alloc_skb(wrlen, GFP_ATOMIC);
+ if (!skb)
+ return;
+
+ csk = rcu_dereference_sk_user_data(sk);
+
+ __set_tcb_field(sk, skb, 1, TF_RX_QUIESCE_V(1), 0, 0, 1);
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);
+ ret = cxgb4_ofld_send(csk->egress_dev, skb);
+ if (ret < 0)
+ kfree_skb(skb);
+}
+
/* TLS Key bitmap processing */
int chtls_init_kmap(struct chtls_dev *cdev, struct cxgb4_lld_info *lldi)
{
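
chtls_set_quiesce_ctrl() above follows the usual ownership rule for control messages: once the send succeeds, the transmit path owns the buffer, so the caller frees it only on failure. A userspace sketch of that rule; struct ctrl_msg and xmit() are mocks, not Chelsio API.

	#include <stdlib.h>

	struct ctrl_msg {
		unsigned int word, val;	/* mock work-request payload */
	};

	static int xmit(struct ctrl_msg *msg)
	{
		(void)msg;
		return -1;		/* mock: pretend the send is refused */
	}

	static void set_quiesce(unsigned int val)
	{
		struct ctrl_msg *msg = malloc(sizeof(*msg));

		if (!msg)
			return;		/* nothing sent, nothing to undo */

		msg->word = 1;
		msg->val = val;

		/* On success the transmit path owns the buffer; free it only
		 * when the send fails, as with cxgb4_ofld_send() above. */
		if (xmit(msg) < 0)
			free(msg);
	}

	int main(void)
	{
		set_quiesce(1);
		return 0;
	}
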
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 0981fe9652e5..3d9b0b161e24 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -1211,7 +1211,7 @@ static int ethoc_probe(struct platform_device *pdev)
ret = mdiobus_register(priv->mdio);
if (ret) {
dev_err(&netdev->dev, "failed to register MDIO bus\n");
- goto free2;
+ goto free3;
}
ret = ethoc_mdio_probe(netdev);
@@ -1243,6 +1243,7 @@ error2:
netif_napi_del(&priv->napi);
error:
mdiobus_unregister(priv->mdio);
+free3:
mdiobus_free(priv->mdio);
free2:
clk_disable_unprepare(priv->clk);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 91cff93dbdae..fb0bcd18ec0c 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -878,7 +878,7 @@ static int dpaa2_eth_build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
swa = (struct dpaa2_eth_swa *)sgt_buf;
swa->type = DPAA2_ETH_SWA_SINGLE;
swa->single.skb = skb;
- swa->sg.sgt_size = sgt_buf_size;
+ swa->single.sgt_size = sgt_buf_size;
/* Separately map the SGT buffer */
sgt_addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index c527f4ee1d3a..0602d5d5d2ee 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -462,6 +462,11 @@ struct bufdesc_ex {
*/
#define FEC_QUIRK_CLEAR_SETUP_MII (1 << 17)
+/* Some link partners do not tolerate the momentary reset of the REF_CLK
+ * frequency when the RNCTL register is cleared by hardware reset.
+ */
+#define FEC_QUIRK_NO_HARD_RESET (1 << 18)
+
struct bufdesc_prop {
int qid;
/* Address of Rx and Tx buffers */
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 04f24c66cf36..9ebdb0e54291 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -100,7 +100,8 @@ static const struct fec_devinfo fec_imx27_info = {
static const struct fec_devinfo fec_imx28_info = {
.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
- FEC_QUIRK_HAS_FRREG | FEC_QUIRK_CLEAR_SETUP_MII,
+ FEC_QUIRK_HAS_FRREG | FEC_QUIRK_CLEAR_SETUP_MII |
+ FEC_QUIRK_NO_HARD_RESET,
};
static const struct fec_devinfo fec_imx6q_info = {
@@ -953,7 +954,8 @@ fec_restart(struct net_device *ndev)
* For i.MX6SX SOC, enet use AXI bus, we use disable MAC
* instead of reset MAC itself.
*/
- if (fep->quirks & FEC_QUIRK_HAS_AVB) {
+ if (fep->quirks & FEC_QUIRK_HAS_AVB ||
+ ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) {
writel(0, fep->hwp + FEC_ECNTRL);
} else {
writel(1, fep->hwp + FEC_ECNTRL);
@@ -2165,9 +2167,9 @@ static int fec_enet_mii_init(struct platform_device *pdev)
fep->mii_bus->parent = &pdev->dev;
err = of_mdiobus_register(fep->mii_bus, node);
- of_node_put(node);
if (err)
goto err_out_free_mdiobus;
+ of_node_put(node);
mii_cnt++;
@@ -2180,6 +2182,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
err_out_free_mdiobus:
mdiobus_free(fep->mii_bus);
err_out:
+ of_node_put(node);
return err;
}
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
index c8e5d889bd81..21de56345503 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
@@ -223,3 +223,4 @@ static struct platform_driver fs_enet_bb_mdio_driver = {
};
module_platform_driver(fs_enet_bb_mdio_driver);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
index 8b51ee142fa3..152f4d83765a 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
@@ -224,3 +224,4 @@ static struct platform_driver fs_enet_fec_mdio_driver = {
};
module_platform_driver(fs_enet_fec_mdio_driver);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index ba8869c3d891..6d853f018d53 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3889,6 +3889,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
INIT_WORK(&ugeth->timeout_work, ucc_geth_timeout_work);
netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, 64);
dev->mtu = 1500;
+ dev->max_mtu = 1518;
ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT);
ugeth->phy_interface = phy_interface;
@@ -3934,12 +3935,12 @@ static int ucc_geth_remove(struct platform_device* ofdev)
struct device_node *np = ofdev->dev.of_node;
unregister_netdev(dev);
- free_netdev(dev);
ucc_geth_memclean(ugeth);
if (of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
of_node_put(ugeth->ug_info->tbi_node);
of_node_put(ugeth->ug_info->phy_node);
+ free_netdev(dev);
return 0;
}
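
The ucc_geth reorder keeps free_netdev() last so nothing reachable through the private data is touched after it is freed. A compact sketch of that teardown ordering; the device/private split is modelled as two separate allocations purely for illustration.

	#include <stdio.h>
	#include <stdlib.h>

	struct priv { char *nodes; };	/* models the OF node refs held by ugeth */
	struct dev  { struct priv *priv; };

	static void teardown(struct dev *d)
	{
		puts("unregister");	/* 1. stop new activity first */

		free(d->priv->nodes);	/* 2. release everything reachable
					 *    through d->priv while d is alive */
		free(d->priv);

		free(d);		/* 3. free the containing object last;
					 *    touching d->priv after this point
					 *    would be the use-after-free fixed above */
	}

	int main(void)
	{
		struct dev *d = malloc(sizeof(*d));

		d->priv = malloc(sizeof(*d->priv));
		d->priv->nodes = malloc(16);
		teardown(d);
		return 0;
	}
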
diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h
index 1a9bdf66a7d8..11d4bf5dc21f 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.h
+++ b/drivers/net/ethernet/freescale/ucc_geth.h
@@ -575,7 +575,14 @@ struct ucc_geth_tx_global_pram {
u32 vtagtable[0x8]; /* 8 4-byte VLAN tags */
u32 tqptr; /* a base pointer to the Tx Queues Memory
Region */
- u8 res2[0x80 - 0x74];
+ u8 res2[0x78 - 0x74];
+ u64 snums_en;
+ u32 l2l3baseptr; /* top byte consists of a few other bit fields */
+
+ u16 mtu[8];
+ u8 res3[0xa8 - 0x94];
+ u32 wrrtablebase; /* top byte is reserved */
+ u8 res4[0xc0 - 0xac];
} __packed;
/* structure representing Extended Filtering Global Parameters in PRAM */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 7165da0ee9aa..a6e3f07caf99 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -415,6 +415,10 @@ static void __lb_other_process(struct hns_nic_ring_data *ring_data,
/* for multi buffer */
new_skb = skb_copy(skb, GFP_ATOMIC);
dev_kfree_skb_any(skb);
+ if (!new_skb) {
+ netdev_err(ndev, "skb alloc failed\n");
+ return;
+ }
skb = new_skb;
check_ok = 0;
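
The hns loopback fix adds the missing NULL check after replacing an skb with its copy; note the original buffer is freed either way. A plain-C sketch of the same replace-with-copy shape:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	static char *replace_with_copy(char *buf, size_t len)
	{
		char *copy = malloc(len);

		if (copy)
			memcpy(copy, buf, len);
		free(buf);		/* the original is always consumed */
		return copy;		/* may be NULL: caller must check before use */
	}

	int main(void)
	{
		char *copy = replace_with_copy(strdup("payload"), 8);

		if (!copy)		/* the check the fix above adds */
			return 1;
		printf("%s\n", copy);
		free(copy);
		return 0;
	}
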
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index fb5e8842983c..33defa4c180a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -169,7 +169,7 @@ struct hclgevf_mbx_arq_ring {
#define hclge_mbx_ring_ptr_move_crq(crq) \
(crq->next_to_use = (crq->next_to_use + 1) % crq->desc_num)
#define hclge_mbx_tail_ptr_move_arq(arq) \
- (arq.tail = (arq.tail + 1) % HCLGE_MBX_MAX_ARQ_MSG_SIZE)
+ (arq.tail = (arq.tail + 1) % HCLGE_MBX_MAX_ARQ_MSG_NUM)
#define hclge_mbx_head_ptr_move_arq(arq) \
- (arq.head = (arq.head + 1) % HCLGE_MBX_MAX_ARQ_MSG_SIZE)
+ (arq.head = (arq.head + 1) % HCLGE_MBX_MAX_ARQ_MSG_NUM)
#endif
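
The hclge_mbx rename suggests the wrap arithmetic had been using a per-message size where the ring's entry count belongs (assuming ..._MSG_SIZE and ..._MSG_NUM mean what their names imply). A tiny sketch of why the modulus must be the slot count:

	#include <stdio.h>

	#define SLOT_SIZE 8	/* bytes per message (the wrong modulus) */
	#define NUM_SLOTS 1024	/* ring capacity in entries (the right one) */

	static unsigned int advance(unsigned int idx)
	{
		/* Wrapping by SLOT_SIZE instead would confine head/tail to the
		 * first 8 slots and corrupt the ring bookkeeping. */
		return (idx + 1) % NUM_SLOTS;
	}

	int main(void)
	{
		printf("%u -> %u\n", NUM_SLOTS - 1, advance(NUM_SLOTS - 1)); /* 1023 -> 0 */
		return 0;
	}
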
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index e6f37f91c489..c242883fea5d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -752,7 +752,8 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
- if (hdev->hw.mac.phydev) {
+ if (hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
+ hdev->hw.mac.phydev->drv->set_loopback) {
count += 1;
handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
}
@@ -4537,8 +4538,8 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle,
req->ipv4_sctp_en = tuple_sets;
break;
case SCTP_V6_FLOW:
- if ((nfc->data & RXH_L4_B_0_1) ||
- (nfc->data & RXH_L4_B_2_3))
+ if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
+ (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
return -EINVAL;
req->ipv6_sctp_en = tuple_sets;
@@ -4730,6 +4731,8 @@ static void hclge_rss_init_cfg(struct hclge_dev *hdev)
vport[i].rss_tuple_sets.ipv6_udp_en =
HCLGE_RSS_INPUT_TUPLE_OTHER;
vport[i].rss_tuple_sets.ipv6_sctp_en =
+ hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
+ HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
HCLGE_RSS_INPUT_TUPLE_SCTP;
vport[i].rss_tuple_sets.ipv6_fragment_en =
HCLGE_RSS_INPUT_TUPLE_OTHER;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 50a294dfaff5..ca46bc9110d7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -107,6 +107,8 @@
#define HCLGE_D_IP_BIT BIT(2)
#define HCLGE_S_IP_BIT BIT(3)
#define HCLGE_V_TAG_BIT BIT(4)
+#define HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT \
+ (HCLGE_D_IP_BIT | HCLGE_S_IP_BIT | HCLGE_V_TAG_BIT)
#define HCLGE_RSS_TC_SIZE_0 1
#define HCLGE_RSS_TC_SIZE_1 2
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 145757cb70f9..674b3a22e91f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -917,8 +917,8 @@ static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
req->ipv4_sctp_en = tuple_sets;
break;
case SCTP_V6_FLOW:
- if ((nfc->data & RXH_L4_B_0_1) ||
- (nfc->data & RXH_L4_B_2_3))
+ if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
+ (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
return -EINVAL;
req->ipv6_sctp_en = tuple_sets;
@@ -2502,7 +2502,10 @@ static void hclgevf_rss_init_cfg(struct hclgevf_dev *hdev)
tuple_sets->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
tuple_sets->ipv6_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
tuple_sets->ipv6_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
- tuple_sets->ipv6_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP;
+ tuple_sets->ipv6_sctp_en =
+ hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
+ HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT :
+ HCLGEVF_RSS_INPUT_TUPLE_SCTP;
tuple_sets->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index 1b183bc35604..f6d817a3edcb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -122,6 +122,8 @@
#define HCLGEVF_D_IP_BIT BIT(2)
#define HCLGEVF_S_IP_BIT BIT(3)
#define HCLGEVF_V_TAG_BIT BIT(4)
+#define HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT \
+ (HCLGEVF_D_IP_BIT | HCLGEVF_S_IP_BIT | HCLGEVF_V_TAG_BIT)
#define HCLGEVF_STATS_TIMER_INTERVAL 36U
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index a2191392ca4f..f79034c786c8 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -955,6 +955,7 @@ static void release_resources(struct ibmvnic_adapter *adapter)
release_rx_pools(adapter);
release_napi(adapter);
+ release_login_buffer(adapter);
release_login_rsp_buffer(adapter);
}
@@ -2171,10 +2172,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
napi_schedule(&adapter->napi[i]);
if (adapter->reset_reason == VNIC_RESET_FAILOVER ||
- adapter->reset_reason == VNIC_RESET_MOBILITY) {
- call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
- call_netdevice_notifiers(NETDEV_RESEND_IGMP, netdev);
- }
+ adapter->reset_reason == VNIC_RESET_MOBILITY)
+ __netdev_notify_peers(netdev);
rc = 0;
@@ -2249,8 +2248,7 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
goto out;
}
- call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
- call_netdevice_notifiers(NETDEV_RESEND_IGMP, netdev);
+ __netdev_notify_peers(netdev);
out:
/* restore adapter state if reset failed */
if (rc)
@@ -2344,8 +2342,7 @@ static void __ibmvnic_reset(struct work_struct *work)
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(60 * HZ);
}
- } else if (!(rwi->reset_reason == VNIC_RESET_FATAL &&
- adapter->from_passive_init)) {
+ } else {
rc = do_reset(adapter, rwi, reset_state);
}
kfree(rwi);
@@ -2984,9 +2981,7 @@ static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
int rc;
if (!scrq) {
- netdev_dbg(adapter->netdev,
- "Invalid scrq reset. irq (%d) or msgs (%p).\n",
- scrq->irq, scrq->msgs);
+ netdev_dbg(adapter->netdev, "Invalid scrq reset.\n");
return -EINVAL;
}
@@ -3876,7 +3871,9 @@ static int send_login(struct ibmvnic_adapter *adapter)
return -1;
}
+ release_login_buffer(adapter);
release_login_rsp_buffer(adapter);
+
client_data_len = vnic_client_data_len(adapter);
buffer_size =
@@ -5087,6 +5084,12 @@ static void ibmvnic_tasklet(struct tasklet_struct *t)
while (!done) {
/* Pull all the valid messages off the CRQ */
while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
+ /* This barrier makes sure ibmvnic_next_crq()'s
+ * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
+ * before ibmvnic_handle_crq()'s
+ * switch(gen_crq->first) and switch(gen_crq->cmd).
+ */
+ dma_rmb();
ibmvnic_handle_crq(crq, adapter);
crq->generic.first = 0;
}
@@ -5441,11 +5444,6 @@ static int ibmvnic_remove(struct vio_dev *dev)
unsigned long flags;
spin_lock_irqsave(&adapter->state_lock, flags);
- if (test_bit(0, &adapter->resetting)) {
- spin_unlock_irqrestore(&adapter->state_lock, flags);
- return -EBUSY;
- }
-
adapter->state = VNIC_REMOVING;
spin_unlock_irqrestore(&adapter->state_lock, flags);
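
The ibmvnic dma_rmb() above orders the descriptor-valid check before reads of the descriptor body. A userspace analogue of the same publish/consume ordering, using a C11 acquire load in place of the kernel barrier:

	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdio.h>

	#define DESC_VALID 0x80

	struct desc {
		_Atomic uint8_t first;	/* producer writes payload, then sets VALID */
		uint32_t data;
	};

	static int consume(struct desc *d, uint32_t *out)
	{
		/* The acquire load plays the role of the check + dma_rmb():
		 * the payload read below cannot be hoisted above it. */
		if (!(atomic_load_explicit(&d->first, memory_order_acquire) & DESC_VALID))
			return 0;

		*out = d->data;
		atomic_store_explicit(&d->first, 0, memory_order_release); /* recycle slot */
		return 1;
	}

	int main(void)
	{
		struct desc d = { .data = 42 };
		uint32_t v = 0;

		atomic_store_explicit(&d.first, DESC_VALID, memory_order_release);
		printf("got=%d v=%u\n", consume(&d, &v), v);
		return 0;
	}
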
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index ba7a0f8f6937..5b2143f4b1f8 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -436,6 +436,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca);
#define FLAG2_DFLT_CRC_STRIPPING BIT(12)
#define FLAG2_CHECK_RX_HWTSTAMP BIT(13)
#define FLAG2_CHECK_SYSTIM_OVERFLOW BIT(14)
+#define FLAG2_ENABLE_S0IX_FLOWS BIT(15)
#define E1000_RX_DESC_PS(R, i) \
(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 03215b0aee4b..06442e6bef73 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -23,6 +23,13 @@ struct e1000_stats {
int stat_offset;
};
+static const char e1000e_priv_flags_strings[][ETH_GSTRING_LEN] = {
+#define E1000E_PRIV_FLAGS_S0IX_ENABLED BIT(0)
+ "s0ix-enabled",
+};
+
+#define E1000E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(e1000e_priv_flags_strings)
+
#define E1000_STAT(str, m) { \
.stat_string = str, \
.type = E1000_STATS, \
@@ -1776,6 +1783,8 @@ static int e1000e_get_sset_count(struct net_device __always_unused *netdev,
return E1000_TEST_LEN;
case ETH_SS_STATS:
return E1000_STATS_LEN;
+ case ETH_SS_PRIV_FLAGS:
+ return E1000E_PRIV_FLAGS_STR_LEN;
default:
return -EOPNOTSUPP;
}
@@ -2097,6 +2106,10 @@ static void e1000_get_strings(struct net_device __always_unused *netdev,
p += ETH_GSTRING_LEN;
}
break;
+ case ETH_SS_PRIV_FLAGS:
+ memcpy(data, e1000e_priv_flags_strings,
+ E1000E_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
+ break;
}
}
@@ -2305,6 +2318,37 @@ static int e1000e_get_ts_info(struct net_device *netdev,
return 0;
}
+static u32 e1000e_get_priv_flags(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ u32 priv_flags = 0;
+
+ if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS)
+ priv_flags |= E1000E_PRIV_FLAGS_S0IX_ENABLED;
+
+ return priv_flags;
+}
+
+static int e1000e_set_priv_flags(struct net_device *netdev, u32 priv_flags)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ unsigned int flags2 = adapter->flags2;
+
+ flags2 &= ~FLAG2_ENABLE_S0IX_FLOWS;
+ if (priv_flags & E1000E_PRIV_FLAGS_S0IX_ENABLED) {
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (hw->mac.type < e1000_pch_cnp)
+ return -EINVAL;
+ flags2 |= FLAG2_ENABLE_S0IX_FLOWS;
+ }
+
+ if (flags2 != adapter->flags2)
+ adapter->flags2 = flags2;
+
+ return 0;
+}
+
static const struct ethtool_ops e1000_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
.get_drvinfo = e1000_get_drvinfo,
@@ -2336,6 +2380,8 @@ static const struct ethtool_ops e1000_ethtool_ops = {
.set_eee = e1000e_set_eee,
.get_link_ksettings = e1000_get_link_ksettings,
.set_link_ksettings = e1000_set_link_ksettings,
+ .get_priv_flags = e1000e_get_priv_flags,
+ .set_priv_flags = e1000e_set_priv_flags,
};
void e1000e_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 9aa6fad8ed47..6fb46682b058 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1240,6 +1240,9 @@ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
return 0;
if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
+ struct e1000_adapter *adapter = hw->adapter;
+ bool firmware_bug = false;
+
if (force) {
/* Request ME un-configure ULP mode in the PHY */
mac_reg = er32(H2ME);
@@ -1248,16 +1251,24 @@ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
ew32(H2ME, mac_reg);
}
- /* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
+ /* Poll up to 2.5 seconds for ME to clear ULP_CFG_DONE.
+ * If this takes more than 1 second, show a warning indicating a
+ * firmware bug
+ */
while (er32(FWSM) & E1000_FWSM_ULP_CFG_DONE) {
- if (i++ == 30) {
+ if (i++ == 250) {
ret_val = -E1000_ERR_PHY;
goto out;
}
+ if (i > 100 && !firmware_bug)
+ firmware_bug = true;
usleep_range(10000, 11000);
}
- e_dbg("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
+ if (firmware_bug)
+ e_warn("ULP_CONFIG_DONE took %dmsec. This is a firmware bug\n", i * 10);
+ else
+ e_dbg("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
if (force) {
mac_reg = er32(H2ME);
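
The e1000e polling change keeps waiting well past the expected time but flags a suspected firmware problem once a softer threshold passes. A sketch of that two-threshold poll; the busy() condition is mocked.

	#include <stdbool.h>
	#include <stdio.h>
	#include <unistd.h>

	static bool busy(int iter) { return iter < 42; }	/* mocked hardware bit */

	static int wait_cfg_done(void)
	{
		bool warned = false;
		int i = 0;

		while (busy(i)) {
			if (i++ == 250)			/* hard stop: ~2.5 s */
				return -1;
			if (i > 100 && !warned) {	/* soft limit: ~1 s */
				fprintf(stderr, "slow clear - suspected firmware bug\n");
				warned = true;
			}
			usleep(10000);			/* 10 ms per attempt */
		}
		printf("cleared after %d msec\n", i * 10);
		return 0;
	}

	int main(void)
	{
		return wait_cfg_done() ? 1 : 0;
	}
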
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 128ab6898070..e9b82c209c2d 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -103,45 +103,6 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = {
{0, NULL}
};
-struct e1000e_me_supported {
- u16 device_id; /* supported device ID */
-};
-
-static const struct e1000e_me_supported me_supported[] = {
- {E1000_DEV_ID_PCH_LPT_I217_LM},
- {E1000_DEV_ID_PCH_LPTLP_I218_LM},
- {E1000_DEV_ID_PCH_I218_LM2},
- {E1000_DEV_ID_PCH_I218_LM3},
- {E1000_DEV_ID_PCH_SPT_I219_LM},
- {E1000_DEV_ID_PCH_SPT_I219_LM2},
- {E1000_DEV_ID_PCH_LBG_I219_LM3},
- {E1000_DEV_ID_PCH_SPT_I219_LM4},
- {E1000_DEV_ID_PCH_SPT_I219_LM5},
- {E1000_DEV_ID_PCH_CNP_I219_LM6},
- {E1000_DEV_ID_PCH_CNP_I219_LM7},
- {E1000_DEV_ID_PCH_ICP_I219_LM8},
- {E1000_DEV_ID_PCH_ICP_I219_LM9},
- {E1000_DEV_ID_PCH_CMP_I219_LM10},
- {E1000_DEV_ID_PCH_CMP_I219_LM11},
- {E1000_DEV_ID_PCH_CMP_I219_LM12},
- {E1000_DEV_ID_PCH_TGP_I219_LM13},
- {E1000_DEV_ID_PCH_TGP_I219_LM14},
- {E1000_DEV_ID_PCH_TGP_I219_LM15},
- {0}
-};
-
-static bool e1000e_check_me(u16 device_id)
-{
- struct e1000e_me_supported *id;
-
- for (id = (struct e1000e_me_supported *)me_supported;
- id->device_id; id++)
- if (device_id == id->device_id)
- return true;
-
- return false;
-}
-
/**
* __ew32_prepare - prepare to write to MAC CSR register on certain parts
* @hw: pointer to the HW structure
@@ -6962,7 +6923,6 @@ static __maybe_unused int e1000e_pm_suspend(struct device *dev)
struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
struct e1000_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = to_pci_dev(dev);
- struct e1000_hw *hw = &adapter->hw;
int rc;
e1000e_flush_lpic(pdev);
@@ -6970,13 +6930,13 @@ static __maybe_unused int e1000e_pm_suspend(struct device *dev)
e1000e_pm_freeze(dev);
rc = __e1000_shutdown(pdev, false);
- if (rc)
+ if (rc) {
e1000e_pm_thaw(dev);
-
- /* Introduce S0ix implementation */
- if (hw->mac.type >= e1000_pch_cnp &&
- !e1000e_check_me(hw->adapter->pdev->device))
- e1000e_s0ix_entry_flow(adapter);
+ } else {
+ /* Introduce S0ix implementation */
+ if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS)
+ e1000e_s0ix_entry_flow(adapter);
+ }
return rc;
}
@@ -6986,12 +6946,10 @@ static __maybe_unused int e1000e_pm_resume(struct device *dev)
struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
struct e1000_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = to_pci_dev(dev);
- struct e1000_hw *hw = &adapter->hw;
int rc;
/* Introduce S0ix implementation */
- if (hw->mac.type >= e1000_pch_cnp &&
- !e1000e_check_me(hw->adapter->pdev->device))
+ if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS)
e1000e_s0ix_exit_flow(adapter);
rc = __e1000_resume(pdev);
@@ -7655,6 +7613,9 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!(adapter->flags & FLAG_HAS_AMT))
e1000e_get_hw_control(adapter);
+ if (hw->mac.type >= e1000_pch_cnp)
+ adapter->flags2 |= FLAG2_ENABLE_S0IX_FLOWS;
+
strlcpy(netdev->name, "eth%d", sizeof(netdev->name));
err = register_netdev(netdev);
if (err)
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index d231a2cdd98f..118473dfdcbd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -120,6 +120,7 @@ enum i40e_state_t {
__I40E_RESET_INTR_RECEIVED,
__I40E_REINIT_REQUESTED,
__I40E_PF_RESET_REQUESTED,
+ __I40E_PF_RESET_AND_REBUILD_REQUESTED,
__I40E_CORE_RESET_REQUESTED,
__I40E_GLOBAL_RESET_REQUESTED,
__I40E_EMP_RESET_INTR_RECEIVED,
@@ -146,6 +147,8 @@ enum i40e_state_t {
};
#define I40E_PF_RESET_FLAG BIT_ULL(__I40E_PF_RESET_REQUESTED)
+#define I40E_PF_RESET_AND_REBUILD_FLAG \
+ BIT_ULL(__I40E_PF_RESET_AND_REBUILD_REQUESTED)
/* VSI state flags */
enum i40e_vsi_state_t {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 1337686bd099..1db482d310c2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -36,6 +36,8 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
+static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
+ bool lock_acquired);
static int i40e_reset(struct i40e_pf *pf);
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf);
@@ -8536,6 +8538,14 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
"FW LLDP is disabled\n" :
"FW LLDP is enabled\n");
+ } else if (reset_flags & I40E_PF_RESET_AND_REBUILD_FLAG) {
+ /* Request a PF Reset
+ *
+ * Resets PF and reinitializes the PF's VSI.
+ */
+ i40e_prep_for_reset(pf, lock_acquired);
+ i40e_reset_and_rebuild(pf, true, lock_acquired);
+
} else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
int v;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 729c4f0d5ac5..1b6ec9be155a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -55,12 +55,7 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
pfe.severity = PF_EVENT_SEVERITY_INFO;
-
- /* Always report link is down if the VF queues aren't enabled */
- if (!vf->queues_enabled) {
- pfe.event_data.link_event.link_status = false;
- pfe.event_data.link_event.link_speed = 0;
- } else if (vf->link_forced) {
+ if (vf->link_forced) {
pfe.event_data.link_event.link_status = vf->link_up;
pfe.event_data.link_event.link_speed =
(vf->link_up ? i40e_virtchnl_link_speed(ls->link_speed) : 0);
@@ -70,7 +65,6 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
pfe.event_data.link_event.link_speed =
i40e_virtchnl_link_speed(ls->link_speed);
}
-
i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
0, (u8 *)&pfe, sizeof(pfe), NULL);
}
@@ -1772,7 +1766,7 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
if (num_vfs) {
if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
- i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
+ i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
}
ret = i40e_pci_sriov_enable(pdev, num_vfs);
goto sriov_configure_out;
@@ -1781,7 +1775,7 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
if (!pci_vfs_assigned(pf->pdev)) {
i40e_free_vfs(pf);
pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
- i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
+ i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
} else {
dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
ret = -EINVAL;
@@ -2443,8 +2437,6 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
}
}
- vf->queues_enabled = true;
-
error_param:
/* send the response to the VF */
return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
@@ -2466,9 +2458,6 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
struct i40e_pf *pf = vf->pf;
i40e_status aq_ret = 0;
- /* Immediately mark queues as disabled */
- vf->queues_enabled = false;
-
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
@@ -4046,20 +4035,16 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
goto error_param;
vf = &pf->vf[vf_id];
- vsi = pf->vsi[vf->lan_vsi_idx];
/* When the VF is resetting, wait until it is done.
* It can take up to 200 milliseconds,
* but wait for up to 300 milliseconds to be safe.
- * If the VF is indeed in reset, the vsi pointer has
- * to show on the newly loaded vsi under pf->vsi[id].
+ * Acquire the VSI pointer only after the VF has been
+ * properly initialized.
*/
for (i = 0; i < 15; i++) {
- if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
- if (i > 0)
- vsi = pf->vsi[vf->lan_vsi_idx];
+ if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
break;
- }
msleep(20);
}
if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
@@ -4068,6 +4053,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
ret = -EAGAIN;
goto error_param;
}
+ vsi = pf->vsi[vf->lan_vsi_idx];
if (is_multicast_ether_addr(mac)) {
dev_err(&pf->pdev->dev,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 5491215d81de..091e32c1bb46 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -98,7 +98,6 @@ struct i40e_vf {
unsigned int tx_rate; /* Tx bandwidth limit in Mbps */
bool link_forced;
bool link_up; /* only valid if VF link is forced */
- bool queues_enabled; /* true if the VF queues are enabled */
bool spoofchk;
u16 num_vlan;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index bfa84bfb0488..492ce213208d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -220,8 +220,11 @@ bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
} while (count);
no_buffers:
- if (rx_ring->next_to_use != ntu)
+ if (rx_ring->next_to_use != ntu) {
+ /* clear the status bits for the next_to_use descriptor */
+ rx_desc->wb.qword1.status_error_len = 0;
i40e_release_rx_desc(rx_ring, ntu);
+ }
return ok;
}
@@ -345,12 +348,12 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
* SBP is *not* set in PRT_SBPVSI (default not set).
*/
skb = i40e_construct_skb_zc(rx_ring, *bi);
- *bi = NULL;
if (!skb) {
rx_ring->rx_stats.alloc_buff_failed++;
break;
}
+ *bi = NULL;
cleaned_count++;
i40e_inc_ntc(rx_ring);
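
The i40e_xsk reorder defers clearing the buffer slot until the skb was actually constructed, so an allocation failure leaves the slot intact for a later retry. Mocked sketch of the move-after-check pattern:

	#include <stdio.h>
	#include <stdlib.h>

	struct buf { int id; };

	static void *build_skb_from(struct buf *b)
	{
		(void)b;
		return NULL;		/* mock: allocation fails */
	}

	static int process_slot(struct buf **slot)
	{
		void *skb = build_skb_from(*slot);

		if (!skb)
			return -1;	/* slot untouched: safe to retry later */

		*slot = NULL;		/* only consume the slot once the skb exists */
		free(skb);
		return 0;
	}

	int main(void)
	{
		struct buf b = { 1 };
		struct buf *slot = &b;

		printf("%d (slot %s)\n", process_slot(&slot),
		       slot ? "kept" : "consumed");
		return 0;
	}
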
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 95543dfd4fe7..0a867d64d467 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -1834,11 +1834,9 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter)
netif_tx_stop_all_queues(netdev);
if (CLIENT_ALLOWED(adapter)) {
err = iavf_lan_add_device(adapter);
- if (err) {
- rtnl_unlock();
+ if (err)
dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
err);
- }
}
dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
if (netdev->features & NETIF_F_GRO)
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 56725356a17b..fa1e128c24ec 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -68,7 +68,9 @@
#define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16)
#define ICE_AQ_LEN 64
#define ICE_MBXSQ_LEN 64
-#define ICE_MIN_MSIX 2
+#define ICE_MIN_LAN_TXRX_MSIX 1
+#define ICE_MIN_LAN_OICR_MSIX 1
+#define ICE_MIN_MSIX (ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_LAN_OICR_MSIX)
#define ICE_FDIR_MSIX 1
#define ICE_NO_VSI 0xffff
#define ICE_VSI_MAP_CONTIG 0
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 9e8e9531cd87..69c113a4de7e 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -3258,8 +3258,8 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
*/
static int ice_get_max_txq(struct ice_pf *pf)
{
- return min_t(int, num_online_cpus(),
- pf->hw.func_caps.common_cap.num_txq);
+ return min3(pf->num_lan_msix, (u16)num_online_cpus(),
+ (u16)pf->hw.func_caps.common_cap.num_txq);
}
/**
@@ -3268,8 +3268,8 @@ static int ice_get_max_txq(struct ice_pf *pf)
*/
static int ice_get_max_rxq(struct ice_pf *pf)
{
- return min_t(int, num_online_cpus(),
- pf->hw.func_caps.common_cap.num_rxq);
+ return min3(pf->num_lan_msix, (u16)num_online_cpus(),
+ (u16)pf->hw.func_caps.common_cap.num_rxq);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
index 2d27f66ac853..192729546bbf 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
@@ -1576,7 +1576,13 @@ ice_set_fdir_input_set(struct ice_vsi *vsi, struct ethtool_rx_flow_spec *fsp,
sizeof(struct in6_addr));
input->ip.v6.l4_header = fsp->h_u.usr_ip6_spec.l4_4_bytes;
input->ip.v6.tc = fsp->h_u.usr_ip6_spec.tclass;
- input->ip.v6.proto = fsp->h_u.usr_ip6_spec.l4_proto;
+
+ /* if no protocol requested, use IPPROTO_NONE */
+ if (!fsp->m_u.usr_ip6_spec.l4_proto)
+ input->ip.v6.proto = IPPROTO_NONE;
+ else
+ input->ip.v6.proto = fsp->h_u.usr_ip6_spec.l4_proto;
+
memcpy(input->mask.v6.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst,
sizeof(struct in6_addr));
memcpy(input->mask.v6.src_ip, fsp->m_u.usr_ip6_spec.ip6src,
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 3df67486d42d..ad9c22a1b97a 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -161,8 +161,9 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
switch (vsi->type) {
case ICE_VSI_PF:
- vsi->alloc_txq = min_t(int, ice_get_avail_txq_count(pf),
- num_online_cpus());
+ vsi->alloc_txq = min3(pf->num_lan_msix,
+ ice_get_avail_txq_count(pf),
+ (u16)num_online_cpus());
if (vsi->req_txq) {
vsi->alloc_txq = vsi->req_txq;
vsi->num_txq = vsi->req_txq;
@@ -174,8 +175,9 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
vsi->alloc_rxq = 1;
} else {
- vsi->alloc_rxq = min_t(int, ice_get_avail_rxq_count(pf),
- num_online_cpus());
+ vsi->alloc_rxq = min3(pf->num_lan_msix,
+ ice_get_avail_rxq_count(pf),
+ (u16)num_online_cpus());
if (vsi->req_rxq) {
vsi->alloc_rxq = vsi->req_rxq;
vsi->num_rxq = vsi->req_rxq;
@@ -184,7 +186,9 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
pf->num_lan_rx = vsi->alloc_rxq;
- vsi->num_q_vectors = max_t(int, vsi->alloc_rxq, vsi->alloc_txq);
+ vsi->num_q_vectors = min_t(int, pf->num_lan_msix,
+ max_t(int, vsi->alloc_rxq,
+ vsi->alloc_txq));
break;
case ICE_VSI_VF:
vf = &pf->vf[vsi->vf_id];
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index c52b9bb0e3ab..e10ca8929f85 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -3430,18 +3430,14 @@ static int ice_ena_msix_range(struct ice_pf *pf)
if (v_actual < v_budget) {
dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
v_budget, v_actual);
-/* 2 vectors each for LAN and RDMA (traffic + OICR), one for flow director */
-#define ICE_MIN_LAN_VECS 2
-#define ICE_MIN_RDMA_VECS 2
-#define ICE_MIN_VECS (ICE_MIN_LAN_VECS + ICE_MIN_RDMA_VECS + 1)
- if (v_actual < ICE_MIN_LAN_VECS) {
+ if (v_actual < ICE_MIN_MSIX) {
/* error if we can't get minimum vectors */
pci_disable_msix(pf->pdev);
err = -ERANGE;
goto msix_err;
} else {
- pf->num_lan_msix = ICE_MIN_LAN_VECS;
+ pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
}
}
@@ -4884,9 +4880,15 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
goto err_update_filters;
}
- /* Add filter for new MAC. If filter exists, just return success */
+ /* Add filter for new MAC. If filter exists, return success */
status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
if (status == ICE_ERR_ALREADY_EXISTS) {
+ /* Although this MAC filter is already present in hardware, it's
+ * possible in some cases (e.g. bonding) that dev_addr was
+ * modified outside of the driver and needs to be restored back
+ * to this value.
+ */
+ memcpy(netdev->dev_addr, mac, netdev->addr_len);
netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
return 0;
}
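
The ice changes above size queue counts by the scarcest of MSI-X vectors, online CPUs, and hardware queues. A one-function sketch of that min3() sizing; the resource numbers are illustrative.

	#include <stdio.h>

	static unsigned int min3u(unsigned int a, unsigned int b, unsigned int c)
	{
		unsigned int m = a < b ? a : b;

		return m < c ? m : c;
	}

	int main(void)
	{
		unsigned int msix = 3, cpus = 16, hw_queues = 64;

		/* with only 3 vectors granted, queues must shrink to match */
		printf("queues = %u\n", min3u(msix, cpus, hw_queues)); /* prints 3 */
		return 0;
	}
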
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index a2d0aad8cfdd..b6fa83c619dd 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -1923,12 +1923,15 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
l4_proto = ip.v4->protocol;
} else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
+ int ret;
+
tunnel |= ICE_TX_CTX_EIPT_IPV6;
exthdr = ip.hdr + sizeof(*ip.v6);
l4_proto = ip.v6->nexthdr;
- if (l4.hdr != exthdr)
- ipv6_skip_exthdr(skb, exthdr - skb->data,
- &l4_proto, &frag_off);
+ ret = ipv6_skip_exthdr(skb, exthdr - skb->data,
+ &l4_proto, &frag_off);
+ if (ret < 0)
+ return -1;
}
/* define outer transport */
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 39757b4cf8f4..1782146db644 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -446,8 +446,11 @@ bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count)
}
} while (--count);
- if (rx_ring->next_to_use != ntu)
+ if (rx_ring->next_to_use != ntu) {
+ /* clear the status bits for the next_to_use descriptor */
+ rx_desc->wb.status_error0 = 0;
ice_release_rx_desc(rx_ring, ntu);
+ }
return ret;
}
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index 61d331ce38cd..ec8cd69d4992 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -1675,12 +1675,18 @@ static int igc_ethtool_get_link_ksettings(struct net_device *netdev,
cmd->base.phy_address = hw->phy.addr;
/* advertising link modes */
- ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Half);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Half);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, 2500baseT_Full);
+ if (hw->phy.autoneg_advertised & ADVERTISE_10_HALF)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Half);
+ if (hw->phy.autoneg_advertised & ADVERTISE_10_FULL)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full);
+ if (hw->phy.autoneg_advertised & ADVERTISE_100_HALF)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Half);
+ if (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full);
+ if (hw->phy.autoneg_advertised & ADVERTISE_1000_FULL)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full);
+ if (hw->phy.autoneg_advertised & ADVERTISE_2500_FULL)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 2500baseT_Full);
/* set autoneg settings */
if (hw->mac.autoneg == 1) {
@@ -1708,7 +1714,8 @@ static int igc_ethtool_get_link_ksettings(struct net_device *netdev,
Asym_Pause);
}
- status = rd32(IGC_STATUS);
+ status = pm_runtime_suspended(&adapter->pdev->dev) ?
+ 0 : rd32(IGC_STATUS);
if (status & IGC_STATUS_LU) {
if (status & IGC_STATUS_SPEED_1000) {
@@ -1792,6 +1799,12 @@ igc_ethtool_set_link_ksettings(struct net_device *netdev,
ethtool_convert_link_mode_to_legacy_u32(&advertising,
cmd->link_modes.advertising);
+ /* Converting to legacy u32 drops ETHTOOL_LINK_MODE_2500baseT_Full_BIT.
+ * We have to check this and convert it to ADVERTISE_2500_FULL
+ * (aka ETHTOOL_LINK_MODE_2500baseX_Full_BIT) explicitly.
+ */
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 2500baseT_Full))
+ advertising |= ADVERTISE_2500_FULL;
if (cmd->base.autoneg == AUTONEG_ENABLE) {
hw->mac.autoneg = 1;
diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c
index 8b67d9b49a83..7ec04e48860c 100644
--- a/drivers/net/ethernet/intel/igc/igc_i225.c
+++ b/drivers/net/ethernet/intel/igc/igc_i225.c
@@ -219,9 +219,9 @@ static s32 igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words,
u16 *data)
{
struct igc_nvm_info *nvm = &hw->nvm;
+ s32 ret_val = -IGC_ERR_NVM;
u32 attempts = 100000;
u32 i, k, eewr = 0;
- s32 ret_val = 0;
/* A check for invalid values: offset too large, too many words,
* too many words for the offset, and not enough words.
@@ -229,7 +229,6 @@ static s32 igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words,
if (offset >= nvm->word_size || (words > (nvm->word_size - offset)) ||
words == 0) {
hw_dbg("nvm parameter(s) out of bounds\n");
- ret_val = -IGC_ERR_NVM;
goto out;
}
diff --git a/drivers/net/ethernet/intel/igc/igc_mac.c b/drivers/net/ethernet/intel/igc/igc_mac.c
index 09cd0ec7ee87..67b8ffd21d8a 100644
--- a/drivers/net/ethernet/intel/igc/igc_mac.c
+++ b/drivers/net/ethernet/intel/igc/igc_mac.c
@@ -638,7 +638,7 @@ s32 igc_config_fc_after_link_up(struct igc_hw *hw)
}
out:
- return 0;
+ return ret_val;
}
/**
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index bf48f0ded9c7..925161959b9b 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -219,7 +219,7 @@ static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
dev_kfree_skb_any(skb);
spin_unlock_irqrestore(&lp->lock, flags);
- return NETDEV_TX_BUSY;
+ return NETDEV_TX_OK;
}
}
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 563ceac3060f..bc4d8d144401 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -4432,7 +4432,7 @@ static int mvneta_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
struct bpf_prog *old_prog;
if (prog && dev->mtu > MVNETA_MAX_RX_BUF_SIZE) {
- NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported on XDP");
+ NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
return -EOPNOTSUPP;
}
@@ -5255,7 +5255,7 @@ static int mvneta_probe(struct platform_device *pdev)
err = mvneta_port_power_up(pp, pp->phy_interface);
if (err < 0) {
dev_err(&pdev->dev, "can't power up port\n");
- return err;
+ goto err_netdev;
}
/* Armada3700 network controller does not support per-cpu
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index afdd22827223..358119d98358 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -1231,7 +1231,7 @@ static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
if (port->gop_id == 2)
- val |= GENCONF_CTRL0_PORT0_RGMII | GENCONF_CTRL0_PORT1_RGMII;
+ val |= GENCONF_CTRL0_PORT0_RGMII;
else if (port->gop_id == 3)
val |= GENCONF_CTRL0_PORT1_RGMII_MII;
regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
@@ -2370,17 +2370,18 @@ static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port,
struct mvpp2_tx_queue *txq)
{
- unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
+ unsigned int thread;
u32 val;
if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK)
txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;
val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET);
- mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
- mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val);
-
- put_cpu();
+ /* PKT-coalescing registers are per-queue + per-thread */
+ for (thread = 0; thread < MVPP2_MAX_THREADS; thread++) {
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val);
+ }
}
static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
@@ -5487,7 +5488,7 @@ static int mvpp2_port_init(struct mvpp2_port *port)
struct mvpp2 *priv = port->priv;
struct mvpp2_txq_pcpu *txq_pcpu;
unsigned int thread;
- int queue, err;
+ int queue, err, val;
/* Checks for hardware constraints */
if (port->first_rxq + port->nrxqs >
@@ -5501,6 +5502,18 @@ static int mvpp2_port_init(struct mvpp2_port *port)
mvpp2_egress_disable(port);
mvpp2_port_disable(port);
+ if (mvpp2_is_xlg(port->phy_interface)) {
+ val = readl(port->base + MVPP22_XLG_CTRL0_REG);
+ val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
+ val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN;
+ writel(val, port->base + MVPP22_XLG_CTRL0_REG);
+ } else {
+ val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+ val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
+ val |= MVPP2_GMAC_FORCE_LINK_DOWN;
+ writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+ }
+
port->tx_time_coal = MVPP2_TXDONE_COAL_USEC;
port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs),
@@ -5869,8 +5882,6 @@ static void mvpp2_phylink_validate(struct phylink_config *config,
phylink_set(mask, Autoneg);
phylink_set_port_modes(mask);
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
switch (state->interface) {
case PHY_INTERFACE_MODE_10GBASER:
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
index 5692c6087bbb..dd590086fe6a 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
@@ -29,16 +29,16 @@ static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
/* Clear entry invalidation bit */
pe->tcam[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
- /* Write tcam index - indirect access */
- mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
- for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
- mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]);
-
/* Write sram index - indirect access */
mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram[i]);
+ /* Write tcam index - indirect access */
+ mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
+ for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
+ mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]);
+
return 0;
}
@@ -405,6 +405,38 @@ static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
return -EINVAL;
}
+/* Drop flow control pause frames */
+static void mvpp2_prs_drop_fc(struct mvpp2 *priv)
+{
+ unsigned char da[ETH_ALEN] = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01 };
+ struct mvpp2_prs_entry pe;
+ unsigned int len;
+
+ memset(&pe, 0, sizeof(pe));
+
+ /* For all ports - drop flow control frames */
+ pe.index = MVPP2_PE_FC_DROP;
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
+
+ /* Set match on DA */
+ len = ETH_ALEN;
+ while (len--)
+ mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff);
+
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
+ MVPP2_PRS_RI_DROP_MASK);
+
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+
+ /* Mask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
+ mvpp2_prs_hw_write(priv, &pe);
+}
+
/* Enable/disable dropping all mac da's */
static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
{
@@ -1162,6 +1194,7 @@ static void mvpp2_prs_mac_init(struct mvpp2 *priv)
mvpp2_prs_hw_write(priv, &pe);
/* Create dummy entries for drop all and promiscuous modes */
+ mvpp2_prs_drop_fc(priv);
mvpp2_prs_mac_drop_all_set(priv, 0, false);
mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false);
mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false);
@@ -1647,8 +1680,9 @@ static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
MVPP2_PRS_RI_L3_PROTO_MASK);
- /* Skip eth_type + 4 bytes of IPv6 header */
- mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
+ /* Jump to DIP of IPv6 header */
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
+ MVPP2_MAX_L3_ADDR_SIZE,
MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
/* Set L3 offset */
mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
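
Two details worth annotating in the mvpp2_prs.c hunks above: mvpp2_prs_hw_write() now programs SRAM before TCAM, so an entry can never become searchable while its action words are still stale; and the new MVPP2_PE_FC_DROP entry matches 01:80:C2:00:00:01, the IEEE 802.3x PAUSE destination address, on every port. A self-contained sketch of what that TCAM entry matches (ether_addr_equal() and eth_hdr() are standard kernel helpers; the function name is hypothetical):

    /* 01:80:C2:00:00:01 is the IEEE 802.3x PAUSE destination address */
    static const u8 pause_da[ETH_ALEN] = {
            0x01, 0x80, 0xC2, 0x00, 0x00, 0x01
    };

    static bool is_flow_control_frame(const struct sk_buff *skb)
    {
            return ether_addr_equal(eth_hdr(skb)->h_dest, pause_da);
    }
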
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
index e22f6c85d380..4b68dd374733 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
@@ -129,7 +129,7 @@
#define MVPP2_PE_VID_EDSA_FLTR_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 7)
#define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 6)
#define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 5)
-/* reserved */
+#define MVPP2_PE_FC_DROP (MVPP2_PRS_TCAM_SRAM_SIZE - 4)
#define MVPP2_PE_MAC_MC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 3)
#define MVPP2_PE_MAC_UC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2)
#define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 7d0f96290943..1a8f5a039d50 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -871,8 +871,10 @@ static int cgx_lmac_init(struct cgx *cgx)
if (!lmac)
return -ENOMEM;
lmac->name = kcalloc(1, sizeof("cgx_fwi_xxx_yyy"), GFP_KERNEL);
- if (!lmac->name)
- return -ENOMEM;
+ if (!lmac->name) {
+ err = -ENOMEM;
+ goto err_lmac_free;
+ }
sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i);
lmac->lmac_id = i;
lmac->cgx = cgx;
@@ -883,7 +885,7 @@ static int cgx_lmac_init(struct cgx *cgx)
CGX_LMAC_FWI + i * 9),
cgx_fwi_event_handler, 0, lmac->name, lmac);
if (err)
- return err;
+ goto err_irq;
/* Enable interrupt */
cgx_write(cgx, lmac->lmac_id, CGXX_CMRX_INT_ENA_W1S,
@@ -895,6 +897,12 @@ static int cgx_lmac_init(struct cgx *cgx)
}
return cgx_lmac_verify_fwi_version(cgx);
+
+err_irq:
+ kfree(lmac->name);
+err_lmac_free:
+ kfree(lmac);
+ return err;
}
static int cgx_lmac_exit(struct cgx *cgx)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index d298b9357177..6c6b411e78fd 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -469,6 +469,9 @@ int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
int pf = rvu_get_pf(req->hdr.pcifunc);
u8 cgx_id, lmac_id;
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
+
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr);
@@ -485,6 +488,9 @@ int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
int rc = 0, i;
u64 cfg;
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
+
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
rsp->hdr.rc = rc;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
index 3f9d0ab6d5ae..bc0e4113370e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -275,7 +275,8 @@ static int rvu_npa_report_show(struct devlink_fmsg *fmsg, void *ctx,
enum npa_af_rvu_health health_reporter)
{
struct rvu_npa_event_ctx *npa_event_context;
- unsigned int intr_val, alloc_dis, free_dis;
+ unsigned int alloc_dis, free_dis;
+ u64 intr_val;
int err;
npa_event_context = ctx;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 73fb94dd5fbc..e6869435e1f3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -478,10 +478,11 @@ dma_addr_t __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool)
dma_addr_t iova;
u8 *buf;
- buf = napi_alloc_frag(pool->rbsize);
+ buf = napi_alloc_frag(pool->rbsize + OTX2_ALIGN);
if (unlikely(!buf))
return -ENOMEM;
+ buf = PTR_ALIGN(buf, OTX2_ALIGN);
iova = dma_map_single_attrs(pfvf->dev, buf, pool->rbsize,
DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
if (unlikely(dma_mapping_error(pfvf->dev, iova))) {
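
napi_alloc_frag() gives no guarantee beyond natural alignment, so the buffer is over-allocated by OTX2_ALIGN and the pointer rounded up with PTR_ALIGN(), wasting at most OTX2_ALIGN bytes per buffer. The rounding arithmetic, spelled out (allocator name hypothetical):

    /* PTR_ALIGN(p, a) == (typeof(p))(((uintptr_t)p + a - 1) & ~(uintptr_t)(a - 1))
     * e.g. a = 128:  p = 0x...1050  ->  0x...1080.
     * Over-allocating by 'a' guarantees the aligned pointer still has the
     * full requested size inside the allocation.
     */
    void *raw = some_alloc(size + 128);     /* hypothetical allocator */
    void *buf = PTR_ALIGN(raw, 128);
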
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 74d466796b7c..d5fc72b1a36f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -90,7 +90,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
int cq_idx)
{
struct mlx4_en_dev *mdev = priv->mdev;
- int err = 0;
+ int irq, err = 0;
int timestamp_en = 0;
bool assigned_eq = false;
@@ -116,10 +116,8 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
assigned_eq = true;
}
-
- cq->irq_desc =
- irq_to_desc(mlx4_eq_get_irq(mdev->dev,
- cq->vector));
+ irq = mlx4_eq_get_irq(mdev->dev, cq->vector);
+ cq->aff_mask = irq_get_effective_affinity_mask(irq);
} else {
/* For TX we use the same irq per
ring we assigned for the RX */
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 7954c1daf2b6..c1c9118a66c9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -958,18 +958,14 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
/* If we used up all the quota - we're probably not done yet... */
if (done == budget || !clean_complete) {
- const struct cpumask *aff;
- struct irq_data *idata;
int cpu_curr;
/* in case we got here because of !clean_complete */
done = budget;
cpu_curr = smp_processor_id();
- idata = irq_desc_get_irq_data(cq->irq_desc);
- aff = irq_data_get_affinity_mask(idata);
- if (likely(cpumask_test_cpu(cpu_curr, aff)))
+ if (likely(cpumask_test_cpu(cpu_curr, cq->aff_mask)))
return budget;
/* Current cpu is not according to smp_irq_affinity -
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 17f2b1919378..e8ed23190de0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -47,6 +47,7 @@
#endif
#include <linux/cpu_rmap.h>
#include <linux/ptp_clock_kernel.h>
+#include <linux/irq.h>
#include <net/xdp.h>
#include <linux/mlx4/device.h>
@@ -365,7 +366,7 @@ struct mlx4_en_cq {
struct mlx4_cqe *buf;
#define MLX4_EN_OPCODE_ERROR 0x1e
- struct irq_desc *irq_desc;
+ const struct cpumask *aff_mask;
};
struct mlx4_en_port_profile {
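
The mlx4 change (and the matching mlx5 hunks further down) replaces datapath access to struct irq_desc with a cpumask cached at CQ/channel creation. irq_to_desc() pokes at genirq internals, whereas irq_get_effective_affinity_mask(), used above, hands back a mask the NAPI poll loop can test directly. A minimal sketch of the resulting split, with hypothetical setup plumbing:

    /* setup path: resolve the mask once */
    int irq = pci_irq_vector(pdev, vector);         /* hypothetical irq source */
    cq->aff_mask = irq_get_effective_affinity_mask(irq);

    /* napi poll path: cheap membership test, no irq_desc access */
    if (likely(cpumask_test_cpu(smp_processor_id(), cq->aff_mask)))
            return budget;  /* stay on this CPU, keep polling */
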
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index a1a81cfeb607..055baf3b6cb1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -684,7 +684,7 @@ struct mlx5e_channel {
spinlock_t async_icosq_lock;
/* data path - accessed per napi poll */
- struct irq_desc *irq_desc;
+ const struct cpumask *aff_mask;
struct mlx5e_ch_stats *stats;
/* control */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
index 718f8c0a4f6b..84e501e057b4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
@@ -273,7 +273,7 @@ int mlx5e_health_rsc_fmsg_dump(struct mlx5e_priv *priv, struct mlx5_rsc_key *key
err = devlink_fmsg_binary_pair_nest_start(fmsg, "data");
if (err)
- return err;
+ goto free_page;
cmd = mlx5_rsc_dump_cmd_create(mdev, key);
if (IS_ERR(cmd)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index 351118985a57..2a2bac30daaa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -479,7 +479,6 @@ int mlx5e_port_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
c->num_tc = params->num_tc;
c->stats = &priv->port_ptp_stats.ch;
- c->irq_desc = irq_to_desc(irq);
c->lag_port = lag_port;
netif_napi_add(netdev, &c->napi, mlx5e_ptp_napi_poll, 64);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
index 28aa5ae118f4..90c98ea63b7f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
@@ -28,7 +28,6 @@ struct mlx5e_port_ptp {
u8 lag_port;
/* data path - accessed per napi poll */
- struct irq_desc *irq_desc;
struct mlx5e_ch_stats *stats;
/* control */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index d29af7b9c695..76177f7c5ec2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -626,6 +626,11 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
if (!reg_c0)
return true;
+ /* If reg_c0 is not equal to the default flow tag then skb->mark
+ * is not supported and must be reset back to 0.
+ */
+ skb->mark = 0;
+
priv = netdev_priv(skb->dev);
esw = priv->mdev->priv.eswitch;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index e521254d886e..6bc6b48a56dc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -118,16 +118,17 @@ struct mlx5_ct_tuple {
u16 zone;
};
-struct mlx5_ct_shared_counter {
+struct mlx5_ct_counter {
struct mlx5_fc *counter;
refcount_t refcount;
+ bool is_shared;
};
struct mlx5_ct_entry {
struct rhash_head node;
struct rhash_head tuple_node;
struct rhash_head tuple_nat_node;
- struct mlx5_ct_shared_counter *shared_counter;
+ struct mlx5_ct_counter *counter;
unsigned long cookie;
unsigned long restore_cookie;
struct mlx5_ct_tuple tuple;
@@ -166,6 +167,12 @@ static const struct rhashtable_params tuples_nat_ht_params = {
.min_size = 16 * 1024,
};
+static bool
+mlx5_tc_ct_entry_has_nat(struct mlx5_ct_entry *entry)
+{
+ return !!(entry->tuple_nat_node.next);
+}
+
static int
mlx5_tc_ct_rule_to_tuple(struct mlx5_ct_tuple *tuple, struct flow_rule *rule)
{
@@ -394,13 +401,14 @@ mlx5_tc_ct_set_tuple_match(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
}
static void
-mlx5_tc_ct_shared_counter_put(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_entry *entry)
+mlx5_tc_ct_counter_put(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_entry *entry)
{
- if (!refcount_dec_and_test(&entry->shared_counter->refcount))
+ if (entry->counter->is_shared &&
+ !refcount_dec_and_test(&entry->counter->refcount))
return;
- mlx5_fc_destroy(ct_priv->dev, entry->shared_counter->counter);
- kfree(entry->shared_counter);
+ mlx5_fc_destroy(ct_priv->dev, entry->counter->counter);
+ kfree(entry->counter);
}
static void
@@ -699,7 +707,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
attr->dest_ft = ct_priv->post_ct;
attr->ft = nat ? ct_priv->ct_nat : ct_priv->ct;
attr->outer_match_level = MLX5_MATCH_L4;
- attr->counter = entry->shared_counter->counter;
+ attr->counter = entry->counter->counter;
attr->flags |= MLX5_ESW_ATTR_FLAG_NO_IN_PORT;
mlx5_tc_ct_set_tuple_match(netdev_priv(ct_priv->netdev), spec, flow_rule);
@@ -732,13 +740,34 @@ err_attr:
return err;
}
-static struct mlx5_ct_shared_counter *
+static struct mlx5_ct_counter *
+mlx5_tc_ct_counter_create(struct mlx5_tc_ct_priv *ct_priv)
+{
+ struct mlx5_ct_counter *counter;
+ int ret;
+
+ counter = kzalloc(sizeof(*counter), GFP_KERNEL);
+ if (!counter)
+ return ERR_PTR(-ENOMEM);
+
+ counter->is_shared = false;
+ counter->counter = mlx5_fc_create(ct_priv->dev, true);
+ if (IS_ERR(counter->counter)) {
+ ct_dbg("Failed to create counter for ct entry");
+ ret = PTR_ERR(counter->counter);
+ kfree(counter);
+ return ERR_PTR(ret);
+ }
+
+ return counter;
+}
+
+static struct mlx5_ct_counter *
mlx5_tc_ct_shared_counter_get(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5_ct_entry *entry)
{
struct mlx5_ct_tuple rev_tuple = entry->tuple;
- struct mlx5_ct_shared_counter *shared_counter;
- struct mlx5_core_dev *dev = ct_priv->dev;
+ struct mlx5_ct_counter *shared_counter;
struct mlx5_ct_entry *rev_entry;
__be16 tmp_port;
int ret;
@@ -767,25 +796,20 @@ mlx5_tc_ct_shared_counter_get(struct mlx5_tc_ct_priv *ct_priv,
rev_entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_ht, &rev_tuple,
tuples_ht_params);
if (rev_entry) {
- if (refcount_inc_not_zero(&rev_entry->shared_counter->refcount)) {
+ if (refcount_inc_not_zero(&rev_entry->counter->refcount)) {
mutex_unlock(&ct_priv->shared_counter_lock);
- return rev_entry->shared_counter;
+ return rev_entry->counter;
}
}
mutex_unlock(&ct_priv->shared_counter_lock);
- shared_counter = kzalloc(sizeof(*shared_counter), GFP_KERNEL);
- if (!shared_counter)
- return ERR_PTR(-ENOMEM);
-
- shared_counter->counter = mlx5_fc_create(dev, true);
- if (IS_ERR(shared_counter->counter)) {
- ct_dbg("Failed to create counter for ct entry");
- ret = PTR_ERR(shared_counter->counter);
- kfree(shared_counter);
+ shared_counter = mlx5_tc_ct_counter_create(ct_priv);
+ if (IS_ERR(shared_counter)) {
+ ret = PTR_ERR(shared_counter);
return ERR_PTR(ret);
}
+ shared_counter->is_shared = true;
refcount_set(&shared_counter->refcount, 1);
return shared_counter;
}
@@ -798,10 +822,13 @@ mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
{
int err;
- entry->shared_counter = mlx5_tc_ct_shared_counter_get(ct_priv, entry);
- if (IS_ERR(entry->shared_counter)) {
- err = PTR_ERR(entry->shared_counter);
- ct_dbg("Failed to create counter for ct entry");
+ if (nf_ct_acct_enabled(dev_net(ct_priv->netdev)))
+ entry->counter = mlx5_tc_ct_counter_create(ct_priv);
+ else
+ entry->counter = mlx5_tc_ct_shared_counter_get(ct_priv, entry);
+
+ if (IS_ERR(entry->counter)) {
+ err = PTR_ERR(entry->counter);
return err;
}
@@ -820,7 +847,7 @@ mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
err_nat:
mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
err_orig:
- mlx5_tc_ct_shared_counter_put(ct_priv, entry);
+ mlx5_tc_ct_counter_put(ct_priv, entry);
return err;
}
@@ -890,13 +917,13 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
err_insert:
mlx5_tc_ct_entry_del_rules(ct_priv, entry);
err_rules:
- rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
- &entry->tuple_nat_node, tuples_nat_ht_params);
+ if (mlx5_tc_ct_entry_has_nat(entry))
+ rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
+ &entry->tuple_nat_node, tuples_nat_ht_params);
err_tuple_nat:
- if (entry->tuple_node.next)
- rhashtable_remove_fast(&ct_priv->ct_tuples_ht,
- &entry->tuple_node,
- tuples_ht_params);
+ rhashtable_remove_fast(&ct_priv->ct_tuples_ht,
+ &entry->tuple_node,
+ tuples_ht_params);
err_tuple:
err_set:
kfree(entry);
@@ -911,14 +938,14 @@ mlx5_tc_ct_del_ft_entry(struct mlx5_tc_ct_priv *ct_priv,
{
mlx5_tc_ct_entry_del_rules(ct_priv, entry);
mutex_lock(&ct_priv->shared_counter_lock);
- if (entry->tuple_node.next)
+ if (mlx5_tc_ct_entry_has_nat(entry))
rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
&entry->tuple_nat_node,
tuples_nat_ht_params);
rhashtable_remove_fast(&ct_priv->ct_tuples_ht, &entry->tuple_node,
tuples_ht_params);
mutex_unlock(&ct_priv->shared_counter_lock);
- mlx5_tc_ct_shared_counter_put(ct_priv, entry);
+ mlx5_tc_ct_counter_put(ct_priv, entry);
}
@@ -956,7 +983,7 @@ mlx5_tc_ct_block_flow_offload_stats(struct mlx5_ct_ft *ft,
if (!entry)
return -ENOENT;
- mlx5_fc_query_cached(entry->shared_counter->counter, &bytes, &packets, &lastuse);
+ mlx5_fc_query_cached(entry->counter->counter, &bytes, &packets, &lastuse);
flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
FLOW_ACTION_HW_STATS_DELAYED);
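
mlx5_tc_ct_entry_has_nat() leans on an rhashtable detail: a struct rhash_head inside a kzalloc'ed object stays all-zero until insertion, so a non-NULL .next marks the node as having been hashed. A hedged sketch of the idiom and its preconditions:

    struct my_entry {                       /* hypothetical */
            struct rhash_head node;
    };

    /* Valid only if the object was zero-initialised (kzalloc) and the
     * node is not re-used after rhashtable_remove_fast().
     */
    static bool my_entry_is_hashed(const struct my_entry *e)
    {
            return !!e->node.next;
    }
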
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index 7943eb30b837..4880f2179273 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -371,6 +371,15 @@ struct mlx5e_swp_spec {
u8 tun_l4_proto;
};
+static inline void mlx5e_eseg_swp_offsets_add_vlan(struct mlx5_wqe_eth_seg *eseg)
+{
+ /* SWP offsets are in 2-byte words */
+ eseg->swp_outer_l3_offset += VLAN_HLEN / 2;
+ eseg->swp_outer_l4_offset += VLAN_HLEN / 2;
+ eseg->swp_inner_l3_offset += VLAN_HLEN / 2;
+ eseg->swp_inner_l4_offset += VLAN_HLEN / 2;
+}
+
static inline void
mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
struct mlx5e_swp_spec *swp_spec)
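
VLAN_HLEN is 4 bytes and SWP offsets count 2-byte words, so inserting a VLAN tag moves every software-parser offset by VLAN_HLEN / 2 = 2 words; the helper applies that to all four offsets at once. Numerically:

    /* Untagged:  outer L3 starts at byte 14 -> swp_outer_l3_offset = 14 / 2 = 7
     * VLAN tag:  outer L3 starts at byte 18 -> swp_outer_l3_offset = 18 / 2 = 9
     * Difference: VLAN_HLEN / 2 = 4 / 2 = 2 words, for all four offsets.
     */
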
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index 899b98aca0d3..1fae7fab8297 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -51,7 +51,7 @@ static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev)
}
static inline void
-mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
+mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
struct mlx5e_swp_spec swp_spec = {};
unsigned int offset = 0;
@@ -85,6 +85,8 @@ mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
}
mlx5e_set_eseg_swp(skb, eseg, &swp_spec);
+ if (skb_vlan_tag_present(skb) && ihs)
+ mlx5e_eseg_swp_offsets_add_vlan(eseg);
}
#else
@@ -163,7 +165,7 @@ static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq,
static inline bool mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
struct sk_buff *skb,
- struct mlx5_wqe_eth_seg *eseg)
+ struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
#ifdef CONFIG_MLX5_EN_IPSEC
if (xfrm_offload(skb))
@@ -172,7 +174,7 @@ static inline bool mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
#if IS_ENABLED(CONFIG_GENEVE)
if (skb->encapsulation)
- mlx5e_tx_tunnel_accel(skb, eseg);
+ mlx5e_tx_tunnel_accel(skb, eseg, ihs);
#endif
return true;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
index 6c5c54bcd9be..5cb936541b9e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
@@ -76,7 +76,7 @@ static const struct counter_desc mlx5e_ipsec_sw_stats_desc[] = {
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_sw)
{
- return NUM_IPSEC_SW_COUNTERS;
+ return priv->ipsec ? NUM_IPSEC_SW_COUNTERS : 0;
}
static inline MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec_sw) {}
@@ -105,7 +105,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_sw)
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_hw)
{
- return (mlx5_fpga_ipsec_device_caps(priv->mdev)) ? NUM_IPSEC_HW_COUNTERS : 0;
+ return (priv->ipsec && mlx5_fpga_ipsec_device_caps(priv->mdev)) ? NUM_IPSEC_HW_COUNTERS : 0;
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec_hw)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index d20243d6a032..f23c67575073 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -1151,6 +1151,7 @@ static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
{
struct mlx5e_channels new_channels = {};
bool reset_channels = true;
+ bool opened;
int err = 0;
mutex_lock(&priv->state_lock);
@@ -1159,22 +1160,24 @@ static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &new_channels.params,
trust_state);
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- priv->channels.params = new_channels.params;
+ opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (!opened)
reset_channels = false;
- }
/* Skip if tx_min_inline is the same */
if (new_channels.params.tx_min_inline_mode ==
priv->channels.params.tx_min_inline_mode)
reset_channels = false;
- if (reset_channels)
+ if (reset_channels) {
err = mlx5e_safe_switch_channels(priv, &new_channels,
mlx5e_update_trust_state_hw,
&trust_state);
- else
+ } else {
err = mlx5e_update_trust_state_hw(priv, &trust_state);
+ if (!err && !opened)
+ priv->channels.params = new_channels.params;
+ }
mutex_unlock(&priv->state_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index d9076d543104..302001d6661e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -447,12 +447,18 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
goto out;
}
- new_channels.params = priv->channels.params;
+ new_channels.params = *cur_params;
new_channels.params.num_channels = count;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ struct mlx5e_params old_params;
+
+ old_params = *cur_params;
*cur_params = new_channels.params;
err = mlx5e_num_channels_changed(priv);
+ if (err)
+ *cur_params = old_params;
+
goto out;
}
@@ -1010,6 +1016,22 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
return mlx5e_ethtool_get_link_ksettings(priv, link_ksettings);
}
+static int mlx5e_speed_validate(struct net_device *netdev, bool ext,
+ const unsigned long link_modes, u8 autoneg)
+{
+ /* Extended link-mode has no speed limitations. */
+ if (ext)
+ return 0;
+
+ if ((link_modes & MLX5E_PROT_MASK(MLX5E_56GBASE_R4)) &&
+ autoneg != AUTONEG_ENABLE) {
+ netdev_err(netdev, "%s: 56G link speed requires autoneg enabled\n",
+ __func__);
+ return -EINVAL;
+ }
+ return 0;
+}
+
static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes)
{
u32 i, ptys_modes = 0;
@@ -1103,13 +1125,9 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
link_modes = autoneg == AUTONEG_ENABLE ? ethtool2ptys_adver_func(adver) :
mlx5e_port_speed2linkmodes(mdev, speed, !ext);
- if ((link_modes & MLX5E_PROT_MASK(MLX5E_56GBASE_R4)) &&
- autoneg != AUTONEG_ENABLE) {
- netdev_err(priv->netdev, "%s: 56G link speed requires autoneg enabled\n",
- __func__);
- err = -EINVAL;
+ err = mlx5e_speed_validate(priv->netdev, ext, link_modes, autoneg);
+ if (err)
goto out;
- }
link_modes = link_modes & eproto.cap;
if (!link_modes) {
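
In the !MLX5E_STATE_OPENED branch above, the parameters are now snapshotted before being committed, so a failure in mlx5e_num_channels_changed() cannot leave priv->channels.params half-applied; set_feature_lro() and mlx5e_setup_tc_mqprio() below get the same treatment. The shape, reduced to its essentials (all names hypothetical):

    struct params old = *cur;       /* snapshot before mutating */

    *cur = new;
    err = apply(priv);              /* hypothetical apply step */
    if (err)
            *cur = old;             /* roll back on failure */
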
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index fa8149f6eb08..e02e5895703d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -942,6 +942,7 @@ static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc,
in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
kfree(ft->g);
+ ft->g = NULL;
return -ENOMEM;
}
@@ -1087,6 +1088,7 @@ static int mlx5e_create_inner_ttc_table_groups(struct mlx5e_ttc_table *ttc)
in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
kfree(ft->g);
+ ft->g = NULL;
return -ENOMEM;
}
@@ -1390,6 +1392,7 @@ err_destroy_groups:
ft->g[ft->num_groups] = NULL;
mlx5e_destroy_groups(ft);
kvfree(in);
+ kfree(ft->g);
return err;
}
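
The added ft->g = NULL lines matter because later error unwinds free ft->g again; kfree(NULL) is a defined no-op, while a dangling ft->g is a double free. In miniature:

    kfree(ft->g);
    ft->g = NULL;   /* a later kfree(ft->g) in the unwind is now harmless */
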
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 03831650f655..3fc7d18ac868 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1987,7 +1987,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->num_tc = params->num_tc;
c->xdp = !!params->xdp_prog;
c->stats = &priv->channel_stats[ix].ch;
- c->irq_desc = irq_to_desc(irq);
+ c->aff_mask = irq_get_effective_affinity_mask(irq);
c->lag_port = mlx5e_enumerate_lag_port(priv->mdev, ix);
netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
@@ -3161,7 +3161,8 @@ static void mlx5e_modify_admin_state(struct mlx5_core_dev *mdev,
mlx5_set_port_admin_status(mdev, state);
- if (mlx5_eswitch_mode(mdev) != MLX5_ESWITCH_LEGACY)
+ if (mlx5_eswitch_mode(mdev) == MLX5_ESWITCH_OFFLOADS ||
+ !MLX5_CAP_GEN(mdev, uplink_follow))
return;
if (state == MLX5_PORT_UP)
@@ -3613,18 +3614,23 @@ static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
new_channels.params.num_tc = tc ? tc : 1;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ struct mlx5e_params old_params;
+
+ old_params = priv->channels.params;
priv->channels.params = new_channels.params;
+ err = mlx5e_num_channels_changed(priv);
+ if (err)
+ priv->channels.params = old_params;
+
goto out;
}
err = mlx5e_safe_switch_channels(priv, &new_channels,
mlx5e_num_channels_changed_ctx, NULL);
- if (err)
- goto out;
- priv->max_opened_tc = max_t(u8, priv->max_opened_tc,
- new_channels.params.num_tc);
out:
+ priv->max_opened_tc = max_t(u8, priv->max_opened_tc,
+ priv->channels.params.num_tc);
mutex_unlock(&priv->state_lock);
return err;
}
@@ -3756,7 +3762,7 @@ static int set_feature_lro(struct net_device *netdev, bool enable)
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_channels new_channels = {};
- struct mlx5e_params *old_params;
+ struct mlx5e_params *cur_params;
int err = 0;
bool reset;
@@ -3769,8 +3775,8 @@ static int set_feature_lro(struct net_device *netdev, bool enable)
goto out;
}
- old_params = &priv->channels.params;
- if (enable && !MLX5E_GET_PFLAG(old_params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
+ cur_params = &priv->channels.params;
+ if (enable && !MLX5E_GET_PFLAG(cur_params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
netdev_warn(netdev, "can't set LRO with legacy RQ\n");
err = -EINVAL;
goto out;
@@ -3778,18 +3784,23 @@ static int set_feature_lro(struct net_device *netdev, bool enable)
reset = test_bit(MLX5E_STATE_OPENED, &priv->state);
- new_channels.params = *old_params;
+ new_channels.params = *cur_params;
new_channels.params.lro_en = enable;
- if (old_params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) {
- if (mlx5e_rx_mpwqe_is_linear_skb(mdev, old_params, NULL) ==
+ if (cur_params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) {
+ if (mlx5e_rx_mpwqe_is_linear_skb(mdev, cur_params, NULL) ==
mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_channels.params, NULL))
reset = false;
}
if (!reset) {
- *old_params = new_channels.params;
+ struct mlx5e_params old_params;
+
+ old_params = *cur_params;
+ *cur_params = new_channels.params;
err = mlx5e_modify_tirs_lro(priv);
+ if (err)
+ *cur_params = old_params;
goto out;
}
@@ -4066,9 +4077,16 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
}
if (!reset) {
+ unsigned int old_mtu = params->sw_mtu;
+
params->sw_mtu = new_mtu;
- if (preactivate)
- preactivate(priv, NULL);
+ if (preactivate) {
+ err = preactivate(priv, NULL);
+ if (err) {
+ params->sw_mtu = old_mtu;
+ goto out;
+ }
+ }
netdev->mtu = params->sw_mtu;
goto out;
}
@@ -5026,7 +5044,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
FT_CAP(modify_root) &&
FT_CAP(identified_miss_table_mode) &&
FT_CAP(flow_table_modify)) {
-#ifdef CONFIG_MLX5_ESWITCH
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
netdev->hw_features |= NETIF_F_HW_TC;
#endif
#ifdef CONFIG_MLX5_EN_ARFS
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 989c70c1eda3..f0ceae65f6cf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -737,7 +737,9 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
netdev->features |= NETIF_F_NETNS_LOCAL;
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
netdev->hw_features |= NETIF_F_HW_TC;
+#endif
netdev->hw_features |= NETIF_F_SG;
netdev->hw_features |= NETIF_F_IP_CSUM;
netdev->hw_features |= NETIF_F_IPV6_CSUM;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 7f5851c61218..ca4b55839a8a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1262,8 +1262,10 @@ static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
if (mlx5e_cqe_regb_chain(cqe))
- if (!mlx5e_tc_update_skb(cqe, skb))
+ if (!mlx5e_tc_update_skb(cqe, skb)) {
+ dev_kfree_skb_any(skb);
goto free_wqe;
+ }
napi_gro_receive(rq->cq.napi, skb);
@@ -1316,8 +1318,10 @@ static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
if (rep->vlan && skb_vlan_tag_present(skb))
skb_vlan_pop(skb);
- if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv))
+ if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv)) {
+ dev_kfree_skb_any(skb);
goto free_wqe;
+ }
napi_gro_receive(rq->cq.napi, skb);
@@ -1371,8 +1375,10 @@ static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
- if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv))
+ if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv)) {
+ dev_kfree_skb_any(skb);
goto mpwrq_cqe_out;
+ }
napi_gro_receive(rq->cq.napi, skb);
@@ -1528,8 +1534,10 @@ static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cq
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
if (mlx5e_cqe_regb_chain(cqe))
- if (!mlx5e_tc_update_skb(cqe, skb))
+ if (!mlx5e_tc_update_skb(cqe, skb)) {
+ dev_kfree_skb_any(skb);
goto mpwrq_cqe_out;
+ }
napi_gro_receive(rq->cq.napi, skb);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 4cdf834fa74a..dd0bfbacad47 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -67,6 +67,7 @@
#include "lib/geneve.h"
#include "lib/fs_chains.h"
#include "diag/en_tc_tracepoint.h"
+#include <asm/div64.h>
#define nic_chains(priv) ((priv)->fs.tc.chains)
#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)
@@ -1162,6 +1163,9 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
struct mlx5_flow_handle *rule;
+ if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)
+ return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
+
if (flow_flag_test(flow, CT)) {
mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
@@ -1192,6 +1196,9 @@ mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
{
flow_flag_clear(flow, OFFLOADED);
+ if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)
+ goto offload_rule_0;
+
if (flow_flag_test(flow, CT)) {
mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
return;
@@ -1200,6 +1207,7 @@ mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
if (attr->esw_attr->split_count)
mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
+offload_rule_0:
mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
}
@@ -2269,8 +2277,8 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
BIT(FLOW_DISSECTOR_KEY_MPLS))) {
NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
- netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
- dissector->used_keys);
+ netdev_dbg(priv->netdev, "Unsupported key used: 0x%x\n",
+ dissector->used_keys);
return -EOPNOTSUPP;
}
@@ -5007,13 +5015,13 @@ errout:
return err;
}
-static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
+static int apply_police_params(struct mlx5e_priv *priv, u64 rate,
struct netlink_ext_ack *extack)
{
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch *esw;
+ u32 rate_mbps = 0;
u16 vport_num;
- u32 rate_mbps;
int err;
vport_num = rpriv->rep->vport;
@@ -5030,7 +5038,11 @@ static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
* Moreover, if rate is non zero we choose to configure to a minimum of
* 1 mbit/sec.
*/
- rate_mbps = rate ? max_t(u32, (rate * 8 + 500000) / 1000000, 1) : 0;
+ if (rate) {
+ rate = (rate * BITS_PER_BYTE) + 500000;
+ rate_mbps = max_t(u32, do_div(rate, 1000000), 1);
+ }
+
err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps);
if (err)
NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");
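
The widened u64 rate and the guarded conversion implement bytes/s to Mbit/s with round-to-nearest and a floor of 1 Mbit/s. One caveat: do_div(x, d) divides x in place and returns the remainder, so the value the hunk assigns is the remainder rather than the quotient; a hedged sketch of the intended arithmetic using div_u64(), which does return the quotient:

    #include <linux/math64.h>       /* div_u64() */

    u64 bytes_per_sec = rate;
    u32 rate_mbps = 0;

    if (bytes_per_sec)
            rate_mbps = max_t(u32,
                              div_u64(bytes_per_sec * 8 + 500000, 1000000),
                              1);
    /* 125000 B/s -> exactly 1 Mbit/s; even 1 B/s still programs 1 Mbit/s */
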
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index e47e2a0059d0..61ed671fe741 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -682,9 +682,9 @@ void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq)
static bool mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
struct sk_buff *skb, struct mlx5e_accel_tx_state *accel,
- struct mlx5_wqe_eth_seg *eseg)
+ struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
- if (unlikely(!mlx5e_accel_tx_eseg(priv, skb, eseg)))
+ if (unlikely(!mlx5e_accel_tx_eseg(priv, skb, eseg, ihs)))
return false;
mlx5e_txwqe_build_eseg_csum(sq, skb, accel, eseg);
@@ -714,7 +714,8 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
if (mlx5e_tx_skb_supports_mpwqe(skb, &attr)) {
struct mlx5_wqe_eth_seg eseg = {};
- if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &eseg)))
+ if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &eseg,
+ attr.ihs)))
return NETDEV_TX_OK;
mlx5e_sq_xmit_mpwqe(sq, skb, &eseg, netdev_xmit_more());
@@ -731,7 +732,7 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
/* May update the WQE, but may not post other WQEs. */
mlx5e_accel_tx_finish(sq, wqe, &accel,
(struct mlx5_wqe_inline_seg *)(wqe->data + wqe_attr.ds_cnt_inl));
- if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &wqe->eth)))
+ if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &wqe->eth, attr.ihs)))
return NETDEV_TX_OK;
mlx5e_sq_xmit_wqe(sq, skb, &attr, &wqe_attr, wqe, pi, netdev_xmit_more());
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index 1ec3d62f026d..a3cfe06d5116 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -40,12 +40,8 @@
static inline bool mlx5e_channel_no_affinity_change(struct mlx5e_channel *c)
{
int current_cpu = smp_processor_id();
- const struct cpumask *aff;
- struct irq_data *idata;
- idata = irq_desc_get_irq_data(c->irq_desc);
- aff = irq_data_get_affinity_mask(idata);
- return cpumask_test_cpu(current_cpu, aff);
+ return cpumask_test_cpu(current_cpu, c->aff_mask);
}
static void mlx5e_handle_tx_dim(struct mlx5e_txqsq *sq)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
index 2b85d4777303..3e19b1721303 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
@@ -95,22 +95,21 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
return 0;
}
- if (!IS_ERR_OR_NULL(vport->egress.acl))
- return 0;
-
- vport->egress.acl = esw_acl_table_create(esw, vport->vport,
- MLX5_FLOW_NAMESPACE_ESW_EGRESS,
- table_size);
- if (IS_ERR(vport->egress.acl)) {
- err = PTR_ERR(vport->egress.acl);
- vport->egress.acl = NULL;
- goto out;
+ if (!vport->egress.acl) {
+ vport->egress.acl = esw_acl_table_create(esw, vport->vport,
+ MLX5_FLOW_NAMESPACE_ESW_EGRESS,
+ table_size);
+ if (IS_ERR(vport->egress.acl)) {
+ err = PTR_ERR(vport->egress.acl);
+ vport->egress.acl = NULL;
+ goto out;
+ }
+
+ err = esw_acl_egress_lgcy_groups_create(esw, vport);
+ if (err)
+ goto out;
}
- err = esw_acl_egress_lgcy_groups_create(esw, vport);
- if (err)
- goto out;
-
esw_debug(esw->dev,
"vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
vport->vport, vport->info.vlan, vport->info.qos);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index b899539a0786..ee4d86c1f436 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1141,6 +1141,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
destroy_ft:
root->cmds->destroy_flow_table(root, ft);
free_ft:
+ rhltable_destroy(&ft->fgs_hash);
kfree(ft);
unlock_root:
mutex_unlock(&root->chain_lock);
@@ -1759,6 +1760,7 @@ search_again_locked:
if (!fte_tmp)
continue;
rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte_tmp);
+ /* No error check needed here, because insert_fte() is not called */
up_write_ref_node(&fte_tmp->node, false);
tree_put_node(&fte_tmp->node, false);
kmem_cache_free(steering->ftes_cache, fte);
@@ -1811,6 +1813,8 @@ skip_search:
up_write_ref_node(&g->node, false);
rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
up_write_ref_node(&fte->node, false);
+ if (IS_ERR(rule))
+ tree_put_node(&fte->node, false);
return rule;
}
rule = ERR_PTR(-ENOENT);
@@ -1909,6 +1913,8 @@ search_again_locked:
up_write_ref_node(&g->node, false);
rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
up_write_ref_node(&fte->node, false);
+ if (IS_ERR(rule))
+ tree_put_node(&fte->node, false);
tree_put_node(&g->node, false);
return rule;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index f3d45ef082cd..83a05371e2aa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -564,7 +564,9 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
struct mlx5_core_dev *tmp_dev;
int i, err;
- if (!MLX5_CAP_GEN(dev, vport_group_manager))
+ if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
+ !MLX5_CAP_GEN(dev, lag_master) ||
+ MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_MAX_PORTS)
return;
tmp_dev = mlx5_get_next_phys_dev(dev);
@@ -582,12 +584,9 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
if (mlx5_lag_dev_add_pf(ldev, dev, netdev) < 0)
return;
- for (i = 0; i < MLX5_MAX_PORTS; i++) {
- tmp_dev = ldev->pf[i].dev;
- if (!tmp_dev || !MLX5_CAP_GEN(tmp_dev, lag_master) ||
- MLX5_CAP_GEN(tmp_dev, num_lag_ports) != MLX5_MAX_PORTS)
+ for (i = 0; i < MLX5_MAX_PORTS; i++)
+ if (!ldev->pf[i].dev)
break;
- }
if (i >= MLX5_MAX_PORTS)
ldev->flags |= MLX5_LAG_FLAG_READY;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
index 3a9fa629503f..d046db7bb047 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
@@ -90,4 +90,9 @@ int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
u32 key_type, u32 *p_key_id);
void mlx5_destroy_encryption_key(struct mlx5_core_dev *mdev, u32 key_id);
+static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev)
+{
+ return devlink_net(priv_to_devlink(dev));
+}
+
#endif
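
mlx5_core_net() derives the net namespace from the devlink instance bound to the device, which is useful where only an mlx5_core_dev is in scope. A hypothetical use, mirroring the nf_ct_acct_enabled() check added in tc_ct.c above:

    struct net *net = mlx5_core_net(mdev);

    /* honour the namespace's conntrack accounting sysctl */
    if (nf_ct_acct_enabled(net))
            counter = per_entry_counter_create(priv);  /* hypothetical helper */
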
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index c08315b51fd3..ca6f2fc39ea0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1368,8 +1368,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
MLX5_COREDEV_VF : MLX5_COREDEV_PF;
dev->priv.adev_idx = mlx5_adev_idx_alloc();
- if (dev->priv.adev_idx < 0)
- return dev->priv.adev_idx;
+ if (dev->priv.adev_idx < 0) {
+ err = dev->priv.adev_idx;
+ goto adev_init_err;
+ }
err = mlx5_mdev_init(dev, prof_sel);
if (err)
@@ -1403,6 +1405,7 @@ pci_init_err:
mlx5_mdev_uninit(dev);
mdev_init_err:
mlx5_adev_idx_free(dev->priv.adev_idx);
+adev_init_err:
mlx5_devlink_free(devlink);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index eb956ce904bc..c0656d4782e1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -58,7 +58,7 @@ struct fw_page {
struct rb_node rb_node;
u64 addr;
struct page *page;
- u16 func_id;
+ u32 function;
unsigned long bitmask;
struct list_head list;
unsigned free_count;
@@ -74,12 +74,17 @@ enum {
MLX5_NUM_4K_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
};
-static struct rb_root *page_root_per_func_id(struct mlx5_core_dev *dev, u16 func_id)
+static u32 get_function(u16 func_id, bool ec_function)
+{
+ return (u32)func_id | (ec_function << 16);
+}
+
+static struct rb_root *page_root_per_function(struct mlx5_core_dev *dev, u32 function)
{
struct rb_root *root;
int err;
- root = xa_load(&dev->priv.page_root_xa, func_id);
+ root = xa_load(&dev->priv.page_root_xa, function);
if (root)
return root;
@@ -87,7 +92,7 @@ static struct rb_root *page_root_per_func_id(struct mlx5_core_dev *dev, u16 func
if (!root)
return ERR_PTR(-ENOMEM);
- err = xa_insert(&dev->priv.page_root_xa, func_id, root, GFP_KERNEL);
+ err = xa_insert(&dev->priv.page_root_xa, function, root, GFP_KERNEL);
if (err) {
kfree(root);
return ERR_PTR(err);
@@ -98,7 +103,7 @@ static struct rb_root *page_root_per_func_id(struct mlx5_core_dev *dev, u16 func
return root;
}
-static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
+static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u32 function)
{
struct rb_node *parent = NULL;
struct rb_root *root;
@@ -107,7 +112,7 @@ static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u
struct fw_page *tfp;
int i;
- root = page_root_per_func_id(dev, func_id);
+ root = page_root_per_function(dev, function);
if (IS_ERR(root))
return PTR_ERR(root);
@@ -130,7 +135,7 @@ static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u
nfp->addr = addr;
nfp->page = page;
- nfp->func_id = func_id;
+ nfp->function = function;
nfp->free_count = MLX5_NUM_4K_IN_PAGE;
for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++)
set_bit(i, &nfp->bitmask);
@@ -143,14 +148,14 @@ static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u
}
static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr,
- u32 func_id)
+ u32 function)
{
struct fw_page *result = NULL;
struct rb_root *root;
struct rb_node *tmp;
struct fw_page *tfp;
- root = xa_load(&dev->priv.page_root_xa, func_id);
+ root = xa_load(&dev->priv.page_root_xa, function);
if (WARN_ON_ONCE(!root))
return NULL;
@@ -194,14 +199,14 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
return err;
}
-static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u16 func_id)
+static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u32 function)
{
struct fw_page *fp = NULL;
struct fw_page *iter;
unsigned n;
list_for_each_entry(iter, &dev->priv.free_list, list) {
- if (iter->func_id != func_id)
+ if (iter->function != function)
continue;
fp = iter;
}
@@ -231,7 +236,7 @@ static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp,
{
struct rb_root *root;
- root = xa_load(&dev->priv.page_root_xa, fwp->func_id);
+ root = xa_load(&dev->priv.page_root_xa, fwp->function);
if (WARN_ON_ONCE(!root))
return;
@@ -244,12 +249,12 @@ static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp,
kfree(fwp);
}
-static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 func_id)
+static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 function)
{
struct fw_page *fwp;
int n;
- fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK, func_id);
+ fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK, function);
if (!fwp) {
mlx5_core_warn_rl(dev, "page not found\n");
return;
@@ -263,7 +268,7 @@ static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 func_id)
list_add(&fwp->list, &dev->priv.free_list);
}
-static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
+static int alloc_system_page(struct mlx5_core_dev *dev, u32 function)
{
struct device *device = mlx5_core_dma_dev(dev);
int nid = dev_to_node(device);
@@ -291,7 +296,7 @@ map:
goto map;
}
- err = insert_page(dev, addr, page, func_id);
+ err = insert_page(dev, addr, page, function);
if (err) {
mlx5_core_err(dev, "failed to track allocated page\n");
dma_unmap_page(device, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
@@ -328,6 +333,7 @@ static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id,
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
int notify_fail, bool ec_function)
{
+ u32 function = get_function(func_id, ec_function);
u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
u64 addr;
@@ -345,10 +351,10 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
for (i = 0; i < npages; i++) {
retry:
- err = alloc_4k(dev, &addr, func_id);
+ err = alloc_4k(dev, &addr, function);
if (err) {
if (err == -ENOMEM)
- err = alloc_system_page(dev, func_id);
+ err = alloc_system_page(dev, function);
if (err)
goto out_4k;
@@ -384,7 +390,7 @@ retry:
out_4k:
for (i--; i >= 0; i--)
- free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]), func_id);
+ free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]), function);
out_free:
kvfree(in);
if (notify_fail)
@@ -392,14 +398,15 @@ out_free:
return err;
}
-static void release_all_pages(struct mlx5_core_dev *dev, u32 func_id,
+static void release_all_pages(struct mlx5_core_dev *dev, u16 func_id,
bool ec_function)
{
+ u32 function = get_function(func_id, ec_function);
struct rb_root *root;
struct rb_node *p;
int npages = 0;
- root = xa_load(&dev->priv.page_root_xa, func_id);
+ root = xa_load(&dev->priv.page_root_xa, function);
if (WARN_ON_ONCE(!root))
return;
@@ -446,6 +453,7 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
struct rb_root *root;
struct fw_page *fwp;
struct rb_node *p;
+ bool ec_function;
u32 func_id;
u32 npages;
u32 i = 0;
@@ -456,8 +464,9 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
/* No hard feelings, we want our pages back! */
npages = MLX5_GET(manage_pages_in, in, input_num_entries);
func_id = MLX5_GET(manage_pages_in, in, function_id);
+ ec_function = MLX5_GET(manage_pages_in, in, embedded_cpu_function);
- root = xa_load(&dev->priv.page_root_xa, func_id);
+ root = xa_load(&dev->priv.page_root_xa, get_function(func_id, ec_function));
if (WARN_ON_ONCE(!root))
return -EEXIST;
@@ -473,9 +482,10 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
return 0;
}
-static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
+static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
int *nclaimed, bool ec_function)
{
+ u32 function = get_function(func_id, ec_function);
int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
int num_claimed;
@@ -514,7 +524,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
}
for (i = 0; i < num_claimed; i++)
- free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]), func_id);
+ free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]), function);
if (nclaimed)
*nclaimed = num_claimed;
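
get_function() widens the page-tracker key so that a function id and its embedded-CPU twin no longer collide in page_root_xa: the 16-bit id sits in the low half and the ec_function flag in bit 16. Packing, plus an illustrative inverse:

    static u32 get_function(u16 func_id, bool ec_function)
    {
            return (u32)func_id | (ec_function << 16);
    }

    /* inverse, for illustration only */
    static u16 function_id(u32 function)    { return function & 0xffff; }
    static bool is_ec_function(u32 function) { return function & BIT(16); }
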
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
index 0fc7de4aa572..8e0dddc6383f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
@@ -116,7 +116,7 @@ free:
static void mlx5_rdma_del_roce_addr(struct mlx5_core_dev *dev)
{
mlx5_core_roce_gid_set(dev, 0, 0, 0,
- NULL, NULL, false, 0, 0);
+ NULL, NULL, false, 0, 1);
}
static void mlx5_rdma_make_default_gid(struct mlx5_core_dev *dev, union ib_gid *gid)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index 8fa286ccdd6b..bf85ce9835d7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -19,7 +19,7 @@
#define MLXSW_THERMAL_ASIC_TEMP_NORM 75000 /* 75C */
#define MLXSW_THERMAL_ASIC_TEMP_HIGH 85000 /* 85C */
#define MLXSW_THERMAL_ASIC_TEMP_HOT 105000 /* 105C */
-#define MLXSW_THERMAL_ASIC_TEMP_CRIT 110000 /* 110C */
+#define MLXSW_THERMAL_ASIC_TEMP_CRIT 140000 /* 140C */
#define MLXSW_THERMAL_HYSTERESIS_TEMP 5000 /* 5C */
#define MLXSW_THERMAL_MODULE_TEMP_SHIFT (MLXSW_THERMAL_HYSTERESIS_TEMP * 2)
#define MLXSW_THERMAL_ZONE_MAX_NAME 16
@@ -176,6 +176,12 @@ mlxsw_thermal_module_trips_update(struct device *dev, struct mlxsw_core *core,
if (err)
return err;
+ if (crit_temp > emerg_temp) {
+ dev_warn(dev, "%s : Critical threshold %d is above emergency threshold %d\n",
+ tz->tzdev->type, crit_temp, emerg_temp);
+ return 0;
+ }
+
/* According to the system thermal requirements, the thermal zones are
* defined with four trip points. The critical and emergency
* temperature thresholds, provided by QSFP module are set as "active"
@@ -190,11 +196,8 @@ mlxsw_thermal_module_trips_update(struct device *dev, struct mlxsw_core *core,
tz->trips[MLXSW_THERMAL_TEMP_TRIP_NORM].temp = crit_temp;
tz->trips[MLXSW_THERMAL_TEMP_TRIP_HIGH].temp = crit_temp;
tz->trips[MLXSW_THERMAL_TEMP_TRIP_HOT].temp = emerg_temp;
- if (emerg_temp > crit_temp)
- tz->trips[MLXSW_THERMAL_TEMP_TRIP_CRIT].temp = emerg_temp +
+ tz->trips[MLXSW_THERMAL_TEMP_TRIP_CRIT].temp = emerg_temp +
MLXSW_THERMAL_MODULE_TEMP_SHIFT;
- else
- tz->trips[MLXSW_THERMAL_TEMP_TRIP_CRIT].temp = emerg_temp;
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index c6c5826aba41..1892cea05ee7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -157,6 +157,7 @@ mlxsw_sp1_span_entry_cpu_deconfigure(struct mlxsw_sp_span_entry *span_entry)
static const
struct mlxsw_sp_span_entry_ops mlxsw_sp1_span_entry_ops_cpu = {
+ .is_static = true,
.can_handle = mlxsw_sp1_span_cpu_can_handle,
.parms_set = mlxsw_sp1_span_entry_cpu_parms,
.configure = mlxsw_sp1_span_entry_cpu_configure,
@@ -214,6 +215,7 @@ mlxsw_sp_span_entry_phys_deconfigure(struct mlxsw_sp_span_entry *span_entry)
static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_phys = {
+ .is_static = true,
.can_handle = mlxsw_sp_port_dev_check,
.parms_set = mlxsw_sp_span_entry_phys_parms,
.configure = mlxsw_sp_span_entry_phys_configure,
@@ -721,6 +723,7 @@ mlxsw_sp2_span_entry_cpu_deconfigure(struct mlxsw_sp_span_entry *span_entry)
static const
struct mlxsw_sp_span_entry_ops mlxsw_sp2_span_entry_ops_cpu = {
+ .is_static = true,
.can_handle = mlxsw_sp2_span_cpu_can_handle,
.parms_set = mlxsw_sp2_span_entry_cpu_parms,
.configure = mlxsw_sp2_span_entry_cpu_configure,
@@ -1036,6 +1039,9 @@ static void mlxsw_sp_span_respin_work(struct work_struct *work)
if (!refcount_read(&curr->ref_count))
continue;
+ if (curr->ops->is_static)
+ continue;
+
err = curr->ops->parms_set(mlxsw_sp, curr->to_dev, &sparms);
if (err)
continue;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
index d907718bc8c5..aa1cd409c0e2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
@@ -60,6 +60,7 @@ struct mlxsw_sp_span_entry {
};
struct mlxsw_sp_span_entry_ops {
+ bool is_static;
bool (*can_handle)(const struct net_device *to_dev);
int (*parms_set)(struct mlxsw_sp *mlxsw_sp,
const struct net_device *to_dev,
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index dbd6c3946aa6..3804310c853a 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -1935,6 +1935,14 @@ static struct sk_buff *lan743x_rx_allocate_skb(struct lan743x_rx *rx)
length, GFP_ATOMIC | GFP_DMA);
}
+static void lan743x_rx_update_tail(struct lan743x_rx *rx, int index)
+{
+ /* update the tail once per 8 descriptors */
+ if ((index & 7) == 7)
+ lan743x_csr_write(rx->adapter, RX_TAIL(rx->channel_number),
+ index);
+}
+
static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index,
struct sk_buff *skb)
{
@@ -1965,6 +1973,7 @@ static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index,
descriptor->data0 = (RX_DESC_DATA0_OWN_ |
(length & RX_DESC_DATA0_BUF_LENGTH_MASK_));
skb_reserve(buffer_info->skb, RX_HEAD_PADDING);
+ lan743x_rx_update_tail(rx, index);
return 0;
}
@@ -1983,6 +1992,7 @@ static void lan743x_rx_reuse_ring_element(struct lan743x_rx *rx, int index)
descriptor->data0 = (RX_DESC_DATA0_OWN_ |
((buffer_info->buffer_length) &
RX_DESC_DATA0_BUF_LENGTH_MASK_));
+ lan743x_rx_update_tail(rx, index);
}
static void lan743x_rx_release_ring_element(struct lan743x_rx *rx, int index)
@@ -2193,6 +2203,7 @@ static int lan743x_rx_napi_poll(struct napi_struct *napi, int weight)
{
struct lan743x_rx *rx = container_of(napi, struct lan743x_rx, napi);
struct lan743x_adapter *adapter = rx->adapter;
+ int result = RX_PROCESS_RESULT_NOTHING_TO_DO;
u32 rx_tail_flags = 0;
int count;
@@ -2201,27 +2212,19 @@ static int lan743x_rx_napi_poll(struct napi_struct *napi, int weight)
lan743x_csr_write(adapter, DMAC_INT_STS,
DMAC_INT_BIT_RXFRM_(rx->channel_number));
}
- count = 0;
- while (count < weight) {
- int rx_process_result = lan743x_rx_process_packet(rx);
-
- if (rx_process_result == RX_PROCESS_RESULT_PACKET_RECEIVED) {
- count++;
- } else if (rx_process_result ==
- RX_PROCESS_RESULT_NOTHING_TO_DO) {
+ for (count = 0; count < weight; count++) {
+ result = lan743x_rx_process_packet(rx);
+ if (result == RX_PROCESS_RESULT_NOTHING_TO_DO)
break;
- } else if (rx_process_result ==
- RX_PROCESS_RESULT_PACKET_DROPPED) {
- continue;
- }
}
rx->frame_count += count;
- if (count == weight)
- goto done;
+ if (count == weight || result == RX_PROCESS_RESULT_PACKET_RECEIVED)
+ return weight;
if (!napi_complete_done(napi, count))
- goto done;
+ return count;
+ /* re-arm interrupts, must write to rx tail on some chip variants */
if (rx->vector_flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET)
rx_tail_flags |= RX_TAIL_SET_TOP_INT_VEC_EN_;
if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET) {
@@ -2231,10 +2234,10 @@ static int lan743x_rx_napi_poll(struct napi_struct *napi, int weight)
INT_BIT_DMA_RX_(rx->channel_number));
}
- /* update RX_TAIL */
- lan743x_csr_write(adapter, RX_TAIL(rx->channel_number),
- rx_tail_flags | rx->last_tail);
-done:
+ if (rx_tail_flags)
+ lan743x_csr_write(adapter, RX_TAIL(rx->channel_number),
+ rx_tail_flags | rx->last_tail);
+
return count;
}
@@ -2378,7 +2381,7 @@ static int lan743x_rx_open(struct lan743x_rx *rx)
netif_napi_add(adapter->netdev,
&rx->napi, lan743x_rx_napi_poll,
- rx->ring_size - 1);
+ NAPI_POLL_WEIGHT);
lan743x_csr_write(adapter, DMAC_CMD,
DMAC_CMD_RX_SWR_(rx->channel_number));
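The lan743x change above batches tail-pointer writes: the hardware only needs to learn about refilled RX buffers often enough to stay fed, so posting the tail once every eight descriptors trades a little buffer-visibility latency for far fewer MMIO writes on the refill path. A minimal user-space sketch of the batching test follows; csr_write() is a hypothetical stand-in for lan743x_csr_write().

#include <stdint.h>
#include <stdio.h>

#define RX_TAIL_REG 0x1000u

/* Hypothetical MMIO write, standing in for lan743x_csr_write(). */
static void csr_write(uint32_t reg, uint32_t val)
{
        printf("MMIO write %#x <- %u\n", reg, val);
}

/* Publish the ring tail to hardware only on every 8th descriptor,
 * mirroring the (index & 7) == 7 test in lan743x_rx_update_tail().
 */
static void rx_update_tail(int index)
{
        if ((index & 7) == 7)
                csr_write(RX_TAIL_REG, (uint32_t)index);
}

int main(void)
{
        int i;

        for (i = 0; i < 32; i++)
                rx_update_tail(i);      /* writes fire at 7, 15, 23, 31 */
        return 0;
}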
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 0b9992bd6626..ff87a0bc089c 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -60,14 +60,27 @@ int ocelot_mact_learn(struct ocelot *ocelot, int port,
const unsigned char mac[ETH_ALEN],
unsigned int vid, enum macaccess_entry_type type)
{
+ u32 cmd = ANA_TABLES_MACACCESS_VALID |
+ ANA_TABLES_MACACCESS_DEST_IDX(port) |
+ ANA_TABLES_MACACCESS_ENTRYTYPE(type) |
+ ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_LEARN);
+ unsigned int mc_ports;
+
+ /* Set MAC_CPU_COPY if the CPU port is used by a multicast entry */
+ if (type == ENTRYTYPE_MACv4)
+ mc_ports = (mac[1] << 8) | mac[2];
+ else if (type == ENTRYTYPE_MACv6)
+ mc_ports = (mac[0] << 8) | mac[1];
+ else
+ mc_ports = 0;
+
+ if (mc_ports & BIT(ocelot->num_phys_ports))
+ cmd |= ANA_TABLES_MACACCESS_MAC_CPU_COPY;
+
ocelot_mact_select(ocelot, mac, vid);
/* Issue a write command */
- ocelot_write(ocelot, ANA_TABLES_MACACCESS_VALID |
- ANA_TABLES_MACACCESS_DEST_IDX(port) |
- ANA_TABLES_MACACCESS_ENTRYTYPE(type) |
- ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_LEARN),
- ANA_TABLES_MACACCESS);
+ ocelot_write(ocelot, cmd, ANA_TABLES_MACACCESS);
return ocelot_mact_wait_for_completion(ocelot);
}
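The ocelot_mact_learn() rework above recovers the destination-port mask that IPv4/IPv6 multicast MAC entries embed in their address bytes, and sets MAC_CPU_COPY whenever the bit for the CPU port (bit num_phys_ports) is present in that mask. Below is a hedged stand-alone sketch of the decode, assuming the same byte layout the driver uses; the entry-type names are illustrative stand-ins for ENTRYTYPE_MACv4/MACv6.

#include <stdint.h>
#include <stdio.h>

enum entry_type { TYPE_MACV4, TYPE_MACV6, TYPE_OTHER };

/* For IPv4/IPv6 multicast entries the switch reuses part of the MAC
 * address as a destination-port mask; bit num_phys_ports in that mask
 * is the CPU port, so its presence requires MAC_CPU_COPY.
 */
static int needs_cpu_copy(enum entry_type type, const uint8_t mac[6],
                          unsigned int num_phys_ports)
{
        unsigned int mc_ports;

        if (type == TYPE_MACV4)
                mc_ports = (mac[1] << 8) | mac[2];
        else if (type == TYPE_MACV6)
                mc_ports = (mac[0] << 8) | mac[1];
        else
                mc_ports = 0;

        return !!(mc_ports & (1u << num_phys_ports));
}

int main(void)
{
        /* IPv4 entry whose port mask 0x0830 includes bit 11, the CPU
         * port on a hypothetical switch with 11 physical ports.
         */
        const uint8_t mac[6] = { 0x01, 0x08, 0x30, 0x00, 0x00, 0x01 };

        printf("CPU copy needed: %d\n",
               needs_cpu_copy(TYPE_MACV4, mac, 11));
        return 0;
}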
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index 2bd2840d88bd..42230f92ca9c 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -1042,10 +1042,8 @@ static int ocelot_netdevice_event(struct notifier_block *unused,
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
int ret = 0;
- if (!ocelot_netdevice_dev_check(dev))
- return 0;
-
if (event == NETDEV_PRECHANGEUPPER &&
+ ocelot_netdevice_dev_check(dev) &&
netif_is_lag_master(info->upper_dev)) {
struct netdev_lag_upper_info *lag_upper_info = info->upper_info;
struct netlink_ext_ack *extack;
diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
index 1e7729421a82..9cf2bc5f4289 100644
--- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c
+++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
@@ -1267,7 +1267,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
err = mscc_ocelot_init_ports(pdev, ports);
if (err)
- goto out_put_ports;
+ goto out_ocelot_deinit;
if (ocelot->ptp) {
err = ocelot_init_timestamp(ocelot, &ocelot_ptp_clock_info);
@@ -1282,8 +1282,14 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
register_switchdev_notifier(&ocelot_switchdev_nb);
register_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb);
+ of_node_put(ports);
+
dev_info(&pdev->dev, "Ocelot switch probed\n");
+ return 0;
+
+out_ocelot_deinit:
+ ocelot_deinit(ocelot);
out_put_ports:
of_node_put(ports);
return err;
diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c
index 776b7d264dc3..2289e1fe3741 100644
--- a/drivers/net/ethernet/natsemi/macsonic.c
+++ b/drivers/net/ethernet/natsemi/macsonic.c
@@ -506,10 +506,14 @@ static int mac_sonic_platform_probe(struct platform_device *pdev)
err = register_netdev(dev);
if (err)
- goto out;
+ goto undo_probe;
return 0;
+undo_probe:
+ dma_free_coherent(lp->device,
+ SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
+ lp->descriptors, lp->descriptors_laddr);
out:
free_netdev(dev);
@@ -584,12 +588,16 @@ static int mac_sonic_nubus_probe(struct nubus_board *board)
err = register_netdev(ndev);
if (err)
- goto out;
+ goto undo_probe;
nubus_set_drvdata(board, ndev);
return 0;
+undo_probe:
+ dma_free_coherent(lp->device,
+ SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
+ lp->descriptors, lp->descriptors_laddr);
out:
free_netdev(ndev);
return err;
diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c
index afa166ff7aef..28d9e98db81a 100644
--- a/drivers/net/ethernet/natsemi/xtsonic.c
+++ b/drivers/net/ethernet/natsemi/xtsonic.c
@@ -229,11 +229,14 @@ int xtsonic_probe(struct platform_device *pdev)
sonic_msg_init(dev);
if ((err = register_netdev(dev)))
- goto out1;
+ goto undo_probe1;
return 0;
-out1:
+undo_probe1:
+ dma_free_coherent(lp->device,
+ SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
+ lp->descriptors, lp->descriptors_laddr);
release_region(dev->base_addr, SONIC_MEM_SIZE);
out:
free_netdev(dev);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index bb448c82cdc2..c029950a81e2 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -860,9 +860,6 @@ static void nfp_flower_clean(struct nfp_app *app)
skb_queue_purge(&app_priv->cmsg_skbs_low);
flush_work(&app_priv->cmsg_work);
- flow_indr_dev_unregister(nfp_flower_indr_setup_tc_cb, app,
- nfp_flower_setup_indr_tc_release);
-
if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)
nfp_flower_qos_cleanup(app);
@@ -951,6 +948,9 @@ static int nfp_flower_start(struct nfp_app *app)
static void nfp_flower_stop(struct nfp_app *app)
{
nfp_tunnel_config_stop(app);
+
+ flow_indr_dev_unregister(nfp_flower_indr_setup_tc_cb, app,
+ nfp_flower_setup_indr_tc_release);
}
static int
diff --git a/drivers/net/ethernet/ni/Kconfig b/drivers/net/ethernet/ni/Kconfig
index 01229190132d..dcfbfa516e67 100644
--- a/drivers/net/ethernet/ni/Kconfig
+++ b/drivers/net/ethernet/ni/Kconfig
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
#
-# National Instuments network device configuration
+# National Instruments network device configuration
#
config NET_VENDOR_NI
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 9156c9825a16..ac4cd5d82e69 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -337,7 +337,7 @@ void ionic_rx_fill(struct ionic_queue *q)
unsigned int i, j;
unsigned int len;
- len = netdev->mtu + ETH_HLEN;
+ len = netdev->mtu + ETH_HLEN + VLAN_HLEN;
nfrags = round_up(len, PAGE_SIZE) / PAGE_SIZE;
for (i = ionic_q_space_avail(q); i; i--) {
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index 4366c7a8de95..6b5ddb07ee83 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -78,6 +78,7 @@ config QED
depends on PCI
select ZLIB_INFLATE
select CRC8
+ select CRC32
select NET_DEVLINK
help
This enables the support for Marvell FastLinQ adapters family.
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index f21847739ef1..d258e0ccf946 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -564,11 +564,6 @@ static const struct net_device_ops netxen_netdev_ops = {
.ndo_set_features = netxen_set_features,
};
-static inline bool netxen_function_zero(struct pci_dev *pdev)
-{
- return (PCI_FUNC(pdev->devfn) == 0) ? true : false;
-}
-
static inline void netxen_set_interrupt_mode(struct netxen_adapter *adapter,
u32 mode)
{
@@ -664,7 +659,7 @@ static int netxen_setup_intr(struct netxen_adapter *adapter)
netxen_initialize_interrupt_registers(adapter);
netxen_set_msix_bit(pdev, 0);
- if (netxen_function_zero(pdev)) {
+ if (adapter->portnum == 0) {
if (!netxen_setup_msi_interrupts(adapter, num_msix))
netxen_set_interrupt_mode(adapter, NETXEN_MSI_MODE);
else
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index a2494bf85007..ca0ee29a57b5 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -1799,6 +1799,11 @@ netdev_features_t qede_features_check(struct sk_buff *skb,
ntohs(udp_hdr(skb)->dest) != gnv_port))
return features & ~(NETIF_F_CSUM_MASK |
NETIF_F_GSO_MASK);
+ } else if (l4_proto == IPPROTO_IPIP) {
+ /* IPIP tunnels are unknown to the device, or at least not supported
+ * natively; offloads for them can't be done trivially, so disable
+ * them for such skbs.
+ */
+ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 5a7e240fd469..c2faf96fcade 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -2492,6 +2492,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
qlcnic_sriov_vf_register_map(ahw);
break;
default:
+ err = -EINVAL;
goto err_out_free_hw_res;
}
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 46d8510b2fe2..0d78408b4e26 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -2207,7 +2207,8 @@ static void rtl_pll_power_down(struct rtl8169_private *tp)
}
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33:
+ case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26:
+ case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_33:
case RTL_GIGA_MAC_VER_37:
case RTL_GIGA_MAC_VER_39:
case RTL_GIGA_MAC_VER_43:
@@ -2233,7 +2234,8 @@ static void rtl_pll_power_down(struct rtl8169_private *tp)
static void rtl_pll_power_up(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33:
+ case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26:
+ case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_33:
case RTL_GIGA_MAC_VER_37:
case RTL_GIGA_MAC_VER_39:
case RTL_GIGA_MAC_VER_43:
@@ -4044,17 +4046,72 @@ err_out:
return -EIO;
}
-static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp)
+static bool rtl_skb_is_udp(struct sk_buff *skb)
{
+ int no = skb_network_offset(skb);
+ struct ipv6hdr *i6h, _i6h;
+ struct iphdr *ih, _ih;
+
+ switch (vlan_get_protocol(skb)) {
+ case htons(ETH_P_IP):
+ ih = skb_header_pointer(skb, no, sizeof(_ih), &_ih);
+ return ih && ih->protocol == IPPROTO_UDP;
+ case htons(ETH_P_IPV6):
+ i6h = skb_header_pointer(skb, no, sizeof(_i6h), &_i6h);
+ return i6h && i6h->nexthdr == IPPROTO_UDP;
+ default:
+ return false;
+ }
+}
+
+#define RTL_MIN_PATCH_LEN 47
+
+/* see rtl8125_get_patch_pad_len() in r8125 vendor driver */
+static unsigned int rtl8125_quirk_udp_padto(struct rtl8169_private *tp,
+ struct sk_buff *skb)
+{
+ unsigned int padto = 0, len = skb->len;
+
+ if (rtl_is_8125(tp) && len < 128 + RTL_MIN_PATCH_LEN &&
+ rtl_skb_is_udp(skb) && skb_transport_header_was_set(skb)) {
+ unsigned int trans_data_len = skb_tail_pointer(skb) -
+ skb_transport_header(skb);
+
+ if (trans_data_len >= offsetof(struct udphdr, len) &&
+ trans_data_len < RTL_MIN_PATCH_LEN) {
+ u16 dest = ntohs(udp_hdr(skb)->dest);
+
+ /* dest is a standard PTP port */
+ if (dest == 319 || dest == 320)
+ padto = len + RTL_MIN_PATCH_LEN - trans_data_len;
+ }
+
+ if (trans_data_len < sizeof(struct udphdr))
+ padto = max_t(unsigned int, padto,
+ len + sizeof(struct udphdr) - trans_data_len);
+ }
+
+ return padto;
+}
+
+static unsigned int rtl_quirk_packet_padto(struct rtl8169_private *tp,
+ struct sk_buff *skb)
+{
+ unsigned int padto;
+
+ padto = rtl8125_quirk_udp_padto(tp, skb);
+
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_34:
case RTL_GIGA_MAC_VER_60:
case RTL_GIGA_MAC_VER_61:
case RTL_GIGA_MAC_VER_63:
- return true;
+ padto = max_t(unsigned int, padto, ETH_ZLEN);
default:
- return false;
+ break;
}
+
+ return padto;
}
static void rtl8169_tso_csum_v1(struct sk_buff *skb, u32 *opts)
@@ -4126,9 +4183,10 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
opts[1] |= transport_offset << TCPHO_SHIFT;
} else {
- if (unlikely(skb->len < ETH_ZLEN && rtl_test_hw_pad_bug(tp)))
- /* eth_skb_pad would free the skb on error */
- return !__skb_put_padto(skb, ETH_ZLEN, false);
+ unsigned int padto = rtl_quirk_packet_padto(tp, skb);
+
+ /* skb_padto would free the skb on error */
+ return !__skb_put_padto(skb, padto, false);
}
return true;
@@ -4305,6 +4363,9 @@ static netdev_features_t rtl8169_features_check(struct sk_buff *skb,
if (skb->len < ETH_ZLEN)
features &= ~NETIF_F_CSUM_MASK;
+ if (rtl_quirk_packet_padto(tp, skb))
+ features &= ~NETIF_F_CSUM_MASK;
+
if (transport_offset > TCPHO_MAX &&
rtl_chip_supports_csum_v2(tp))
features &= ~NETIF_F_CSUM_MASK;
@@ -4643,10 +4704,10 @@ static int rtl8169_close(struct net_device *dev)
cancel_work_sync(&tp->wk.work);
- phy_disconnect(tp->phydev);
-
free_irq(pci_irq_vector(pdev, 0), tp);
+ phy_disconnect(tp->phydev);
+
dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
tp->RxPhyAddr);
dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
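The r8169 rework above replaces a boolean pad-bug test with a per-packet pad target: on RTL8125 chips, small UDP frames (notably PTP event packets on ports 319/320) must be padded so the transport portion reaches at least RTL_MIN_PATCH_LEN bytes. A stand-alone sketch of that arithmetic follows, with plain integers in place of the skb accessors; the driver additionally gates this on rtl_is_8125() and len < 128 + RTL_MIN_PATCH_LEN, which is omitted here.

#include <stdio.h>

#define RTL_MIN_PATCH_LEN 47u   /* from the driver */
#define UDP_HDR_LEN        8u   /* sizeof(struct udphdr) */
#define UDP_LEN_OFFSET     4u   /* offsetof(struct udphdr, len) */

/* Compute the pad target for a small UDP frame; dport 319/320 are the
 * standard PTP event/general ports, as in rtl8125_quirk_udp_padto().
 */
static unsigned int quirk_padto(unsigned int len,
                                unsigned int trans_data_len,
                                unsigned int dport)
{
        unsigned int padto = 0;

        if (trans_data_len >= UDP_LEN_OFFSET &&
            trans_data_len < RTL_MIN_PATCH_LEN &&
            (dport == 319 || dport == 320))
                padto = len + RTL_MIN_PATCH_LEN - trans_data_len;

        /* A truncated UDP header must at least be completed. */
        if (trans_data_len < UDP_HDR_LEN) {
                unsigned int alt = len + UDP_HDR_LEN - trans_data_len;

                if (alt > padto)
                        padto = alt;
        }
        return padto;
}

int main(void)
{
        /* 60-byte frame whose UDP portion is 16 bytes, sent to port
         * 319: pad to 60 + 47 - 16 = 91 bytes.
         */
        printf("pad to %u bytes\n", quirk_padto(60, 16, 319));
        return 0;
}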
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index c63304632935..590b088bc4c7 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2606,10 +2606,10 @@ static int sh_eth_close(struct net_device *ndev)
/* Free all the skbuffs in the Rx queue and the DMA buffer. */
sh_eth_ring_free(ndev);
- pm_runtime_put_sync(&mdp->pdev->dev);
-
mdp->is_opened = 0;
+ pm_runtime_put(&mdp->pdev->dev);
+
return 0;
}
@@ -3034,6 +3034,28 @@ static int sh_mdio_release(struct sh_eth_private *mdp)
return 0;
}
+static int sh_mdiobb_read(struct mii_bus *bus, int phy, int reg)
+{
+ int res;
+
+ pm_runtime_get_sync(bus->parent);
+ res = mdiobb_read(bus, phy, reg);
+ pm_runtime_put(bus->parent);
+
+ return res;
+}
+
+static int sh_mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
+{
+ int res;
+
+ pm_runtime_get_sync(bus->parent);
+ res = mdiobb_write(bus, phy, reg, val);
+ pm_runtime_put(bus->parent);
+
+ return res;
+}
+
/* MDIO bus init function */
static int sh_mdio_init(struct sh_eth_private *mdp,
struct sh_eth_plat_data *pd)
@@ -3058,6 +3080,10 @@ static int sh_mdio_init(struct sh_eth_private *mdp,
if (!mdp->mii_bus)
return -ENOMEM;
+ /* Wrap accessors with Runtime PM-aware ops */
+ mdp->mii_bus->read = sh_mdiobb_read;
+ mdp->mii_bus->write = sh_mdiobb_write;
+
/* Hook up MII support for ethtool */
mdp->mii_bus->name = "sh_mii";
mdp->mii_bus->parent = dev;
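The sh_eth hunks above wrap the generic bitbang MDIO accessors so the MAC is runtime-resumed for the duration of each bus transfer; mdiobb_read()/mdiobb_write() are exported from mdio-bitbang further down for exactly this purpose. A minimal sketch of the wrap-the-accessor shape follows, with pm_get()/pm_put() and raw_mdio_read() as hypothetical stand-ins for pm_runtime_get_sync()/pm_runtime_put() and the raw accessor.

#include <stdio.h>

static void pm_get(void) { puts("resume MAC"); }
static void pm_put(void) { puts("allow MAC to suspend"); }
static int raw_mdio_read(int phy, int reg)
{
        (void)phy; (void)reg;
        return 0x796d;          /* pretend register value */
}

/* MDIO transfers touch MAC registers, so the device must be resumed
 * for the duration of each access, as sh_mdiobb_read() does above.
 */
static int pm_mdio_read(int phy, int reg)
{
        int res;

        pm_get();
        res = raw_mdio_read(phy, reg);
        pm_put();

        return res;
}

int main(void)
{
        printf("BMSR = %#x\n", pm_mdio_read(0, 1));
        return 0;
}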
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
index 82b1c7a5a7a9..ba0e4d2b256a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
@@ -129,7 +129,7 @@ static int intel_eth_plat_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev,
"Failed to set tx_clk\n");
- return ret;
+ goto err_remove_config_dt;
}
}
}
@@ -143,7 +143,7 @@ static int intel_eth_plat_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev,
"Failed to set clk_ptp_ref\n");
- return ret;
+ goto err_remove_config_dt;
}
}
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index a2e80c89de2d..103d2448e9e0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -375,6 +375,7 @@ static int ehl_pse0_common_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
plat->bus_id = 2;
+ plat->addr64 = 32;
return ehl_common_data(pdev, plat);
}
@@ -406,6 +407,7 @@ static int ehl_pse1_common_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
plat->bus_id = 3;
+ plat->addr64 = 32;
return ehl_common_data(pdev, plat);
}
@@ -721,6 +723,8 @@ static SIMPLE_DEV_PM_OPS(intel_eth_pm_ops, intel_eth_pci_suspend,
#define PCI_DEVICE_ID_INTEL_EHL_PSE1_RGMII1G_ID 0x4bb0
#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII1G_ID 0x4bb1
#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII2G5_ID 0x4bb2
+#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_0_ID 0x43ac
+#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_1_ID 0x43a2
#define PCI_DEVICE_ID_INTEL_TGL_SGMII1G_ID 0xa0ac
static const struct pci_device_id intel_eth_pci_id_table[] = {
@@ -735,6 +739,8 @@ static const struct pci_device_id intel_eth_pci_id_table[] = {
{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII1G_ID, &ehl_pse1_sgmii1g_info) },
{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII2G5_ID, &ehl_pse1_sgmii1g_info) },
{ PCI_DEVICE_DATA(INTEL, TGL_SGMII1G_ID, &tgl_sgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_0_ID, &tgl_sgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_1_ID, &tgl_sgmii1g_info) },
{}
};
MODULE_DEVICE_TABLE(pci, intel_eth_pci_id_table);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index 459ae715b33d..f184b00f5116 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -135,7 +135,7 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
struct device *dev = dwmac->dev;
static const struct clk_parent_data mux_parents[] = {
{ .fw_name = "clkin0", },
- { .fw_name = "clkin1", },
+ { .index = -1, },
};
static const struct clk_div_table div_table[] = {
{ .div = 2, .val = 2, },
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 58e0511badba..a5e0eff4a387 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -64,6 +64,7 @@ struct emac_variant {
* @variant: reference to the current board variant
* @regmap: regmap for using the syscon
* @internal_phy_powered: Is the internal PHY powered up
+ * @use_internal_phy: Is the internal PHY selected for use
* @mux_handle: Internal pointer used by mdio-mux lib
*/
struct sunxi_priv_data {
@@ -74,6 +75,7 @@ struct sunxi_priv_data {
const struct emac_variant *variant;
struct regmap_field *regmap_field;
bool internal_phy_powered;
+ bool use_internal_phy;
void *mux_handle;
};
@@ -539,8 +541,11 @@ static const struct stmmac_dma_ops sun8i_dwmac_dma_ops = {
.dma_interrupt = sun8i_dwmac_dma_interrupt,
};
+static int sun8i_dwmac_power_internal_phy(struct stmmac_priv *priv);
+
static int sun8i_dwmac_init(struct platform_device *pdev, void *priv)
{
+ struct net_device *ndev = platform_get_drvdata(pdev);
struct sunxi_priv_data *gmac = priv;
int ret;
@@ -554,13 +559,25 @@ static int sun8i_dwmac_init(struct platform_device *pdev, void *priv)
ret = clk_prepare_enable(gmac->tx_clk);
if (ret) {
- if (gmac->regulator)
- regulator_disable(gmac->regulator);
dev_err(&pdev->dev, "Could not enable AHB clock\n");
- return ret;
+ goto err_disable_regulator;
+ }
+
+ if (gmac->use_internal_phy) {
+ ret = sun8i_dwmac_power_internal_phy(netdev_priv(ndev));
+ if (ret)
+ goto err_disable_clk;
}
return 0;
+
+err_disable_clk:
+ clk_disable_unprepare(gmac->tx_clk);
+err_disable_regulator:
+ if (gmac->regulator)
+ regulator_disable(gmac->regulator);
+
+ return ret;
}
static void sun8i_dwmac_core_init(struct mac_device_info *hw,
@@ -831,7 +848,6 @@ static int mdio_mux_syscon_switch_fn(int current_child, int desired_child,
struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
u32 reg, val;
int ret = 0;
- bool need_power_ephy = false;
if (current_child ^ desired_child) {
regmap_field_read(gmac->regmap_field, &reg);
@@ -839,13 +855,12 @@ static int mdio_mux_syscon_switch_fn(int current_child, int desired_child,
case DWMAC_SUN8I_MDIO_MUX_INTERNAL_ID:
dev_info(priv->device, "Switch mux to internal PHY");
val = (reg & ~H3_EPHY_MUX_MASK) | H3_EPHY_SELECT;
-
- need_power_ephy = true;
+ gmac->use_internal_phy = true;
break;
case DWMAC_SUN8I_MDIO_MUX_EXTERNAL_ID:
dev_info(priv->device, "Switch mux to external PHY");
val = (reg & ~H3_EPHY_MUX_MASK) | H3_EPHY_SHUTDOWN;
- need_power_ephy = false;
+ gmac->use_internal_phy = false;
break;
default:
dev_err(priv->device, "Invalid child ID %x\n",
@@ -853,7 +868,7 @@ static int mdio_mux_syscon_switch_fn(int current_child, int desired_child,
return -EINVAL;
}
regmap_field_write(gmac->regmap_field, val);
- if (need_power_ephy) {
+ if (gmac->use_internal_phy) {
ret = sun8i_dwmac_power_internal_phy(priv);
if (ret)
return ret;
@@ -883,22 +898,23 @@ static int sun8i_dwmac_register_mdio_mux(struct stmmac_priv *priv)
return ret;
}
-static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
+static int sun8i_dwmac_set_syscon(struct device *dev,
+ struct plat_stmmacenet_data *plat)
{
- struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
- struct device_node *node = priv->device->of_node;
+ struct sunxi_priv_data *gmac = plat->bsp_priv;
+ struct device_node *node = dev->of_node;
int ret;
u32 reg, val;
ret = regmap_field_read(gmac->regmap_field, &val);
if (ret) {
- dev_err(priv->device, "Fail to read from regmap field.\n");
+ dev_err(dev, "Fail to read from regmap field.\n");
return ret;
}
reg = gmac->variant->default_syscon_value;
if (reg != val)
- dev_warn(priv->device,
+ dev_warn(dev,
"Current syscon value is not the default %x (expect %x)\n",
val, reg);
@@ -911,9 +927,9 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
/* Force EPHY xtal frequency to 24MHz. */
reg |= H3_EPHY_CLK_SEL;
- ret = of_mdio_parse_addr(priv->device, priv->plat->phy_node);
+ ret = of_mdio_parse_addr(dev, plat->phy_node);
if (ret < 0) {
- dev_err(priv->device, "Could not parse MDIO addr\n");
+ dev_err(dev, "Could not parse MDIO addr\n");
return ret;
}
/* of_mdio_parse_addr returns a valid (0 ~ 31) PHY
@@ -929,17 +945,17 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
if (!of_property_read_u32(node, "allwinner,tx-delay-ps", &val)) {
if (val % 100) {
- dev_err(priv->device, "tx-delay must be a multiple of 100\n");
+ dev_err(dev, "tx-delay must be a multiple of 100\n");
return -EINVAL;
}
val /= 100;
- dev_dbg(priv->device, "set tx-delay to %x\n", val);
+ dev_dbg(dev, "set tx-delay to %x\n", val);
if (val <= gmac->variant->tx_delay_max) {
reg &= ~(gmac->variant->tx_delay_max <<
SYSCON_ETXDC_SHIFT);
reg |= (val << SYSCON_ETXDC_SHIFT);
} else {
- dev_err(priv->device, "Invalid TX clock delay: %d\n",
+ dev_err(dev, "Invalid TX clock delay: %d\n",
val);
return -EINVAL;
}
@@ -947,17 +963,17 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
if (!of_property_read_u32(node, "allwinner,rx-delay-ps", &val)) {
if (val % 100) {
- dev_err(priv->device, "rx-delay must be a multiple of 100\n");
+ dev_err(dev, "rx-delay must be a multiple of 100\n");
return -EINVAL;
}
val /= 100;
- dev_dbg(priv->device, "set rx-delay to %x\n", val);
+ dev_dbg(dev, "set rx-delay to %x\n", val);
if (val <= gmac->variant->rx_delay_max) {
reg &= ~(gmac->variant->rx_delay_max <<
SYSCON_ERXDC_SHIFT);
reg |= (val << SYSCON_ERXDC_SHIFT);
} else {
- dev_err(priv->device, "Invalid RX clock delay: %d\n",
+ dev_err(dev, "Invalid RX clock delay: %d\n",
val);
return -EINVAL;
}
@@ -968,7 +984,7 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
if (gmac->variant->support_rmii)
reg &= ~SYSCON_RMII_EN;
- switch (priv->plat->interface) {
+ switch (plat->interface) {
case PHY_INTERFACE_MODE_MII:
/* default */
break;
@@ -982,8 +998,8 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
reg |= SYSCON_RMII_EN | SYSCON_ETCS_EXT_GMII;
break;
default:
- dev_err(priv->device, "Unsupported interface mode: %s",
- phy_modes(priv->plat->interface));
+ dev_err(dev, "Unsupported interface mode: %s",
+ phy_modes(plat->interface));
return -EINVAL;
}
@@ -1004,17 +1020,10 @@ static void sun8i_dwmac_exit(struct platform_device *pdev, void *priv)
struct sunxi_priv_data *gmac = priv;
if (gmac->variant->soc_has_internal_phy) {
- /* sun8i_dwmac_exit could be called with mdiomux uninit */
- if (gmac->mux_handle)
- mdio_mux_uninit(gmac->mux_handle);
if (gmac->internal_phy_powered)
sun8i_dwmac_unpower_internal_phy(gmac);
}
- sun8i_dwmac_unset_syscon(gmac);
-
- reset_control_put(gmac->rst_ephy);
-
clk_disable_unprepare(gmac->tx_clk);
if (gmac->regulator)
@@ -1049,16 +1058,11 @@ static struct mac_device_info *sun8i_dwmac_setup(void *ppriv)
{
struct mac_device_info *mac;
struct stmmac_priv *priv = ppriv;
- int ret;
mac = devm_kzalloc(priv->device, sizeof(*mac), GFP_KERNEL);
if (!mac)
return NULL;
- ret = sun8i_dwmac_set_syscon(priv);
- if (ret)
- return NULL;
-
mac->pcsr = priv->ioaddr;
mac->mac = &sun8i_dwmac_ops;
mac->dma = &sun8i_dwmac_dma_ops;
@@ -1134,10 +1138,6 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
- if (IS_ERR(plat_dat))
- return PTR_ERR(plat_dat);
-
gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
if (!gmac)
return -ENOMEM;
@@ -1201,11 +1201,15 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
ret = of_get_phy_mode(dev->of_node, &interface);
if (ret)
return -EINVAL;
- plat_dat->interface = interface;
+
+ plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
/* platform data specifying hardware features and callbacks.
* hardware features were copied from Allwinner drivers.
*/
+ plat_dat->interface = interface;
plat_dat->rx_coe = STMMAC_RX_COE_TYPE2;
plat_dat->tx_coe = 1;
plat_dat->has_sun8i = true;
@@ -1214,9 +1218,13 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
plat_dat->exit = sun8i_dwmac_exit;
plat_dat->setup = sun8i_dwmac_setup;
+ ret = sun8i_dwmac_set_syscon(&pdev->dev, plat_dat);
+ if (ret)
+ goto dwmac_deconfig;
+
ret = sun8i_dwmac_init(pdev, plat_dat->bsp_priv);
if (ret)
- return ret;
+ goto dwmac_syscon;
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (ret)
@@ -1230,7 +1238,7 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
if (gmac->variant->soc_has_internal_phy) {
ret = get_ephy_nodes(priv);
if (ret)
- goto dwmac_exit;
+ goto dwmac_remove;
ret = sun8i_dwmac_register_mdio_mux(priv);
if (ret) {
dev_err(&pdev->dev, "Failed to register mux\n");
@@ -1239,15 +1247,42 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
} else {
ret = sun8i_dwmac_reset(priv);
if (ret)
- goto dwmac_exit;
+ goto dwmac_remove;
}
return ret;
dwmac_mux:
- sun8i_dwmac_unset_syscon(gmac);
+ reset_control_put(gmac->rst_ephy);
+ clk_put(gmac->ephy_clk);
+dwmac_remove:
+ stmmac_dvr_remove(&pdev->dev);
dwmac_exit:
+ sun8i_dwmac_exit(pdev, gmac);
+dwmac_syscon:
+ sun8i_dwmac_unset_syscon(gmac);
+dwmac_deconfig:
+ stmmac_remove_config_dt(pdev, plat_dat);
+
+ return ret;
+}
+
+static int sun8i_dwmac_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
+
+ if (gmac->variant->soc_has_internal_phy) {
+ mdio_mux_uninit(gmac->mux_handle);
+ sun8i_dwmac_unpower_internal_phy(gmac);
+ reset_control_put(gmac->rst_ephy);
+ clk_put(gmac->ephy_clk);
+ }
+
stmmac_pltfr_remove(pdev);
-return ret;
+ sun8i_dwmac_unset_syscon(gmac);
+
+ return 0;
}
static const struct of_device_id sun8i_dwmac_match[] = {
@@ -1269,7 +1304,7 @@ MODULE_DEVICE_TABLE(of, sun8i_dwmac_match);
static struct platform_driver sun8i_dwmac_driver = {
.probe = sun8i_dwmac_probe,
- .remove = stmmac_pltfr_remove,
+ .remove = sun8i_dwmac_remove,
.driver = {
.name = "dwmac-sun8i",
.pm = &stmmac_pltfr_pm_ops,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
index 03e79a677c8b..8f7ac24545ef 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
@@ -568,68 +568,24 @@ static int dwmac5_est_write(void __iomem *ioaddr, u32 reg, u32 val, bool gcl)
int dwmac5_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg,
unsigned int ptp_rate)
{
- u32 speed, total_offset, offset, ctrl, ctr_low;
- u32 extcfg = readl(ioaddr + GMAC_EXT_CONFIG);
- u32 mac_cfg = readl(ioaddr + GMAC_CONFIG);
int i, ret = 0x0;
- u64 total_ctr;
-
- if (extcfg & GMAC_CONFIG_EIPG_EN) {
- offset = (extcfg & GMAC_CONFIG_EIPG) >> GMAC_CONFIG_EIPG_SHIFT;
- offset = 104 + (offset * 8);
- } else {
- offset = (mac_cfg & GMAC_CONFIG_IPG) >> GMAC_CONFIG_IPG_SHIFT;
- offset = 96 - (offset * 8);
- }
-
- speed = mac_cfg & (GMAC_CONFIG_PS | GMAC_CONFIG_FES);
- speed = speed >> GMAC_CONFIG_FES_SHIFT;
-
- switch (speed) {
- case 0x0:
- offset = offset * 1000; /* 1G */
- break;
- case 0x1:
- offset = offset * 400; /* 2.5G */
- break;
- case 0x2:
- offset = offset * 100000; /* 10M */
- break;
- case 0x3:
- offset = offset * 10000; /* 100M */
- break;
- default:
- return -EINVAL;
- }
-
- offset = offset / 1000;
+ u32 ctrl;
ret |= dwmac5_est_write(ioaddr, BTR_LOW, cfg->btr[0], false);
ret |= dwmac5_est_write(ioaddr, BTR_HIGH, cfg->btr[1], false);
ret |= dwmac5_est_write(ioaddr, TER, cfg->ter, false);
ret |= dwmac5_est_write(ioaddr, LLR, cfg->gcl_size, false);
+ ret |= dwmac5_est_write(ioaddr, CTR_LOW, cfg->ctr[0], false);
+ ret |= dwmac5_est_write(ioaddr, CTR_HIGH, cfg->ctr[1], false);
if (ret)
return ret;
- total_offset = 0;
for (i = 0; i < cfg->gcl_size; i++) {
- ret = dwmac5_est_write(ioaddr, i, cfg->gcl[i] + offset, true);
+ ret = dwmac5_est_write(ioaddr, i, cfg->gcl[i], true);
if (ret)
return ret;
-
- total_offset += offset;
}
- total_ctr = cfg->ctr[0] + cfg->ctr[1] * 1000000000ULL;
- total_ctr += total_offset;
-
- ctr_low = do_div(total_ctr, 1000000000);
-
- ret |= dwmac5_est_write(ioaddr, CTR_LOW, ctr_low, false);
- ret |= dwmac5_est_write(ioaddr, CTR_HIGH, total_ctr, false);
- if (ret)
- return ret;
-
ctrl = readl(ioaddr + MTL_EST_CONTROL);
ctrl &= ~PTOV;
ctrl |= ((1000000000 / ptp_rate) * 6) << PTOV_SHIFT;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 5b1c12ff98c0..26b971cd4da5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2184,7 +2184,7 @@ static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
spin_lock_irqsave(&ch->lock, flags);
stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
spin_unlock_irqrestore(&ch->lock, flags);
- __napi_schedule_irqoff(&ch->rx_napi);
+ __napi_schedule(&ch->rx_napi);
}
}
@@ -2193,7 +2193,7 @@ static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
spin_lock_irqsave(&ch->lock, flags);
stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
spin_unlock_irqrestore(&ch->lock, flags);
- __napi_schedule_irqoff(&ch->tx_napi);
+ __napi_schedule(&ch->tx_napi);
}
}
@@ -4026,6 +4026,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
{
struct stmmac_priv *priv = netdev_priv(dev);
int txfifosz = priv->plat->tx_fifo_size;
+ const int mtu = new_mtu;
if (txfifosz == 0)
txfifosz = priv->dma_cap.tx_fifo_size;
@@ -4043,7 +4044,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
return -EINVAL;
- dev->mtu = new_mtu;
+ dev->mtu = mtu;
netdev_update_features(dev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index f5bed4d26e80..8ed3b2c834a0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -599,7 +599,8 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
{
u32 size, wid = priv->dma_cap.estwid, dep = priv->dma_cap.estdep;
struct plat_stmmacenet_data *plat = priv->plat;
- struct timespec64 time;
+ struct timespec64 time, current_time;
+ ktime_t current_time_ns;
bool fpe = false;
int i, ret = 0;
u64 ctr;
@@ -694,7 +695,22 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
}
/* Adjust for real system time */
- time = ktime_to_timespec64(qopt->base_time);
+ priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, &current_time);
+ current_time_ns = timespec64_to_ktime(current_time);
+ if (ktime_after(qopt->base_time, current_time_ns)) {
+ time = ktime_to_timespec64(qopt->base_time);
+ } else {
+ ktime_t base_time;
+ s64 n;
+
+ n = div64_s64(ktime_sub_ns(current_time_ns, qopt->base_time),
+ qopt->cycle_time);
+ base_time = ktime_add_ns(qopt->base_time,
+ (n + 1) * qopt->cycle_time);
+
+ time = ktime_to_timespec64(base_time);
+ }
+
priv->plat->est->btr[0] = (u32)time.tv_nsec;
priv->plat->est->btr[1] = (u32)time.tv_sec;
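The taprio change above keeps the EST base time in the future: if qopt->base_time has already passed, it is advanced by enough whole cycle_time periods to land on the first cycle boundary after the PTP clock's current time. A small sketch of that arithmetic with signed 64-bit nanoseconds; div64_s64() and the ktime helpers reduce to plain integer math here.

#include <stdint.h>
#include <stdio.h>

/* If base is already in the past, advance it by whole cycles so the
 * schedule starts at the next future cycle boundary:
 *   n = (now - base) / cycle;  base += (n + 1) * cycle;
 */
static int64_t next_base_time(int64_t base, int64_t cycle, int64_t now)
{
        int64_t n;

        if (base > now)
                return base;

        n = (now - base) / cycle;
        return base + (n + 1) * cycle;
}

int main(void)
{
        /* base 1000 ns, cycle 300 ns, now 1950 ns: n = 3, so the
         * schedule starts at 1000 + 4 * 300 = 2200 ns.
         */
        printf("%lld\n", (long long)next_base_time(1000, 300, 1950));
        return 0;
}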
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index d1fc7955d422..43222a34cba0 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -599,6 +599,7 @@ void cpts_unregister(struct cpts *cpts)
ptp_clock_unregister(cpts->clock);
cpts->clock = NULL;
+ cpts->phc_index = -1;
cpts_write32(cpts, 0, int_enable);
cpts_write32(cpts, 0, control);
@@ -784,6 +785,7 @@ struct cpts *cpts_create(struct device *dev, void __iomem *regs,
cpts->cc.read = cpts_systim_read;
cpts->cc.mask = CLOCKSOURCE_MASK(32);
cpts->info = cpts_info;
+ cpts->phc_index = -1;
if (n_ext_ts)
cpts->info.n_ext_ts = n_ext_ts;
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index d9a5722f561b..3d1fc8d2ca66 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1791,7 +1791,7 @@ fail_open:
* ps3_gelic_driver_remove - remove a device from the control of this driver
*/
-static int ps3_gelic_driver_remove(struct ps3_system_bus_device *dev)
+static void ps3_gelic_driver_remove(struct ps3_system_bus_device *dev)
{
struct gelic_card *card = ps3_system_bus_get_drvdata(dev);
struct net_device *netdev0;
@@ -1840,7 +1840,6 @@ static int ps3_gelic_driver_remove(struct ps3_system_bus_device *dev)
ps3_close_hv_device(dev);
pr_debug("%s: done\n", __func__);
- return 0;
}
static struct ps3_system_bus_driver ps3_gelic_driver = {
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index a0f338cf1424..2a87cfa27ac0 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -847,6 +847,19 @@ struct nvsp_message {
#define NETVSC_XDP_HDRM 256
+#define NETVSC_MIN_OUT_MSG_SIZE (sizeof(struct vmpacket_descriptor) + \
+ sizeof(struct nvsp_message))
+#define NETVSC_MIN_IN_MSG_SIZE sizeof(struct vmpacket_descriptor)
+
+/* Estimated requestor size:
+ * out_ring_size/min_out_msg_size + in_ring_size/min_in_msg_size
+ */
+static inline u32 netvsc_rqstor_size(unsigned long ringbytes)
+{
+ return ringbytes / NETVSC_MIN_OUT_MSG_SIZE +
+ ringbytes / NETVSC_MIN_IN_MSG_SIZE;
+}
+
#define NETVSC_XFER_HEADER_SIZE(rng_cnt) \
(offsetof(struct vmtransfer_page_packet_header, ranges) + \
(rng_cnt) * sizeof(struct vmtransfer_page_range))
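netvsc_rqstor_size() above bounds the VMBus requestor table by the worst case: every outbound ring slot holding a minimal send plus every inbound slot holding a minimal completion. A sketch of the same division with illustrative sizes only; the real values come from sizeof(struct vmpacket_descriptor) and sizeof(struct nvsp_message).

#include <stdio.h>

#define MIN_OUT_MSG_SIZE (24UL + 40UL)  /* descriptor + nvsp message */
#define MIN_IN_MSG_SIZE  24UL           /* descriptor alone */

/* Worst-case count of in-flight request IDs: the out ring packed with
 * minimal sends plus the in ring packed with minimal completions.
 */
static unsigned long rqstor_size(unsigned long ringbytes)
{
        return ringbytes / MIN_OUT_MSG_SIZE + ringbytes / MIN_IN_MSG_SIZE;
}

int main(void)
{
        printf("%lu request slots for a 64 KiB ring\n",
               rqstor_size(64UL * 1024UL));
        return 0;
}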
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index fa8341f8359a..2350342b961f 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -50,7 +50,7 @@ void netvsc_switch_datapath(struct net_device *ndev, bool vf)
vmbus_sendpacket(dev->channel, init_pkt,
sizeof(struct nvsp_message),
- (unsigned long)init_pkt,
+ VMBUS_RQST_ID_NO_RESPONSE,
VM_PKT_DATA_INBAND, 0);
}
@@ -163,7 +163,7 @@ static void netvsc_revoke_recv_buf(struct hv_device *device,
ret = vmbus_sendpacket(device->channel,
revoke_packet,
sizeof(struct nvsp_message),
- (unsigned long)revoke_packet,
+ VMBUS_RQST_ID_NO_RESPONSE,
VM_PKT_DATA_INBAND, 0);
/* If the failure is because the channel is rescinded;
* ignore the failure since we cannot send on a rescinded
@@ -213,7 +213,7 @@ static void netvsc_revoke_send_buf(struct hv_device *device,
ret = vmbus_sendpacket(device->channel,
revoke_packet,
sizeof(struct nvsp_message),
- (unsigned long)revoke_packet,
+ VMBUS_RQST_ID_NO_RESPONSE,
VM_PKT_DATA_INBAND, 0);
/* If the failure is because the channel is rescinded;
@@ -557,7 +557,7 @@ static int negotiate_nvsp_ver(struct hv_device *device,
ret = vmbus_sendpacket(device->channel, init_packet,
sizeof(struct nvsp_message),
- (unsigned long)init_packet,
+ VMBUS_RQST_ID_NO_RESPONSE,
VM_PKT_DATA_INBAND, 0);
return ret;
@@ -614,7 +614,7 @@ static int netvsc_connect_vsp(struct hv_device *device,
/* Send the init request */
ret = vmbus_sendpacket(device->channel, init_packet,
sizeof(struct nvsp_message),
- (unsigned long)init_packet,
+ VMBUS_RQST_ID_NO_RESPONSE,
VM_PKT_DATA_INBAND, 0);
if (ret != 0)
goto cleanup;
@@ -695,10 +695,19 @@ static void netvsc_send_tx_complete(struct net_device *ndev,
const struct vmpacket_descriptor *desc,
int budget)
{
- struct sk_buff *skb = (struct sk_buff *)(unsigned long)desc->trans_id;
struct net_device_context *ndev_ctx = netdev_priv(ndev);
+ struct sk_buff *skb;
u16 q_idx = 0;
int queue_sends;
+ u64 cmd_rqst;
+
+ cmd_rqst = vmbus_request_addr(&channel->requestor, (u64)desc->trans_id);
+ if (cmd_rqst == VMBUS_RQST_ERROR) {
+ netdev_err(ndev, "Incorrect transaction id\n");
+ return;
+ }
+
+ skb = (struct sk_buff *)(unsigned long)cmd_rqst;
/* Notify the layer above us */
if (likely(skb)) {
@@ -1520,6 +1529,7 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
netvsc_poll, NAPI_POLL_WEIGHT);
/* Open the channel */
+ device->channel->rqstor_size = netvsc_rqstor_size(netvsc_ring_bytes);
ret = vmbus_open(device->channel, netvsc_ring_bytes,
netvsc_ring_bytes, NULL, 0,
netvsc_channel_cb, net_device->chan_table);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index d17bbc75f5e7..f32f28311d57 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -2050,11 +2050,11 @@ static void netvsc_link_change(struct work_struct *w)
container_of(w, struct net_device_context, dwork.work);
struct hv_device *device_obj = ndev_ctx->device_ctx;
struct net_device *net = hv_get_drvdata(device_obj);
+ unsigned long flags, next_reconfig, delay;
+ struct netvsc_reconfig *event = NULL;
struct netvsc_device *net_device;
struct rndis_device *rdev;
- struct netvsc_reconfig *event = NULL;
- bool notify = false, reschedule = false;
- unsigned long flags, next_reconfig, delay;
+ bool reschedule = false;
/* if changes are happening, comeback later */
if (!rtnl_trylock()) {
@@ -2103,7 +2103,7 @@ static void netvsc_link_change(struct work_struct *w)
netif_carrier_on(net);
netvsc_tx_enable(net_device, net);
} else {
- notify = true;
+ __netdev_notify_peers(net);
}
kfree(event);
break;
@@ -2132,9 +2132,6 @@ static void netvsc_link_change(struct work_struct *w)
rtnl_unlock();
- if (notify)
- netdev_notify_peers(net);
-
/* link_watch only sends one notification with current state per
* second, handle next reconfig event in 2 seconds.
*/
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 2c2b55c32a7a..598713c0d5a8 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -1173,6 +1173,7 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
/* Set the channel before opening.*/
nvchan->channel = new_sc;
+ new_sc->rqstor_size = netvsc_rqstor_size(netvsc_ring_bytes);
ret = vmbus_open(new_sc, netvsc_ring_bytes,
netvsc_ring_bytes, NULL, 0,
netvsc_channel_cb, nvchan);
@@ -1224,6 +1225,11 @@ int rndis_set_subchannel(struct net_device *ndev,
return -EIO;
}
+ /* Check that the number of allocated sub-channels is within the expected range */
+ if (init_packet->msg.v5_msg.subchn_comp.num_subchannels > nvdev->num_chn - 1) {
+ netdev_err(ndev, "invalid number of allocated sub-channels\n");
+ return -EINVAL;
+ }
nvdev->num_chn = 1 +
init_packet->msg.v5_msg.subchn_comp.num_subchannels;
diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
index c4795249719d..34e5f2155d62 100644
--- a/drivers/net/ipa/gsi.c
+++ b/drivers/net/ipa/gsi.c
@@ -326,8 +326,8 @@ gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id)
}
/* Issue an event ring command and wait for it to complete */
-static int evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
- enum gsi_evt_cmd_opcode opcode)
+static void evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
+ enum gsi_evt_cmd_opcode opcode)
{
struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
struct completion *completion = &evt_ring->completion;
@@ -340,7 +340,13 @@ static int evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
* is issued here. Only permit *this* event ring to trigger
* an interrupt, and only enable the event control IRQ type
* when we expect it to occur.
+ *
+ * There's a small chance that a previous command completed
+ * after the interrupt was disabled, so make sure we have no
+ * pending interrupts before we enable them.
*/
+ iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);
+
val = BIT(evt_ring_id);
iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
gsi_irq_type_enable(gsi, GSI_EV_CTRL);
@@ -355,19 +361,16 @@ static int evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
if (success)
- return 0;
+ return;
dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n",
opcode, evt_ring_id, evt_ring->state);
-
- return -ETIMEDOUT;
}
/* Allocate an event ring in NOT_ALLOCATED state */
static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
- int ret;
/* Get initial event ring state */
evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);
@@ -377,14 +380,16 @@ static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
return -EINVAL;
}
- ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);
- if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
- dev_err(gsi->dev, "event ring %u bad state %u after alloc\n",
- evt_ring_id, evt_ring->state);
- ret = -EIO;
- }
+ evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);
- return ret;
+ /* If successful the event ring state will have changed */
+ if (evt_ring->state == GSI_EVT_RING_STATE_ALLOCATED)
+ return 0;
+
+ dev_err(gsi->dev, "event ring %u bad state %u after alloc\n",
+ evt_ring_id, evt_ring->state);
+
+ return -EIO;
}
/* Reset a GSI event ring in ALLOCATED or ERROR state. */
@@ -392,7 +397,6 @@ static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
{
struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
enum gsi_evt_ring_state state = evt_ring->state;
- int ret;
if (state != GSI_EVT_RING_STATE_ALLOCATED &&
state != GSI_EVT_RING_STATE_ERROR) {
@@ -401,17 +405,20 @@ static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
return;
}
- ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);
- if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED)
- dev_err(gsi->dev, "event ring %u bad state %u after reset\n",
- evt_ring_id, evt_ring->state);
+ evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);
+
+ /* If successful the event ring state will have changed */
+ if (evt_ring->state == GSI_EVT_RING_STATE_ALLOCATED)
+ return;
+
+ dev_err(gsi->dev, "event ring %u bad state %u after reset\n",
+ evt_ring_id, evt_ring->state);
}
/* Issue a hardware de-allocation request for an allocated event ring */
static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
- int ret;
if (evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
dev_err(gsi->dev, "event ring %u state %u before dealloc\n",
@@ -419,17 +426,21 @@ static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
return;
}
- ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);
- if (!ret && evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED)
- dev_err(gsi->dev, "event ring %u bad state %u after dealloc\n",
- evt_ring_id, evt_ring->state);
+ evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);
+
+ /* If successful the event ring state will have changed */
+ if (evt_ring->state == GSI_EVT_RING_STATE_NOT_ALLOCATED)
+ return;
+
+ dev_err(gsi->dev, "event ring %u bad state %u after dealloc\n",
+ evt_ring_id, evt_ring->state);
}
/* Fetch the current state of a channel from hardware */
static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel)
{
u32 channel_id = gsi_channel_id(channel);
- void *virt = channel->gsi->virt;
+ void __iomem *virt = channel->gsi->virt;
u32 val;
val = ioread32(virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));
@@ -438,7 +449,7 @@ static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel)
}
/* Issue a channel command and wait for it to complete */
-static int
+static void
gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
{
struct completion *completion = &channel->completion;
@@ -453,7 +464,13 @@ gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
* issued here. So we only permit *this* channel to trigger
* an interrupt and only enable the channel control IRQ type
* when we expect it to occur.
+ *
+ * There's a small chance that a previous command completed
+ * after the interrupt was disabled, so make sure we have no
+ * pending interrupts before we enable them.
*/
+ iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);
+
val = BIT(channel_id);
iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
gsi_irq_type_enable(gsi, GSI_CH_CTRL);
@@ -467,12 +484,10 @@ gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
if (success)
- return 0;
+ return;
dev_err(dev, "GSI command %u for channel %u timed out, state %u\n",
opcode, channel_id, gsi_channel_state(channel));
-
- return -ETIMEDOUT;
}
/* Allocate GSI channel in NOT_ALLOCATED state */
@@ -481,7 +496,6 @@ static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
struct gsi_channel *channel = &gsi->channel[channel_id];
struct device *dev = gsi->dev;
enum gsi_channel_state state;
- int ret;
/* Get initial channel state */
state = gsi_channel_state(channel);
@@ -491,17 +505,17 @@ static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
return -EINVAL;
}
- ret = gsi_channel_command(channel, GSI_CH_ALLOCATE);
+ gsi_channel_command(channel, GSI_CH_ALLOCATE);
- /* Channel state will normally have been updated */
+ /* If successful the channel state will have changed */
state = gsi_channel_state(channel);
- if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED) {
- dev_err(dev, "channel %u bad state %u after alloc\n",
- channel_id, state);
- ret = -EIO;
- }
+ if (state == GSI_CHANNEL_STATE_ALLOCATED)
+ return 0;
- return ret;
+ dev_err(dev, "channel %u bad state %u after alloc\n",
+ channel_id, state);
+
+ return -EIO;
}
/* Start an ALLOCATED channel */
@@ -509,7 +523,6 @@ static int gsi_channel_start_command(struct gsi_channel *channel)
{
struct device *dev = channel->gsi->dev;
enum gsi_channel_state state;
- int ret;
state = gsi_channel_state(channel);
if (state != GSI_CHANNEL_STATE_ALLOCATED &&
@@ -519,17 +532,17 @@ static int gsi_channel_start_command(struct gsi_channel *channel)
return -EINVAL;
}
- ret = gsi_channel_command(channel, GSI_CH_START);
+ gsi_channel_command(channel, GSI_CH_START);
- /* Channel state will normally have been updated */
+ /* If successful the channel state will have changed */
state = gsi_channel_state(channel);
- if (!ret && state != GSI_CHANNEL_STATE_STARTED) {
- dev_err(dev, "channel %u bad state %u after start\n",
- gsi_channel_id(channel), state);
- ret = -EIO;
- }
+ if (state == GSI_CHANNEL_STATE_STARTED)
+ return 0;
- return ret;
+ dev_err(dev, "channel %u bad state %u after start\n",
+ gsi_channel_id(channel), state);
+
+ return -EIO;
}
/* Stop a GSI channel in STARTED state */
@@ -537,7 +550,6 @@ static int gsi_channel_stop_command(struct gsi_channel *channel)
{
struct device *dev = channel->gsi->dev;
enum gsi_channel_state state;
- int ret;
state = gsi_channel_state(channel);
@@ -554,12 +566,12 @@ static int gsi_channel_stop_command(struct gsi_channel *channel)
return -EINVAL;
}
- ret = gsi_channel_command(channel, GSI_CH_STOP);
+ gsi_channel_command(channel, GSI_CH_STOP);
- /* Channel state will normally have been updated */
+ /* If successful the channel state will have changed */
state = gsi_channel_state(channel);
- if (ret || state == GSI_CHANNEL_STATE_STOPPED)
- return ret;
+ if (state == GSI_CHANNEL_STATE_STOPPED)
+ return 0;
/* We may have to try again if stop is in progress */
if (state == GSI_CHANNEL_STATE_STOP_IN_PROC)
@@ -576,7 +588,6 @@ static void gsi_channel_reset_command(struct gsi_channel *channel)
{
struct device *dev = channel->gsi->dev;
enum gsi_channel_state state;
- int ret;
msleep(1); /* A short delay is required before a RESET command */
@@ -590,11 +601,11 @@ static void gsi_channel_reset_command(struct gsi_channel *channel)
return;
}
- ret = gsi_channel_command(channel, GSI_CH_RESET);
+ gsi_channel_command(channel, GSI_CH_RESET);
- /* Channel state will normally have been updated */
+ /* If successful the channel state will have changed */
state = gsi_channel_state(channel);
- if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED)
+ if (state != GSI_CHANNEL_STATE_ALLOCATED)
dev_err(dev, "channel %u bad state %u after reset\n",
gsi_channel_id(channel), state);
}
@@ -605,7 +616,6 @@ static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
struct gsi_channel *channel = &gsi->channel[channel_id];
struct device *dev = gsi->dev;
enum gsi_channel_state state;
- int ret;
state = gsi_channel_state(channel);
if (state != GSI_CHANNEL_STATE_ALLOCATED) {
@@ -614,11 +624,12 @@ static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
return;
}
- ret = gsi_channel_command(channel, GSI_CH_DE_ALLOC);
+ gsi_channel_command(channel, GSI_CH_DE_ALLOC);
- /* Channel state will normally have been updated */
+ /* If successful the channel state will have changed */
state = gsi_channel_state(channel);
- if (!ret && state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
+
+ if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
dev_err(dev, "channel %u bad state %u after dealloc\n",
channel_id, state);
}
@@ -1362,7 +1373,7 @@ static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count)
/* Hardware requires a 2^n ring size, with alignment equal to size */
ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
if (ring->virt && addr % size) {
- dma_free_coherent(dev, size, ring->virt, ring->addr);
+ dma_free_coherent(dev, size, ring->virt, addr);
dev_err(dev, "unable to alloc 0x%zx-aligned ring buffer\n",
size);
return -EINVAL; /* Not a good error value, but distinct */
diff --git a/drivers/net/ipa/ipa_clock.c b/drivers/net/ipa/ipa_clock.c
index 9dcf16f399b7..135c393437f1 100644
--- a/drivers/net/ipa/ipa_clock.c
+++ b/drivers/net/ipa/ipa_clock.c
@@ -115,13 +115,13 @@ static int ipa_interconnect_enable(struct ipa *ipa)
return ret;
data = &clock->interconnect_data[IPA_INTERCONNECT_IMEM];
- ret = icc_set_bw(clock->memory_path, data->average_rate,
+ ret = icc_set_bw(clock->imem_path, data->average_rate,
data->peak_rate);
if (ret)
goto err_memory_path_disable;
data = &clock->interconnect_data[IPA_INTERCONNECT_CONFIG];
- ret = icc_set_bw(clock->memory_path, data->average_rate,
+ ret = icc_set_bw(clock->config_path, data->average_rate,
data->peak_rate);
if (ret)
goto err_imem_path_disable;
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index 9f4be9812a1f..612afece303f 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -588,7 +588,7 @@ static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
/* Note that HDR_ENDIANNESS indicates big endian header fields */
if (endpoint->data->qmap)
- val = cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);
+ val = (__force u32)cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);
iowrite32(val, endpoint->ipa->reg_virt + offset);
}
@@ -1164,8 +1164,8 @@ static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint,
return true;
if (!status->pkt_len)
return true;
- endpoint_id = u32_get_bits(status->endp_dst_idx,
- IPA_STATUS_DST_IDX_FMASK);
+ endpoint_id = u8_get_bits(status->endp_dst_idx,
+ IPA_STATUS_DST_IDX_FMASK);
if (endpoint_id != endpoint->endpoint_id)
return true;
diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c
index 0cc3a3374caa..f25029b9ec85 100644
--- a/drivers/net/ipa/ipa_mem.c
+++ b/drivers/net/ipa/ipa_mem.c
@@ -336,7 +336,7 @@ static void ipa_imem_exit(struct ipa *ipa)
size = iommu_unmap(domain, ipa->imem_iova, ipa->imem_size);
if (size != ipa->imem_size)
- dev_warn(dev, "unmapped %zu IMEM bytes, expected %lu\n",
+ dev_warn(dev, "unmapped %zu IMEM bytes, expected %zu\n",
size, ipa->imem_size);
} else {
dev_err(dev, "couldn't get IPA IOMMU domain for IMEM\n");
@@ -440,7 +440,7 @@ static void ipa_smem_exit(struct ipa *ipa)
size = iommu_unmap(domain, ipa->smem_iova, ipa->smem_size);
if (size != ipa->smem_size)
- dev_warn(dev, "unmapped %zu SMEM bytes, expected %lu\n",
+ dev_warn(dev, "unmapped %zu SMEM bytes, expected %zu\n",
size, ipa->smem_size);
} else {
diff --git a/drivers/net/ipa/ipa_modem.c b/drivers/net/ipa/ipa_modem.c
index e34fe2d77324..9b08eb823984 100644
--- a/drivers/net/ipa/ipa_modem.c
+++ b/drivers/net/ipa/ipa_modem.c
@@ -216,6 +216,7 @@ int ipa_modem_start(struct ipa *ipa)
ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = netdev;
ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = netdev;
+ SET_NETDEV_DEV(netdev, &ipa->pdev->dev);
priv = netdev_priv(netdev);
priv->ipa = ipa;
diff --git a/drivers/net/mdio/mdio-bitbang.c b/drivers/net/mdio/mdio-bitbang.c
index 5136275c8e73..d3915f831854 100644
--- a/drivers/net/mdio/mdio-bitbang.c
+++ b/drivers/net/mdio/mdio-bitbang.c
@@ -149,7 +149,7 @@ static int mdiobb_cmd_addr(struct mdiobb_ctrl *ctrl, int phy, u32 addr)
return dev_addr;
}
-static int mdiobb_read(struct mii_bus *bus, int phy, int reg)
+int mdiobb_read(struct mii_bus *bus, int phy, int reg)
{
struct mdiobb_ctrl *ctrl = bus->priv;
int ret, i;
@@ -180,8 +180,9 @@ static int mdiobb_read(struct mii_bus *bus, int phy, int reg)
mdiobb_get_bit(ctrl);
return ret;
}
+EXPORT_SYMBOL(mdiobb_read);
-static int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
+int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
{
struct mdiobb_ctrl *ctrl = bus->priv;
@@ -201,6 +202,7 @@ static int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
mdiobb_get_bit(ctrl);
return 0;
}
+EXPORT_SYMBOL(mdiobb_write);
struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl)
{
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 33372756a451..ddb78fb4d6dc 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -317,7 +317,8 @@ static int smsc_phy_probe(struct phy_device *phydev)
/* Make clk optional to keep DTB backward compatibility. */
priv->refclk = clk_get_optional(dev, NULL);
if (IS_ERR(priv->refclk))
- dev_err_probe(dev, PTR_ERR(priv->refclk), "Failed to request clock\n");
+ return dev_err_probe(dev, PTR_ERR(priv->refclk),
+ "Failed to request clock\n");
ret = clk_prepare_enable(priv->refclk);
if (ret)
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 09c27f7773f9..d445ecb1d0c7 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -623,6 +623,7 @@ static int ppp_bridge_channels(struct channel *pch, struct channel *pchb)
write_unlock_bh(&pch->upl);
return -EALREADY;
}
+ refcount_inc(&pchb->file.refcnt);
rcu_assign_pointer(pch->bridge, pchb);
write_unlock_bh(&pch->upl);
@@ -632,19 +633,24 @@ static int ppp_bridge_channels(struct channel *pch, struct channel *pchb)
write_unlock_bh(&pchb->upl);
goto err_unset;
}
+ refcount_inc(&pch->file.refcnt);
rcu_assign_pointer(pchb->bridge, pch);
write_unlock_bh(&pchb->upl);
- refcount_inc(&pch->file.refcnt);
- refcount_inc(&pchb->file.refcnt);
-
return 0;
err_unset:
write_lock_bh(&pch->upl);
+ /* Re-read pch->bridge with upl held in case it was modified concurrently */
+ pchb = rcu_dereference_protected(pch->bridge, lockdep_is_held(&pch->upl));
RCU_INIT_POINTER(pch->bridge, NULL);
write_unlock_bh(&pch->upl);
synchronize_rcu();
+
+ if (pchb)
+ if (refcount_dec_and_test(&pchb->file.refcnt))
+ ppp_destroy_channel(pchb);
+
return -EALREADY;
}
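The reordering above guarantees that a bridge pointer visible to RCU readers always owns a reference on the channel it points to, and the error path re-reads the pointer under the lock so it drops only the reference it actually published. A minimal userspace sketch of the take-reference-then-publish rule, using C11 atomics rather than kernel refcount_t/RCU (all names illustrative):

#include <stdatomic.h>
#include <stdlib.h>

struct chan {
	atomic_int refcnt;
};

static struct chan *_Atomic bridge;

static void chan_get(struct chan *c)
{
	atomic_fetch_add(&c->refcnt, 1);
}

static void chan_put(struct chan *c)
{
	if (atomic_fetch_sub(&c->refcnt, 1) == 1)
		free(c);	/* that was the last reference */
}

static void publish_bridge(struct chan *c)
{
	chan_get(c);	/* take the reference first... */
	atomic_store_explicit(&bridge, c, memory_order_release);	/* ...then publish */
}

int main(void)
{
	struct chan *c = calloc(1, sizeof(*c));

	if (!c)
		return 1;
	publish_bridge(c);
	/* unpublish on the error path: drop the reference the pointer held */
	chan_put(atomic_exchange(&bridge, NULL));
	return 0;
}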
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index c19dac21c468..dd7917cab2b1 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -992,7 +992,8 @@ static void __team_compute_features(struct team *team)
unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
IFF_XMIT_DST_RELEASE_PERM;
- list_for_each_entry(port, &team->port_list, list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(port, &team->port_list, list) {
vlan_features = netdev_increment_features(vlan_features,
port->dev->vlan_features,
TEAM_VLAN_FEATURES);
@@ -1006,6 +1007,7 @@ static void __team_compute_features(struct team *team)
if (port->dev->hard_header_len > max_hard_header_len)
max_hard_header_len = port->dev->hard_header_len;
}
+ rcu_read_unlock();
team->dev->vlan_features = vlan_features;
team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
@@ -1020,9 +1022,7 @@ static void __team_compute_features(struct team *team)
static void team_compute_features(struct team *team)
{
- mutex_lock(&team->lock);
__team_compute_features(team);
- mutex_unlock(&team->lock);
netdev_change_features(team->dev);
}
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index fbed05ae7b0f..978ac0981d16 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1365,7 +1365,7 @@ static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
int i;
if (it->nr_segs > MAX_SKB_FRAGS + 1)
- return ERR_PTR(-ENOMEM);
+ return ERR_PTR(-EMSGSIZE);
local_bh_disable();
skb = napi_get_frags(&tfile->napi);
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 1e3719028780..fbbe78643631 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -631,7 +631,6 @@ config USB_NET_AQC111
config USB_RTL8153_ECM
tristate "RTL8153 ECM support"
depends on USB_NET_CDCETHER && (USB_RTL8152 || USB_RTL8152=n)
- default y
help
This option supports ECM mode for RTL8153 ethernet adapter, when
CONFIG_USB_RTL8152 is not set, or the RTL8153 device is not
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 8c1d61c2cbac..a9b551028659 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -793,6 +793,13 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
+/* Lenovo Powered USB-C Travel Hub (4X90S92381, based on Realtek RTL8153) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x721e, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
/* ThinkPad USB-C Dock Gen 2 (based on Realtek RTL8153) */
{
USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0xa387, USB_CLASS_COMM,
@@ -962,6 +969,12 @@ static const struct usb_device_id products[] = {
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&wwan_info,
}, {
+ /* Cinterion PLS83/PLS63 modem by GEMALTO/THALES */
+ USB_DEVICE_AND_INTERFACE_INFO(0x1e2d, 0x0069, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&wwan_info,
+}, {
USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long) &cdc_info,
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 2bac57d5e8d5..291e76d32abe 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1199,7 +1199,10 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
* accordingly. Otherwise, we should check here.
*/
if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END)
- delayed_ndp_size = ALIGN(ctx->max_ndp_size, ctx->tx_ndp_modulus);
+ delayed_ndp_size = ctx->max_ndp_size +
+ max_t(u32,
+ ctx->tx_ndp_modulus,
+ ctx->tx_modulus + ctx->tx_remainder) - 1;
else
delayed_ndp_size = 0;
@@ -1410,7 +1413,8 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
if (!(dev->driver_info->flags & FLAG_SEND_ZLP) &&
skb_out->len > ctx->min_tx_pkt) {
padding_count = ctx->tx_curr_size - skb_out->len;
- skb_put_zero(skb_out, padding_count);
+ if (!WARN_ON(padding_count > ctx->tx_curr_size))
+ skb_put_zero(skb_out, padding_count);
} else if (skb_out->len < ctx->tx_curr_size &&
(skb_out->len % dev->maxpacket) == 0) {
skb_put_u8(skb_out, 0); /* force short packet */
@@ -1823,6 +1827,15 @@ cdc_ncm_speed_change(struct usbnet *dev,
uint32_t rx_speed = le32_to_cpu(data->DLBitRRate);
uint32_t tx_speed = le32_to_cpu(data->ULBitRate);
+ /* If the speed hasn't changed, don't report it.
+ * RTL8156 units shipped before 2021 send this notification about every 32 ms.
+ */
+ if (dev->rx_speed == rx_speed && dev->tx_speed == tx_speed)
+ return;
+
+ dev->rx_speed = rx_speed;
+ dev->tx_speed = tx_speed;
+
/*
* Currently the USB-NET API does not support reporting the actual
* device speed. Do print it instead.
@@ -1863,10 +1876,8 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
* USB_CDC_NOTIFY_NETWORK_CONNECTION notification shall be
* sent by device after USB_CDC_NOTIFY_SPEED_CHANGE.
*/
- netif_info(dev, link, dev->net,
- "network connection: %sconnected\n",
- !!event->wValue ? "" : "dis");
- usbnet_link_change(dev, !!event->wValue, 0);
+ if (netif_carrier_ok(dev->net) != !!event->wValue)
+ usbnet_link_change(dev, !!event->wValue, 0);
break;
case USB_CDC_NOTIFY_SPEED_CHANGE:
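The early returns added above deduplicate link notifications against values cached in the usbnet device, which matters for hardware like the RTL8156 that re-sends the same notification tens of times per second. A standalone sketch of the same debounce, with illustrative names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct link_state {
	uint32_t rx_speed;
	uint32_t tx_speed;
};

static bool speed_changed(struct link_state *s, uint32_t rx, uint32_t tx)
{
	if (s->rx_speed == rx && s->tx_speed == tx)
		return false;	/* duplicate notification: suppress it */
	s->rx_speed = rx;
	s->tx_speed = tx;
	return true;
}

int main(void)
{
	struct link_state s = { 0, 0 };

	printf("%d\n", speed_changed(&s, 1000, 1000));	/* 1: report */
	printf("%d\n", speed_changed(&s, 1000, 1000));	/* 0: suppressed */
	return 0;
}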
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index d166c321ee9b..cc4819282820 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1013,6 +1013,7 @@ static const struct usb_device_id products[] = {
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0125)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0306)}, /* Quectel EP06/EG06/EM06 */
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */
+ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0620)}, /* Quectel EM160R-GL */
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0800)}, /* Quectel RM500Q-GL */
/* 3. Combined interface devices matching on interface number */
@@ -1301,6 +1302,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x0b3c, 0xc00a, 6)}, /* Olivetti Olicard 160 */
{QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)}, /* Olivetti Olicard 500 */
{QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */
+ {QMI_QUIRK_SET_DTR(0x1e2d, 0x006f, 8)}, /* Cinterion PLS83/PLS63 */
{QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */
{QMI_FIXED_INTF(0x1e2d, 0x0063, 10)}, /* Cinterion ALASxx (1 RmNet) */
{QMI_FIXED_INTF(0x1e2d, 0x0082, 4)}, /* Cinterion PHxx,PXxx (2 RmNet) */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index c448d6089821..67cd6986634f 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -6877,6 +6877,7 @@ static const struct usb_device_id rtl8152_table[] = {
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x720c)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7214)},
+ {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x721e)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0xa387)},
{REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041)},
{REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)},
diff --git a/drivers/net/usb/r8153_ecm.c b/drivers/net/usb/r8153_ecm.c
index 2c3fabd38b16..20b2df8d74ae 100644
--- a/drivers/net/usb/r8153_ecm.c
+++ b/drivers/net/usb/r8153_ecm.c
@@ -122,12 +122,20 @@ static const struct driver_info r8153_info = {
};
static const struct usb_device_id products[] = {
+/* Realtek RTL8153 Based USB 3.0 Ethernet Adapters */
{
USB_DEVICE_AND_INTERFACE_INFO(VENDOR_ID_REALTEK, 0x8153, USB_CLASS_COMM,
USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&r8153_info,
},
+/* Lenovo Powered USB-C Travel Hub (4X90S92381, based on Realtek RTL8153) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(VENDOR_ID_LENOVO, 0x721e, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&r8153_info,
+},
+
{ }, /* END */
};
MODULE_DEVICE_TABLE(usb, products);
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index 6609d21ef894..f813ca9dec53 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -387,7 +387,7 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags)
reply_len = sizeof *phym;
retval = rndis_query(dev, intf, u.buf,
RNDIS_OID_GEN_PHYSICAL_MEDIUM,
- 0, (void **) &phym, &reply_len);
+ reply_len, (void **)&phym, &reply_len);
if (retval != 0 || !phym) {
/* OID is optional so don't fail here. */
phym_unspec = cpu_to_le32(RNDIS_PHYSICAL_MEDIUM_UNSPECIFIED);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 052975ea0af4..508408fbe78f 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2093,14 +2093,16 @@ static int virtnet_set_channels(struct net_device *dev,
get_online_cpus();
err = _virtnet_set_queues(vi, queue_pairs);
- if (!err) {
- netif_set_real_num_tx_queues(dev, queue_pairs);
- netif_set_real_num_rx_queues(dev, queue_pairs);
-
- virtnet_set_affinity(vi);
+ if (err) {
+ put_online_cpus();
+ goto err;
}
+ virtnet_set_affinity(vi);
put_online_cpus();
+ netif_set_real_num_tx_queues(dev, queue_pairs);
+ netif_set_real_num_rx_queues(dev, queue_pairs);
+ err:
return err;
}
@@ -3072,6 +3074,7 @@ static int virtnet_probe(struct virtio_device *vdev)
dev_err(&vdev->dev,
"device MTU appears to have changed it is now %d < %d",
mtu, dev->min_mtu);
+ err = -EINVAL;
goto free;
}
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 4029fde71a9e..83c9481995dd 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -282,6 +282,7 @@ config SLIC_DS26522
tristate "Slic Maxim ds26522 card support"
depends on SPI
depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE || COMPILE_TEST
+ select BITREVERSE
help
This module initializes and configures the slic maxim card
in T1 or E1 mode.
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index 64f855651336..261b53fc8e04 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -569,6 +569,13 @@ static void ppp_timer(struct timer_list *t)
unsigned long flags;
spin_lock_irqsave(&ppp->lock, flags);
+ /* mod_timer could be called after we entered this function but
+ * before we got the lock.
+ */
+ if (timer_pending(&proto->timer)) {
+ spin_unlock_irqrestore(&ppp->lock, flags);
+ return;
+ }
switch (proto->state) {
case STOPPING:
case REQ_SENT:
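The guard above closes a classic timer race: the handler may already be running when mod_timer() re-arms the timer, so once the handler finally takes the lock it checks timer_pending() and defers to the newer expiry. A hedged kernel-style sketch of the pattern (illustrative struct and field names, not a standalone program):

static void my_timer_fn(struct timer_list *t)
{
	struct my_proto *proto = from_timer(proto, t, timer);
	unsigned long flags;

	spin_lock_irqsave(&proto->lock, flags);
	if (timer_pending(&proto->timer)) {
		/* Re-armed while we waited for the lock; let the
		 * newer expiry do the work instead.
		 */
		spin_unlock_irqrestore(&proto->lock, flags);
		return;
	}
	/* ... normal expiry handling under proto->lock ... */
	spin_unlock_irqrestore(&proto->lock, flags);
}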
diff --git a/drivers/net/wireguard/socket.c b/drivers/net/wireguard/socket.c
index c33e2c81635f..410b318e57fb 100644
--- a/drivers/net/wireguard/socket.c
+++ b/drivers/net/wireguard/socket.c
@@ -49,7 +49,7 @@ static int send4(struct wg_device *wg, struct sk_buff *skb,
rt = dst_cache_get_ip4(cache, &fl.saddr);
if (!rt) {
- security_sk_classify_flow(sock, flowi4_to_flowi(&fl));
+ security_sk_classify_flow(sock, flowi4_to_flowi_common(&fl));
if (unlikely(!inet_confirm_addr(sock_net(sock), NULL, 0,
fl.saddr, RT_SCOPE_HOST))) {
endpoint->src4.s_addr = 0;
@@ -129,7 +129,7 @@ static int send6(struct wg_device *wg, struct sk_buff *skb,
dst = dst_cache_get_ip6(cache, &fl.saddr);
if (!dst) {
- security_sk_classify_flow(sock, flowi6_to_flowi(&fl));
+ security_sk_classify_flow(sock, flowi6_to_flowi_common(&fl));
if (unlikely(!ipv6_addr_any(&fl.saddr) &&
!ipv6_chk_addr(sock_net(sock), &fl.saddr, NULL, 0))) {
endpoint->src6 = fl.saddr = in6addr_any;
diff --git a/drivers/net/wireless/ath/ath10k/spectral.c b/drivers/net/wireless/ath/ath10k/spectral.c
index 5db6bff5193b..68254a967ccb 100644
--- a/drivers/net/wireless/ath/ath10k/spectral.c
+++ b/drivers/net/wireless/ath/ath10k/spectral.c
@@ -497,7 +497,7 @@ static int remove_buf_file_handler(struct dentry *dentry)
return 0;
}
-static struct rchan_callbacks rfs_spec_scan_cb = {
+static const struct rchan_callbacks rfs_spec_scan_cb = {
.create_buf_file = create_buf_file_handler,
.remove_buf_file = remove_buf_file_handler,
};
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index b97c38b9a270..350b7913622c 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -185,7 +185,7 @@ int ath11k_core_suspend(struct ath11k_base *ab)
ath11k_hif_ce_irq_disable(ab);
ret = ath11k_hif_suspend(ab);
- if (!ret) {
+ if (ret) {
ath11k_warn(ab, "failed to suspend hif: %d\n", ret);
return ret;
}
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
index 205c0f1a40e9..920e5026a635 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -2294,6 +2294,7 @@ static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
{
u8 channel_num;
u32 center_freq;
+ struct ieee80211_channel *channel;
rx_status->freq = 0;
rx_status->rate_idx = 0;
@@ -2314,9 +2315,12 @@ static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
rx_status->band = NL80211_BAND_5GHZ;
} else {
spin_lock_bh(&ar->data_lock);
- rx_status->band = ar->rx_channel->band;
- channel_num =
- ieee80211_frequency_to_channel(ar->rx_channel->center_freq);
+ channel = ar->rx_channel;
+ if (channel) {
+ rx_status->band = channel->band;
+ channel_num =
+ ieee80211_frequency_to_channel(channel->center_freq);
+ }
spin_unlock_bh(&ar->data_lock);
ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "rx_desc: ",
rx_desc, sizeof(struct hal_rx_desc));
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index 5c175e3e09b2..c1608f64ea95 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -3021,6 +3021,7 @@ static int ath11k_mac_station_add(struct ath11k *ar,
}
if (ab->hw_params.vdev_start_delay &&
+ !arvif->is_started &&
arvif->vdev_type != WMI_VDEV_TYPE_AP) {
ret = ath11k_start_vdev_delay(ar->hw, vif);
if (ret) {
@@ -5284,7 +5285,8 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
/* for QCA6390 bss peer must be created before vdev_start */
if (ab->hw_params.vdev_start_delay &&
arvif->vdev_type != WMI_VDEV_TYPE_AP &&
- arvif->vdev_type != WMI_VDEV_TYPE_MONITOR) {
+ arvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
+ !ath11k_peer_find_by_vdev_id(ab, arvif->vdev_id)) {
memcpy(&arvif->chanctx, ctx, sizeof(*ctx));
ret = 0;
goto out;
@@ -5295,7 +5297,9 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
goto out;
}
- if (ab->hw_params.vdev_start_delay) {
+ if (ab->hw_params.vdev_start_delay &&
+ (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
+ arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)) {
param.vdev_id = arvif->vdev_id;
param.peer_type = WMI_PEER_TYPE_DEFAULT;
param.peer_addr = ar->mac_addr;
diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
index 857647aa57c8..20b415cd96c4 100644
--- a/drivers/net/wireless/ath/ath11k/pci.c
+++ b/drivers/net/wireless/ath/ath11k/pci.c
@@ -274,7 +274,7 @@ static int ath11k_pci_fix_l1ss(struct ath11k_base *ab)
PCIE_QSERDES_COM_SYSCLK_EN_SEL_REG,
PCIE_QSERDES_COM_SYSCLK_EN_SEL_VAL,
PCIE_QSERDES_COM_SYSCLK_EN_SEL_MSK);
- if (!ret) {
+ if (ret) {
ath11k_warn(ab, "failed to set sysclk: %d\n", ret);
return ret;
}
@@ -283,7 +283,7 @@ static int ath11k_pci_fix_l1ss(struct ath11k_base *ab)
PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG1_REG,
PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG1_VAL,
PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG_MSK);
- if (!ret) {
+ if (ret) {
ath11k_warn(ab, "failed to set dtct config1 error: %d\n", ret);
return ret;
}
@@ -292,7 +292,7 @@ static int ath11k_pci_fix_l1ss(struct ath11k_base *ab)
PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG2_REG,
PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG2_VAL,
PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG_MSK);
- if (!ret) {
+ if (ret) {
ath11k_warn(ab, "failed to set dtct config2: %d\n", ret);
return ret;
}
@@ -301,7 +301,7 @@ static int ath11k_pci_fix_l1ss(struct ath11k_base *ab)
PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG4_REG,
PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG4_VAL,
PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG_MSK);
- if (!ret) {
+ if (ret) {
ath11k_warn(ab, "failed to set dtct config4: %d\n", ret);
return ret;
}
@@ -886,6 +886,32 @@ static void ath11k_pci_free_region(struct ath11k_pci *ab_pci)
pci_disable_device(pci_dev);
}
+static void ath11k_pci_aspm_disable(struct ath11k_pci *ab_pci)
+{
+ struct ath11k_base *ab = ab_pci->ab;
+
+ pcie_capability_read_word(ab_pci->pdev, PCI_EXP_LNKCTL,
+ &ab_pci->link_ctl);
+
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "pci link_ctl 0x%04x L0s %d L1 %d\n",
+ ab_pci->link_ctl,
+ u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L0S),
+ u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L1));
+
+ /* disable L0s and L1 */
+ pcie_capability_write_word(ab_pci->pdev, PCI_EXP_LNKCTL,
+ ab_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC);
+
+ set_bit(ATH11K_PCI_ASPM_RESTORE, &ab_pci->flags);
+}
+
+static void ath11k_pci_aspm_restore(struct ath11k_pci *ab_pci)
+{
+ if (test_and_clear_bit(ATH11K_PCI_ASPM_RESTORE, &ab_pci->flags))
+ pcie_capability_write_word(ab_pci->pdev, PCI_EXP_LNKCTL,
+ ab_pci->link_ctl);
+}
+
static int ath11k_pci_power_up(struct ath11k_base *ab)
{
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
@@ -895,6 +921,11 @@ static int ath11k_pci_power_up(struct ath11k_base *ab)
clear_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
ath11k_pci_sw_reset(ab_pci->ab, true);
+ /* Disable ASPM during firmware download due to problems switching
+ * to AMSS state.
+ */
+ ath11k_pci_aspm_disable(ab_pci);
+
ret = ath11k_mhi_start(ab_pci);
if (ret) {
ath11k_err(ab, "failed to start mhi: %d\n", ret);
@@ -908,6 +939,9 @@ static void ath11k_pci_power_down(struct ath11k_base *ab)
{
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+ /* Restore ASPM in case firmware bootup fails */
+ ath11k_pci_aspm_restore(ab_pci);
+
ath11k_pci_force_wake(ab_pci->ab);
ath11k_mhi_stop(ab_pci);
clear_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
@@ -965,6 +999,8 @@ static int ath11k_pci_start(struct ath11k_base *ab)
set_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
+ ath11k_pci_aspm_restore(ab_pci);
+
ath11k_pci_ce_irqs_enable(ab);
ath11k_ce_rx_post_buf(ab);
diff --git a/drivers/net/wireless/ath/ath11k/pci.h b/drivers/net/wireless/ath/ath11k/pci.h
index 0432a702416b..fe44d0dfce19 100644
--- a/drivers/net/wireless/ath/ath11k/pci.h
+++ b/drivers/net/wireless/ath/ath11k/pci.h
@@ -63,6 +63,7 @@ struct ath11k_msi_config {
enum ath11k_pci_flags {
ATH11K_PCI_FLAG_INIT_DONE,
ATH11K_PCI_FLAG_IS_MSI_64,
+ ATH11K_PCI_ASPM_RESTORE,
};
struct ath11k_pci {
@@ -80,6 +81,7 @@ struct ath11k_pci {
/* enum ath11k_pci_flags */
unsigned long flags;
+ u16 link_ctl;
};
static inline struct ath11k_pci *ath11k_pci_priv(struct ath11k_base *ab)
diff --git a/drivers/net/wireless/ath/ath11k/peer.c b/drivers/net/wireless/ath/ath11k/peer.c
index 1866d82678fa..b69e7ebfa930 100644
--- a/drivers/net/wireless/ath/ath11k/peer.c
+++ b/drivers/net/wireless/ath/ath11k/peer.c
@@ -76,6 +76,23 @@ struct ath11k_peer *ath11k_peer_find_by_id(struct ath11k_base *ab,
return NULL;
}
+struct ath11k_peer *ath11k_peer_find_by_vdev_id(struct ath11k_base *ab,
+ int vdev_id)
+{
+ struct ath11k_peer *peer;
+
+ spin_lock_bh(&ab->base_lock);
+
+ list_for_each_entry(peer, &ab->peers, list) {
+ if (vdev_id == peer->vdev_id) {
+ spin_unlock_bh(&ab->base_lock);
+ return peer;
+ }
+ }
+ spin_unlock_bh(&ab->base_lock);
+ return NULL;
+}
+
void ath11k_peer_unmap_event(struct ath11k_base *ab, u16 peer_id)
{
struct ath11k_peer *peer;
diff --git a/drivers/net/wireless/ath/ath11k/peer.h b/drivers/net/wireless/ath/ath11k/peer.h
index bba2e00b6944..8553ed061aea 100644
--- a/drivers/net/wireless/ath/ath11k/peer.h
+++ b/drivers/net/wireless/ath/ath11k/peer.h
@@ -43,5 +43,7 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
struct ieee80211_sta *sta, struct peer_create_params *param);
int ath11k_wait_for_peer_delete_done(struct ath11k *ar, u32 vdev_id,
const u8 *addr);
+struct ath11k_peer *ath11k_peer_find_by_vdev_id(struct ath11k_base *ab,
+ int vdev_id);
#endif /* _PEER_H_ */
diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
index f0b5c50974f3..0db623ff4bb9 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.c
+++ b/drivers/net/wireless/ath/ath11k/qmi.c
@@ -1660,6 +1660,7 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab)
struct qmi_wlanfw_respond_mem_resp_msg_v01 resp;
struct qmi_txn txn = {};
int ret = 0, i;
+ bool delayed;
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
@@ -1672,11 +1673,13 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab)
* failure to FW and FW will then request multiple blocks of small
* chunk size memory.
*/
- if (!ab->bus_params.fixed_mem_region && ab->qmi.mem_seg_count <= 2) {
+ if (!ab->bus_params.fixed_mem_region && ab->qmi.target_mem_delayed) {
+ delayed = true;
ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi delays mem_request %d\n",
ab->qmi.mem_seg_count);
memset(req, 0, sizeof(*req));
} else {
+ delayed = false;
req->mem_seg_len = ab->qmi.mem_seg_count;
for (i = 0; i < req->mem_seg_len ; i++) {
@@ -1708,6 +1711,12 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab)
}
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ /* the error response is expected when
+ * target_mem_delayed is true.
+ */
+ if (delayed && resp.resp.error == 0)
+ goto out;
+
ath11k_warn(ab, "Respond mem req failed, result: %d, err: %d\n",
resp.resp.result, resp.resp.error);
ret = -EINVAL;
@@ -1742,6 +1751,8 @@ static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab)
int i;
struct target_mem_chunk *chunk;
+ ab->qmi.target_mem_delayed = false;
+
for (i = 0; i < ab->qmi.mem_seg_count; i++) {
chunk = &ab->qmi.target_mem[i];
chunk->vaddr = dma_alloc_coherent(ab->dev,
@@ -1749,6 +1760,15 @@ static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab)
&chunk->paddr,
GFP_KERNEL);
if (!chunk->vaddr) {
+ if (ab->qmi.mem_seg_count <= 2) {
+ ath11k_dbg(ab, ATH11K_DBG_QMI,
+ "qmi dma allocation failed (%d B type %u), will try later with small size\n",
+ chunk->size,
+ chunk->type);
+ ath11k_qmi_free_target_mem_chunk(ab);
+ ab->qmi.target_mem_delayed = true;
+ return 0;
+ }
ath11k_err(ab, "failed to alloc memory, size: 0x%x, type: %u\n",
chunk->size,
chunk->type);
@@ -2517,7 +2537,7 @@ static void ath11k_qmi_msg_mem_request_cb(struct qmi_handle *qmi_hdl,
ret);
return;
}
- } else if (msg->mem_seg_len > 2) {
+ } else {
ret = ath11k_qmi_alloc_target_mem_chunk(ab);
if (ret) {
ath11k_warn(ab, "qmi failed to alloc target memory: %d\n",
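The fallback above trades a failed large contiguous DMA allocation for a deferred, smaller-chunk negotiation with the firmware. A standalone sketch of that policy, using plain malloc() in place of dma_alloc_coherent() (names illustrative):

#include <stdbool.h>
#include <stdlib.h>

struct mem_req {
	size_t seg_count;
	size_t seg_size;
	void **segs;
	bool delayed;
};

static int alloc_target_mem(struct mem_req *r)
{
	size_t i;

	r->delayed = false;
	for (i = 0; i < r->seg_count; i++) {
		r->segs[i] = malloc(r->seg_size);
		if (r->segs[i])
			continue;

		/* Roll back whatever was already allocated. */
		while (i--)
			free(r->segs[i]);

		if (r->seg_count <= 2) {
			/* Few, large segments: report success but mark the
			 * request delayed, so the firmware re-requests many
			 * smaller chunks later. */
			r->delayed = true;
			return 0;
		}
		return -1;
	}
	return 0;
}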
diff --git a/drivers/net/wireless/ath/ath11k/qmi.h b/drivers/net/wireless/ath/ath11k/qmi.h
index 92925c9eac67..7bad374cc23a 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.h
+++ b/drivers/net/wireless/ath/ath11k/qmi.h
@@ -125,6 +125,7 @@ struct ath11k_qmi {
struct target_mem_chunk target_mem[ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01];
u32 mem_seg_count;
u32 target_mem_mode;
+ bool target_mem_delayed;
u8 cal_done;
struct target_info target;
struct m3_mem_region m3_mem;
diff --git a/drivers/net/wireless/ath/ath11k/spectral.c b/drivers/net/wireless/ath/ath11k/spectral.c
index ac2a8cfdc1c0..1afe67759659 100644
--- a/drivers/net/wireless/ath/ath11k/spectral.c
+++ b/drivers/net/wireless/ath/ath11k/spectral.c
@@ -148,7 +148,7 @@ static int remove_buf_file_handler(struct dentry *dentry)
return 0;
}
-static struct rchan_callbacks rfs_scan_cb = {
+static const struct rchan_callbacks rfs_scan_cb = {
.create_buf_file = create_buf_file_handler,
.remove_buf_file = remove_buf_file_handler,
};
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index da4b546b62cb..73869d445c5b 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -3460,6 +3460,9 @@ int ath11k_wmi_set_hw_mode(struct ath11k_base *ab,
len = sizeof(*cmd);
skb = ath11k_wmi_alloc_skb(wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
cmd = (struct wmi_pdev_set_hw_mode_cmd_param *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_HW_MODE_CMD) |
diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c
index 21191955a7c1..e055adfb5361 100644
--- a/drivers/net/wireless/ath/ath9k/common-spectral.c
+++ b/drivers/net/wireless/ath/ath9k/common-spectral.c
@@ -1053,7 +1053,7 @@ static int remove_buf_file_handler(struct dentry *dentry)
return 0;
}
-static struct rchan_callbacks rfs_spec_scan_cb = {
+static const struct rchan_callbacks rfs_spec_scan_cb = {
.create_buf_file = create_buf_file_handler,
.remove_buf_file = remove_buf_file_handler,
};
diff --git a/drivers/net/wireless/ath/wil6210/Kconfig b/drivers/net/wireless/ath/wil6210/Kconfig
index 6a95b199bf62..f074e9c31aa2 100644
--- a/drivers/net/wireless/ath/wil6210/Kconfig
+++ b/drivers/net/wireless/ath/wil6210/Kconfig
@@ -2,6 +2,7 @@
config WIL6210
tristate "Wilocity 60g WiFi card wil6210 support"
select WANT_DEV_COREDUMP
+ select CRC32
depends on CFG80211
depends on PCI
default n
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index 7220fc8fd9b0..8280092066e7 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -314,6 +314,7 @@ const struct iwl_cfg_trans_params iwl_ma_trans_cfg = {
const char iwl_ax101_name[] = "Intel(R) Wi-Fi 6 AX101";
const char iwl_ax200_name[] = "Intel(R) Wi-Fi 6 AX200 160MHz";
const char iwl_ax201_name[] = "Intel(R) Wi-Fi 6 AX201 160MHz";
+const char iwl_ax203_name[] = "Intel(R) Wi-Fi 6 AX203";
const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6 AX211 160MHz";
const char iwl_ax411_name[] = "Intel(R) Wi-Fi 6 AX411 160MHz";
const char iwl_ma_name[] = "Intel(R) Wi-Fi 6";
@@ -340,6 +341,18 @@ const struct iwl_cfg iwl_qu_b0_hr1_b0 = {
.num_rbds = IWL_NUM_RBDS_22000_HE,
};
+const struct iwl_cfg iwl_qu_b0_hr_b0 = {
+ .fw_name_pre = IWL_QU_B_HR_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
+};
+
const struct iwl_cfg iwl_ax201_cfg_qu_hr = {
.name = "Intel(R) Wi-Fi 6 AX201 160MHz",
.fw_name_pre = IWL_QU_B_HR_B_FW_PRE,
@@ -366,6 +379,18 @@ const struct iwl_cfg iwl_qu_c0_hr1_b0 = {
.num_rbds = IWL_NUM_RBDS_22000_HE,
};
+const struct iwl_cfg iwl_qu_c0_hr_b0 = {
+ .fw_name_pre = IWL_QU_C_HR_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
+};
+
const struct iwl_cfg iwl_ax201_cfg_qu_c0_hr_b0 = {
.name = "Intel(R) Wi-Fi 6 AX201 160MHz",
.fw_name_pre = IWL_QU_C_HR_B_FW_PRE,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
index 15248b064380..d8b7776a8dde 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -80,19 +80,45 @@ static void *iwl_acpi_get_dsm_object(struct device *dev, int rev, int func,
}
/*
- * Evaluate a DSM with no arguments and a single u8 return value (inside a
- * buffer object), verify and return that value.
+ * Generic function to evaluate a DSM with no arguments and an integer
+ * return value (either as an integer object or inside a buffer object);
+ * verify it and assign it to the "value" parameter.
+ * Return 0 on success and the appropriate errno otherwise.
*/
-int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func)
+static int iwl_acpi_get_dsm_integer(struct device *dev, int rev, int func,
+ u64 *value, size_t expected_size)
{
union acpi_object *obj;
- int ret;
+ int ret = 0;
obj = iwl_acpi_get_dsm_object(dev, rev, func, NULL);
- if (IS_ERR(obj))
+ if (IS_ERR(obj)) {
+ IWL_DEBUG_DEV_RADIO(dev,
+ "Failed to get DSM object. func= %d\n",
+ func);
return -ENOENT;
+ }
+
+ if (obj->type == ACPI_TYPE_INTEGER) {
+ *value = obj->integer.value;
+ } else if (obj->type == ACPI_TYPE_BUFFER) {
+ __le64 le_value = 0;
- if (obj->type != ACPI_TYPE_BUFFER) {
+ if (WARN_ON_ONCE(expected_size > sizeof(le_value)))
+ return -EINVAL;
+
+ /* if the buffer size doesn't match the expected size */
+ if (obj->buffer.length != expected_size)
+ IWL_DEBUG_DEV_RADIO(dev,
+ "ACPI: DSM invalid buffer size, padding or truncating (%d)\n",
+ obj->buffer.length);
+
+ /* assuming LE from Intel BIOS spec */
+ memcpy(&le_value, obj->buffer.pointer,
+ min_t(size_t, expected_size, (size_t)obj->buffer.length));
+ *value = le64_to_cpu(le_value);
+ } else {
IWL_DEBUG_DEV_RADIO(dev,
"ACPI: DSM method did not return a valid object, type=%d\n",
obj->type);
@@ -100,15 +126,6 @@ int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func)
goto out;
}
- if (obj->buffer.length != sizeof(u8)) {
- IWL_DEBUG_DEV_RADIO(dev,
- "ACPI: DSM method returned invalid buffer, length=%d\n",
- obj->buffer.length);
- ret = -EINVAL;
- goto out;
- }
-
- ret = obj->buffer.pointer[0];
IWL_DEBUG_DEV_RADIO(dev,
"ACPI: DSM method evaluated: func=%d, ret=%d\n",
func, ret);
@@ -116,6 +133,24 @@ out:
ACPI_FREE(obj);
return ret;
}
+
+/*
+ * Evaluate a DSM with no arguments and a u8 return value.
+ */
+int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func, u8 *value)
+{
+ int ret;
+ u64 val;
+
+ ret = iwl_acpi_get_dsm_integer(dev, rev, func, &val, sizeof(u8));
+
+ if (ret < 0)
+ return ret;
+
+ /* cast val (u64) to be u8 */
+ *value = (u8)val;
+ return 0;
+}
IWL_EXPORT_SYMBOL(iwl_acpi_get_dsm_u8);
union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
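The rewritten helper accepts the DSM result either as an ACPI integer or as a little-endian byte buffer whose length may not match what the caller expects. A standalone sketch of the buffer case, padding or truncating to the expected width:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static uint64_t le_bytes_to_u64(const uint8_t *buf, size_t len, size_t expected)
{
	uint64_t v = 0;
	size_t n = len < expected ? len : expected;
	size_t i;

	for (i = 0; i < n; i++)	/* assume little-endian, per the BIOS spec */
		v |= (uint64_t)buf[i] << (8 * i);
	return v;
}

int main(void)
{
	uint8_t raw[2] = { 0x34, 0x12 };

	/* expected_size 1 truncates to 0x34, matching the u8 accessor */
	printf("0x%llx\n", (unsigned long long)le_bytes_to_u64(raw, 2, 1));
	return 0;
}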
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
index 042dd247d387..1cce30d1ef55 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
*/
#ifndef __iwl_fw_acpi__
#define __iwl_fw_acpi__
@@ -99,7 +99,7 @@ struct iwl_fw_runtime;
void *iwl_acpi_get_object(struct device *dev, acpi_string method);
-int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func);
+int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func, u8 *value);
union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
union acpi_object *data,
@@ -159,7 +159,8 @@ static inline void *iwl_acpi_get_dsm_object(struct device *dev, int rev,
return ERR_PTR(-ENOENT);
}
-static inline int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func)
+static inline
+int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func, u8 *value)
{
return -ENOENT;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
index 6d8f7bff1243..895a907acdf0 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
@@ -224,40 +224,46 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
int iwl_pnvm_load(struct iwl_trans *trans,
struct iwl_notif_wait_data *notif_wait)
{
- const struct firmware *pnvm;
struct iwl_notification_wait pnvm_wait;
static const u16 ntf_cmds[] = { WIDE_ID(REGULATORY_AND_NVM_GROUP,
PNVM_INIT_COMPLETE_NTFY) };
- char pnvm_name[64];
- int ret;
/* if the SKU_ID is empty, there's nothing to do */
if (!trans->sku_id[0] && !trans->sku_id[1] && !trans->sku_id[2])
return 0;
- /* if we already have it, nothing to do either */
- if (trans->pnvm_loaded)
- return 0;
+ /* load from disk only if we haven't done it (or tried) before */
+ if (!trans->pnvm_loaded) {
+ const struct firmware *pnvm;
+ char pnvm_name[64];
+ int ret;
+
+ /*
+ * The prefix unfortunately includes a hyphen at the end, so
+ * don't add the dot here...
+ */
+ snprintf(pnvm_name, sizeof(pnvm_name), "%spnvm",
+ trans->cfg->fw_name_pre);
+
+ /* ...but replace the hyphen with the dot here. */
+ if (strlen(trans->cfg->fw_name_pre) < sizeof(pnvm_name))
+ pnvm_name[strlen(trans->cfg->fw_name_pre) - 1] = '.';
+
+ ret = firmware_request_nowarn(&pnvm, pnvm_name, trans->dev);
+ if (ret) {
+ IWL_DEBUG_FW(trans, "PNVM file %s not found %d\n",
+ pnvm_name, ret);
+ /*
+ * Pretend we've loaded it - at least we've tried and
+ * couldn't load it at all, so there's no point in
+ * trying again over and over.
+ */
+ trans->pnvm_loaded = true;
+ } else {
+ iwl_pnvm_parse(trans, pnvm->data, pnvm->size);
- /*
- * The prefix unfortunately includes a hyphen at the end, so
- * don't add the dot here...
- */
- snprintf(pnvm_name, sizeof(pnvm_name), "%spnvm",
- trans->cfg->fw_name_pre);
-
- /* ...but replace the hyphen with the dot here. */
- if (strlen(trans->cfg->fw_name_pre) < sizeof(pnvm_name))
- pnvm_name[strlen(trans->cfg->fw_name_pre) - 1] = '.';
-
- ret = firmware_request_nowarn(&pnvm, pnvm_name, trans->dev);
- if (ret) {
- IWL_DEBUG_FW(trans, "PNVM file %s not found %d\n",
- pnvm_name, ret);
- } else {
- iwl_pnvm_parse(trans, pnvm->data, pnvm->size);
-
- release_firmware(pnvm);
+ release_firmware(pnvm);
+ }
}
iwl_init_notification_wait(notif_wait, &pnvm_wait,
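The restructured loader marks pnvm_loaded even when the file is absent, so an optional firmware file is probed exactly once rather than on every reset. A standalone sketch of that load-once rule (file name illustrative):

#include <stdbool.h>
#include <stdio.h>

static bool loaded;

static void load_optional_blob(void)
{
	FILE *f;

	if (loaded)
		return;	/* already loaded (or already tried) */

	f = fopen("optional.blob", "rb");
	if (!f) {
		/* Pretend we've loaded it: there's no point in retrying
		 * a file that wasn't there, over and over. */
		loaded = true;
		return;
	}
	/* ... parse and apply the blob ... */
	fclose(f);
	loaded = true;
}

int main(void)
{
	load_optional_blob();	/* first call probes the filesystem */
	load_optional_blob();	/* second call is a no-op either way */
	return 0;
}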
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 27cb0406ba9a..86e1d57df65e 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
#ifndef __IWL_CONFIG_H__
@@ -445,7 +445,7 @@ struct iwl_cfg {
#define IWL_CFG_CORES_BT_GNSS 0x5
#define IWL_SUBDEVICE_RF_ID(subdevice) ((u16)((subdevice) & 0x00F0) >> 4)
-#define IWL_SUBDEVICE_NO_160(subdevice) ((u16)((subdevice) & 0x0100) >> 9)
+#define IWL_SUBDEVICE_NO_160(subdevice) ((u16)((subdevice) & 0x0200) >> 9)
#define IWL_SUBDEVICE_CORES(subdevice) ((u16)((subdevice) & 0x1C00) >> 10)
struct iwl_dev_info {
@@ -491,6 +491,7 @@ extern const char iwl9260_killer_1550_name[];
extern const char iwl9560_killer_1550i_name[];
extern const char iwl9560_killer_1550s_name[];
extern const char iwl_ax200_name[];
+extern const char iwl_ax203_name[];
extern const char iwl_ax201_name[];
extern const char iwl_ax101_name[];
extern const char iwl_ax200_killer_1650w_name[];
@@ -574,6 +575,8 @@ extern const struct iwl_cfg iwl9560_2ac_cfg_soc;
extern const struct iwl_cfg iwl_qu_b0_hr1_b0;
extern const struct iwl_cfg iwl_qu_c0_hr1_b0;
extern const struct iwl_cfg iwl_quz_a0_hr1_b0;
+extern const struct iwl_cfg iwl_qu_b0_hr_b0;
+extern const struct iwl_cfg iwl_qu_c0_hr_b0;
extern const struct iwl_cfg iwl_ax200_cfg_cc;
extern const struct iwl_cfg iwl_ax201_cfg_qu_hr;
extern const struct iwl_cfg iwl_ax201_cfg_qu_hr;
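The IWL_SUBDEVICE_NO_160 change above fixes a mask/shift mismatch: masking bit 8 (0x0100) and shifting right by 9 can never produce a nonzero result, so the old macro silently dropped the flag. A standalone check of both forms (macro names shortened; the assumption, as in the fix, is that the flag sits in bit 9 of the subdevice ID):

#include <assert.h>
#include <stdint.h>

#define SUBDEVICE_NO_160_OLD(sub) ((uint16_t)((sub) & 0x0100) >> 9)
#define SUBDEVICE_NO_160_NEW(sub) ((uint16_t)((sub) & 0x0200) >> 9)

int main(void)
{
	assert(SUBDEVICE_NO_160_OLD(0x0200) == 0);	/* bug: flag lost */
	assert(SUBDEVICE_NO_160_NEW(0x0200) == 1);
	assert(SUBDEVICE_NO_160_NEW(0x0000) == 0);
	return 0;
}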
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
index a654147d3cd6..a80a35a7740f 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
@@ -180,13 +180,6 @@ static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
if (le32_to_cpu(tlv->length) < sizeof(*reg))
return -EINVAL;
- /* For safe using a string from FW make sure we have a
- * null terminator
- */
- reg->name[IWL_FW_INI_MAX_NAME - 1] = 0;
-
- IWL_DEBUG_FW(trans, "WRT: parsing region: %s\n", reg->name);
-
if (id >= IWL_FW_INI_MAX_REGION_ID) {
IWL_ERR(trans, "WRT: Invalid region id %u\n", id);
return -EINVAL;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.c b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
index 2ac20d0a30eb..2b7ef1583e7f 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
@@ -150,16 +150,17 @@ u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs)
}
IWL_EXPORT_SYMBOL(iwl_read_prph);
-void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
+void iwl_write_prph_delay(struct iwl_trans *trans, u32 ofs, u32 val, u32 delay_ms)
{
unsigned long flags;
if (iwl_trans_grab_nic_access(trans, &flags)) {
+ mdelay(delay_ms);
iwl_write_prph_no_grab(trans, ofs, val);
iwl_trans_release_nic_access(trans, &flags);
}
}
-IWL_EXPORT_SYMBOL(iwl_write_prph);
+IWL_EXPORT_SYMBOL(iwl_write_prph_delay);
int iwl_poll_prph_bit(struct iwl_trans *trans, u32 addr,
u32 bits, u32 mask, int timeout)
@@ -219,8 +220,8 @@ IWL_EXPORT_SYMBOL(iwl_clear_bits_prph);
void iwl_force_nmi(struct iwl_trans *trans)
{
if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_9000)
- iwl_write_prph(trans, DEVICE_SET_NMI_REG,
- DEVICE_SET_NMI_VAL_DRV);
+ iwl_write_prph_delay(trans, DEVICE_SET_NMI_REG,
+ DEVICE_SET_NMI_VAL_DRV, 1);
else if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
iwl_write_umac_prph(trans, UREG_NIC_SET_NMI_DRIVER,
UREG_NIC_SET_NMI_DRIVER_NMI_FROM_DRIVER);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.h b/drivers/net/wireless/intel/iwlwifi/iwl-io.h
index 39bceee4e9e7..3c21c0e081f8 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2018-2019 Intel Corporation
+ * Copyright (C) 2018-2020 Intel Corporation
*/
#ifndef __iwl_io_h__
#define __iwl_io_h__
@@ -37,7 +37,13 @@ u32 iwl_read_prph_no_grab(struct iwl_trans *trans, u32 ofs);
u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs);
void iwl_write_prph_no_grab(struct iwl_trans *trans, u32 ofs, u32 val);
void iwl_write_prph64_no_grab(struct iwl_trans *trans, u64 ofs, u64 val);
-void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val);
+void iwl_write_prph_delay(struct iwl_trans *trans, u32 ofs,
+ u32 val, u32 delay_ms);
+static inline void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
+{
+ iwl_write_prph_delay(trans, ofs, val, 0);
+}
+
int iwl_poll_prph_bit(struct iwl_trans *trans, u32 addr,
u32 bits, u32 mask, int timeout);
void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index 0b03fdedc1f7..1158e256f601 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -301,6 +301,12 @@
#define RADIO_RSP_ADDR_POS (6)
#define RADIO_RSP_RD_CMD (3)
+/* LTR control (Qu only) */
+#define HPM_MAC_LTR_CSR 0xa0348c
+#define HPM_MAC_LRT_ENABLE_ALL 0xf
+/* also uses CSR_LTR_* for values */
+#define HPM_UMAC_LTR 0xa03480
+
/* FW monitor */
#define MON_BUFF_SAMPLE_CTL (0xa03c00)
#define MON_BUFF_BASE_ADDR (0xa03c1c)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index c025188fa9bc..df018972a46b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -2032,8 +2032,6 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
mutex_lock(&mvm->mutex);
- clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
-
/* get the BSS vif pointer again */
vif = iwl_mvm_get_bss_vif(mvm);
if (IS_ERR_OR_NULL(vif))
@@ -2148,6 +2146,8 @@ out_iterate:
iwl_mvm_d3_disconnect_iter, keep ? vif : NULL);
out:
+ clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
+
/* no need to reset the device in unified images, if successful */
if (unified_image && !ret) {
/* nothing else to do if we already sent D0I3_END_CMD */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
index 573e46956c14..38d0bfb649cc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
@@ -459,7 +459,10 @@ static ssize_t iwl_dbgfs_os_device_timediff_read(struct file *file,
const size_t bufsz = sizeof(buf);
int pos = 0;
+ mutex_lock(&mvm->mutex);
iwl_mvm_get_sync_time(mvm, &curr_gp2, &curr_os);
+ mutex_unlock(&mvm->mutex);
+
do_div(curr_os, NSEC_PER_USEC);
diff = curr_os - curr_gp2;
pos += scnprintf(buf + pos, bufsz - pos, "diff=%lld\n", diff);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 0637eb1cff4e..313e9f106f46 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -1090,20 +1090,22 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
static u8 iwl_mvm_eval_dsm_indonesia_5g2(struct iwl_mvm *mvm)
{
+ u8 value;
+
int ret = iwl_acpi_get_dsm_u8((&mvm->fwrt)->dev, 0,
- DSM_FUNC_ENABLE_INDONESIA_5G2);
+ DSM_FUNC_ENABLE_INDONESIA_5G2, &value);
if (ret < 0)
IWL_DEBUG_RADIO(mvm,
"Failed to evaluate DSM function ENABLE_INDONESIA_5G2, ret=%d\n",
ret);
- else if (ret >= DSM_VALUE_INDONESIA_MAX)
+ else if (value >= DSM_VALUE_INDONESIA_MAX)
IWL_DEBUG_RADIO(mvm,
- "DSM function ENABLE_INDONESIA_5G2 return invalid value, ret=%d\n",
- ret);
+ "DSM function ENABLE_INDONESIA_5G2 return invalid value, value=%d\n",
+ value);
- else if (ret == DSM_VALUE_INDONESIA_ENABLE) {
+ else if (value == DSM_VALUE_INDONESIA_ENABLE) {
IWL_DEBUG_RADIO(mvm,
"Evaluated DSM function ENABLE_INDONESIA_5G2: Enabling 5g2\n");
return DSM_VALUE_INDONESIA_ENABLE;
@@ -1114,25 +1116,26 @@ static u8 iwl_mvm_eval_dsm_indonesia_5g2(struct iwl_mvm *mvm)
static u8 iwl_mvm_eval_dsm_disable_srd(struct iwl_mvm *mvm)
{
+ u8 value;
int ret = iwl_acpi_get_dsm_u8((&mvm->fwrt)->dev, 0,
- DSM_FUNC_DISABLE_SRD);
+ DSM_FUNC_DISABLE_SRD, &value);
if (ret < 0)
IWL_DEBUG_RADIO(mvm,
"Failed to evaluate DSM function DISABLE_SRD, ret=%d\n",
ret);
- else if (ret >= DSM_VALUE_SRD_MAX)
+ else if (value >= DSM_VALUE_SRD_MAX)
IWL_DEBUG_RADIO(mvm,
- "DSM function DISABLE_SRD return invalid value, ret=%d\n",
- ret);
+ "DSM function DISABLE_SRD return invalid value, value=%d\n",
+ value);
- else if (ret == DSM_VALUE_SRD_PASSIVE) {
+ else if (value == DSM_VALUE_SRD_PASSIVE) {
IWL_DEBUG_RADIO(mvm,
"Evaluated DSM function DISABLE_SRD: setting SRD to passive\n");
return DSM_VALUE_SRD_PASSIVE;
- } else if (ret == DSM_VALUE_SRD_DISABLE) {
+ } else if (value == DSM_VALUE_SRD_DISABLE) {
IWL_DEBUG_RADIO(mvm,
"Evaluated DSM function DISABLE_SRD: disabling SRD\n");
return DSM_VALUE_SRD_DISABLE;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index da32937ba9a7..43ff0407916a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -4194,6 +4194,9 @@ static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm,
iwl_mvm_binding_remove_vif(mvm, vif);
out:
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD) &&
+ switching_chanctx)
+ return;
mvmvif->phy_ctxt = NULL;
iwl_mvm_power_update_mac(mvm);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 98f62d78cf9c..61618f607927 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -791,6 +791,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
if (!mvm->scan_cmd)
goto out_free;
+ /* invalidate ids to prevent accidental removal of sta_id 0 */
+ mvm->aux_sta.sta_id = IWL_MVM_INVALID_STA;
+ mvm->snif_sta.sta_id = IWL_MVM_INVALID_STA;
+
/* Set EBS as successful as long as not stated otherwise by the FW. */
mvm->last_ebs_successful = true;
@@ -1205,6 +1209,7 @@ static void iwl_mvm_reprobe_wk(struct work_struct *wk)
reprobe = container_of(wk, struct iwl_mvm_reprobe, work);
if (device_reprobe(reprobe->dev))
dev_err(reprobe->dev, "reprobe failed!\n");
+ put_device(reprobe->dev);
kfree(reprobe);
module_put(THIS_MODULE);
}
@@ -1255,7 +1260,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
module_put(THIS_MODULE);
return;
}
- reprobe->dev = mvm->trans->dev;
+ reprobe->dev = get_device(mvm->trans->dev);
INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
schedule_work(&reprobe->work);
} else if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index dc174410bf9c..578c353ae02c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -2057,6 +2057,9 @@ int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
lockdep_assert_held(&mvm->mutex);
+ if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_MVM_INVALID_STA))
+ return -EINVAL;
+
iwl_mvm_disable_txq(mvm, NULL, mvm->snif_queue, IWL_MAX_TID_COUNT, 0);
ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
if (ret)
@@ -2071,6 +2074,9 @@ int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm)
lockdep_assert_held(&mvm->mutex);
+ if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_MVM_INVALID_STA))
+ return -EINVAL;
+
iwl_mvm_disable_txq(mvm, NULL, mvm->aux_queue, IWL_MAX_TID_COUNT, 0);
ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id);
if (ret)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index a983c215df31..3712adc3ccc2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -773,6 +773,7 @@ iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
next = skb_gso_segment(skb, netdev_flags);
skb_shinfo(skb)->gso_size = mss;
+ skb_shinfo(skb)->gso_type = ipv4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
if (WARN_ON_ONCE(IS_ERR(next)))
return -EINVAL;
else if (next)
@@ -795,6 +796,8 @@ iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
if (tcp_payload_len > mss) {
skb_shinfo(tmp)->gso_size = mss;
+ skb_shinfo(tmp)->gso_type = ipv4 ? SKB_GSO_TCPV4 :
+ SKB_GSO_TCPV6;
} else {
if (qos) {
u8 *qc;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
index 36bf414a388a..5b5134dd49af 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -75,6 +75,15 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
const struct fw_img *fw)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u32 ltr_val = CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ |
+ u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
+ CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE) |
+ u32_encode_bits(250,
+ CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL) |
+ CSR_LTR_LONG_VAL_AD_SNOOP_REQ |
+ u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
+ CSR_LTR_LONG_VAL_AD_SNOOP_SCALE) |
+ u32_encode_bits(250, CSR_LTR_LONG_VAL_AD_SNOOP_VAL);
struct iwl_context_info_gen3 *ctxt_info_gen3;
struct iwl_prph_scratch *prph_scratch;
struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl;
@@ -189,8 +198,10 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
/* Allocate IML */
iml_img = dma_alloc_coherent(trans->dev, trans->iml_len,
&trans_pcie->iml_dma_addr, GFP_KERNEL);
- if (!iml_img)
- return -ENOMEM;
+ if (!iml_img) {
+ ret = -ENOMEM;
+ goto err_free_ctxt_info;
+ }
memcpy(iml_img, trans->iml, trans->iml_len);
@@ -206,23 +217,19 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL,
CSR_AUTO_FUNC_BOOT_ENA);
- if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210) {
- /*
- * The firmware initializes this again later (to a smaller
- * value), but for the boot process initialize the LTR to
- * ~250 usec.
- */
- u32 val = CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ |
- u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
- CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE) |
- u32_encode_bits(250,
- CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL) |
- CSR_LTR_LONG_VAL_AD_SNOOP_REQ |
- u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
- CSR_LTR_LONG_VAL_AD_SNOOP_SCALE) |
- u32_encode_bits(250, CSR_LTR_LONG_VAL_AD_SNOOP_VAL);
-
- iwl_write32(trans, CSR_LTR_LONG_VAL_AD, val);
+ /*
+ * To work around hardware latency issues during the boot process,
+ * initialize the LTR to ~250 usec (see ltr_val above).
+ * The firmware initializes this again later (to a smaller value).
+ */
+ if ((trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210 ||
+ trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) &&
+ !trans->trans_cfg->integrated) {
+ iwl_write32(trans, CSR_LTR_LONG_VAL_AD, ltr_val);
+ } else if (trans->trans_cfg->integrated &&
+ trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) {
+ iwl_write_prph(trans, HPM_MAC_LTR_CSR, HPM_MAC_LRT_ENABLE_ALL);
+ iwl_write_prph(trans, HPM_UMAC_LTR, ltr_val);
}
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
@@ -232,6 +239,11 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
return 0;
+err_free_ctxt_info:
+ dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3),
+ trans_pcie->ctxt_info_gen3,
+ trans_pcie->ctxt_info_dma_addr);
+ trans_pcie->ctxt_info_gen3 = NULL;
err_free_prph_info:
dma_free_coherent(trans->dev,
sizeof(*prph_info),
@@ -294,6 +306,9 @@ int iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans,
return ret;
}
+ if (WARN_ON(prph_sc_ctrl->pnvm_cfg.pnvm_size))
+ return -EBUSY;
+
prph_sc_ctrl->pnvm_cfg.pnvm_base_addr =
cpu_to_le64(trans_pcie->pnvm_dram.physical);
prph_sc_ctrl->pnvm_cfg.pnvm_size =
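The new err_free_ctxt_info label above extends the function's goto-based unwind: a failed IML allocation now frees the context info allocated just before it instead of leaking it. A standalone sketch of the ordering rule, where each label frees exactly what was already set up, in reverse order:

#include <stdlib.h>

static int setup(void **a, void **b)
{
	*a = malloc(64);
	if (!*a)
		goto err;

	*b = malloc(64);
	if (!*b)
		goto err_free_a;

	return 0;

err_free_a:
	free(*a);
	*a = NULL;
err:
	return -1;
}

int main(void)
{
	void *a, *b;

	if (setup(&a, &b))
		return 1;
	free(b);
	free(a);
	return 0;
}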
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 965982612e74..ed3f5b7aa71e 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -910,6 +910,11 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY,
iwl_qu_b0_hr1_b0, iwl_ax101_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
+ IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_qu_b0_hr_b0, iwl_ax203_name),
/* Qu C step */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
@@ -917,6 +922,11 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY,
iwl_qu_c0_hr1_b0, iwl_ax101_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
+ IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_qu_c0_hr_b0, iwl_ax203_name),
/* QuZ */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 285e0d586021..ab93a848a466 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -2107,7 +2107,8 @@ static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
while (offs < dwords) {
/* limit the time we spin here under lock to 1/2s */
- ktime_t timeout = ktime_add_us(ktime_get(), 500 * USEC_PER_MSEC);
+ unsigned long end = jiffies + HZ / 2;
+ bool resched = false;
if (iwl_trans_grab_nic_access(trans, &flags)) {
iwl_write32(trans, HBUS_TARG_MEM_RADDR,
@@ -2118,14 +2119,15 @@ static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
HBUS_TARG_MEM_RDAT);
offs++;
- /* calling ktime_get is expensive so
- * do it once in 128 reads
- */
- if (offs % 128 == 0 && ktime_after(ktime_get(),
- timeout))
+ if (time_after(jiffies, end)) {
+ resched = true;
break;
+ }
}
iwl_trans_release_nic_access(trans, &flags);
+
+ if (resched)
+ cond_resched();
} else {
return -EBUSY;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 5dda0015522d..83f4964f3cb2 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -201,6 +201,11 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = trans->txqs.txq[txq_id];
+ if (!txq) {
+ IWL_ERR(trans, "Trying to free a queue that wasn't allocated?\n");
+ return;
+ }
+
spin_lock_bh(&txq->lock);
while (txq->write_ptr != txq->read_ptr) {
IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.c b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
index 27eea909e32d..7ff1bb0ccc9c 100644
--- a/drivers/net/wireless/intel/iwlwifi/queue/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
@@ -142,26 +142,25 @@ void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
* idx is bounded by n_window
*/
int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
+ struct sk_buff *skb;
lockdep_assert_held(&txq->lock);
+ if (!txq->entries)
+ return;
+
iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
iwl_txq_get_tfd(trans, txq, idx));
- /* free SKB */
- if (txq->entries) {
- struct sk_buff *skb;
-
- skb = txq->entries[idx].skb;
+ skb = txq->entries[idx].skb;
- /* Can be called from irqs-disabled context
- * If skb is not NULL, it means that the whole queue is being
- * freed and that the queue is not empty - free the skb
- */
- if (skb) {
- iwl_op_mode_free_skb(trans->op_mode, skb);
- txq->entries[idx].skb = NULL;
- }
+ /* Can be called from irqs-disabled context
+ * If skb is not NULL, it means that the whole queue is being
+ * freed and that the queue is not empty - free the skb
+ */
+ if (skb) {
+ iwl_op_mode_free_skb(trans->op_mode, skb);
+ txq->entries[idx].skb = NULL;
}
}
@@ -841,10 +840,8 @@ void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id)
int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
struct sk_buff *skb = txq->entries[idx].skb;
- if (WARN_ON_ONCE(!skb))
- continue;
-
- iwl_txq_free_tso_page(trans, skb);
+ if (!WARN_ON_ONCE(!skb))
+ iwl_txq_free_tso_page(trans, skb);
}
iwl_txq_gen2_free_tfd(trans, txq);
txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
@@ -1494,28 +1491,28 @@ void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
*/
int rd_ptr = txq->read_ptr;
int idx = iwl_txq_get_cmd_index(txq, rd_ptr);
+ struct sk_buff *skb;
lockdep_assert_held(&txq->lock);
+ if (!txq->entries)
+ return;
+
/* We have only q->n_window txq->entries, but we use
* TFD_QUEUE_SIZE_MAX tfds
*/
iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr);
/* free SKB */
- if (txq->entries) {
- struct sk_buff *skb;
-
- skb = txq->entries[idx].skb;
+ skb = txq->entries[idx].skb;
- /* Can be called from irqs-disabled context
- * If skb is not NULL, it means that the whole queue is being
- * freed and that the queue is not empty - free the skb
- */
- if (skb) {
- iwl_op_mode_free_skb(trans->op_mode, skb);
- txq->entries[idx].skb = NULL;
- }
+ /* Can be called from irqs-disabled context
+ * If skb is not NULL, it means that the whole queue is being
+ * freed and that the queue is not empty - free the skb
+ */
+ if (skb) {
+ iwl_op_mode_free_skb(trans->op_mode, skb);
+ txq->entries[idx].skb = NULL;
}
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
index a44b7766dec6..c13547841a4e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
@@ -231,7 +231,7 @@ mt7615_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
int cmd, int *seq)
{
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
- enum mt76_txq_id qid;
+ enum mt76_mcuq_id qid;
mt7615_mcu_fill_msg(dev, skb, cmd, seq);
if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state))
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c
index 13d77f8fca86..9fb506f2ace6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c
@@ -83,7 +83,7 @@ static int mt7663s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
{
struct mt76_queue *q = &dev->q_rx[qid];
struct mt76_sdio *sdio = &dev->sdio;
- int len = 0, err, i, order;
+ int len = 0, err, i;
struct page *page;
u8 *buf;
@@ -96,8 +96,7 @@ static int mt7663s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
if (len > sdio->func->cur_blksize)
len = roundup(len, sdio->func->cur_blksize);
- order = get_order(len);
- page = __dev_alloc_pages(GFP_KERNEL, order);
+ page = __dev_alloc_pages(GFP_KERNEL, get_order(len));
if (!page)
return -ENOMEM;
@@ -106,7 +105,7 @@ static int mt7663s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
err = sdio_readsb(sdio->func, buf, MCR_WRDR(qid), len);
if (err < 0) {
dev_err(dev->dev, "sdio read data failed:%d\n", err);
- __free_pages(page, order);
+ put_page(page);
return err;
}
@@ -123,7 +122,7 @@ static int mt7663s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
if (q->queued + i + 1 == q->ndesc)
break;
}
- __free_pages(page, order);
+ put_page(page);
spin_lock_bh(&q->lock);
q->head = (q->head + i) % q->ndesc;
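
The switch from __free_pages(page, order) to put_page(page) matters if RX consumers took their own references on the page while frames were carved out of it; the producer must then drop only the reference it owns rather than free the page outright. A small refcount model of that assumption (plain C, illustrative names):

    /* Illustrative refcount model for the __free_pages() -> put_page()
     * change: whoever holds the last reference frees the page. */
    #include <stdio.h>
    #include <stdlib.h>

    struct page { int refcount; };

    static struct page *alloc_page_ref(void)
    {
        struct page *p = malloc(sizeof(*p));
        p->refcount = 1;            /* allocator's reference */
        return p;
    }
    static void get_page_ref(struct page *p) { p->refcount++; }
    static void put_page_ref(struct page *p)
    {
        if (--p->refcount == 0) {   /* last reference frees the page */
            free(p);
            printf("page freed\n");
        }
    }

    int main(void)
    {
        struct page *p = alloc_page_ref();
        get_page_ref(p);    /* a fragment attached to an skb */
        put_page_ref(p);    /* producer drops its ref; page survives */
        put_page_ref(p);    /* skb consumed; now the page is freed */
        return 0;
    }
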
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
index ed4635bd151a..102a8f14c22d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
@@ -40,9 +40,9 @@ static const struct ieee80211_iface_limit if_limits[] = {
.types = BIT(NL80211_IFTYPE_ADHOC)
}, {
.max = 16,
- .types = BIT(NL80211_IFTYPE_AP) |
+ .types = BIT(NL80211_IFTYPE_AP)
#ifdef CONFIG_MAC80211_MESH
- BIT(NL80211_IFTYPE_MESH_POINT)
+ | BIT(NL80211_IFTYPE_MESH_POINT)
#endif
}, {
.max = MT7915_MAX_INTERFACES,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
index 5fdd1a6d32ee..e211a2bd4d3c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
@@ -256,7 +256,7 @@ mt7915_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
struct mt7915_mcu_txd *mcu_txd;
u8 seq, pkt_fmt, qidx;
- enum mt76_txq_id txq;
+ enum mt76_mcuq_id qid;
__le32 *txd;
u32 val;
@@ -268,18 +268,18 @@ mt7915_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
seq = ++dev->mt76.mcu.msg_seq & 0xf;
if (cmd == -MCU_CMD_FW_SCATTER) {
- txq = MT_MCUQ_FWDL;
+ qid = MT_MCUQ_FWDL;
goto exit;
}
mcu_txd = (struct mt7915_mcu_txd *)skb_push(skb, sizeof(*mcu_txd));
if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state)) {
- txq = MT_MCUQ_WA;
+ qid = MT_MCUQ_WA;
qidx = MT_TX_MCU_PORT_RX_Q0;
pkt_fmt = MT_TX_TYPE_CMD;
} else {
- txq = MT_MCUQ_WM;
+ qid = MT_MCUQ_WM;
qidx = MT_TX_MCU_PORT_RX_Q0;
pkt_fmt = MT_TX_TYPE_CMD;
}
@@ -326,7 +326,7 @@ exit:
if (wait_seq)
*wait_seq = seq;
- return mt76_tx_queue_skb_raw(dev, mdev->q_mcu[txq], skb, 0);
+ return mt76_tx_queue_skb_raw(dev, mdev->q_mcu[qid], skb, 0);
}
static void
diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c
index 62b5b912818f..0b6facb17ff7 100644
--- a/drivers/net/wireless/mediatek/mt76/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/sdio.c
@@ -157,10 +157,14 @@ static void mt76s_net_worker(struct mt76_worker *w)
static int mt76s_process_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
- bool wake, mcu = q == dev->q_mcu[MT_MCUQ_WM];
struct mt76_queue_entry entry;
int nframes = 0;
+ bool mcu;
+ if (!q)
+ return 0;
+
+ mcu = q == dev->q_mcu[MT_MCUQ_WM];
while (q->queued > 0) {
if (!q->entry[q->tail].done)
break;
@@ -177,21 +181,12 @@ static int mt76s_process_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
nframes++;
}
- wake = q->stopped && q->queued < q->ndesc - 8;
- if (wake)
- q->stopped = false;
-
if (!q->queued)
wake_up(&dev->tx_wait);
- if (mcu)
- goto out;
-
- mt76_txq_schedule(&dev->phy, q->qid);
+ if (!mcu)
+ mt76_txq_schedule(&dev->phy, q->qid);
- if (wake)
- ieee80211_wake_queue(dev->hw, q->qid);
-out:
return nframes;
}
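
dev->q_mcu[MT_MCUQ_WM] can plausibly be NULL on configurations that never allocate that queue, so the helper now validates q before deriving `mcu` from it, and the wake/stop bookkeeping is dropped from this path entirely. A sketch of the reordering, with stand-in names:

    /* Sketch: validate the queue pointer before deriving anything
     * from it, and return a neutral value otherwise. */
    #include <stdio.h>

    struct queue { int queued; };

    static void schedule_more_tx(void) { printf("reschedule data tx\n"); }

    static int process_tx_queue(struct queue *q, const struct queue *mcu_q)
    {
        int nframes = 0;
        int is_mcu;

        if (!q)                     /* queue not allocated on this device */
            return 0;

        is_mcu = (q == mcu_q);      /* derived only once q is non-NULL */

        while (q->queued > 0) {     /* reap completed frames */
            q->queued--;
            nframes++;
        }

        if (!is_mcu)                /* only data queues get rescheduled */
            schedule_more_tx();

        return nframes;
    }

    int main(void)
    {
        struct queue data = { .queued = 3 }, mcu = { .queued = 1 };
        printf("%d\n", process_tx_queue(&data, &mcu)); /* 3, reschedules */
        printf("%d\n", process_tx_queue(NULL, &mcu));  /* 0, no crash */
        return 0;
    }
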
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index dc850109de22..b95d093728b9 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -811,11 +811,12 @@ static void mt76u_status_worker(struct mt76_worker *w)
struct mt76_dev *dev = container_of(usb, struct mt76_dev, usb);
struct mt76_queue_entry entry;
struct mt76_queue *q;
- bool wake;
int i;
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
q = dev->phy.q_tx[i];
+ if (!q)
+ continue;
while (q->queued > 0) {
if (!q->entry[q->tail].done)
@@ -827,10 +828,6 @@ static void mt76u_status_worker(struct mt76_worker *w)
mt76_queue_tx_complete(dev, q, &entry);
}
- wake = q->stopped && q->queued < q->ndesc - 8;
- if (wake)
- q->stopped = false;
-
if (!q->queued)
wake_up(&dev->tx_wait);
@@ -839,8 +836,6 @@ static void mt76u_status_worker(struct mt76_worker *w)
if (dev->drv->tx_status_data &&
!test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
queue_work(dev->wq, &dev->usb.stat_work);
- if (wake)
- ieee80211_wake_queue(dev->hw, i);
}
}
diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.c b/drivers/net/wireless/mediatek/mt7601u/dma.c
index 5f99054f535b..af7d1ecb777c 100644
--- a/drivers/net/wireless/mediatek/mt7601u/dma.c
+++ b/drivers/net/wireless/mediatek/mt7601u/dma.c
@@ -152,8 +152,7 @@ mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e)
if (new_p) {
/* we have one extra ref from the allocator */
- __free_pages(e->p, MT_RX_ORDER);
-
+ put_page(e->p);
e->p = new_p;
}
}
@@ -310,7 +309,6 @@ static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev,
}
e = &q->e[q->end];
- e->skb = skb;
usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len,
mt7601u_complete_tx, q);
ret = usb_submit_urb(e->urb, GFP_ATOMIC);
@@ -328,6 +326,7 @@ static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev,
q->end = (q->end + 1) % q->entries;
q->used++;
+ e->skb = skb;
if (q->used >= q->entries)
ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
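
Moving `e->skb = skb` to after the successful usb_submit_urb() and the slot accounting reads as publish-after-success: on a failed submit the queue entry never holds a pointer it does not own. A sketch of that ordering, names illustrative:

    /* Sketch: only record ownership of a resource in shared state
     * once the operation that hands it off has succeeded. */
    #include <stdio.h>

    struct entry { void *skb; };

    static int submit(void *skb) { return skb ? 0 : -1; } /* stand-in */

    static int dma_submit_tx(struct entry *e, void *skb)
    {
        int ret = submit(skb);
        if (ret)            /* on failure the entry never owned skb */
            return ret;

        e->skb = skb;       /* publish only after success */
        return 0;
    }

    int main(void)
    {
        struct entry e = { 0 };
        int buf;
        printf("%d %p\n", dma_submit_tx(&e, &buf), e.skb);
        return 0;
    }
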
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index a7259dbc953d..965bd9589045 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -78,7 +78,6 @@ static void rtl_fw_do_work(const struct firmware *firmware, void *context,
rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
"Firmware callback routine entered!\n");
- complete(&rtlpriv->firmware_loading_complete);
if (!firmware) {
if (rtlpriv->cfg->alt_fw_name) {
err = request_firmware(&firmware,
@@ -91,13 +90,13 @@ static void rtl_fw_do_work(const struct firmware *firmware, void *context,
}
pr_err("Selected firmware is not available\n");
rtlpriv->max_fw_size = 0;
- return;
+ goto exit;
}
found_alt:
if (firmware->size > rtlpriv->max_fw_size) {
pr_err("Firmware is too big!\n");
release_firmware(firmware);
- return;
+ goto exit;
}
if (!is_wow) {
memcpy(rtlpriv->rtlhal.pfirmware, firmware->data,
@@ -109,6 +108,9 @@ found_alt:
rtlpriv->rtlhal.wowlan_fwsize = firmware->size;
}
release_firmware(firmware);
+
+exit:
+ complete(&rtlpriv->firmware_loading_complete);
}
void rtl_fw_cb(const struct firmware *firmware, void *context)
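
The rtlwifi hunk converts two early returns into `goto exit` and moves complete() from the top of the callback to the exit label, so waiters on firmware_loading_complete are released only once the firmware has actually been copied or definitively rejected. A single-exit model of the pattern:

    /* Sketch of the single-exit completion pattern: every failure path
     * funnels through one label so the completion fires exactly once,
     * after the work is really done. The flag stands in for a kernel
     * struct completion. */
    #include <stdio.h>

    static int done;
    static void complete_loading(void) { done = 1; }

    static void fw_callback(const char *fw)
    {
        if (!fw) {
            printf("firmware missing\n");
            goto exit;          /* was an early return before the fix */
        }
        printf("firmware copied\n");
    exit:
        complete_loading();     /* signalled on every path, but late */
    }

    int main(void)
    {
        fw_callback(NULL);
        fw_callback("rtlwifi/fw.bin");
        return done ? 0 : 1;
    }
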
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index f1c1624cec8f..6f10e0998f1c 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -557,12 +557,14 @@ static int xen_register_credit_watch(struct xenbus_device *dev,
return -ENOMEM;
snprintf(node, maxlen, "%s/rate", dev->nodename);
vif->credit_watch.node = node;
+ vif->credit_watch.will_handle = NULL;
vif->credit_watch.callback = xen_net_rate_changed;
err = register_xenbus_watch(&vif->credit_watch);
if (err) {
pr_err("Failed to set watcher %s\n", vif->credit_watch.node);
kfree(node);
vif->credit_watch.node = NULL;
+ vif->credit_watch.will_handle = NULL;
vif->credit_watch.callback = NULL;
}
return err;
@@ -609,6 +611,7 @@ static int xen_register_mcast_ctrl_watch(struct xenbus_device *dev,
snprintf(node, maxlen, "%s/request-multicast-control",
dev->otherend);
vif->mcast_ctrl_watch.node = node;
+ vif->mcast_ctrl_watch.will_handle = NULL;
vif->mcast_ctrl_watch.callback = xen_mcast_ctrl_changed;
err = register_xenbus_watch(&vif->mcast_ctrl_watch);
if (err) {
@@ -616,6 +619,7 @@ static int xen_register_mcast_ctrl_watch(struct xenbus_device *dev,
vif->mcast_ctrl_watch.node);
kfree(node);
vif->mcast_ctrl_watch.node = NULL;
+ vif->mcast_ctrl_watch.will_handle = NULL;
vif->mcast_ctrl_watch.callback = NULL;
}
return err;
@@ -820,7 +824,7 @@ static void connect(struct backend_info *be)
xenvif_carrier_on(be->vif);
unregister_hotplug_status_watch(be);
- err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch,
+ err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, NULL,
hotplug_status_changed,
"%s/%s", dev->nodename, "hotplug-status");
if (!err)
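
These hunks initialize the watch's will_handle member to NULL at every registration site and pass an extra NULL through xenbus_watch_pathfmt(); per my reading, will_handle is an optional filter callback and NULL means every event is handled. A sketch of the defensive-initialization pattern, with illustrative types:

    /* Sketch: when a struct grows an optional callback, set it
     * explicitly at registration and re-clear it on the error path,
     * so no stale pointer survives a failed register. */
    #include <stdio.h>

    struct watch {
        const char *node;
        int  (*will_handle)(struct watch *w); /* optional filter, may be NULL */
        void (*callback)(struct watch *w);
    };

    static int register_watch(struct watch *w) { return w->node ? 0 : -1; }

    static int setup_watch(struct watch *w, const char *node,
                           void (*cb)(struct watch *w))
    {
        int err;

        w->node = node;
        w->will_handle = NULL;      /* explicit: accept every event */
        w->callback = cb;

        err = register_watch(w);
        if (err) {                  /* unwind to a fully inert state */
            w->node = NULL;
            w->will_handle = NULL;
            w->callback = NULL;
        }
        return err;
    }

    static void on_event(struct watch *w) { (void)w; }

    int main(void)
    {
        struct watch w;
        printf("%d\n", setup_watch(&w, "backend/vif/0/rate", on_event));
        return 0;
    }
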
diff --git a/drivers/nfc/s3fwrn5/nci.c b/drivers/nfc/s3fwrn5/nci.c
index 103bf5c92bdc..f042d3eaf8f6 100644
--- a/drivers/nfc/s3fwrn5/nci.c
+++ b/drivers/nfc/s3fwrn5/nci.c
@@ -23,31 +23,11 @@ static int s3fwrn5_nci_prop_rsp(struct nci_dev *ndev, struct sk_buff *skb)
static struct nci_driver_ops s3fwrn5_nci_prop_ops[] = {
{
.opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
- NCI_PROP_AGAIN),
- .rsp = s3fwrn5_nci_prop_rsp,
- },
- {
- .opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
- NCI_PROP_GET_RFREG),
- .rsp = s3fwrn5_nci_prop_rsp,
- },
- {
- .opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
NCI_PROP_SET_RFREG),
.rsp = s3fwrn5_nci_prop_rsp,
},
{
.opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
- NCI_PROP_GET_RFREG_VER),
- .rsp = s3fwrn5_nci_prop_rsp,
- },
- {
- .opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
- NCI_PROP_SET_RFREG_VER),
- .rsp = s3fwrn5_nci_prop_rsp,
- },
- {
- .opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
NCI_PROP_START_RFREG),
.rsp = s3fwrn5_nci_prop_rsp,
},
@@ -61,11 +41,6 @@ static struct nci_driver_ops s3fwrn5_nci_prop_ops[] = {
NCI_PROP_FW_CFG),
.rsp = s3fwrn5_nci_prop_rsp,
},
- {
- .opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
- NCI_PROP_WR_RESET),
- .rsp = s3fwrn5_nci_prop_rsp,
- },
};
void s3fwrn5_nci_get_prop_ops(struct nci_driver_ops **ops, size_t *n)
diff --git a/drivers/nfc/s3fwrn5/nci.h b/drivers/nfc/s3fwrn5/nci.h
index 23c0b28f247a..a80f0fb082a8 100644
--- a/drivers/nfc/s3fwrn5/nci.h
+++ b/drivers/nfc/s3fwrn5/nci.h
@@ -11,9 +11,6 @@
#include "s3fwrn5.h"
-#define NCI_PROP_AGAIN 0x01
-
-#define NCI_PROP_GET_RFREG 0x21
#define NCI_PROP_SET_RFREG 0x22
struct nci_prop_set_rfreg_cmd {
@@ -25,23 +22,6 @@ struct nci_prop_set_rfreg_rsp {
__u8 status;
};
-#define NCI_PROP_GET_RFREG_VER 0x24
-
-struct nci_prop_get_rfreg_ver_rsp {
- __u8 status;
- __u8 data[8];
-};
-
-#define NCI_PROP_SET_RFREG_VER 0x25
-
-struct nci_prop_set_rfreg_ver_cmd {
- __u8 data[8];
-};
-
-struct nci_prop_set_rfreg_ver_rsp {
- __u8 status;
-};
-
#define NCI_PROP_START_RFREG 0x26
struct nci_prop_start_rfreg_rsp {
@@ -70,8 +50,6 @@ struct nci_prop_fw_cfg_rsp {
__u8 status;
};
-#define NCI_PROP_WR_RESET 0x2f
-
void s3fwrn5_nci_get_prop_ops(struct nci_driver_ops **ops, size_t *n);
int s3fwrn5_nci_rf_configure(struct s3fwrn5_info *info, const char *fw_name);
diff --git a/drivers/nfc/s3fwrn5/phy_common.c b/drivers/nfc/s3fwrn5/phy_common.c
index 497b02b30ae7..81318478d5fd 100644
--- a/drivers/nfc/s3fwrn5/phy_common.c
+++ b/drivers/nfc/s3fwrn5/phy_common.c
@@ -20,7 +20,8 @@ void s3fwrn5_phy_set_wake(void *phy_id, bool wake)
mutex_lock(&phy->mutex);
gpio_set_value(phy->gpio_fw_wake, wake);
- msleep(S3FWRN5_EN_WAIT_TIME);
+ if (wake)
+ msleep(S3FWRN5_EN_WAIT_TIME);
mutex_unlock(&phy->mutex);
}
EXPORT_SYMBOL(s3fwrn5_phy_set_wake);
diff --git a/drivers/ntb/hw/idt/ntb_hw_idt.c b/drivers/ntb/hw/idt/ntb_hw_idt.c
index d54261f50851..e7a4c2aa8baa 100644
--- a/drivers/ntb/hw/idt/ntb_hw_idt.c
+++ b/drivers/ntb/hw/idt/ntb_hw_idt.c
@@ -2511,7 +2511,7 @@ static int idt_init_dbgfs(struct idt_ntb_dev *ndev)
/* If the top directory is not created then do nothing */
if (IS_ERR_OR_NULL(dbgfs_topdir)) {
dev_info(&ndev->ntb.pdev->dev, "Top DebugFS directory absent");
- return PTR_ERR(dbgfs_topdir);
+ return PTR_ERR_OR_ZERO(dbgfs_topdir);
}
/* Create the info file node */
@@ -2756,7 +2756,7 @@ static int idt_pci_probe(struct pci_dev *pdev,
/* Allocate the memory for IDT NTB device data */
ndev = idt_create_dev(pdev, id);
- if (IS_ERR_OR_NULL(ndev))
+ if (IS_ERR(ndev))
return PTR_ERR(ndev);
/* Initialize the basic PCI subsystem of the device */
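
PTR_ERR(NULL) already evaluates to 0, but PTR_ERR_OR_ZERO() states the intent: propagate the encoded error for error pointers, return success for anything else, including a missing debugfs root. The second hunk narrows IS_ERR_OR_NULL() to IS_ERR() on the assumption that idt_create_dev() never returns plain NULL. A userspace model of the helpers from <linux/err.h>:

    /* Minimal model of IS_ERR()/PTR_ERR_OR_ZERO(); in the kernel these
     * live in <linux/err.h>. */
    #include <stdio.h>

    #define MAX_ERRNO       4095
    #define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

    static inline long PTR_ERR(const void *p) { return (long)p; }
    static inline int  IS_ERR(const void *p)
    {
        return IS_ERR_VALUE((unsigned long)p);
    }
    static inline long PTR_ERR_OR_ZERO(const void *p)
    {
        return IS_ERR(p) ? PTR_ERR(p) : 0;  /* NULL and valid map to 0 */
    }

    int main(void)
    {
        void *bad = (void *)(long)-12;      /* encoded -ENOMEM */
        printf("%ld %ld\n", PTR_ERR_OR_ZERO(bad), PTR_ERR_OR_ZERO(NULL));
        return 0;                           /* prints: -12 0 */
    }
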
diff --git a/drivers/ntb/hw/intel/ntb_hw_gen1.h b/drivers/ntb/hw/intel/ntb_hw_gen1.h
index 1b759942d8af..344249fc18d1 100644
--- a/drivers/ntb/hw/intel/ntb_hw_gen1.h
+++ b/drivers/ntb/hw/intel/ntb_hw_gen1.h
@@ -141,6 +141,7 @@
#define NTB_HWERR_B2BDOORBELL_BIT14 BIT_ULL(2)
#define NTB_HWERR_MSIX_VECTOR32_BAD BIT_ULL(3)
#define NTB_HWERR_BAR_ALIGN BIT_ULL(4)
+#define NTB_HWERR_LTR_BAD BIT_ULL(5)
extern struct intel_b2b_addr xeon_b2b_usd_addr;
extern struct intel_b2b_addr xeon_b2b_dsd_addr;
diff --git a/drivers/ntb/hw/intel/ntb_hw_gen4.c b/drivers/ntb/hw/intel/ntb_hw_gen4.c
index bc4541cbf8c6..fede05151f69 100644
--- a/drivers/ntb/hw/intel/ntb_hw_gen4.c
+++ b/drivers/ntb/hw/intel/ntb_hw_gen4.c
@@ -177,8 +177,10 @@ int gen4_init_dev(struct intel_ntb_dev *ndev)
ndev->reg = &gen4_reg;
- if (pdev_is_ICX(pdev))
+ if (pdev_is_ICX(pdev)) {
ndev->hwerr_flags |= NTB_HWERR_BAR_ALIGN;
+ ndev->hwerr_flags |= NTB_HWERR_LTR_BAD;
+ }
ppd1 = ioread32(ndev->self_mmio + GEN4_PPD1_OFFSET);
ndev->ntb.topo = gen4_ppd_topo(ndev, ppd1);
@@ -431,6 +433,25 @@ static int intel_ntb4_link_enable(struct ntb_dev *ntb,
dev_dbg(&ntb->pdev->dev,
"ignoring max_width %d\n", max_width);
+ if (!(ndev->hwerr_flags & NTB_HWERR_LTR_BAD)) {
+ u32 ltr;
+
+ /* Setup active snoop LTR values */
+ ltr = NTB_LTR_ACTIVE_REQMNT | NTB_LTR_ACTIVE_VAL | NTB_LTR_ACTIVE_LATSCALE;
+ /* Setup active non-snoop values */
+ ltr = (ltr << NTB_LTR_NS_SHIFT) | ltr;
+ iowrite32(ltr, ndev->self_mmio + GEN4_LTR_ACTIVE_OFFSET);
+
+ /* Setup idle snoop LTR values */
+ ltr = NTB_LTR_IDLE_VAL | NTB_LTR_IDLE_LATSCALE | NTB_LTR_IDLE_REQMNT;
+ /* Setup idle non-snoop values */
+ ltr = (ltr << NTB_LTR_NS_SHIFT) | ltr;
+ iowrite32(ltr, ndev->self_mmio + GEN4_LTR_IDLE_OFFSET);
+
+ /* setup PCIe LTR to active */
+ iowrite8(NTB_LTR_SWSEL_ACTIVE, ndev->self_mmio + GEN4_LTR_SWSEL_OFFSET);
+ }
+
ntb_ctl = NTB_CTL_E2I_BAR23_SNOOP | NTB_CTL_I2E_BAR23_SNOOP;
ntb_ctl |= NTB_CTL_E2I_BAR45_SNOOP | NTB_CTL_I2E_BAR45_SNOOP;
iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);
@@ -476,6 +497,10 @@ static int intel_ntb4_link_disable(struct ntb_dev *ntb)
lnkctl |= GEN4_LINK_CTRL_LINK_DISABLE;
iowrite16(lnkctl, ndev->self_mmio + GEN4_LINK_CTRL_OFFSET);
+ /* set LTR to idle */
+ if (!(ndev->hwerr_flags & NTB_HWERR_LTR_BAD))
+ iowrite8(NTB_LTR_SWSEL_IDLE, ndev->self_mmio + GEN4_LTR_SWSEL_OFFSET);
+
ndev->dev_up = 0;
return 0;
diff --git a/drivers/ntb/hw/intel/ntb_hw_gen4.h b/drivers/ntb/hw/intel/ntb_hw_gen4.h
index a868c788de02..3fcd3fdce9ed 100644
--- a/drivers/ntb/hw/intel/ntb_hw_gen4.h
+++ b/drivers/ntb/hw/intel/ntb_hw_gen4.h
@@ -35,6 +35,9 @@
#define GEN4_IM_SPAD_SEM_OFFSET 0x00c0 /* SPAD hw semaphore */
#define GEN4_IM_SPAD_STICKY_OFFSET 0x00c4 /* sticky SPAD */
#define GEN4_IM_DOORBELL_OFFSET 0x0100 /* 0-31 doorbells */
+#define GEN4_LTR_SWSEL_OFFSET 0x30ec
+#define GEN4_LTR_ACTIVE_OFFSET 0x30f0
+#define GEN4_LTR_IDLE_OFFSET 0x30f4
#define GEN4_EM_SPAD_OFFSET 0x8080
/* note, link status is now in MMIO and not config space for NTB */
#define GEN4_LINK_CTRL_OFFSET 0xb050
@@ -80,6 +83,18 @@
#define NTB_SJC_FORCEDETECT 0x000004
+#define NTB_LTR_SWSEL_ACTIVE 0x0
+#define NTB_LTR_SWSEL_IDLE 0x1
+
+#define NTB_LTR_NS_SHIFT 16
+#define NTB_LTR_ACTIVE_VAL 0x0000 /* 0 us */
+#define NTB_LTR_ACTIVE_LATSCALE 0x0800 /* 1us scale */
+#define NTB_LTR_ACTIVE_REQMNT 0x8000 /* snoop req enable */
+
+#define NTB_LTR_IDLE_VAL 0x0258 /* 600 us */
+#define NTB_LTR_IDLE_LATSCALE 0x0800 /* 1us scale */
+#define NTB_LTR_IDLE_REQMNT 0x8000 /* snoop req enable */
+
ssize_t ndev_ntb4_debugfs_read(struct file *filp, char __user *ubuf,
size_t count, loff_t *offp);
int gen4_init_dev(struct intel_ntb_dev *ndev);
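
The LTR words written in the gen4.c hunk pack the snoop values into the low 16 bits and duplicate them into the non-snoop half via NTB_LTR_NS_SHIFT, skipping the programming entirely on parts flagged NTB_HWERR_LTR_BAD. A worked example using the defines added to ntb_hw_gen4.h above:

    /* Worked example of the LTR register values built in the gen4 hunk. */
    #include <stdio.h>
    #include <stdint.h>

    #define NTB_LTR_NS_SHIFT        16
    #define NTB_LTR_ACTIVE_VAL      0x0000  /* 0 us */
    #define NTB_LTR_ACTIVE_LATSCALE 0x0800  /* 1us scale */
    #define NTB_LTR_ACTIVE_REQMNT   0x8000  /* snoop req enable */
    #define NTB_LTR_IDLE_VAL        0x0258  /* 600 us */
    #define NTB_LTR_IDLE_LATSCALE   0x0800
    #define NTB_LTR_IDLE_REQMNT     0x8000

    int main(void)
    {
        uint32_t ltr;

        /* snoop half in the low 16 bits, non-snoop copy in the high 16 */
        ltr = NTB_LTR_ACTIVE_REQMNT | NTB_LTR_ACTIVE_VAL |
              NTB_LTR_ACTIVE_LATSCALE;
        ltr = (ltr << NTB_LTR_NS_SHIFT) | ltr;
        printf("active LTR word: 0x%08x\n", ltr);   /* 0x88008800 */

        ltr = NTB_LTR_IDLE_VAL | NTB_LTR_IDLE_LATSCALE | NTB_LTR_IDLE_REQMNT;
        ltr = (ltr << NTB_LTR_NS_SHIFT) | ltr;
        printf("idle   LTR word: 0x%08x\n", ltr);   /* 0x8a588a58 */
        return 0;
    }
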
diff --git a/drivers/ntb/msi.c b/drivers/ntb/msi.c
index 0a5e884a920c..3f05cfbc73af 100644
--- a/drivers/ntb/msi.c
+++ b/drivers/ntb/msi.c
@@ -282,15 +282,13 @@ int ntbm_msi_request_threaded_irq(struct ntb_dev *ntb, irq_handler_t handler,
struct ntb_msi_desc *msi_desc)
{
struct msi_desc *entry;
- struct irq_desc *desc;
int ret;
if (!ntb->msi)
return -EINVAL;
for_each_pci_msi_entry(entry, ntb->pdev) {
- desc = irq_to_desc(entry->irq);
- if (desc->action)
+ if (irq_has_action(entry->irq))
continue;
ret = devm_request_threaded_irq(&ntb->dev, entry->irq, handler,
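
Replacing the irq_to_desc()->action peek with irq_has_action() keeps the driver on the public genirq API instead of dereferencing descriptor internals. A userspace model of the accessor shape:

    /* Model of the accessor change: expose a predicate rather than let
     * callers peek at internal descriptor state. Structures are stand-ins. */
    #include <stdio.h>
    #include <stdbool.h>

    struct irq_desc { void *action; };
    static struct irq_desc table[4];

    static bool irq_has_action_model(int irq)   /* the accessor */
    {
        return table[irq].action != NULL;
    }

    int main(void)
    {
        int dummy;
        table[2].action = &dummy;
        for (int irq = 0; irq < 4; irq++)
            if (!irq_has_action_model(irq))
                printf("irq %d is free\n", irq);
        return 0;
    }
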
diff --git a/drivers/nvdimm/btt.h b/drivers/nvdimm/btt.h
index 2e258bee7db2..aa53e0b769bd 100644
--- a/drivers/nvdimm/btt.h
+++ b/drivers/nvdimm/btt.h
@@ -7,7 +7,6 @@
#ifndef _LINUX_BTT_H
#define _LINUX_BTT_H
-#include <linux/badblocks.h>
#include <linux/types.h>
#define BTT_SIG_LEN 16
@@ -197,6 +196,8 @@ struct arena_info {
int log_index[2];
};
+struct badblocks;
+
/**
* struct btt - handle for a BTT instance
* @btt_disk: Pointer to the gendisk for BTT device
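
btt.h only ever stores a pointer to struct badblocks, so a forward declaration suffices and the <linux/badblocks.h> include moves to claim.c, which needs the full definition. The general shape of the header-dependency trim (file names illustrative):

    /* foo.h: a pointer member only needs an incomplete type, so the
     * header forward-declares it and drops the include. */
    struct badblocks;                   /* incomplete type is enough */

    struct btt_like {
        struct badblocks *bb;           /* pointer member: no include */
    };

    /* foo.c: the .c file that dereferences bb pulls in the real
     * definition itself, e.g. #include <linux/badblocks.h>. */
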
diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
index 5a7c80053c62..030dbde6b088 100644
--- a/drivers/nvdimm/claim.c
+++ b/drivers/nvdimm/claim.c
@@ -4,6 +4,7 @@
*/
#include <linux/device.h>
#include <linux/sizes.h>
+#include <linux/badblocks.h>
#include "nd-core.h"
#include "pmem.h"
#include "pfn.h"
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index c21ba0602029..7de592d7eff4 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -3,7 +3,6 @@
* Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
*/
#include <linux/libnvdimm.h>
-#include <linux/badblocks.h>
#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/module.h>
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index b59032e0859b..9d208570d059 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -335,16 +335,16 @@ static ssize_t state_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(state);
-static ssize_t available_slots_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t __available_slots_show(struct nvdimm_drvdata *ndd, char *buf)
{
- struct nvdimm_drvdata *ndd = dev_get_drvdata(dev);
+ struct device *dev;
ssize_t rc;
u32 nfree;
if (!ndd)
return -ENXIO;
+ dev = ndd->dev;
nvdimm_bus_lock(dev);
nfree = nd_label_nfree(ndd);
if (nfree - 1 > nfree) {
@@ -356,6 +356,18 @@ static ssize_t available_slots_show(struct device *dev,
nvdimm_bus_unlock(dev);
return rc;
}
+
+static ssize_t available_slots_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t rc;
+
+ nd_device_lock(dev);
+ rc = __available_slots_show(dev_get_drvdata(dev), buf);
+ nd_device_unlock(dev);
+
+ return rc;
+}
static DEVICE_ATTR_RO(available_slots);
__weak ssize_t security_show(struct device *dev,
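
The available_slots_show() split is the usual lock-wrapper idiom: the wrapper takes nd_device_lock() so the drvdata fetched from the device cannot go stale mid-read, and the __-prefixed worker assumes the lock is held. A pthread model of the split, with stand-in types:

    /* Sketch of the lock-wrapper split: the wrapper owns the locking,
     * the __-prefixed worker assumes it. */
    #include <stdio.h>
    #include <pthread.h>

    static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;

    static int __show(const int *drvdata, char *buf)
    {
        if (!drvdata)                   /* driver already unbound */
            return -1;
        return sprintf(buf, "%d\n", *drvdata);
    }

    static int show(const int *drvdata, char *buf)
    {
        int rc;

        pthread_mutex_lock(&dev_lock);  /* pin drvdata for the duration */
        rc = __show(drvdata, buf);
        pthread_mutex_unlock(&dev_lock);
        return rc;
    }

    int main(void)
    {
        int data = 42;
        char buf[16];
        if (show(&data, buf) > 0)
            fputs(buf, stdout);
        return 0;
    }
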
diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
index 47a4828b8b31..9251441fd8a3 100644
--- a/drivers/nvdimm/label.c
+++ b/drivers/nvdimm/label.c
@@ -980,6 +980,15 @@ static int __blk_label_update(struct nd_region *nd_region,
}
}
+ /* release slots associated with any invalidated UUIDs */
+ mutex_lock(&nd_mapping->lock);
+ list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list)
+ if (test_and_clear_bit(ND_LABEL_REAP, &label_ent->flags)) {
+ reap_victim(nd_mapping, label_ent);
+ list_move(&label_ent->list, &list);
+ }
+ mutex_unlock(&nd_mapping->lock);
+
/*
* Find the resource associated with the first label in the set
* per the v1.2 namespace specification.
@@ -999,8 +1008,10 @@ static int __blk_label_update(struct nd_region *nd_region,
if (is_old_resource(res, old_res_list, old_num_resources))
continue; /* carry-over */
slot = nd_label_alloc_slot(ndd);
- if (slot == UINT_MAX)
+ if (slot == UINT_MAX) {
+ rc = -ENXIO;
goto abort;
+ }
dev_dbg(ndd->dev, "allocated: %d\n", slot);
nd_label = to_label(ndd, slot);
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 6da67f4d641a..2403b71b601e 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -1635,11 +1635,11 @@ static umode_t namespace_visible(struct kobject *kobj,
return a->mode;
}
- if (a == &dev_attr_nstype.attr || a == &dev_attr_size.attr
- || a == &dev_attr_holder.attr
- || a == &dev_attr_holder_class.attr
- || a == &dev_attr_force_raw.attr
- || a == &dev_attr_mode.attr)
+ /* base is_namespace_io() attributes */
+ if (a == &dev_attr_nstype.attr || a == &dev_attr_size.attr ||
+ a == &dev_attr_holder.attr || a == &dev_attr_holder_class.attr ||
+ a == &dev_attr_force_raw.attr || a == &dev_attr_mode.attr ||
+ a == &dev_attr_resource.attr)
return a->mode;
return 0;
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 875076b0ea6c..f33bdae626ba 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -23,7 +23,6 @@
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/nd.h>
-#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include "pmem.h"
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 9a270e49df17..f13eb4ded95f 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -85,7 +85,7 @@ static LIST_HEAD(nvme_subsystems);
static DEFINE_MUTEX(nvme_subsystems_lock);
static DEFINE_IDA(nvme_instance_ida);
-static dev_t nvme_chr_devt;
+static dev_t nvme_ctrl_base_chr_devt;
static struct class *nvme_class;
static struct class *nvme_subsys_class;
@@ -93,16 +93,6 @@ static void nvme_put_subsystem(struct nvme_subsystem *subsys);
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
unsigned nsid);
-static void nvme_update_bdev_size(struct gendisk *disk)
-{
- struct block_device *bdev = bdget_disk(disk, 0);
-
- if (bdev) {
- bd_set_nr_sectors(bdev, get_capacity(disk));
- bdput(bdev);
- }
-}
-
/*
* Prepare a queue for teardown.
*
@@ -119,8 +109,7 @@ static void nvme_set_queue_dying(struct nvme_ns *ns)
blk_set_queue_dying(ns->queue);
blk_mq_unquiesce_queue(ns->queue);
- set_capacity(ns->disk, 0);
- nvme_update_bdev_size(ns->disk);
+ set_capacity_and_notify(ns->disk, 0);
}
static void nvme_queue_scan(struct nvme_ctrl *ctrl)
@@ -148,6 +137,38 @@ int nvme_try_sched_reset(struct nvme_ctrl *ctrl)
}
EXPORT_SYMBOL_GPL(nvme_try_sched_reset);
+static void nvme_failfast_work(struct work_struct *work)
+{
+ struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
+ struct nvme_ctrl, failfast_work);
+
+ if (ctrl->state != NVME_CTRL_CONNECTING)
+ return;
+
+ set_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
+ dev_info(ctrl->device, "failfast expired\n");
+ nvme_kick_requeue_lists(ctrl);
+}
+
+static inline void nvme_start_failfast_work(struct nvme_ctrl *ctrl)
+{
+ if (!ctrl->opts || ctrl->opts->fast_io_fail_tmo == -1)
+ return;
+
+ schedule_delayed_work(&ctrl->failfast_work,
+ ctrl->opts->fast_io_fail_tmo * HZ);
+}
+
+static inline void nvme_stop_failfast_work(struct nvme_ctrl *ctrl)
+{
+ if (!ctrl->opts)
+ return;
+
+ cancel_delayed_work_sync(&ctrl->failfast_work);
+ clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
+}
+
+
int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
{
if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
@@ -158,7 +179,7 @@ int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl);
-int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
+static int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
{
int ret;
@@ -171,7 +192,6 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
return ret;
}
-EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync);
static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
{
@@ -310,7 +330,7 @@ static inline void nvme_end_req(struct request *req)
req->__sector = nvme_lba_to_sect(req->q->queuedata,
le64_to_cpu(nvme_req(req)->result.u64));
- nvme_trace_bio_complete(req, status);
+ nvme_trace_bio_complete(req);
blk_mq_end_request(req, status);
}
@@ -433,8 +453,17 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
}
spin_unlock_irqrestore(&ctrl->lock, flags);
- if (changed && ctrl->state == NVME_CTRL_LIVE)
+ if (!changed)
+ return false;
+
+ if (ctrl->state == NVME_CTRL_LIVE) {
+ if (old_state == NVME_CTRL_CONNECTING)
+ nvme_stop_failfast_work(ctrl);
nvme_kick_requeue_lists(ctrl);
+ } else if (ctrl->state == NVME_CTRL_CONNECTING &&
+ old_state == NVME_CTRL_RESETTING) {
+ nvme_start_failfast_work(ctrl);
+ }
return changed;
}
EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
@@ -518,29 +547,48 @@ static inline void nvme_clear_nvme_request(struct request *req)
}
}
-struct request *nvme_alloc_request(struct request_queue *q,
- struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
+static inline unsigned int nvme_req_op(struct nvme_command *cmd)
{
- unsigned op = nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
- struct request *req;
+ return nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
+}
- if (qid == NVME_QID_ANY) {
- req = blk_mq_alloc_request(q, op, flags);
- } else {
- req = blk_mq_alloc_request_hctx(q, op, flags,
- qid ? qid - 1 : 0);
- }
- if (IS_ERR(req))
- return req;
+static inline void nvme_init_request(struct request *req,
+ struct nvme_command *cmd)
+{
+ if (req->q->queuedata)
+ req->timeout = NVME_IO_TIMEOUT;
+ else /* no queuedata implies admin queue */
+ req->timeout = NVME_ADMIN_TIMEOUT;
req->cmd_flags |= REQ_FAILFAST_DRIVER;
nvme_clear_nvme_request(req);
nvme_req(req)->cmd = cmd;
+}
+struct request *nvme_alloc_request(struct request_queue *q,
+ struct nvme_command *cmd, blk_mq_req_flags_t flags)
+{
+ struct request *req;
+
+ req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
+ if (!IS_ERR(req))
+ nvme_init_request(req, cmd);
return req;
}
EXPORT_SYMBOL_GPL(nvme_alloc_request);
+static struct request *nvme_alloc_request_qid(struct request_queue *q,
+ struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
+{
+ struct request *req;
+
+ req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags,
+ qid ? qid - 1 : 0);
+ if (!IS_ERR(req))
+ nvme_init_request(req, cmd);
+ return req;
+}
+
static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable)
{
struct nvme_command c;
@@ -897,11 +945,15 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
struct request *req;
int ret;
- req = nvme_alloc_request(q, cmd, flags, qid);
+ if (qid == NVME_QID_ANY)
+ req = nvme_alloc_request(q, cmd, flags);
+ else
+ req = nvme_alloc_request_qid(q, cmd, flags, qid);
if (IS_ERR(req))
return PTR_ERR(req);
- req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
+ if (timeout)
+ req->timeout = timeout;
if (buffer && bufflen) {
ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
@@ -1067,11 +1119,12 @@ static int nvme_submit_user_cmd(struct request_queue *q,
void *meta = NULL;
int ret;
- req = nvme_alloc_request(q, cmd, 0, NVME_QID_ANY);
+ req = nvme_alloc_request(q, cmd, 0);
if (IS_ERR(req))
return PTR_ERR(req);
- req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
+ if (timeout)
+ req->timeout = timeout;
nvme_req(req)->flags |= NVME_REQ_USERCMD;
if (ubuffer && bufflen) {
@@ -1141,8 +1194,8 @@ static int nvme_keep_alive(struct nvme_ctrl *ctrl)
{
struct request *rq;
- rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd, BLK_MQ_REQ_RESERVED,
- NVME_QID_ANY);
+ rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd,
+ BLK_MQ_REQ_RESERVED);
if (IS_ERR(rq))
return PTR_ERR(rq);
@@ -1302,7 +1355,8 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
NVME_IDENTIFY_DATA_SIZE);
if (status) {
dev_warn(ctrl->device,
- "Identify Descriptors failed (%d)\n", status);
+ "Identify Descriptors failed (nsid=%u, status=0x%x)\n",
+ nsid, status);
goto free_data;
}
@@ -1489,8 +1543,21 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
}
length = (io.nblocks + 1) << ns->lba_shift;
- meta_len = (io.nblocks + 1) * ns->ms;
- metadata = nvme_to_user_ptr(io.metadata);
+
+ if ((io.control & NVME_RW_PRINFO_PRACT) &&
+ ns->ms == sizeof(struct t10_pi_tuple)) {
+ /*
+ * Protection information is stripped/inserted by the
+ * controller.
+ */
+ if (nvme_to_user_ptr(io.metadata))
+ return -EINVAL;
+ meta_len = 0;
+ metadata = NULL;
+ } else {
+ meta_len = (io.nblocks + 1) * ns->ms;
+ metadata = nvme_to_user_ptr(io.metadata);
+ }
if (ns->features & NVME_NS_EXT_LBAS) {
length += meta_len;
@@ -2053,12 +2120,13 @@ static void nvme_update_disk_info(struct gendisk *disk,
capacity = 0;
}
- set_capacity_revalidate_and_notify(disk, capacity, false);
+ set_capacity_and_notify(disk, capacity);
nvme_config_discard(disk, ns);
nvme_config_write_zeroes(disk, ns);
- if (id->nsattr & NVME_NS_ATTR_RO)
+ if ((id->nsattr & NVME_NS_ATTR_RO) ||
+ test_bit(NVME_NS_FORCE_RO, &ns->flags))
set_disk_ro(disk, true);
}
@@ -2134,7 +2202,6 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
blk_stack_limits(&ns->head->disk->queue->limits,
&ns->queue->limits, 0);
blk_queue_update_readahead(ns->head->disk->queue);
- nvme_update_bdev_size(ns->head->disk);
blk_mq_unfreeze_queue(ns->head->disk->queue);
}
#endif
@@ -2261,13 +2328,13 @@ int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8);
cmd.common.cdw11 = cpu_to_le32(len);
- return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
- ADMIN_TIMEOUT, NVME_QID_ANY, 1, 0, false);
+ return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len, 0,
+ NVME_QID_ANY, 1, 0, false);
}
EXPORT_SYMBOL_GPL(nvme_sec_submit);
#endif /* CONFIG_BLK_SED_OPAL */
-static const struct block_device_operations nvme_fops = {
+static const struct block_device_operations nvme_bdev_ops = {
.owner = THIS_MODULE,
.ioctl = nvme_ioctl,
.compat_ioctl = nvme_compat_ioctl,
@@ -2802,6 +2869,11 @@ static const struct attribute_group *nvme_subsys_attrs_groups[] = {
NULL,
};
+static inline bool nvme_discovery_ctrl(struct nvme_ctrl *ctrl)
+{
+ return ctrl->opts && ctrl->opts->discovery_nqn;
+}
+
static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
@@ -2821,7 +2893,7 @@ static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
}
if ((id->cmic & NVME_CTRL_CMIC_MULTI_CTRL) ||
- (ctrl->opts && ctrl->opts->discovery_nqn))
+ nvme_discovery_ctrl(ctrl))
continue;
dev_err(ctrl->device,
@@ -3090,7 +3162,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
goto out_free;
}
- if (!ctrl->opts->discovery_nqn && !ctrl->kas) {
+ if (!nvme_discovery_ctrl(ctrl) && !ctrl->kas) {
dev_err(ctrl->device,
"keep-alive support is mandatory for fabrics\n");
ret = -EINVAL;
@@ -3130,7 +3202,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
if (ret < 0)
return ret;
- if (!ctrl->identified) {
+ if (!ctrl->identified && !nvme_discovery_ctrl(ctrl)) {
ret = nvme_hwmon_init(ctrl);
if (ret < 0)
return ret;
@@ -3274,7 +3346,7 @@ static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev)
{
struct gendisk *disk = dev_to_disk(dev);
- if (disk->fops == &nvme_fops)
+ if (disk->fops == &nvme_bdev_ops)
return nvme_get_ns_from_dev(dev)->head;
else
return disk->private_data;
@@ -3383,7 +3455,7 @@ static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj,
}
#ifdef CONFIG_NVME_MULTIPATH
if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) {
- if (dev_to_disk(dev)->fops != &nvme_fops) /* per-path attr */
+ if (dev_to_disk(dev)->fops != &nvme_bdev_ops) /* per-path attr */
return 0;
if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl))
return 0;
@@ -3757,7 +3829,7 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
}
}
- list_add_tail(&ns->siblings, &head->list);
+ list_add_tail_rcu(&ns->siblings, &head->list);
ns->head = head;
mutex_unlock(&ctrl->subsys->lock);
return 0;
@@ -3804,7 +3876,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
struct gendisk *disk;
struct nvme_id_ns *id;
char disk_name[DISK_NAME_LEN];
- int node = ctrl->numa_node, flags = GENHD_FL_EXT_DEVT, ret;
+ int node = ctrl->numa_node, flags = GENHD_FL_EXT_DEVT;
if (nvme_identify_ns(ctrl, nsid, ids, &id))
return;
@@ -3828,8 +3900,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
ns->ctrl = ctrl;
kref_init(&ns->kref);
- ret = nvme_init_ns_head(ns, nsid, ids, id->nmic & NVME_NS_NMIC_SHARED);
- if (ret)
+ if (nvme_init_ns_head(ns, nsid, ids, id->nmic & NVME_NS_NMIC_SHARED))
goto out_free_queue;
nvme_set_disk_name(disk_name, ns, ctrl, &flags);
@@ -3837,7 +3908,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
if (!disk)
goto out_unlink_ns;
- disk->fops = &nvme_fops;
+ disk->fops = &nvme_bdev_ops;
disk->private_data = ns;
disk->queue = ns->queue;
disk->flags = flags;
@@ -3848,8 +3919,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
goto out_put_disk;
if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) {
- ret = nvme_nvm_register(ns, disk_name, node);
- if (ret) {
+ if (nvme_nvm_register(ns, disk_name, node)) {
dev_warn(ctrl->device, "LightNVM init failure\n");
goto out_put_disk;
}
@@ -3962,8 +4032,6 @@ out:
*/
if (ret && ret != -ENOMEM && !(ret > 0 && !(ret & NVME_SC_DNR)))
nvme_ns_remove(ns);
- else
- revalidate_disk_size(ns->disk, true);
}
static void nvme_validate_or_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
@@ -4042,8 +4110,11 @@ static int nvme_scan_ns_list(struct nvme_ctrl *ctrl)
ret = nvme_submit_sync_cmd(ctrl->admin_q, &cmd, ns_list,
NVME_IDENTIFY_DATA_SIZE);
- if (ret)
+ if (ret) {
+ dev_warn(ctrl->device,
+ "Identify NS List failed (status=0x%x)\n", ret);
goto free;
+ }
for (i = 0; i < nr_entries; i++) {
u32 nsid = le32_to_cpu(ns_list[i]);
@@ -4346,6 +4417,7 @@ void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
{
nvme_mpath_stop(ctrl);
nvme_stop_keep_alive(ctrl);
+ nvme_stop_failfast_work(ctrl);
flush_work(&ctrl->async_event_work);
cancel_work_sync(&ctrl->fw_act_work);
}
@@ -4423,6 +4495,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
int ret;
ctrl->state = NVME_CTRL_NEW;
+ clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
spin_lock_init(&ctrl->lock);
mutex_init(&ctrl->scan_lock);
INIT_LIST_HEAD(&ctrl->namespaces);
@@ -4439,6 +4512,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
init_waitqueue_head(&ctrl->state_wq);
INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
+ INIT_DELAYED_WORK(&ctrl->failfast_work, nvme_failfast_work);
memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;
@@ -4457,7 +4531,8 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
device_initialize(&ctrl->ctrl_device);
ctrl->device = &ctrl->ctrl_device;
- ctrl->device->devt = MKDEV(MAJOR(nvme_chr_devt), ctrl->instance);
+ ctrl->device->devt = MKDEV(MAJOR(nvme_ctrl_base_chr_devt),
+ ctrl->instance);
ctrl->device->class = nvme_class;
ctrl->device->parent = ctrl->dev;
ctrl->device->groups = nvme_dev_attr_groups;
@@ -4666,7 +4741,8 @@ static int __init nvme_core_init(void)
if (!nvme_delete_wq)
goto destroy_reset_wq;
- result = alloc_chrdev_region(&nvme_chr_devt, 0, NVME_MINORS, "nvme");
+ result = alloc_chrdev_region(&nvme_ctrl_base_chr_devt, 0,
+ NVME_MINORS, "nvme");
if (result < 0)
goto destroy_delete_wq;
@@ -4687,7 +4763,7 @@ static int __init nvme_core_init(void)
destroy_class:
class_destroy(nvme_class);
unregister_chrdev:
- unregister_chrdev_region(nvme_chr_devt, NVME_MINORS);
+ unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS);
destroy_delete_wq:
destroy_workqueue(nvme_delete_wq);
destroy_reset_wq:
@@ -4702,7 +4778,7 @@ static void __exit nvme_core_exit(void)
{
class_destroy(nvme_subsys_class);
class_destroy(nvme_class);
- unregister_chrdev_region(nvme_chr_devt, NVME_MINORS);
+ unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS);
destroy_workqueue(nvme_delete_wq);
destroy_workqueue(nvme_reset_wq);
destroy_workqueue(nvme_wq);
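
Taken together, the core.c hunks arm a delayed work when a controller moves RESETTING -> CONNECTING and disarm it on CONNECTING -> LIVE; if it fires while the controller is still CONNECTING, it sets NVME_CTRL_FAILFAST_EXPIRED so pending I/O fails over instead of waiting out ctrl_loss_tmo (the fabrics.c hunks below add the fast_io_fail_tmo option, default -1 = disabled). A condensed model of the arming logic:

    /* Condensed model of the failfast arming in nvme_change_ctrl_state():
     * flags stand in for the delayed work and the EXPIRED bit. */
    #include <stdio.h>

    enum state { NEW, LIVE, RESETTING, CONNECTING };

    static int failfast_armed, failfast_expired;

    static void on_state_change(enum state old_state, enum state new_state)
    {
        if (new_state == LIVE) {
            if (old_state == CONNECTING) {
                failfast_armed = 0;     /* cancel_delayed_work_sync() */
                failfast_expired = 0;   /* clear_bit(FAILFAST_EXPIRED) */
            }
        } else if (new_state == CONNECTING && old_state == RESETTING) {
            failfast_armed = 1;         /* schedule_delayed_work(tmo) */
        }
    }

    static void failfast_work_fires(enum state cur)
    {
        if (failfast_armed && cur == CONNECTING)
            failfast_expired = 1;       /* fail pending I/O fast */
    }

    int main(void)
    {
        on_state_change(RESETTING, CONNECTING);
        failfast_work_fires(CONNECTING);
        printf("expired=%d\n", failfast_expired);   /* 1 */
        on_state_change(CONNECTING, LIVE);
        printf("expired=%d\n", failfast_expired);   /* 0 */
        return 0;
    }
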
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 8575724734e0..72ac00173500 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -549,6 +549,7 @@ blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
{
if (ctrl->state != NVME_CTRL_DELETING_NOIO &&
ctrl->state != NVME_CTRL_DEAD &&
+ !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) &&
!blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
return BLK_STS_RESOURCE;
@@ -615,6 +616,7 @@ static const match_table_t opt_tokens = {
{ NVMF_OPT_NR_WRITE_QUEUES, "nr_write_queues=%d" },
{ NVMF_OPT_NR_POLL_QUEUES, "nr_poll_queues=%d" },
{ NVMF_OPT_TOS, "tos=%d" },
+ { NVMF_OPT_FAIL_FAST_TMO, "fast_io_fail_tmo=%d" },
{ NVMF_OPT_ERR, NULL }
};
@@ -634,6 +636,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
opts->reconnect_delay = NVMF_DEF_RECONNECT_DELAY;
opts->kato = NVME_DEFAULT_KATO;
opts->duplicate_connect = false;
+ opts->fast_io_fail_tmo = NVMF_DEF_FAIL_FAST_TMO;
opts->hdr_digest = false;
opts->data_digest = false;
opts->tos = -1; /* < 0 == use transport default */
@@ -754,6 +757,17 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
pr_warn("ctrl_loss_tmo < 0 will reconnect forever\n");
ctrl_loss_tmo = token;
break;
+ case NVMF_OPT_FAIL_FAST_TMO:
+ if (match_int(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (token >= 0)
+ pr_warn("I/O fail on reconnect controller after %d sec\n",
+ token);
+ opts->fast_io_fail_tmo = token;
+ break;
case NVMF_OPT_HOSTNQN:
if (opts->host) {
pr_err("hostnqn already user-assigned: %s\n",
@@ -884,11 +898,15 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
opts->nr_poll_queues = 0;
opts->duplicate_connect = true;
}
- if (ctrl_loss_tmo < 0)
+ if (ctrl_loss_tmo < 0) {
opts->max_reconnects = -1;
- else
+ } else {
opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
opts->reconnect_delay);
+ if (ctrl_loss_tmo < opts->fast_io_fail_tmo)
+ pr_warn("failfast tmo (%d) larger than controller loss tmo (%d)\n",
+ opts->fast_io_fail_tmo, ctrl_loss_tmo);
+ }
if (!opts->host) {
kref_get(&nvmf_default_host->ref);
@@ -988,7 +1006,8 @@ EXPORT_SYMBOL_GPL(nvmf_free_options);
#define NVMF_ALLOWED_OPTS (NVMF_OPT_QUEUE_SIZE | NVMF_OPT_NR_IO_QUEUES | \
NVMF_OPT_KATO | NVMF_OPT_HOSTNQN | \
NVMF_OPT_HOST_ID | NVMF_OPT_DUP_CONNECT |\
- NVMF_OPT_DISABLE_SQFLOW)
+ NVMF_OPT_DISABLE_SQFLOW |\
+ NVMF_OPT_FAIL_FAST_TMO)
static struct nvme_ctrl *
nvmf_create_ctrl(struct device *dev, const char *buf)
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index a9c1e3b4585e..733010d2eafd 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -15,6 +15,8 @@
#define NVMF_DEF_RECONNECT_DELAY 10
/* default to 600 seconds of reconnect attempts before giving up */
#define NVMF_DEF_CTRL_LOSS_TMO 600
+/* default is -1: the fail fast mechanism is disabled */
+#define NVMF_DEF_FAIL_FAST_TMO -1
/*
* Define a host as seen by the target. We allocate one at boot, but also
@@ -56,6 +58,7 @@ enum {
NVMF_OPT_NR_WRITE_QUEUES = 1 << 17,
NVMF_OPT_NR_POLL_QUEUES = 1 << 18,
NVMF_OPT_TOS = 1 << 19,
+ NVMF_OPT_FAIL_FAST_TMO = 1 << 20,
};
/**
@@ -89,6 +92,7 @@ enum {
* @nr_write_queues: number of queues for write I/O
* @nr_poll_queues: number of queues for polling I/O
* @tos: type of service
+ * @fast_io_fail_tmo: Fast I/O fail timeout in seconds
*/
struct nvmf_ctrl_options {
unsigned mask;
@@ -111,6 +115,7 @@ struct nvmf_ctrl_options {
unsigned int nr_write_queues;
unsigned int nr_poll_queues;
int tos;
+ int fast_io_fail_tmo;
};
/*
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index f4c246462658..5f36cfa8136c 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -166,6 +166,7 @@ struct nvme_fc_ctrl {
struct blk_mq_tag_set admin_tag_set;
struct blk_mq_tag_set tag_set;
+ struct work_struct ioerr_work;
struct delayed_work connect_work;
struct kref ref;
@@ -1889,6 +1890,15 @@ __nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
}
static void
+nvme_fc_ctrl_ioerr_work(struct work_struct *work)
+{
+ struct nvme_fc_ctrl *ctrl =
+ container_of(work, struct nvme_fc_ctrl, ioerr_work);
+
+ nvme_fc_error_recovery(ctrl, "transport detected io error");
+}
+
+static void
nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
{
struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
@@ -2046,7 +2056,7 @@ done:
check_error:
if (terminate_assoc)
- nvme_fc_error_recovery(ctrl, "transport detected io error");
+ queue_work(nvme_reset_wq, &ctrl->ioerr_work);
}
static int
@@ -3233,6 +3243,7 @@ nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
{
struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
+ cancel_work_sync(&ctrl->ioerr_work);
cancel_delayed_work_sync(&ctrl->connect_work);
/*
* kill the association on the link side. this will block
@@ -3449,6 +3460,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
+ INIT_WORK(&ctrl->ioerr_work, nvme_fc_ctrl_ioerr_work);
spin_lock_init(&ctrl->lock);
/* io queue count */
@@ -3479,7 +3491,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
ctrl->lport->ops->fcprqst_priv_sz);
ctrl->admin_tag_set.driver_data = ctrl;
ctrl->admin_tag_set.nr_hw_queues = 1;
- ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
+ ctrl->admin_tag_set.timeout = NVME_ADMIN_TIMEOUT;
ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;
ret = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
@@ -3540,6 +3552,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
fail_ctrl:
nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
+ cancel_work_sync(&ctrl->ioerr_work);
cancel_work_sync(&ctrl->ctrl.reset_work);
cancel_delayed_work_sync(&ctrl->connect_work);
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 8e562d0f2c30..470cef3abec3 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -653,7 +653,7 @@ static struct request *nvme_nvm_alloc_request(struct request_queue *q,
nvme_nvm_rqtocmd(rqd, ns, cmd);
- rq = nvme_alloc_request(q, (struct nvme_command *)cmd, 0, NVME_QID_ANY);
+ rq = nvme_alloc_request(q, (struct nvme_command *)cmd, 0);
if (IS_ERR(rq))
return rq;
@@ -767,14 +767,14 @@ static int nvme_nvm_submit_user_cmd(struct request_queue *q,
DECLARE_COMPLETION_ONSTACK(wait);
int ret = 0;
- rq = nvme_alloc_request(q, (struct nvme_command *)vcmd, 0,
- NVME_QID_ANY);
+ rq = nvme_alloc_request(q, (struct nvme_command *)vcmd, 0);
if (IS_ERR(rq)) {
ret = -ENOMEM;
goto err_cmd;
}
- rq->timeout = timeout ? timeout : ADMIN_TIMEOUT;
+ if (timeout)
+ rq->timeout = timeout;
if (ppa_buf && ppa_len) {
ppa_list = dma_pool_alloc(dev->dma_pool, GFP_KERNEL, &ppa_dma);
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 74896be40c17..282b7a4ea9a9 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -221,7 +221,7 @@ static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head,
}
for (ns = nvme_next_ns(head, old);
- ns != old;
+ ns && ns != old;
ns = nvme_next_ns(head, ns)) {
if (nvme_path_is_disabled(ns))
continue;
@@ -279,6 +279,8 @@ static bool nvme_available_path(struct nvme_ns_head *head)
struct nvme_ns *ns;
list_for_each_entry_rcu(ns, &head->list, siblings) {
+ if (test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ns->ctrl->flags))
+ continue;
switch (ns->ctrl->state) {
case NVME_CTRL_LIVE:
case NVME_CTRL_RESETTING:
@@ -312,8 +314,7 @@ blk_qc_t nvme_ns_head_submit_bio(struct bio *bio)
if (likely(ns)) {
bio->bi_disk = ns->disk;
bio->bi_opf |= REQ_NVME_MPATH;
- trace_block_bio_remap(bio->bi_disk->queue, bio,
- disk_devt(ns->head->disk),
+ trace_block_bio_remap(bio, disk_devt(ns->head->disk),
bio->bi_iter.bi_sector);
ret = submit_bio_noacct(bio);
} else if (nvme_available_path(head)) {
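
The `ns && ns != old` guard implies nvme_next_ns() can return NULL while the head list shrinks under the RCU readers (which is also why core.c switched the siblings insertion to list_add_tail_rcu()). A plain-C model of the NULL-tolerant circular walk:

    /* Model of the round-robin fix: a cyclic successor that can return
     * NULL must be guarded in the loop condition, or the walk
     * dereferences NULL when the list empties under it. */
    #include <stdio.h>
    #include <stddef.h>

    struct ns { int id; struct ns *next; }; /* next == NULL: list mutated */

    static struct ns *next_ns(struct ns *cur) { return cur->next; }

    static struct ns *round_robin(struct ns *old)
    {
        for (struct ns *ns = next_ns(old); ns && ns != old;
             ns = next_ns(ns))
            if (ns->id >= 0)            /* stand-in for "path usable" */
                return ns;
        return NULL;                    /* no alternative path */
    }

    int main(void)
    {
        struct ns a = { .id = 0 }, b = { .id = 1 };
        a.next = &b;
        b.next = NULL;                  /* cycle broken mid-walk */
        printf("%p\n", (void *)round_robin(&a)); /* &b, no crash */
        return 0;
    }
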
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 567f7ad18a91..88a6b97247f5 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -24,7 +24,7 @@ extern unsigned int nvme_io_timeout;
#define NVME_IO_TIMEOUT (nvme_io_timeout * HZ)
extern unsigned int admin_timeout;
-#define ADMIN_TIMEOUT (admin_timeout * HZ)
+#define NVME_ADMIN_TIMEOUT (admin_timeout * HZ)
#define NVME_DEFAULT_KATO 5
#define NVME_KATO_GRACE 10
@@ -178,7 +178,8 @@ static inline u16 nvme_req_qid(struct request *req)
{
if (!req->q->queuedata)
return 0;
- return blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(req)) + 1;
+
+ return req->mq_hctx->queue_num + 1;
}
/* The below value is the specific amount of delay needed before checking
@@ -298,6 +299,7 @@ struct nvme_ctrl {
struct work_struct scan_work;
struct work_struct async_event_work;
struct delayed_work ka_work;
+ struct delayed_work failfast_work;
struct nvme_command ka_cmd;
struct work_struct fw_act_work;
unsigned long events;
@@ -331,6 +333,8 @@ struct nvme_ctrl {
u16 icdoff;
u16 maxcmd;
int nr_reconnects;
+ unsigned long flags;
+#define NVME_CTRL_FAILFAST_EXPIRED 0
struct nvmf_ctrl_options *opts;
struct page *discard_page;
@@ -442,6 +446,7 @@ struct nvme_ns {
#define NVME_NS_REMOVING 0
#define NVME_NS_DEAD 1
#define NVME_NS_ANA_PENDING 2
+#define NVME_NS_FORCE_RO 3
struct nvme_fault_inject fault_inject;
@@ -604,7 +609,7 @@ void nvme_start_freeze(struct nvme_ctrl *ctrl);
#define NVME_QID_ANY -1
struct request *nvme_alloc_request(struct request_queue *q,
- struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid);
+ struct nvme_command *cmd, blk_mq_req_flags_t flags);
void nvme_cleanup_cmd(struct request *req);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
struct nvme_command *cmd);
@@ -623,7 +628,6 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
-int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
@@ -668,8 +672,7 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
kblockd_schedule_work(&head->requeue_work);
}
-static inline void nvme_trace_bio_complete(struct request *req,
- blk_status_t status)
+static inline void nvme_trace_bio_complete(struct request *req)
{
struct nvme_ns *ns = req->q->queuedata;
@@ -724,8 +727,7 @@ static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
{
}
-static inline void nvme_trace_bio_complete(struct request *req,
- blk_status_t status)
+static inline void nvme_trace_bio_complete(struct request *req)
{
}
static inline int nvme_mpath_init(struct nvme_ctrl *ctrl,
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 3be352403839..6bad4d4dcdf0 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -23,6 +23,7 @@
#include <linux/t10-pi.h>
#include <linux/types.h>
#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/sed-opal.h>
#include <linux/pci-p2pdma.h>
@@ -542,50 +543,71 @@ static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
return true;
}
-static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
+static void nvme_free_prps(struct nvme_dev *dev, struct request *req)
{
- struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
- dma_addr_t dma_addr = iod->first_dma, next_dma_addr;
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ dma_addr_t dma_addr = iod->first_dma;
int i;
- if (iod->dma_len) {
- dma_unmap_page(dev->dev, dma_addr, iod->dma_len,
- rq_dma_dir(req));
- return;
+ for (i = 0; i < iod->npages; i++) {
+ __le64 *prp_list = nvme_pci_iod_list(req)[i];
+ dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]);
+
+ dma_pool_free(dev->prp_page_pool, prp_list, dma_addr);
+ dma_addr = next_dma_addr;
}
- WARN_ON_ONCE(!iod->nents);
+}
- if (is_pci_p2pdma_page(sg_page(iod->sg)))
- pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents,
- rq_dma_dir(req));
- else
- dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));
+static void nvme_free_sgls(struct nvme_dev *dev, struct request *req)
+{
+ const int last_sg = SGES_PER_PAGE - 1;
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ dma_addr_t dma_addr = iod->first_dma;
+ int i;
+ for (i = 0; i < iod->npages; i++) {
+ struct nvme_sgl_desc *sg_list = nvme_pci_iod_list(req)[i];
+ dma_addr_t next_dma_addr = le64_to_cpu((sg_list[last_sg]).addr);
- if (iod->npages == 0)
- dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
- dma_addr);
+ dma_pool_free(dev->prp_page_pool, sg_list, dma_addr);
+ dma_addr = next_dma_addr;
+ }
- for (i = 0; i < iod->npages; i++) {
- void *addr = nvme_pci_iod_list(req)[i];
+}
- if (iod->use_sgl) {
- struct nvme_sgl_desc *sg_list = addr;
+static void nvme_unmap_sg(struct nvme_dev *dev, struct request *req)
+{
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- next_dma_addr =
- le64_to_cpu((sg_list[SGES_PER_PAGE - 1]).addr);
- } else {
- __le64 *prp_list = addr;
+ if (is_pci_p2pdma_page(sg_page(iod->sg)))
+ pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents,
+ rq_dma_dir(req));
+ else
+ dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));
+}
- next_dma_addr = le64_to_cpu(prp_list[last_prp]);
- }
+static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
+{
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- dma_pool_free(dev->prp_page_pool, addr, dma_addr);
- dma_addr = next_dma_addr;
+ if (iod->dma_len) {
+ dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len,
+ rq_dma_dir(req));
+ return;
}
+ WARN_ON_ONCE(!iod->nents);
+
+ nvme_unmap_sg(dev, req);
+ if (iod->npages == 0)
+ dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
+ iod->first_dma);
+ else if (iod->use_sgl)
+ nvme_free_sgls(dev, req);
+ else
+ nvme_free_prps(dev, req);
mempool_free(iod->sg, dev->iod_mempool);
}
@@ -661,7 +683,7 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
__le64 *old_prp_list = prp_list;
prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
if (!prp_list)
- return BLK_STS_RESOURCE;
+ goto free_prps;
list[iod->npages++] = prp_list;
prp_list[0] = old_prp_list[i - 1];
old_prp_list[i - 1] = cpu_to_le64(prp_dma);
@@ -681,14 +703,14 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
dma_addr = sg_dma_address(sg);
dma_len = sg_dma_len(sg);
}
-
done:
cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
-
return BLK_STS_OK;
-
- bad_sgl:
+free_prps:
+ nvme_free_prps(dev, req);
+ return BLK_STS_RESOURCE;
+bad_sgl:
WARN(DO_ONCE(nvme_print_sgl, iod->sg, iod->nents),
"Invalid SGL for payload:%d nents:%d\n",
blk_rq_payload_bytes(req), iod->nents);
@@ -760,7 +782,7 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
if (!sg_list)
- return BLK_STS_RESOURCE;
+ goto free_sgls;
i = 0;
nvme_pci_iod_list(req)[iod->npages++] = sg_list;
@@ -773,6 +795,9 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
} while (--entries > 0);
return BLK_STS_OK;
+free_sgls:
+ nvme_free_sgls(dev, req);
+ return BLK_STS_RESOURCE;
}
static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev,
@@ -841,7 +866,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
if (!iod->nents)
- goto out;
+ goto out_free_sg;
if (is_pci_p2pdma_page(sg_page(iod->sg)))
nr_mapped = pci_p2pdma_map_sg_attrs(dev->dev, iod->sg,
@@ -850,16 +875,21 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents,
rq_dma_dir(req), DMA_ATTR_NO_WARN);
if (!nr_mapped)
- goto out;
+ goto out_free_sg;
iod->use_sgl = nvme_pci_use_sgls(dev, req);
if (iod->use_sgl)
ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped);
else
ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
-out:
if (ret != BLK_STS_OK)
- nvme_unmap_data(dev, req);
+ goto out_unmap_sg;
+ return BLK_STS_OK;
+
+out_unmap_sg:
+ nvme_unmap_sg(dev, req);
+out_free_sg:
+ mempool_free(iod->sg, dev->iod_mempool);
return ret;
}
@@ -967,6 +997,7 @@ static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq)
static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
{
struct nvme_completion *cqe = &nvmeq->cqes[idx];
+ __u16 command_id = READ_ONCE(cqe->command_id);
struct request *req;
/*
@@ -975,17 +1006,17 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
* aborts. We don't even bother to allocate a struct request
* for them but rather special case them here.
*/
- if (unlikely(nvme_is_aen_req(nvmeq->qid, cqe->command_id))) {
+ if (unlikely(nvme_is_aen_req(nvmeq->qid, command_id))) {
nvme_complete_async_event(&nvmeq->dev->ctrl,
cqe->status, &cqe->result);
return;
}
- req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), cqe->command_id);
+ req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), command_id);
if (unlikely(!req)) {
dev_warn(nvmeq->dev->ctrl.device,
"invalid id %d completed on queue %d\n",
- cqe->command_id, le16_to_cpu(cqe->sq_id));
+ command_id, le16_to_cpu(cqe->sq_id));
return;
}
@@ -1319,13 +1350,12 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
req->tag, nvmeq->qid);
abort_req = nvme_alloc_request(dev->ctrl.admin_q, &cmd,
- BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
+ BLK_MQ_REQ_NOWAIT);
if (IS_ERR(abort_req)) {
atomic_inc(&dev->ctrl.abort_limit);
return BLK_EH_RESET_TIMER;
}
- abort_req->timeout = ADMIN_TIMEOUT;
abort_req->end_io_data = NULL;
blk_execute_rq_nowait(abort_req->q, NULL, abort_req, 0, abort_endio);
@@ -1622,7 +1652,7 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev)
dev->admin_tagset.nr_hw_queues = 1;
dev->admin_tagset.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
- dev->admin_tagset.timeout = ADMIN_TIMEOUT;
+ dev->admin_tagset.timeout = NVME_ADMIN_TIMEOUT;
dev->admin_tagset.numa_node = dev->ctrl.numa_node;
dev->admin_tagset.cmd_size = sizeof(struct nvme_iod);
dev->admin_tagset.flags = BLK_MQ_F_NO_SCHED;
@@ -1795,6 +1825,9 @@ static void nvme_map_cmb(struct nvme_dev *dev)
if (dev->cmb_size)
return;
+ if (NVME_CAP_CMBS(dev->ctrl.cap))
+ writel(NVME_CMBMSC_CRE, dev->bar + NVME_REG_CMBMSC);
+
dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
if (!dev->cmbsz)
return;
@@ -1809,6 +1842,16 @@ static void nvme_map_cmb(struct nvme_dev *dev)
return;
/*
+ * Tell the controller about the host side address mapping the CMB,
+ * and enable CMB decoding for the NVMe 1.4+ scheme:
+ */
+ if (NVME_CAP_CMBS(dev->ctrl.cap)) {
+ hi_lo_writeq(NVME_CMBMSC_CRE | NVME_CMBMSC_CMSE |
+ (pci_bus_address(pdev, bar) + offset),
+ dev->bar + NVME_REG_CMBMSC);
+ }
+
+ /*
* Controllers may support a CMB size larger than their BAR,
* for example, due to being behind a bridge. Reduce the CMB to
* the reported size of the BAR
@@ -2104,6 +2147,12 @@ static void nvme_disable_io_queues(struct nvme_dev *dev)
static unsigned int nvme_max_io_queues(struct nvme_dev *dev)
{
+ /*
+ * If tags are shared with admin queue (Apple bug), then
+ * make sure we only use one IO queue.
+ */
+ if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS)
+ return 1;
return num_possible_cpus() + dev->nr_write_queues + dev->nr_poll_queues;
}
@@ -2122,16 +2171,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
dev->nr_write_queues = write_queues;
dev->nr_poll_queues = poll_queues;
- /*
- * If tags are shared with admin queue (Apple bug), then
- * make sure we only use one IO queue.
- */
- if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS)
- nr_io_queues = 1;
- else
- nr_io_queues = min(nvme_max_io_queues(dev),
- dev->nr_allocated_queues - 1);
-
+ nr_io_queues = dev->nr_allocated_queues - 1;
result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
if (result < 0)
return result;
@@ -2234,11 +2274,10 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
cmd.delete_queue.opcode = opcode;
cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid);
- req = nvme_alloc_request(q, &cmd, BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
+ req = nvme_alloc_request(q, &cmd, BLK_MQ_REQ_NOWAIT);
if (IS_ERR(req))
return PTR_ERR(req);
- req->timeout = ADMIN_TIMEOUT;
req->end_io_data = nvmeq;
init_completion(&nvmeq->delete_done);
@@ -2254,7 +2293,7 @@ static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode)
unsigned long timeout;
retry:
- timeout = ADMIN_TIMEOUT;
+ timeout = NVME_ADMIN_TIMEOUT;
while (nr_queues > 0) {
if (nvme_delete_queue(&dev->queues[nr_queues], opcode))
break;
@@ -3201,7 +3240,10 @@ static const struct pci_device_id nvme_id_table[] = {
{ PCI_DEVICE(0x144d, 0xa821), /* Samsung PM1725 */
.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
{ PCI_DEVICE(0x144d, 0xa822), /* Samsung PM1725a */
- .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
+ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
+ NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+ { PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */
+ .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_DEVICE(0x1d1d, 0x1f1f), /* LighNVM qemu device */
.driver_data = NVME_QUIRK_LIGHTNVM, },
{ PCI_DEVICE(0x1d1d, 0x2807), /* CNEX WL */
@@ -3217,6 +3259,10 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
{ PCI_DEVICE(0x15b7, 0x2001), /* Sandisk Skyhawk */
.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x1d97, 0x2263), /* SPCC */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */
+ .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001),
.driver_data = NVME_QUIRK_SINGLE_VECTOR },
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 65e3d0ef36e1..b7ce4f221d99 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -97,6 +97,7 @@ struct nvme_rdma_queue {
struct completion cm_done;
bool pi_support;
int cq_size;
+ struct mutex queue_lock;
};
struct nvme_rdma_ctrl {
@@ -579,6 +580,7 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
int ret;
queue = &ctrl->queues[idx];
+ mutex_init(&queue->queue_lock);
queue->ctrl = ctrl;
if (idx && ctrl->ctrl.max_integrity_segments)
queue->pi_support = true;
@@ -598,7 +600,8 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
if (IS_ERR(queue->cm_id)) {
dev_info(ctrl->ctrl.device,
"failed to create CM ID: %ld\n", PTR_ERR(queue->cm_id));
- return PTR_ERR(queue->cm_id);
+ ret = PTR_ERR(queue->cm_id);
+ goto out_destroy_mutex;
}
if (ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR)
@@ -628,6 +631,8 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
out_destroy_cm_id:
rdma_destroy_id(queue->cm_id);
nvme_rdma_destroy_queue_ib(queue);
+out_destroy_mutex:
+ mutex_destroy(&queue->queue_lock);
return ret;
}
@@ -639,9 +644,10 @@ static void __nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
{
- if (!test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags))
- return;
- __nvme_rdma_stop_queue(queue);
+ mutex_lock(&queue->queue_lock);
+ if (test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags))
+ __nvme_rdma_stop_queue(queue);
+ mutex_unlock(&queue->queue_lock);
}
static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
@@ -651,6 +657,7 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
nvme_rdma_destroy_queue_ib(queue);
rdma_destroy_id(queue->cm_id);
+ mutex_destroy(&queue->queue_lock);
}
static void nvme_rdma_free_io_queues(struct nvme_rdma_ctrl *ctrl)
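Both transports adopt the same guarded-stop idiom in this series. In isolation, with hypothetical names, it looks like the sketch below; the mutex pins the queue across the test-and-stop window so a stop racing with controller teardown can no longer touch a queue whose resources are mid-release:

    struct foo_queue {
        unsigned long flags;        /* bit 0: FOO_Q_LIVE */
        struct mutex queue_lock;    /* init in alloc, destroy in free */
    };

    static void foo_stop_queue(struct foo_queue *q)
    {
        mutex_lock(&q->queue_lock);
        if (test_and_clear_bit(0, &q->flags))
            __foo_stop_queue(q);    /* assumed transport helper */
        mutex_unlock(&q->queue_lock);
    }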
@@ -797,7 +804,7 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
NVME_RDMA_DATA_SGL_SIZE;
set->driver_data = ctrl;
set->nr_hw_queues = 1;
- set->timeout = ADMIN_TIMEOUT;
+ set->timeout = NVME_ADMIN_TIMEOUT;
set->flags = BLK_MQ_F_NO_SCHED;
} else {
set = &ctrl->tag_set;
@@ -853,7 +860,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
return error;
ctrl->device = ctrl->queues[0].device;
- ctrl->ctrl.numa_node = dev_to_node(ctrl->device->dev->dma_device);
+ ctrl->ctrl.numa_node = ibdev_to_node(ctrl->device->dev);
/* T10-PI support */
if (ctrl->device->dev->attrs.device_cap_flags &
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index c0c33320fe65..881d28eb15e9 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -76,6 +76,7 @@ struct nvme_tcp_queue {
struct work_struct io_work;
int io_cpu;
+ struct mutex queue_lock;
struct mutex send_mutex;
struct llist_head req_list;
struct list_head send_list;
@@ -201,7 +202,7 @@ static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
{
- return min_t(size_t, req->iter.bvec->bv_len - req->iter.iov_offset,
+ return min_t(size_t, iov_iter_single_seg_count(&req->iter),
req->pdu_len - req->pdu_sent);
}
@@ -262,6 +263,16 @@ static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
}
}
+static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
+{
+ int ret;
+
+ /* drain the send queue as much as we can... */
+ do {
+ ret = nvme_tcp_try_send(queue);
+ } while (ret > 0);
+}
+
static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
bool sync, bool last)
{
@@ -276,10 +287,10 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
* directly, otherwise queue io_work. Also, only do that if we
* are on the same cpu, so we don't introduce contention.
*/
- if (queue->io_cpu == smp_processor_id() &&
+ if (queue->io_cpu == __smp_processor_id() &&
sync && empty && mutex_trylock(&queue->send_mutex)) {
queue->more_requests = !last;
- nvme_tcp_try_send(queue);
+ nvme_tcp_send_all(queue);
queue->more_requests = false;
mutex_unlock(&queue->send_mutex);
} else if (last) {
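The practical effect of switching to nvme_tcp_send_all() is that a successful trylock now drains everything queued so far, not just a single request: nvme_tcp_try_send() returns a positive value when it made progress, so looping until it returns 0 (nothing left) or a negative error avoids stranding requests on the send list when no further wakeup would follow.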
@@ -1209,6 +1220,7 @@ static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
sock_release(queue->sock);
kfree(queue->pdu);
+ mutex_destroy(&queue->queue_lock);
}
static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
@@ -1370,6 +1382,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
struct nvme_tcp_queue *queue = &ctrl->queues[qid];
int ret, rcv_pdu_size;
+ mutex_init(&queue->queue_lock);
queue->ctrl = ctrl;
init_llist_head(&queue->req_list);
INIT_LIST_HEAD(&queue->send_list);
@@ -1388,7 +1401,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
if (ret) {
dev_err(nctrl->device,
"failed to create socket: %d\n", ret);
- return ret;
+ goto err_destroy_mutex;
}
/* Single syn retry */
@@ -1497,6 +1510,8 @@ err_crypto:
err_sock:
sock_release(queue->sock);
queue->sock = NULL;
+err_destroy_mutex:
+ mutex_destroy(&queue->queue_lock);
return ret;
}
@@ -1524,9 +1539,10 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
struct nvme_tcp_queue *queue = &ctrl->queues[qid];
- if (!test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
- return;
- __nvme_tcp_stop_queue(queue);
+ mutex_lock(&queue->queue_lock);
+ if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
+ __nvme_tcp_stop_queue(queue);
+ mutex_unlock(&queue->queue_lock);
}
static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
@@ -1568,7 +1584,7 @@ static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
set->cmd_size = sizeof(struct nvme_tcp_request);
set->driver_data = ctrl;
set->nr_hw_queues = 1;
- set->timeout = ADMIN_TIMEOUT;
+ set->timeout = NVME_ADMIN_TIMEOUT;
} else {
set = &ctrl->tag_set;
memset(set, 0, sizeof(*set));
diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c
index 67e87e9f306f..1dfe9a3500e3 100644
--- a/drivers/nvme/host/zns.c
+++ b/drivers/nvme/host/zns.c
@@ -55,12 +55,17 @@ int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
int status;
/* Driver requires zone append support */
- if (!(le32_to_cpu(log->iocs[nvme_cmd_zone_append]) &
+ if ((le32_to_cpu(log->iocs[nvme_cmd_zone_append]) &
NVME_CMD_EFFECTS_CSUPP)) {
+ if (test_and_clear_bit(NVME_NS_FORCE_RO, &ns->flags))
+ dev_warn(ns->ctrl->device,
+ "Zone Append supported for zoned namespace:%d. Remove read-only mode\n",
+ ns->head->ns_id);
+ } else {
+ set_bit(NVME_NS_FORCE_RO, &ns->flags);
dev_warn(ns->ctrl->device,
- "append not supported for zoned namespace:%d\n",
- ns->head->ns_id);
- return -EINVAL;
+ "Zone Append not supported for zoned namespace:%d. Forcing to read-only mode\n",
+ ns->head->ns_id);
}
/* Lazily query controller append limit for the first zoned namespace */
diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig
index 8056955e652c..4be2ececbc45 100644
--- a/drivers/nvme/target/Kconfig
+++ b/drivers/nvme/target/Kconfig
@@ -24,7 +24,7 @@ config NVME_TARGET_PASSTHRU
This enables target side NVMe passthru controller support for the
NVMe Over Fabrics protocol. It allows for hosts to manage and
directly access an actual NVMe controller residing on the target
- side, incuding executing Vendor Unique Commands.
+ side, including executing Vendor Unique Commands.
If unsure, say N.
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index dca34489a1dc..dc1ea468b182 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -89,12 +89,12 @@ static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
if (!ns->bdev)
goto out;
- host_reads = part_stat_read(ns->bdev->bd_part, ios[READ]);
- data_units_read = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part,
- sectors[READ]), 1000);
- host_writes = part_stat_read(ns->bdev->bd_part, ios[WRITE]);
- data_units_written = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part,
- sectors[WRITE]), 1000);
+ host_reads = part_stat_read(ns->bdev, ios[READ]);
+ data_units_read =
+ DIV_ROUND_UP(part_stat_read(ns->bdev, sectors[READ]), 1000);
+ host_writes = part_stat_read(ns->bdev, ios[WRITE]);
+ data_units_written =
+ DIV_ROUND_UP(part_stat_read(ns->bdev, sectors[WRITE]), 1000);
put_unaligned_le64(host_reads, &slog->host_reads[0]);
put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
@@ -120,12 +120,12 @@ static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
/* we don't have the right data for file backed ns */
if (!ns->bdev)
continue;
- host_reads += part_stat_read(ns->bdev->bd_part, ios[READ]);
+ host_reads += part_stat_read(ns->bdev, ios[READ]);
data_units_read += DIV_ROUND_UP(
- part_stat_read(ns->bdev->bd_part, sectors[READ]), 1000);
- host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]);
+ part_stat_read(ns->bdev, sectors[READ]), 1000);
+ host_writes += part_stat_read(ns->bdev, ios[WRITE]);
data_units_written += DIV_ROUND_UP(
- part_stat_read(ns->bdev->bd_part, sectors[WRITE]), 1000);
+ part_stat_read(ns->bdev, sectors[WRITE]), 1000);
}
put_unaligned_le64(host_reads, &slog->host_reads[0]);
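The 1000 divisor implements the NVMe SMART log definition: Data Units Read/Written count units of 1000 * 512 bytes, rounded up. A quick worked example with hypothetical numbers:

    u64 sectors = 1500001;                         /* 512-byte sectors */
    u64 data_units = DIV_ROUND_UP(sectors, 1000);  /* == 1501 */
    /* reported as 1501 data units, i.e. about 768.5 MB of host reads */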
@@ -487,8 +487,10 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
/* return an all zeroed buffer if we can't find an active namespace */
ns = nvmet_find_namespace(ctrl, req->cmd->identify.nsid);
- if (!ns)
+ if (!ns) {
+ status = NVME_SC_INVALID_NS;
goto done;
+ }
nvmet_ns_revalidate(ns);
@@ -541,7 +543,9 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
id->nsattr |= (1 << 0);
nvmet_put_namespace(ns);
done:
- status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
+ if (!status)
+ status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
+
kfree(id);
out:
nvmet_req_complete(req, status);
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 37e1d7784e17..c61ffd767062 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -736,9 +736,49 @@ static ssize_t nvmet_passthru_enable_store(struct config_item *item,
}
CONFIGFS_ATTR(nvmet_passthru_, enable);
+static ssize_t nvmet_passthru_admin_timeout_show(struct config_item *item,
+ char *page)
+{
+ return sprintf(page, "%u\n", to_subsys(item->ci_parent)->admin_timeout);
+}
+
+static ssize_t nvmet_passthru_admin_timeout_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
+ unsigned int timeout;
+
+ if (kstrtouint(page, 0, &timeout))
+ return -EINVAL;
+ subsys->admin_timeout = timeout;
+ return count;
+}
+CONFIGFS_ATTR(nvmet_passthru_, admin_timeout);
+
+static ssize_t nvmet_passthru_io_timeout_show(struct config_item *item,
+ char *page)
+{
+ return sprintf(page, "%u\n", to_subsys(item->ci_parent)->io_timeout);
+}
+
+static ssize_t nvmet_passthru_io_timeout_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
+ unsigned int timeout;
+
+ if (kstrtouint(page, 0, &timeout))
+ return -EINVAL;
+ subsys->io_timeout = timeout;
+ return count;
+}
+CONFIGFS_ATTR(nvmet_passthru_, io_timeout);
+
static struct configfs_attribute *nvmet_passthru_attrs[] = {
&nvmet_passthru_attr_device_path,
&nvmet_passthru_attr_enable,
+ &nvmet_passthru_attr_admin_timeout,
+ &nvmet_passthru_attr_io_timeout,
NULL,
};
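With these attributes in place, a nonzero value overrides the request timeout that nvmet_passthru_execute_cmd() assigns via rq->timeout (see the passthru.c hunk further below), while zero keeps the block layer default. Assuming the standard nvmet configfs layout, usage looks like:

    echo 120 > /sys/kernel/config/nvmet/subsystems/<subsysnqn>/passthru/io_timeout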
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 957b39a82431..8ce4d59cc9e7 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -757,8 +757,6 @@ void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
{
cq->qid = qid;
cq->size = size;
-
- ctrl->cqs[qid] = cq;
}
void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
@@ -1344,20 +1342,14 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
if (!ctrl->changed_ns_list)
goto out_free_ctrl;
- ctrl->cqs = kcalloc(subsys->max_qid + 1,
- sizeof(struct nvmet_cq *),
- GFP_KERNEL);
- if (!ctrl->cqs)
- goto out_free_changed_ns_list;
-
ctrl->sqs = kcalloc(subsys->max_qid + 1,
sizeof(struct nvmet_sq *),
GFP_KERNEL);
if (!ctrl->sqs)
- goto out_free_cqs;
+ goto out_free_changed_ns_list;
if (subsys->cntlid_min > subsys->cntlid_max)
- goto out_free_cqs;
+ goto out_free_changed_ns_list;
ret = ida_simple_get(&cntlid_ida,
subsys->cntlid_min, subsys->cntlid_max,
@@ -1395,8 +1387,6 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
out_free_sqs:
kfree(ctrl->sqs);
-out_free_cqs:
- kfree(ctrl->cqs);
out_free_changed_ns_list:
kfree(ctrl->changed_ns_list);
out_free_ctrl:
@@ -1426,7 +1416,6 @@ static void nvmet_ctrl_free(struct kref *ref)
nvmet_async_events_free(ctrl);
kfree(ctrl->sqs);
- kfree(ctrl->cqs);
kfree(ctrl->changed_ns_list);
kfree(ctrl);
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index f40c05c33c3a..682854e0e079 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -69,6 +69,7 @@ void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
struct nvmet_port *port;
struct nvmet_subsys_link *s;
+ lockdep_assert_held(&nvmet_config_sem);
nvmet_genctr++;
list_for_each_entry(port, nvmet_ports, global_entry)
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 3da067a8311e..68213f0a052b 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -564,6 +564,50 @@ fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq,
fcloop_tfcp_req_put(tfcp_req);
}
+static bool drop_fabric_opcode;
+#define DROP_OPCODE_MASK 0x00FF
+/* fabrics opcode will have a bit set above 1st byte */
+static int drop_opcode = -1;
+static int drop_instance;
+static int drop_amount;
+static int drop_current_cnt;
+
+/*
+ * Routine to inspect an io and determine whether it is to be dropped.
+ * Returns:
+ *  0 if the io is not dropped
+ *  1 if the io was dropped
+ */
+static int check_for_drop(struct fcloop_fcpreq *tfcp_req)
+{
+ struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
+ struct nvme_fc_cmd_iu *cmdiu = fcpreq->cmdaddr;
+ struct nvme_command *sqe = &cmdiu->sqe;
+
+ if (drop_opcode == -1)
+ return 0;
+
+ pr_info("%s: seq opcd x%02x fctype x%02x: drop F %s op x%02x "
+ "inst %d start %d amt %d\n",
+ __func__, sqe->common.opcode, sqe->fabrics.fctype,
+ drop_fabric_opcode ? "y" : "n",
+ drop_opcode, drop_current_cnt, drop_instance, drop_amount);
+
+ if ((drop_fabric_opcode &&
+ (sqe->common.opcode != nvme_fabrics_command ||
+ sqe->fabrics.fctype != drop_opcode)) ||
+ (!drop_fabric_opcode && sqe->common.opcode != drop_opcode))
+ return 0;
+
+ if (++drop_current_cnt >= drop_instance) {
+ if (drop_current_cnt >= drop_instance + drop_amount)
+ drop_opcode = -1;
+ return 1;
+ }
+
+ return 0;
+}
+
static void
fcloop_fcp_recv_work(struct work_struct *work)
{
@@ -590,10 +634,14 @@ fcloop_fcp_recv_work(struct work_struct *work)
if (unlikely(aborted))
ret = -ECANCELED;
- else
- ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
+ else {
+ if (likely(!check_for_drop(tfcp_req)))
+ ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
&tfcp_req->tgt_fcp_req,
fcpreq->cmdaddr, fcpreq->cmdlen);
+ else
+ pr_info("%s: dropped command ********\n", __func__);
+ }
if (ret)
fcloop_call_host_done(fcpreq, tfcp_req, ret);
@@ -1449,6 +1497,34 @@ fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
return ret ? ret : count;
}
+static ssize_t
+fcloop_set_cmd_drop(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned int opcode;
+ int starting, amount;
+
+ if (sscanf(buf, "%x:%d:%d", &opcode, &starting, &amount) != 3)
+ return -EBADRQC;
+
+ drop_current_cnt = 0;
+ drop_fabric_opcode = (opcode & ~DROP_OPCODE_MASK) ? true : false;
+ drop_opcode = (opcode & DROP_OPCODE_MASK);
+ drop_instance = starting;
+ /* The drop-checking routine uses instance + count to know when
+ * to end. Thus, if dropping 1 instance, the count should be 0,
+ * so subtract 1 from the count.
+ */
+ drop_amount = amount - 1;
+
+ pr_info("%s: DROP: Starting at instance %d of%s opcode x%x drop +%d "
+ "instances\n",
+ __func__, drop_instance, drop_fabric_opcode ? " fabric" : "",
+ drop_opcode, drop_amount);
+
+ return count;
+}
+
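To make the drop-window arithmetic concrete: a hypothetical write of "2:2:3" to the new set_cmd_drop attribute parses through the "%x:%d:%d" sscanf above as opcode 0x02 (NVMe Read), starting instance 2, amount 3, giving:

    /* drop_fabric_opcode = false  (no bits above the low byte)
     * drop_opcode        = 0x02
     * drop_instance      = 2
     * drop_amount        = 3 - 1 = 2
     *
     * check_for_drop() then returns 1 while 2 <= ++drop_current_cnt <= 4,
     * i.e. the 2nd, 3rd and 4th matching Reads are dropped, and the
     * facility disarms itself (drop_opcode = -1) once drop_current_cnt
     * reaches drop_instance + drop_amount.
     */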
static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port);
static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port);
@@ -1456,6 +1532,7 @@ static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port);
static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port);
static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port);
static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port);
+static DEVICE_ATTR(set_cmd_drop, 0200, NULL, fcloop_set_cmd_drop);
static struct attribute *fcloop_dev_attrs[] = {
&dev_attr_add_local_port.attr,
@@ -1464,6 +1541,7 @@ static struct attribute *fcloop_dev_attrs[] = {
&dev_attr_del_remote_port.attr,
&dev_attr_add_target_port.attr,
&dev_attr_del_target_port.attr,
+ &dev_attr_set_cmd_drop.attr,
NULL
};
@@ -1511,8 +1589,8 @@ out_destroy_class:
static void __exit fcloop_exit(void)
{
- struct fcloop_lport *lport;
- struct fcloop_nport *nport;
+ struct fcloop_lport *lport = NULL;
+ struct fcloop_nport *nport = NULL;
struct fcloop_tport *tport;
struct fcloop_rport *rport;
unsigned long flags;
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index f6d81239be21..cb6f86572b24 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -211,6 +211,8 @@ static int nvme_loop_init_request(struct blk_mq_tag_set *set,
(set == &ctrl->tag_set) ? hctx_idx + 1 : 0);
}
+static struct lock_class_key loop_hctx_fq_lock_key;
+
static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int hctx_idx)
{
@@ -219,6 +221,14 @@ static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);
+ /*
+ * flush_end_io() can be called recursively for us, so use our own
+ * lock class key to avoid a false-positive lockdep report of
+ * recursive locking. This also lets us drop the dynamically
+ * allocated lock class for each flush queue, which could cause a
+ * horrible boot delay.
+ */
+ blk_mq_hctx_set_fq_lock_class(hctx, &loop_hctx_fq_lock_key);
+
hctx->driver_data = queue;
return 0;
}
@@ -345,7 +355,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
ctrl->admin_tag_set.driver_data = ctrl;
ctrl->admin_tag_set.nr_hw_queues = 1;
- ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
+ ctrl->admin_tag_set.timeout = NVME_ADMIN_TIMEOUT;
ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;
ctrl->queues[0].ctrl = ctrl;
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 559a15ccc322..592763732065 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -164,7 +164,6 @@ static inline struct nvmet_port *ana_groups_to_port(
struct nvmet_ctrl {
struct nvmet_subsys *subsys;
- struct nvmet_cq **cqs;
struct nvmet_sq **sqs;
bool cmd_seen;
@@ -249,6 +248,8 @@ struct nvmet_subsys {
struct nvme_ctrl *passthru_ctrl;
char *passthru_ctrl_path;
struct config_group passthru_group;
+ unsigned int admin_timeout;
+ unsigned int io_timeout;
#endif /* CONFIG_NVME_TARGET_PASSTHRU */
};
@@ -330,6 +331,7 @@ struct nvmet_req {
struct work_struct work;
} f;
struct {
+ struct bio inline_bio;
struct request *rq;
struct work_struct work;
bool use_workqueue;
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 8ee94f056898..b9776fc8f08f 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -188,35 +188,31 @@ static void nvmet_passthru_req_done(struct request *rq,
static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
{
struct scatterlist *sg;
- int op_flags = 0;
struct bio *bio;
- int i, ret;
+ int i;
if (req->sg_cnt > BIO_MAX_PAGES)
return -EINVAL;
- if (req->cmd->common.opcode == nvme_cmd_flush)
- op_flags = REQ_FUA;
- else if (nvme_is_write(req->cmd))
- op_flags = REQ_SYNC | REQ_IDLE;
-
- bio = bio_alloc(GFP_KERNEL, req->sg_cnt);
- bio->bi_end_io = bio_put;
- bio->bi_opf = req_op(rq) | op_flags;
+ if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) {
+ bio = &req->p.inline_bio;
+ bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
+ } else {
+ bio = bio_alloc(GFP_KERNEL, min(req->sg_cnt, BIO_MAX_PAGES));
+ bio->bi_end_io = bio_put;
+ }
+ bio->bi_opf = req_op(rq);
for_each_sg(req->sg, sg, req->sg_cnt, i) {
if (bio_add_pc_page(rq->q, bio, sg_page(sg), sg->length,
sg->offset) < sg->length) {
- bio_put(bio);
+ if (bio != &req->p.inline_bio)
+ bio_put(bio);
return -EINVAL;
}
}
- ret = blk_rq_append_bio(rq, &bio);
- if (unlikely(ret)) {
- bio_put(bio);
- return ret;
- }
+ blk_rq_bio_prep(rq, bio, req->sg_cnt);
return 0;
}
@@ -227,6 +223,7 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
struct request_queue *q = ctrl->admin_q;
struct nvme_ns *ns = NULL;
struct request *rq = NULL;
+ unsigned int timeout;
u32 effects;
u16 status;
int ret;
@@ -242,14 +239,20 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
}
q = ns->queue;
+ timeout = req->sq->ctrl->subsys->io_timeout;
+ } else {
+ timeout = req->sq->ctrl->subsys->admin_timeout;
}
- rq = nvme_alloc_request(q, req->cmd, 0, NVME_QID_ANY);
+ rq = nvme_alloc_request(q, req->cmd, 0);
if (IS_ERR(rq)) {
status = NVME_SC_INTERNAL;
goto out_put_ns;
}
+ if (timeout)
+ rq->timeout = timeout;
+
if (req->sg_cnt) {
ret = nvmet_passthru_map_sg(req, rq);
if (unlikely(ret)) {
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index ae6620489457..06b6b742bb21 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -414,7 +414,8 @@ static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
if (ib_dma_mapping_error(ndev->device, r->send_sge.addr))
goto out_free_rsp;
- r->req.p2p_client = &ndev->device->dev;
+ if (!ib_uses_virt_dma(ndev->device))
+ r->req.p2p_client = &ndev->device->dev;
r->send_sge.length = sizeof(*r->req.cqe);
r->send_sge.lkey = ndev->pd->local_dma_lkey;
@@ -1219,6 +1220,14 @@ nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
}
ndev->inline_data_size = nport->inline_data_size;
ndev->inline_page_count = inline_page_count;
+
+ if (nport->pi_enable && !(cm_id->device->attrs.device_cap_flags &
+ IB_DEVICE_INTEGRITY_HANDOVER)) {
+ pr_warn("T10-PI is not supported by device %s. Disabling it\n",
+ cm_id->device->name);
+ nport->pi_enable = false;
+ }
+
ndev->device = cm_id->device;
kref_init(&ndev->ref);
@@ -1640,6 +1649,16 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
spin_lock_irqsave(&queue->state_lock, flags);
switch (queue->state) {
case NVMET_RDMA_Q_CONNECTING:
+ while (!list_empty(&queue->rsp_wait_list)) {
+ struct nvmet_rdma_rsp *rsp;
+
+ rsp = list_first_entry(&queue->rsp_wait_list,
+ struct nvmet_rdma_rsp,
+ wait_list);
+ list_del(&rsp->wait_list);
+ nvmet_rdma_put_rsp(rsp);
+ }
+ fallthrough;
case NVMET_RDMA_Q_LIVE:
queue->state = NVMET_RDMA_Q_DISCONNECTING;
disconnect = true;
@@ -1844,14 +1863,6 @@ static int nvmet_rdma_enable_port(struct nvmet_rdma_port *port)
goto out_destroy_id;
}
- if (port->nport->pi_enable &&
- !(cm_id->device->attrs.device_cap_flags &
- IB_DEVICE_INTEGRITY_HANDOVER)) {
- pr_err("T10-PI is not supported for %pISpcs\n", addr);
- ret = -EINVAL;
- goto out_destroy_id;
- }
-
port->cm_id = cm_id;
return 0;
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index dc1f0f647189..aacf06f0b431 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -305,7 +305,7 @@ static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd)
length = cmd->pdu_len;
cmd->nr_mapped = DIV_ROUND_UP(length, PAGE_SIZE);
offset = cmd->rbytes_done;
- cmd->sg_idx = DIV_ROUND_UP(offset, PAGE_SIZE);
+ cmd->sg_idx = offset / PAGE_SIZE;
sg_offset = offset % PAGE_SIZE;
sg = &cmd->req.sg[cmd->sg_idx];
@@ -318,6 +318,7 @@ static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd)
length -= iov_len;
sg = sg_next(sg);
iov++;
+ sg_offset = 0;
}
iov_iter_kvec(&cmd->recv_msg.msg_iter, READ, cmd->iov,
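The sg_idx change fixes an off-by-one when a data PDU is resumed mid-buffer: the starting scatterlist entry is the page the offset falls into, which needs a floor rather than a round-up, and only that first entry carries a partial offset. With hypothetical values:

    /* rbytes_done = 5000, PAGE_SIZE = 4096:
     *   old: sg_idx = DIV_ROUND_UP(5000, 4096) = 2, skipping the
     *        remaining 3192 bytes of the second page entirely
     *   new: sg_idx = 5000 / 4096 = 1, sg_offset = 5000 % 4096 = 904
     * The added "sg_offset = 0" then makes every entry after the first
     * start at the beginning of its page, as it must.
     */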
diff --git a/drivers/of/device.c b/drivers/of/device.c
index aedfaaafd3e7..1122daa8e273 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -162,9 +162,11 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,
mask = DMA_BIT_MASK(ilog2(end) + 1);
dev->coherent_dma_mask &= mask;
*dev->dma_mask &= mask;
- /* ...but only set bus limit if we found valid dma-ranges earlier */
- if (!ret)
+ /* ...but only set bus limit and range map if we found valid dma-ranges earlier */
+ if (!ret) {
dev->bus_dma_limit = end;
+ dev->dma_range_map = map;
+ }
coherent = of_dma_is_coherent(np);
dev_dbg(dev, "device is%sdma coherent\n",
@@ -172,6 +174,9 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,
iommu = of_iommu_configure(dev, np, id);
if (PTR_ERR(iommu) == -EPROBE_DEFER) {
+ /* Don't touch range map if it wasn't set from a valid dma-ranges */
+ if (!ret)
+ dev->dma_range_map = NULL;
kfree(map);
return -EPROBE_DEFER;
}
@@ -181,7 +186,6 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,
arch_setup_dma_ops(dev, dma_start, size, iommu, coherent);
- dev->dma_range_map = map;
return 0;
}
EXPORT_SYMBOL_GPL(of_dma_configure_id);
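The ordering change worth noting here: dev->dma_range_map is now published as soon as the dma-ranges lookup succeeds, alongside bus_dma_limit, instead of at the end of the function, and it is explicitly reset before the early -EPROBE_DEFER return so a deferred probe does not leave a dangling pointer to the map that is about to be freed.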
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index 4268eb359915..c3f3d9249cc5 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -27,6 +27,10 @@
* various states of availability.
*/
LIST_HEAD(opp_tables);
+
+/* OPP tables with uninitialized required OPPs */
+LIST_HEAD(lazy_opp_tables);
+
/* Lock to allow exclusive modification to the device and opp lists */
DEFINE_MUTEX(opp_table_lock);
/* Flag indicating that opp_tables list is being updated at the moment */
@@ -146,6 +150,32 @@ unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp)
EXPORT_SYMBOL_GPL(dev_pm_opp_get_level);
/**
+ * dev_pm_opp_get_required_pstate() - Gets the required performance state
+ * corresponding to an available opp
+ * @opp: opp for which performance state has to be returned for
+ * @index: index of the required opp
+ *
+ * Return: performance state read from device tree corresponding to the
+ * required opp, else return 0.
+ */
+unsigned int dev_pm_opp_get_required_pstate(struct dev_pm_opp *opp,
+ unsigned int index)
+{
+ if (IS_ERR_OR_NULL(opp) || !opp->available ||
+ index >= opp->opp_table->required_opp_count) {
+ pr_err("%s: Invalid parameters\n", __func__);
+ return 0;
+ }
+
+ /* required-opps not fully initialized yet */
+ if (lazy_linking_pending(opp->opp_table))
+ return 0;
+
+ return opp->required_opps[index]->pstate;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_required_pstate);
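A hypothetical caller, pairing the new helper with genpd performance states (dev and genpd_dev are placeholder device pointers; index 0 selects the first required-opps entry):

    unsigned long freq = 800000000;
    unsigned int pstate;
    struct dev_pm_opp *opp;

    opp = dev_pm_opp_find_freq_ceil(dev, &freq);
    if (!IS_ERR(opp)) {
        /* returns 0 on bad args or while linking is still lazy */
        pstate = dev_pm_opp_get_required_pstate(opp, 0);
        if (pstate)
            dev_pm_genpd_set_performance_state(genpd_dev, pstate);
        dev_pm_opp_put(opp);
    }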
+
+/**
* dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
* @opp: opp for which turbo mode is being verified
*
@@ -449,6 +479,55 @@ struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_exact);
+/**
+ * dev_pm_opp_find_level_ceil() - search for a rounded-up level
+ * @dev: device for which we do this operation
+ * @level: level to search for
+ *
+ * Return: Searches for a rounded-up match in the opp table and returns pointer
+ * to the matching opp if found, else returns ERR_PTR in case of error and
+ * should be handled using IS_ERR. Error return values can be:
+ * EINVAL: for bad pointer
+ * ERANGE: no match found for search
+ * ENODEV: if device not found in list of registered devices
+ *
+ * The callers are required to call dev_pm_opp_put() for the returned OPP after
+ * use.
+ */
+struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev,
+ unsigned int *level)
+{
+ struct opp_table *opp_table;
+ struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ int r = PTR_ERR(opp_table);
+
+ dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
+ return ERR_PTR(r);
+ }
+
+ mutex_lock(&opp_table->lock);
+
+ list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
+ if (temp_opp->available && temp_opp->level >= *level) {
+ opp = temp_opp;
+ *level = opp->level;
+
+ /* Increment the reference count of OPP */
+ dev_pm_opp_get(opp);
+ break;
+ }
+ }
+
+ mutex_unlock(&opp_table->lock);
+ dev_pm_opp_put_opp_table(opp_table);
+
+ return opp;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_ceil);
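Usage mirrors the existing frequency ceil helpers; note that the level is passed by pointer and rewritten in place to the matched OPP's level:

    unsigned int level = 3;    /* hypothetical minimum level */
    struct dev_pm_opp *opp;

    opp = dev_pm_opp_find_level_ceil(dev, &level);
    if (!IS_ERR(opp)) {
        /* level now holds the lowest available level >= 3 */
        dev_pm_opp_put(opp);
    }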
+
static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table,
unsigned long *freq)
{
@@ -655,6 +734,10 @@ static inline int _generic_set_opp_clk_only(struct device *dev, struct clk *clk,
{
int ret;
+ /* We may reach here for devices which don't change frequency */
+ if (IS_ERR(clk))
+ return 0;
+
ret = clk_set_rate(clk, freq);
if (ret) {
dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
@@ -666,12 +749,12 @@ static inline int _generic_set_opp_clk_only(struct device *dev, struct clk *clk,
static int _generic_set_opp_regulator(struct opp_table *opp_table,
struct device *dev,
- unsigned long old_freq,
+ struct dev_pm_opp *opp,
unsigned long freq,
- struct dev_pm_opp_supply *old_supply,
- struct dev_pm_opp_supply *new_supply)
+ int scaling_down)
{
struct regulator *reg = opp_table->regulators[0];
+ struct dev_pm_opp *old_opp = opp_table->current_opp;
int ret;
/* This function only supports single regulator per device */
@@ -681,8 +764,8 @@ static int _generic_set_opp_regulator(struct opp_table *opp_table,
}
/* Scaling up? Scale voltage before frequency */
- if (freq >= old_freq) {
- ret = _set_opp_voltage(dev, reg, new_supply);
+ if (!scaling_down) {
+ ret = _set_opp_voltage(dev, reg, opp->supplies);
if (ret)
goto restore_voltage;
}
@@ -693,8 +776,8 @@ static int _generic_set_opp_regulator(struct opp_table *opp_table,
goto restore_voltage;
/* Scaling down? Scale voltage after frequency */
- if (freq < old_freq) {
- ret = _set_opp_voltage(dev, reg, new_supply);
+ if (scaling_down) {
+ ret = _set_opp_voltage(dev, reg, opp->supplies);
if (ret)
goto restore_freq;
}
@@ -712,19 +795,18 @@ static int _generic_set_opp_regulator(struct opp_table *opp_table,
return 0;
restore_freq:
- if (_generic_set_opp_clk_only(dev, opp_table->clk, old_freq))
+ if (_generic_set_opp_clk_only(dev, opp_table->clk, old_opp->rate))
dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
- __func__, old_freq);
+ __func__, old_opp->rate);
restore_voltage:
/* This shouldn't harm even if the voltages weren't updated earlier */
- if (old_supply)
- _set_opp_voltage(dev, reg, old_supply);
+ _set_opp_voltage(dev, reg, old_opp->supplies);
return ret;
}
static int _set_opp_bw(const struct opp_table *opp_table,
- struct dev_pm_opp *opp, struct device *dev, bool remove)
+ struct dev_pm_opp *opp, struct device *dev)
{
u32 avg, peak;
int i, ret;
@@ -733,7 +815,7 @@ static int _set_opp_bw(const struct opp_table *opp_table,
return 0;
for (i = 0; i < opp_table->path_count; i++) {
- if (remove) {
+ if (!opp) {
avg = 0;
peak = 0;
} else {
@@ -743,7 +825,7 @@ static int _set_opp_bw(const struct opp_table *opp_table,
ret = icc_set_bw(opp_table->paths[i], avg, peak);
if (ret) {
dev_err(dev, "Failed to %s bandwidth[%d]: %d\n",
- remove ? "remove" : "set", i, ret);
+ opp ? "set" : "remove", i, ret);
return ret;
}
}
@@ -752,29 +834,31 @@ static int _set_opp_bw(const struct opp_table *opp_table,
}
static int _set_opp_custom(const struct opp_table *opp_table,
- struct device *dev, unsigned long old_freq,
- unsigned long freq,
- struct dev_pm_opp_supply *old_supply,
- struct dev_pm_opp_supply *new_supply)
+ struct device *dev, struct dev_pm_opp *opp,
+ unsigned long freq)
{
- struct dev_pm_set_opp_data *data;
+ struct dev_pm_set_opp_data *data = opp_table->set_opp_data;
+ struct dev_pm_opp *old_opp = opp_table->current_opp;
int size;
- data = opp_table->set_opp_data;
+ /*
+ * We support this only if dev_pm_opp_set_regulators() was called
+ * earlier.
+ */
+ if (opp_table->sod_supplies) {
+ size = sizeof(*old_opp->supplies) * opp_table->regulator_count;
+ memcpy(data->old_opp.supplies, old_opp->supplies, size);
+ memcpy(data->new_opp.supplies, opp->supplies, size);
+ data->regulator_count = opp_table->regulator_count;
+ } else {
+ data->regulator_count = 0;
+ }
+
data->regulators = opp_table->regulators;
- data->regulator_count = opp_table->regulator_count;
data->clk = opp_table->clk;
data->dev = dev;
-
- data->old_opp.rate = old_freq;
- size = sizeof(*old_supply) * opp_table->regulator_count;
- if (!old_supply)
- memset(data->old_opp.supplies, 0, size);
- else
- memcpy(data->old_opp.supplies, old_supply, size);
-
+ data->old_opp.rate = old_opp->rate;
data->new_opp.rate = freq;
- memcpy(data->new_opp.supplies, new_supply, size);
return opp_table->set_opp(data);
}
@@ -809,6 +893,10 @@ static int _set_required_opps(struct device *dev,
if (!required_opp_tables)
return 0;
+ /* required-opps not fully initialized yet */
+ if (lazy_linking_pending(opp_table))
+ return -EBUSY;
+
/* Single genpd case */
if (!genpd_virt_devs)
return _set_required_opp(dev, dev, opp, 0);
@@ -841,38 +929,32 @@ static int _set_required_opps(struct device *dev,
return ret;
}
-/**
- * dev_pm_opp_set_bw() - sets bandwidth levels corresponding to an opp
- * @dev: device for which we do this operation
- * @opp: opp based on which the bandwidth levels are to be configured
- *
- * This configures the bandwidth to the levels specified by the OPP. However
- * if the OPP specified is NULL the bandwidth levels are cleared out.
- *
- * Return: 0 on success or a negative error value.
- */
-int dev_pm_opp_set_bw(struct device *dev, struct dev_pm_opp *opp)
+static void _find_current_opp(struct device *dev, struct opp_table *opp_table)
{
- struct opp_table *opp_table;
- int ret;
+ struct dev_pm_opp *opp = ERR_PTR(-ENODEV);
+ unsigned long freq;
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table)) {
- dev_err(dev, "%s: device opp table doesn't exist\n", __func__);
- return PTR_ERR(opp_table);
+ if (!IS_ERR(opp_table->clk)) {
+ freq = clk_get_rate(opp_table->clk);
+ opp = _find_freq_ceil(opp_table, &freq);
}
- if (opp)
- ret = _set_opp_bw(opp_table, opp, dev, false);
- else
- ret = _set_opp_bw(opp_table, NULL, dev, true);
+ /*
+ * Unable to find the current OPP? Pick the first one from the list,
+ * since the list is sorted in ascending order; otherwise the rest of
+ * the code would need special checks to validate current_opp.
+ */
+ if (IS_ERR(opp)) {
+ mutex_lock(&opp_table->lock);
+ opp = list_first_entry(&opp_table->opp_list, struct dev_pm_opp, node);
+ dev_pm_opp_get(opp);
+ mutex_unlock(&opp_table->lock);
+ }
- dev_pm_opp_put_opp_table(opp_table);
- return ret;
+ opp_table->current_opp = opp;
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_set_bw);
-static int _opp_set_rate_zero(struct device *dev, struct opp_table *opp_table)
+static int _disable_opp_table(struct device *dev, struct opp_table *opp_table)
{
int ret;
@@ -887,7 +969,7 @@ static int _opp_set_rate_zero(struct device *dev, struct opp_table *opp_table)
if (!_get_opp_count(opp_table))
return 0;
- ret = _set_opp_bw(opp_table, NULL, dev, true);
+ ret = _set_opp_bw(opp_table, NULL, dev);
if (ret)
return ret;
@@ -900,6 +982,89 @@ static int _opp_set_rate_zero(struct device *dev, struct opp_table *opp_table)
return ret;
}
+static int _set_opp(struct device *dev, struct opp_table *opp_table,
+ struct dev_pm_opp *opp, unsigned long freq)
+{
+ struct dev_pm_opp *old_opp;
+ int scaling_down, ret;
+
+ if (unlikely(!opp))
+ return _disable_opp_table(dev, opp_table);
+
+ /* Find the currently set OPP if we don't know already */
+ if (unlikely(!opp_table->current_opp))
+ _find_current_opp(dev, opp_table);
+
+ old_opp = opp_table->current_opp;
+
+ /* Return early if nothing to do */
+ if (opp_table->enabled && old_opp == opp) {
+ dev_dbg(dev, "%s: OPPs are same, nothing to do\n", __func__);
+ return 0;
+ }
+
+ dev_dbg(dev, "%s: switching OPP: Freq %lu -> %lu Hz, Level %u -> %u, Bw %u -> %u\n",
+ __func__, old_opp->rate, freq, old_opp->level, opp->level,
+ old_opp->bandwidth ? old_opp->bandwidth[0].peak : 0,
+ opp->bandwidth ? opp->bandwidth[0].peak : 0);
+
+ scaling_down = _opp_compare_key(old_opp, opp);
+ if (scaling_down == -1)
+ scaling_down = 0;
+
+ /* Scaling up? Configure required OPPs before frequency */
+ if (!scaling_down) {
+ ret = _set_required_opps(dev, opp_table, opp, true);
+ if (ret) {
+ dev_err(dev, "Failed to set required opps: %d\n", ret);
+ return ret;
+ }
+
+ ret = _set_opp_bw(opp_table, opp, dev);
+ if (ret) {
+ dev_err(dev, "Failed to set bw: %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (opp_table->set_opp) {
+ ret = _set_opp_custom(opp_table, dev, opp, freq);
+ } else if (opp_table->regulators) {
+ ret = _generic_set_opp_regulator(opp_table, dev, opp, freq,
+ scaling_down);
+ } else {
+ /* Only frequency scaling */
+ ret = _generic_set_opp_clk_only(dev, opp_table->clk, freq);
+ }
+
+ if (ret)
+ return ret;
+
+ /* Scaling down? Configure required OPPs after frequency */
+ if (scaling_down) {
+ ret = _set_opp_bw(opp_table, opp, dev);
+ if (ret) {
+ dev_err(dev, "Failed to set bw: %d\n", ret);
+ return ret;
+ }
+
+ ret = _set_required_opps(dev, opp_table, opp, false);
+ if (ret) {
+ dev_err(dev, "Failed to set required opps: %d\n", ret);
+ return ret;
+ }
+ }
+
+ opp_table->enabled = true;
+ dev_pm_opp_put(old_opp);
+
+ /* Make sure current_opp doesn't get freed */
+ dev_pm_opp_get(opp);
+ opp_table->current_opp = opp;
+
+ return ret;
+}
+
/**
* dev_pm_opp_set_rate() - Configure new OPP based on frequency
* @dev: device for which we do this operation
@@ -914,118 +1079,85 @@ static int _opp_set_rate_zero(struct device *dev, struct opp_table *opp_table)
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
{
struct opp_table *opp_table;
- unsigned long freq, old_freq, temp_freq;
- struct dev_pm_opp *old_opp, *opp;
- struct clk *clk;
+ unsigned long freq = 0, temp_freq;
+ struct dev_pm_opp *opp = NULL;
int ret;
opp_table = _find_opp_table(dev);
if (IS_ERR(opp_table)) {
- dev_err(dev, "%s: device opp doesn't exist\n", __func__);
+ dev_err(dev, "%s: device's opp table doesn't exist\n", __func__);
return PTR_ERR(opp_table);
}
- if (unlikely(!target_freq)) {
- ret = _opp_set_rate_zero(dev, opp_table);
- goto put_opp_table;
- }
-
- clk = opp_table->clk;
- if (IS_ERR(clk)) {
- dev_err(dev, "%s: No clock available for the device\n",
- __func__);
- ret = PTR_ERR(clk);
- goto put_opp_table;
- }
-
- freq = clk_round_rate(clk, target_freq);
- if ((long)freq <= 0)
- freq = target_freq;
-
- old_freq = clk_get_rate(clk);
-
- /* Return early if nothing to do */
- if (opp_table->enabled && old_freq == freq) {
- dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
- __func__, freq);
- ret = 0;
- goto put_opp_table;
- }
-
- /*
- * For IO devices which require an OPP on some platforms/SoCs
- * while just needing to scale the clock on some others
- * we look for empty OPP tables with just a clock handle and
- * scale only the clk. This makes dev_pm_opp_set_rate()
- * equivalent to a clk_set_rate()
- */
- if (!_get_opp_count(opp_table)) {
- ret = _generic_set_opp_clk_only(dev, clk, freq);
- goto put_opp_table;
- }
+ if (target_freq) {
+ /*
+ * For IO devices which require an OPP on some platforms/SoCs,
+ * while just needing to scale the clock on some others,
+ * we look for empty OPP tables with just a clock handle and
+ * scale only the clk. This makes dev_pm_opp_set_rate()
+ * equivalent to a clk_set_rate().
+ */
+ if (!_get_opp_count(opp_table)) {
+ ret = _generic_set_opp_clk_only(dev, opp_table->clk, target_freq);
+ goto put_opp_table;
+ }
- temp_freq = old_freq;
- old_opp = _find_freq_ceil(opp_table, &temp_freq);
- if (IS_ERR(old_opp)) {
- dev_err(dev, "%s: failed to find current OPP for freq %lu (%ld)\n",
- __func__, old_freq, PTR_ERR(old_opp));
- }
+ freq = clk_round_rate(opp_table->clk, target_freq);
+ if ((long)freq <= 0)
+ freq = target_freq;
- temp_freq = freq;
- opp = _find_freq_ceil(opp_table, &temp_freq);
- if (IS_ERR(opp)) {
- ret = PTR_ERR(opp);
- dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
- __func__, freq, ret);
- goto put_old_opp;
+ /*
+ * The clock driver may support finer resolution of the
+ * frequencies than the OPP table, so don't update the frequency we
+ * pass to clk_set_rate() here.
+ */
+ temp_freq = freq;
+ opp = _find_freq_ceil(opp_table, &temp_freq);
+ if (IS_ERR(opp)) {
+ ret = PTR_ERR(opp);
+ dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
+ __func__, freq, ret);
+ goto put_opp_table;
+ }
}
- dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n", __func__,
- old_freq, freq);
+ ret = _set_opp(dev, opp_table, opp, freq);
- /* Scaling up? Configure required OPPs before frequency */
- if (freq >= old_freq) {
- ret = _set_required_opps(dev, opp_table, opp, true);
- if (ret)
- goto put_opp;
- }
-
- if (opp_table->set_opp) {
- ret = _set_opp_custom(opp_table, dev, old_freq, freq,
- IS_ERR(old_opp) ? NULL : old_opp->supplies,
- opp->supplies);
- } else if (opp_table->regulators) {
- ret = _generic_set_opp_regulator(opp_table, dev, old_freq, freq,
- IS_ERR(old_opp) ? NULL : old_opp->supplies,
- opp->supplies);
- } else {
- /* Only frequency scaling */
- ret = _generic_set_opp_clk_only(dev, clk, freq);
- }
+ if (target_freq)
+ dev_pm_opp_put(opp);
+put_opp_table:
+ dev_pm_opp_put_opp_table(opp_table);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
- /* Scaling down? Configure required OPPs after frequency */
- if (!ret && freq < old_freq) {
- ret = _set_required_opps(dev, opp_table, opp, false);
- if (ret)
- dev_err(dev, "Failed to set required opps: %d\n", ret);
- }
+/**
+ * dev_pm_opp_set_opp() - Configure device for OPP
+ * @dev: device for which we do this operation
+ * @opp: OPP to set to
+ *
+ * This configures the device based on the properties of the OPP passed to this
+ * routine.
+ *
+ * Return: 0 on success, a negative error number otherwise.
+ */
+int dev_pm_opp_set_opp(struct device *dev, struct dev_pm_opp *opp)
+{
+ struct opp_table *opp_table;
+ int ret;
- if (!ret) {
- ret = _set_opp_bw(opp_table, opp, dev, false);
- if (!ret)
- opp_table->enabled = true;
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ dev_err(dev, "%s: device opp doesn't exist\n", __func__);
+ return PTR_ERR(opp_table);
}
-put_opp:
- dev_pm_opp_put(opp);
-put_old_opp:
- if (!IS_ERR(old_opp))
- dev_pm_opp_put(old_opp);
-put_opp_table:
+ ret = _set_opp(dev, opp_table, opp, opp ? opp->rate : 0);
dev_pm_opp_put_opp_table(opp_table);
+
return ret;
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
+EXPORT_SYMBOL_GPL(dev_pm_opp_set_opp);
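A minimal consumer sketch for the new API, assuming the caller already holds the exact OPP it wants, e.g. one that differs from another only in level or bandwidth, which the frequency-keyed dev_pm_opp_set_rate() cannot distinguish:

    struct dev_pm_opp *opp;
    int ret;

    opp = dev_pm_opp_find_freq_exact(dev, 800000000, true);
    if (IS_ERR(opp))
        return PTR_ERR(opp);

    ret = dev_pm_opp_set_opp(dev, opp);
    dev_pm_opp_put(opp);
    return ret;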
/* OPP-dev Helpers */
static void _remove_opp_dev(struct opp_device *opp_dev,
@@ -1075,6 +1207,7 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
mutex_init(&opp_table->lock);
mutex_init(&opp_table->genpd_virt_dev_lock);
INIT_LIST_HEAD(&opp_table->dev_list);
+ INIT_LIST_HEAD(&opp_table->lazy);
/* Mark regulator count uninitialized */
opp_table->regulator_count = -1;
@@ -1087,21 +1220,11 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
_of_init_opp_table(opp_table, dev, index);
- /* Find clk for the device */
- opp_table->clk = clk_get(dev, NULL);
- if (IS_ERR(opp_table->clk)) {
- ret = PTR_ERR(opp_table->clk);
- if (ret == -EPROBE_DEFER)
- goto err;
-
- dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__, ret);
- }
-
/* Find interconnect path(s) for the device */
ret = dev_pm_opp_of_find_icc_paths(dev, opp_table);
if (ret) {
if (ret == -EPROBE_DEFER)
- goto err;
+ goto remove_opp_dev;
dev_warn(dev, "%s: Error finding interconnect paths: %d\n",
__func__, ret);
@@ -1113,6 +1236,8 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
return opp_table;
+remove_opp_dev:
+ _remove_opp_dev(opp_dev, opp_table);
err:
kfree(opp_table);
return ERR_PTR(ret);
@@ -1123,6 +1248,37 @@ void _get_opp_table_kref(struct opp_table *opp_table)
kref_get(&opp_table->kref);
}
+static struct opp_table *_update_opp_table_clk(struct device *dev,
+ struct opp_table *opp_table,
+ bool getclk)
+{
+ int ret;
+
+ /*
+ * Return early if we don't need to get clk or we have already tried it
+ * earlier.
+ */
+ if (!getclk || IS_ERR(opp_table) || opp_table->clk)
+ return opp_table;
+
+ /* Find clk for the device */
+ opp_table->clk = clk_get(dev, NULL);
+
+ ret = PTR_ERR_OR_ZERO(opp_table->clk);
+ if (!ret)
+ return opp_table;
+
+ if (ret == -ENOENT) {
+ dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__, ret);
+ return opp_table;
+ }
+
+ dev_pm_opp_put_opp_table(opp_table);
+ dev_err_probe(dev, ret, "Couldn't find clock\n");
+
+ return ERR_PTR(ret);
+}
+
/*
* We need to make sure that the OPP table for a device doesn't get added twice,
* if this routine gets called in parallel with the same device pointer.
@@ -1138,7 +1294,8 @@ void _get_opp_table_kref(struct opp_table *opp_table)
* uses the opp_tables_busy flag to indicate if another creator is in the middle
* of adding an OPP table and others should wait for it to finish.
*/
-struct opp_table *_add_opp_table_indexed(struct device *dev, int index)
+struct opp_table *_add_opp_table_indexed(struct device *dev, int index,
+ bool getclk)
{
struct opp_table *opp_table;
@@ -1185,12 +1342,12 @@ again:
unlock:
mutex_unlock(&opp_table_lock);
- return opp_table;
+ return _update_opp_table_clk(dev, opp_table, getclk);
}
-struct opp_table *_add_opp_table(struct device *dev)
+static struct opp_table *_add_opp_table(struct device *dev, bool getclk)
{
- return _add_opp_table_indexed(dev, 0);
+ return _add_opp_table_indexed(dev, 0, getclk);
}
struct opp_table *dev_pm_opp_get_opp_table(struct device *dev)
@@ -1209,6 +1366,9 @@ static void _opp_table_kref_release(struct kref *kref)
list_del(&opp_table->node);
mutex_unlock(&opp_table_lock);
+ if (opp_table->current_opp)
+ dev_pm_opp_put(opp_table->current_opp);
+
_of_clear_opp_table(opp_table);
/* Release clk */
@@ -1503,6 +1663,21 @@ static int _opp_is_duplicate(struct device *dev, struct dev_pm_opp *new_opp,
return 0;
}
+void _required_opps_available(struct dev_pm_opp *opp, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ if (opp->required_opps[i]->available)
+ continue;
+
+ opp->available = false;
+ pr_warn("%s: OPP not supported by required OPP %pOF (%lu)\n",
+ __func__, opp->required_opps[i]->np, opp->rate);
+ return;
+ }
+}
+
/*
* Returns:
* 0: On success. And appropriate error message for duplicate OPPs.
@@ -1522,12 +1697,10 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
mutex_lock(&opp_table->lock);
head = &opp_table->opp_list;
- if (likely(!rate_not_available)) {
- ret = _opp_is_duplicate(dev, new_opp, opp_table, &head);
- if (ret) {
- mutex_unlock(&opp_table->lock);
- return ret;
- }
+ ret = _opp_is_duplicate(dev, new_opp, opp_table, &head);
+ if (ret) {
+ mutex_unlock(&opp_table->lock);
+ return ret;
}
list_add(&new_opp->node, head);
@@ -1544,6 +1717,12 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
__func__, new_opp->rate);
}
+ /* required-opps not fully initialized yet */
+ if (lazy_linking_pending(opp_table))
+ return 0;
+
+ _required_opps_available(new_opp, opp_table->required_opp_count);
+
return 0;
}
@@ -1626,7 +1805,7 @@ struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev,
{
struct opp_table *opp_table;
- opp_table = _add_opp_table(dev);
+ opp_table = _add_opp_table(dev, false);
if (IS_ERR(opp_table))
return opp_table;
@@ -1688,7 +1867,7 @@ struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name)
{
struct opp_table *opp_table;
- opp_table = _add_opp_table(dev);
+ opp_table = _add_opp_table(dev, false);
if (IS_ERR(opp_table))
return opp_table;
@@ -1732,38 +1911,6 @@ void dev_pm_opp_put_prop_name(struct opp_table *opp_table)
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
-static int _allocate_set_opp_data(struct opp_table *opp_table)
-{
- struct dev_pm_set_opp_data *data;
- int len, count = opp_table->regulator_count;
-
- if (WARN_ON(!opp_table->regulators))
- return -EINVAL;
-
- /* space for set_opp_data */
- len = sizeof(*data);
-
- /* space for old_opp.supplies and new_opp.supplies */
- len += 2 * sizeof(struct dev_pm_opp_supply) * count;
-
- data = kzalloc(len, GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- data->old_opp.supplies = (void *)(data + 1);
- data->new_opp.supplies = data->old_opp.supplies + count;
-
- opp_table->set_opp_data = data;
-
- return 0;
-}
-
-static void _free_set_opp_data(struct opp_table *opp_table)
-{
- kfree(opp_table->set_opp_data);
- opp_table->set_opp_data = NULL;
-}
-
/**
* dev_pm_opp_set_regulators() - Set regulator names for the device
* @dev: Device for which regulator name is being set.
@@ -1780,11 +1927,12 @@ struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
const char * const names[],
unsigned int count)
{
+ struct dev_pm_opp_supply *supplies;
struct opp_table *opp_table;
struct regulator *reg;
int ret, i;
- opp_table = _add_opp_table(dev);
+ opp_table = _add_opp_table(dev, false);
if (IS_ERR(opp_table))
return opp_table;
@@ -1821,10 +1969,19 @@ struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
opp_table->regulator_count = count;
- /* Allocate block only once to pass to set_opp() routines */
- ret = _allocate_set_opp_data(opp_table);
- if (ret)
+ supplies = kmalloc_array(count * 2, sizeof(*supplies), GFP_KERNEL);
+ if (!supplies) {
+ ret = -ENOMEM;
goto free_regulators;
+ }
+
+ mutex_lock(&opp_table->lock);
+ opp_table->sod_supplies = supplies;
+ if (opp_table->set_opp_data) {
+ opp_table->set_opp_data->old_opp.supplies = supplies;
+ opp_table->set_opp_data->new_opp.supplies = supplies + count;
+ }
+ mutex_unlock(&opp_table->lock);
return opp_table;
@@ -1867,7 +2024,15 @@ void dev_pm_opp_put_regulators(struct opp_table *opp_table)
for (i = opp_table->regulator_count - 1; i >= 0; i--)
regulator_put(opp_table->regulators[i]);
- _free_set_opp_data(opp_table);
+ mutex_lock(&opp_table->lock);
+ if (opp_table->set_opp_data) {
+ opp_table->set_opp_data->old_opp.supplies = NULL;
+ opp_table->set_opp_data->new_opp.supplies = NULL;
+ }
+
+ kfree(opp_table->sod_supplies);
+ opp_table->sod_supplies = NULL;
+ mutex_unlock(&opp_table->lock);
kfree(opp_table->regulators);
opp_table->regulators = NULL;
@@ -1895,7 +2060,7 @@ struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char *name)
struct opp_table *opp_table;
int ret;
- opp_table = _add_opp_table(dev);
+ opp_table = _add_opp_table(dev, false);
if (IS_ERR(opp_table))
return opp_table;
@@ -1905,9 +2070,11 @@ struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char *name)
goto err;
}
- /* Already have default clk set, free it */
- if (!IS_ERR(opp_table->clk))
- clk_put(opp_table->clk);
+ /* clk shouldn't be initialized at this point */
+ if (WARN_ON(opp_table->clk)) {
+ ret = -EBUSY;
+ goto err;
+ }
/* Find clk for the device */
opp_table->clk = clk_get(dev, name);
@@ -1961,12 +2128,13 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_put_clkname);
struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev,
int (*set_opp)(struct dev_pm_set_opp_data *data))
{
+ struct dev_pm_set_opp_data *data;
struct opp_table *opp_table;
if (!set_opp)
return ERR_PTR(-EINVAL);
- opp_table = _add_opp_table(dev);
+ opp_table = _add_opp_table(dev, false);
if (IS_ERR(opp_table))
return opp_table;
@@ -1977,8 +2145,23 @@ struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev,
}
/* Another CPU that shares the OPP table has set the helper ? */
- if (!opp_table->set_opp)
- opp_table->set_opp = set_opp;
+ if (opp_table->set_opp)
+ return opp_table;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_lock(&opp_table->lock);
+ opp_table->set_opp_data = data;
+ if (opp_table->sod_supplies) {
+ data->old_opp.supplies = opp_table->sod_supplies;
+ data->new_opp.supplies = opp_table->sod_supplies +
+ opp_table->regulator_count;
+ }
+ mutex_unlock(&opp_table->lock);
+
+ opp_table->set_opp = set_opp;
return opp_table;
}
@@ -2000,10 +2183,50 @@ void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table)
WARN_ON(!list_empty(&opp_table->opp_list));
opp_table->set_opp = NULL;
+
+ mutex_lock(&opp_table->lock);
+ kfree(opp_table->set_opp_data);
+ opp_table->set_opp_data = NULL;
+ mutex_unlock(&opp_table->lock);
+
dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_unregister_set_opp_helper);
+static void devm_pm_opp_unregister_set_opp_helper(void *data)
+{
+ dev_pm_opp_unregister_set_opp_helper(data);
+}
+
+/**
+ * devm_pm_opp_register_set_opp_helper() - Register custom set OPP helper
+ * @dev: Device for which the helper is getting registered.
+ * @set_opp: Custom set OPP helper.
+ *
+ * This is a resource-managed version of dev_pm_opp_register_set_opp_helper().
+ *
+ * Return: pointer to 'struct opp_table' on success and errno otherwise.
+ */
+struct opp_table *
+devm_pm_opp_register_set_opp_helper(struct device *dev,
+ int (*set_opp)(struct dev_pm_set_opp_data *data))
+{
+ struct opp_table *opp_table;
+ int err;
+
+ opp_table = dev_pm_opp_register_set_opp_helper(dev, set_opp);
+ if (IS_ERR(opp_table))
+ return opp_table;
+
+ err = devm_add_action_or_reset(dev, devm_pm_opp_unregister_set_opp_helper,
+ opp_table);
+ if (err)
+ return ERR_PTR(err);
+
+ return opp_table;
+}
+EXPORT_SYMBOL_GPL(devm_pm_opp_register_set_opp_helper);
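For reference, a hypothetical set_opp() callback shape, mirroring the ordering _generic_set_opp_regulator() uses (voltage before frequency when scaling up, after it when scaling down); new_opp.supplies is only populated when dev_pm_opp_set_regulators() was called, per the sod_supplies handling above, and real code must unwind on failure:

    static int foo_set_opp(struct dev_pm_set_opp_data *data)
    {
        struct dev_pm_opp_supply *new = &data->new_opp.supplies[0];
        bool up = data->new_opp.rate >= data->old_opp.rate;
        int ret;

        if (up && data->regulator_count) {
            ret = regulator_set_voltage_triplet(data->regulators[0],
                    new->u_volt_min, new->u_volt, new->u_volt_max);
            if (ret)
                return ret;
        }

        ret = clk_set_rate(data->clk, data->new_opp.rate);
        if (ret)
            return ret;

        if (!up && data->regulator_count)
            ret = regulator_set_voltage_triplet(data->regulators[0],
                    new->u_volt_min, new->u_volt, new->u_volt_max);

        return ret;
    }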
+
static void _opp_detach_genpd(struct opp_table *opp_table)
{
int index;
@@ -2053,7 +2276,7 @@ struct opp_table *dev_pm_opp_attach_genpd(struct device *dev,
int index = 0, ret = -EINVAL;
const char **name = names;
- opp_table = _add_opp_table(dev);
+ opp_table = _add_opp_table(dev, false);
if (IS_ERR(opp_table))
return opp_table;
@@ -2139,6 +2362,97 @@ void dev_pm_opp_detach_genpd(struct opp_table *opp_table)
}
EXPORT_SYMBOL_GPL(dev_pm_opp_detach_genpd);
+static void devm_pm_opp_detach_genpd(void *data)
+{
+ dev_pm_opp_detach_genpd(data);
+}
+
+/**
+ * devm_pm_opp_attach_genpd - Attach genpd(s) for the device and save virtual
+ * device pointer
+ * @dev: Consumer device for which the genpd is getting attached.
+ * @names: Null terminated array of pointers containing names of genpd to attach.
+ * @virt_devs: Pointer to return the array of virtual devices.
+ *
+ * This is a resource-managed version of dev_pm_opp_attach_genpd().
+ *
+ * Return: pointer to 'struct opp_table' on success and errno otherwise.
+ */
+struct opp_table *
+devm_pm_opp_attach_genpd(struct device *dev, const char **names,
+ struct device ***virt_devs)
+{
+ struct opp_table *opp_table;
+ int err;
+
+ opp_table = dev_pm_opp_attach_genpd(dev, names, virt_devs);
+ if (IS_ERR(opp_table))
+ return opp_table;
+
+ err = devm_add_action_or_reset(dev, devm_pm_opp_detach_genpd,
+ opp_table);
+ if (err)
+ return ERR_PTR(err);
+
+ return opp_table;
+}
+EXPORT_SYMBOL_GPL(devm_pm_opp_attach_genpd);
+
+/**
+ * dev_pm_opp_xlate_required_opp() - Find required OPP for @src_table OPP.
+ * @src_table: OPP table which has @dst_table as one of its required OPP tables.
+ * @dst_table: Required OPP table of the @src_table.
+ * @src_opp: OPP from the @src_table.
+ *
+ * This function returns the OPP (present in @dst_table) pointed out by the
+ * "required-opps" property of the @src_opp (present in @src_table).
+ *
+ * The callers are required to call dev_pm_opp_put() for the returned OPP after
+ * use.
+ *
+ * Return: pointer to 'struct dev_pm_opp' on success and errno otherwise.
+ */
+struct dev_pm_opp *dev_pm_opp_xlate_required_opp(struct opp_table *src_table,
+ struct opp_table *dst_table,
+ struct dev_pm_opp *src_opp)
+{
+ struct dev_pm_opp *opp, *dest_opp = ERR_PTR(-ENODEV);
+ int i;
+
+ if (!src_table || !dst_table || !src_opp ||
+ !src_table->required_opp_tables)
+ return ERR_PTR(-EINVAL);
+
+ /* required-opps not fully initialized yet */
+ if (lazy_linking_pending(src_table))
+ return ERR_PTR(-EBUSY);
+
+ for (i = 0; i < src_table->required_opp_count; i++) {
+ if (src_table->required_opp_tables[i] == dst_table) {
+ mutex_lock(&src_table->lock);
+
+ list_for_each_entry(opp, &src_table->opp_list, node) {
+ if (opp == src_opp) {
+ dest_opp = opp->required_opps[i];
+ dev_pm_opp_get(dest_opp);
+ break;
+ }
+ }
+
+ mutex_unlock(&src_table->lock);
+ break;
+ }
+ }
+
+ if (IS_ERR(dest_opp)) {
+ pr_err("%s: Couldn't find matching OPP (%p: %p)\n", __func__,
+ src_table, dst_table);
+ }
+
+ return dest_opp;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_xlate_required_opp);
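A hypothetical use, translating a device's OPP into the matching OPP of its power domain's table (dev_table, genpd_table and opp are placeholders):

    struct dev_pm_opp *pd_opp;

    pd_opp = dev_pm_opp_xlate_required_opp(dev_table, genpd_table, opp);
    if (!IS_ERR(pd_opp)) {
        /* program the domain from pd_opp's properties */
        dev_pm_opp_put(pd_opp);
    }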
+
/**
* dev_pm_opp_xlate_performance_state() - Find required OPP's pstate for src_table.
* @src_table: OPP table which has dst_table as one of its required OPP table.
@@ -2167,9 +2481,13 @@ int dev_pm_opp_xlate_performance_state(struct opp_table *src_table,
* and so none of them have the "required-opps" property set. Return the
* pstate of the src_table as it is in such cases.
*/
- if (!src_table->required_opp_count)
+ if (!src_table || !src_table->required_opp_count)
return pstate;
+ /* required-opps not fully initialized yet */
+ if (lazy_linking_pending(src_table))
+ return -EBUSY;
+
for (i = 0; i < src_table->required_opp_count; i++) {
if (src_table->required_opp_tables[i]->np == dst_table->np)
break;
@@ -2221,7 +2539,7 @@ int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
struct opp_table *opp_table;
int ret;
- opp_table = _add_opp_table(dev);
+ opp_table = _add_opp_table(dev, true);
if (IS_ERR(opp_table))
return PTR_ERR(opp_table);
@@ -2499,3 +2817,44 @@ void dev_pm_opp_remove_table(struct device *dev)
dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table);
+
+/**
+ * dev_pm_opp_sync_regulators() - Sync state of voltage regulators
+ * @dev: device for which we do this operation
+ *
+ * Sync voltage state of the OPP table regulators.
+ *
+ * Return: 0 on success or a negative error value.
+ */
+int dev_pm_opp_sync_regulators(struct device *dev)
+{
+ struct opp_table *opp_table;
+ struct regulator *reg;
+ int i, ret = 0;
+
+ /* Device may not have OPP table */
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table))
+ return 0;
+
+ /* Regulator may not be required for the device */
+ if (unlikely(!opp_table->regulators))
+ goto put_table;
+
+ /* Nothing to sync if voltage wasn't changed */
+ if (!opp_table->enabled)
+ goto put_table;
+
+ for (i = 0; i < opp_table->regulator_count; i++) {
+ reg = opp_table->regulators[i];
+ ret = regulator_sync_voltage(reg);
+ if (ret)
+ break;
+ }
+put_table:
+ /* Drop reference taken by _find_opp_table() */
+ dev_pm_opp_put_opp_table(opp_table);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_sync_regulators);
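A hypothetical resume-path sketch for dev_pm_opp_sync_regulators(); the callback itself is invented, but per the code above the call is safe even for devices without an OPP table or regulators:

static int my_resume(struct device *dev)
{
	/* Returns 0 when there is no OPP table / no regulators to sync. */
	return dev_pm_opp_sync_regulators(dev);
}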
diff --git a/drivers/opp/of.c b/drivers/opp/of.c
index 03cb387236c4..f480c10e6314 100644
--- a/drivers/opp/of.c
+++ b/drivers/opp/of.c
@@ -144,7 +144,7 @@ static void _opp_table_free_required_tables(struct opp_table *opp_table)
for (i = 0; i < opp_table->required_opp_count; i++) {
if (IS_ERR_OR_NULL(required_opp_tables[i]))
- break;
+ continue;
dev_pm_opp_put_opp_table(required_opp_tables[i]);
}
@@ -153,6 +153,7 @@ static void _opp_table_free_required_tables(struct opp_table *opp_table)
opp_table->required_opp_count = 0;
opp_table->required_opp_tables = NULL;
+ list_del(&opp_table->lazy);
}
/*
@@ -165,6 +166,7 @@ static void _opp_table_alloc_required_tables(struct opp_table *opp_table,
{
struct opp_table **required_opp_tables;
struct device_node *required_np, *np;
+ bool lazy = false;
int count, i;
/* Traversing the first OPP node is all we need */
@@ -195,8 +197,10 @@ static void _opp_table_alloc_required_tables(struct opp_table *opp_table,
required_opp_tables[i] = _find_table_of_opp_np(required_np);
of_node_put(required_np);
- if (IS_ERR(required_opp_tables[i]))
- goto free_required_tables;
+ if (IS_ERR(required_opp_tables[i])) {
+ lazy = true;
+ continue;
+ }
/*
* We only support genpd's OPPs in the "required-opps" for now,
@@ -210,6 +214,10 @@ static void _opp_table_alloc_required_tables(struct opp_table *opp_table,
}
}
+ /* Let's do the linking later on */
+ if (lazy)
+ list_add(&opp_table->lazy, &lazy_opp_tables);
+
goto put_np;
free_required_tables:
@@ -278,14 +286,14 @@ void _of_opp_free_required_opps(struct opp_table *opp_table,
for (i = 0; i < opp_table->required_opp_count; i++) {
if (!required_opps[i])
- break;
+ continue;
/* Put the reference back */
dev_pm_opp_put(required_opps[i]);
}
- kfree(required_opps);
opp->required_opps = NULL;
+ kfree(required_opps);
}
/* Populate all required OPPs which are part of "required-opps" list */
@@ -309,6 +317,10 @@ static int _of_opp_alloc_required_opps(struct opp_table *opp_table,
for (i = 0; i < count; i++) {
required_table = opp_table->required_opp_tables[i];
+ /* Required table not added yet, we will link later */
+ if (IS_ERR_OR_NULL(required_table))
+ continue;
+
np = of_parse_required_opp(opp->np, i);
if (unlikely(!np)) {
ret = -ENODEV;
@@ -334,6 +346,104 @@ free_required_opps:
return ret;
}
+/* Link required OPPs for an individual OPP */
+static int lazy_link_required_opps(struct opp_table *opp_table,
+ struct opp_table *new_table, int index)
+{
+ struct device_node *required_np;
+ struct dev_pm_opp *opp;
+
+ list_for_each_entry(opp, &opp_table->opp_list, node) {
+ required_np = of_parse_required_opp(opp->np, index);
+ if (unlikely(!required_np))
+ return -ENODEV;
+
+ opp->required_opps[index] = _find_opp_of_np(new_table, required_np);
+ of_node_put(required_np);
+
+ if (!opp->required_opps[index]) {
+ pr_err("%s: Unable to find required OPP node: %pOF (%d)\n",
+ __func__, opp->np, index);
+ return -ENODEV;
+ }
+ }
+
+ return 0;
+}
+
+/* Link required OPPs for all OPPs of the newly added OPP table */
+static void lazy_link_required_opp_table(struct opp_table *new_table)
+{
+ struct opp_table *opp_table, *temp, **required_opp_tables;
+ struct device_node *required_np, *opp_np, *required_table_np;
+ struct dev_pm_opp *opp;
+ int i, ret;
+
+ /*
+ * We only support genpd's OPPs in the "required-opps" for now,
+ * as we don't know much about other cases.
+ */
+ if (!new_table->is_genpd)
+ return;
+
+ mutex_lock(&opp_table_lock);
+
+ list_for_each_entry_safe(opp_table, temp, &lazy_opp_tables, lazy) {
+ bool lazy = false;
+
+ /* opp_np can't be invalid here */
+ opp_np = of_get_next_available_child(opp_table->np, NULL);
+
+ for (i = 0; i < opp_table->required_opp_count; i++) {
+ required_opp_tables = opp_table->required_opp_tables;
+
+ /* Required opp-table is already parsed */
+ if (!IS_ERR(required_opp_tables[i]))
+ continue;
+
+ /* required_np can't be invalid here */
+ required_np = of_parse_required_opp(opp_np, i);
+ required_table_np = of_get_parent(required_np);
+
+ of_node_put(required_table_np);
+ of_node_put(required_np);
+
+ /*
+ * Newly added table isn't the required opp-table for
+ * opp_table.
+ */
+ if (required_table_np != new_table->np) {
+ lazy = true;
+ continue;
+ }
+
+ required_opp_tables[i] = new_table;
+ _get_opp_table_kref(new_table);
+
+ /* Link OPPs now */
+ ret = lazy_link_required_opps(opp_table, new_table, i);
+ if (ret) {
+ /* The OPPs will be marked unusable */
+ lazy = false;
+ break;
+ }
+ }
+
+ of_node_put(opp_np);
+
+ /* All required opp-tables found, remove from lazy list */
+ if (!lazy) {
+ list_del(&opp_table->lazy);
+ INIT_LIST_HEAD(&opp_table->lazy);
+
+ list_for_each_entry(opp, &opp_table->opp_list, node)
+ _required_opps_available(opp, opp_table->required_opp_count);
+ }
+ }
+
+ mutex_unlock(&opp_table_lock);
+}
+
static int _bandwidth_supported(struct device *dev, struct opp_table *opp_table)
{
struct device_node *np, *opp_np;
@@ -755,7 +865,6 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
struct device *dev, struct device_node *np)
{
struct dev_pm_opp *new_opp;
- u64 rate = 0;
u32 val;
int ret;
bool rate_not_available = false;
@@ -772,7 +881,8 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
/* Check if the OPP supports hardware's hierarchy of versions or not */
if (!_opp_is_supported(dev, opp_table, np)) {
- dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate);
+ dev_dbg(dev, "OPP not supported by hardware: %lu\n",
+ new_opp->rate);
goto free_opp;
}
@@ -822,10 +932,11 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
if (new_opp->clock_latency_ns > opp_table->clock_latency_ns_max)
opp_table->clock_latency_ns_max = new_opp->clock_latency_ns;
- pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
+ pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu level:%u\n",
__func__, new_opp->turbo, new_opp->rate,
new_opp->supplies[0].u_volt, new_opp->supplies[0].u_volt_min,
- new_opp->supplies[0].u_volt_max, new_opp->clock_latency_ns);
+ new_opp->supplies[0].u_volt_max, new_opp->clock_latency_ns,
+ new_opp->level);
/*
* Notify the changes in the availability of the operable
@@ -888,6 +999,8 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)
}
}
+ lazy_link_required_opp_table(opp_table);
+
return 0;
remove_static_opp:
@@ -956,29 +1069,23 @@ remove_static_opp:
return ret;
}
-/**
- * dev_pm_opp_of_add_table() - Initialize opp table from device tree
- * @dev: device pointer used to lookup OPP table.
- *
- * Register the initial OPP table with the OPP library for given device.
- *
- * Return:
- * 0 On success OR
- * Duplicate OPPs (both freq and volt are same) and opp->available
- * -EEXIST Freq are same and volt are different OR
- * Duplicate OPPs (both freq and volt are same) and !opp->available
- * -ENOMEM Memory allocation failure
- * -ENODEV when 'operating-points' property is not found or is invalid data
- * in device node.
- * -ENODATA when empty 'operating-points' property is found
- * -EINVAL when invalid entries are found in opp-v2 table
- */
-int dev_pm_opp_of_add_table(struct device *dev)
+static int _of_add_table_indexed(struct device *dev, int index, bool getclk)
{
struct opp_table *opp_table;
- int ret;
+ int ret, count;
- opp_table = _add_opp_table_indexed(dev, 0);
+ if (index) {
+ /*
+ * If only one phandle is present, then the same OPP table
+ * applies for all index requests.
+ */
+ count = of_count_phandle_with_args(dev->of_node,
+ "operating-points-v2", NULL);
+ if (count == 1)
+ index = 0;
+ }
+
+ opp_table = _add_opp_table_indexed(dev, index, getclk);
if (IS_ERR(opp_table))
return PTR_ERR(opp_table);
@@ -996,15 +1103,12 @@ int dev_pm_opp_of_add_table(struct device *dev)
return ret;
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
/**
- * dev_pm_opp_of_add_table_indexed() - Initialize indexed opp table from device tree
+ * dev_pm_opp_of_add_table() - Initialize opp table from device tree
* @dev: device pointer used to lookup OPP table.
- * @index: Index number.
*
- * Register the initial OPP table with the OPP library for given device only
- * using the "operating-points-v2" property.
+ * Register the initial OPP table with the OPP library for the given device.
*
* Return:
* 0 On success OR
@@ -1017,34 +1121,46 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
* -ENODATA when empty 'operating-points' property is found
* -EINVAL when invalid entries are found in opp-v2 table
*/
-int dev_pm_opp_of_add_table_indexed(struct device *dev, int index)
+int dev_pm_opp_of_add_table(struct device *dev)
{
- struct opp_table *opp_table;
- int ret, count;
-
- if (index) {
- /*
- * If only one phandle is present, then the same OPP table
- * applies for all index requests.
- */
- count = of_count_phandle_with_args(dev->of_node,
- "operating-points-v2", NULL);
- if (count == 1)
- index = 0;
- }
-
- opp_table = _add_opp_table_indexed(dev, index);
- if (IS_ERR(opp_table))
- return PTR_ERR(opp_table);
-
- ret = _of_add_opp_table_v2(dev, opp_table);
- if (ret)
- dev_pm_opp_put_opp_table(opp_table);
+ return _of_add_table_indexed(dev, 0, true);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
- return ret;
+/**
+ * dev_pm_opp_of_add_table_indexed() - Initialize indexed opp table from device tree
+ * @dev: device pointer used to lookup OPP table.
+ * @index: Index number.
+ *
+ * Register the initial OPP table with the OPP library for the given device,
+ * using only the "operating-points-v2" property.
+ *
+ * Return: Refer to dev_pm_opp_of_add_table() for return values.
+ */
+int dev_pm_opp_of_add_table_indexed(struct device *dev, int index)
+{
+ return _of_add_table_indexed(dev, index, true);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table_indexed);
+/**
+ * dev_pm_opp_of_add_table_noclk() - Initialize indexed opp table from device
+ * tree without getting the clk for the device.
+ * @dev: device pointer used to lookup OPP table.
+ * @index: Index number.
+ *
+ * Register the initial OPP table with the OPP library for the given device,
+ * using only the "operating-points-v2" property, and do not try to get the
+ * clk for the device.
+ *
+ * Return: Refer to dev_pm_opp_of_add_table() for return values.
+ */
+int dev_pm_opp_of_add_table_noclk(struct device *dev, int index)
+{
+ return _of_add_table_indexed(dev, index, false);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table_noclk);
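A hypothetical probe sketch for the new noclk variant, for consumers that manage their clock through some other mechanism; my_probe() and the index value 0 are illustrative:

static int my_probe(struct platform_device *pdev)
{
	int ret;

	/* Same as dev_pm_opp_of_add_table_indexed(), minus the clk lookup. */
	ret = dev_pm_opp_of_add_table_noclk(&pdev->dev, 0);
	if (ret)
		return ret;

	return 0;
}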
+
/* CPU device specific helpers */
/**
diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h
index 4ced7ffa8158..9b9daf83b074 100644
--- a/drivers/opp/opp.h
+++ b/drivers/opp/opp.h
@@ -26,7 +26,7 @@ struct regulator;
/* Lock to allow exclusive modification to the device and opp lists */
extern struct mutex opp_table_lock;
-extern struct list_head opp_tables;
+extern struct list_head opp_tables, lazy_opp_tables;
/*
* Internal data structure organization with the OPP layer library is as
@@ -135,6 +135,7 @@ enum opp_table_access {
* @clock_latency_ns_max: Max clock latency in nanoseconds.
* @parsed_static_opps: Count of devices for which OPPs are initialized from DT.
* @shared_opp: OPP is shared between multiple devices.
+ * @current_opp: Currently configured OPP for the table.
* @suspend_opp: Pointer to OPP to be used during device suspend.
* @genpd_virt_dev_lock: Mutex protecting the genpd virtual device pointers.
* @genpd_virt_devs: List of virtual devices for multiple genpd support.
@@ -155,6 +156,7 @@ enum opp_table_access {
* @genpd_performance_state: Device's power domain support performance state.
* @is_genpd: Marks if the OPP table belongs to a genpd.
* @set_opp: Platform specific set_opp callback
+ * @sod_supplies: Set opp data supplies
* @set_opp_data: Data to be passed to set_opp callback
* @dentry: debugfs dentry pointer of the real device directory (not links).
* @dentry_name: Name of the real dentry.
@@ -166,7 +168,7 @@ enum opp_table_access {
* meant for book keeping and private to OPP library.
*/
struct opp_table {
- struct list_head node;
+ struct list_head node, lazy;
struct blocking_notifier_head head;
struct list_head dev_list;
@@ -182,6 +184,7 @@ struct opp_table {
unsigned int parsed_static_opps;
enum opp_table_access shared_opp;
+ struct dev_pm_opp *current_opp;
struct dev_pm_opp *suspend_opp;
struct mutex genpd_virt_dev_lock;
@@ -202,6 +205,7 @@ struct opp_table {
bool is_genpd;
int (*set_opp)(struct dev_pm_set_opp_data *data);
+ struct dev_pm_opp_supply *sod_supplies;
struct dev_pm_set_opp_data *set_opp_data;
#ifdef CONFIG_DEBUG_FS
@@ -223,9 +227,14 @@ int _opp_compare_key(struct dev_pm_opp *opp1, struct dev_pm_opp *opp2);
int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, struct opp_table *opp_table, bool rate_not_available);
int _opp_add_v1(struct opp_table *opp_table, struct device *dev, unsigned long freq, long u_volt, bool dynamic);
void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, int last_cpu);
-struct opp_table *_add_opp_table(struct device *dev);
-struct opp_table *_add_opp_table_indexed(struct device *dev, int index);
+struct opp_table *_add_opp_table_indexed(struct device *dev, int index, bool getclk);
void _put_opp_list_kref(struct opp_table *opp_table);
+void _required_opps_available(struct dev_pm_opp *opp, int count);
+
+static inline bool lazy_linking_pending(struct opp_table *opp_table)
+{
+ return unlikely(!list_empty(&opp_table->lazy));
+}
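An illustrative guard mirroring the dev_pm_opp_xlate_*() changes earlier in this patch; my_required_opp_user() is an invented internal helper. Any path that dereferences required_opp_tables[] has to bail out while the table still sits on the lazy list:

static int my_required_opp_user(struct opp_table *opp_table)
{
	/* required-opps not fully linked, see lazy_link_required_opp_table() */
	if (lazy_linking_pending(opp_table))
		return -EBUSY;

	/* required_opp_tables[] entries are valid from here on. */
	return 0;
}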
#ifdef CONFIG_OF
void _of_init_opp_table(struct opp_table *opp_table, struct device *dev, int index);
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 522d2b974e91..11cc79411e2d 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -5,7 +5,7 @@
obj-$(CONFIG_PCI) += access.o bus.o probe.o host-bridge.o \
remove.o pci.o pci-driver.o search.o \
pci-sysfs.o rom.o setup-res.o irq.o vpd.o \
- setup-bus.o vc.o mmap.o setup-irq.o
+ setup-bus.o vc.o mmap.o setup-irq.o msi.o
obj-$(CONFIG_PCI) += pcie/
@@ -18,7 +18,6 @@ endif
obj-$(CONFIG_OF) += of.o
obj-$(CONFIG_PCI_QUIRKS) += quirks.o
obj-$(CONFIG_HOTPLUG_PCI) += hotplug/
-obj-$(CONFIG_PCI_MSI) += msi.o
obj-$(CONFIG_PCI_ATS) += ats.o
obj-$(CONFIG_PCI_IOV) += iov.o
obj-$(CONFIG_PCI_BRIDGE_EMUL) += pci-bridge-emul.o
diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c
index 586b9d69fa5e..dac1ac8a7615 100644
--- a/drivers/pci/controller/cadence/pci-j721e.c
+++ b/drivers/pci/controller/cadence/pci-j721e.c
@@ -12,6 +12,7 @@
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/mfd/syscon.h>
+#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/pci.h>
@@ -153,7 +154,8 @@ static const struct cdns_pcie_ops j721e_pcie_ops = {
.link_up = j721e_pcie_link_up,
};
-static int j721e_pcie_set_mode(struct j721e_pcie *pcie, struct regmap *syscon)
+static int j721e_pcie_set_mode(struct j721e_pcie *pcie, struct regmap *syscon,
+ unsigned int offset)
{
struct device *dev = pcie->dev;
u32 mask = J721E_MODE_RC;
@@ -164,7 +166,7 @@ static int j721e_pcie_set_mode(struct j721e_pcie *pcie, struct regmap *syscon)
if (mode == PCI_MODE_RC)
val = J721E_MODE_RC;
- ret = regmap_update_bits(syscon, 0, mask, val);
+ ret = regmap_update_bits(syscon, offset, mask, val);
if (ret)
dev_err(dev, "failed to set pcie mode\n");
@@ -172,7 +174,7 @@ static int j721e_pcie_set_mode(struct j721e_pcie *pcie, struct regmap *syscon)
}
static int j721e_pcie_set_link_speed(struct j721e_pcie *pcie,
- struct regmap *syscon)
+ struct regmap *syscon, unsigned int offset)
{
struct device *dev = pcie->dev;
struct device_node *np = dev->of_node;
@@ -185,7 +187,7 @@ static int j721e_pcie_set_link_speed(struct j721e_pcie *pcie,
link_speed = 2;
val = link_speed - 1;
- ret = regmap_update_bits(syscon, 0, GENERATION_SEL_MASK, val);
+ ret = regmap_update_bits(syscon, offset, GENERATION_SEL_MASK, val);
if (ret)
dev_err(dev, "failed to set link speed\n");
@@ -193,7 +195,7 @@ static int j721e_pcie_set_link_speed(struct j721e_pcie *pcie,
}
static int j721e_pcie_set_lane_count(struct j721e_pcie *pcie,
- struct regmap *syscon)
+ struct regmap *syscon, unsigned int offset)
{
struct device *dev = pcie->dev;
u32 lanes = pcie->num_lanes;
@@ -201,7 +203,7 @@ static int j721e_pcie_set_lane_count(struct j721e_pcie *pcie,
int ret;
val = LANE_COUNT(lanes - 1);
- ret = regmap_update_bits(syscon, 0, LANE_COUNT_MASK, val);
+ ret = regmap_update_bits(syscon, offset, LANE_COUNT_MASK, val);
if (ret)
dev_err(dev, "failed to set link count\n");
@@ -212,6 +214,8 @@ static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie)
{
struct device *dev = pcie->dev;
struct device_node *node = dev->of_node;
+ struct of_phandle_args args;
+ unsigned int offset = 0;
struct regmap *syscon;
int ret;
@@ -221,19 +225,25 @@ static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie)
return PTR_ERR(syscon);
}
- ret = j721e_pcie_set_mode(pcie, syscon);
+ /* Do not error out to maintain old DT compatibility */
+ ret = of_parse_phandle_with_fixed_args(node, "ti,syscon-pcie-ctrl", 1,
+ 0, &args);
+ if (!ret)
+ offset = args.args[0];
+
+ ret = j721e_pcie_set_mode(pcie, syscon, offset);
if (ret < 0) {
dev_err(dev, "Failed to set pci mode\n");
return ret;
}
- ret = j721e_pcie_set_link_speed(pcie, syscon);
+ ret = j721e_pcie_set_link_speed(pcie, syscon, offset);
if (ret < 0) {
dev_err(dev, "Failed to set link speed\n");
return ret;
}
- ret = j721e_pcie_set_lane_count(pcie, syscon);
+ ret = j721e_pcie_set_lane_count(pcie, syscon, offset);
if (ret < 0) {
dev_err(dev, "Failed to set num-lanes\n");
return ret;
diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
index 84cc58dc8512..9e2b024d32f2 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-ep.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
@@ -530,12 +530,9 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
}
pcie->mem_res = res;
- ret = of_property_read_u32(np, "cdns,max-outbound-regions",
- &ep->max_regions);
- if (ret < 0) {
- dev_err(dev, "missing \"cdns,max-outbound-regions\"\n");
- return ret;
- }
+ ep->max_regions = CDNS_PCIE_MAX_OB;
+ of_property_read_u32(np, "cdns,max-outbound-regions", &ep->max_regions);
+
ep->ob_addr = devm_kcalloc(dev,
ep->max_regions, sizeof(*ep->ob_addr),
GFP_KERNEL);
diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h
index feed1e3038f4..30eba6cafe2c 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.h
+++ b/drivers/pci/controller/cadence/pcie-cadence.h
@@ -197,6 +197,7 @@ enum cdns_pcie_rp_bar {
};
#define CDNS_PCIE_RP_MAX_IB 0x3
+#define CDNS_PCIE_MAX_OB 32
struct cdns_pcie_rp_ib_bar {
u64 size;
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index bc049865f8e0..22c5529e9a65 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -83,10 +83,15 @@ config PCIE_DW_PLAT_EP
selected.
config PCI_EXYNOS
- bool "Samsung Exynos PCIe controller"
- depends on SOC_EXYNOS5440 || COMPILE_TEST
+ tristate "Samsung Exynos PCIe controller"
+ depends on ARCH_EXYNOS || COMPILE_TEST
depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW_HOST
+ help
+ Enables support for the PCIe controller in the Samsung Exynos SoCs
+ to work in host mode. The controller is based on the DesignWare
+ hardware, so the driver reuses the DesignWare core functions to
+ implement it.
config PCI_IMX6
bool "Freescale i.MX6/7/8 PCIe controller"
@@ -107,7 +112,7 @@ config PCI_KEYSTONE
config PCI_KEYSTONE_HOST
bool "PCI Keystone Host Mode"
- depends on ARCH_KEYSTONE || ARCH_K3 || ((ARM || ARM64) && COMPILE_TEST)
+ depends on ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST
depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW_HOST
select PCI_KEYSTONE
@@ -119,7 +124,7 @@ config PCI_KEYSTONE_HOST
config PCI_KEYSTONE_EP
bool "PCI Keystone Endpoint Mode"
- depends on ARCH_KEYSTONE || ARCH_K3 || ((ARM || ARM64) && COMPILE_TEST)
+ depends on ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST
depends on PCI_ENDPOINT
select PCIE_DW_EP
select PCI_KEYSTONE
@@ -169,6 +174,7 @@ config PCIE_QCOM
depends on OF && (ARCH_QCOM || COMPILE_TEST)
depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW_HOST
+ select CRC8
help
Say Y here to enable PCIe controller support on Qualcomm SoCs. The
PCIe controller uses the DesignWare core plus Qualcomm-specific
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index 6d012d2b1e90..b105af63854a 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -181,11 +181,6 @@ static int dra7xx_pcie_host_init(struct pcie_port *pp)
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
- dw_pcie_setup_rc(pp);
-
- dra7xx_pcie_establish_link(pci);
- dw_pcie_wait_for_link(pci);
- dw_pcie_msi_init(pp);
dra7xx_pcie_enable_interrupts(dra7xx);
return 0;
@@ -377,133 +372,8 @@ static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
return 0;
}
-static void dra7xx_pcie_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
-{
- struct pcie_port *pp = irq_data_get_irq_chip_data(d);
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- u64 msi_target;
-
- msi_target = (u64)pp->msi_data;
-
- msg->address_lo = lower_32_bits(msi_target);
- msg->address_hi = upper_32_bits(msi_target);
-
- msg->data = d->hwirq;
-
- dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
- (int)d->hwirq, msg->address_hi, msg->address_lo);
-}
-
-static int dra7xx_pcie_msi_set_affinity(struct irq_data *d,
- const struct cpumask *mask,
- bool force)
-{
- return -EINVAL;
-}
-
-static void dra7xx_pcie_bottom_mask(struct irq_data *d)
-{
- struct pcie_port *pp = irq_data_get_irq_chip_data(d);
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- unsigned int res, bit, ctrl;
- unsigned long flags;
-
- raw_spin_lock_irqsave(&pp->lock, flags);
-
- ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
- res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
- bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
-
- pp->irq_mask[ctrl] |= BIT(bit);
- dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res,
- pp->irq_mask[ctrl]);
-
- raw_spin_unlock_irqrestore(&pp->lock, flags);
-}
-
-static void dra7xx_pcie_bottom_unmask(struct irq_data *d)
-{
- struct pcie_port *pp = irq_data_get_irq_chip_data(d);
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- unsigned int res, bit, ctrl;
- unsigned long flags;
-
- raw_spin_lock_irqsave(&pp->lock, flags);
-
- ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
- res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
- bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
-
- pp->irq_mask[ctrl] &= ~BIT(bit);
- dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res,
- pp->irq_mask[ctrl]);
-
- raw_spin_unlock_irqrestore(&pp->lock, flags);
-}
-
-static void dra7xx_pcie_bottom_ack(struct irq_data *d)
-{
- struct pcie_port *pp = irq_data_get_irq_chip_data(d);
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- unsigned int res, bit, ctrl;
-
- ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
- res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
- bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
-
- dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_STATUS + res, BIT(bit));
-}
-
-static struct irq_chip dra7xx_pci_msi_bottom_irq_chip = {
- .name = "DRA7XX-PCI-MSI",
- .irq_ack = dra7xx_pcie_bottom_ack,
- .irq_compose_msi_msg = dra7xx_pcie_setup_msi_msg,
- .irq_set_affinity = dra7xx_pcie_msi_set_affinity,
- .irq_mask = dra7xx_pcie_bottom_mask,
- .irq_unmask = dra7xx_pcie_bottom_unmask,
-};
-
-static int dra7xx_pcie_msi_host_init(struct pcie_port *pp)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct device *dev = pci->dev;
- u32 ctrl, num_ctrls;
- int ret;
-
- pp->msi_irq_chip = &dra7xx_pci_msi_bottom_irq_chip;
-
- num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
- /* Initialize IRQ Status array */
- for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
- pp->irq_mask[ctrl] = ~0;
- dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
- (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
- pp->irq_mask[ctrl]);
- dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +
- (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
- ~0);
- }
-
- ret = dw_pcie_allocate_domains(pp);
- if (ret)
- return ret;
-
- pp->msi_data = dma_map_single_attrs(dev, &pp->msi_msg,
- sizeof(pp->msi_msg),
- DMA_FROM_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC);
- ret = dma_mapping_error(dev, pp->msi_data);
- if (ret) {
- dev_err(dev, "Failed to map MSI data\n");
- pp->msi_data = 0;
- dw_pcie_free_msi(pp);
- }
- return ret;
-}
-
static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = {
.host_init = dra7xx_pcie_host_init,
- .msi_host_init = dra7xx_pcie_msi_host_init,
};
static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
@@ -578,7 +448,6 @@ static int __init dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx,
{
int ret;
struct dw_pcie_ep *ep;
- struct resource *res;
struct device *dev = &pdev->dev;
struct dw_pcie *pci = dra7xx->pci;
@@ -594,13 +463,6 @@ static int __init dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx,
if (IS_ERR(pci->dbi_base2))
return PTR_ERR(pci->dbi_base2);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
- if (!res)
- return -EINVAL;
-
- ep->phys_base = res->start;
- ep->addr_size = resource_size(res);
-
ret = dw_pcie_ep_init(ep);
if (ret) {
dev_err(dev, "failed to initialize endpoint\n");
@@ -622,6 +484,9 @@ static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
if (pp->irq < 0)
return pp->irq;
+ /* MSI IRQ is muxed */
+ pp->msi_irq = -ENODEV;
+
ret = dra7xx_pcie_init_irq_domain(pp);
if (ret < 0)
return ret;
diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c
index 242683cde04a..c24dab383654 100644
--- a/drivers/pci/controller/dwc/pci-exynos.c
+++ b/drivers/pci/controller/dwc/pci-exynos.c
@@ -2,26 +2,23 @@
/*
* PCIe host controller driver for Samsung Exynos SoCs
*
- * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * Copyright (C) 2013-2020 Samsung Electronics Co., Ltd.
* https://www.samsung.com
*
* Author: Jingoo Han <jg1.han@samsung.com>
+ * Jaehoon Chung <jh80.chung@samsung.com>
*/
#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
-#include <linux/resource.h>
-#include <linux/signal.h>
-#include <linux/types.h>
+#include <linux/regulator/consumer.h>
#include "pcie-designware.h"
@@ -37,102 +34,43 @@
#define PCIE_IRQ_SPECIAL 0x008
#define PCIE_IRQ_EN_PULSE 0x00c
#define PCIE_IRQ_EN_LEVEL 0x010
-#define IRQ_MSI_ENABLE BIT(2)
#define PCIE_IRQ_EN_SPECIAL 0x014
-#define PCIE_PWR_RESET 0x018
+#define PCIE_SW_WAKE 0x018
+#define PCIE_BUS_EN BIT(1)
#define PCIE_CORE_RESET 0x01c
#define PCIE_CORE_RESET_ENABLE BIT(0)
#define PCIE_STICKY_RESET 0x020
#define PCIE_NONSTICKY_RESET 0x024
#define PCIE_APP_INIT_RESET 0x028
#define PCIE_APP_LTSSM_ENABLE 0x02c
-#define PCIE_ELBI_RDLH_LINKUP 0x064
+#define PCIE_ELBI_RDLH_LINKUP 0x074
+#define PCIE_ELBI_XMLH_LINKUP BIT(4)
#define PCIE_ELBI_LTSSM_ENABLE 0x1
#define PCIE_ELBI_SLV_AWMISC 0x11c
#define PCIE_ELBI_SLV_ARMISC 0x120
#define PCIE_ELBI_SLV_DBI_ENABLE BIT(21)
-struct exynos_pcie_mem_res {
- void __iomem *elbi_base; /* DT 0th resource: PCIe CTRL */
-};
-
-struct exynos_pcie_clk_res {
- struct clk *clk;
- struct clk *bus_clk;
-};
-
struct exynos_pcie {
- struct dw_pcie *pci;
- struct exynos_pcie_mem_res *mem_res;
- struct exynos_pcie_clk_res *clk_res;
- const struct exynos_pcie_ops *ops;
- int reset_gpio;
-
+ struct dw_pcie pci;
+ void __iomem *elbi_base;
+ struct clk *clk;
+ struct clk *bus_clk;
struct phy *phy;
+ struct regulator_bulk_data supplies[2];
};
-struct exynos_pcie_ops {
- int (*get_mem_resources)(struct platform_device *pdev,
- struct exynos_pcie *ep);
- int (*get_clk_resources)(struct exynos_pcie *ep);
- int (*init_clk_resources)(struct exynos_pcie *ep);
- void (*deinit_clk_resources)(struct exynos_pcie *ep);
-};
-
-static int exynos5440_pcie_get_mem_resources(struct platform_device *pdev,
- struct exynos_pcie *ep)
-{
- struct dw_pcie *pci = ep->pci;
- struct device *dev = pci->dev;
-
- ep->mem_res = devm_kzalloc(dev, sizeof(*ep->mem_res), GFP_KERNEL);
- if (!ep->mem_res)
- return -ENOMEM;
-
- ep->mem_res->elbi_base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(ep->mem_res->elbi_base))
- return PTR_ERR(ep->mem_res->elbi_base);
-
- return 0;
-}
-
-static int exynos5440_pcie_get_clk_resources(struct exynos_pcie *ep)
+static int exynos_pcie_init_clk_resources(struct exynos_pcie *ep)
{
- struct dw_pcie *pci = ep->pci;
- struct device *dev = pci->dev;
-
- ep->clk_res = devm_kzalloc(dev, sizeof(*ep->clk_res), GFP_KERNEL);
- if (!ep->clk_res)
- return -ENOMEM;
-
- ep->clk_res->clk = devm_clk_get(dev, "pcie");
- if (IS_ERR(ep->clk_res->clk)) {
- dev_err(dev, "Failed to get pcie rc clock\n");
- return PTR_ERR(ep->clk_res->clk);
- }
-
- ep->clk_res->bus_clk = devm_clk_get(dev, "pcie_bus");
- if (IS_ERR(ep->clk_res->bus_clk)) {
- dev_err(dev, "Failed to get pcie bus clock\n");
- return PTR_ERR(ep->clk_res->bus_clk);
- }
-
- return 0;
-}
-
-static int exynos5440_pcie_init_clk_resources(struct exynos_pcie *ep)
-{
- struct dw_pcie *pci = ep->pci;
- struct device *dev = pci->dev;
+ struct device *dev = ep->pci.dev;
int ret;
- ret = clk_prepare_enable(ep->clk_res->clk);
+ ret = clk_prepare_enable(ep->clk);
if (ret) {
dev_err(dev, "cannot enable pcie rc clock");
return ret;
}
- ret = clk_prepare_enable(ep->clk_res->bus_clk);
+ ret = clk_prepare_enable(ep->bus_clk);
if (ret) {
dev_err(dev, "cannot enable pcie bus clock");
goto err_bus_clk;
@@ -141,24 +79,17 @@ static int exynos5440_pcie_init_clk_resources(struct exynos_pcie *ep)
return 0;
err_bus_clk:
- clk_disable_unprepare(ep->clk_res->clk);
+ clk_disable_unprepare(ep->clk);
return ret;
}
-static void exynos5440_pcie_deinit_clk_resources(struct exynos_pcie *ep)
+static void exynos_pcie_deinit_clk_resources(struct exynos_pcie *ep)
{
- clk_disable_unprepare(ep->clk_res->bus_clk);
- clk_disable_unprepare(ep->clk_res->clk);
+ clk_disable_unprepare(ep->bus_clk);
+ clk_disable_unprepare(ep->clk);
}
-static const struct exynos_pcie_ops exynos5440_pcie_ops = {
- .get_mem_resources = exynos5440_pcie_get_mem_resources,
- .get_clk_resources = exynos5440_pcie_get_clk_resources,
- .init_clk_resources = exynos5440_pcie_init_clk_resources,
- .deinit_clk_resources = exynos5440_pcie_deinit_clk_resources,
-};
-
static void exynos_pcie_writel(void __iomem *base, u32 val, u32 reg)
{
writel(val, base + reg);
@@ -173,115 +104,71 @@ static void exynos_pcie_sideband_dbi_w_mode(struct exynos_pcie *ep, bool on)
{
u32 val;
- val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_ELBI_SLV_AWMISC);
+ val = exynos_pcie_readl(ep->elbi_base, PCIE_ELBI_SLV_AWMISC);
if (on)
val |= PCIE_ELBI_SLV_DBI_ENABLE;
else
val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
- exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_ELBI_SLV_AWMISC);
+ exynos_pcie_writel(ep->elbi_base, val, PCIE_ELBI_SLV_AWMISC);
}
static void exynos_pcie_sideband_dbi_r_mode(struct exynos_pcie *ep, bool on)
{
u32 val;
- val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_ELBI_SLV_ARMISC);
+ val = exynos_pcie_readl(ep->elbi_base, PCIE_ELBI_SLV_ARMISC);
if (on)
val |= PCIE_ELBI_SLV_DBI_ENABLE;
else
val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
- exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_ELBI_SLV_ARMISC);
+ exynos_pcie_writel(ep->elbi_base, val, PCIE_ELBI_SLV_ARMISC);
}
static void exynos_pcie_assert_core_reset(struct exynos_pcie *ep)
{
u32 val;
- val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_CORE_RESET);
+ val = exynos_pcie_readl(ep->elbi_base, PCIE_CORE_RESET);
val &= ~PCIE_CORE_RESET_ENABLE;
- exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_CORE_RESET);
- exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_PWR_RESET);
- exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_STICKY_RESET);
- exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_NONSTICKY_RESET);
+ exynos_pcie_writel(ep->elbi_base, val, PCIE_CORE_RESET);
+ exynos_pcie_writel(ep->elbi_base, 0, PCIE_STICKY_RESET);
+ exynos_pcie_writel(ep->elbi_base, 0, PCIE_NONSTICKY_RESET);
}
static void exynos_pcie_deassert_core_reset(struct exynos_pcie *ep)
{
u32 val;
- val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_CORE_RESET);
+ val = exynos_pcie_readl(ep->elbi_base, PCIE_CORE_RESET);
val |= PCIE_CORE_RESET_ENABLE;
- exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_CORE_RESET);
- exynos_pcie_writel(ep->mem_res->elbi_base, 1, PCIE_STICKY_RESET);
- exynos_pcie_writel(ep->mem_res->elbi_base, 1, PCIE_NONSTICKY_RESET);
- exynos_pcie_writel(ep->mem_res->elbi_base, 1, PCIE_APP_INIT_RESET);
- exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_APP_INIT_RESET);
-}
-
-static void exynos_pcie_assert_reset(struct exynos_pcie *ep)
-{
- struct dw_pcie *pci = ep->pci;
- struct device *dev = pci->dev;
-
- if (ep->reset_gpio >= 0)
- devm_gpio_request_one(dev, ep->reset_gpio,
- GPIOF_OUT_INIT_HIGH, "RESET");
+ exynos_pcie_writel(ep->elbi_base, val, PCIE_CORE_RESET);
+ exynos_pcie_writel(ep->elbi_base, 1, PCIE_STICKY_RESET);
+ exynos_pcie_writel(ep->elbi_base, 1, PCIE_NONSTICKY_RESET);
+ exynos_pcie_writel(ep->elbi_base, 1, PCIE_APP_INIT_RESET);
+ exynos_pcie_writel(ep->elbi_base, 0, PCIE_APP_INIT_RESET);
}
-static int exynos_pcie_establish_link(struct exynos_pcie *ep)
+static int exynos_pcie_start_link(struct dw_pcie *pci)
{
- struct dw_pcie *pci = ep->pci;
- struct pcie_port *pp = &pci->pp;
- struct device *dev = pci->dev;
-
- if (dw_pcie_link_up(pci)) {
- dev_err(dev, "Link already up\n");
- return 0;
- }
-
- exynos_pcie_assert_core_reset(ep);
-
- phy_reset(ep->phy);
-
- exynos_pcie_writel(ep->mem_res->elbi_base, 1,
- PCIE_PWR_RESET);
+ struct exynos_pcie *ep = to_exynos_pcie(pci);
+ u32 val;
- phy_power_on(ep->phy);
- phy_init(ep->phy);
-
- exynos_pcie_deassert_core_reset(ep);
- dw_pcie_setup_rc(pp);
- exynos_pcie_assert_reset(ep);
+ val = exynos_pcie_readl(ep->elbi_base, PCIE_SW_WAKE);
+ val &= ~PCIE_BUS_EN;
+ exynos_pcie_writel(ep->elbi_base, val, PCIE_SW_WAKE);
/* assert LTSSM enable */
- exynos_pcie_writel(ep->mem_res->elbi_base, PCIE_ELBI_LTSSM_ENABLE,
+ exynos_pcie_writel(ep->elbi_base, PCIE_ELBI_LTSSM_ENABLE,
PCIE_APP_LTSSM_ENABLE);
-
- /* check if the link is up or not */
- if (!dw_pcie_wait_for_link(pci))
- return 0;
-
- phy_power_off(ep->phy);
- return -ETIMEDOUT;
+ return 0;
}
static void exynos_pcie_clear_irq_pulse(struct exynos_pcie *ep)
{
- u32 val;
+ u32 val = exynos_pcie_readl(ep->elbi_base, PCIE_IRQ_PULSE);
- val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_IRQ_PULSE);
- exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_IRQ_PULSE);
-}
-
-static void exynos_pcie_enable_irq_pulse(struct exynos_pcie *ep)
-{
- u32 val;
-
- /* enable INTX interrupt */
- val = IRQ_INTA_ASSERT | IRQ_INTB_ASSERT |
- IRQ_INTC_ASSERT | IRQ_INTD_ASSERT;
- exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_IRQ_EN_PULSE);
+ exynos_pcie_writel(ep->elbi_base, val, PCIE_IRQ_PULSE);
}
static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg)
@@ -292,26 +179,14 @@ static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg)
return IRQ_HANDLED;
}
-static void exynos_pcie_msi_init(struct exynos_pcie *ep)
-{
- struct dw_pcie *pci = ep->pci;
- struct pcie_port *pp = &pci->pp;
- u32 val;
-
- dw_pcie_msi_init(pp);
-
- /* enable MSI interrupt */
- val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_IRQ_EN_LEVEL);
- val |= IRQ_MSI_ENABLE;
- exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_IRQ_EN_LEVEL);
-}
-
-static void exynos_pcie_enable_interrupts(struct exynos_pcie *ep)
+static void exynos_pcie_enable_irq_pulse(struct exynos_pcie *ep)
{
- exynos_pcie_enable_irq_pulse(ep);
+ u32 val = IRQ_INTA_ASSERT | IRQ_INTB_ASSERT |
+ IRQ_INTC_ASSERT | IRQ_INTD_ASSERT;
- if (IS_ENABLED(CONFIG_PCI_MSI))
- exynos_pcie_msi_init(ep);
+ exynos_pcie_writel(ep->elbi_base, val, PCIE_IRQ_EN_PULSE);
+ exynos_pcie_writel(ep->elbi_base, 0, PCIE_IRQ_EN_LEVEL);
+ exynos_pcie_writel(ep->elbi_base, 0, PCIE_IRQ_EN_SPECIAL);
}
static u32 exynos_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base,
@@ -370,13 +245,9 @@ static struct pci_ops exynos_pci_ops = {
static int exynos_pcie_link_up(struct dw_pcie *pci)
{
struct exynos_pcie *ep = to_exynos_pcie(pci);
- u32 val;
+ u32 val = exynos_pcie_readl(ep->elbi_base, PCIE_ELBI_RDLH_LINKUP);
- val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_ELBI_RDLH_LINKUP);
- if (val == PCIE_ELBI_LTSSM_ENABLE)
- return 1;
-
- return 0;
+ return (val & PCIE_ELBI_XMLH_LINKUP);
}
static int exynos_pcie_host_init(struct pcie_port *pp)
@@ -386,8 +257,14 @@ static int exynos_pcie_host_init(struct pcie_port *pp)
pp->bridge->ops = &exynos_pci_ops;
- exynos_pcie_establish_link(ep);
- exynos_pcie_enable_interrupts(ep);
+ exynos_pcie_assert_core_reset(ep);
+
+ phy_reset(ep->phy);
+ phy_power_on(ep->phy);
+ phy_init(ep->phy);
+
+ exynos_pcie_deassert_core_reset(ep);
+ exynos_pcie_enable_irq_pulse(ep);
return 0;
}
@@ -396,32 +273,27 @@ static const struct dw_pcie_host_ops exynos_pcie_host_ops = {
.host_init = exynos_pcie_host_init,
};
-static int __init exynos_add_pcie_port(struct exynos_pcie *ep,
+static int exynos_add_pcie_port(struct exynos_pcie *ep,
struct platform_device *pdev)
{
- struct dw_pcie *pci = ep->pci;
+ struct dw_pcie *pci = &ep->pci;
struct pcie_port *pp = &pci->pp;
struct device *dev = &pdev->dev;
int ret;
- pp->irq = platform_get_irq(pdev, 1);
+ pp->irq = platform_get_irq(pdev, 0);
if (pp->irq < 0)
return pp->irq;
ret = devm_request_irq(dev, pp->irq, exynos_pcie_irq_handler,
- IRQF_SHARED, "exynos-pcie", ep);
+ IRQF_SHARED, "exynos-pcie", ep);
if (ret) {
dev_err(dev, "failed to request irq\n");
return ret;
}
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- pp->msi_irq = platform_get_irq(pdev, 0);
- if (pp->msi_irq < 0)
- return pp->msi_irq;
- }
-
pp->ops = &exynos_pcie_host_ops;
+ pp->msi_irq = -ENODEV;
ret = dw_pcie_host_init(pp);
if (ret) {
@@ -436,12 +308,12 @@ static const struct dw_pcie_ops dw_pcie_ops = {
.read_dbi = exynos_pcie_read_dbi,
.write_dbi = exynos_pcie_write_dbi,
.link_up = exynos_pcie_link_up,
+ .start_link = exynos_pcie_start_link,
};
-static int __init exynos_pcie_probe(struct platform_device *pdev)
+static int exynos_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct dw_pcie *pci;
struct exynos_pcie *ep;
struct device_node *np = dev->of_node;
int ret;
@@ -450,43 +322,45 @@ static int __init exynos_pcie_probe(struct platform_device *pdev)
if (!ep)
return -ENOMEM;
- pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
- if (!pci)
- return -ENOMEM;
-
- pci->dev = dev;
- pci->ops = &dw_pcie_ops;
-
- ep->pci = pci;
- ep->ops = (const struct exynos_pcie_ops *)
- of_device_get_match_data(dev);
-
- ep->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
+ ep->pci.dev = dev;
+ ep->pci.ops = &dw_pcie_ops;
ep->phy = devm_of_phy_get(dev, np, NULL);
- if (IS_ERR(ep->phy)) {
- if (PTR_ERR(ep->phy) != -ENODEV)
- return PTR_ERR(ep->phy);
+ if (IS_ERR(ep->phy))
+ return PTR_ERR(ep->phy);
- ep->phy = NULL;
- }
+ /* External Local Bus interface (ELBI) registers */
+ ep->elbi_base = devm_platform_ioremap_resource_byname(pdev, "elbi");
+ if (IS_ERR(ep->elbi_base))
+ return PTR_ERR(ep->elbi_base);
- if (ep->ops && ep->ops->get_mem_resources) {
- ret = ep->ops->get_mem_resources(pdev, ep);
- if (ret)
- return ret;
+ ep->clk = devm_clk_get(dev, "pcie");
+ if (IS_ERR(ep->clk)) {
+ dev_err(dev, "Failed to get pcie rc clock\n");
+ return PTR_ERR(ep->clk);
}
- if (ep->ops && ep->ops->get_clk_resources &&
- ep->ops->init_clk_resources) {
- ret = ep->ops->get_clk_resources(ep);
- if (ret)
- return ret;
- ret = ep->ops->init_clk_resources(ep);
- if (ret)
- return ret;
+ ep->bus_clk = devm_clk_get(dev, "pcie_bus");
+ if (IS_ERR(ep->bus_clk)) {
+ dev_err(dev, "Failed to get pcie bus clock\n");
+ return PTR_ERR(ep->bus_clk);
}
+ ep->supplies[0].supply = "vdd18";
+ ep->supplies[1].supply = "vdd10";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ep->supplies),
+ ep->supplies);
+ if (ret)
+ return ret;
+
+ ret = exynos_pcie_init_clk_resources(ep);
+ if (ret)
+ return ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ep->supplies), ep->supplies);
+ if (ret)
+ return ret;
+
platform_set_drvdata(pdev, ep);
ret = exynos_add_pcie_port(ep, pdev);
@@ -497,9 +371,9 @@ static int __init exynos_pcie_probe(struct platform_device *pdev)
fail_probe:
phy_exit(ep->phy);
+ exynos_pcie_deinit_clk_resources(ep);
+ regulator_bulk_disable(ARRAY_SIZE(ep->supplies), ep->supplies);
- if (ep->ops && ep->ops->deinit_clk_resources)
- ep->ops->deinit_clk_resources(ep);
return ret;
}
@@ -507,32 +381,65 @@ static int __exit exynos_pcie_remove(struct platform_device *pdev)
{
struct exynos_pcie *ep = platform_get_drvdata(pdev);
- if (ep->ops && ep->ops->deinit_clk_resources)
- ep->ops->deinit_clk_resources(ep);
+ dw_pcie_host_deinit(&ep->pci.pp);
+ exynos_pcie_assert_core_reset(ep);
+ phy_power_off(ep->phy);
+ phy_exit(ep->phy);
+ exynos_pcie_deinit_clk_resources(ep);
+ regulator_bulk_disable(ARRAY_SIZE(ep->supplies), ep->supplies);
return 0;
}
+static int __maybe_unused exynos_pcie_suspend_noirq(struct device *dev)
+{
+ struct exynos_pcie *ep = dev_get_drvdata(dev);
+
+ exynos_pcie_assert_core_reset(ep);
+ phy_power_off(ep->phy);
+ phy_exit(ep->phy);
+ regulator_bulk_disable(ARRAY_SIZE(ep->supplies), ep->supplies);
+
+ return 0;
+}
+
+static int __maybe_unused exynos_pcie_resume_noirq(struct device *dev)
+{
+ struct exynos_pcie *ep = dev_get_drvdata(dev);
+ struct dw_pcie *pci = &ep->pci;
+ struct pcie_port *pp = &pci->pp;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ep->supplies), ep->supplies);
+ if (ret)
+ return ret;
+
+ /* exynos_pcie_host_init controls ep->phy */
+ exynos_pcie_host_init(pp);
+ dw_pcie_setup_rc(pp);
+ exynos_pcie_start_link(pci);
+ return dw_pcie_wait_for_link(pci);
+}
+
+static const struct dev_pm_ops exynos_pcie_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(exynos_pcie_suspend_noirq,
+ exynos_pcie_resume_noirq)
+};
+
static const struct of_device_id exynos_pcie_of_match[] = {
- {
- .compatible = "samsung,exynos5440-pcie",
- .data = &exynos5440_pcie_ops
- },
- {},
+ { .compatible = "samsung,exynos5433-pcie", },
+ { },
};
static struct platform_driver exynos_pcie_driver = {
+ .probe = exynos_pcie_probe,
.remove = __exit_p(exynos_pcie_remove),
.driver = {
.name = "exynos-pcie",
.of_match_table = exynos_pcie_of_match,
+ .pm = &exynos_pcie_pm_ops,
},
};
-
-/* Exynos PCIe driver does not allow module unload */
-
-static int __init exynos_pcie_init(void)
-{
- return platform_driver_probe(&exynos_pcie_driver, exynos_pcie_probe);
-}
-subsys_initcall(exynos_pcie_init);
+module_platform_driver(exynos_pcie_driver);
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, exynos_pcie_of_match);
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 5cf1ef12fb9b..0cf1333c0440 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -745,9 +745,9 @@ static void imx6_pcie_ltssm_enable(struct device *dev)
}
}
-static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
+static int imx6_pcie_start_link(struct dw_pcie *pci)
{
- struct dw_pcie *pci = imx6_pcie->pci;
+ struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
struct device *dev = pci->dev;
u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
u32 tmp;
@@ -834,9 +834,6 @@ static int imx6_pcie_host_init(struct pcie_port *pp)
imx6_pcie_init_phy(imx6_pcie);
imx6_pcie_deassert_core_reset(imx6_pcie);
imx6_setup_phy_mpll(imx6_pcie);
- dw_pcie_setup_rc(pp);
- imx6_pcie_establish_link(imx6_pcie);
- dw_pcie_msi_init(pp);
return 0;
}
@@ -845,33 +842,8 @@ static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
.host_init = imx6_pcie_host_init,
};
-static int imx6_add_pcie_port(struct imx6_pcie *imx6_pcie,
- struct platform_device *pdev)
-{
- struct dw_pcie *pci = imx6_pcie->pci;
- struct pcie_port *pp = &pci->pp;
- struct device *dev = &pdev->dev;
- int ret;
-
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- pp->msi_irq = platform_get_irq_byname(pdev, "msi");
- if (pp->msi_irq < 0)
- return pp->msi_irq;
- }
-
- pp->ops = &imx6_pcie_host_ops;
-
- ret = dw_pcie_host_init(pp);
- if (ret) {
- dev_err(dev, "failed to initialize host\n");
- return ret;
- }
-
- return 0;
-}
-
static const struct dw_pcie_ops dw_pcie_ops = {
- /* No special ops needed, but pcie-designware still expects this struct */
+ .start_link = imx6_pcie_start_link,
};
#ifdef CONFIG_PM_SLEEP
@@ -980,7 +952,7 @@ static int imx6_pcie_resume_noirq(struct device *dev)
imx6_pcie_deassert_core_reset(imx6_pcie);
dw_pcie_setup_rc(pp);
- ret = imx6_pcie_establish_link(imx6_pcie);
+ ret = imx6_pcie_start_link(imx6_pcie->pci);
if (ret < 0)
dev_info(dev, "pcie link is down after resume.\n");
@@ -1014,6 +986,7 @@ static int imx6_pcie_probe(struct platform_device *pdev)
pci->dev = dev;
pci->ops = &dw_pcie_ops;
+ pci->pp.ops = &imx6_pcie_host_ops;
imx6_pcie->pci = pci;
imx6_pcie->drvdata = of_device_get_match_data(dev);
@@ -1163,7 +1136,7 @@ static int imx6_pcie_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = imx6_add_pcie_port(imx6_pcie, pdev);
+ ret = dw_pcie_host_init(&pci->pp);
if (ret < 0)
return ret;
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index a222728238ca..53aa35cb3a49 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -121,6 +121,7 @@ struct keystone_pcie {
int msi_host_irq;
int num_lanes;
+ u32 num_viewport;
struct phy **phy;
struct device_link **link;
struct device_node *msi_intc_np;
@@ -272,14 +273,6 @@ static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie,
ks_pcie_app_writel(ks_pcie, IRQ_EOI, offset);
}
-/*
- * Dummy function so that DW core doesn't configure MSI
- */
-static int ks_pcie_am654_msi_host_init(struct pcie_port *pp)
-{
- return 0;
-}
-
static void ks_pcie_enable_error_irq(struct keystone_pcie *ks_pcie)
{
ks_pcie_app_writel(ks_pcie, ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL);
@@ -394,9 +387,9 @@ static void ks_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
{
u32 val;
+ u32 num_viewport = ks_pcie->num_viewport;
struct dw_pcie *pci = ks_pcie->pci;
struct pcie_port *pp = &pci->pp;
- u32 num_viewport = pci->num_viewport;
u64 start, end;
struct resource *mem;
int i;
@@ -519,14 +512,8 @@ static void ks_pcie_stop_link(struct dw_pcie *pci)
static int ks_pcie_start_link(struct dw_pcie *pci)
{
struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
- struct device *dev = pci->dev;
u32 val;
- if (dw_pcie_link_up(pci)) {
- dev_dbg(dev, "link is already up\n");
- return 0;
- }
-
/* Initiate Link Training */
val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
@@ -821,8 +808,6 @@ static int __init ks_pcie_host_init(struct pcie_port *pp)
if (ret)
return ret;
- dw_pcie_setup_rc(pp);
-
ks_pcie_stop_link(pci);
ks_pcie_setup_rc_app_regs(ks_pcie);
writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8),
@@ -841,9 +826,6 @@ static int __init ks_pcie_host_init(struct pcie_port *pp)
"Asynchronous external abort");
#endif
- ks_pcie_start_link(pci);
- dw_pcie_wait_for_link(pci);
-
return 0;
}
@@ -854,7 +836,6 @@ static const struct dw_pcie_host_ops ks_pcie_host_ops = {
static const struct dw_pcie_host_ops ks_pcie_am654_host_ops = {
.host_init = ks_pcie_host_init,
- .msi_host_init = ks_pcie_am654_msi_host_init,
};
static irqreturn_t ks_pcie_err_irq_handler(int irq, void *priv)
@@ -864,23 +845,6 @@ static irqreturn_t ks_pcie_err_irq_handler(int irq, void *priv)
return ks_pcie_handle_error_irq(ks_pcie);
}
-static int __init ks_pcie_add_pcie_port(struct keystone_pcie *ks_pcie,
- struct platform_device *pdev)
-{
- struct dw_pcie *pci = ks_pcie->pci;
- struct pcie_port *pp = &pci->pp;
- struct device *dev = &pdev->dev;
- int ret;
-
- ret = dw_pcie_host_init(pp);
- if (ret) {
- dev_err(dev, "failed to initialize host\n");
- return ret;
- }
-
- return 0;
-}
-
static void ks_pcie_am654_write_dbi2(struct dw_pcie *pci, void __iomem *base,
u32 reg, size_t size, u32 val)
{
@@ -977,33 +941,6 @@ static const struct dw_pcie_ep_ops ks_pcie_am654_ep_ops = {
.get_features = &ks_pcie_am654_get_features,
};
-static int __init ks_pcie_add_pcie_ep(struct keystone_pcie *ks_pcie,
- struct platform_device *pdev)
-{
- int ret;
- struct dw_pcie_ep *ep;
- struct resource *res;
- struct device *dev = &pdev->dev;
- struct dw_pcie *pci = ks_pcie->pci;
-
- ep = &pci->ep;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
- if (!res)
- return -EINVAL;
-
- ep->phys_base = res->start;
- ep->addr_size = resource_size(res);
-
- ret = dw_pcie_ep_init(ep);
- if (ret) {
- dev_err(dev, "failed to initialize endpoint\n");
- return ret;
- }
-
- return 0;
-}
-
static void ks_pcie_disable_phy(struct keystone_pcie *ks_pcie)
{
int num_lanes = ks_pcie->num_lanes;
@@ -1157,6 +1094,7 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
struct resource *res;
unsigned int version;
void __iomem *base;
+ u32 num_viewport;
struct phy **phy;
u32 num_lanes;
char name[10];
@@ -1288,6 +1226,12 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
goto err_get_sync;
}
+ ret = of_property_read_u32(np, "num-viewport", &num_viewport);
+ if (ret < 0) {
+ dev_err(dev, "unable to read *num-viewport* property\n");
+ goto err_get_sync;
+ }
+
/*
* "Power Sequencing and Reset Signal Timings" table in
* PCI EXPRESS CARD ELECTROMECHANICAL SPECIFICATION, REV. 2.0
@@ -1301,8 +1245,9 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
gpiod_set_value_cansleep(gpiod, 1);
}
+ ks_pcie->num_viewport = num_viewport;
pci->pp.ops = host_ops;
- ret = ks_pcie_add_pcie_port(ks_pcie, pdev);
+ ret = dw_pcie_host_init(&pci->pp);
if (ret < 0)
goto err_get_sync;
break;
@@ -1313,7 +1258,7 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
}
pci->ep.ops = ep_ops;
- ret = ks_pcie_add_pcie_ep(ks_pcie, pdev);
+ ret = dw_pcie_ep_init(&pci->ep);
if (ret < 0)
goto err_get_sync;
break;
diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c
index 84206f265e54..4d12efdacd2f 100644
--- a/drivers/pci/controller/dwc/pci-layerscape-ep.c
+++ b/drivers/pci/controller/dwc/pci-layerscape-ep.c
@@ -18,8 +18,6 @@
#include "pcie-designware.h"
-#define PCIE_DBI2_OFFSET 0x1000 /* DBI2 base address*/
-
#define to_ls_pcie_ep(x) dev_get_drvdata((x)->dev)
struct ls_pcie_ep_drvdata {
@@ -124,34 +122,6 @@ static const struct of_device_id ls_pcie_ep_of_match[] = {
{ },
};
-static int __init ls_add_pcie_ep(struct ls_pcie_ep *pcie,
- struct platform_device *pdev)
-{
- struct dw_pcie *pci = pcie->pci;
- struct device *dev = pci->dev;
- struct dw_pcie_ep *ep;
- struct resource *res;
- int ret;
-
- ep = &pci->ep;
- ep->ops = pcie->drvdata->ops;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
- if (!res)
- return -EINVAL;
-
- ep->phys_base = res->start;
- ep->addr_size = resource_size(res);
-
- ret = dw_pcie_ep_init(ep);
- if (ret) {
- dev_err(dev, "failed to initialize endpoint\n");
- return ret;
- }
-
- return 0;
-}
-
static int __init ls_pcie_ep_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -159,7 +129,6 @@ static int __init ls_pcie_ep_probe(struct platform_device *pdev)
struct ls_pcie_ep *pcie;
struct pci_epc_features *ls_epc;
struct resource *dbi_base;
- int ret;
pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
@@ -188,13 +157,11 @@ static int __init ls_pcie_ep_probe(struct platform_device *pdev)
if (IS_ERR(pci->dbi_base))
return PTR_ERR(pci->dbi_base);
- pci->dbi_base2 = pci->dbi_base + PCIE_DBI2_OFFSET;
+ pci->ep.ops = &ls_pcie_ep_ops;
platform_set_drvdata(pdev, pcie);
- ret = ls_add_pcie_ep(pcie, pdev);
-
- return ret;
+ return dw_pcie_ep_init(&pci->ep);
}
static struct platform_driver ls_pcie_ep_driver = {
diff --git a/drivers/pci/controller/dwc/pci-layerscape.c b/drivers/pci/controller/dwc/pci-layerscape.c
index f24f79a70d9a..44ad34cdc3bc 100644
--- a/drivers/pci/controller/dwc/pci-layerscape.c
+++ b/drivers/pci/controller/dwc/pci-layerscape.c
@@ -83,14 +83,6 @@ static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie)
iowrite32(val, pci->dbi_base + PCIE_STRFMR1);
}
-static void ls_pcie_disable_outbound_atus(struct ls_pcie *pcie)
-{
- int i;
-
- for (i = 0; i < PCIE_IATU_NUM; i++)
- dw_pcie_disable_atu(pcie->pci, i, DW_PCIE_REGION_OUTBOUND);
-}
-
static int ls1021_pcie_link_up(struct dw_pcie *pci)
{
u32 state;
@@ -136,12 +128,6 @@ static int ls_pcie_host_init(struct pcie_port *pp)
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct ls_pcie *pcie = to_ls_pcie(pci);
- /*
- * Disable outbound windows configured by the bootloader to avoid
- * one transaction hitting multiple outbound windows.
- * dw_pcie_setup_rc() will reconfigure the outbound windows.
- */
- ls_pcie_disable_outbound_atus(pcie);
ls_pcie_fix_error_response(pcie);
dw_pcie_dbi_ro_wr_en(pci);
@@ -150,8 +136,6 @@ static int ls_pcie_host_init(struct pcie_port *pp)
ls_pcie_drop_msg_tlp(pcie);
- dw_pcie_setup_rc(pp);
-
return 0;
}
@@ -182,37 +166,12 @@ static int ls1021_pcie_host_init(struct pcie_port *pp)
return ls_pcie_host_init(pp);
}
-static int ls_pcie_msi_host_init(struct pcie_port *pp)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct device *dev = pci->dev;
- struct device_node *np = dev->of_node;
- struct device_node *msi_node;
-
- /*
- * The MSI domain is set by the generic of_msi_configure(). This
- * .msi_host_init() function keeps us from doing the default MSI
- * domain setup in dw_pcie_host_init() and also enforces the
- * requirement that "msi-parent" exists.
- */
- msi_node = of_parse_phandle(np, "msi-parent", 0);
- if (!msi_node) {
- dev_err(dev, "failed to find msi-parent\n");
- return -EINVAL;
- }
-
- of_node_put(msi_node);
- return 0;
-}
-
static const struct dw_pcie_host_ops ls1021_pcie_host_ops = {
.host_init = ls1021_pcie_host_init,
- .msi_host_init = ls_pcie_msi_host_init,
};
static const struct dw_pcie_host_ops ls_pcie_host_ops = {
.host_init = ls_pcie_host_init,
- .msi_host_init = ls_pcie_msi_host_init,
};
static const struct dw_pcie_ops dw_ls1021_pcie_ops = {
@@ -273,31 +232,12 @@ static const struct of_device_id ls_pcie_of_match[] = {
{ },
};
-static int __init ls_add_pcie_port(struct ls_pcie *pcie)
-{
- struct dw_pcie *pci = pcie->pci;
- struct pcie_port *pp = &pci->pp;
- struct device *dev = pci->dev;
- int ret;
-
- pp->ops = pcie->drvdata->ops;
-
- ret = dw_pcie_host_init(pp);
- if (ret) {
- dev_err(dev, "failed to initialize host\n");
- return ret;
- }
-
- return 0;
-}
-
static int __init ls_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct dw_pcie *pci;
struct ls_pcie *pcie;
struct resource *dbi_base;
- int ret;
pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
@@ -311,6 +251,7 @@ static int __init ls_pcie_probe(struct platform_device *pdev)
pci->dev = dev;
pci->ops = pcie->drvdata->dw_pcie_ops;
+ pci->pp.ops = pcie->drvdata->ops;
pcie->pci = pci;
@@ -326,11 +267,7 @@ static int __init ls_pcie_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pcie);
- ret = ls_add_pcie_port(pcie);
- if (ret < 0)
- return ret;
-
- return 0;
+ return dw_pcie_host_init(&pci->pp);
}
static struct platform_driver ls_pcie_driver = {
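[Editor's note] The host-mode probe shrinks the same way: pp->ops is assigned up front, and dw_pcie_host_init() now runs dw_pcie_setup_rc(), kicks the link through the new dw_pcie_ops.start_link callback, and waits for it (tolerating a link that only comes up later). A hedged sketch of the post-series host probe, hypothetical names again:

static int foo_pcie_host_init(struct pcie_port *pp)
{
	/* SoC-specific one-time setup only; the core handles the rest */
	return 0;
}

static const struct dw_pcie_host_ops foo_pcie_host_ops = {
	.host_init = foo_pcie_host_init,
};

static int foo_pcie_probe(struct platform_device *pdev)
{
	struct dw_pcie *pci;

	pci = devm_kzalloc(&pdev->dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pci->dev = &pdev->dev;
	pci->pp.ops = &foo_pcie_host_ops;
	platform_set_drvdata(pdev, pci);

	/* setup_rc + start_link + wait_for_link all happen inside */
	return dw_pcie_host_init(&pci->pp);
}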
diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c
index 1913dc2c8fa0..686ded034f22 100644
--- a/drivers/pci/controller/dwc/pci-meson.c
+++ b/drivers/pci/controller/dwc/pci-meson.c
@@ -231,7 +231,7 @@ static void meson_pcie_assert_reset(struct meson_pcie *mp)
gpiod_set_value_cansleep(mp->reset_gpio, 0);
}
-static void meson_pcie_init_dw(struct meson_pcie *mp)
+static void meson_pcie_ltssm_enable(struct meson_pcie *mp)
{
u32 val;
@@ -289,20 +289,14 @@ static void meson_set_max_rd_req_size(struct meson_pcie *mp, int size)
dw_pcie_writel_dbi(pci, offset + PCI_EXP_DEVCTL, val);
}
-static int meson_pcie_establish_link(struct meson_pcie *mp)
+static int meson_pcie_start_link(struct dw_pcie *pci)
{
- struct dw_pcie *pci = &mp->pci;
- struct pcie_port *pp = &pci->pp;
-
- meson_pcie_init_dw(mp);
- meson_set_max_payload(mp, MAX_PAYLOAD_SIZE);
- meson_set_max_rd_req_size(mp, MAX_READ_REQ_SIZE);
-
- dw_pcie_setup_rc(pp);
+ struct meson_pcie *mp = to_meson_pcie(pci);
+ meson_pcie_ltssm_enable(mp);
meson_pcie_assert_reset(mp);
- return dw_pcie_wait_for_link(pci);
+ return 0;
}
static int meson_pcie_rd_own_conf(struct pci_bus *bus, u32 devfn,
@@ -380,15 +374,11 @@ static int meson_pcie_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct meson_pcie *mp = to_meson_pcie(pci);
- int ret;
pp->bridge->ops = &meson_pci_ops;
- ret = meson_pcie_establish_link(mp);
- if (ret)
- return ret;
-
- dw_pcie_msi_init(pp);
+ meson_set_max_payload(mp, MAX_PAYLOAD_SIZE);
+ meson_set_max_rd_req_size(mp, MAX_READ_REQ_SIZE);
return 0;
}
@@ -397,33 +387,9 @@ static const struct dw_pcie_host_ops meson_pcie_host_ops = {
.host_init = meson_pcie_host_init,
};
-static int meson_add_pcie_port(struct meson_pcie *mp,
- struct platform_device *pdev)
-{
- struct dw_pcie *pci = &mp->pci;
- struct pcie_port *pp = &pci->pp;
- struct device *dev = &pdev->dev;
- int ret;
-
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- pp->msi_irq = platform_get_irq(pdev, 0);
- if (pp->msi_irq < 0)
- return pp->msi_irq;
- }
-
- pp->ops = &meson_pcie_host_ops;
-
- ret = dw_pcie_host_init(pp);
- if (ret) {
- dev_err(dev, "failed to initialize host\n");
- return ret;
- }
-
- return 0;
-}
-
static const struct dw_pcie_ops dw_pcie_ops = {
.link_up = meson_pcie_link_up,
+ .start_link = meson_pcie_start_link,
};
static int meson_pcie_probe(struct platform_device *pdev)
@@ -440,6 +406,7 @@ static int meson_pcie_probe(struct platform_device *pdev)
pci = &mp->pci;
pci->dev = dev;
pci->ops = &dw_pcie_ops;
+ pci->pp.ops = &meson_pcie_host_ops;
pci->num_lanes = 1;
mp->phy = devm_phy_get(dev, "pcie");
@@ -486,7 +453,7 @@ static int meson_pcie_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, mp);
- ret = meson_add_pcie_port(mp, pdev);
+ ret = dw_pcie_host_init(&pci->pp);
if (ret < 0) {
dev_err(dev, "Add PCIe port failed, %d\n", ret);
goto err_phy;
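[Editor's note] meson shows the new split of responsibilities: host_init keeps only one-time controller setup (payload and read-request sizes), while LTSSM enablement moves into dw_pcie_ops.start_link, which the core invokes once the RC is programmed. The contract is deliberately small: kick the link and return 0; waiting happens in the core and a slow link is not fatal. A condensed sketch with hypothetical register names:

static int foo_pcie_start_link(struct dw_pcie *pci)
{
	struct foo_pcie *fp = to_foo_pcie(pci);
	u32 val;

	/* assert LTSSM enable; dw_pcie_host_init() does the waiting */
	val = foo_readl(fp, FOO_APP_CTRL);
	val |= FOO_LTSSM_EN;
	foo_writel(fp, FOO_APP_CTRL, val);

	return 0;
}

static const struct dw_pcie_ops foo_dw_pcie_ops = {
	.link_up = foo_pcie_link_up,
	.start_link = foo_pcie_start_link,
};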
diff --git a/drivers/pci/controller/dwc/pcie-al.c b/drivers/pci/controller/dwc/pcie-al.c
index f973fbca90cf..abf37aa68e51 100644
--- a/drivers/pci/controller/dwc/pcie-al.c
+++ b/drivers/pci/controller/dwc/pcie-al.c
@@ -76,7 +76,6 @@ static int al_pcie_init(struct pci_config_window *cfg)
}
const struct pci_ecam_ops al_pcie_ops = {
- .bus_shift = 20,
.init = al_pcie_init,
.pci_ops = {
.map_bus = al_pcie_map_bus,
@@ -138,8 +137,6 @@ struct al_pcie {
struct al_pcie_target_bus_cfg target_bus_cfg;
};
-#define PCIE_ECAM_DEVFN(x) (((x) & 0xff) << 12)
-
#define to_al_pcie(x) dev_get_drvdata((x)->dev)
static inline u32 al_pcie_controller_readl(struct al_pcie *pcie, u32 offset)
@@ -226,11 +223,6 @@ static void __iomem *al_pcie_conf_addr_map_bus(struct pci_bus *bus,
struct al_pcie_target_bus_cfg *target_bus_cfg = &pcie->target_bus_cfg;
unsigned int busnr_ecam = busnr & target_bus_cfg->ecam_mask;
unsigned int busnr_reg = busnr & target_bus_cfg->reg_mask;
- void __iomem *pci_base_addr;
-
- pci_base_addr = (void __iomem *)((uintptr_t)pp->va_cfg0_base +
- (busnr_ecam << 20) +
- PCIE_ECAM_DEVFN(devfn));
if (busnr_reg != target_bus_cfg->reg_val) {
dev_dbg(pcie->pci->dev, "Changing target bus busnum val from 0x%x to 0x%x\n",
@@ -241,7 +233,7 @@ static void __iomem *al_pcie_conf_addr_map_bus(struct pci_bus *bus,
target_bus_cfg->reg_mask);
}
- return pci_base_addr + where;
+ return pp->va_cfg0_base + PCIE_ECAM_OFFSET(busnr_ecam, devfn, where);
}
static struct pci_ops al_child_pci_ops = {
@@ -264,7 +256,7 @@ static void al_pcie_config_prepare(struct al_pcie *pcie)
target_bus_cfg = &pcie->target_bus_cfg;
- ecam_bus_mask = (pcie->ecam_size >> 20) - 1;
+ ecam_bus_mask = (pcie->ecam_size >> PCIE_ECAM_BUS_SHIFT) - 1;
if (ecam_bus_mask > 255) {
dev_warn(pcie->dev, "ECAM window size is larger than 256MB. Cutting off at 256\n");
ecam_bus_mask = 255;
@@ -322,23 +314,6 @@ static const struct dw_pcie_host_ops al_pcie_host_ops = {
.host_init = al_pcie_host_init,
};
-static int al_add_pcie_port(struct pcie_port *pp,
- struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- int ret;
-
- pp->ops = &al_pcie_host_ops;
-
- ret = dw_pcie_host_init(pp);
- if (ret) {
- dev_err(dev, "failed to initialize host\n");
- return ret;
- }
-
- return 0;
-}
-
static const struct dw_pcie_ops dw_pcie_ops = {
};
@@ -347,7 +322,6 @@ static int al_pcie_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct resource *controller_res;
struct resource *ecam_res;
- struct resource *dbi_res;
struct al_pcie *al_pcie;
struct dw_pcie *pci;
@@ -361,15 +335,11 @@ static int al_pcie_probe(struct platform_device *pdev)
pci->dev = dev;
pci->ops = &dw_pcie_ops;
+ pci->pp.ops = &al_pcie_host_ops;
al_pcie->pci = pci;
al_pcie->dev = dev;
- dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
- pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_res);
- if (IS_ERR(pci->dbi_base))
- return PTR_ERR(pci->dbi_base);
-
ecam_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
if (!ecam_res) {
dev_err(dev, "couldn't find 'config' reg in DT\n");
@@ -386,12 +356,11 @@ static int al_pcie_probe(struct platform_device *pdev)
return PTR_ERR(al_pcie->controller_base);
}
- dev_dbg(dev, "From DT: dbi_base: %pR, controller_base: %pR\n",
- dbi_res, controller_res);
+ dev_dbg(dev, "From DT: controller_base: %pR\n", controller_res);
platform_set_drvdata(pdev, al_pcie);
- return al_add_pcie_port(&pci->pp, pdev);
+ return dw_pcie_host_init(&pci->pp);
}
static const struct of_device_id al_pcie_of_match[] = {
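[Editor's note] pcie-al.c also swaps its private ECAM arithmetic for the generic helpers from <linux/pci-ecam.h>; the open-coded shifts and the macro compute the same offset under the standard ECAM layout. A sketch of the equivalence:

#include <linux/pci-ecam.h>

/* equivalent of the open-coded mapping removed above (sketch only) */
static void __iomem *foo_ecam_addr(void __iomem *base, unsigned int busnr,
				   unsigned int devfn, int where)
{
	/*
	 * PCIE_ECAM_OFFSET(b, d, w) expands to
	 * (b << PCIE_ECAM_BUS_SHIFT) | (d << PCIE_ECAM_DEVFN_SHIFT) | w,
	 * i.e. bus << 20 | devfn << 12 | reg, with each field masked to
	 * its architected width.
	 */
	return base + PCIE_ECAM_OFFSET(busnr, devfn, where);
}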
diff --git a/drivers/pci/controller/dwc/pcie-armada8k.c b/drivers/pci/controller/dwc/pcie-armada8k.c
index 13901f359a41..4e2552dcf982 100644
--- a/drivers/pci/controller/dwc/pcie-armada8k.c
+++ b/drivers/pci/controller/dwc/pcie-armada8k.c
@@ -154,10 +154,22 @@ static int armada8k_pcie_link_up(struct dw_pcie *pci)
return 0;
}
-static void armada8k_pcie_establish_link(struct armada8k_pcie *pcie)
+static int armada8k_pcie_start_link(struct dw_pcie *pci)
+{
+ u32 reg;
+
+ /* Start LTSSM */
+ reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_CONTROL_REG);
+ reg |= PCIE_APP_LTSSM_EN;
+ dw_pcie_writel_dbi(pci, PCIE_GLOBAL_CONTROL_REG, reg);
+
+ return 0;
+}
+
+static int armada8k_pcie_host_init(struct pcie_port *pp)
{
- struct dw_pcie *pci = pcie->pci;
u32 reg;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
if (!dw_pcie_link_up(pci)) {
/* Disable LTSSM state machine to enable configuration */
@@ -193,26 +205,6 @@ static void armada8k_pcie_establish_link(struct armada8k_pcie *pcie)
PCIE_INT_C_ASSERT_MASK | PCIE_INT_D_ASSERT_MASK;
dw_pcie_writel_dbi(pci, PCIE_GLOBAL_INT_MASK1_REG, reg);
- if (!dw_pcie_link_up(pci)) {
- /* Configuration done. Start LTSSM */
- reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_CONTROL_REG);
- reg |= PCIE_APP_LTSSM_EN;
- dw_pcie_writel_dbi(pci, PCIE_GLOBAL_CONTROL_REG, reg);
- }
-
- /* Wait until the link becomes active again */
- if (dw_pcie_wait_for_link(pci))
- dev_err(pci->dev, "Link not up after reconfiguration\n");
-}
-
-static int armada8k_pcie_host_init(struct pcie_port *pp)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct armada8k_pcie *pcie = to_armada8k_pcie(pci);
-
- dw_pcie_setup_rc(pp);
- armada8k_pcie_establish_link(pcie);
-
return 0;
}
@@ -269,6 +261,7 @@ static int armada8k_add_pcie_port(struct armada8k_pcie *pcie,
static const struct dw_pcie_ops dw_pcie_ops = {
.link_up = armada8k_pcie_link_up,
+ .start_link = armada8k_pcie_start_link,
};
static int armada8k_pcie_probe(struct platform_device *pdev)
diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c
index 929448e9e0bc..597c282f586c 100644
--- a/drivers/pci/controller/dwc/pcie-artpec6.c
+++ b/drivers/pci/controller/dwc/pcie-artpec6.c
@@ -328,10 +328,6 @@ static int artpec6_pcie_host_init(struct pcie_port *pp)
artpec6_pcie_init_phy(artpec6_pcie);
artpec6_pcie_deassert_core_reset(artpec6_pcie);
artpec6_pcie_wait_for_phy(artpec6_pcie);
- dw_pcie_setup_rc(pp);
- artpec6_pcie_establish_link(pci);
- dw_pcie_wait_for_link(pci);
- dw_pcie_msi_init(pp);
return 0;
}
@@ -340,31 +336,6 @@ static const struct dw_pcie_host_ops artpec6_pcie_host_ops = {
.host_init = artpec6_pcie_host_init,
};
-static int artpec6_add_pcie_port(struct artpec6_pcie *artpec6_pcie,
- struct platform_device *pdev)
-{
- struct dw_pcie *pci = artpec6_pcie->pci;
- struct pcie_port *pp = &pci->pp;
- struct device *dev = pci->dev;
- int ret;
-
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- pp->msi_irq = platform_get_irq_byname(pdev, "msi");
- if (pp->msi_irq < 0)
- return pp->msi_irq;
- }
-
- pp->ops = &artpec6_pcie_host_ops;
-
- ret = dw_pcie_host_init(pp);
- if (ret) {
- dev_err(dev, "failed to initialize host\n");
- return ret;
- }
-
- return 0;
-}
-
static void artpec6_pcie_ep_init(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -403,38 +374,6 @@ static const struct dw_pcie_ep_ops pcie_ep_ops = {
.raise_irq = artpec6_pcie_raise_irq,
};
-static int artpec6_add_pcie_ep(struct artpec6_pcie *artpec6_pcie,
- struct platform_device *pdev)
-{
- int ret;
- struct dw_pcie_ep *ep;
- struct resource *res;
- struct device *dev = &pdev->dev;
- struct dw_pcie *pci = artpec6_pcie->pci;
-
- ep = &pci->ep;
- ep->ops = &pcie_ep_ops;
-
- pci->dbi_base2 = devm_platform_ioremap_resource_byname(pdev, "dbi2");
- if (IS_ERR(pci->dbi_base2))
- return PTR_ERR(pci->dbi_base2);
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
- if (!res)
- return -EINVAL;
-
- ep->phys_base = res->start;
- ep->addr_size = resource_size(res);
-
- ret = dw_pcie_ep_init(ep);
- if (ret) {
- dev_err(dev, "failed to initialize endpoint\n");
- return ret;
- }
-
- return 0;
-}
-
static int artpec6_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -469,10 +408,6 @@ static int artpec6_pcie_probe(struct platform_device *pdev)
artpec6_pcie->variant = variant;
artpec6_pcie->mode = mode;
- pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "dbi");
- if (IS_ERR(pci->dbi_base))
- return PTR_ERR(pci->dbi_base);
-
artpec6_pcie->phy_base =
devm_platform_ioremap_resource_byname(pdev, "phy");
if (IS_ERR(artpec6_pcie->phy_base))
@@ -491,7 +426,9 @@ static int artpec6_pcie_probe(struct platform_device *pdev)
if (!IS_ENABLED(CONFIG_PCIE_ARTPEC6_HOST))
return -ENODEV;
- ret = artpec6_add_pcie_port(artpec6_pcie, pdev);
+ pci->pp.ops = &artpec6_pcie_host_ops;
+
+ ret = dw_pcie_host_init(&pci->pp);
if (ret < 0)
return ret;
break;
@@ -504,9 +441,10 @@ static int artpec6_pcie_probe(struct platform_device *pdev)
val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
val &= ~PCIECFG_DEVICE_TYPE_MASK;
artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
- ret = artpec6_add_pcie_ep(artpec6_pcie, pdev);
- if (ret < 0)
- return ret;
+
+ pci->ep.ops = &pcie_ep_ops;
+
+ return dw_pcie_ep_init(&pci->ep);
break;
}
default:
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index ad7da4ea43a5..bcd1cd9ba8c8 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -7,6 +7,7 @@
*/
#include <linux/of.h>
+#include <linux/platform_device.h>
#include "pcie-designware.h"
#include <linux/pci-epc.h>
@@ -160,8 +161,8 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no,
u32 free_win;
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- free_win = find_first_zero_bit(ep->ib_window_map, ep->num_ib_windows);
- if (free_win >= ep->num_ib_windows) {
+ free_win = find_first_zero_bit(ep->ib_window_map, pci->num_ib_windows);
+ if (free_win >= pci->num_ib_windows) {
dev_err(pci->dev, "No free inbound window\n");
return -EINVAL;
}
@@ -186,8 +187,8 @@ static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, u8 func_no,
u32 free_win;
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- free_win = find_first_zero_bit(ep->ob_window_map, ep->num_ob_windows);
- if (free_win >= ep->num_ob_windows) {
+ free_win = find_first_zero_bit(ep->ob_window_map, pci->num_ob_windows);
+ if (free_win >= pci->num_ob_windows) {
dev_err(pci->dev, "No free outbound window\n");
return -EINVAL;
}
@@ -263,8 +264,9 @@ static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr,
u32 *atu_index)
{
u32 index;
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- for (index = 0; index < ep->num_ob_windows; index++) {
+ for (index = 0; index < pci->num_ob_windows; index++) {
if (ep->outbound_addr[index] != addr)
continue;
*atu_index = index;
@@ -676,55 +678,57 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
int ret;
void *addr;
u8 func_no;
+ struct resource *res;
struct pci_epc *epc;
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct device *dev = pci->dev;
+ struct platform_device *pdev = to_platform_device(dev);
struct device_node *np = dev->of_node;
const struct pci_epc_features *epc_features;
struct dw_pcie_ep_func *ep_func;
INIT_LIST_HEAD(&ep->func_list);
- if (!pci->dbi_base || !pci->dbi_base2) {
- dev_err(dev, "dbi_base/dbi_base2 is not populated\n");
- return -EINVAL;
+ if (!pci->dbi_base) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+ pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pci->dbi_base))
+ return PTR_ERR(pci->dbi_base);
}
- ret = of_property_read_u32(np, "num-ib-windows", &ep->num_ib_windows);
- if (ret < 0) {
- dev_err(dev, "Unable to read *num-ib-windows* property\n");
- return ret;
- }
- if (ep->num_ib_windows > MAX_IATU_IN) {
- dev_err(dev, "Invalid *num-ib-windows*\n");
- return -EINVAL;
+ if (!pci->dbi_base2) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2");
+ if (!res)
+ pci->dbi_base2 = pci->dbi_base + SZ_4K;
+ else {
+ pci->dbi_base2 = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pci->dbi_base2))
+ return PTR_ERR(pci->dbi_base2);
+ }
}
- ret = of_property_read_u32(np, "num-ob-windows", &ep->num_ob_windows);
- if (ret < 0) {
- dev_err(dev, "Unable to read *num-ob-windows* property\n");
- return ret;
- }
- if (ep->num_ob_windows > MAX_IATU_OUT) {
- dev_err(dev, "Invalid *num-ob-windows*\n");
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
+ if (!res)
return -EINVAL;
- }
+
+ ep->phys_base = res->start;
+ ep->addr_size = resource_size(res);
ep->ib_window_map = devm_kcalloc(dev,
- BITS_TO_LONGS(ep->num_ib_windows),
+ BITS_TO_LONGS(pci->num_ib_windows),
sizeof(long),
GFP_KERNEL);
if (!ep->ib_window_map)
return -ENOMEM;
ep->ob_window_map = devm_kcalloc(dev,
- BITS_TO_LONGS(ep->num_ob_windows),
+ BITS_TO_LONGS(pci->num_ob_windows),
sizeof(long),
GFP_KERNEL);
if (!ep->ob_window_map)
return -ENOMEM;
- addr = devm_kcalloc(dev, ep->num_ob_windows, sizeof(phys_addr_t),
+ addr = devm_kcalloc(dev, pci->num_ob_windows, sizeof(phys_addr_t),
GFP_KERNEL);
if (!addr)
return -ENOMEM;
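[Editor's note] Two behavioural points in the dw_pcie_ep_init() hunks above: the window counts now come from the iATU hardware (pci->num_ib_windows/num_ob_windows, detected in dw_pcie_setup(), see pcie-designware.c below) rather than the num-ib-windows/num-ob-windows DT properties, and resource mapping is opt-out — a driver that has already populated dbi_base/dbi_base2 keeps its own mapping because of the "if (!pci->dbi_base)" guard. A sketch of such an override, with a hypothetical SoC-specific offset:

/* mapped by the driver before dw_pcie_ep_init(); the core's NULL check
 * then skips its default "dbi"/"dbi2" lookup
 */
pci->dbi_base = devm_pci_remap_cfg_resource(dev, my_dbi_res);
if (IS_ERR(pci->dbi_base))
	return PTR_ERR(pci->dbi_base);
pci->dbi_base2 = pci->dbi_base + MY_DBI2_OFFSET;	/* hypothetical */

return dw_pcie_ep_init(&pci->ep);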
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 44c2a6572199..8a84c005f32b 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -256,7 +256,7 @@ int dw_pcie_allocate_domains(struct pcie_port *pp)
return 0;
}
-void dw_pcie_free_msi(struct pcie_port *pp)
+static void dw_pcie_free_msi(struct pcie_port *pp)
{
if (pp->msi_irq) {
irq_set_chained_handler(pp->msi_irq, NULL);
@@ -275,19 +275,18 @@ void dw_pcie_free_msi(struct pcie_port *pp)
}
}
-void dw_pcie_msi_init(struct pcie_port *pp)
+static void dw_pcie_msi_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
u64 msi_target = (u64)pp->msi_data;
- if (!IS_ENABLED(CONFIG_PCI_MSI))
+ if (!pci_msi_enabled() || !pp->has_msi_ctrl)
return;
/* Program the msi_data */
dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_LO, lower_32_bits(msi_target));
dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, upper_32_bits(msi_target));
}
-EXPORT_SYMBOL_GPL(dw_pcie_msi_init);
int dw_pcie_host_init(struct pcie_port *pp)
{
@@ -310,6 +309,13 @@ int dw_pcie_host_init(struct pcie_port *pp)
dev_err(dev, "Missing *config* reg space\n");
}
+ if (!pci->dbi_base) {
+ struct resource *dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+ pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_res);
+ if (IS_ERR(pci->dbi_base))
+ return PTR_ERR(pci->dbi_base);
+ }
+
bridge = devm_pci_alloc_host_bridge(dev, 0);
if (!bridge)
return -ENOMEM;
@@ -350,44 +356,50 @@ int dw_pcie_host_init(struct pcie_port *pp)
}
}
- ret = of_property_read_u32(np, "num-viewport", &pci->num_viewport);
- if (ret)
- pci->num_viewport = 2;
-
if (pci->link_gen < 1)
pci->link_gen = of_pci_get_max_link_speed(np);
if (pci_msi_enabled()) {
- /*
- * If a specific SoC driver needs to change the
- * default number of vectors, it needs to implement
- * the set_num_vectors callback.
- */
- if (!pp->ops->set_num_vectors) {
+ pp->has_msi_ctrl = !(pp->ops->msi_host_init ||
+ of_property_read_bool(np, "msi-parent") ||
+ of_property_read_bool(np, "msi-map"));
+
+ if (!pp->num_vectors) {
pp->num_vectors = MSI_DEF_NUM_VECTORS;
- } else {
- pp->ops->set_num_vectors(pp);
-
- if (pp->num_vectors > MAX_MSI_IRQS ||
- pp->num_vectors == 0) {
- dev_err(dev,
- "Invalid number of vectors\n");
- return -EINVAL;
- }
+ } else if (pp->num_vectors > MAX_MSI_IRQS) {
+ dev_err(dev, "Invalid number of vectors\n");
+ return -EINVAL;
}
- if (!pp->ops->msi_host_init) {
+ if (pp->ops->msi_host_init) {
+ ret = pp->ops->msi_host_init(pp);
+ if (ret < 0)
+ return ret;
+ } else if (pp->has_msi_ctrl) {
+ if (!pp->msi_irq) {
+ pp->msi_irq = platform_get_irq_byname_optional(pdev, "msi");
+ if (pp->msi_irq < 0) {
+ pp->msi_irq = platform_get_irq(pdev, 0);
+ if (pp->msi_irq < 0)
+ return pp->msi_irq;
+ }
+ }
+
pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip;
ret = dw_pcie_allocate_domains(pp);
if (ret)
return ret;
- if (pp->msi_irq)
+ if (pp->msi_irq > 0)
irq_set_chained_handler_and_data(pp->msi_irq,
dw_chained_msi_isr,
pp);
+ ret = dma_set_mask(pci->dev, DMA_BIT_MASK(32));
+ if (ret)
+ dev_warn(pci->dev, "Failed to set DMA mask to 32-bit. Devices with only 32-bit MSI support may not work properly\n");
+
pp->msi_data = dma_map_single_attrs(pci->dev, &pp->msi_msg,
sizeof(pp->msi_msg),
DMA_FROM_DEVICE,
@@ -397,10 +409,6 @@ int dw_pcie_host_init(struct pcie_port *pp)
pp->msi_data = 0;
goto err_free_msi;
}
- } else {
- ret = pp->ops->msi_host_init(pp);
- if (ret < 0)
- return ret;
}
}
@@ -414,6 +422,18 @@ int dw_pcie_host_init(struct pcie_port *pp)
goto err_free_msi;
}
+ dw_pcie_setup_rc(pp);
+ dw_pcie_msi_init(pp);
+
+ if (!dw_pcie_link_up(pci) && pci->ops->start_link) {
+ ret = pci->ops->start_link(pci);
+ if (ret)
+ goto err_free_msi;
+ }
+
+ /* Ignore errors, the link may come up later */
+ dw_pcie_wait_for_link(pci);
+
bridge->sysdata = pp;
ret = pci_host_probe(bridge);
@@ -421,7 +441,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
return 0;
err_free_msi:
- if (pci_msi_enabled() && !pp->ops->msi_host_init)
+ if (pp->has_msi_ctrl)
dw_pcie_free_msi(pp);
return ret;
}
@@ -431,7 +451,7 @@ void dw_pcie_host_deinit(struct pcie_port *pp)
{
pci_stop_root_bus(pp->bridge->bus);
pci_remove_root_bus(pp->bridge->bus);
- if (pci_msi_enabled() && !pp->ops->msi_host_init)
+ if (pp->has_msi_ctrl)
dw_pcie_free_msi(pp);
}
EXPORT_SYMBOL_GPL(dw_pcie_host_deinit);
@@ -464,9 +484,7 @@ static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
type = PCIE_ATU_TYPE_CFG1;
- dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
- type, pp->cfg0_base,
- busdev, pp->cfg0_size);
+ dw_pcie_prog_outbound_atu(pci, 0, type, pp->cfg0_base, busdev, pp->cfg0_size);
return pp->va_cfg0_base + where;
}
@@ -480,9 +498,8 @@ static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,
ret = pci_generic_config_read(bus, devfn, where, size, val);
- if (!ret && pci->num_viewport <= 2)
- dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
- PCIE_ATU_TYPE_IO, pp->io_base,
+ if (!ret && pci->io_cfg_atu_shared)
+ dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO, pp->io_base,
pp->io_bus_addr, pp->io_size);
return ret;
@@ -497,9 +514,8 @@ static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,
ret = pci_generic_config_write(bus, devfn, where, size, val);
- if (!ret && pci->num_viewport <= 2)
- dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
- PCIE_ATU_TYPE_IO, pp->io_base,
+ if (!ret && pci->io_cfg_atu_shared)
+ dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO, pp->io_base,
pp->io_bus_addr, pp->io_size);
return ret;
@@ -531,6 +547,7 @@ static struct pci_ops dw_pcie_ops = {
void dw_pcie_setup_rc(struct pcie_port *pp)
{
+ int i;
u32 val, ctrl, num_ctrls;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
@@ -542,7 +559,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
dw_pcie_setup(pci);
- if (pci_msi_enabled() && !pp->ops->msi_host_init) {
+ if (pp->has_msi_ctrl) {
num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
/* Initialize IRQ Status array */
@@ -580,27 +597,45 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
dw_pcie_writel_dbi(pci, PCI_COMMAND, val);
+ /* Ensure all outbound windows are disabled so there are no multiple matches */
+ for (i = 0; i < pci->num_ob_windows; i++)
+ dw_pcie_disable_atu(pci, i, DW_PCIE_REGION_OUTBOUND);
+
/*
* If the platform provides its own child bus config accesses, it means
* the platform uses its own address translation component rather than
* ATU, so we should not program the ATU here.
*/
if (pp->bridge->child_ops == &dw_child_pcie_ops) {
- struct resource_entry *tmp, *entry = NULL;
+ int atu_idx = 0;
+ struct resource_entry *entry;
/* Get last memory resource entry */
- resource_list_for_each_entry(tmp, &pp->bridge->windows)
- if (resource_type(tmp->res) == IORESOURCE_MEM)
- entry = tmp;
-
- dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0,
- PCIE_ATU_TYPE_MEM, entry->res->start,
- entry->res->start - entry->offset,
- resource_size(entry->res));
- if (pci->num_viewport > 2)
- dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX2,
- PCIE_ATU_TYPE_IO, pp->io_base,
- pp->io_bus_addr, pp->io_size);
+ resource_list_for_each_entry(entry, &pp->bridge->windows) {
+ if (resource_type(entry->res) != IORESOURCE_MEM)
+ continue;
+
+ if (pci->num_ob_windows <= ++atu_idx)
+ break;
+
+ dw_pcie_prog_outbound_atu(pci, atu_idx,
+ PCIE_ATU_TYPE_MEM, entry->res->start,
+ entry->res->start - entry->offset,
+ resource_size(entry->res));
+ }
+
+ if (pp->io_size) {
+ if (pci->num_ob_windows > ++atu_idx)
+ dw_pcie_prog_outbound_atu(pci, atu_idx,
+ PCIE_ATU_TYPE_IO, pp->io_base,
+ pp->io_bus_addr, pp->io_size);
+ else
+ pci->io_cfg_atu_shared = true;
+ }
+
+ if (pci->num_ob_windows <= atu_idx)
+ dev_warn(pci->dev, "Resources exceed number of ATU entries (%d)",
+ pci->num_ob_windows);
}
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
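[Editor's note] The reworked setup_rc() loop hands out outbound ATU indices dynamically instead of the fixed INDEX0/INDEX1/INDEX2 scheme: index 0 stays reserved for config accesses, each IORESOURCE_MEM bridge window claims the next free index, and I/O only gets one if any remain — otherwise io_cfg_atu_shared is set and the config window is re-programmed around each I/O access. A worked example for num_ob_windows == 2 with one MEM and one I/O window:

/*
 * atu_idx walk with num_ob_windows == 2 (worked example, no new code):
 *   index 0 - CFG0/CFG1, programmed on demand in dw_pcie_other_conf_map_bus()
 *   index 1 - the MEM bridge window
 *   I/O     - no index left, so io_cfg_atu_shared = true and
 *             dw_pcie_rd/wr_other_conf() restore the I/O mapping on
 *             index 0 after every config cycle
 * (the "Resources exceed number of ATU entries" warning also fires here)
 */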
diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c
index e3e300669ed5..9b397c807261 100644
--- a/drivers/pci/controller/dwc/pcie-designware-plat.c
+++ b/drivers/pci/controller/dwc/pcie-designware-plat.c
@@ -33,25 +33,7 @@ struct dw_plat_pcie_of_data {
static const struct of_device_id dw_plat_pcie_of_match[];
-static int dw_plat_pcie_host_init(struct pcie_port *pp)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
-
- dw_pcie_setup_rc(pp);
- dw_pcie_wait_for_link(pci);
- dw_pcie_msi_init(pp);
-
- return 0;
-}
-
-static void dw_plat_set_num_vectors(struct pcie_port *pp)
-{
- pp->num_vectors = MAX_MSI_IRQS;
-}
-
static const struct dw_pcie_host_ops dw_plat_pcie_host_ops = {
- .host_init = dw_plat_pcie_host_init,
- .set_num_vectors = dw_plat_set_num_vectors,
};
static int dw_plat_pcie_establish_link(struct dw_pcie *pci)
@@ -122,12 +104,7 @@ static int dw_plat_add_pcie_port(struct dw_plat_pcie *dw_plat_pcie,
if (pp->irq < 0)
return pp->irq;
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- pp->msi_irq = platform_get_irq(pdev, 0);
- if (pp->msi_irq < 0)
- return pp->msi_irq;
- }
-
+ pp->num_vectors = MAX_MSI_IRQS;
pp->ops = &dw_plat_pcie_host_ops;
ret = dw_pcie_host_init(pp);
@@ -139,43 +116,11 @@ static int dw_plat_add_pcie_port(struct dw_plat_pcie *dw_plat_pcie,
return 0;
}
-static int dw_plat_add_pcie_ep(struct dw_plat_pcie *dw_plat_pcie,
- struct platform_device *pdev)
-{
- int ret;
- struct dw_pcie_ep *ep;
- struct resource *res;
- struct device *dev = &pdev->dev;
- struct dw_pcie *pci = dw_plat_pcie->pci;
-
- ep = &pci->ep;
- ep->ops = &pcie_ep_ops;
-
- pci->dbi_base2 = devm_platform_ioremap_resource_byname(pdev, "dbi2");
- if (IS_ERR(pci->dbi_base2))
- return PTR_ERR(pci->dbi_base2);
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
- if (!res)
- return -EINVAL;
-
- ep->phys_base = res->start;
- ep->addr_size = resource_size(res);
-
- ret = dw_pcie_ep_init(ep);
- if (ret) {
- dev_err(dev, "Failed to initialize endpoint\n");
- return ret;
- }
- return 0;
-}
-
static int dw_plat_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct dw_plat_pcie *dw_plat_pcie;
struct dw_pcie *pci;
- struct resource *res; /* Resource from DT */
int ret;
const struct of_device_id *match;
const struct dw_plat_pcie_of_data *data;
@@ -202,14 +147,6 @@ static int dw_plat_pcie_probe(struct platform_device *pdev)
dw_plat_pcie->pci = pci;
dw_plat_pcie->mode = mode;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
- if (!res)
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- pci->dbi_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(pci->dbi_base))
- return PTR_ERR(pci->dbi_base);
-
platform_set_drvdata(pdev, dw_plat_pcie);
switch (dw_plat_pcie->mode) {
@@ -225,9 +162,8 @@ static int dw_plat_pcie_probe(struct platform_device *pdev)
if (!IS_ENABLED(CONFIG_PCIE_DW_PLAT_EP))
return -ENODEV;
- ret = dw_plat_add_pcie_ep(dw_plat_pcie, pdev);
- if (ret < 0)
- return ret;
+ pci->ep.ops = &pcie_ep_ops;
+ return dw_pcie_ep_init(&pci->ep);
break;
default:
dev_err(dev, "INVALID device type %d\n", dw_plat_pcie->mode);
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index c2dea8fc97c8..645fa1892375 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -228,7 +228,7 @@ static void dw_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg,
static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, u8 func_no,
int index, int type,
u64 cpu_addr, u64 pci_addr,
- u32 size)
+ u64 size)
{
u32 retries, val;
u64 limit_addr = cpu_addr + size - 1;
@@ -245,8 +245,10 @@ static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, u8 func_no,
lower_32_bits(pci_addr));
dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
upper_32_bits(pci_addr));
- dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1,
- type | PCIE_ATU_FUNC_NUM(func_no));
+ val = type | PCIE_ATU_FUNC_NUM(func_no);
+ val = upper_32_bits(size - 1) ?
+ val | PCIE_ATU_INCREASE_REGION_SIZE : val;
+ dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, val);
dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
PCIE_ATU_ENABLE);
@@ -267,7 +269,7 @@ static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, u8 func_no,
static void __dw_pcie_prog_outbound_atu(struct dw_pcie *pci, u8 func_no,
int index, int type, u64 cpu_addr,
- u64 pci_addr, u32 size)
+ u64 pci_addr, u64 size)
{
u32 retries, val;
@@ -311,7 +313,7 @@ static void __dw_pcie_prog_outbound_atu(struct dw_pcie *pci, u8 func_no,
}
void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
- u64 cpu_addr, u64 pci_addr, u32 size)
+ u64 cpu_addr, u64 pci_addr, u64 size)
{
__dw_pcie_prog_outbound_atu(pci, 0, index, type,
cpu_addr, pci_addr, size);
@@ -544,6 +546,70 @@ static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
return 0;
}
+static void dw_pcie_iatu_detect_regions_unroll(struct dw_pcie *pci)
+{
+ int max_region, i, ob = 0, ib = 0;
+ u32 val;
+
+ max_region = min((int)pci->atu_size / 512, 256);
+
+ for (i = 0; i < max_region; i++) {
+ dw_pcie_writel_ob_unroll(pci, i, PCIE_ATU_UNR_LOWER_TARGET,
+ 0x11110000);
+
+ val = dw_pcie_readl_ob_unroll(pci, i, PCIE_ATU_UNR_LOWER_TARGET);
+ if (val == 0x11110000)
+ ob++;
+ else
+ break;
+ }
+
+ for (i = 0; i < max_region; i++) {
+ dw_pcie_writel_ib_unroll(pci, i, PCIE_ATU_UNR_LOWER_TARGET,
+ 0x11110000);
+
+ val = dw_pcie_readl_ib_unroll(pci, i, PCIE_ATU_UNR_LOWER_TARGET);
+ if (val == 0x11110000)
+ ib++;
+ else
+ break;
+ }
+ pci->num_ib_windows = ib;
+ pci->num_ob_windows = ob;
+}
+
+static void dw_pcie_iatu_detect_regions(struct dw_pcie *pci)
+{
+ int max_region, i, ob = 0, ib = 0;
+ u32 val;
+
+ dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, 0xFF);
+ max_region = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT) + 1;
+
+ for (i = 0; i < max_region; i++) {
+ dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_OUTBOUND | i);
+ dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, 0x11110000);
+ val = dw_pcie_readl_dbi(pci, PCIE_ATU_LOWER_TARGET);
+ if (val == 0x11110000)
+ ob++;
+ else
+ break;
+ }
+
+ for (i = 0; i < max_region; i++) {
+ dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND | i);
+ dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, 0x11110000);
+ val = dw_pcie_readl_dbi(pci, PCIE_ATU_LOWER_TARGET);
+ if (val == 0x11110000)
+ ib++;
+ else
+ break;
+ }
+
+ pci->num_ib_windows = ib;
+ pci->num_ob_windows = ob;
+}
+
void dw_pcie_setup(struct dw_pcie *pci)
{
u32 val;
@@ -554,15 +620,30 @@ void dw_pcie_setup(struct dw_pcie *pci)
if (pci->version >= 0x480A || (!pci->version &&
dw_pcie_iatu_unroll_enabled(pci))) {
pci->iatu_unroll_enabled = true;
- if (!pci->atu_base)
- pci->atu_base =
- devm_platform_ioremap_resource_byname(pdev, "atu");
- if (IS_ERR(pci->atu_base))
- pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
- }
- dev_dbg(pci->dev, "iATU unroll: %s\n", pci->iatu_unroll_enabled ?
+ if (!pci->atu_base) {
+ struct resource *res =
+ platform_get_resource_byname(pdev, IORESOURCE_MEM, "atu");
+ if (res)
+ pci->atu_size = resource_size(res);
+ pci->atu_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pci->atu_base))
+ pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
+ }
+
+ if (!pci->atu_size)
+ /* Pick a minimal default, enough for 8 in and 8 out windows */
+ pci->atu_size = SZ_4K;
+
+ dw_pcie_iatu_detect_regions_unroll(pci);
+ } else
+ dw_pcie_iatu_detect_regions(pci);
+
+ dev_info(pci->dev, "iATU unroll: %s\n", pci->iatu_unroll_enabled ?
"enabled" : "disabled");
+ dev_info(pci->dev, "Detected iATU regions: %u outbound, %u inbound",
+ pci->num_ob_windows, pci->num_ib_windows);
+
if (pci->link_gen > 0)
dw_pcie_link_set_max_speed(pci, pci->link_gen);
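[Editor's note] The detection helpers above exploit the fact that every implemented iATU region has its own LOWER_TARGET register: write a recognizable pattern and the first region that does not read it back marks the end of the implemented set. Assuming the usual unroll register layout, regions sit at a fixed 512-byte stride (outbound at i * 0x200, inbound at i * 0x200 + 0x100), which is why:

/* stride arithmetic assumed by dw_pcie_iatu_detect_regions_unroll() */
max_region = min((int)pci->atu_size / 512, 256);	/* 4096 / 512 == 8 */
/* hence the SZ_4K fallback covers 8 outbound plus 8 inbound windows */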
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index 9d2f511f13fa..0207840756c4 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -80,10 +80,8 @@
#define PCIE_ATU_VIEWPORT 0x900
#define PCIE_ATU_REGION_INBOUND BIT(31)
#define PCIE_ATU_REGION_OUTBOUND 0
-#define PCIE_ATU_REGION_INDEX2 0x2
-#define PCIE_ATU_REGION_INDEX1 0x1
-#define PCIE_ATU_REGION_INDEX0 0x0
#define PCIE_ATU_CR1 0x904
+#define PCIE_ATU_INCREASE_REGION_SIZE BIT(13)
#define PCIE_ATU_TYPE_MEM 0x0
#define PCIE_ATU_TYPE_IO 0x2
#define PCIE_ATU_TYPE_CFG0 0x4
@@ -174,11 +172,11 @@ enum dw_pcie_device_mode {
struct dw_pcie_host_ops {
int (*host_init)(struct pcie_port *pp);
- void (*set_num_vectors)(struct pcie_port *pp);
int (*msi_host_init)(struct pcie_port *pp);
};
struct pcie_port {
+ bool has_msi_ctrl:1;
u64 cfg0_base;
void __iomem *va_cfg0_base;
u32 cfg0_size;
@@ -239,8 +237,6 @@ struct dw_pcie_ep {
phys_addr_t *outbound_addr;
unsigned long *ib_window_map;
unsigned long *ob_window_map;
- u32 num_ib_windows;
- u32 num_ob_windows;
void __iomem *msi_mem;
phys_addr_t msi_mem_phys;
struct pci_epf_bar *epf_bar[PCI_STD_NUM_BARS];
@@ -265,8 +261,9 @@ struct dw_pcie {
void __iomem *dbi_base2;
/* Used when iatu_unroll_enabled is true */
void __iomem *atu_base;
- u32 num_viewport;
- u8 iatu_unroll_enabled;
+ size_t atu_size;
+ u32 num_ib_windows;
+ u32 num_ob_windows;
struct pcie_port pp;
struct dw_pcie_ep ep;
const struct dw_pcie_ops *ops;
@@ -274,6 +271,8 @@ struct dw_pcie {
int num_lanes;
int link_gen;
u8 n_fts[2];
+ bool iatu_unroll_enabled: 1;
+ bool io_cfg_atu_shared: 1;
};
#define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp)
@@ -295,7 +294,7 @@ void dw_pcie_upconfig_setup(struct dw_pcie *pci);
int dw_pcie_wait_for_link(struct dw_pcie *pci);
void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index,
int type, u64 cpu_addr, u64 pci_addr,
- u32 size);
+ u64 size);
void dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
int type, u64 cpu_addr, u64 pci_addr,
u32 size);
@@ -365,8 +364,6 @@ static inline void dw_pcie_dbi_ro_wr_dis(struct dw_pcie *pci)
#ifdef CONFIG_PCIE_DW_HOST
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp);
-void dw_pcie_msi_init(struct pcie_port *pp);
-void dw_pcie_free_msi(struct pcie_port *pp);
void dw_pcie_setup_rc(struct pcie_port *pp);
int dw_pcie_host_init(struct pcie_port *pp);
void dw_pcie_host_deinit(struct pcie_port *pp);
@@ -379,14 +376,6 @@ static inline irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
return IRQ_NONE;
}
-static inline void dw_pcie_msi_init(struct pcie_port *pp)
-{
-}
-
-static inline void dw_pcie_free_msi(struct pcie_port *pp)
-{
-}
-
static inline void dw_pcie_setup_rc(struct pcie_port *pp)
{
}
diff --git a/drivers/pci/controller/dwc/pcie-hisi.c b/drivers/pci/controller/dwc/pcie-hisi.c
index 5ca86796d43a..8fc5960faf28 100644
--- a/drivers/pci/controller/dwc/pcie-hisi.c
+++ b/drivers/pci/controller/dwc/pcie-hisi.c
@@ -100,7 +100,6 @@ static int hisi_pcie_init(struct pci_config_window *cfg)
}
const struct pci_ecam_ops hisi_pcie_ops = {
- .bus_shift = 20,
.init = hisi_pcie_init,
.pci_ops = {
.map_bus = hisi_pcie_map_bus,
@@ -135,7 +134,6 @@ static int hisi_pcie_platform_init(struct pci_config_window *cfg)
}
static const struct pci_ecam_ops hisi_pcie_platform_ops = {
- .bus_shift = 20,
.init = hisi_pcie_platform_init,
.pci_ops = {
.map_bus = hisi_pcie_map_bus,
diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c
index afc1abbe49aa..86f9d16c50d7 100644
--- a/drivers/pci/controller/dwc/pcie-histb.c
+++ b/drivers/pci/controller/dwc/pcie-histb.c
@@ -169,40 +169,32 @@ static int histb_pcie_link_up(struct dw_pcie *pci)
return 0;
}
-static int histb_pcie_establish_link(struct pcie_port *pp)
+static int histb_pcie_start_link(struct dw_pcie *pci)
{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct histb_pcie *hipcie = to_histb_pcie(pci);
u32 regval;
- if (dw_pcie_link_up(pci)) {
- dev_info(pci->dev, "Link already up\n");
- return 0;
- }
-
- /* PCIe RC work mode */
- regval = histb_pcie_readl(hipcie, PCIE_SYS_CTRL0);
- regval &= ~PCIE_DEVICE_TYPE_MASK;
- regval |= PCIE_WM_RC;
- histb_pcie_writel(hipcie, PCIE_SYS_CTRL0, regval);
-
- /* setup root complex */
- dw_pcie_setup_rc(pp);
-
/* assert LTSSM enable */
regval = histb_pcie_readl(hipcie, PCIE_SYS_CTRL7);
regval |= PCIE_APP_LTSSM_ENABLE;
histb_pcie_writel(hipcie, PCIE_SYS_CTRL7, regval);
- return dw_pcie_wait_for_link(pci);
+ return 0;
}
static int histb_pcie_host_init(struct pcie_port *pp)
{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct histb_pcie *hipcie = to_histb_pcie(pci);
+ u32 regval;
+
pp->bridge->ops = &histb_pci_ops;
- histb_pcie_establish_link(pp);
- dw_pcie_msi_init(pp);
+ /* PCIe RC work mode */
+ regval = histb_pcie_readl(hipcie, PCIE_SYS_CTRL0);
+ regval &= ~PCIE_DEVICE_TYPE_MASK;
+ regval |= PCIE_WM_RC;
+ histb_pcie_writel(hipcie, PCIE_SYS_CTRL0, regval);
return 0;
}
@@ -300,6 +292,7 @@ static const struct dw_pcie_ops dw_pcie_ops = {
.read_dbi = histb_pcie_read_dbi,
.write_dbi = histb_pcie_write_dbi,
.link_up = histb_pcie_link_up,
+ .start_link = histb_pcie_start_link,
};
static int histb_pcie_probe(struct platform_device *pdev)
@@ -400,12 +393,6 @@ static int histb_pcie_probe(struct platform_device *pdev)
return PTR_ERR(hipcie->bus_reset);
}
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- pp->msi_irq = platform_get_irq_byname(pdev, "msi");
- if (pp->msi_irq < 0)
- return pp->msi_irq;
- }
-
hipcie->phy = devm_phy_get(dev, "phy");
if (IS_ERR(hipcie->phy)) {
dev_info(dev, "no pcie-phy found\n");
diff --git a/drivers/pci/controller/dwc/pcie-intel-gw.c b/drivers/pci/controller/dwc/pcie-intel-gw.c
index 5650cb78acba..0cedd1f95f37 100644
--- a/drivers/pci/controller/dwc/pcie-intel-gw.c
+++ b/drivers/pci/controller/dwc/pcie-intel-gw.c
@@ -58,8 +58,6 @@
struct intel_pcie_soc {
unsigned int pcie_ver;
- unsigned int pcie_atu_offset;
- u32 num_viewport;
};
struct intel_pcie_port {
@@ -153,15 +151,6 @@ static void intel_pcie_init_n_fts(struct dw_pcie *pci)
pci->n_fts[0] = PORT_AFR_N_FTS_GEN12_DFT;
}
-static void intel_pcie_rc_setup(struct intel_pcie_port *lpp)
-{
- intel_pcie_ltssm_disable(lpp);
- intel_pcie_link_setup(lpp);
- intel_pcie_init_n_fts(&lpp->pci);
- dw_pcie_setup_rc(&lpp->pci.pp);
- dw_pcie_upconfig_setup(&lpp->pci);
-}
-
static int intel_pcie_ep_rst_init(struct intel_pcie_port *lpp)
{
struct device *dev = lpp->pci.dev;
@@ -213,14 +202,6 @@ static void intel_pcie_device_rst_deassert(struct intel_pcie_port *lpp)
gpiod_set_value_cansleep(lpp->reset_gpio, 0);
}
-static int intel_pcie_app_logic_setup(struct intel_pcie_port *lpp)
-{
- intel_pcie_device_rst_deassert(lpp);
- intel_pcie_ltssm_enable(lpp);
-
- return dw_pcie_wait_for_link(&lpp->pci);
-}
-
static void intel_pcie_core_irq_disable(struct intel_pcie_port *lpp)
{
pcie_app_wr(lpp, PCIE_APP_IRNEN, 0);
@@ -234,10 +215,6 @@ static int intel_pcie_get_resources(struct platform_device *pdev)
struct device *dev = pci->dev;
int ret;
- pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "dbi");
- if (IS_ERR(pci->dbi_base))
- return PTR_ERR(pci->dbi_base);
-
lpp->core_clk = devm_clk_get(dev, NULL);
if (IS_ERR(lpp->core_clk)) {
ret = PTR_ERR(lpp->core_clk);
@@ -274,11 +251,6 @@ static int intel_pcie_get_resources(struct platform_device *pdev)
return 0;
}
-static void intel_pcie_deinit_phy(struct intel_pcie_port *lpp)
-{
- phy_exit(lpp->phy);
-}
-
static int intel_pcie_wait_l2(struct intel_pcie_port *lpp)
{
u32 value;
@@ -315,6 +287,7 @@ static void intel_pcie_turn_off(struct intel_pcie_port *lpp)
static int intel_pcie_host_setup(struct intel_pcie_port *lpp)
{
int ret;
+ struct dw_pcie *pci = &lpp->pci;
intel_pcie_core_rst_assert(lpp);
intel_pcie_device_rst_assert(lpp);
@@ -331,8 +304,18 @@ static int intel_pcie_host_setup(struct intel_pcie_port *lpp)
goto clk_err;
}
- intel_pcie_rc_setup(lpp);
- ret = intel_pcie_app_logic_setup(lpp);
+ pci->atu_base = pci->dbi_base + 0xC0000;
+
+ intel_pcie_ltssm_disable(lpp);
+ intel_pcie_link_setup(lpp);
+ intel_pcie_init_n_fts(pci);
+ dw_pcie_setup_rc(&pci->pp);
+ dw_pcie_upconfig_setup(pci);
+
+ intel_pcie_device_rst_deassert(lpp);
+ intel_pcie_ltssm_enable(lpp);
+
+ ret = dw_pcie_wait_for_link(pci);
if (ret)
goto app_init_err;
@@ -346,7 +329,7 @@ app_init_err:
clk_disable_unprepare(lpp->core_clk);
clk_err:
intel_pcie_core_rst_assert(lpp);
- intel_pcie_deinit_phy(lpp);
+ phy_exit(lpp->phy);
return ret;
}
@@ -357,7 +340,7 @@ static void __intel_pcie_remove(struct intel_pcie_port *lpp)
intel_pcie_turn_off(lpp);
clk_disable_unprepare(lpp->core_clk);
intel_pcie_core_rst_assert(lpp);
- intel_pcie_deinit_phy(lpp);
+ phy_exit(lpp->phy);
}
static int intel_pcie_remove(struct platform_device *pdev)
@@ -381,7 +364,7 @@ static int __maybe_unused intel_pcie_suspend_noirq(struct device *dev)
if (ret)
return ret;
- intel_pcie_deinit_phy(lpp);
+ phy_exit(lpp->phy);
clk_disable_unprepare(lpp->core_clk);
return ret;
}
@@ -401,14 +384,6 @@ static int intel_pcie_rc_init(struct pcie_port *pp)
return intel_pcie_host_setup(lpp);
}
-/*
- * Dummy function so that DW core doesn't configure MSI
- */
-static int intel_pcie_msi_init(struct pcie_port *pp)
-{
- return 0;
-}
-
static u64 intel_pcie_cpu_addr(struct dw_pcie *pcie, u64 cpu_addr)
{
return cpu_addr + BUS_IATU_OFFSET;
@@ -420,13 +395,10 @@ static const struct dw_pcie_ops intel_pcie_ops = {
static const struct dw_pcie_host_ops intel_pcie_dw_ops = {
.host_init = intel_pcie_rc_init,
- .msi_host_init = intel_pcie_msi_init,
};
static const struct intel_pcie_soc pcie_data = {
.pcie_ver = 0x520A,
- .pcie_atu_offset = 0xC0000,
- .num_viewport = 3,
};
static int intel_pcie_probe(struct platform_device *pdev)
@@ -461,7 +433,6 @@ static int intel_pcie_probe(struct platform_device *pdev)
pci->ops = &intel_pcie_ops;
pci->version = data->pcie_ver;
- pci->atu_base = pci->dbi_base + data->pcie_atu_offset;
pp->ops = &intel_pcie_dw_ops;
ret = dw_pcie_host_init(pp);
@@ -470,12 +441,6 @@ static int intel_pcie_probe(struct platform_device *pdev)
return ret;
}
- /*
- * Intel PCIe doesn't configure IO region, so set viewport
- * to not perform IO region access.
- */
- pci->num_viewport = data->num_viewport;
-
return 0;
}
diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c
index d0a6a2dee6f5..026fd1e42a55 100644
--- a/drivers/pci/controller/dwc/pcie-kirin.c
+++ b/drivers/pci/controller/dwc/pcie-kirin.c
@@ -157,11 +157,6 @@ static long kirin_pcie_get_resource(struct kirin_pcie *kirin_pcie,
if (IS_ERR(kirin_pcie->phy_base))
return PTR_ERR(kirin_pcie->phy_base);
- kirin_pcie->pci->dbi_base =
- devm_platform_ioremap_resource_byname(pdev, "dbi");
- if (IS_ERR(kirin_pcie->pci->dbi_base))
- return PTR_ERR(kirin_pcie->pci->dbi_base);
-
kirin_pcie->crgctrl =
syscon_regmap_lookup_by_compatible("hisilicon,hi3660-crgctrl");
if (IS_ERR(kirin_pcie->crgctrl))
@@ -395,32 +390,14 @@ static int kirin_pcie_link_up(struct dw_pcie *pci)
return 0;
}
-static int kirin_pcie_establish_link(struct pcie_port *pp)
+static int kirin_pcie_start_link(struct dw_pcie *pci)
{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
- struct device *dev = kirin_pcie->pci->dev;
- int count = 0;
-
- if (kirin_pcie_link_up(pci))
- return 0;
-
- dw_pcie_setup_rc(pp);
/* assert LTSSM enable */
kirin_apb_ctrl_writel(kirin_pcie, PCIE_LTSSM_ENABLE_BIT,
PCIE_APP_LTSSM_ENABLE);
- /* check if the link is up or not */
- while (!kirin_pcie_link_up(pci)) {
- usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX);
- count++;
- if (count == 1000) {
- dev_err(dev, "Link Fail\n");
- return -EINVAL;
- }
- }
-
return 0;
}
@@ -428,9 +405,6 @@ static int kirin_pcie_host_init(struct pcie_port *pp)
{
pp->bridge->ops = &kirin_pci_ops;
- kirin_pcie_establish_link(pp);
- dw_pcie_msi_init(pp);
-
return 0;
}
@@ -438,42 +412,13 @@ static const struct dw_pcie_ops kirin_dw_pcie_ops = {
.read_dbi = kirin_pcie_read_dbi,
.write_dbi = kirin_pcie_write_dbi,
.link_up = kirin_pcie_link_up,
+ .start_link = kirin_pcie_start_link,
};
static const struct dw_pcie_host_ops kirin_pcie_host_ops = {
.host_init = kirin_pcie_host_init,
};
-static int kirin_pcie_add_msi(struct dw_pcie *pci,
- struct platform_device *pdev)
-{
- int irq;
-
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
-
- pci->pp.msi_irq = irq;
- }
-
- return 0;
-}
-
-static int kirin_add_pcie_port(struct dw_pcie *pci,
- struct platform_device *pdev)
-{
- int ret;
-
- ret = kirin_pcie_add_msi(pci, pdev);
- if (ret)
- return ret;
-
- pci->pp.ops = &kirin_pcie_host_ops;
-
- return dw_pcie_host_init(&pci->pp);
-}
-
static int kirin_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -496,6 +441,7 @@ static int kirin_pcie_probe(struct platform_device *pdev)
pci->dev = dev;
pci->ops = &kirin_dw_pcie_ops;
+ pci->pp.ops = &kirin_pcie_host_ops;
kirin_pcie->pci = pci;
ret = kirin_pcie_get_clk(kirin_pcie, pdev);
@@ -521,7 +467,7 @@ static int kirin_pcie_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, kirin_pcie);
- return kirin_add_pcie_port(pci, pdev);
+ return dw_pcie_host_init(&pci->pp);
}
static const struct of_device_id kirin_pcie_match[] = {
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index b4761640ffd9..affa2713bf80 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk.h>
+#include <linux/crc8.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
@@ -57,6 +58,7 @@
#define PCIE20_PARF_SID_OFFSET 0x234
#define PCIE20_PARF_BDF_TRANSLATE_CFG 0x24C
#define PCIE20_PARF_DEVICE_TYPE 0x1000
+#define PCIE20_PARF_BDF_TO_SID_TABLE_N 0x2000
#define PCIE20_ELBI_SYS_CTRL 0x04
#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE BIT(0)
@@ -97,6 +99,9 @@
#define QCOM_PCIE_2_1_0_MAX_SUPPLY 3
#define QCOM_PCIE_2_1_0_MAX_CLOCKS 5
+
+#define QCOM_PCIE_CRC8_POLYNOMIAL (BIT(2) | BIT(1) | BIT(0))
+
struct qcom_pcie_resources_2_1_0 {
struct clk_bulk_data clks[QCOM_PCIE_2_1_0_MAX_CLOCKS];
struct reset_control *pci_reset;
@@ -179,6 +184,7 @@ struct qcom_pcie_ops {
void (*deinit)(struct qcom_pcie *pcie);
void (*post_deinit)(struct qcom_pcie *pcie);
void (*ltssm_enable)(struct qcom_pcie *pcie);
+ int (*config_sid)(struct qcom_pcie *pcie);
};
struct qcom_pcie {
@@ -207,18 +213,15 @@ static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}
-static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
+static int qcom_pcie_start_link(struct dw_pcie *pci)
{
- struct dw_pcie *pci = pcie->pci;
-
- if (dw_pcie_link_up(pci))
- return 0;
+ struct qcom_pcie *pcie = to_qcom_pcie(pci);
/* Enable Link Training state machine */
if (pcie->ops->ltssm_enable)
pcie->ops->ltssm_enable(pcie);
- return dw_pcie_wait_for_link(pci);
+ return 0;
}
static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
@@ -1261,6 +1264,77 @@ static int qcom_pcie_link_up(struct dw_pcie *pci)
return !!(val & PCI_EXP_LNKSTA_DLLLA);
}
+static int qcom_pcie_config_sid_sm8250(struct qcom_pcie *pcie)
+{
+ /* iommu map structure */
+ struct {
+ u32 bdf;
+ u32 phandle;
+ u32 smmu_sid;
+ u32 smmu_sid_len;
+ } *map;
+ void __iomem *bdf_to_sid_base = pcie->parf + PCIE20_PARF_BDF_TO_SID_TABLE_N;
+ struct device *dev = pcie->pci->dev;
+ u8 qcom_pcie_crc8_table[CRC8_TABLE_SIZE];
+ int i, nr_map, size = 0;
+ u32 smmu_sid_base;
+
+ of_get_property(dev->of_node, "iommu-map", &size);
+ if (!size)
+ return 0;
+
+ map = kzalloc(size, GFP_KERNEL);
+ if (!map)
+ return -ENOMEM;
+
+ of_property_read_u32_array(dev->of_node,
+ "iommu-map", (u32 *)map, size / sizeof(u32));
+
+ nr_map = size / (sizeof(*map));
+
+ crc8_populate_msb(qcom_pcie_crc8_table, QCOM_PCIE_CRC8_POLYNOMIAL);
+
+ /* Registers need to be zeroed out first */
+ memset_io(bdf_to_sid_base, 0, CRC8_TABLE_SIZE * sizeof(u32));
+
+ /* Extract the SMMU SID base from the first entry of iommu-map */
+ smmu_sid_base = map[0].smmu_sid;
+
+ /* Look for an available entry to hold the mapping */
+ for (i = 0; i < nr_map; i++) {
+ u16 bdf_be = cpu_to_be16(map[i].bdf);
+ u32 val;
+ u8 hash;
+
+ hash = crc8(qcom_pcie_crc8_table, (u8 *)&bdf_be, sizeof(bdf_be),
+ 0);
+
+ val = readl(bdf_to_sid_base + hash * sizeof(u32));
+
+ /* If the register is already populated, look for next available entry */
+ while (val) {
+ u8 current_hash = hash++;
+ u8 next_mask = 0xff;
+
+ /* If the NEXT field is NULL, update it with the next hash */
+ if (!(val & next_mask)) {
+ val |= (u32)hash;
+ writel(val, bdf_to_sid_base + current_hash * sizeof(u32));
+ }
+
+ val = readl(bdf_to_sid_base + hash * sizeof(u32));
+ }
+
+ /* BDF [31:16] | SID [15:8] | NEXT [7:0] */
+ val = map[i].bdf << 16 | (map[i].smmu_sid - smmu_sid_base) << 8 | 0;
+ writel(val, bdf_to_sid_base + hash * sizeof(u32));
+ }
+
+ kfree(map);
+
+ return 0;
+}
+
static int qcom_pcie_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
@@ -1283,16 +1357,16 @@ static int qcom_pcie_host_init(struct pcie_port *pp)
goto err_disable_phy;
}
- dw_pcie_setup_rc(pp);
- dw_pcie_msi_init(pp);
-
qcom_ep_reset_deassert(pcie);
- ret = qcom_pcie_establish_link(pcie);
- if (ret)
- goto err;
+ if (pcie->ops->config_sid) {
+ ret = pcie->ops->config_sid(pcie);
+ if (ret)
+ goto err;
+ }
return 0;
+
err:
qcom_ep_reset_assert(pcie);
if (pcie->ops->post_deinit)
@@ -1361,14 +1435,25 @@ static const struct qcom_pcie_ops ops_2_7_0 = {
.post_deinit = qcom_pcie_post_deinit_2_7_0,
};
+/* Qcom IP rev.: 1.9.0 */
+static const struct qcom_pcie_ops ops_1_9_0 = {
+ .get_resources = qcom_pcie_get_resources_2_7_0,
+ .init = qcom_pcie_init_2_7_0,
+ .deinit = qcom_pcie_deinit_2_7_0,
+ .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
+ .post_init = qcom_pcie_post_init_2_7_0,
+ .post_deinit = qcom_pcie_post_deinit_2_7_0,
+ .config_sid = qcom_pcie_config_sid_sm8250,
+};
+
static const struct dw_pcie_ops dw_pcie_ops = {
.link_up = qcom_pcie_link_up,
+ .start_link = qcom_pcie_start_link,
};
static int qcom_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *res;
struct pcie_port *pp;
struct dw_pcie *pci;
struct qcom_pcie *pcie;
@@ -1407,13 +1492,6 @@ static int qcom_pcie_probe(struct platform_device *pdev)
goto err_pm_runtime_put;
}
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
- pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
- if (IS_ERR(pci->dbi_base)) {
- ret = PTR_ERR(pci->dbi_base);
- goto err_pm_runtime_put;
- }
-
pcie->elbi = devm_platform_ioremap_resource_byname(pdev, "elbi");
if (IS_ERR(pcie->elbi)) {
ret = PTR_ERR(pcie->elbi);
@@ -1432,14 +1510,6 @@ static int qcom_pcie_probe(struct platform_device *pdev)
pp->ops = &qcom_pcie_dw_ops;
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- pp->msi_irq = platform_get_irq_byname(pdev, "msi");
- if (pp->msi_irq < 0) {
- ret = pp->msi_irq;
- goto err_pm_runtime_put;
- }
- }
-
ret = phy_init(pcie->phy);
if (ret) {
pm_runtime_disable(&pdev->dev);
@@ -1474,6 +1544,7 @@ static const struct of_device_id qcom_pcie_match[] = {
{ .compatible = "qcom,pcie-ipq4019", .data = &ops_2_4_0 },
{ .compatible = "qcom,pcie-qcs404", .data = &ops_2_4_0 },
{ .compatible = "qcom,pcie-sdm845", .data = &ops_2_7_0 },
+ { .compatible = "qcom,pcie-sm8250", .data = &ops_1_9_0 },
{ }
};
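[Editor's note] qcom_pcie_config_sid_sm8250() above builds a 256-entry hardware hash table: each 32-bit slot packs BDF[31:16] | SID[15:8] | NEXT[7:0], the slot is chosen by crc8 over the big-endian BDF, and collisions are chained through the NEXT byte. A worked example, assuming a hypothetical two-entry iommu-map:

/*
 * Hypothetical iommu-map:
 *   <0x000 &apps_smmu 0x1c00 1>, <0x100 &apps_smmu 0x1c01 1>
 * smmu_sid_base = 0x1c00 (taken from entry 0). For the second entry:
 *   hash = crc8(tbl, (u8 *)&bdf_be, sizeof(bdf_be), 0);  // bdf_be = be16 0x0100
 *   val  = 0x0100 << 16 | (0x1c01 - 0x1c00) << 8 | 0;    // == 0x01000100
 *   writel(val, bdf_to_sid_base + hash * sizeof(u32));
 * If that slot is occupied, its NEXT byte is pointed at hash + 1 and the
 * walk continues -- open addressing with an explicit chain link.
 */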
diff --git a/drivers/pci/controller/dwc/pcie-spear13xx.c b/drivers/pci/controller/dwc/pcie-spear13xx.c
index e348225f651f..1a9e353bef55 100644
--- a/drivers/pci/controller/dwc/pcie-spear13xx.c
+++ b/drivers/pci/controller/dwc/pcie-spear13xx.c
@@ -66,32 +66,10 @@ struct pcie_app_reg {
#define to_spear13xx_pcie(x) dev_get_drvdata((x)->dev)
-static int spear13xx_pcie_establish_link(struct spear13xx_pcie *spear13xx_pcie)
+static int spear13xx_pcie_start_link(struct dw_pcie *pci)
{
- struct dw_pcie *pci = spear13xx_pcie->pci;
- struct pcie_port *pp = &pci->pp;
+ struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci);
struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
- u32 val;
- u32 exp_cap_off = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
-
- if (dw_pcie_link_up(pci)) {
- dev_err(pci->dev, "link already up\n");
- return 0;
- }
-
- dw_pcie_setup_rc(pp);
-
- /*
- * this controller support only 128 bytes read size, however its
- * default value in capability register is 512 bytes. So force
- * it to 128 here.
- */
- val = dw_pcie_readw_dbi(pci, exp_cap_off + PCI_EXP_DEVCTL);
- val &= ~PCI_EXP_DEVCTL_READRQ;
- dw_pcie_writew_dbi(pci, exp_cap_off + PCI_EXP_DEVCTL, val);
-
- dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, 0x104A);
- dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, 0xCD80);
/* enable ltssm */
writel(DEVICE_TYPE_RC | (1 << MISCTRL_EN_ID)
@@ -99,7 +77,7 @@ static int spear13xx_pcie_establish_link(struct spear13xx_pcie *spear13xx_pcie)
| ((u32)1 << REG_TRANSLATION_ENABLE),
&app_reg->app_ctrl_0);
- return dw_pcie_wait_for_link(pci);
+ return 0;
}
static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg)
@@ -124,16 +102,12 @@ static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg)
static void spear13xx_pcie_enable_interrupts(struct spear13xx_pcie *spear13xx_pcie)
{
- struct dw_pcie *pci = spear13xx_pcie->pci;
- struct pcie_port *pp = &pci->pp;
struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
/* Enable MSI interrupt */
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- dw_pcie_msi_init(pp);
+ if (IS_ENABLED(CONFIG_PCI_MSI))
writel(readl(&app_reg->int_mask) |
MSI_CTRL_INT, &app_reg->int_mask);
- }
}
static int spear13xx_pcie_link_up(struct dw_pcie *pci)
@@ -151,8 +125,23 @@ static int spear13xx_pcie_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci);
+ u32 exp_cap_off = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ u32 val;
+
+ spear13xx_pcie->app_base = pci->dbi_base + 0x2000;
+
+ /*
+ * This controller supports only a 128-byte read request size, but its
+ * default value in the capability register is 512 bytes, so force it
+ * to 128 here.
+ */
+ val = dw_pcie_readw_dbi(pci, exp_cap_off + PCI_EXP_DEVCTL);
+ val &= ~PCI_EXP_DEVCTL_READRQ;
+ dw_pcie_writew_dbi(pci, exp_cap_off + PCI_EXP_DEVCTL, val);
+
+ dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, 0x104A);
+ dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, 0xCD80);
- spear13xx_pcie_establish_link(spear13xx_pcie);
spear13xx_pcie_enable_interrupts(spear13xx_pcie);
return 0;
@@ -183,6 +172,7 @@ static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie,
}
pp->ops = &spear13xx_pcie_host_ops;
+ pp->msi_irq = -ENODEV;
ret = dw_pcie_host_init(pp);
if (ret) {
@@ -195,6 +185,7 @@ static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie,
static const struct dw_pcie_ops dw_pcie_ops = {
.link_up = spear13xx_pcie_link_up,
+ .start_link = spear13xx_pcie_start_link,
};
static int spear13xx_pcie_probe(struct platform_device *pdev)
@@ -203,7 +194,6 @@ static int spear13xx_pcie_probe(struct platform_device *pdev)
struct dw_pcie *pci;
struct spear13xx_pcie *spear13xx_pcie;
struct device_node *np = dev->of_node;
- struct resource *dbi_base;
int ret;
spear13xx_pcie = devm_kzalloc(dev, sizeof(*spear13xx_pcie), GFP_KERNEL);
@@ -242,14 +232,6 @@ static int spear13xx_pcie_probe(struct platform_device *pdev)
return ret;
}
- dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
- pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base);
- if (IS_ERR(pci->dbi_base)) {
- ret = PTR_ERR(pci->dbi_base);
- goto fail_clk;
- }
- spear13xx_pcie->app_base = pci->dbi_base + 0x2000;
-
if (of_property_read_bool(np, "st,pcie-is-gen1"))
pci->link_gen = 1;
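[Editor's note] One subtlety in the spear13xx conversion: the controller has no dedicated MSI interrupt line (MSIs arrive via the shared IRQ serviced by spear13xx_pcie_irq_handler(), which calls dw_handle_msi_irq()), so the driver pre-sets pp->msi_irq = -ENODEV. A non-zero value makes the core skip its platform_get_irq() lookup, and a negative one keeps it from chaining a handler, while the MSI domain itself is still created. The relevant fragment:

pp->ops = &foo_pcie_host_ops;
pp->msi_irq = -ENODEV;	/* driver's own IRQ handler dispatches MSIs */

ret = dw_pcie_host_init(pp);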
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index f920e7efe118..6fa216e52d14 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -765,8 +765,6 @@ static void tegra_pcie_enable_msi_interrupts(struct pcie_port *pp)
struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
u32 val;
- dw_pcie_msi_init(pp);
-
/* Enable MSI interrupt generation */
val = appl_readl(pcie, APPL_INTR_EN_L0_0);
val |= APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN;
@@ -855,12 +853,18 @@ static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie)
dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
}
-static void tegra_pcie_prepare_host(struct pcie_port *pp)
+static int tegra_pcie_dw_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
u32 val;
+ pp->bridge->ops = &tegra_pci_ops;
+
+ if (!pcie->pcie_cap_base)
+ pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
+ PCI_CAP_ID_EXP);
+
val = dw_pcie_readl_dbi(pci, PCI_IO_BASE);
val &= ~(IO_BASE_IO_DECODE | IO_BASE_IO_DECODE_BIT8);
dw_pcie_writel_dbi(pci, PCI_IO_BASE, val);
@@ -889,6 +893,12 @@ static void tegra_pcie_prepare_host(struct pcie_port *pp)
init_host_aspm(pcie);
+ /* Disable ASPM-L1SS advertisement if there is no CLKREQ routing */
+ if (!pcie->supports_clkreq) {
+ disable_aspm_l11(pcie);
+ disable_aspm_l12(pcie);
+ }
+
val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
@@ -899,10 +909,24 @@ static void tegra_pcie_prepare_host(struct pcie_port *pp)
dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val);
}
- dw_pcie_setup_rc(pp);
-
clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ);
+ return 0;
+}
+
+static int tegra_pcie_dw_start_link(struct dw_pcie *pci)
+{
+ u32 val, offset, speed, tmp;
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+ struct pcie_port *pp = &pci->pp;
+ bool retry = true;
+
+ if (pcie->mode == DW_PCIE_EP_TYPE) {
+ enable_irq(pcie->pex_rst_irq);
+ return 0;
+ }
+
+retry_link:
/* Assert RST */
val = appl_readl(pcie, APPL_PINMUX);
val &= ~APPL_PINMUX_PEX_RST;
@@ -921,19 +945,10 @@ static void tegra_pcie_prepare_host(struct pcie_port *pp)
appl_writel(pcie, val, APPL_PINMUX);
msleep(100);
-}
-
-static int tegra_pcie_dw_host_init(struct pcie_port *pp)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
- u32 val, tmp, offset, speed;
-
- pp->bridge->ops = &tegra_pci_ops;
-
- tegra_pcie_prepare_host(pp);
if (dw_pcie_wait_for_link(pci)) {
+ if (!retry)
+ return 0;
/*
* There are some endpoints which can't get the link up if
* root port has Data Link Feature (DLF) enabled.
@@ -967,10 +982,11 @@ static int tegra_pcie_dw_host_init(struct pcie_port *pp)
val &= ~PCI_DLF_EXCHANGE_ENABLE;
dw_pcie_writel_dbi(pci, offset, val);
- tegra_pcie_prepare_host(pp);
+ tegra_pcie_dw_host_init(pp);
+ dw_pcie_setup_rc(pp);
- if (dw_pcie_wait_for_link(pci))
- return 0;
+ retry = false;
+ goto retry_link;
}
speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) &
@@ -990,20 +1006,6 @@ static int tegra_pcie_dw_link_up(struct dw_pcie *pci)
return !!(val & PCI_EXP_LNKSTA_DLLLA);
}
-static void tegra_pcie_set_msi_vec_num(struct pcie_port *pp)
-{
- pp->num_vectors = MAX_MSI_IRQS;
-}
-
-static int tegra_pcie_dw_start_link(struct dw_pcie *pci)
-{
- struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
-
- enable_irq(pcie->pex_rst_irq);
-
- return 0;
-}
-
static void tegra_pcie_dw_stop_link(struct dw_pcie *pci)
{
struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
@@ -1019,7 +1021,6 @@ static const struct dw_pcie_ops tegra_dw_pcie_ops = {
static struct dw_pcie_host_ops tegra_pcie_dw_host_ops = {
.host_init = tegra_pcie_dw_host_init,
- .set_num_vectors = tegra_pcie_set_msi_vec_num,
};
static void tegra_pcie_disable_phy(struct tegra_pcie_dw *pcie)
@@ -1061,9 +1062,16 @@ phy_exit:
static int tegra_pcie_dw_parse_dt(struct tegra_pcie_dw *pcie)
{
+ struct platform_device *pdev = to_platform_device(pcie->dev);
struct device_node *np = pcie->dev->of_node;
int ret;
+ pcie->dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+ if (!pcie->dbi_res) {
+ dev_err(pcie->dev, "Failed to find \"dbi\" region\n");
+ return -ENODEV;
+ }
+
ret = of_property_read_u32(np, "nvidia,aspm-cmrt-us", &pcie->aspm_cmrt);
if (ret < 0) {
dev_info(pcie->dev, "Failed to read ASPM T_cmrt: %d\n", ret);
@@ -1390,15 +1398,6 @@ static int tegra_pcie_config_controller(struct tegra_pcie_dw *pcie,
reset_control_deassert(pcie->core_rst);
- pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
- PCI_CAP_ID_EXP);
-
- /* Disable ASPM-L1SS advertisement as there is no CLKREQ routing */
- if (!pcie->supports_clkreq) {
- disable_aspm_l11(pcie);
- disable_aspm_l12(pcie);
- }
-
return ret;
fail_phy:
@@ -1415,43 +1414,32 @@ fail_slot_reg_en:
return ret;
}
-static int __deinit_controller(struct tegra_pcie_dw *pcie)
+static void tegra_pcie_unconfig_controller(struct tegra_pcie_dw *pcie)
{
int ret;
ret = reset_control_assert(pcie->core_rst);
- if (ret) {
- dev_err(pcie->dev, "Failed to assert \"core\" reset: %d\n",
- ret);
- return ret;
- }
+ if (ret)
+ dev_err(pcie->dev, "Failed to assert \"core\" reset: %d\n", ret);
tegra_pcie_disable_phy(pcie);
ret = reset_control_assert(pcie->core_apb_rst);
- if (ret) {
+ if (ret)
dev_err(pcie->dev, "Failed to assert APB reset: %d\n", ret);
- return ret;
- }
clk_disable_unprepare(pcie->core_clk);
ret = regulator_disable(pcie->pex_ctl_supply);
- if (ret) {
+ if (ret)
dev_err(pcie->dev, "Failed to disable regulator: %d\n", ret);
- return ret;
- }
tegra_pcie_disable_slot_regulators(pcie);
ret = tegra_pcie_bpmp_set_ctrl_state(pcie, false);
- if (ret) {
+ if (ret)
dev_err(pcie->dev, "Failed to disable controller %d: %d\n",
pcie->cid, ret);
- return ret;
- }
-
- return ret;
}
static int tegra_pcie_init_controller(struct tegra_pcie_dw *pcie)
@@ -1475,7 +1463,8 @@ static int tegra_pcie_init_controller(struct tegra_pcie_dw *pcie)
return 0;
fail_host_init:
- return __deinit_controller(pcie);
+ tegra_pcie_unconfig_controller(pcie);
+ return ret;
}
static int tegra_pcie_try_link_l2(struct tegra_pcie_dw *pcie)
@@ -1516,6 +1505,14 @@ static void tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw *pcie)
data &= ~APPL_PINMUX_PEX_RST;
appl_writel(pcie, data, APPL_PINMUX);
+ /*
+ * Some cards do not go to detect state even after de-asserting
+ * PERST#. So, de-assert LTSSM to bring link to detect state.
+ */
+ data = readl(pcie->appl_base + APPL_CTRL);
+ data &= ~APPL_CTRL_LTSSM_EN;
+ writel(data, pcie->appl_base + APPL_CTRL);
+
err = readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG,
data,
((data &
@@ -1523,14 +1520,8 @@ static void tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw *pcie)
APPL_DEBUG_LTSSM_STATE_SHIFT) ==
LTSSM_STATE_PRE_DETECT,
1, LTSSM_TIMEOUT);
- if (err) {
+ if (err)
dev_info(pcie->dev, "Link didn't go to detect state\n");
- } else {
- /* Disable LTSSM after link is in detect state */
- data = appl_readl(pcie, APPL_CTRL);
- data &= ~APPL_CTRL_LTSSM_EN;
- appl_writel(pcie, data, APPL_CTRL);
- }
}
/*
* DBI registers may not be accessible after this as PLL-E would be
@@ -1544,30 +1535,20 @@ static void tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw *pcie)
appl_writel(pcie, data, APPL_PINMUX);
}
-static int tegra_pcie_deinit_controller(struct tegra_pcie_dw *pcie)
+static void tegra_pcie_deinit_controller(struct tegra_pcie_dw *pcie)
{
tegra_pcie_downstream_dev_to_D0(pcie);
dw_pcie_host_deinit(&pcie->pci.pp);
tegra_pcie_dw_pme_turnoff(pcie);
-
- return __deinit_controller(pcie);
+ tegra_pcie_unconfig_controller(pcie);
}
static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)
{
- struct pcie_port *pp = &pcie->pci.pp;
struct device *dev = pcie->dev;
char *name;
int ret;
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- pp->msi_irq = of_irq_get_byname(dev->of_node, "msi");
- if (!pp->msi_irq) {
- dev_err(dev, "Failed to get MSI interrupt\n");
- return -ENODEV;
- }
- }
-
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
@@ -1583,7 +1564,11 @@ static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)
goto fail_pm_get_sync;
}
- tegra_pcie_init_controller(pcie);
+ ret = tegra_pcie_init_controller(pcie);
+ if (ret < 0) {
+ dev_err(dev, "Failed to initialize controller: %d\n", ret);
+ goto fail_pm_get_sync;
+ }
pcie->link_state = tegra_pcie_dw_link_up(&pcie->pci);
if (!pcie->link_state) {
@@ -1907,19 +1892,12 @@ static int tegra_pcie_config_ep(struct tegra_pcie_dw *pcie,
struct dw_pcie *pci = &pcie->pci;
struct device *dev = pcie->dev;
struct dw_pcie_ep *ep;
- struct resource *res;
char *name;
int ret;
ep = &pci->ep;
ep->ops = &pcie_ep_ops;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
- if (!res)
- return -EINVAL;
-
- ep->phys_base = res->start;
- ep->addr_size = resource_size(res);
ep->page_size = SZ_64K;
ret = gpiod_set_debounce(pcie->pex_rst_gpiod, PERST_DEBOUNCE_TIME);
@@ -1982,7 +1960,6 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct resource *atu_dma_res;
struct tegra_pcie_dw *pcie;
- struct resource *dbi_res;
struct pcie_port *pp;
struct dw_pcie *pci;
struct phy **phys;
@@ -2001,8 +1978,10 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev)
pci->ops = &tegra_dw_pcie_ops;
pci->n_fts[0] = N_FTS_VAL;
pci->n_fts[1] = FTS_VAL;
+ pci->version = 0x490A;
pp = &pci->pp;
+ pp->num_vectors = MAX_MSI_IRQS;
pcie->dev = &pdev->dev;
pcie->mode = (enum dw_pcie_device_mode)data->mode;
@@ -2091,20 +2070,6 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev)
pcie->phys = phys;
- dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
- if (!dbi_res) {
- dev_err(dev, "Failed to find \"dbi\" region\n");
- return -ENODEV;
- }
- pcie->dbi_res = dbi_res;
-
- pci->dbi_base = devm_ioremap_resource(dev, dbi_res);
- if (IS_ERR(pci->dbi_base))
- return PTR_ERR(pci->dbi_base);
-
- /* Tegra HW locates DBI2 at a fixed offset from DBI */
- pci->dbi_base2 = pci->dbi_base + 0x1000;
-
atu_dma_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"atu_dma");
if (!atu_dma_res) {
@@ -2113,6 +2078,7 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev)
}
pcie->atu_dma_res = atu_dma_res;
+ pci->atu_size = resource_size(atu_dma_res);
pci->atu_base = devm_ioremap_resource(dev, atu_dma_res);
if (IS_ERR(pci->atu_base))
return PTR_ERR(pci->atu_base);
@@ -2225,8 +2191,9 @@ static int tegra_pcie_dw_suspend_noirq(struct device *dev)
PORT_LOGIC_MSI_CTRL_INT_0_EN);
tegra_pcie_downstream_dev_to_D0(pcie);
tegra_pcie_dw_pme_turnoff(pcie);
+ tegra_pcie_unconfig_controller(pcie);
- return __deinit_controller(pcie);
+ return 0;
}
static int tegra_pcie_dw_resume_noirq(struct device *dev)
@@ -2247,6 +2214,10 @@ static int tegra_pcie_dw_resume_noirq(struct device *dev)
goto fail_host_init;
}
+ ret = tegra_pcie_dw_start_link(&pcie->pci);
+ if (ret < 0)
+ goto fail_host_init;
+
/* Restore MSI interrupt vector */
dw_pcie_writel_dbi(&pcie->pci, PORT_LOGIC_MSI_CTRL_INT_0_EN,
pcie->msi_ctrl_int);
@@ -2254,7 +2225,8 @@ static int tegra_pcie_dw_resume_noirq(struct device *dev)
return 0;
fail_host_init:
- return __deinit_controller(pcie);
+ tegra_pcie_unconfig_controller(pcie);
+ return ret;
}
static int tegra_pcie_dw_resume_early(struct device *dev)
@@ -2292,7 +2264,7 @@ static void tegra_pcie_dw_shutdown(struct platform_device *pdev)
disable_irq(pcie->pci.pp.msi_irq);
tegra_pcie_dw_pme_turnoff(pcie);
- __deinit_controller(pcie);
+ tegra_pcie_unconfig_controller(pcie);
}
static const struct tegra_pcie_dw_of_data tegra_pcie_dw_rc_of_data = {
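The reworked tegra_pcie_dw_start_link() above retries link-up exactly once, with the Data Link Feature disabled on the second pass. A condensed sketch of that retry-once pattern (kick_ltssm(), wait_for_link(), disable_dlf() and reinit_host() are hypothetical stand-ins for the steps in the hunk):

#include <stdbool.h>

extern void kick_ltssm(void);      /* hypothetical: PERST toggle + LTSSM enable */
extern bool wait_for_link(void);   /* hypothetical: polls for link-up */
extern void disable_dlf(void);     /* hypothetical: clears PCI_DLF_EXCHANGE_ENABLE */
extern void reinit_host(void);     /* hypothetical: host_init + dw_pcie_setup_rc */

static int start_link(void)
{
	bool retry = true;

retry_link:
	kick_ltssm();
	if (!wait_for_link()) {
		if (!retry)
			return 0;  /* give up quietly, as the driver does */
		/* Some endpoints fail link training with DLF advertised */
		disable_dlf();
		reinit_host();
		retry = false;
		goto retry_link;
	}
	return 0;
}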
diff --git a/drivers/pci/controller/dwc/pcie-uniphier-ep.c b/drivers/pci/controller/dwc/pcie-uniphier-ep.c
index 148355960061..69810c6b0d58 100644
--- a/drivers/pci/controller/dwc/pcie-uniphier-ep.c
+++ b/drivers/pci/controller/dwc/pcie-uniphier-ep.c
@@ -218,35 +218,6 @@ static const struct dw_pcie_ep_ops uniphier_pcie_ep_ops = {
.get_features = uniphier_pcie_get_features,
};
-static int uniphier_add_pcie_ep(struct uniphier_pcie_ep_priv *priv,
- struct platform_device *pdev)
-{
- struct dw_pcie *pci = &priv->pci;
- struct dw_pcie_ep *ep = &pci->ep;
- struct device *dev = &pdev->dev;
- struct resource *res;
- int ret;
-
- ep->ops = &uniphier_pcie_ep_ops;
-
- pci->dbi_base2 = devm_platform_ioremap_resource_byname(pdev, "dbi2");
- if (IS_ERR(pci->dbi_base2))
- return PTR_ERR(pci->dbi_base2);
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
- if (!res)
- return -EINVAL;
-
- ep->phys_base = res->start;
- ep->addr_size = resource_size(res);
-
- ret = dw_pcie_ep_init(ep);
- if (ret)
- dev_err(dev, "Failed to initialize endpoint (%d)\n", ret);
-
- return ret;
-}
-
static int uniphier_pcie_ep_enable(struct uniphier_pcie_ep_priv *priv)
{
int ret;
@@ -300,7 +271,6 @@ static int uniphier_pcie_ep_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct uniphier_pcie_ep_priv *priv;
- struct resource *res;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -314,11 +284,6 @@ static int uniphier_pcie_ep_probe(struct platform_device *pdev)
priv->pci.dev = dev;
priv->pci.ops = &dw_pcie_ops;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
- priv->pci.dbi_base = devm_pci_remap_cfg_resource(dev, res);
- if (IS_ERR(priv->pci.dbi_base))
- return PTR_ERR(priv->pci.dbi_base);
-
priv->base = devm_platform_ioremap_resource_byname(pdev, "link");
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
@@ -352,7 +317,8 @@ static int uniphier_pcie_ep_probe(struct platform_device *pdev)
if (ret)
return ret;
- return uniphier_add_pcie_ep(priv, pdev);
+ priv->pci.ep.ops = &uniphier_pcie_ep_ops;
+ return dw_pcie_ep_init(&priv->pci.ep);
}
static const struct pci_epc_features uniphier_pro5_data = {
diff --git a/drivers/pci/controller/dwc/pcie-uniphier.c b/drivers/pci/controller/dwc/pcie-uniphier.c
index 48176265c867..7e8bad326770 100644
--- a/drivers/pci/controller/dwc/pcie-uniphier.c
+++ b/drivers/pci/controller/dwc/pcie-uniphier.c
@@ -146,16 +146,13 @@ static int uniphier_pcie_link_up(struct dw_pcie *pci)
return (val & mask) == mask;
}
-static int uniphier_pcie_establish_link(struct dw_pcie *pci)
+static int uniphier_pcie_start_link(struct dw_pcie *pci)
{
struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
- if (dw_pcie_link_up(pci))
- return 0;
-
uniphier_pcie_ltssm_enable(priv, true);
- return dw_pcie_wait_for_link(pci);
+ return 0;
}
static void uniphier_pcie_stop_link(struct dw_pcie *pci)
@@ -317,13 +314,6 @@ static int uniphier_pcie_host_init(struct pcie_port *pp)
uniphier_pcie_irq_enable(priv);
- dw_pcie_setup_rc(pp);
- ret = uniphier_pcie_establish_link(pci);
- if (ret)
- return ret;
-
- dw_pcie_msi_init(pp);
-
return 0;
}
@@ -331,31 +321,6 @@ static const struct dw_pcie_host_ops uniphier_pcie_host_ops = {
.host_init = uniphier_pcie_host_init,
};
-static int uniphier_add_pcie_port(struct uniphier_pcie_priv *priv,
- struct platform_device *pdev)
-{
- struct dw_pcie *pci = &priv->pci;
- struct pcie_port *pp = &pci->pp;
- struct device *dev = &pdev->dev;
- int ret;
-
- pp->ops = &uniphier_pcie_host_ops;
-
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- pp->msi_irq = platform_get_irq_byname(pdev, "msi");
- if (pp->msi_irq < 0)
- return pp->msi_irq;
- }
-
- ret = dw_pcie_host_init(pp);
- if (ret) {
- dev_err(dev, "Failed to initialize host (%d)\n", ret);
- return ret;
- }
-
- return 0;
-}
-
static int uniphier_pcie_host_enable(struct uniphier_pcie_priv *priv)
{
int ret;
@@ -391,7 +356,7 @@ out_clk_disable:
}
static const struct dw_pcie_ops dw_pcie_ops = {
- .start_link = uniphier_pcie_establish_link,
+ .start_link = uniphier_pcie_start_link,
.stop_link = uniphier_pcie_stop_link,
.link_up = uniphier_pcie_link_up,
};
@@ -400,7 +365,6 @@ static int uniphier_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct uniphier_pcie_priv *priv;
- struct resource *res;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -410,11 +374,6 @@ static int uniphier_pcie_probe(struct platform_device *pdev)
priv->pci.dev = dev;
priv->pci.ops = &dw_pcie_ops;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
- priv->pci.dbi_base = devm_pci_remap_cfg_resource(dev, res);
- if (IS_ERR(priv->pci.dbi_base))
- return PTR_ERR(priv->pci.dbi_base);
-
priv->base = devm_platform_ioremap_resource_byname(pdev, "link");
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
@@ -437,7 +396,9 @@ static int uniphier_pcie_probe(struct platform_device *pdev)
if (ret)
return ret;
- return uniphier_add_pcie_port(priv, pdev);
+ priv->pci.pp.ops = &uniphier_pcie_host_ops;
+
+ return dw_pcie_host_init(&priv->pci.pp);
}
static const struct of_device_id uniphier_pcie_match[] = {
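After this conversion, uniphier_pcie_start_link() only kicks LTSSM; waiting for the link is the DWC core's job. A sketch of the resulting dw_pcie_ops contract (ltssm_enable() is a hypothetical stand-in for the glue driver's register write):

struct dw_pcie;  /* opaque in this sketch */

extern void ltssm_enable(struct dw_pcie *pci, int on);  /* hypothetical */

/* start_link(): start training and return; do not poll for link-up.
 * The core calls dw_pcie_wait_for_link() itself after host_init. */
static int sketch_start_link(struct dw_pcie *pci)
{
	ltssm_enable(pci, 1);
	return 0;
}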
diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
index a2632d02ce8f..c637de3a389b 100644
--- a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
+++ b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
@@ -306,13 +306,11 @@ int mobiveil_host_init(struct mobiveil_pcie *pcie, bool reinit)
static void mobiveil_mask_intx_irq(struct irq_data *data)
{
- struct irq_desc *desc = irq_to_desc(data->irq);
- struct mobiveil_pcie *pcie;
+ struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(data);
struct mobiveil_root_port *rp;
unsigned long flags;
u32 mask, shifted_val;
- pcie = irq_desc_get_chip_data(desc);
rp = &pcie->rp;
mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
raw_spin_lock_irqsave(&rp->intx_mask_lock, flags);
@@ -324,13 +322,11 @@ static void mobiveil_mask_intx_irq(struct irq_data *data)
static void mobiveil_unmask_intx_irq(struct irq_data *data)
{
- struct irq_desc *desc = irq_to_desc(data->irq);
- struct mobiveil_pcie *pcie;
+ struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(data);
struct mobiveil_root_port *rp;
unsigned long flags;
u32 shifted_val, mask;
- pcie = irq_desc_get_chip_data(desc);
rp = &pcie->rp;
mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
raw_spin_lock_irqsave(&rp->intx_mask_lock, flags);
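The mobiveil hunks (and the matching pcie-xilinx-nwl ones further down) replace the two-step descriptor lookup with the direct accessor on the irq_data the callback already receives; both accessors are standard genirq API:

/* Before: resolve the descriptor, then read its chip data */
struct irq_desc *desc = irq_to_desc(data->irq);
struct mobiveil_pcie *pcie = irq_desc_get_chip_data(desc);

/* After: one call on the irq_data argument, no descriptor lookup */
struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(data);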
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index 0be485a25327..051b48bd7985 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -16,6 +16,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/pci-ecam.h>
#include <linux/init.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
@@ -164,14 +165,6 @@
#define PCIE_CONFIG_WR_TYPE0 0xa
#define PCIE_CONFIG_WR_TYPE1 0xb
-#define PCIE_CONF_BUS(bus) (((bus) & 0xff) << 20)
-#define PCIE_CONF_DEV(dev) (((dev) & 0x1f) << 15)
-#define PCIE_CONF_FUNC(fun) (((fun) & 0x7) << 12)
-#define PCIE_CONF_REG(reg) ((reg) & 0xffc)
-#define PCIE_CONF_ADDR(bus, devfn, where) \
- (PCIE_CONF_BUS(bus) | PCIE_CONF_DEV(PCI_SLOT(devfn)) | \
- PCIE_CONF_FUNC(PCI_FUNC(devfn)) | PCIE_CONF_REG(where))
-
#define PIO_RETRY_CNT 500
#define PIO_RETRY_DELAY 2 /* 2 us */
@@ -259,7 +252,14 @@ static void advk_pcie_issue_perst(struct advk_pcie *pcie)
if (!pcie->reset_gpio)
return;
- /* PERST does not work for some cards when link training is enabled */
+ /*
+ * As required by PCI Express spec (PCI Express Base Specification, REV.
+ * 4.0 PCI Express, February 19 2014, 6.6.1 Conventional Reset) a delay
+ * for at least 100ms after de-asserting PERST# signal is needed before
+ * link training is enabled. So ensure that link training is disabled
+ * prior to de-asserting the PERST# signal, to fulfill that PCI Express
+ * spec requirement.
+ */
reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
reg &= ~LINK_TRAINING_EN;
advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
@@ -687,7 +687,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
advk_writel(pcie, reg, PIO_CTRL);
/* Program the address registers */
- reg = PCIE_CONF_ADDR(bus->number, devfn, where);
+ reg = ALIGN_DOWN(PCIE_ECAM_OFFSET(bus->number, devfn, where), 4);
advk_writel(pcie, reg, PIO_ADDR_LS);
advk_writel(pcie, 0, PIO_ADDR_MS);
@@ -748,7 +748,7 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
advk_writel(pcie, reg, PIO_CTRL);
/* Program the address registers */
- reg = PCIE_CONF_ADDR(bus->number, devfn, where);
+ reg = ALIGN_DOWN(PCIE_ECAM_OFFSET(bus->number, devfn, where), 4);
advk_writel(pcie, reg, PIO_ADDR_LS);
advk_writel(pcie, 0, PIO_ADDR_MS);
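The aardvark conversion programs the PIO address register with the standard ECAM offset, rounded down to a dword boundary. A standalone model of that computation (the shift values follow the ECAM layout in the PCIe Base Specification; PCIE_ECAM_OFFSET() from <linux/pci-ecam.h> does the same thing):

#include <stdint.h>
#include <stdio.h>

#define ECAM_BUS_SHIFT   20   /* bits 27:20 - bus number */
#define ECAM_DEVFN_SHIFT 12   /* bits 19:12 - device/function */
#define ECAM_REG_MASK    0xfff

static uint32_t ecam_offset(uint8_t bus, uint8_t devfn, uint16_t where)
{
	return ((uint32_t)bus << ECAM_BUS_SHIFT) |
	       ((uint32_t)devfn << ECAM_DEVFN_SHIFT) |
	       (where & ECAM_REG_MASK);
}

int main(void)
{
	/* bus 1, devfn 0, config offset 0x10 (BAR0) */
	uint32_t off = ecam_offset(1, 0, 0x10);

	printf("0x%08x\n", off & ~3u);  /* ALIGN_DOWN(off, 4) */
	return 0;
}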
diff --git a/drivers/pci/controller/pci-host-generic.c b/drivers/pci/controller/pci-host-generic.c
index b51977abfdf1..63865aeb636b 100644
--- a/drivers/pci/controller/pci-host-generic.c
+++ b/drivers/pci/controller/pci-host-generic.c
@@ -49,7 +49,6 @@ static void __iomem *pci_dw_ecam_map_bus(struct pci_bus *bus,
}
static const struct pci_ecam_ops pci_dw_ecam_bus_ops = {
- .bus_shift = 20,
.pci_ops = {
.map_bus = pci_dw_ecam_map_bus,
.read = pci_generic_config_read,
diff --git a/drivers/pci/controller/pci-thunder-ecam.c b/drivers/pci/controller/pci-thunder-ecam.c
index 7e8835fee5f7..f964fd26f7e0 100644
--- a/drivers/pci/controller/pci-thunder-ecam.c
+++ b/drivers/pci/controller/pci-thunder-ecam.c
@@ -346,7 +346,6 @@ static int thunder_ecam_config_write(struct pci_bus *bus, unsigned int devfn,
}
const struct pci_ecam_ops pci_thunder_ecam_ops = {
- .bus_shift = 20,
.pci_ops = {
.map_bus = pci_ecam_map_bus,
.read = thunder_ecam_config_read,
diff --git a/drivers/pci/controller/pci-thunder-pem.c b/drivers/pci/controller/pci-thunder-pem.c
index 3f847969143e..1a3f70ac61fc 100644
--- a/drivers/pci/controller/pci-thunder-pem.c
+++ b/drivers/pci/controller/pci-thunder-pem.c
@@ -19,6 +19,15 @@
#define PEM_CFG_WR 0x28
#define PEM_CFG_RD 0x30
+/*
+ * Enhanced Configuration Access Mechanism (ECAM)
+ *
+ * N.B. This is a non-standard platform-specific ECAM bus shift value. For
+ * standard values defined in the PCI Express Base Specification see
+ * include/linux/pci-ecam.h.
+ */
+#define THUNDER_PCIE_ECAM_BUS_SHIFT 24
+
struct thunder_pem_pci {
u32 ea_entry[3];
void __iomem *pem_reg_base;
@@ -404,7 +413,7 @@ static int thunder_pem_acpi_init(struct pci_config_window *cfg)
}
const struct pci_ecam_ops thunder_pem_ecam_ops = {
- .bus_shift = 24,
+ .bus_shift = THUNDER_PCIE_ECAM_BUS_SHIFT,
.init = thunder_pem_acpi_init,
.pci_ops = {
.map_bus = pci_ecam_map_bus,
@@ -441,7 +450,7 @@ static int thunder_pem_platform_init(struct pci_config_window *cfg)
}
static const struct pci_ecam_ops pci_thunder_pem_ops = {
- .bus_shift = 24,
+ .bus_shift = THUNDER_PCIE_ECAM_BUS_SHIFT,
.init = thunder_pem_platform_init,
.pci_ops = {
.map_bus = pci_ecam_map_bus,
diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c
index 8e0db84f089d..85e7c98265e8 100644
--- a/drivers/pci/controller/pci-xgene.c
+++ b/drivers/pci/controller/pci-xgene.c
@@ -257,7 +257,6 @@ static int xgene_v1_pcie_ecam_init(struct pci_config_window *cfg)
}
const struct pci_ecam_ops xgene_v1_pcie_ecam_ops = {
- .bus_shift = 16,
.init = xgene_v1_pcie_ecam_init,
.pci_ops = {
.map_bus = xgene_pcie_map_bus,
@@ -272,7 +271,6 @@ static int xgene_v2_pcie_ecam_init(struct pci_config_window *cfg)
}
const struct pci_ecam_ops xgene_v2_pcie_ecam_ops = {
- .bus_shift = 16,
.init = xgene_v2_pcie_ecam_init,
.pci_ops = {
.map_bus = xgene_pcie_map_bus,
diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c
index bea86899bd5d..d41257f43a8f 100644
--- a/drivers/pci/controller/pcie-brcmstb.c
+++ b/drivers/pci/controller/pcie-brcmstb.c
@@ -22,6 +22,7 @@
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
+#include <linux/pci-ecam.h>
#include <linux/printk.h>
#include <linux/reset.h>
#include <linux/sizes.h>
@@ -127,11 +128,7 @@
#define MSI_INT_MASK_CLR 0x14
#define PCIE_EXT_CFG_DATA 0x8000
-
#define PCIE_EXT_CFG_INDEX 0x9000
-#define PCIE_EXT_BUSNUM_SHIFT 20
-#define PCIE_EXT_SLOT_SHIFT 15
-#define PCIE_EXT_FUNC_SHIFT 12
#define PCIE_RGR1_SW_INIT_1_PERST_MASK 0x1
#define PCIE_RGR1_SW_INIT_1_PERST_SHIFT 0x0
@@ -695,15 +692,6 @@ static bool brcm_pcie_link_up(struct brcm_pcie *pcie)
return dla && plu;
}
-/* Configuration space read/write support */
-static inline int brcm_pcie_cfg_index(int busnr, int devfn, int reg)
-{
- return ((PCI_SLOT(devfn) & 0x1f) << PCIE_EXT_SLOT_SHIFT)
- | ((PCI_FUNC(devfn) & 0x07) << PCIE_EXT_FUNC_SHIFT)
- | (busnr << PCIE_EXT_BUSNUM_SHIFT)
- | (reg & ~3);
-}
-
static void __iomem *brcm_pcie_map_conf(struct pci_bus *bus, unsigned int devfn,
int where)
{
@@ -716,7 +704,7 @@ static void __iomem *brcm_pcie_map_conf(struct pci_bus *bus, unsigned int devfn,
return PCI_SLOT(devfn) ? NULL : base + where;
/* For devices, write to the config space index register */
- idx = brcm_pcie_cfg_index(bus->number, devfn, 0);
+ idx = PCIE_ECAM_OFFSET(bus->number, devfn, 0);
writel(idx, pcie->base + PCIE_EXT_CFG_INDEX);
return base + PCIE_EXT_CFG_DATA + where;
}
@@ -893,6 +881,7 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
burst = 0x2; /* 512 bytes */
/* Set SCB_MAX_BURST_SIZE, CFG_READ_UR_MODE, SCB_ACCESS_EN */
+ tmp = readl(base + PCIE_MISC_MISC_CTRL);
u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_SCB_ACCESS_EN_MASK);
u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_CFG_READ_UR_MODE_MASK);
u32p_replace_bits(&tmp, burst, PCIE_MISC_MISC_CTRL_MAX_BURST_SIZE_MASK);
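The one-line brcm_pcie_setup() fix above matters: u32p_replace_bits() only rewrites the named field in the value it is handed, so without the preceding readl() the other fields of MISC_CTRL came from an uninitialized temporary. A standalone model of the corrected read-modify-write (illustrative bit positions, hypothetical MMIO accessors):

#include <stdint.h>

#define SCB_ACCESS_EN  (1u << 12)   /* illustrative bit positions */
#define BURST_SHIFT    20
#define BURST_MASK     (3u << BURST_SHIFT)

extern uint32_t mmio_read32(uintptr_t addr);            /* hypothetical */
extern void mmio_write32(uintptr_t addr, uint32_t val); /* hypothetical */

static void set_burst(uintptr_t reg, uint32_t burst)
{
	uint32_t tmp = mmio_read32(reg);  /* read first, or the write
					     clobbers every other field */

	tmp |= SCB_ACCESS_EN;
	tmp = (tmp & ~BURST_MASK) | (burst << BURST_SHIFT);
	mmio_write32(reg, tmp);
}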
diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c
index 905e93808243..02e52f698eeb 100644
--- a/drivers/pci/controller/pcie-iproc.c
+++ b/drivers/pci/controller/pcie-iproc.c
@@ -6,6 +6,7 @@
#include <linux/kernel.h>
#include <linux/pci.h>
+#include <linux/pci-ecam.h>
#include <linux/msi.h>
#include <linux/clk.h>
#include <linux/module.h>
@@ -39,16 +40,8 @@
#define CFG_IND_ADDR_MASK 0x00001ffc
-#define CFG_ADDR_BUS_NUM_SHIFT 20
-#define CFG_ADDR_BUS_NUM_MASK 0x0ff00000
-#define CFG_ADDR_DEV_NUM_SHIFT 15
-#define CFG_ADDR_DEV_NUM_MASK 0x000f8000
-#define CFG_ADDR_FUNC_NUM_SHIFT 12
-#define CFG_ADDR_FUNC_NUM_MASK 0x00007000
-#define CFG_ADDR_REG_NUM_SHIFT 2
#define CFG_ADDR_REG_NUM_MASK 0x00000ffc
-#define CFG_ADDR_CFG_TYPE_SHIFT 0
-#define CFG_ADDR_CFG_TYPE_MASK 0x00000003
+#define CFG_ADDR_CFG_TYPE_1 1
#define SYS_RC_INTX_MASK 0xf
@@ -192,8 +185,15 @@ static const struct iproc_pcie_ib_map paxb_v2_ib_map[] = {
.imap_window_offset = 0x4,
},
{
- /* IARR1/IMAP1 (currently unused) */
- .type = IPROC_PCIE_IB_MAP_INVALID,
+ /* IARR1/IMAP1 */
+ .type = IPROC_PCIE_IB_MAP_MEM,
+ .size_unit = SZ_1M,
+ .region_sizes = { 8 },
+ .nr_sizes = 1,
+ .nr_windows = 8,
+ .imap_addr_offset = 0x4,
+ .imap_window_offset = 0x8,
},
{
/* IARR2/IMAP2 */
@@ -307,7 +307,7 @@ enum iproc_pcie_reg {
};
/* iProc PCIe PAXB BCMA registers */
-static const u16 iproc_pcie_reg_paxb_bcma[] = {
+static const u16 iproc_pcie_reg_paxb_bcma[IPROC_PCIE_MAX_NUM_REG] = {
[IPROC_PCIE_CLK_CTRL] = 0x000,
[IPROC_PCIE_CFG_IND_ADDR] = 0x120,
[IPROC_PCIE_CFG_IND_DATA] = 0x124,
@@ -318,7 +318,7 @@ static const u16 iproc_pcie_reg_paxb_bcma[] = {
};
/* iProc PCIe PAXB registers */
-static const u16 iproc_pcie_reg_paxb[] = {
+static const u16 iproc_pcie_reg_paxb[IPROC_PCIE_MAX_NUM_REG] = {
[IPROC_PCIE_CLK_CTRL] = 0x000,
[IPROC_PCIE_CFG_IND_ADDR] = 0x120,
[IPROC_PCIE_CFG_IND_DATA] = 0x124,
@@ -334,7 +334,7 @@ static const u16 iproc_pcie_reg_paxb[] = {
};
/* iProc PCIe PAXB v2 registers */
-static const u16 iproc_pcie_reg_paxb_v2[] = {
+static const u16 iproc_pcie_reg_paxb_v2[IPROC_PCIE_MAX_NUM_REG] = {
[IPROC_PCIE_CLK_CTRL] = 0x000,
[IPROC_PCIE_CFG_IND_ADDR] = 0x120,
[IPROC_PCIE_CFG_IND_DATA] = 0x124,
@@ -351,6 +351,8 @@ static const u16 iproc_pcie_reg_paxb_v2[] = {
[IPROC_PCIE_OMAP3] = 0xdf8,
[IPROC_PCIE_IARR0] = 0xd00,
[IPROC_PCIE_IMAP0] = 0xc00,
+ [IPROC_PCIE_IARR1] = 0xd08,
+ [IPROC_PCIE_IMAP1] = 0xd70,
[IPROC_PCIE_IARR2] = 0xd10,
[IPROC_PCIE_IMAP2] = 0xcc0,
[IPROC_PCIE_IARR3] = 0xe00,
@@ -363,7 +365,7 @@ static const u16 iproc_pcie_reg_paxb_v2[] = {
};
/* iProc PCIe PAXC v1 registers */
-static const u16 iproc_pcie_reg_paxc[] = {
+static const u16 iproc_pcie_reg_paxc[IPROC_PCIE_MAX_NUM_REG] = {
[IPROC_PCIE_CLK_CTRL] = 0x000,
[IPROC_PCIE_CFG_IND_ADDR] = 0x1f0,
[IPROC_PCIE_CFG_IND_DATA] = 0x1f4,
@@ -372,7 +374,7 @@ static const u16 iproc_pcie_reg_paxc[] = {
};
/* iProc PCIe PAXC v2 registers */
-static const u16 iproc_pcie_reg_paxc_v2[] = {
+static const u16 iproc_pcie_reg_paxc_v2[IPROC_PCIE_MAX_NUM_REG] = {
[IPROC_PCIE_MSI_GIC_MODE] = 0x050,
[IPROC_PCIE_MSI_BASE_ADDR] = 0x074,
[IPROC_PCIE_MSI_WINDOW_SIZE] = 0x078,
@@ -459,19 +461,15 @@ static inline void iproc_pcie_apb_err_disable(struct pci_bus *bus,
static void __iomem *iproc_pcie_map_ep_cfg_reg(struct iproc_pcie *pcie,
unsigned int busno,
- unsigned int slot,
- unsigned int fn,
+ unsigned int devfn,
int where)
{
u16 offset;
u32 val;
/* EP device access */
- val = (busno << CFG_ADDR_BUS_NUM_SHIFT) |
- (slot << CFG_ADDR_DEV_NUM_SHIFT) |
- (fn << CFG_ADDR_FUNC_NUM_SHIFT) |
- (where & CFG_ADDR_REG_NUM_MASK) |
- (1 & CFG_ADDR_CFG_TYPE_MASK);
+ val = ALIGN_DOWN(PCIE_ECAM_OFFSET(busno, devfn, where), 4) |
+ CFG_ADDR_CFG_TYPE_1;
iproc_pcie_write_reg(pcie, IPROC_PCIE_CFG_ADDR, val);
offset = iproc_pcie_reg_offset(pcie, IPROC_PCIE_CFG_DATA);
@@ -574,8 +572,6 @@ static int iproc_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 *val)
{
struct iproc_pcie *pcie = iproc_data(bus);
- unsigned int slot = PCI_SLOT(devfn);
- unsigned int fn = PCI_FUNC(devfn);
unsigned int busno = bus->number;
void __iomem *cfg_data_p;
unsigned int data;
@@ -590,7 +586,7 @@ static int iproc_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
return ret;
}
- cfg_data_p = iproc_pcie_map_ep_cfg_reg(pcie, busno, slot, fn, where);
+ cfg_data_p = iproc_pcie_map_ep_cfg_reg(pcie, busno, devfn, where);
if (!cfg_data_p)
return PCIBIOS_DEVICE_NOT_FOUND;
@@ -631,13 +627,11 @@ static void __iomem *iproc_pcie_map_cfg_bus(struct iproc_pcie *pcie,
int busno, unsigned int devfn,
int where)
{
- unsigned slot = PCI_SLOT(devfn);
- unsigned fn = PCI_FUNC(devfn);
u16 offset;
/* root complex access */
if (busno == 0) {
- if (slot > 0 || fn > 0)
+ if (PCIE_ECAM_DEVFN(devfn) > 0)
return NULL;
iproc_pcie_write_reg(pcie, IPROC_PCIE_CFG_IND_ADDR,
@@ -649,7 +643,7 @@ static void __iomem *iproc_pcie_map_cfg_bus(struct iproc_pcie *pcie,
return (pcie->base + offset);
}
- return iproc_pcie_map_ep_cfg_reg(pcie, busno, slot, fn, where);
+ return iproc_pcie_map_ep_cfg_reg(pcie, busno, devfn, where);
}
static void __iomem *iproc_pcie_bus_map_cfg_bus(struct pci_bus *bus,
@@ -1470,6 +1464,7 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
{
struct device *dev;
int ret;
+ struct pci_dev *pdev;
struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
dev = pcie->dev;
@@ -1533,6 +1528,11 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
goto err_power_off_phy;
}
+ for_each_pci_bridge(pdev, host->bus) {
+ if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT)
+ pcie_print_link_status(pdev);
+ }
+
return 0;
err_power_off_phy:
diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c
index cdc0963f154e..4d1c4b24e537 100644
--- a/drivers/pci/controller/pcie-rcar-host.c
+++ b/drivers/pci/controller/pcie-rcar-host.c
@@ -50,9 +50,7 @@ static inline struct rcar_msi *to_rcar_msi(struct msi_controller *chip)
/* Structure representing the PCIe interface */
struct rcar_pcie_host {
struct rcar_pcie pcie;
- struct device *dev;
struct phy *phy;
- void __iomem *base;
struct clk *bus_clk;
struct rcar_msi msi;
int (*phy_init_fn)(struct rcar_pcie_host *host);
diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c
index 9705059523a6..f1d08a1b1591 100644
--- a/drivers/pci/controller/pcie-rockchip-host.c
+++ b/drivers/pci/controller/pcie-rockchip-host.c
@@ -157,12 +157,11 @@ static int rockchip_pcie_rd_other_conf(struct rockchip_pcie *rockchip,
struct pci_bus *bus, u32 devfn,
int where, int size, u32 *val)
{
- u32 busdev;
+ void __iomem *addr;
- busdev = PCIE_ECAM_ADDR(bus->number, PCI_SLOT(devfn),
- PCI_FUNC(devfn), where);
+ addr = rockchip->reg_base + PCIE_ECAM_OFFSET(bus->number, devfn, where);
- if (!IS_ALIGNED(busdev, size)) {
+ if (!IS_ALIGNED((uintptr_t)addr, size)) {
*val = 0;
return PCIBIOS_BAD_REGISTER_NUMBER;
}
@@ -175,11 +174,11 @@ static int rockchip_pcie_rd_other_conf(struct rockchip_pcie *rockchip,
AXI_WRAPPER_TYPE1_CFG);
if (size == 4) {
- *val = readl(rockchip->reg_base + busdev);
+ *val = readl(addr);
} else if (size == 2) {
- *val = readw(rockchip->reg_base + busdev);
+ *val = readw(addr);
} else if (size == 1) {
- *val = readb(rockchip->reg_base + busdev);
+ *val = readb(addr);
} else {
*val = 0;
return PCIBIOS_BAD_REGISTER_NUMBER;
@@ -191,11 +190,11 @@ static int rockchip_pcie_wr_other_conf(struct rockchip_pcie *rockchip,
struct pci_bus *bus, u32 devfn,
int where, int size, u32 val)
{
- u32 busdev;
+ void __iomem *addr;
- busdev = PCIE_ECAM_ADDR(bus->number, PCI_SLOT(devfn),
- PCI_FUNC(devfn), where);
- if (!IS_ALIGNED(busdev, size))
+ addr = rockchip->reg_base + PCIE_ECAM_OFFSET(bus->number, devfn, where);
+
+ if (!IS_ALIGNED((uintptr_t)addr, size))
return PCIBIOS_BAD_REGISTER_NUMBER;
if (pci_is_root_bus(bus->parent))
@@ -206,11 +205,11 @@ static int rockchip_pcie_wr_other_conf(struct rockchip_pcie *rockchip,
AXI_WRAPPER_TYPE1_CFG);
if (size == 4)
- writel(val, rockchip->reg_base + busdev);
+ writel(val, addr);
else if (size == 2)
- writew(val, rockchip->reg_base + busdev);
+ writew(val, addr);
else if (size == 1)
- writeb(val, rockchip->reg_base + busdev);
+ writeb(val, addr);
else
return PCIBIOS_BAD_REGISTER_NUMBER;
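The rockchip accessors now form the mapped address first and check that address, not the raw bus/devfn offset, against the access size. A one-function model of the IS_ALIGNED((uintptr_t)addr, size) test (size must be a power of two, here 1, 2 or 4):

#include <stdbool.h>
#include <stdint.h>

static bool access_aligned(const void *addr, unsigned int size)
{
	return ((uintptr_t)addr & (size - 1)) == 0;
}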
diff --git a/drivers/pci/controller/pcie-rockchip.h b/drivers/pci/controller/pcie-rockchip.h
index c7d0178fc8c2..1650a5087450 100644
--- a/drivers/pci/controller/pcie-rockchip.h
+++ b/drivers/pci/controller/pcie-rockchip.h
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/pci.h>
+#include <linux/pci-ecam.h>
/*
* The upper 16 bits of PCIE_CLIENT_CONFIG are a write mask for the lower 16
@@ -178,13 +179,6 @@
#define MIN_AXI_ADDR_BITS_PASSED 8
#define PCIE_RC_SEND_PME_OFF 0x11960
#define ROCKCHIP_VENDOR_ID 0x1d87
-#define PCIE_ECAM_BUS(x) (((x) & 0xff) << 20)
-#define PCIE_ECAM_DEV(x) (((x) & 0x1f) << 15)
-#define PCIE_ECAM_FUNC(x) (((x) & 0x7) << 12)
-#define PCIE_ECAM_REG(x) (((x) & 0xfff) << 0)
-#define PCIE_ECAM_ADDR(bus, dev, func, reg) \
- (PCIE_ECAM_BUS(bus) | PCIE_ECAM_DEV(dev) | \
- PCIE_ECAM_FUNC(func) | PCIE_ECAM_REG(reg))
#define PCIE_LINK_IS_L2(x) \
(((x) & PCIE_CLIENT_DEBUG_LTSSM_MASK) == PCIE_CLIENT_DEBUG_LTSSM_L2)
#define PCIE_LINK_UP(x) \
diff --git a/drivers/pci/controller/pcie-tango.c b/drivers/pci/controller/pcie-tango.c
index d093a8ce4bb1..62a061f1d62e 100644
--- a/drivers/pci/controller/pcie-tango.c
+++ b/drivers/pci/controller/pcie-tango.c
@@ -208,7 +208,6 @@ static int smp8759_config_write(struct pci_bus *bus, unsigned int devfn,
}
static const struct pci_ecam_ops smp8759_ecam_ops = {
- .bus_shift = 20,
.pci_ops = {
.map_bus = pci_ecam_map_bus,
.read = smp8759_config_read,
diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c
index f3cf7d61924f..07e36661bbc2 100644
--- a/drivers/pci/controller/pcie-xilinx-nwl.c
+++ b/drivers/pci/controller/pcie-xilinx-nwl.c
@@ -18,6 +18,7 @@
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/pci.h>
+#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
#include <linux/irqchip/chained_irq.h>
@@ -124,8 +125,6 @@
#define E_ECAM_CR_ENABLE BIT(0)
#define E_ECAM_SIZE_LOC GENMASK(20, 16)
#define E_ECAM_SIZE_SHIFT 16
-#define ECAM_BUS_LOC_SHIFT 20
-#define ECAM_DEV_LOC_SHIFT 12
#define NWL_ECAM_VALUE_DEFAULT 12
#define CFG_DMA_REG_BAR GENMASK(2, 0)
@@ -240,15 +239,11 @@ static void __iomem *nwl_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
int where)
{
struct nwl_pcie *pcie = bus->sysdata;
- int relbus;
if (!nwl_pcie_valid_device(bus, devfn))
return NULL;
- relbus = (bus->number << ECAM_BUS_LOC_SHIFT) |
- (devfn << ECAM_DEV_LOC_SHIFT);
-
- return pcie->ecam_base + relbus + where;
+ return pcie->ecam_base + PCIE_ECAM_OFFSET(bus->number, devfn, where);
}
/* PCIe operations */
@@ -379,13 +374,11 @@ static void nwl_pcie_msi_handler_low(struct irq_desc *desc)
static void nwl_mask_leg_irq(struct irq_data *data)
{
- struct irq_desc *desc = irq_to_desc(data->irq);
- struct nwl_pcie *pcie;
+ struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data);
unsigned long flags;
u32 mask;
u32 val;
- pcie = irq_desc_get_chip_data(desc);
mask = 1 << (data->hwirq - 1);
raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags);
val = nwl_bridge_readl(pcie, MSGF_LEG_MASK);
@@ -395,13 +388,11 @@ static void nwl_mask_leg_irq(struct irq_data *data)
static void nwl_unmask_leg_irq(struct irq_data *data)
{
- struct irq_desc *desc = irq_to_desc(data->irq);
- struct nwl_pcie *pcie;
+ struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data);
unsigned long flags;
u32 mask;
u32 val;
- pcie = irq_desc_get_chip_data(desc);
mask = 1 << (data->hwirq - 1);
raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags);
val = nwl_bridge_readl(pcie, MSGF_LEG_MASK);
diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c
index 8523be61bba5..fa5baeb82653 100644
--- a/drivers/pci/controller/pcie-xilinx.c
+++ b/drivers/pci/controller/pcie-xilinx.c
@@ -21,6 +21,7 @@
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/pci.h>
+#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
#include "../pci.h"
@@ -86,10 +87,6 @@
/* Phy Status/Control Register definitions */
#define XILINX_PCIE_REG_PSCR_LNKUP BIT(11)
-/* ECAM definitions */
-#define ECAM_BUS_NUM_SHIFT 20
-#define ECAM_DEV_NUM_SHIFT 12
-
/* Number of MSI IRQs */
#define XILINX_NUM_MSI_IRQS 128
@@ -183,15 +180,11 @@ static void __iomem *xilinx_pcie_map_bus(struct pci_bus *bus,
unsigned int devfn, int where)
{
struct xilinx_pcie_port *port = bus->sysdata;
- int relbus;
if (!xilinx_pcie_valid_device(bus, devfn))
return NULL;
- relbus = (bus->number << ECAM_BUS_NUM_SHIFT) |
- (devfn << ECAM_DEV_NUM_SHIFT);
-
- return port->reg_base + relbus + where;
+ return port->reg_base + PCIE_ECAM_OFFSET(bus->number, devfn, where);
}
/* PCIe operations */
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index 6f8795454e5a..5e80f28f0119 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/pci.h>
+#include <linux/pci-ecam.h>
#include <linux/srcu.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
@@ -52,6 +53,12 @@ enum vmd_features {
* vendor-specific capability space
*/
VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP = (1 << 2),
+
+ /*
+ * Device may use MSI-X vector 0 for software triggering; that vector
+ * is then not used for MSI remapping
+ */
+ VMD_FEAT_OFFSET_FIRST_VECTOR = (1 << 3),
};
/*
@@ -93,7 +100,7 @@ struct vmd_dev {
struct pci_dev *dev;
spinlock_t cfg_lock;
- char __iomem *cfgbar;
+ void __iomem *cfgbar;
int msix_count;
struct vmd_irq_list *irqs;
@@ -103,6 +110,7 @@ struct vmd_dev {
struct irq_domain *irq_domain;
struct pci_bus *bus;
u8 busn_start;
+ u8 first_vec;
};
static inline struct vmd_dev *vmd_from_bus(struct pci_bus *bus)
@@ -198,11 +206,11 @@ static irq_hw_number_t vmd_get_hwirq(struct msi_domain_info *info,
*/
static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd, struct msi_desc *desc)
{
- int i, best = 1;
unsigned long flags;
+ int i, best;
- if (vmd->msix_count == 1)
- return &vmd->irqs[0];
+ if (vmd->msix_count == 1 + vmd->first_vec)
+ return &vmd->irqs[vmd->first_vec];
/*
* White list for fast-interrupt handlers. All others will share the
@@ -212,11 +220,12 @@ static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd, struct msi_desc *d
case PCI_CLASS_STORAGE_EXPRESS:
break;
default:
- return &vmd->irqs[0];
+ return &vmd->irqs[vmd->first_vec];
}
raw_spin_lock_irqsave(&list_lock, flags);
- for (i = 1; i < vmd->msix_count; i++)
+ best = vmd->first_vec + 1;
+ for (i = best; i < vmd->msix_count; i++)
if (vmd->irqs[i].count < vmd->irqs[best].count)
best = i;
vmd->irqs[best].count++;
@@ -324,18 +333,16 @@ static void vmd_remove_irq_domain(struct vmd_dev *vmd)
}
}
-static char __iomem *vmd_cfg_addr(struct vmd_dev *vmd, struct pci_bus *bus,
+static void __iomem *vmd_cfg_addr(struct vmd_dev *vmd, struct pci_bus *bus,
unsigned int devfn, int reg, int len)
{
- char __iomem *addr = vmd->cfgbar +
- ((bus->number - vmd->busn_start) << 20) +
- (devfn << 12) + reg;
+ unsigned int busnr_ecam = bus->number - vmd->busn_start;
+ u32 offset = PCIE_ECAM_OFFSET(busnr_ecam, devfn, reg);
- if ((addr - vmd->cfgbar) + len >=
- resource_size(&vmd->dev->resource[VMD_CFGBAR]))
+ if (offset + len >= resource_size(&vmd->dev->resource[VMD_CFGBAR]))
return NULL;
- return addr;
+ return vmd->cfgbar + offset;
}
/*
@@ -346,7 +353,7 @@ static int vmd_pci_read(struct pci_bus *bus, unsigned int devfn, int reg,
int len, u32 *value)
{
struct vmd_dev *vmd = vmd_from_bus(bus);
- char __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len);
+ void __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len);
unsigned long flags;
int ret = 0;
@@ -381,7 +388,7 @@ static int vmd_pci_write(struct pci_bus *bus, unsigned int devfn, int reg,
int len, u32 value)
{
struct vmd_dev *vmd = vmd_from_bus(bus);
- char __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len);
+ void __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len);
unsigned long flags;
int ret = 0;
@@ -549,8 +556,8 @@ static int vmd_alloc_irqs(struct vmd_dev *vmd)
if (vmd->msix_count < 0)
return -ENODEV;
- vmd->msix_count = pci_alloc_irq_vectors(dev, 1, vmd->msix_count,
- PCI_IRQ_MSIX);
+ vmd->msix_count = pci_alloc_irq_vectors(dev, vmd->first_vec + 1,
+ vmd->msix_count, PCI_IRQ_MSIX);
if (vmd->msix_count < 0)
return vmd->msix_count;
@@ -718,6 +725,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
+ unsigned long features = (unsigned long) id->driver_data;
struct vmd_dev *vmd;
int err;
@@ -742,13 +750,16 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32)))
return -ENODEV;
+ if (features & VMD_FEAT_OFFSET_FIRST_VECTOR)
+ vmd->first_vec = 1;
+
err = vmd_alloc_irqs(vmd);
if (err)
return err;
spin_lock_init(&vmd->cfg_lock);
pci_set_drvdata(dev, vmd);
- err = vmd_enable_domain(vmd, (unsigned long) id->driver_data);
+ err = vmd_enable_domain(vmd, features);
if (err)
return err;
@@ -817,13 +828,16 @@ static const struct pci_device_id vmd_ids[] = {
VMD_FEAT_HAS_BUS_RESTRICTIONS,},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x467f),
.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
- VMD_FEAT_HAS_BUS_RESTRICTIONS,},
+ VMD_FEAT_HAS_BUS_RESTRICTIONS |
+ VMD_FEAT_OFFSET_FIRST_VECTOR,},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4c3d),
.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
- VMD_FEAT_HAS_BUS_RESTRICTIONS,},
+ VMD_FEAT_HAS_BUS_RESTRICTIONS |
+ VMD_FEAT_OFFSET_FIRST_VECTOR,},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_9A0B),
.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
- VMD_FEAT_HAS_BUS_RESTRICTIONS,},
+ VMD_FEAT_HAS_BUS_RESTRICTIONS |
+ VMD_FEAT_OFFSET_FIRST_VECTOR,},
{0,}
};
MODULE_DEVICE_TABLE(pci, vmd_ids);
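With VMD_FEAT_OFFSET_FIRST_VECTOR set, vector 0 is reserved for software triggering, so both the fallback vector and the start of the least-loaded search shift up by first_vec. A standalone model of the selection in vmd_next_irq() (locking and the fast-handler whitelist omitted):

struct irq_list { unsigned int count; };

static int pick_vector(struct irq_list *irqs, int msix_count, int first_vec)
{
	int i, best = first_vec + 1;

	if (msix_count == 1 + first_vec)
		return first_vec;  /* only the shared fallback vector exists */

	for (i = best; i < msix_count; i++)
		if (irqs[i].count < irqs[best].count)
			best = i;

	irqs[best].count++;
	return best;
}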
diff --git a/drivers/pci/ecam.c b/drivers/pci/ecam.c
index b54d32a31669..d2a1920bb055 100644
--- a/drivers/pci/ecam.c
+++ b/drivers/pci/ecam.c
@@ -28,6 +28,7 @@ struct pci_config_window *pci_ecam_create(struct device *dev,
struct resource *cfgres, struct resource *busr,
const struct pci_ecam_ops *ops)
{
+ unsigned int bus_shift = ops->bus_shift;
struct pci_config_window *cfg;
unsigned int bus_range, bus_range_max, bsz;
struct resource *conflict;
@@ -40,20 +41,24 @@ struct pci_config_window *pci_ecam_create(struct device *dev,
if (!cfg)
return ERR_PTR(-ENOMEM);
+ /* ECAM-compliant platforms need not supply ops->bus_shift */
+ if (!bus_shift)
+ bus_shift = PCIE_ECAM_BUS_SHIFT;
+
cfg->parent = dev;
cfg->ops = ops;
cfg->busr.start = busr->start;
cfg->busr.end = busr->end;
cfg->busr.flags = IORESOURCE_BUS;
bus_range = resource_size(&cfg->busr);
- bus_range_max = resource_size(cfgres) >> ops->bus_shift;
+ bus_range_max = resource_size(cfgres) >> bus_shift;
if (bus_range > bus_range_max) {
bus_range = bus_range_max;
cfg->busr.end = busr->start + bus_range - 1;
dev_warn(dev, "ECAM area %pR can only accommodate %pR (reduced from %pR desired)\n",
cfgres, &cfg->busr, busr);
}
- bsz = 1 << ops->bus_shift;
+ bsz = 1 << bus_shift;
cfg->res.start = cfgres->start;
cfg->res.end = cfgres->end;
@@ -131,25 +136,36 @@ void __iomem *pci_ecam_map_bus(struct pci_bus *bus, unsigned int devfn,
int where)
{
struct pci_config_window *cfg = bus->sysdata;
+ unsigned int bus_shift = cfg->ops->bus_shift;
unsigned int devfn_shift = cfg->ops->bus_shift - 8;
unsigned int busn = bus->number;
void __iomem *base;
+ u32 bus_offset, devfn_offset;
if (busn < cfg->busr.start || busn > cfg->busr.end)
return NULL;
busn -= cfg->busr.start;
- if (per_bus_mapping)
+ if (per_bus_mapping) {
base = cfg->winp[busn];
- else
- base = cfg->win + (busn << cfg->ops->bus_shift);
- return base + (devfn << devfn_shift) + where;
+ busn = 0;
+ } else
+ base = cfg->win;
+
+ if (cfg->ops->bus_shift) {
+ bus_offset = (busn & PCIE_ECAM_BUS_MASK) << bus_shift;
+ devfn_offset = (devfn & PCIE_ECAM_DEVFN_MASK) << devfn_shift;
+ where &= PCIE_ECAM_REG_MASK;
+
+ return base + (bus_offset | devfn_offset | where);
+ }
+
+ return base + PCIE_ECAM_OFFSET(busn, devfn, where);
}
EXPORT_SYMBOL_GPL(pci_ecam_map_bus);
/* ECAM ops */
const struct pci_ecam_ops pci_generic_ecam_ops = {
- .bus_shift = 20,
.pci_ops = {
.map_bus = pci_ecam_map_bus,
.read = pci_generic_config_read,
@@ -161,7 +177,6 @@ EXPORT_SYMBOL_GPL(pci_generic_ecam_ops);
#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
/* ECAM ops for 32-bit access only (non-compliant) */
const struct pci_ecam_ops pci_32b_ops = {
- .bus_shift = 20,
.pci_ops = {
.map_bus = pci_ecam_map_bus,
.read = pci_generic_config_read32,
@@ -171,7 +186,6 @@ const struct pci_ecam_ops pci_32b_ops = {
/* ECAM ops for 32-bit read only (non-compliant) */
const struct pci_ecam_ops pci_32b_read_ops = {
- .bus_shift = 20,
.pci_ops = {
.map_bus = pci_ecam_map_bus,
.read = pci_generic_config_read32,
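With pci_ecam_create() defaulting a zero ops->bus_shift to the standard value, compliant platforms can drop the field entirely, as the ops tables above now do. A small model of the fallback and the bus-range math it feeds:

#include <stdint.h>

#define PCIE_ECAM_BUS_SHIFT 20  /* standard ECAM: 1 MiB of config space per bus */

/* How many buses fit in a config window of cfg_size bytes */
static unsigned int max_buses(uint64_t cfg_size, unsigned int bus_shift)
{
	if (!bus_shift)
		bus_shift = PCIE_ECAM_BUS_SHIFT;  /* compliant default */
	return (unsigned int)(cfg_size >> bus_shift);
}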
diff --git a/drivers/pci/hotplug/ibmphp_pci.c b/drivers/pci/hotplug/ibmphp_pci.c
index e22d023f91d1..754c3f23282e 100644
--- a/drivers/pci/hotplug/ibmphp_pci.c
+++ b/drivers/pci/hotplug/ibmphp_pci.c
@@ -294,7 +294,6 @@ int ibmphp_configure_card(struct pci_func *func, u8 slotno)
default:
err("MAJOR PROBLEM!!!!, header type not supported? %x\n", hdr_type);
return -ENXIO;
- break;
} /* end of switch */
} /* end of valid device */
} /* end of for */
@@ -1509,7 +1508,6 @@ static int unconfigure_boot_card(struct slot *slot_cur)
default:
err("MAJOR PROBLEM!!!! Cannot read device's header\n");
return -1;
- break;
} /* end of switch */
} /* end of valid device */
} /* end of for */
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index d52d118979a6..3162f88fe940 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -26,6 +26,8 @@
#include "pci.h"
+#ifdef CONFIG_PCI_MSI
+
static int pci_msi_enable = 1;
int pci_msi_ignore_mask;
@@ -410,6 +412,17 @@ static void pci_intx_for_msi(struct pci_dev *dev, int enable)
pci_intx(dev, enable);
}
+static void pci_msi_set_enable(struct pci_dev *dev, int enable)
+{
+ u16 control;
+
+ pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
+ control &= ~PCI_MSI_FLAGS_ENABLE;
+ if (enable)
+ control |= PCI_MSI_FLAGS_ENABLE;
+ pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
+}
+
static void __pci_restore_msi_state(struct pci_dev *dev)
{
u16 control;
@@ -432,6 +445,16 @@ static void __pci_restore_msi_state(struct pci_dev *dev)
pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
}
+static void pci_msix_clear_and_set_ctrl(struct pci_dev *dev, u16 clear, u16 set)
+{
+ u16 ctrl;
+
+ pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
+ ctrl &= ~clear;
+ ctrl |= set;
+ pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, ctrl);
+}
+
static void __pci_restore_msix_state(struct pci_dev *dev)
{
struct msi_desc *entry;
@@ -600,11 +623,11 @@ static int msi_verify_entries(struct pci_dev *dev)
struct msi_desc *entry;
for_each_pci_msi_entry(entry, dev) {
- if (!dev->no_64bit_msi || !entry->msg.address_hi)
- continue;
- pci_err(dev, "Device has broken 64-bit MSI but arch"
- " tried to assign one above 4G\n");
- return -EIO;
+ if (entry->msg.address_hi && dev->no_64bit_msi) {
+ pci_err(dev, "arch assigned 64-bit MSI address %#x%08x but device only supports 32 bits\n",
+ entry->msg.address_hi, entry->msg.address_lo);
+ return -EIO;
+ }
}
return 0;
}
@@ -1577,3 +1600,40 @@ bool pci_dev_has_special_msi_domain(struct pci_dev *pdev)
}
#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */
+#endif /* CONFIG_PCI_MSI */
+
+void pci_msi_init(struct pci_dev *dev)
+{
+ u16 ctrl;
+
+ /*
+ * Disable the MSI hardware to avoid screaming interrupts
+ * during boot. This is the power-on reset default, so
+ * usually this should be a no-op.
+ */
+ dev->msi_cap = pci_find_capability(dev, PCI_CAP_ID_MSI);
+ if (!dev->msi_cap)
+ return;
+
+ pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &ctrl);
+ if (ctrl & PCI_MSI_FLAGS_ENABLE)
+ pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS,
+ ctrl & ~PCI_MSI_FLAGS_ENABLE);
+
+ if (!(ctrl & PCI_MSI_FLAGS_64BIT))
+ dev->no_64bit_msi = 1;
+}
+
+void pci_msix_init(struct pci_dev *dev)
+{
+ u16 ctrl;
+
+ dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX);
+ if (!dev->msix_cap)
+ return;
+
+ pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
+ if (ctrl & PCI_MSIX_FLAGS_ENABLE)
+ pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS,
+ ctrl & ~PCI_MSIX_FLAGS_ENABLE);
+}
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index ac24cd5439a9..5ea472ae22ac 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -556,6 +556,11 @@ static int pci_parse_request_of_pci_ranges(struct device *dev,
break;
case IORESOURCE_MEM:
res_valid |= !(res->flags & IORESOURCE_PREFETCH);
+
+ if (!(res->flags & IORESOURCE_PREFETCH))
+ if (upper_32_bits(resource_size(res)))
+ dev_warn(dev, "Memory resource size exceeds max for 32 bits\n");
+
break;
}
}
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index de1c331dbed4..196382630363 100644
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -556,15 +556,6 @@ int pci_p2pdma_distance_many(struct pci_dev *provider, struct device **clients,
return -1;
for (i = 0; i < num_clients; i++) {
-#ifdef CONFIG_DMA_VIRT_OPS
- if (clients[i]->dma_ops == &dma_virt_ops) {
- if (verbose)
- dev_warn(clients[i],
- "cannot be used for peer-to-peer DMA because the driver makes use of dma_virt_ops\n");
- return -1;
- }
-#endif
-
pci_client = find_parent_pci_dev(clients[i]);
if (!pci_client) {
if (verbose)
@@ -609,7 +600,7 @@ bool pci_has_p2pmem(struct pci_dev *pdev)
EXPORT_SYMBOL_GPL(pci_has_p2pmem);
/**
- * pci_p2pmem_find - find a peer-to-peer DMA memory device compatible with
+ * pci_p2pmem_find_many - find a peer-to-peer DMA memory device compatible with
* the specified list of clients and shortest distance (as determined
* by pci_p2pmem_dma())
* @clients: array of devices to check (NULL-terminated)
@@ -674,7 +665,7 @@ struct pci_dev *pci_p2pmem_find_many(struct device **clients, int num_clients)
EXPORT_SYMBOL_GPL(pci_p2pmem_find_many);
/**
- * pci_alloc_p2p_mem - allocate peer-to-peer DMA memory
+ * pci_alloc_p2pmem - allocate peer-to-peer DMA memory
* @pdev: the device to allocate memory from
* @size: number of bytes to allocate
*
@@ -727,7 +718,7 @@ void pci_free_p2pmem(struct pci_dev *pdev, void *addr, size_t size)
EXPORT_SYMBOL_GPL(pci_free_p2pmem);
/**
- * pci_virt_to_bus - return the PCI bus address for a given virtual
+ * pci_p2pmem_virt_to_bus - return the PCI bus address for a given virtual
* address obtained with pci_alloc_p2pmem()
* @pdev: the device the memory was allocated from
* @addr: address of the memory that was allocated
@@ -834,24 +825,10 @@ static int __pci_p2pdma_map_sg(struct pci_p2pdma_pagemap *p2p_pgmap,
struct device *dev, struct scatterlist *sg, int nents)
{
struct scatterlist *s;
- phys_addr_t paddr;
int i;
- /*
- * p2pdma mappings are not compatible with devices that use
- * dma_virt_ops. If the upper layers do the right thing
- * this should never happen because it will be prevented
- * by the check in pci_p2pdma_distance_many()
- */
-#ifdef CONFIG_DMA_VIRT_OPS
- if (WARN_ON_ONCE(dev->dma_ops == &dma_virt_ops))
- return 0;
-#endif
-
for_each_sg(sg, s, nents, i) {
- paddr = sg_phys(s);
-
- s->dma_address = paddr - p2p_pgmap->bus_offset;
+ s->dma_address = sg_phys(s) - p2p_pgmap->bus_offset;
sg_dma_len(s) = s->length;
}
@@ -859,7 +836,7 @@ static int __pci_p2pdma_map_sg(struct pci_p2pdma_pagemap *p2p_pgmap,
}
/**
- * pci_p2pdma_map_sg - map a PCI peer-to-peer scatterlist for DMA
+ * pci_p2pdma_map_sg_attrs - map a PCI peer-to-peer scatterlist for DMA
* @dev: device doing the DMA request
* @sg: scatter list to map
* @nents: elements in the scatterlist
@@ -896,7 +873,7 @@ int pci_p2pdma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
EXPORT_SYMBOL_GPL(pci_p2pdma_map_sg_attrs);
/**
- * pci_p2pdma_unmap_sg - unmap a PCI peer-to-peer scatterlist that was
+ * pci_p2pdma_unmap_sg_attrs - unmap a PCI peer-to-peer scatterlist that was
* mapped with pci_p2pdma_map_sg()
* @dev: device doing the DMA request
* @sg: scatter list to map
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 745a4e0c4994..53502a751914 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -1162,14 +1162,34 @@ void acpi_pci_remove_bus(struct pci_bus *bus)
static struct acpi_device *acpi_pci_find_companion(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct acpi_device *adev;
bool check_children;
u64 addr;
check_children = pci_is_bridge(pci_dev);
/* See the ACPI spec for the syntax of _ADR */
addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn);
- return acpi_find_child_device(ACPI_COMPANION(dev->parent), addr,
+ adev = acpi_find_child_device(ACPI_COMPANION(dev->parent), addr,
check_children);
+
+ /*
+ * There may be ACPI device objects in the ACPI namespace that are
+ * children of the device object representing the host bridge, but don't
+ * represent PCI devices. Both _HID and _ADR may be present for them,
+ * even though that is against the specification (for example, see
+ * Section 6.1 of ACPI 6.3), but in many cases the _ADR returns 0 which
+ * appears to indicate that they should not be taken into consideration
+ * as potential companions of PCI devices on the root bus.
+ *
+ * To catch this special case, disregard the returned device object if
+ * it has a valid _HID, addr is 0 and the PCI device at hand is on the
+ * root bus.
+ */
+ if (adev && adev->pnp.type.platform_id && !addr &&
+ pci_is_root_bus(pci_dev->bus))
+ return NULL;
+
+ return adev;
}
/**
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 8b587fc97f7b..ec44a79e951a 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -90,7 +90,80 @@ static void pci_free_dynids(struct pci_driver *drv)
}
/**
- * store_new_id - sysfs frontend to pci_add_dynid()
+ * pci_match_id - See if a PCI device matches a given pci_id table
+ * @ids: array of PCI device ID structures to search in
+ * @dev: the PCI device structure to match against.
+ *
+ * Used by a driver to check whether a PCI device is in its list of
+ * supported devices. Returns the matching pci_device_id structure or
+ * %NULL if there is no match.
+ *
+ * Deprecated; don't use this as it will not catch any dynamic IDs
+ * that a driver might want to check for.
+ */
+const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
+ struct pci_dev *dev)
+{
+ if (ids) {
+ while (ids->vendor || ids->subvendor || ids->class_mask) {
+ if (pci_match_one_device(ids, dev))
+ return ids;
+ ids++;
+ }
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(pci_match_id);
+
+static const struct pci_device_id pci_device_id_any = {
+ .vendor = PCI_ANY_ID,
+ .device = PCI_ANY_ID,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+};
+
+/**
+ * pci_match_device - See if a device matches a driver's list of IDs
+ * @drv: the PCI driver to match against
+ * @dev: the PCI device structure to match against
+ *
+ * Used by a driver to check whether a PCI device is in its list of
+ * supported devices or in the dynids list, which may have been augmented
+ * via the sysfs "new_id" file. Returns the matching pci_device_id
+ * structure or %NULL if there is no match.
+ */
+static const struct pci_device_id *pci_match_device(struct pci_driver *drv,
+ struct pci_dev *dev)
+{
+ struct pci_dynid *dynid;
+ const struct pci_device_id *found_id = NULL;
+
+ /* When driver_override is set, only bind to the matching driver */
+ if (dev->driver_override && strcmp(dev->driver_override, drv->name))
+ return NULL;
+
+ /* Look at the dynamic ids first, before the static ones */
+ spin_lock(&drv->dynids.lock);
+ list_for_each_entry(dynid, &drv->dynids.list, node) {
+ if (pci_match_one_device(&dynid->id, dev)) {
+ found_id = &dynid->id;
+ break;
+ }
+ }
+ spin_unlock(&drv->dynids.lock);
+
+ if (!found_id)
+ found_id = pci_match_id(drv->id_table, dev);
+
+ /* driver_override will always match, send a dummy id */
+ if (!found_id && dev->driver_override)
+ found_id = &pci_device_id_any;
+
+ return found_id;
+}
+
+/**
+ * new_id_store - sysfs frontend to pci_add_dynid()
* @driver: target device driver
* @buf: buffer for scanning device ID data
* @count: input size
@@ -125,7 +198,7 @@ static ssize_t new_id_store(struct device_driver *driver, const char *buf,
pdev->subsystem_device = subdevice;
pdev->class = class;
- if (pci_match_id(pdrv->id_table, pdev))
+ if (pci_match_device(pdrv, pdev))
retval = -EEXIST;
kfree(pdev);
@@ -158,7 +231,7 @@ static ssize_t new_id_store(struct device_driver *driver, const char *buf,
static DRIVER_ATTR_WO(new_id);
/**
- * store_remove_id - remove a PCI device ID from this driver
+ * remove_id_store - remove a PCI device ID from this driver
* @driver: target device driver
* @buf: buffer for scanning device ID data
* @count: input size
@@ -208,78 +281,6 @@ static struct attribute *pci_drv_attrs[] = {
};
ATTRIBUTE_GROUPS(pci_drv);
-/**
- * pci_match_id - See if a pci device matches a given pci_id table
- * @ids: array of PCI device id structures to search in
- * @dev: the PCI device structure to match against.
- *
- * Used by a driver to check whether a PCI device present in the
- * system is in its list of supported devices. Returns the matching
- * pci_device_id structure or %NULL if there is no match.
- *
- * Deprecated, don't use this as it will not catch any dynamic ids
- * that a driver might want to check for.
- */
-const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
- struct pci_dev *dev)
-{
- if (ids) {
- while (ids->vendor || ids->subvendor || ids->class_mask) {
- if (pci_match_one_device(ids, dev))
- return ids;
- ids++;
- }
- }
- return NULL;
-}
-EXPORT_SYMBOL(pci_match_id);
-
-static const struct pci_device_id pci_device_id_any = {
- .vendor = PCI_ANY_ID,
- .device = PCI_ANY_ID,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
-};
-
-/**
- * pci_match_device - Tell if a PCI device structure has a matching PCI device id structure
- * @drv: the PCI driver to match against
- * @dev: the PCI device structure to match against
- *
- * Used by a driver to check whether a PCI device present in the
- * system is in its list of supported devices. Returns the matching
- * pci_device_id structure or %NULL if there is no match.
- */
-static const struct pci_device_id *pci_match_device(struct pci_driver *drv,
- struct pci_dev *dev)
-{
- struct pci_dynid *dynid;
- const struct pci_device_id *found_id = NULL;
-
- /* When driver_override is set, only bind to the matching driver */
- if (dev->driver_override && strcmp(dev->driver_override, drv->name))
- return NULL;
-
- /* Look at the dynamic ids first, before the static ones */
- spin_lock(&drv->dynids.lock);
- list_for_each_entry(dynid, &drv->dynids.list, node) {
- if (pci_match_one_device(&dynid->id, dev)) {
- found_id = &dynid->id;
- break;
- }
- }
- spin_unlock(&drv->dynids.lock);
-
- if (!found_id)
- found_id = pci_match_id(drv->id_table, dev);
-
- /* driver_override will always match, send a dummy id */
- if (!found_id && dev->driver_override)
- found_id = &pci_device_id_any;
-
- return found_id;
-}
-
struct drv_dev_and_id {
struct pci_driver *drv;
struct pci_dev *dev;
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index d15c881e2e7e..fb072f4b3176 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -124,6 +124,15 @@ static ssize_t cpulistaffinity_show(struct device *dev,
}
static DEVICE_ATTR_RO(cpulistaffinity);
+static ssize_t power_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ return sprintf(buf, "%s\n", pci_power_name(pdev->current_state));
+}
+static DEVICE_ATTR_RO(power_state);
+
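The attribute reports pdev->current_state by name, so user space can
simply read it. A minimal reader; the device address is hypothetical:

#include <stdio.h>

int main(void)
{
	char state[16];
	FILE *f = fopen("/sys/bus/pci/devices/0000:00:1f.6/power_state", "r");

	if (f && fgets(state, sizeof(state), f))
		printf("power state: %s", state);	/* e.g. "D0" or "D3hot" */
	if (f)
		fclose(f);
	return 0;
}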
/* show resources */
static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -581,6 +590,7 @@ static ssize_t driver_override_show(struct device *dev,
static DEVICE_ATTR_RW(driver_override);
static struct attribute *pci_dev_attrs[] = {
+ &dev_attr_power_state.attr,
&dev_attr_resource.attr,
&dev_attr_vendor.attr,
&dev_attr_device.attr,
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index e578d34095e9..790393d1e318 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -399,8 +399,8 @@ found:
return 1;
}
-static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
- u8 pos, int cap, int *ttl)
+static u8 __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
+ u8 pos, int cap, int *ttl)
{
u8 id;
u16 ent;
@@ -423,22 +423,22 @@ static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
return 0;
}
-static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
- u8 pos, int cap)
+static u8 __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
+ u8 pos, int cap)
{
int ttl = PCI_FIND_CAP_TTL;
return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}
-int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
+u8 pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
return __pci_find_next_cap(dev->bus, dev->devfn,
pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);
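The int-to-u8 narrowing is safe because conventional capability offsets
always fit in the first 256 bytes of config space. A sketch of the usual
iteration pattern for a capability that may occur more than once (helper
name hypothetical):

#include <linux/pci.h>

static void demo_dump_vndr_caps(struct pci_dev *dev)
{
	u8 pos = pci_find_capability(dev, PCI_CAP_ID_VNDR);

	while (pos) {
		pci_info(dev, "vendor-specific capability at 0x%02x\n", pos);
		pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR);
	}
}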
-static int __pci_bus_find_cap_start(struct pci_bus *bus,
+static u8 __pci_bus_find_cap_start(struct pci_bus *bus,
unsigned int devfn, u8 hdr_type)
{
u16 status;
@@ -477,9 +477,9 @@ static int __pci_bus_find_cap_start(struct pci_bus *bus,
* %PCI_CAP_ID_PCIX PCI-X
* %PCI_CAP_ID_EXP PCI Express
*/
-int pci_find_capability(struct pci_dev *dev, int cap)
+u8 pci_find_capability(struct pci_dev *dev, int cap)
{
- int pos;
+ u8 pos;
pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
if (pos)
@@ -502,10 +502,9 @@ EXPORT_SYMBOL(pci_find_capability);
* device's PCI configuration space or 0 in case the device does not
* support it.
*/
-int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
+u8 pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
- int pos;
- u8 hdr_type;
+ u8 hdr_type, pos;
pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);
@@ -528,11 +527,11 @@ EXPORT_SYMBOL(pci_bus_find_capability);
* not support it. Some capabilities can occur several times, e.g., the
* vendor-specific capability, and this provides a way to find them all.
*/
-int pci_find_next_ext_capability(struct pci_dev *dev, int start, int cap)
+u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 start, int cap)
{
u32 header;
int ttl;
- int pos = PCI_CFG_SPACE_SIZE;
+ u16 pos = PCI_CFG_SPACE_SIZE;
/* minimum 8 bytes per capability */
ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
@@ -583,7 +582,7 @@ EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);
* %PCI_EXT_CAP_ID_DSN Device Serial Number
* %PCI_EXT_CAP_ID_PWR Power Budgeting
*/
-int pci_find_ext_capability(struct pci_dev *dev, int cap)
+u16 pci_find_ext_capability(struct pci_dev *dev, int cap)
{
return pci_find_next_ext_capability(dev, 0, cap);
}
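Extended capabilities start at 0x100 and can sit anywhere in the
4096-byte extended config space, hence u16 rather than u8 here. For
instance (helper name hypothetical):

static u16 demo_find_aer_cap(struct pci_dev *dev)
{
	return pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
}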
@@ -623,7 +622,7 @@ u64 pci_get_dsn(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(pci_get_dsn);
-static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
+static u8 __pci_find_next_ht_cap(struct pci_dev *dev, u8 pos, int ht_cap)
{
int rc, ttl = PCI_FIND_CAP_TTL;
u8 cap, mask;
@@ -650,11 +649,12 @@ static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
return 0;
}
+
/**
- * pci_find_next_ht_capability - query a device's Hypertransport capabilities
+ * pci_find_next_ht_capability - query a device's HyperTransport capabilities
* @dev: PCI device to query
* @pos: Position from which to continue searching
- * @ht_cap: Hypertransport capability code
+ * @ht_cap: HyperTransport capability code
*
* To be used in conjunction with pci_find_ht_capability() to search for
* all capabilities matching @ht_cap. @pos should always be a value returned
@@ -663,26 +663,26 @@ static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
* NB. To be 100% safe against broken PCI devices, the caller should take
* steps to avoid an infinite loop.
*/
-int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
+u8 pci_find_next_ht_capability(struct pci_dev *dev, u8 pos, int ht_cap)
{
return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);
/**
- * pci_find_ht_capability - query a device's Hypertransport capabilities
+ * pci_find_ht_capability - query a device's HyperTransport capabilities
* @dev: PCI device to query
- * @ht_cap: Hypertransport capability code
+ * @ht_cap: HyperTransport capability code
*
- * Tell if a device supports a given Hypertransport capability.
+ * Tell if a device supports a given HyperTransport capability.
* Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the requested capability.
* The address points to the PCI capability, of type PCI_CAP_ID_HT,
- * which has a Hypertransport capability matching @ht_cap.
+ * which has a HyperTransport capability matching @ht_cap.
*/
-int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
+u8 pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
- int pos;
+ u8 pos;
pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
if (pos)
@@ -1174,26 +1174,20 @@ int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
}
EXPORT_SYMBOL_GPL(pci_platform_power_transition);
-/**
- * pci_wakeup - Wake up a PCI device
- * @pci_dev: Device to handle.
- * @ign: ignored parameter
- */
-static int pci_wakeup(struct pci_dev *pci_dev, void *ign)
+static int pci_resume_one(struct pci_dev *pci_dev, void *ign)
{
- pci_wakeup_event(pci_dev);
pm_request_resume(&pci_dev->dev);
return 0;
}
/**
- * pci_wakeup_bus - Walk given bus and wake up devices on it
+ * pci_resume_bus - Walk given bus and runtime resume devices on it
* @bus: Top bus of the subtree to walk.
*/
-void pci_wakeup_bus(struct pci_bus *bus)
+void pci_resume_bus(struct pci_bus *bus)
{
if (bus)
- pci_walk_bus(bus, pci_wakeup, NULL);
+ pci_walk_bus(bus, pci_resume_one, NULL);
}
static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
@@ -1256,7 +1250,7 @@ int pci_power_up(struct pci_dev *dev)
* may be powered on into D0uninitialized state, resume them to
* give them a chance to suspend again
*/
- pci_wakeup_bus(dev->subordinate);
+ pci_resume_bus(dev->subordinate);
}
return pci_raw_set_power_state(dev, PCI_D0);
@@ -1566,6 +1560,7 @@ int pci_save_state(struct pci_dev *dev)
pci_save_ltr_state(dev);
pci_save_dpc_state(dev);
pci_save_aer_state(dev);
+ pci_save_ptm_state(dev);
return pci_save_vc_state(dev);
}
EXPORT_SYMBOL(pci_save_state);
@@ -1677,6 +1672,7 @@ void pci_restore_state(struct pci_dev *dev)
pci_restore_vc_state(dev);
pci_restore_rebar_state(dev);
pci_restore_dpc_state(dev);
+ pci_restore_ptm_state(dev);
pci_aer_clear_status(dev);
pci_restore_aer_state(dev);
@@ -2606,12 +2602,24 @@ int pci_prepare_to_sleep(struct pci_dev *dev)
if (target_state == PCI_POWER_ERROR)
return -EIO;
+ /*
+ * There are systems (for example, Intel mobile chips since Coffee
+ * Lake) where the power drawn while suspended can be significantly
+ * reduced by disabling PTM on PCIe root ports as this allows the
+ * port to enter a lower-power PM state and the SoC to reach a
+ * lower-power idle state as a whole.
+ */
+ if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
+ pci_disable_ptm(dev);
+
pci_enable_wake(dev, target_state, wakeup);
error = pci_set_power_state(dev, target_state);
- if (error)
+ if (error) {
pci_enable_wake(dev, target_state, false);
+ pci_restore_ptm_state(dev);
+ }
return error;
}
@@ -2649,12 +2657,23 @@ int pci_finish_runtime_suspend(struct pci_dev *dev)
dev->runtime_d3cold = target_state == PCI_D3cold;
+ /*
+ * There are systems (for example, Intel mobile chips since Coffee
+ * Lake) where the power drawn while suspended can be significantly
+ * reduced by disabling PTM on PCIe root ports as this allows the
+ * port to enter a lower-power PM state and the SoC to reach a
+ * lower-power idle state as a whole.
+ */
+ if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
+ pci_disable_ptm(dev);
+
__pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));
error = pci_set_power_state(dev, target_state);
if (error) {
pci_enable_wake(dev, target_state, false);
+ pci_restore_ptm_state(dev);
dev->runtime_d3cold = false;
}
@@ -3480,7 +3499,7 @@ bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
}
/**
- * pci_acs_path_enable - test ACS flags from start to end in a hierarchy
+ * pci_acs_path_enabled - test ACS flags from start to end in a hierarchy
* @start: starting downstream device
* @end: ending upstream device or NULL to search to the root bus
* @acs_flags: required flags
@@ -4188,7 +4207,14 @@ void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
}
size = resource_size(res);
- name = res->name ?: dev_name(dev);
+
+ if (res->name)
+ name = devm_kasprintf(dev, GFP_KERNEL, "%s %s", dev_name(dev),
+ res->name);
+ else
+ name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
+ if (!name)
+ return IOMEM_ERR_PTR(-ENOMEM);
if (!devm_request_mem_region(dev, res->start, size, name)) {
dev_err(dev, "can't request region for resource %pR\n", res);
@@ -4317,7 +4343,7 @@ int pci_set_cacheline_size(struct pci_dev *dev)
if (cacheline_size == pci_cache_line_size)
return 0;
- pci_info(dev, "cache line size of %d is not supported\n",
+ pci_dbg(dev, "cache line size of %d is not supported\n",
pci_cache_line_size << 2);
return -EINVAL;
@@ -6202,19 +6228,21 @@ static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
while (*p) {
count = 0;
if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
- p[count] == '@') {
+ p[count] == '@') {
p += count + 1;
+ if (align_order > 63) {
+ pr_err("PCI: Invalid requested alignment (order %d)\n",
+ align_order);
+ align_order = PAGE_SHIFT;
+ }
} else {
- align_order = -1;
+ align_order = PAGE_SHIFT;
}
ret = pci_dev_str_match(dev, p, &p);
if (ret == 1) {
*resize = true;
- if (align_order == -1)
- align = PAGE_SIZE;
- else
- align = 1 << align_order;
+ align = 1ULL << align_order;
break;
} else if (ret < 0) {
pr_err("PCI: Can't parse resource_alignment parameter: %s\n",
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index f86cae9aa1f4..a7bdf0b1d45d 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -104,6 +104,8 @@ void pci_config_pm_runtime_get(struct pci_dev *dev);
void pci_config_pm_runtime_put(struct pci_dev *dev);
void pci_pm_init(struct pci_dev *dev);
void pci_ea_init(struct pci_dev *dev);
+void pci_msi_init(struct pci_dev *dev);
+void pci_msix_init(struct pci_dev *dev);
void pci_allocate_cap_save_buffers(struct pci_dev *dev);
void pci_free_cap_save_buffers(struct pci_dev *dev);
bool pci_bridge_d3_possible(struct pci_dev *dev);
@@ -185,27 +187,6 @@ void pci_no_msi(void);
static inline void pci_no_msi(void) { }
#endif
-static inline void pci_msi_set_enable(struct pci_dev *dev, int enable)
-{
- u16 control;
-
- pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
- control &= ~PCI_MSI_FLAGS_ENABLE;
- if (enable)
- control |= PCI_MSI_FLAGS_ENABLE;
- pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
-}
-
-static inline void pci_msix_clear_and_set_ctrl(struct pci_dev *dev, u16 clear, u16 set)
-{
- u16 ctrl;
-
- pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
- ctrl &= ~clear;
- ctrl |= set;
- pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, ctrl);
-}
-
void pci_realloc_get_opt(char *);
static inline int pci_no_d1d2(struct pci_dev *dev)
@@ -294,7 +275,8 @@ void pci_bus_put(struct pci_bus *bus);
/* PCIe link information from Link Capabilities 2 */
#define PCIE_LNKCAP2_SLS2SPEED(lnkcap2) \
- ((lnkcap2) & PCI_EXP_LNKCAP2_SLS_32_0GB ? PCIE_SPEED_32_0GT : \
+ ((lnkcap2) & PCI_EXP_LNKCAP2_SLS_64_0GB ? PCIE_SPEED_64_0GT : \
+ (lnkcap2) & PCI_EXP_LNKCAP2_SLS_32_0GB ? PCIE_SPEED_32_0GT : \
(lnkcap2) & PCI_EXP_LNKCAP2_SLS_16_0GB ? PCIE_SPEED_16_0GT : \
(lnkcap2) & PCI_EXP_LNKCAP2_SLS_8_0GB ? PCIE_SPEED_8_0GT : \
(lnkcap2) & PCI_EXP_LNKCAP2_SLS_5_0GB ? PCIE_SPEED_5_0GT : \
@@ -303,7 +285,8 @@ void pci_bus_put(struct pci_bus *bus);
/* PCIe speed to Mb/s reduced by encoding overhead */
#define PCIE_SPEED2MBS_ENC(speed) \
- ((speed) == PCIE_SPEED_32_0GT ? 32000*128/130 : \
+ ((speed) == PCIE_SPEED_64_0GT ? 64000*128/130 : \
+ (speed) == PCIE_SPEED_32_0GT ? 32000*128/130 : \
(speed) == PCIE_SPEED_16_0GT ? 16000*128/130 : \
(speed) == PCIE_SPEED_8_0GT ? 8000*128/130 : \
(speed) == PCIE_SPEED_5_0GT ? 5000*8/10 : \
@@ -448,6 +431,15 @@ int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info);
void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
#endif /* CONFIG_PCIEAER */
+#ifdef CONFIG_PCIEPORTBUS
+/* Cached RCEC Endpoint Association */
+struct rcec_ea {
+ u8 nextbusn;
+ u8 lastbusn;
+ u32 bitmap;
+};
+#endif
+
#ifdef CONFIG_PCIE_DPC
void pci_save_dpc_state(struct pci_dev *dev);
void pci_restore_dpc_state(struct pci_dev *dev);
@@ -460,6 +452,22 @@ static inline void pci_restore_dpc_state(struct pci_dev *dev) {}
static inline void pci_dpc_init(struct pci_dev *pdev) {}
#endif
+#ifdef CONFIG_PCIEPORTBUS
+void pci_rcec_init(struct pci_dev *dev);
+void pci_rcec_exit(struct pci_dev *dev);
+void pcie_link_rcec(struct pci_dev *rcec);
+void pcie_walk_rcec(struct pci_dev *rcec,
+ int (*cb)(struct pci_dev *, void *),
+ void *userdata);
+#else
+static inline void pci_rcec_init(struct pci_dev *dev) {}
+static inline void pci_rcec_exit(struct pci_dev *dev) {}
+static inline void pcie_link_rcec(struct pci_dev *rcec) {}
+static inline void pcie_walk_rcec(struct pci_dev *rcec,
+ int (*cb)(struct pci_dev *, void *),
+ void *userdata) {}
+#endif
+
#ifdef CONFIG_PCI_ATS
/* Address Translation Service */
void pci_ats_init(struct pci_dev *dev);
@@ -516,6 +524,16 @@ static inline int pci_iov_bus_range(struct pci_bus *bus)
#endif /* CONFIG_PCI_IOV */
+#ifdef CONFIG_PCIE_PTM
+void pci_save_ptm_state(struct pci_dev *dev);
+void pci_restore_ptm_state(struct pci_dev *dev);
+void pci_disable_ptm(struct pci_dev *dev);
+#else
+static inline void pci_save_ptm_state(struct pci_dev *dev) { }
+static inline void pci_restore_ptm_state(struct pci_dev *dev) { }
+static inline void pci_disable_ptm(struct pci_dev *dev) { }
+#endif
+
unsigned long pci_cardbus_resource_alignment(struct resource *);
static inline resource_size_t pci_resource_alignment(struct pci_dev *dev,
@@ -555,8 +573,8 @@ static inline int pci_dev_specific_disable_acs_redir(struct pci_dev *dev)
/* PCI error reporting and recovery */
pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
- pci_channel_state_t state,
- pci_ers_result_t (*reset_link)(struct pci_dev *pdev));
+ pci_channel_state_t state,
+ pci_ers_result_t (*reset_subordinates)(struct pci_dev *pdev));
bool pcie_wait_for_link(struct pci_dev *pdev, bool active);
#ifdef CONFIG_PCIEASPM
diff --git a/drivers/pci/pcie/Makefile b/drivers/pci/pcie/Makefile
index 68da9280ff11..d9697892fa3e 100644
--- a/drivers/pci/pcie/Makefile
+++ b/drivers/pci/pcie/Makefile
@@ -2,7 +2,7 @@
#
# Makefile for PCI Express features and port driver
-pcieportdrv-y := portdrv_core.o portdrv_pci.o err.o
+pcieportdrv-y := portdrv_core.o portdrv_pci.o err.o rcec.o
obj-$(CONFIG_PCIEPORTBUS) += pcieportdrv.o
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index 65dff5f3457a..77b0f2c45bc0 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -300,7 +300,8 @@ int pci_aer_raw_clear_status(struct pci_dev *dev)
return -EIO;
port_type = pci_pcie_type(dev);
- if (port_type == PCI_EXP_TYPE_ROOT_PORT) {
+ if (port_type == PCI_EXP_TYPE_ROOT_PORT ||
+ port_type == PCI_EXP_TYPE_RC_EC) {
pci_read_config_dword(dev, aer + PCI_ERR_ROOT_STATUS, &status);
pci_write_config_dword(dev, aer + PCI_ERR_ROOT_STATUS, status);
}
@@ -595,7 +596,8 @@ static umode_t aer_stats_attrs_are_visible(struct kobject *kobj,
if ((a == &dev_attr_aer_rootport_total_err_cor.attr ||
a == &dev_attr_aer_rootport_total_err_fatal.attr ||
a == &dev_attr_aer_rootport_total_err_nonfatal.attr) &&
- pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT)
+ ((pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT) &&
+ (pci_pcie_type(pdev) != PCI_EXP_TYPE_RC_EC)))
return 0;
return a->mode;
@@ -916,7 +918,10 @@ static bool find_source_device(struct pci_dev *parent,
if (result)
return true;
- pci_walk_bus(parent->subordinate, find_device_iter, e_info);
+ if (pci_pcie_type(parent) == PCI_EXP_TYPE_RC_EC)
+ pcie_walk_rcec(parent, find_device_iter, e_info);
+ else
+ pci_walk_bus(parent->subordinate, find_device_iter, e_info);
if (!e_info->error_dev_num) {
pci_info(parent, "can't find device of ID%04x\n", e_info->id);
@@ -1034,6 +1039,7 @@ EXPORT_SYMBOL_GPL(aer_recover_queue);
*/
int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
{
+ int type = pci_pcie_type(dev);
int aer = dev->aer_cap;
int temp;
@@ -1052,8 +1058,9 @@ int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
&info->mask);
if (!(info->status & ~info->mask))
return 0;
- } else if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT ||
- pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM ||
+ } else if (type == PCI_EXP_TYPE_ROOT_PORT ||
+ type == PCI_EXP_TYPE_RC_EC ||
+ type == PCI_EXP_TYPE_DOWNSTREAM ||
info->severity == AER_NONFATAL) {
/* Link is still healthy for IO reads */
@@ -1205,6 +1212,7 @@ static int set_device_error_reporting(struct pci_dev *dev, void *data)
int type = pci_pcie_type(dev);
if ((type == PCI_EXP_TYPE_ROOT_PORT) ||
+ (type == PCI_EXP_TYPE_RC_EC) ||
(type == PCI_EXP_TYPE_UPSTREAM) ||
(type == PCI_EXP_TYPE_DOWNSTREAM)) {
if (enable)
@@ -1229,9 +1237,12 @@ static void set_downstream_devices_error_reporting(struct pci_dev *dev,
{
set_device_error_reporting(dev, &enable);
- if (!dev->subordinate)
- return;
- pci_walk_bus(dev->subordinate, set_device_error_reporting, &enable);
+ if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_EC)
+ pcie_walk_rcec(dev, set_device_error_reporting, &enable);
+ else if (dev->subordinate)
+ pci_walk_bus(dev->subordinate, set_device_error_reporting,
+ &enable);
+
}
/**
@@ -1329,6 +1340,11 @@ static int aer_probe(struct pcie_device *dev)
struct device *device = &dev->device;
struct pci_dev *port = dev->port;
+ /* Limit to Root Ports or Root Complex Event Collectors */
+ if ((pci_pcie_type(port) != PCI_EXP_TYPE_RC_EC) &&
+ (pci_pcie_type(port) != PCI_EXP_TYPE_ROOT_PORT))
+ return -ENODEV;
+
rpc = devm_kzalloc(device, sizeof(struct aer_rpc), GFP_KERNEL);
if (!rpc)
return -ENOMEM;
@@ -1350,41 +1366,74 @@ static int aer_probe(struct pcie_device *dev)
}
/**
- * aer_root_reset - reset link on Root Port
- * @dev: pointer to Root Port's pci_dev data structure
+ * aer_root_reset - reset Root Port hierarchy, RCEC, or RCiEP
+ * @dev: pointer to Root Port, RCEC, or RCiEP
*
- * Invoked by Port Bus driver when performing link reset at Root Port.
+ * Invoked by Port Bus driver when performing reset.
*/
static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
{
- int aer = dev->aer_cap;
+ int type = pci_pcie_type(dev);
+ struct pci_dev *root;
+ int aer;
+ struct pci_host_bridge *host = pci_find_host_bridge(dev->bus);
u32 reg32;
int rc;
+ /*
+ * Only Root Ports and RCECs have AER Root Command and Root Status
+ * registers. If "dev" is an RCiEP, the relevant registers are in
+ * the RCEC.
+ */
+ if (type == PCI_EXP_TYPE_RC_END)
+ root = dev->rcec;
+ else
+ root = dev;
+
+ /*
+ * If the platform retained control of AER, an RCiEP may not have
+ * an RCEC visible to us, so dev->rcec ("root") may be NULL. In
+ * that case, firmware is responsible for these registers.
+ */
+ aer = root ? root->aer_cap : 0;
- /* Disable Root's interrupt in response to error messages */
- pci_read_config_dword(dev, aer + PCI_ERR_ROOT_COMMAND, &reg32);
- reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK;
- pci_write_config_dword(dev, aer + PCI_ERR_ROOT_COMMAND, reg32);
+ if ((host->native_aer || pcie_ports_native) && aer) {
+ /* Disable Root's interrupt in response to error messages */
+ pci_read_config_dword(root, aer + PCI_ERR_ROOT_COMMAND, &reg32);
+ reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK;
+ pci_write_config_dword(root, aer + PCI_ERR_ROOT_COMMAND, reg32);
+ }
- rc = pci_bus_error_reset(dev);
- pci_info(dev, "Root Port link has been reset\n");
+ if (type == PCI_EXP_TYPE_RC_EC || type == PCI_EXP_TYPE_RC_END) {
+ if (pcie_has_flr(dev)) {
+ rc = pcie_flr(dev);
+ pci_info(dev, "has been reset (%d)\n", rc);
+ } else {
+ pci_info(dev, "not reset (no FLR support)\n");
+ rc = -ENOTTY;
+ }
+ } else {
+ rc = pci_bus_error_reset(dev);
+ pci_info(dev, "Root Port link has been reset (%d)\n", rc);
+ }
- /* Clear Root Error Status */
- pci_read_config_dword(dev, aer + PCI_ERR_ROOT_STATUS, &reg32);
- pci_write_config_dword(dev, aer + PCI_ERR_ROOT_STATUS, reg32);
+ if ((host->native_aer || pcie_ports_native) && aer) {
+ /* Clear Root Error Status */
+ pci_read_config_dword(root, aer + PCI_ERR_ROOT_STATUS, &reg32);
+ pci_write_config_dword(root, aer + PCI_ERR_ROOT_STATUS, reg32);
- /* Enable Root Port's interrupt in response to error messages */
- pci_read_config_dword(dev, aer + PCI_ERR_ROOT_COMMAND, &reg32);
- reg32 |= ROOT_PORT_INTR_ON_MESG_MASK;
- pci_write_config_dword(dev, aer + PCI_ERR_ROOT_COMMAND, reg32);
+ /* Enable Root Port's interrupt in response to error messages */
+ pci_read_config_dword(root, aer + PCI_ERR_ROOT_COMMAND, &reg32);
+ reg32 |= ROOT_PORT_INTR_ON_MESG_MASK;
+ pci_write_config_dword(root, aer + PCI_ERR_ROOT_COMMAND, reg32);
+ }
return rc ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}
static struct pcie_port_service_driver aerdriver = {
.name = "aer",
- .port_type = PCI_EXP_TYPE_ROOT_PORT,
+ .port_type = PCIE_ANY_PORT,
.service = PCIE_PORT_SERVICE_AER,
.probe = aer_probe,
diff --git a/drivers/pci/pcie/aer_inject.c b/drivers/pci/pcie/aer_inject.c
index c2cbf425afc5..767f8859b99b 100644
--- a/drivers/pci/pcie/aer_inject.c
+++ b/drivers/pci/pcie/aer_inject.c
@@ -333,8 +333,11 @@ static int aer_inject(struct aer_error_inj *einj)
if (!dev)
return -ENODEV;
rpdev = pcie_find_root_port(dev);
+ /* If Root Port not found, try to find an RCEC */
+ if (!rpdev)
+ rpdev = dev->rcec;
if (!rpdev) {
- pci_err(dev, "Root port not found\n");
+ pci_err(dev, "Neither Root Port nor RCEC found\n");
ret = -ENODEV;
goto out_put;
}
diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c
index c543f419d8f9..510f31f0ef6d 100644
--- a/drivers/pci/pcie/err.c
+++ b/drivers/pci/pcie/err.c
@@ -146,38 +146,71 @@ out:
return 0;
}
+/**
+ * pci_walk_bridge - walk bridges potentially AER affected
+ * @bridge: bridge which may be a Port, an RCEC, or an RCiEP
+ * @cb: callback to be called for each device found
+ * @userdata: arbitrary pointer to be passed to callback
+ *
+ * If the device provided is a bridge, walk the subordinate bus, including
+ * any bridged devices on buses under this bus. Call the provided callback
+ * on each device found.
+ *
+ * If the device provided has no subordinate bus, e.g., an RCEC or RCiEP,
+ * call the callback on the device itself.
+ */
+static void pci_walk_bridge(struct pci_dev *bridge,
+ int (*cb)(struct pci_dev *, void *),
+ void *userdata)
+{
+ if (bridge->subordinate)
+ pci_walk_bus(bridge->subordinate, cb, userdata);
+ else
+ cb(bridge, userdata);
+}
+
pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
- pci_channel_state_t state,
- pci_ers_result_t (*reset_link)(struct pci_dev *pdev))
+ pci_channel_state_t state,
+ pci_ers_result_t (*reset_subordinates)(struct pci_dev *pdev))
{
+ int type = pci_pcie_type(dev);
+ struct pci_dev *bridge;
pci_ers_result_t status = PCI_ERS_RESULT_CAN_RECOVER;
- struct pci_bus *bus;
+ struct pci_host_bridge *host = pci_find_host_bridge(dev->bus);
/*
- * Error recovery runs on all subordinates of the first downstream port.
- * If the downstream port detected the error, it is cleared at the end.
+ * If the error was detected by a Root Port, Downstream Port, RCEC,
+ * or RCiEP, recovery runs on the device itself. For Ports, that
+ * also includes any subordinate devices.
+ *
+ * If it was detected by another device (Endpoint, etc), recovery
+ * runs on the device and anything else under the same Port, i.e.,
+ * everything under "bridge".
*/
- if (!(pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT ||
- pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM))
- dev = dev->bus->self;
- bus = dev->subordinate;
-
- pci_dbg(dev, "broadcast error_detected message\n");
+ if (type == PCI_EXP_TYPE_ROOT_PORT ||
+ type == PCI_EXP_TYPE_DOWNSTREAM ||
+ type == PCI_EXP_TYPE_RC_EC ||
+ type == PCI_EXP_TYPE_RC_END)
+ bridge = dev;
+ else
+ bridge = pci_upstream_bridge(dev);
+
+ pci_dbg(bridge, "broadcast error_detected message\n");
if (state == pci_channel_io_frozen) {
- pci_walk_bus(bus, report_frozen_detected, &status);
- status = reset_link(dev);
+ pci_walk_bridge(bridge, report_frozen_detected, &status);
+ status = reset_subordinates(bridge);
if (status != PCI_ERS_RESULT_RECOVERED) {
- pci_warn(dev, "link reset failed\n");
+ pci_warn(bridge, "subordinate device reset failed\n");
goto failed;
}
} else {
- pci_walk_bus(bus, report_normal_detected, &status);
+ pci_walk_bridge(bridge, report_normal_detected, &status);
}
if (status == PCI_ERS_RESULT_CAN_RECOVER) {
status = PCI_ERS_RESULT_RECOVERED;
- pci_dbg(dev, "broadcast mmio_enabled message\n");
- pci_walk_bus(bus, report_mmio_enabled, &status);
+ pci_dbg(bridge, "broadcast mmio_enabled message\n");
+ pci_walk_bridge(bridge, report_mmio_enabled, &status);
}
if (status == PCI_ERS_RESULT_NEED_RESET) {
@@ -187,27 +220,35 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
* drivers' slot_reset callbacks?
*/
status = PCI_ERS_RESULT_RECOVERED;
- pci_dbg(dev, "broadcast slot_reset message\n");
- pci_walk_bus(bus, report_slot_reset, &status);
+ pci_dbg(bridge, "broadcast slot_reset message\n");
+ pci_walk_bridge(bridge, report_slot_reset, &status);
}
if (status != PCI_ERS_RESULT_RECOVERED)
goto failed;
- pci_dbg(dev, "broadcast resume message\n");
- pci_walk_bus(bus, report_resume, &status);
+ pci_dbg(bridge, "broadcast resume message\n");
+ pci_walk_bridge(bridge, report_resume, &status);
- if (pcie_aer_is_native(dev))
- pcie_clear_device_status(dev);
- pci_aer_clear_nonfatal_status(dev);
- pci_info(dev, "device recovery successful\n");
+ /*
+ * If we have native control of AER, clear error status in the Root
+ * Port or Downstream Port that signaled the error. If the
+ * platform retained control of AER, it is responsible for clearing
+ * this status. In that case, the signaling device may not even be
+ * visible to the OS.
+ */
+ if (host->native_aer || pcie_ports_native) {
+ pcie_clear_device_status(bridge);
+ pci_aer_clear_nonfatal_status(bridge);
+ }
+ pci_info(bridge, "device recovery successful\n");
return status;
failed:
- pci_uevent_ers(dev, PCI_ERS_RESULT_DISCONNECT);
+ pci_uevent_ers(bridge, PCI_ERS_RESULT_DISCONNECT);
/* TODO: Should kernel panic here? */
- pci_info(dev, "device recovery failed\n");
+ pci_info(bridge, "device recovery failed\n");
return status;
}
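For reference, the report_*() helpers broadcast above call into a bound
driver's struct pci_error_handlers. A sketch of a minimal driver-side
implementation (all demo_* names hypothetical):

#include <linux/pci.h>

static pci_ers_result_t demo_error_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	/* io_frozen means MMIO is off until a reset; ask for one */
	return state == pci_channel_io_frozen ?
		PCI_ERS_RESULT_NEED_RESET : PCI_ERS_RESULT_CAN_RECOVER;
}

static pci_ers_result_t demo_slot_reset(struct pci_dev *pdev)
{
	return PCI_ERS_RESULT_RECOVERED;	/* reinitialized OK */
}

static void demo_resume(struct pci_dev *pdev)
{
	/* restart I/O; corresponds to the "broadcast resume" step */
}

static const struct pci_error_handlers demo_err_handlers = {
	.error_detected	= demo_error_detected,
	.slot_reset	= demo_slot_reset,
	.resume		= demo_resume,
};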
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
index 6a32970bb731..3fc08488d65f 100644
--- a/drivers/pci/pcie/pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -310,7 +310,10 @@ static int pcie_pme_can_wakeup(struct pci_dev *dev, void *ign)
static void pcie_pme_mark_devices(struct pci_dev *port)
{
pcie_pme_can_wakeup(port, NULL);
- if (port->subordinate)
+
+ if (pci_pcie_type(port) == PCI_EXP_TYPE_RC_EC)
+ pcie_walk_rcec(port, pcie_pme_can_wakeup, NULL);
+ else if (port->subordinate)
pci_walk_bus(port->subordinate, pcie_pme_can_wakeup, NULL);
}
@@ -320,10 +323,16 @@ static void pcie_pme_mark_devices(struct pci_dev *port)
*/
static int pcie_pme_probe(struct pcie_device *srv)
{
- struct pci_dev *port;
+ struct pci_dev *port = srv->port;
struct pcie_pme_service_data *data;
+ int type = pci_pcie_type(port);
int ret;
+ /* Limit to Root Ports or Root Complex Event Collectors */
+ if (type != PCI_EXP_TYPE_RC_EC &&
+ type != PCI_EXP_TYPE_ROOT_PORT)
+ return -ENODEV;
+
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -333,7 +342,6 @@ static int pcie_pme_probe(struct pcie_device *srv)
data->srv = srv;
set_service_data(srv, data);
- port = srv->port;
pcie_pme_interrupt_enable(port, false);
pcie_clear_root_pme_status(port);
@@ -445,7 +453,7 @@ static void pcie_pme_remove(struct pcie_device *srv)
static struct pcie_port_service_driver pcie_pme_driver = {
.name = "pcie_pme",
- .port_type = PCI_EXP_TYPE_ROOT_PORT,
+ .port_type = PCIE_ANY_PORT,
.service = PCIE_PORT_SERVICE_PME,
.probe = pcie_pme_probe,
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 50a9522ab07d..e1fed6649c41 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -233,12 +233,9 @@ static int get_port_device_capability(struct pci_dev *dev)
}
#endif
- /*
- * Root ports are capable of generating PME too. Root Complex
- * Event Collectors can also generate PMEs, but we don't handle
- * those yet.
- */
- if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT &&
+ /* Root Ports and Root Complex Event Collectors may generate PMEs */
+ if ((pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT ||
+ pci_pcie_type(dev) == PCI_EXP_TYPE_RC_EC) &&
(pcie_ports_native || host->native_pme)) {
services |= PCIE_PORT_SERVICE_PME;
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 3a3ce40ae1ab..0b250bc5f405 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -101,14 +101,19 @@ static const struct dev_pm_ops pcie_portdrv_pm_ops = {
static int pcie_portdrv_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
+ int type = pci_pcie_type(dev);
int status;
if (!pci_is_pcie(dev) ||
- ((pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT) &&
- (pci_pcie_type(dev) != PCI_EXP_TYPE_UPSTREAM) &&
- (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM)))
+ ((type != PCI_EXP_TYPE_ROOT_PORT) &&
+ (type != PCI_EXP_TYPE_UPSTREAM) &&
+ (type != PCI_EXP_TYPE_DOWNSTREAM) &&
+ (type != PCI_EXP_TYPE_RC_EC)))
return -ENODEV;
+ if (type == PCI_EXP_TYPE_RC_EC)
+ pcie_link_rcec(dev);
+
status = pcie_port_device_register(dev);
if (status)
return status;
@@ -195,6 +200,8 @@ static const struct pci_device_id port_pci_ids[] = {
{ PCI_DEVICE_CLASS(((PCI_CLASS_BRIDGE_PCI << 8) | 0x00), ~0) },
/* subtractive decode PCI-to-PCI bridge, class type is 060401h */
{ PCI_DEVICE_CLASS(((PCI_CLASS_BRIDGE_PCI << 8) | 0x01), ~0) },
+ /* handle any Root Complex Event Collector */
+ { PCI_DEVICE_CLASS(((PCI_CLASS_SYSTEM_RCEC << 8) | 0x00), ~0) },
{ },
};
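The new entry matches on the RCEC class code. For reference, the constant
(from <linux/pci_ids.h>, added alongside this series) and what the match
expands to:

#define PCI_CLASS_SYSTEM_RCEC	0x0807	/* base class 08, sub-class 07 */
/* (PCI_CLASS_SYSTEM_RCEC << 8) | 0x00 == 0x080700, matched exactly
 * because the mask is ~0
 */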
diff --git a/drivers/pci/pcie/ptm.c b/drivers/pci/pcie/ptm.c
index 357a454cafa0..95d4eef2c9e8 100644
--- a/drivers/pci/pcie/ptm.c
+++ b/drivers/pci/pcie/ptm.c
@@ -29,6 +29,64 @@ static void pci_ptm_info(struct pci_dev *dev)
dev->ptm_root ? " (root)" : "", clock_desc);
}
+void pci_disable_ptm(struct pci_dev *dev)
+{
+ int ptm;
+ u16 ctrl;
+
+ if (!pci_is_pcie(dev))
+ return;
+
+ ptm = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM);
+ if (!ptm)
+ return;
+
+ pci_read_config_word(dev, ptm + PCI_PTM_CTRL, &ctrl);
+ ctrl &= ~(PCI_PTM_CTRL_ENABLE | PCI_PTM_CTRL_ROOT);
+ pci_write_config_word(dev, ptm + PCI_PTM_CTRL, ctrl);
+}
+
+void pci_save_ptm_state(struct pci_dev *dev)
+{
+ int ptm;
+ struct pci_cap_saved_state *save_state;
+ u16 *cap;
+
+ if (!pci_is_pcie(dev))
+ return;
+
+ ptm = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM);
+ if (!ptm)
+ return;
+
+ save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_PTM);
+ if (!save_state) {
+ pci_err(dev, "no suspend buffer for PTM\n");
+ return;
+ }
+
+ cap = (u16 *)&save_state->cap.data[0];
+ pci_read_config_word(dev, ptm + PCI_PTM_CTRL, cap);
+}
+
+void pci_restore_ptm_state(struct pci_dev *dev)
+{
+ struct pci_cap_saved_state *save_state;
+ int ptm;
+ u16 *cap;
+
+ if (!pci_is_pcie(dev))
+ return;
+
+ save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_PTM);
+ ptm = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM);
+ if (!save_state || !ptm)
+ return;
+
+ cap = (u16 *)&save_state->cap.data[0];
+ pci_write_config_word(dev, ptm + PCI_PTM_CTRL, *cap);
+}
+
void pci_ptm_init(struct pci_dev *dev)
{
int pos;
@@ -65,6 +123,8 @@ void pci_ptm_init(struct pci_dev *dev)
if (!pos)
return;
+ pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_PTM, sizeof(u16));
+
pci_read_config_dword(dev, pos + PCI_PTM_CAP, &cap);
local_clock = (cap & PCI_PTM_GRANULARITY_MASK) >> 8;
diff --git a/drivers/pci/pcie/rcec.c b/drivers/pci/pcie/rcec.c
new file mode 100644
index 000000000000..2c5c552994e4
--- /dev/null
+++ b/drivers/pci/pcie/rcec.c
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Root Complex Event Collector Support
+ *
+ * Authors:
+ * Sean V Kelley <sean.v.kelley@intel.com>
+ * Qiuxu Zhuo <qiuxu.zhuo@intel.com>
+ *
+ * Copyright (C) 2020 Intel Corp.
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/pci_regs.h>
+
+#include "../pci.h"
+
+struct walk_rcec_data {
+ struct pci_dev *rcec;
+ int (*user_callback)(struct pci_dev *dev, void *data);
+ void *user_data;
+};
+
+static bool rcec_assoc_rciep(struct pci_dev *rcec, struct pci_dev *rciep)
+{
+ unsigned long bitmap = rcec->rcec_ea->bitmap;
+ unsigned int devn;
+
+ /* An RCiEP found on a different bus in range */
+ if (rcec->bus->number != rciep->bus->number)
+ return true;
+
+ /* Same bus, so check bitmap */
+ for_each_set_bit(devn, &bitmap, 32)
+		if (devn == PCI_SLOT(rciep->devfn))
+ return true;
+
+ return false;
+}
+
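Equivalently, since the Association Bitmap carries one bit per device
number on the RCEC's own bus (PCIe 5.0, sec 7.9.10.2), the loop above can
be expressed as a single test_bit() on the slot part of devfn. A sketch
(helper name hypothetical):

#include <linux/bitops.h>
#include <linux/pci.h>

static bool demo_rciep_in_bitmap(struct pci_dev *rciep, unsigned long bitmap)
{
	/* mask off the function bits; the bitmap is per device number */
	return test_bit(PCI_SLOT(rciep->devfn), &bitmap);
}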
+static int link_rcec_helper(struct pci_dev *dev, void *data)
+{
+ struct walk_rcec_data *rcec_data = data;
+ struct pci_dev *rcec = rcec_data->rcec;
+
+ if ((pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END) &&
+ rcec_assoc_rciep(rcec, dev)) {
+ dev->rcec = rcec;
+ pci_dbg(dev, "PME & error events signaled via %s\n",
+ pci_name(rcec));
+ }
+
+ return 0;
+}
+
+static int walk_rcec_helper(struct pci_dev *dev, void *data)
+{
+ struct walk_rcec_data *rcec_data = data;
+ struct pci_dev *rcec = rcec_data->rcec;
+
+ if ((pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END) &&
+ rcec_assoc_rciep(rcec, dev))
+ rcec_data->user_callback(dev, rcec_data->user_data);
+
+ return 0;
+}
+
+static void walk_rcec(int (*cb)(struct pci_dev *dev, void *data),
+ void *userdata)
+{
+ struct walk_rcec_data *rcec_data = userdata;
+ struct pci_dev *rcec = rcec_data->rcec;
+ u8 nextbusn, lastbusn;
+ struct pci_bus *bus;
+ unsigned int bnr;
+
+ if (!rcec->rcec_ea)
+ return;
+
+ /* Walk own bus for bitmap based association */
+ pci_walk_bus(rcec->bus, cb, rcec_data);
+
+ nextbusn = rcec->rcec_ea->nextbusn;
+ lastbusn = rcec->rcec_ea->lastbusn;
+
+ /* All RCiEP devices are on the same bus as the RCEC */
+ if (nextbusn == 0xff && lastbusn == 0x00)
+ return;
+
+ for (bnr = nextbusn; bnr <= lastbusn; bnr++) {
+ /* No association indicated (PCIe 5.0-1, 7.9.10.3) */
+ if (bnr == rcec->bus->number)
+ continue;
+
+ bus = pci_find_bus(pci_domain_nr(rcec->bus), bnr);
+ if (!bus)
+ continue;
+
+ /* Find RCiEP devices on the given bus ranges */
+ pci_walk_bus(bus, cb, rcec_data);
+ }
+}
+
+/**
+ * pcie_link_rcec - Link RCiEP devices associated with RCEC.
+ * @rcec: RCEC whose RCiEP devices should be linked.
+ *
+ * Link the given RCEC to each RCiEP device found.
+ */
+void pcie_link_rcec(struct pci_dev *rcec)
+{
+ struct walk_rcec_data rcec_data;
+
+ if (!rcec->rcec_ea)
+ return;
+
+ rcec_data.rcec = rcec;
+ rcec_data.user_callback = NULL;
+ rcec_data.user_data = NULL;
+
+ walk_rcec(link_rcec_helper, &rcec_data);
+}
+
+/**
+ * pcie_walk_rcec - Walk RCiEP devices associated with an RCEC and call callback.
+ * @rcec: RCEC whose RCiEP devices should be walked
+ * @cb: Callback to be called for each RCiEP device found
+ * @userdata: Arbitrary pointer to be passed to callback
+ *
+ * Walk the given RCEC. Call the callback on each RCiEP found.
+ *
+ * If @cb returns anything other than 0, break out.
+ */
+void pcie_walk_rcec(struct pci_dev *rcec, int (*cb)(struct pci_dev *, void *),
+ void *userdata)
+{
+ struct walk_rcec_data rcec_data;
+
+ if (!rcec->rcec_ea)
+ return;
+
+ rcec_data.rcec = rcec;
+ rcec_data.user_callback = cb;
+ rcec_data.user_data = userdata;
+
+ walk_rcec(walk_rcec_helper, &rcec_data);
+}
+
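A sketch of a walker callback in the style of aer.c's find_device_iter
(names hypothetical); returning 0 keeps the walk going:

#include <linux/pci.h>

static int demo_count_rciep(struct pci_dev *dev, void *data)
{
	int *count = data;

	(*count)++;
	pci_dbg(dev, "RCiEP %d under this RCEC\n", *count);
	return 0;	/* keep walking */
}

/* usage: int n = 0; pcie_walk_rcec(rcec, demo_count_rciep, &n); */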
+void pci_rcec_init(struct pci_dev *dev)
+{
+ struct rcec_ea *rcec_ea;
+ u32 rcec, hdr, busn;
+ u8 ver;
+
+ /* Only for Root Complex Event Collectors */
+ if (pci_pcie_type(dev) != PCI_EXP_TYPE_RC_EC)
+ return;
+
+ rcec = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_RCEC);
+ if (!rcec)
+ return;
+
+ rcec_ea = kzalloc(sizeof(*rcec_ea), GFP_KERNEL);
+ if (!rcec_ea)
+ return;
+
+ pci_read_config_dword(dev, rcec + PCI_RCEC_RCIEP_BITMAP,
+ &rcec_ea->bitmap);
+
+ /* Check whether RCEC BUSN register is present */
+ pci_read_config_dword(dev, rcec, &hdr);
+ ver = PCI_EXT_CAP_VER(hdr);
+ if (ver >= PCI_RCEC_BUSN_REG_VER) {
+ pci_read_config_dword(dev, rcec + PCI_RCEC_BUSN, &busn);
+ rcec_ea->nextbusn = PCI_RCEC_BUSN_NEXT(busn);
+ rcec_ea->lastbusn = PCI_RCEC_BUSN_LAST(busn);
+ } else {
+ /* Avoid later ver check by setting nextbusn */
+ rcec_ea->nextbusn = 0xff;
+ rcec_ea->lastbusn = 0x00;
+ }
+
+ dev->rcec_ea = rcec_ea;
+}
+
+void pci_rcec_exit(struct pci_dev *dev)
+{
+ kfree(dev->rcec_ea);
+ dev->rcec_ea = NULL;
+}
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 4289030b0fff..953f15abc850 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -165,7 +165,7 @@ static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
#define PCI_COMMAND_DECODE_ENABLE (PCI_COMMAND_MEMORY | PCI_COMMAND_IO)
/**
- * pci_read_base - Read a PCI BAR
+ * __pci_read_base - Read a PCI BAR
* @dev: the PCI device
* @type: type of the BAR
* @res: resource buffer to be filled in
@@ -677,7 +677,7 @@ const unsigned char pcie_link_speed[] = {
PCIE_SPEED_8_0GT, /* 3 */
PCIE_SPEED_16_0GT, /* 4 */
PCIE_SPEED_32_0GT, /* 5 */
- PCI_SPEED_UNKNOWN, /* 6 */
+ PCIE_SPEED_64_0GT, /* 6 */
PCI_SPEED_UNKNOWN, /* 7 */
PCI_SPEED_UNKNOWN, /* 8 */
PCI_SPEED_UNKNOWN, /* 9 */
@@ -719,6 +719,7 @@ const char *pci_speed_string(enum pci_bus_speed speed)
"8.0 GT/s PCIe", /* 0x16 */
"16.0 GT/s PCIe", /* 0x17 */
"32.0 GT/s PCIe", /* 0x18 */
+ "64.0 GT/s PCIe", /* 0x19 */
};
if (speed < ARRAY_SIZE(speed_strings))
@@ -1612,7 +1613,7 @@ static bool pci_ext_cfg_is_aliased(struct pci_dev *dev)
}
/**
- * pci_cfg_space_size - Get the configuration space size of the PCI device
+ * pci_cfg_space_size_ext - Get the configuration space size of the PCI device
* @dev: PCI device
*
* Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
@@ -1716,22 +1717,6 @@ static u8 pci_hdr_type(struct pci_dev *dev)
#define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED)
-static void pci_msi_setup_pci_dev(struct pci_dev *dev)
-{
- /*
- * Disable the MSI hardware to avoid screaming interrupts
- * during boot. This is the power on reset default so
- * usually this should be a noop.
- */
- dev->msi_cap = pci_find_capability(dev, PCI_CAP_ID_MSI);
- if (dev->msi_cap)
- pci_msi_set_enable(dev, 0);
-
- dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX);
- if (dev->msix_cap)
- pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
-}
-
/**
* pci_intx_mask_broken - Test PCI_COMMAND_INTX_DISABLE writability
* @dev: PCI device
@@ -2216,6 +2201,7 @@ static void pci_configure_device(struct pci_dev *dev)
static void pci_release_capabilities(struct pci_dev *dev)
{
pci_aer_exit(dev);
+ pci_rcec_exit(dev);
pci_vpd_release(dev);
pci_iov_release(dev);
pci_free_cap_save_buffers(dev);
@@ -2397,9 +2383,8 @@ void pcie_report_downtraining(struct pci_dev *dev)
static void pci_init_capabilities(struct pci_dev *dev)
{
pci_ea_init(dev); /* Enhanced Allocation */
-
- /* Setup MSI caps & disable MSI/MSI-X interrupts */
- pci_msi_setup_pci_dev(dev);
+ pci_msi_init(dev); /* Disable MSI */
+ pci_msix_init(dev); /* Disable MSI-X */
/* Buffers for saving PCIe and PCI-X capabilities */
pci_allocate_cap_save_buffers(dev);
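The two new helpers live in drivers/pci/msi.c in this series and absorb
the inline code removed from pci.h together with pci_msi_setup_pci_dev()
removed above. Presumably pci_msi_init() looks essentially like this
(reconstructed from the deleted code, not quoted from msi.c):

void pci_msi_init(struct pci_dev *dev)
{
	u16 ctrl;

	/* Disable MSI to avoid screaming interrupts during boot; this
	 * is the power-on reset default, so usually a no-op.
	 */
	dev->msi_cap = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (!dev->msi_cap)
		return;

	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &ctrl);
	if (ctrl & PCI_MSI_FLAGS_ENABLE)
		pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS,
				      ctrl & ~PCI_MSI_FLAGS_ENABLE);
}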
@@ -2415,6 +2400,7 @@ static void pci_init_capabilities(struct pci_dev *dev)
pci_ptm_init(dev); /* Precision Time Measurement */
pci_aer_init(dev); /* Advanced Error Reporting */
pci_dpc_init(dev); /* Downstream Port Containment */
+ pci_rcec_init(dev); /* Root Complex Event Collector */
pcie_report_downtraining(dev);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index f70692ac79c5..653660e3ba9e 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2356,9 +2356,9 @@ static void quirk_enable_clear_retrain_link(struct pci_dev *dev)
dev->clear_retrain_link = 1;
pci_info(dev, "Enable PCIe Retrain Link quirk\n");
}
-DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe110, quirk_enable_clear_retrain_link);
-DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe111, quirk_enable_clear_retrain_link);
-DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe130, quirk_enable_clear_retrain_link);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PERICOM, 0xe110, quirk_enable_clear_retrain_link);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PERICOM, 0xe111, quirk_enable_clear_retrain_link);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PERICOM, 0xe130, quirk_enable_clear_retrain_link);
static void fixup_rev1_53c810(struct pci_dev *dev)
{
@@ -2522,6 +2522,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3351, quirk_disab
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3364, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8380_0, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, 0x0761, quirk_disable_all_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SAMSUNG, 0xa5e3, quirk_disable_all_msi);
/* Disable MSI on chipsets that are known to not support it */
static void quirk_disable_msi(struct pci_dev *dev)
@@ -3998,6 +3999,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9183,
/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c46 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0,
quirk_dma_func1_alias);
+/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c135 */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9215,
+ quirk_dma_func1_alias);
/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c127 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9220,
quirk_dma_func1_alias);
@@ -5164,6 +5168,18 @@ static void quirk_amd_harvest_no_ats(struct pci_dev *pdev)
(pdev->device == 0x7340 && pdev->revision != 0xc5))
return;
+ if (pdev->device == 0x15d8) {
+ if (pdev->revision == 0xcf &&
+ pdev->subsystem_vendor == 0xea50 &&
+ (pdev->subsystem_device == 0xce19 ||
+ pdev->subsystem_device == 0xcc10 ||
+ pdev->subsystem_device == 0xcc08))
+ goto no_ats;
+ else
+ return;
+ }
+
+no_ats:
pci_info(pdev, "disabling ATS\n");
pdev->ats_cap = 0;
}
@@ -5176,6 +5192,8 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_amd_harvest_no_ats);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7312, quirk_amd_harvest_no_ats);
/* AMD Navi14 dGPU */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats);
+/* AMD Raven platform iGPU */
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x15d8, quirk_amd_harvest_no_ats);
#endif /* CONFIG_PCI_ATS */
/* Freescale PCIe doesn't support MSI in RC mode */
@@ -5567,17 +5585,26 @@ static void pci_fixup_no_d0_pme(struct pci_dev *dev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASMEDIA, 0x2142, pci_fixup_no_d0_pme);
/*
- * Device [12d8:0x400e] and [12d8:0x400f]
+ * Device 12d8:0x400e [OHCI] and 12d8:0x400f [EHCI]
+ *
* These devices advertise PME# support in all power states but don't
* reliably assert it.
+ *
+ * These devices also advertise MSI, but documentation (PI7C9X440SL.pdf)
+ * says "The MSI Function is not implemented on this device" in chapters
+ * 7.3.27, 7.3.29-7.3.31.
*/
-static void pci_fixup_no_pme(struct pci_dev *dev)
+static void pci_fixup_no_msi_no_pme(struct pci_dev *dev)
{
+#ifdef CONFIG_PCI_MSI
+ pci_info(dev, "MSI is not implemented on this device, disabling it\n");
+ dev->no_msi = 1;
+#endif
pci_info(dev, "PME# is unreliable, disabling it\n");
dev->pme_support = 0;
}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_PERICOM, 0x400e, pci_fixup_no_pme);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_PERICOM, 0x400f, pci_fixup_no_pme);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_PERICOM, 0x400e, pci_fixup_no_msi_no_pme);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_PERICOM, 0x400f, pci_fixup_no_msi_no_pme);
static void apex_pci_fixup_class(struct pci_dev *pdev)
{
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index 3861505741e6..d627dd9179b4 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -272,6 +272,9 @@ placeholder:
goto err;
}
+ INIT_LIST_HEAD(&slot->list);
+ list_add(&slot->list, &parent->slots);
+
err = kobject_init_and_add(&slot->kobj, &pci_slot_ktype, NULL,
"%s", slot_name);
if (err) {
@@ -279,9 +282,6 @@ placeholder:
goto err;
}
- INIT_LIST_HEAD(&slot->list);
- list_add(&slot->list, &parent->slots);
-
down_read(&pci_bus_sem);
list_for_each_entry(dev, &parent->devices, bus_list)
if (PCI_SLOT(dev->devfn) == slot_nr)
@@ -323,7 +323,7 @@ EXPORT_SYMBOL_GPL(pci_destroy_slot);
#if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE)
#include <linux/pci_hotplug.h>
/**
- * pci_hp_create_link - create symbolic link to the hotplug driver module.
+ * pci_hp_create_module_link - create symbolic link to hotplug driver module
* @pci_slot: struct pci_slot
*
* Helper function for pci_hotplug_core.c to create symbolic link to
@@ -349,7 +349,8 @@ void pci_hp_create_module_link(struct pci_slot *pci_slot)
EXPORT_SYMBOL_GPL(pci_hp_create_module_link);
/**
- * pci_hp_remove_link - remove symbolic link to the hotplug driver module.
+ * pci_hp_remove_module_link - remove symbolic link to the hotplug driver
+ * module.
* @pci_slot: struct pci_slot
*
* Helper function for pci_hotplug_core.c to remove symbolic link to
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index 82d10b6661c7..d13b8d1a780a 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -244,10 +244,6 @@ config PCMCIA_VRC4171
tristate "NEC VRC4171 Card Controllers support"
depends on CPU_VR41XX && ISA && PCMCIA
-config PCMCIA_VRC4173
- tristate "NEC VRC4173 CARDU support"
- depends on CPU_VR41XX && PCI && PCMCIA
-
config OMAP_CF
tristate "OMAP CompactFlash Controller"
depends on PCMCIA && ARCH_OMAP16XX
@@ -258,6 +254,7 @@ config OMAP_CF
config AT91_CF
tristate "AT91 CompactFlash Controller"
depends on PCI
+ depends on OF
depends on PCMCIA && ARCH_AT91
help
Say Y here to support the CompactFlash controller on AT91 chips.
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index 01779c5c45f3..d82c07c4806b 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -30,7 +30,6 @@ obj-$(CONFIG_PCMCIA_SA1100) += sa1100_cs.o
obj-$(CONFIG_PCMCIA_SA1111) += sa1111_cs.o
obj-$(CONFIG_PCMCIA_BCM63XX) += bcm63xx_pcmcia.o
obj-$(CONFIG_PCMCIA_VRC4171) += vrc4171_card.o
-obj-$(CONFIG_PCMCIA_VRC4173) += vrc4173_cardu.o
obj-$(CONFIG_OMAP_CF) += omap_cf.o
obj-$(CONFIG_AT91_CF) += at91_cf.o
obj-$(CONFIG_ELECTRA_CF) += electra_cf.o
diff --git a/drivers/pcmcia/at91_cf.c b/drivers/pcmcia/at91_cf.c
index 7db0e9c74dfc..6b1edfc890a3 100644
--- a/drivers/pcmcia/at91_cf.c
+++ b/drivers/pcmcia/at91_cf.c
@@ -13,7 +13,6 @@
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/gpio.h>
-#include <linux/platform_data/atmel.h>
#include <linux/io.h>
#include <linux/sizes.h>
#include <linux/mfd/syscon.h>
@@ -35,6 +34,17 @@
#define CF_IO_PHYS (1 << 23)
#define CF_MEM_PHYS (0x017ff800)
+struct at91_cf_data {
+ int irq_pin; /* I/O IRQ */
+ int det_pin; /* Card detect */
+ int vcc_pin; /* power switching */
+ int rst_pin; /* card reset */
+ u8 chipselect; /* EBI Chip Select number */
+ u8 flags;
+#define AT91_CF_TRUE_IDE 0x01
+#define AT91_IDE_SWAP_A0_A2 0x02
+};
+
struct regmap *mc;
/*--------------------------------------------------------------------------*/
@@ -209,16 +219,18 @@ static struct pccard_operations at91_cf_ops = {
/*--------------------------------------------------------------------------*/
-#if defined(CONFIG_OF)
static const struct of_device_id at91_cf_dt_ids[] = {
{ .compatible = "atmel,at91rm9200-cf" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, at91_cf_dt_ids);
-static int at91_cf_dt_init(struct platform_device *pdev)
+static int at91_cf_probe(struct platform_device *pdev)
{
- struct at91_cf_data *board;
+ struct at91_cf_socket *cf;
+ struct at91_cf_data *board;
+ struct resource *io;
+ int status;
board = devm_kzalloc(&pdev->dev, sizeof(*board), GFP_KERNEL);
if (!board)
@@ -229,33 +241,9 @@ static int at91_cf_dt_init(struct platform_device *pdev)
board->vcc_pin = of_get_gpio(pdev->dev.of_node, 2);
board->rst_pin = of_get_gpio(pdev->dev.of_node, 3);
- pdev->dev.platform_data = board;
-
mc = syscon_regmap_lookup_by_compatible("atmel,at91rm9200-sdramc");
-
- return PTR_ERR_OR_ZERO(mc);
-}
-#else
-static int at91_cf_dt_init(struct platform_device *pdev)
-{
- return -ENODEV;
-}
-#endif
-
-static int at91_cf_probe(struct platform_device *pdev)
-{
- struct at91_cf_socket *cf;
- struct at91_cf_data *board = pdev->dev.platform_data;
- struct resource *io;
- int status;
-
- if (!board) {
- status = at91_cf_dt_init(pdev);
- if (status)
- return status;
-
- board = pdev->dev.platform_data;
- }
+ if (IS_ERR(mc))
+ return PTR_ERR(mc);
if (!gpio_is_valid(board->det_pin) || !gpio_is_valid(board->rst_pin))
return -ENODEV;
@@ -399,7 +387,7 @@ static int at91_cf_resume(struct platform_device *pdev)
static struct platform_driver at91_cf_driver = {
.driver = {
.name = "at91_cf",
- .of_match_table = of_match_ptr(at91_cf_dt_ids),
+ .of_match_table = at91_cf_dt_ids,
},
.probe = at91_cf_probe,
.remove = at91_cf_remove,
diff --git a/drivers/pcmcia/db1xxx_ss.c b/drivers/pcmcia/db1xxx_ss.c
index a7c7c7cd2326..a6fbc709913e 100644
--- a/drivers/pcmcia/db1xxx_ss.c
+++ b/drivers/pcmcia/db1xxx_ss.c
@@ -452,7 +452,7 @@ static int db1x_pcmcia_socket_probe(struct platform_device *pdev)
printk(KERN_INFO "db1xxx-ss: unknown board %d!\n", bid);
ret = -ENODEV;
goto out0;
- };
+ }
/*
* gather resources necessary and optional nice-to-haves to
diff --git a/drivers/pcmcia/electra_cf.c b/drivers/pcmcia/electra_cf.c
index 35158cfd9c1a..40a5cffe24a4 100644
--- a/drivers/pcmcia/electra_cf.c
+++ b/drivers/pcmcia/electra_cf.c
@@ -229,6 +229,8 @@ static int electra_cf_probe(struct platform_device *ofdev)
cf->socket.pci_irq = cf->irq;
+ status = -EINVAL;
+
prop = of_get_property(np, "card-detect-gpio", NULL);
if (!prop)
goto fail1;
diff --git a/drivers/pcmcia/omap_cf.c b/drivers/pcmcia/omap_cf.c
index d3ef5534991e..f0b2c2d03469 100644
--- a/drivers/pcmcia/omap_cf.c
+++ b/drivers/pcmcia/omap_cf.c
@@ -252,11 +252,15 @@ static int __init omap_cf_probe(struct platform_device *pdev)
/* pcmcia layer only remaps "real" memory */
cf->socket.io_offset = (unsigned long)
ioremap(cf->phys_cf + SZ_4K, SZ_2K);
- if (!cf->socket.io_offset)
+ if (!cf->socket.io_offset) {
+ status = -ENOMEM;
goto fail1;
+ }
- if (!request_mem_region(cf->phys_cf, SZ_8K, driver_name))
+ if (!request_mem_region(cf->phys_cf, SZ_8K, driver_name)) {
+ status = -ENXIO;
goto fail1;
+ }
/* NOTE: CF conflicts with MMC1 */
omap_cfg_reg(W11_1610_CF_CD1);
diff --git a/drivers/pcmcia/vrc4173_cardu.c b/drivers/pcmcia/vrc4173_cardu.c
deleted file mode 100644
index 9fb0c3addfd4..000000000000
--- a/drivers/pcmcia/vrc4173_cardu.c
+++ /dev/null
@@ -1,591 +0,0 @@
-/*
- * FILE NAME
- * drivers/pcmcia/vrc4173_cardu.c
- *
- * BRIEF MODULE DESCRIPTION
- * NEC VRC4173 CARDU driver for Socket Services
- * (This device doesn't support CardBus. it is supporting only 16bit PC Card.)
- *
- * Copyright 2002,2003 Yoichi Yuasa <yuasa@linux-mips.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
- * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
- * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
- * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-
-#include <asm/io.h>
-
-#include <pcmcia/ss.h>
-
-#include "vrc4173_cardu.h"
-
-MODULE_DESCRIPTION("NEC VRC4173 CARDU driver for Socket Services");
-MODULE_AUTHOR("Yoichi Yuasa <yuasa@linux-mips.org>");
-MODULE_LICENSE("GPL");
-
-static int vrc4173_cardu_slots;
-
-static vrc4173_socket_t cardu_sockets[CARDU_MAX_SOCKETS];
-
-extern struct socket_info_t *pcmcia_register_socket (int slot,
- struct pccard_operations *vtable,
- int use_bus_pm);
-extern void pcmcia_unregister_socket(struct socket_info_t *s);
-
-static inline uint8_t exca_readb(vrc4173_socket_t *socket, uint16_t offset)
-{
- return readb(socket->base + EXCA_REGS_BASE + offset);
-}
-
-static inline uint16_t exca_readw(vrc4173_socket_t *socket, uint16_t offset)
-{
- uint16_t val;
-
- val = readb(socket->base + EXCA_REGS_BASE + offset);
- val |= (u16)readb(socket->base + EXCA_REGS_BASE + offset + 1) << 8;
-
- return val;
-}
-
-static inline void exca_writeb(vrc4173_socket_t *socket, uint16_t offset, uint8_t val)
-{
- writeb(val, socket->base + EXCA_REGS_BASE + offset);
-}
-
-static inline void exca_writew(vrc4173_socket_t *socket, uint8_t offset, uint16_t val)
-{
- writeb((u8)val, socket->base + EXCA_REGS_BASE + offset);
- writeb((u8)(val >> 8), socket->base + EXCA_REGS_BASE + offset + 1);
-}
-
-static inline uint32_t cardbus_socket_readl(vrc4173_socket_t *socket, u16 offset)
-{
- return readl(socket->base + CARDBUS_SOCKET_REGS_BASE + offset);
-}
-
-static inline void cardbus_socket_writel(vrc4173_socket_t *socket, u16 offset, uint32_t val)
-{
- writel(val, socket->base + CARDBUS_SOCKET_REGS_BASE + offset);
-}
-
-static void cardu_pciregs_init(struct pci_dev *dev)
-{
- u32 syscnt;
- u16 brgcnt;
- u8 devcnt;
-
- pci_write_config_dword(dev, 0x1c, 0x10000000);
- pci_write_config_dword(dev, 0x20, 0x17fff000);
- pci_write_config_dword(dev, 0x2c, 0);
- pci_write_config_dword(dev, 0x30, 0xfffc);
-
- pci_read_config_word(dev, BRGCNT, &brgcnt);
- brgcnt &= ~IREQ_INT;
- pci_write_config_word(dev, BRGCNT, brgcnt);
-
- pci_read_config_dword(dev, SYSCNT, &syscnt);
- syscnt &= ~(BAD_VCC_REQ_DISB|PCPCI_EN|CH_ASSIGN_MASK|SUB_ID_WR_EN|PCI_CLK_RIN);
- syscnt |= (CH_ASSIGN_NODMA|ASYN_INT_MODE);
- pci_write_config_dword(dev, SYSCNT, syscnt);
-
- pci_read_config_byte(dev, DEVCNT, &devcnt);
- devcnt &= ~(ZOOM_VIDEO_EN|SR_PCI_INT_SEL_MASK|PCI_INT_MODE|IRQ_MODE);
- devcnt |= (SR_PCI_INT_SEL_NONE|IFG);
- pci_write_config_byte(dev, DEVCNT, devcnt);
-
- pci_write_config_byte(dev, CHIPCNT, S_PREF_DISB);
-
- pci_write_config_byte(dev, SERRDIS, 0);
-}
-
-static int cardu_init(unsigned int slot)
-{
- vrc4173_socket_t *socket = &cardu_sockets[slot];
-
- cardu_pciregs_init(socket->dev);
-
- /* CARD_SC bits are cleared by reading CARD_SC. */
- exca_writeb(socket, GLO_CNT, 0);
-
- socket->cap.features |= SS_CAP_PCCARD | SS_CAP_PAGE_REGS;
- socket->cap.irq_mask = 0;
- socket->cap.map_size = 0x1000;
- socket->cap.pci_irq = socket->dev->irq;
- socket->events = 0;
- spin_lock_init(&socket->event_lock);
-
- /* Enable PC Card status interrupts */
- exca_writeb(socket, CARD_SCI, CARD_DT_EN|RDY_EN|BAT_WAR_EN|BAT_DEAD_EN);
-
- return 0;
-}
-
-static int cardu_register_callback(unsigned int sock,
- void (*handler)(void *, unsigned int),
- void * info)
-{
- vrc4173_socket_t *socket = &cardu_sockets[sock];
-
- socket->handler = handler;
- socket->info = info;
-
- return 0;
-}
-
-static int cardu_inquire_socket(unsigned int sock, socket_cap_t *cap)
-{
- vrc4173_socket_t *socket = &cardu_sockets[sock];
-
- *cap = socket->cap;
-
- return 0;
-}
-
-static int cardu_get_status(unsigned int sock, u_int *value)
-{
- vrc4173_socket_t *socket = &cardu_sockets[sock];
- uint32_t state;
- uint8_t status;
- u_int val = 0;
-
- status = exca_readb(socket, IF_STATUS);
- if (status & CARD_PWR) val |= SS_POWERON;
- if (status & READY) val |= SS_READY;
- if (status & CARD_WP) val |= SS_WRPROT;
- if ((status & (CARD_DETECT1|CARD_DETECT2)) == (CARD_DETECT1|CARD_DETECT2))
- val |= SS_DETECT;
- if (exca_readb(socket, INT_GEN_CNT) & CARD_TYPE_IO) {
- if (status & STSCHG) val |= SS_STSCHG;
- } else {
- status &= BV_DETECT_MASK;
- if (status != BV_DETECT_GOOD) {
- if (status == BV_DETECT_WARN) val |= SS_BATWARN;
- else val |= SS_BATDEAD;
- }
- }
-
- state = cardbus_socket_readl(socket, SKT_PRE_STATE);
- if (state & VOL_3V_CARD_DT) val |= SS_3VCARD;
- if (state & VOL_XV_CARD_DT) val |= SS_XVCARD;
- if (state & CB_CARD_DT) val |= SS_CARDBUS;
- if (!(state &
- (VOL_YV_CARD_DT|VOL_XV_CARD_DT|VOL_3V_CARD_DT|VOL_5V_CARD_DT|CCD20|CCD10)))
- val |= SS_PENDING;
-
- *value = val;
-
- return 0;
-}
-
-static inline uint8_t set_Vcc_value(u_char Vcc)
-{
- switch (Vcc) {
- case 33:
- return VCC_3V;
- case 50:
- return VCC_5V;
- }
-
- return VCC_0V;
-}
-
-static inline uint8_t set_Vpp_value(u_char Vpp)
-{
- switch (Vpp) {
- case 33:
- case 50:
- return VPP_VCC;
- case 120:
- return VPP_12V;
- }
-
- return VPP_0V;
-}
-
-static int cardu_set_socket(unsigned int sock, socket_state_t *state)
-{
- vrc4173_socket_t *socket = &cardu_sockets[sock];
- uint8_t val;
-
- if (((state->Vpp == 33) || (state->Vpp == 50)) && (state->Vpp != state->Vcc))
- return -EINVAL;
-
- val = set_Vcc_value(state->Vcc);
- val |= set_Vpp_value(state->Vpp);
- if (state->flags & SS_OUTPUT_ENA) val |= CARD_OUT_EN;
- exca_writeb(socket, PWR_CNT, val);
-
- val = exca_readb(socket, INT_GEN_CNT) & CARD_REST0;
- if (state->flags & SS_RESET) val &= ~CARD_REST0;
- else val |= CARD_REST0;
- if (state->flags & SS_IOCARD) val |= CARD_TYPE_IO;
- exca_writeb(socket, INT_GEN_CNT, val);
-
- return 0;
-}
-
-static int cardu_get_io_map(unsigned int sock, struct pccard_io_map *io)
-{
- vrc4173_socket_t *socket = &cardu_sockets[sock];
- uint8_t ioctl, window;
- u_char map;
-
- map = io->map;
- if (map > 1)
- return -EINVAL;
-
- io->start = exca_readw(socket, IO_WIN_SA(map));
- io->stop = exca_readw(socket, IO_WIN_EA(map));
-
- ioctl = exca_readb(socket, IO_WIN_CNT);
- window = exca_readb(socket, ADR_WIN_EN);
- io->flags = (window & IO_WIN_EN(map)) ? MAP_ACTIVE : 0;
- if (ioctl & IO_WIN_DATA_AUTOSZ(map))
- io->flags |= MAP_AUTOSZ;
- else if (ioctl & IO_WIN_DATA_16BIT(map))
- io->flags |= MAP_16BIT;
-
- return 0;
-}
-
-static int cardu_set_io_map(unsigned int sock, struct pccard_io_map *io)
-{
- vrc4173_socket_t *socket = &cardu_sockets[sock];
- uint16_t ioctl;
- uint8_t window, enable;
- u_char map;
-
- map = io->map;
- if (map > 1)
- return -EINVAL;
-
- window = exca_readb(socket, ADR_WIN_EN);
- enable = IO_WIN_EN(map);
-
- if (window & enable) {
- window &= ~enable;
- exca_writeb(socket, ADR_WIN_EN, window);
- }
-
- exca_writew(socket, IO_WIN_SA(map), io->start);
- exca_writew(socket, IO_WIN_EA(map), io->stop);
-
- ioctl = exca_readb(socket, IO_WIN_CNT) & ~IO_WIN_CNT_MASK(map);
- if (io->flags & MAP_AUTOSZ) ioctl |= IO_WIN_DATA_AUTOSZ(map);
- else if (io->flags & MAP_16BIT) ioctl |= IO_WIN_DATA_16BIT(map);
- exca_writeb(socket, IO_WIN_CNT, ioctl);
-
- if (io->flags & MAP_ACTIVE)
- exca_writeb(socket, ADR_WIN_EN, window | enable);
-
- return 0;
-}
-
-static int cardu_get_mem_map(unsigned int sock, struct pccard_mem_map *mem)
-{
- vrc4173_socket_t *socket = &cardu_sockets[sock];
- uint32_t start, stop, offset, page;
- uint8_t window;
- u_char map;
-
- map = mem->map;
- if (map > 4)
- return -EINVAL;
-
- window = exca_readb(socket, ADR_WIN_EN);
- mem->flags = (window & MEM_WIN_EN(map)) ? MAP_ACTIVE : 0;
-
- start = exca_readw(socket, MEM_WIN_SA(map));
- mem->flags |= (start & MEM_WIN_DSIZE) ? MAP_16BIT : 0;
- start = (start & 0x0fff) << 12;
-
- stop = exca_readw(socket, MEM_WIN_EA(map));
- stop = ((stop & 0x0fff) << 12) + 0x0fff;
-
- offset = exca_readw(socket, MEM_WIN_OA(map));
- mem->flags |= (offset & MEM_WIN_WP) ? MAP_WRPROT : 0;
- mem->flags |= (offset & MEM_WIN_REGSET) ? MAP_ATTRIB : 0;
- offset = ((offset & 0x3fff) << 12) + start;
- mem->card_start = offset & 0x03ffffff;
-
- page = exca_readb(socket, MEM_WIN_SAU(map)) << 24;
- mem->sys_start = start + page;
- mem->sys_stop = stop + page;
-
- return 0;
-}
-
-static int cardu_set_mem_map(unsigned int sock, struct pccard_mem_map *mem)
-{
- vrc4173_socket_t *socket = &cardu_sockets[sock];
- uint16_t value;
- uint8_t window, enable;
- u_long sys_start, sys_stop, card_start;
- u_char map;
-
- map = mem->map;
- sys_start = mem->sys_start;
- sys_stop = mem->sys_stop;
- card_start = mem->card_start;
-
- if (map > 4 || sys_start > sys_stop || ((sys_start ^ sys_stop) >> 24) ||
- (card_start >> 26))
- return -EINVAL;
-
- window = exca_readb(socket, ADR_WIN_EN);
- enable = MEM_WIN_EN(map);
- if (window & enable) {
- window &= ~enable;
- exca_writeb(socket, ADR_WIN_EN, window);
- }
-
- exca_writeb(socket, MEM_WIN_SAU(map), sys_start >> 24);
-
- value = (sys_start >> 12) & 0x0fff;
- if (mem->flags & MAP_16BIT) value |= MEM_WIN_DSIZE;
- exca_writew(socket, MEM_WIN_SA(map), value);
-
- value = (sys_stop >> 12) & 0x0fff;
- exca_writew(socket, MEM_WIN_EA(map), value);
-
- value = ((card_start - sys_start) >> 12) & 0x3fff;
- if (mem->flags & MAP_WRPROT) value |= MEM_WIN_WP;
- if (mem->flags & MAP_ATTRIB) value |= MEM_WIN_REGSET;
- exca_writew(socket, MEM_WIN_OA(map), value);
-
- if (mem->flags & MAP_ACTIVE)
- exca_writeb(socket, ADR_WIN_EN, window | enable);
-
- return 0;
-}
-
-static void cardu_proc_setup(unsigned int sock, struct proc_dir_entry *base)
-{
-}
-
-static struct pccard_operations cardu_operations = {
- .init = cardu_init,
- .register_callback = cardu_register_callback,
- .inquire_socket = cardu_inquire_socket,
- .get_status = cardu_get_status,
- .set_socket = cardu_set_socket,
- .get_io_map = cardu_get_io_map,
- .set_io_map = cardu_set_io_map,
- .get_mem_map = cardu_get_mem_map,
- .set_mem_map = cardu_set_mem_map,
- .proc_setup = cardu_proc_setup,
-};
-
-static void cardu_bh(void *data)
-{
- vrc4173_socket_t *socket = (vrc4173_socket_t *)data;
- uint16_t events;
-
- spin_lock_irq(&socket->event_lock);
- events = socket->events;
- socket->events = 0;
- spin_unlock_irq(&socket->event_lock);
-
- if (socket->handler)
- socket->handler(socket->info, events);
-}
-
-static uint16_t get_events(vrc4173_socket_t *socket)
-{
- uint16_t events = 0;
- uint8_t csc, status;
-
- status = exca_readb(socket, IF_STATUS);
- csc = exca_readb(socket, CARD_SC);
- if ((csc & CARD_DT_CHG) &&
- ((status & (CARD_DETECT1|CARD_DETECT2)) == (CARD_DETECT1|CARD_DETECT2)))
- events |= SS_DETECT;
-
- if ((csc & RDY_CHG) && (status & READY))
- events |= SS_READY;
-
- if (exca_readb(socket, INT_GEN_CNT) & CARD_TYPE_IO) {
- if ((csc & BAT_DEAD_ST_CHG) && (status & STSCHG))
- events |= SS_STSCHG;
- } else {
- if (csc & (BAT_WAR_CHG|BAT_DEAD_ST_CHG)) {
- if ((status & BV_DETECT_MASK) != BV_DETECT_GOOD) {
- if ((status & BV_DETECT_MASK) == BV_DETECT_WARN) events |= SS_BATWARN;
- else events |= SS_BATDEAD;
- }
- }
- }
-
- return events;
-}
-
-static void cardu_interrupt(int irq, void *dev_id)
-{
- vrc4173_socket_t *socket = (vrc4173_socket_t *)dev_id;
- uint16_t events;
-
- INIT_WORK(&socket->tq_work, cardu_bh, socket);
-
- events = get_events(socket);
- if (events) {
- spin_lock(&socket->event_lock);
- socket->events |= events;
- spin_unlock(&socket->event_lock);
- schedule_work(&socket->tq_work);
- }
-}
-
-static int vrc4173_cardu_probe(struct pci_dev *dev,
- const struct pci_device_id *ent)
-{
- vrc4173_socket_t *socket;
- unsigned long start, len, flags;
- int slot, err, ret;
-
- slot = vrc4173_cardu_slots++;
- socket = &cardu_sockets[slot];
- if (socket->noprobe != 0)
- return -EBUSY;
-
- sprintf(socket->name, "NEC VRC4173 CARDU%1d", slot+1);
-
- if ((err = pci_enable_device(dev)) < 0)
- return err;
-
- start = pci_resource_start(dev, 0);
- if (start == 0) {
- ret = -ENODEV;
- goto disable;
- }
-
- len = pci_resource_len(dev, 0);
- if (len == 0) {
- ret = -ENODEV;
- goto disable;
- }
-
- flags = pci_resource_flags(dev, 0);
- if ((flags & IORESOURCE_MEM) == 0) {
- ret = -EBUSY;
- goto disable;
- }
-
- err = pci_request_regions(dev, socket->name);
- if (err < 0) {
- ret = err;
- goto disable;
- }
-
- socket->base = ioremap(start, len);
- if (socket->base == NULL) {
- ret = -ENODEV;
- goto release;
- }
-
- socket->dev = dev;
-
- socket->pcmcia_socket = pcmcia_register_socket(slot, &cardu_operations, 1);
- if (socket->pcmcia_socket == NULL) {
- ret = -ENOMEM;
- goto unmap;
- }
-
- if (request_irq(dev->irq, cardu_interrupt, IRQF_SHARED, socket->name, socket) < 0) {
- ret = -EBUSY;
- goto unregister;
- }
-
- printk(KERN_INFO "%s at %#08lx, IRQ %d\n", socket->name, start, dev->irq);
-
- return 0;
-
-unregister:
- pcmcia_unregister_socket(socket->pcmcia_socket);
- socket->pcmcia_socket = NULL;
-unmap:
- iounmap(socket->base);
- socket->base = NULL;
-release:
- pci_release_regions(dev);
-disable:
- pci_disable_device(dev);
- return ret;
-}
-
-static int vrc4173_cardu_setup(char *options)
-{
- if (options == NULL || *options == '\0')
- return 1;
-
- if (strncmp(options, "cardu1:", 7) == 0) {
- options += 7;
- if (*options != '\0') {
- if (strncmp(options, "noprobe", 7) == 0) {
- cardu_sockets[CARDU1].noprobe = 1;
- options += 7;
- }
-
- if (*options != ',')
- return 1;
- } else
- return 1;
- }
-
- if (strncmp(options, "cardu2:", 7) == 0) {
- options += 7;
- if ((*options != '\0') && (strncmp(options, "noprobe", 7) == 0))
- cardu_sockets[CARDU2].noprobe = 1;
- }
-
- return 1;
-}
-
-__setup("vrc4173_cardu=", vrc4173_cardu_setup);
-
-static const struct pci_device_id vrc4173_cardu_id_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_NAPCCARD) },
- {0, }
-};
-
-static struct pci_driver vrc4173_cardu_driver = {
- .name = "NEC VRC4173 CARDU",
- .probe = vrc4173_cardu_probe,
- .id_table = vrc4173_cardu_id_table,
-};
-
-static int vrc4173_cardu_init(void)
-{
- vrc4173_cardu_slots = 0;
-
- return pci_register_driver(&vrc4173_cardu_driver);
-}
-
-static void vrc4173_cardu_exit(void)
-{
- pci_unregister_driver(&vrc4173_cardu_driver);
-}
-
-module_init(vrc4173_cardu_init);
-module_exit(vrc4173_cardu_exit);
-MODULE_DEVICE_TABLE(pci, vrc4173_cardu_id_table);
diff --git a/drivers/pcmcia/vrc4173_cardu.h b/drivers/pcmcia/vrc4173_cardu.h
deleted file mode 100644
index a7d96018ed8d..000000000000
--- a/drivers/pcmcia/vrc4173_cardu.h
+++ /dev/null
@@ -1,247 +0,0 @@
-/*
- * FILE NAME
- * drivers/pcmcia/vrc4173_cardu.h
- *
- * BRIEF MODULE DESCRIPTION
- * Include file for NEC VRC4173 CARDU.
- *
- * Copyright 2002 Yoichi Yuasa <yuasa@linux-mips.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
- * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
- * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
- * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-#ifndef _VRC4173_CARDU_H
-#define _VRC4173_CARDU_H
-
-#include <linux/pci.h>
-
-#include <pcmcia/ss.h>
-
-#define CARDU_MAX_SOCKETS 2
-#define CARDU1 0
-#define CARDU2 1
-
-/*
- * PCI Configuration Registers
- */
-#define BRGCNT 0x3e
- #define POST_WR_EN 0x0400
- #define MEM1_PREF_EN 0x0200
- #define MEM0_PREF_EN 0x0100
- #define IREQ_INT 0x0080
- #define CARD_RST 0x0040
- #define MABORT_MODE 0x0020
- #define VGA_EN 0x0008
- #define ISA_EN 0x0004
- #define SERR_EN 0x0002
- #define PERR_EN 0x0001
-
-#define SYSCNT 0x80
- #define BAD_VCC_REQ_DISB 0x00200000
- #define PCPCI_EN 0x00080000
- #define CH_ASSIGN_MASK 0x00070000
- #define CH_ASSIGN_NODMA 0x00040000
- #define SUB_ID_WR_EN 0x00000008
- #define ASYN_INT_MODE 0x00000004
- #define PCI_CLK_RIN 0x00000002
-
-#define DEVCNT 0x91
- #define ZOOM_VIDEO_EN 0x40
- #define SR_PCI_INT_SEL_MASK 0x18
- #define SR_PCI_INT_SEL_NONE 0x00
- #define PCI_INT_MODE 0x04
- #define IRQ_MODE 0x02
- #define IFG 0x01
-
-#define CHIPCNT 0x9c
- #define S_PREF_DISB 0x10
-
-#define SERRDIS 0x9f
- #define SERR_DIS_MAB 0x10
- #define SERR_DIS_TAB 0x08
- #define SERR_DIS_DT_PERR 0x04
-
-/*
- * ExCA Registers
- */
-#define EXCA_REGS_BASE 0x800
-#define EXCA_REGS_SIZE 0x800
-
-#define ID_REV 0x000
- #define IF_TYPE_16BIT 0x80
-
-#define IF_STATUS 0x001
- #define CARD_PWR 0x40
- #define READY 0x20
- #define CARD_WP 0x10
- #define CARD_DETECT2 0x08
- #define CARD_DETECT1 0x04
- #define BV_DETECT_MASK 0x03
- #define BV_DETECT_GOOD 0x03 /* Memory card */
- #define BV_DETECT_WARN 0x02
- #define BV_DETECT_BAD1 0x01
- #define BV_DETECT_BAD0 0x00
- #define STSCHG 0x02 /* I/O card */
- #define SPKR 0x01
-
-#define PWR_CNT 0x002
- #define CARD_OUT_EN 0x80
- #define VCC_MASK 0x18
- #define VCC_3V 0x18
- #define VCC_5V 0x10
- #define VCC_0V 0x00
- #define VPP_MASK 0x03
- #define VPP_12V 0x02
- #define VPP_VCC 0x01
- #define VPP_0V 0x00
-
-#define INT_GEN_CNT 0x003
- #define CARD_REST0 0x40
- #define CARD_TYPE_MASK 0x20
- #define CARD_TYPE_IO 0x20
- #define CARD_TYPE_MEM 0x00
-
-#define CARD_SC 0x004
- #define CARD_DT_CHG 0x08
- #define RDY_CHG 0x04
- #define BAT_WAR_CHG 0x02
- #define BAT_DEAD_ST_CHG 0x01
-
-#define CARD_SCI 0x005
- #define CARD_DT_EN 0x08
- #define RDY_EN 0x04
- #define BAT_WAR_EN 0x02
- #define BAT_DEAD_EN 0x01
-
-#define ADR_WIN_EN 0x006
- #define IO_WIN_EN(x) (0x40 << (x))
- #define MEM_WIN_EN(x) (0x01 << (x))
-
-#define IO_WIN_CNT 0x007
- #define IO_WIN_CNT_MASK(x) (0x03 << ((x) << 2))
- #define IO_WIN_DATA_AUTOSZ(x) (0x02 << ((x) << 2))
- #define IO_WIN_DATA_16BIT(x) (0x01 << ((x) << 2))
-
-#define IO_WIN_SA(x) (0x008 + ((x) << 2))
-#define IO_WIN_EA(x) (0x00a + ((x) << 2))
-
-#define MEM_WIN_SA(x) (0x010 + ((x) << 3))
- #define MEM_WIN_DSIZE 0x8000
-
-#define MEM_WIN_EA(x) (0x012 + ((x) << 3))
-
-#define MEM_WIN_OA(x) (0x014 + ((x) << 3))
- #define MEM_WIN_WP 0x8000
- #define MEM_WIN_REGSET 0x4000
-
-#define GEN_CNT 0x016
- #define VS2_STATUS 0x80
- #define VS1_STATUS 0x40
- #define EXCA_REG_RST_EN 0x02
-
-#define GLO_CNT 0x01e
- #define FUN_INT_LEV 0x08
- #define INT_WB_CLR 0x04
- #define CSC_INT_LEV 0x02
-
-#define IO_WIN_OAL(x) (0x036 + ((x) << 1))
-#define IO_WIN_OAH(x) (0x037 + ((x) << 1))
-
-#define MEM_WIN_SAU(x) (0x040 + (x))
-
-#define IO_SETUP_TIM 0x080
-#define IO_CMD_TIM 0x081
-#define IO_HOLD_TIM 0x082
-#define MEM_SETUP_TIM(x) (0x084 + ((x) << 2))
-#define MEM_CMD_TIM(x) (0x085 + ((x) << 2))
-#define MEM_HOLD_TIM(x) (0x086 + ((x) << 2))
- #define TIM_CLOCKS(x) ((x) - 1)
-
-#define MEM_TIM_SEL1 0x08c
-#define MEM_TIM_SEL2 0x08d
- #define MEM_WIN_TIMSEL1(x) (0x03 << (((x) & 3) << 1))
-
-#define MEM_WIN_PWEN 0x091
- #define POSTWEN 0x01
-
-/*
- * CardBus Socket Registers
- */
-#define CARDBUS_SOCKET_REGS_BASE 0x000
-#define CARDBUS_SOCKET_REGS_SIZE 0x800
-
-#define SKT_EV 0x000
- #define POW_CYC_EV 0x00000008
- #define CCD2_EV 0x00000004
- #define CCD1_EV 0x00000002
- #define CSTSCHG_EV 0x00000001
-
-#define SKT_MASK 0x004
- #define POW_CYC_MASK 0x00000008
- #define CCD_MASK 0x00000006
- #define CSC_MASK 0x00000001
-
-#define SKT_PRE_STATE 0x008
-#define SKT_FORCE_EV 0x00c
- #define VOL_3V_SKT 0x20000000
- #define VOL_5V_SKT 0x10000000
- #define CVS_TEST 0x00004000
- #define VOL_YV_CARD_DT 0x00002000
- #define VOL_XV_CARD_DT 0x00001000
- #define VOL_3V_CARD_DT 0x00000800
- #define VOL_5V_CARD_DT 0x00000400
- #define BAD_VCC_REQ 0x00000200
- #define DATA_LOST 0x00000100
- #define NOT_A_CARD 0x00000080
- #define CREADY 0x00000040
- #define CB_CARD_DT 0x00000020
- #define R2_CARD_DT 0x00000010
- #define POW_UP 0x00000008
- #define CCD20 0x00000004
- #define CCD10 0x00000002
- #define CSTSCHG 0x00000001
-
-#define SKT_CNT 0x010
- #define STP_CLK_EN 0x00000080
- #define VCC_CNT_MASK 0x00000070
- #define VCC_CNT_3V 0x00000030
- #define VCC_CNT_5V 0x00000020
- #define VCC_CNT_0V 0x00000000
- #define VPP_CNT_MASK 0x00000007
- #define VPP_CNT_3V 0x00000003
- #define VPP_CNT_5V 0x00000002
- #define VPP_CNT_12V 0x00000001
- #define VPP_CNT_0V 0x00000000
-
-typedef struct vrc4173_socket {
- int noprobe;
- struct pci_dev *dev;
- void *base;
- void (*handler)(void *, unsigned int);
- void *info;
- socket_cap_t cap;
- spinlock_t event_lock;
- uint16_t events;
- struct socket_info_t *pcmcia_socket;
- struct work_struct tq_work;
- char name[20];
-} vrc4173_socket_t;
-
-#endif /* _VRC4173_CARDU_H */
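The removed exca_readw()/exca_writew() helpers above accessed 16-bit ExCA registers as two byte-wide, little-endian MMIO operations. A stand-alone sketch of that composition; base and offset are hypothetical, while readb()/writeb() are the same accessors the driver used:

#include <linux/io.h>
#include <linux/types.h>

static u16 example_read16(void __iomem *base, unsigned int offset)
{
	u16 val;

	val = readb(base + offset);			/* low byte first */
	val |= (u16)readb(base + offset + 1) << 8;	/* then high byte */

	return val;
}

static void example_write16(void __iomem *base, unsigned int offset, u16 val)
{
	writeb((u8)val, base + offset);
	writeb((u8)(val >> 8), base + offset + 1);
}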
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 794a37d50853..cb2f55f450e4 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -726,11 +726,6 @@ static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu)
return per_cpu(hw_events->irq, cpu);
}
-bool arm_pmu_irq_is_nmi(void)
-{
- return has_nmi;
-}
-
/*
* PMU hardware loses all context when a CPU goes offline.
* When a CPU is hotplugged back in, since some hardware registers are
diff --git a/drivers/phy/ingenic/Makefile b/drivers/phy/ingenic/Makefile
index 65d5ea00fc9d..1cb158d7233f 100644
--- a/drivers/phy/ingenic/Makefile
+++ b/drivers/phy/ingenic/Makefile
@@ -1,2 +1,2 @@
# SPDX-License-Identifier: GPL-2.0
-obj-y += phy-ingenic-usb.o
+obj-$(CONFIG_PHY_INGENIC_USB) += phy-ingenic-usb.o
diff --git a/drivers/phy/mediatek/Kconfig b/drivers/phy/mediatek/Kconfig
index d38def43b1bf..55f8e6c048ab 100644
--- a/drivers/phy/mediatek/Kconfig
+++ b/drivers/phy/mediatek/Kconfig
@@ -49,7 +49,9 @@ config PHY_MTK_HDMI
config PHY_MTK_MIPI_DSI
tristate "MediaTek MIPI-DSI Driver"
- depends on ARCH_MEDIATEK && OF
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on COMMON_CLK
+ depends on OF
select GENERIC_PHY
help
Support MIPI DSI for MediaTek SoCs.
diff --git a/drivers/phy/motorola/phy-cpcap-usb.c b/drivers/phy/motorola/phy-cpcap-usb.c
index 442522ba487f..4728e2bff662 100644
--- a/drivers/phy/motorola/phy-cpcap-usb.c
+++ b/drivers/phy/motorola/phy-cpcap-usb.c
@@ -662,35 +662,42 @@ static int cpcap_usb_phy_probe(struct platform_device *pdev)
generic_phy = devm_phy_create(ddata->dev, NULL, &ops);
if (IS_ERR(generic_phy)) {
error = PTR_ERR(generic_phy);
- return PTR_ERR(generic_phy);
+ goto out_reg_disable;
}
phy_set_drvdata(generic_phy, ddata);
phy_provider = devm_of_phy_provider_register(ddata->dev,
of_phy_simple_xlate);
- if (IS_ERR(phy_provider))
- return PTR_ERR(phy_provider);
+ if (IS_ERR(phy_provider)) {
+ error = PTR_ERR(phy_provider);
+ goto out_reg_disable;
+ }
error = cpcap_usb_init_optional_pins(ddata);
if (error)
- return error;
+ goto out_reg_disable;
cpcap_usb_init_optional_gpios(ddata);
error = cpcap_usb_init_iio(ddata);
if (error)
- return error;
+ goto out_reg_disable;
error = cpcap_usb_init_interrupts(pdev, ddata);
if (error)
- return error;
+ goto out_reg_disable;
usb_add_phy_dev(&ddata->phy);
atomic_set(&ddata->active, 1);
schedule_delayed_work(&ddata->detect_work, msecs_to_jiffies(1));
return 0;
+
+out_reg_disable:
+ regulator_disable(ddata->vusb);
+
+ return error;
}
static int cpcap_usb_phy_remove(struct platform_device *pdev)
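The cpcap hunk above reroutes every early return in probe through out_reg_disable, so a regulator enabled earlier in the function is switched off on any later failure. A minimal sketch of that unwind, assuming the regulator was enabled with a plain (non-devm) call; everything except the regulator API is hypothetical:

#include <linux/errno.h>
#include <linux/regulator/consumer.h>

static int example_later_setup(void)
{
	return -ENODEV;	/* stand-in for any follow-on step that may fail */
}

static int example_probe_tail(struct regulator *vusb)
{
	int error;

	error = regulator_enable(vusb);
	if (error)
		return error;		/* nothing to unwind yet */

	error = example_later_setup();
	if (error)
		goto out_reg_disable;

	return 0;

out_reg_disable:
	regulator_disable(vusb);	/* undo the enable on failure */
	return error;
}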
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 453acce3d0c3..d4b2f2e2ed75 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -82,6 +82,7 @@ config PINCTRL_AT91
config PINCTRL_AT91PIO4
bool "AT91 PIO4 pinctrl driver"
depends on OF
+ depends on HAS_IOMEM
depends on ARCH_AT91 || COMPILE_TEST
select PINMUX
select GENERIC_PINCONF
@@ -374,6 +375,25 @@ config PINCTRL_OCELOT
select OF_GPIO
select REGMAP_MMIO
+config PINCTRL_MICROCHIP_SGPIO
+ bool "Pinctrl driver for Microsemi/Microchip Serial GPIO"
+ depends on OF
+ depends on HAS_IOMEM
+ select GPIOLIB
+ select GPIOLIB_IRQCHIP
+ select GENERIC_PINCONF
+ select GENERIC_PINCTRL_GROUPS
+ select GENERIC_PINMUX_FUNCTIONS
+ select OF_GPIO
+ help
+ Support for the serial GPIO interface used on Microsemi and
+ Microchip SoCs. By using a serial interface, the SIO
+ controller significantly extends the number of available
+ GPIOs with a minimum number of additional pins on the
+ device. The primary purpose of the SIO controller is to
+ connect control signals from SFP modules and to act as an
+ LED controller.
+
source "drivers/pinctrl/actions/Kconfig"
source "drivers/pinctrl/aspeed/Kconfig"
source "drivers/pinctrl/bcm/Kconfig"
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index 3cdb6529db95..5bb9bb6cc3ce 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -46,6 +46,7 @@ obj-$(CONFIG_PINCTRL_ZYNQ) += pinctrl-zynq.o
obj-$(CONFIG_PINCTRL_INGENIC) += pinctrl-ingenic.o
obj-$(CONFIG_PINCTRL_RK805) += pinctrl-rk805.o
obj-$(CONFIG_PINCTRL_OCELOT) += pinctrl-ocelot.o
+obj-$(CONFIG_PINCTRL_MICROCHIP_SGPIO) += pinctrl-microchip-sgpio.o
obj-$(CONFIG_PINCTRL_EQUILIBRIUM) += pinctrl-equilibrium.o
obj-y += actions/
diff --git a/drivers/pinctrl/actions/pinctrl-s500.c b/drivers/pinctrl/actions/pinctrl-s500.c
index 38e30914af6e..ced778079b76 100644
--- a/drivers/pinctrl/actions/pinctrl-s500.c
+++ b/drivers/pinctrl/actions/pinctrl-s500.c
@@ -1485,7 +1485,7 @@ static PAD_PULLCTL_CONF(DNAND_D6, 2, 2, 1);
static PAD_PULLCTL_CONF(DNAND_D7, 2, 2, 1);
/* Pad info table */
-static struct owl_padinfo s500_padinfo[NUM_PADS] = {
+static const struct owl_padinfo s500_padinfo[NUM_PADS] = {
[DNAND_DQS] = PAD_INFO_PULLCTL(DNAND_DQS),
[DNAND_DQSN] = PAD_INFO_PULLCTL(DNAND_DQSN),
[ETH_TXD0] = PAD_INFO_ST(ETH_TXD0),
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
index 34803a6c7664..5c1a109842a7 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
@@ -347,7 +347,7 @@ FUNC_GROUP_DECL(RMII4, F24, E23, E24, E25, C25, C24, B26, B25, B24);
#define D22 40
SIG_EXPR_LIST_DECL_SESG(D22, SD1CLK, SD1, SIG_DESC_SET(SCU414, 8));
-SIG_EXPR_LIST_DECL_SEMG(D22, PWM8, PWM8G0, PWM8, SIG_DESC_SET(SCU414, 8));
+SIG_EXPR_LIST_DECL_SEMG(D22, PWM8, PWM8G0, PWM8, SIG_DESC_SET(SCU4B4, 8));
PIN_DECL_2(D22, GPIOF0, SD1CLK, PWM8);
GROUP_DECL(PWM8G0, D22);
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 3663d87f51a0..9fc4433fece4 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -1602,9 +1602,11 @@ static int pinctrl_pins_show(struct seq_file *s, void *what)
struct pinctrl_dev *pctldev = s->private;
const struct pinctrl_ops *ops = pctldev->desc->pctlops;
unsigned i, pin;
+#ifdef CONFIG_GPIOLIB
struct pinctrl_gpio_range *range;
unsigned int gpio_num;
struct gpio_chip *chip;
+#endif
seq_printf(s, "registered pins: %d\n", pctldev->desc->npins);
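The pinctrl core hunk above fences locals used only by GPIO-range code with CONFIG_GPIOLIB, so kernels built without GPIOLIB do not trip unused-variable warnings. A compact sketch of the pattern; the function and variable names are hypothetical:

static void example_show_pins(void)
{
#ifdef CONFIG_GPIOLIB
	unsigned int gpio_num;		/* consumed only by the gated code */
#endif

	/* ... unconditional reporting happens here ... */

#ifdef CONFIG_GPIOLIB
	for (gpio_num = 0; gpio_num < 4; gpio_num++) {
		/* per-GPIO reporting, compiled only with GPIOLIB */
	}
#endif
}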
diff --git a/drivers/pinctrl/freescale/Kconfig b/drivers/pinctrl/freescale/Kconfig
index a1fbb3b9ae34..f294336430cc 100644
--- a/drivers/pinctrl/freescale/Kconfig
+++ b/drivers/pinctrl/freescale/Kconfig
@@ -24,13 +24,6 @@ config PINCTRL_IMX1
help
Say Y here to enable the imx1 pinctrl driver
-config PINCTRL_IMX21
- bool "i.MX21 pinctrl driver"
- depends on SOC_IMX21
- select PINCTRL_IMX1_CORE
- help
- Say Y here to enable the i.MX21 pinctrl driver
-
config PINCTRL_IMX27
bool "IMX27 pinctrl driver"
depends on SOC_IMX27
diff --git a/drivers/pinctrl/freescale/Makefile b/drivers/pinctrl/freescale/Makefile
index c61722565289..e476cb671037 100644
--- a/drivers/pinctrl/freescale/Makefile
+++ b/drivers/pinctrl/freescale/Makefile
@@ -4,7 +4,6 @@ obj-$(CONFIG_PINCTRL_IMX) += pinctrl-imx.o
obj-$(CONFIG_PINCTRL_IMX_SCU) += pinctrl-scu.o
obj-$(CONFIG_PINCTRL_IMX1_CORE) += pinctrl-imx1-core.o
obj-$(CONFIG_PINCTRL_IMX1) += pinctrl-imx1.o
-obj-$(CONFIG_PINCTRL_IMX21) += pinctrl-imx21.o
obj-$(CONFIG_PINCTRL_IMX27) += pinctrl-imx27.o
obj-$(CONFIG_PINCTRL_IMX35) += pinctrl-imx35.o
obj-$(CONFIG_PINCTRL_IMX50) += pinctrl-imx50.o
diff --git a/drivers/pinctrl/freescale/pinctrl-imx21.c b/drivers/pinctrl/freescale/pinctrl-imx21.c
deleted file mode 100644
index 8a102275a053..000000000000
--- a/drivers/pinctrl/freescale/pinctrl-imx21.c
+++ /dev/null
@@ -1,330 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-//
-// i.MX21 pinctrl driver based on imx pinmux core
-//
-// Copyright (C) 2014 Alexander Shiyan <shc_work@mail.ru>
-
-#include <linux/init.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/pinctrl/pinctrl.h>
-
-#include "pinctrl-imx1.h"
-
-#define PAD_ID(port, pin) ((port) * 32 + (pin))
-#define PA 0
-#define PB 1
-#define PC 2
-#define PD 3
-#define PE 4
-#define PF 5
-
-enum imx21_pads {
- MX21_PAD_LSCLK = PAD_ID(PA, 5),
- MX21_PAD_LD0 = PAD_ID(PA, 6),
- MX21_PAD_LD1 = PAD_ID(PA, 7),
- MX21_PAD_LD2 = PAD_ID(PA, 8),
- MX21_PAD_LD3 = PAD_ID(PA, 9),
- MX21_PAD_LD4 = PAD_ID(PA, 10),
- MX21_PAD_LD5 = PAD_ID(PA, 11),
- MX21_PAD_LD6 = PAD_ID(PA, 12),
- MX21_PAD_LD7 = PAD_ID(PA, 13),
- MX21_PAD_LD8 = PAD_ID(PA, 14),
- MX21_PAD_LD9 = PAD_ID(PA, 15),
- MX21_PAD_LD10 = PAD_ID(PA, 16),
- MX21_PAD_LD11 = PAD_ID(PA, 17),
- MX21_PAD_LD12 = PAD_ID(PA, 18),
- MX21_PAD_LD13 = PAD_ID(PA, 19),
- MX21_PAD_LD14 = PAD_ID(PA, 20),
- MX21_PAD_LD15 = PAD_ID(PA, 21),
- MX21_PAD_LD16 = PAD_ID(PA, 22),
- MX21_PAD_LD17 = PAD_ID(PA, 23),
- MX21_PAD_REV = PAD_ID(PA, 24),
- MX21_PAD_CLS = PAD_ID(PA, 25),
- MX21_PAD_PS = PAD_ID(PA, 26),
- MX21_PAD_SPL_SPR = PAD_ID(PA, 27),
- MX21_PAD_HSYNC = PAD_ID(PA, 28),
- MX21_PAD_VSYNC = PAD_ID(PA, 29),
- MX21_PAD_CONTRAST = PAD_ID(PA, 30),
- MX21_PAD_OE_ACD = PAD_ID(PA, 31),
- MX21_PAD_SD2_D0 = PAD_ID(PB, 4),
- MX21_PAD_SD2_D1 = PAD_ID(PB, 5),
- MX21_PAD_SD2_D2 = PAD_ID(PB, 6),
- MX21_PAD_SD2_D3 = PAD_ID(PB, 7),
- MX21_PAD_SD2_CMD = PAD_ID(PB, 8),
- MX21_PAD_SD2_CLK = PAD_ID(PB, 9),
- MX21_PAD_CSI_D0 = PAD_ID(PB, 10),
- MX21_PAD_CSI_D1 = PAD_ID(PB, 11),
- MX21_PAD_CSI_D2 = PAD_ID(PB, 12),
- MX21_PAD_CSI_D3 = PAD_ID(PB, 13),
- MX21_PAD_CSI_D4 = PAD_ID(PB, 14),
- MX21_PAD_CSI_MCLK = PAD_ID(PB, 15),
- MX21_PAD_CSI_PIXCLK = PAD_ID(PB, 16),
- MX21_PAD_CSI_D5 = PAD_ID(PB, 17),
- MX21_PAD_CSI_D6 = PAD_ID(PB, 18),
- MX21_PAD_CSI_D7 = PAD_ID(PB, 19),
- MX21_PAD_CSI_VSYNC = PAD_ID(PB, 20),
- MX21_PAD_CSI_HSYNC = PAD_ID(PB, 21),
- MX21_PAD_USB_BYP = PAD_ID(PB, 22),
- MX21_PAD_USB_PWR = PAD_ID(PB, 23),
- MX21_PAD_USB_OC = PAD_ID(PB, 24),
- MX21_PAD_USBH_ON = PAD_ID(PB, 25),
- MX21_PAD_USBH1_FS = PAD_ID(PB, 26),
- MX21_PAD_USBH1_OE = PAD_ID(PB, 27),
- MX21_PAD_USBH1_TXDM = PAD_ID(PB, 28),
- MX21_PAD_USBH1_TXDP = PAD_ID(PB, 29),
- MX21_PAD_USBH1_RXDM = PAD_ID(PB, 30),
- MX21_PAD_USBH1_RXDP = PAD_ID(PB, 31),
- MX21_PAD_USBG_SDA = PAD_ID(PC, 5),
- MX21_PAD_USBG_SCL = PAD_ID(PC, 6),
- MX21_PAD_USBG_ON = PAD_ID(PC, 7),
- MX21_PAD_USBG_FS = PAD_ID(PC, 8),
- MX21_PAD_USBG_OE = PAD_ID(PC, 9),
- MX21_PAD_USBG_TXDM = PAD_ID(PC, 10),
- MX21_PAD_USBG_TXDP = PAD_ID(PC, 11),
- MX21_PAD_USBG_RXDM = PAD_ID(PC, 12),
- MX21_PAD_USBG_RXDP = PAD_ID(PC, 13),
- MX21_PAD_TOUT = PAD_ID(PC, 14),
- MX21_PAD_TIN = PAD_ID(PC, 15),
- MX21_PAD_SAP_FS = PAD_ID(PC, 16),
- MX21_PAD_SAP_RXD = PAD_ID(PC, 17),
- MX21_PAD_SAP_TXD = PAD_ID(PC, 18),
- MX21_PAD_SAP_CLK = PAD_ID(PC, 19),
- MX21_PAD_SSI1_FS = PAD_ID(PC, 20),
- MX21_PAD_SSI1_RXD = PAD_ID(PC, 21),
- MX21_PAD_SSI1_TXD = PAD_ID(PC, 22),
- MX21_PAD_SSI1_CLK = PAD_ID(PC, 23),
- MX21_PAD_SSI2_FS = PAD_ID(PC, 24),
- MX21_PAD_SSI2_RXD = PAD_ID(PC, 25),
- MX21_PAD_SSI2_TXD = PAD_ID(PC, 26),
- MX21_PAD_SSI2_CLK = PAD_ID(PC, 27),
- MX21_PAD_SSI3_FS = PAD_ID(PC, 28),
- MX21_PAD_SSI3_RXD = PAD_ID(PC, 29),
- MX21_PAD_SSI3_TXD = PAD_ID(PC, 30),
- MX21_PAD_SSI3_CLK = PAD_ID(PC, 31),
- MX21_PAD_I2C_DATA = PAD_ID(PD, 17),
- MX21_PAD_I2C_CLK = PAD_ID(PD, 18),
- MX21_PAD_CSPI2_SS2 = PAD_ID(PD, 19),
- MX21_PAD_CSPI2_SS1 = PAD_ID(PD, 20),
- MX21_PAD_CSPI2_SS0 = PAD_ID(PD, 21),
- MX21_PAD_CSPI2_SCLK = PAD_ID(PD, 22),
- MX21_PAD_CSPI2_MISO = PAD_ID(PD, 23),
- MX21_PAD_CSPI2_MOSI = PAD_ID(PD, 24),
- MX21_PAD_CSPI1_RDY = PAD_ID(PD, 25),
- MX21_PAD_CSPI1_SS2 = PAD_ID(PD, 26),
- MX21_PAD_CSPI1_SS1 = PAD_ID(PD, 27),
- MX21_PAD_CSPI1_SS0 = PAD_ID(PD, 28),
- MX21_PAD_CSPI1_SCLK = PAD_ID(PD, 29),
- MX21_PAD_CSPI1_MISO = PAD_ID(PD, 30),
- MX21_PAD_CSPI1_MOSI = PAD_ID(PD, 31),
- MX21_PAD_TEST_WB2 = PAD_ID(PE, 0),
- MX21_PAD_TEST_WB1 = PAD_ID(PE, 1),
- MX21_PAD_TEST_WB0 = PAD_ID(PE, 2),
- MX21_PAD_UART2_CTS = PAD_ID(PE, 3),
- MX21_PAD_UART2_RTS = PAD_ID(PE, 4),
- MX21_PAD_PWMO = PAD_ID(PE, 5),
- MX21_PAD_UART2_TXD = PAD_ID(PE, 6),
- MX21_PAD_UART2_RXD = PAD_ID(PE, 7),
- MX21_PAD_UART3_TXD = PAD_ID(PE, 8),
- MX21_PAD_UART3_RXD = PAD_ID(PE, 9),
- MX21_PAD_UART3_CTS = PAD_ID(PE, 10),
- MX21_PAD_UART3_RTS = PAD_ID(PE, 11),
- MX21_PAD_UART1_TXD = PAD_ID(PE, 12),
- MX21_PAD_UART1_RXD = PAD_ID(PE, 13),
- MX21_PAD_UART1_CTS = PAD_ID(PE, 14),
- MX21_PAD_UART1_RTS = PAD_ID(PE, 15),
- MX21_PAD_RTCK = PAD_ID(PE, 16),
- MX21_PAD_RESET_OUT = PAD_ID(PE, 17),
- MX21_PAD_SD1_D0 = PAD_ID(PE, 18),
- MX21_PAD_SD1_D1 = PAD_ID(PE, 19),
- MX21_PAD_SD1_D2 = PAD_ID(PE, 20),
- MX21_PAD_SD1_D3 = PAD_ID(PE, 21),
- MX21_PAD_SD1_CMD = PAD_ID(PE, 22),
- MX21_PAD_SD1_CLK = PAD_ID(PE, 23),
- MX21_PAD_NFRB = PAD_ID(PF, 0),
- MX21_PAD_NFCE = PAD_ID(PF, 1),
- MX21_PAD_NFWP = PAD_ID(PF, 2),
- MX21_PAD_NFCLE = PAD_ID(PF, 3),
- MX21_PAD_NFALE = PAD_ID(PF, 4),
- MX21_PAD_NFRE = PAD_ID(PF, 5),
- MX21_PAD_NFWE = PAD_ID(PF, 6),
- MX21_PAD_NFIO0 = PAD_ID(PF, 7),
- MX21_PAD_NFIO1 = PAD_ID(PF, 8),
- MX21_PAD_NFIO2 = PAD_ID(PF, 9),
- MX21_PAD_NFIO3 = PAD_ID(PF, 10),
- MX21_PAD_NFIO4 = PAD_ID(PF, 11),
- MX21_PAD_NFIO5 = PAD_ID(PF, 12),
- MX21_PAD_NFIO6 = PAD_ID(PF, 13),
- MX21_PAD_NFIO7 = PAD_ID(PF, 14),
- MX21_PAD_CLKO = PAD_ID(PF, 15),
- MX21_PAD_RESERVED = PAD_ID(PF, 16),
- MX21_PAD_CS4 = PAD_ID(PF, 21),
- MX21_PAD_CS5 = PAD_ID(PF, 22),
-};
-
-/* Pad names for the pinmux subsystem */
-static const struct pinctrl_pin_desc imx21_pinctrl_pads[] = {
- IMX_PINCTRL_PIN(MX21_PAD_LSCLK),
- IMX_PINCTRL_PIN(MX21_PAD_LD0),
- IMX_PINCTRL_PIN(MX21_PAD_LD1),
- IMX_PINCTRL_PIN(MX21_PAD_LD2),
- IMX_PINCTRL_PIN(MX21_PAD_LD3),
- IMX_PINCTRL_PIN(MX21_PAD_LD4),
- IMX_PINCTRL_PIN(MX21_PAD_LD5),
- IMX_PINCTRL_PIN(MX21_PAD_LD6),
- IMX_PINCTRL_PIN(MX21_PAD_LD7),
- IMX_PINCTRL_PIN(MX21_PAD_LD8),
- IMX_PINCTRL_PIN(MX21_PAD_LD9),
- IMX_PINCTRL_PIN(MX21_PAD_LD10),
- IMX_PINCTRL_PIN(MX21_PAD_LD11),
- IMX_PINCTRL_PIN(MX21_PAD_LD12),
- IMX_PINCTRL_PIN(MX21_PAD_LD13),
- IMX_PINCTRL_PIN(MX21_PAD_LD14),
- IMX_PINCTRL_PIN(MX21_PAD_LD15),
- IMX_PINCTRL_PIN(MX21_PAD_LD16),
- IMX_PINCTRL_PIN(MX21_PAD_LD17),
- IMX_PINCTRL_PIN(MX21_PAD_REV),
- IMX_PINCTRL_PIN(MX21_PAD_CLS),
- IMX_PINCTRL_PIN(MX21_PAD_PS),
- IMX_PINCTRL_PIN(MX21_PAD_SPL_SPR),
- IMX_PINCTRL_PIN(MX21_PAD_HSYNC),
- IMX_PINCTRL_PIN(MX21_PAD_VSYNC),
- IMX_PINCTRL_PIN(MX21_PAD_CONTRAST),
- IMX_PINCTRL_PIN(MX21_PAD_OE_ACD),
- IMX_PINCTRL_PIN(MX21_PAD_SD2_D0),
- IMX_PINCTRL_PIN(MX21_PAD_SD2_D1),
- IMX_PINCTRL_PIN(MX21_PAD_SD2_D2),
- IMX_PINCTRL_PIN(MX21_PAD_SD2_D3),
- IMX_PINCTRL_PIN(MX21_PAD_SD2_CMD),
- IMX_PINCTRL_PIN(MX21_PAD_SD2_CLK),
- IMX_PINCTRL_PIN(MX21_PAD_CSI_D0),
- IMX_PINCTRL_PIN(MX21_PAD_CSI_D1),
- IMX_PINCTRL_PIN(MX21_PAD_CSI_D2),
- IMX_PINCTRL_PIN(MX21_PAD_CSI_D3),
- IMX_PINCTRL_PIN(MX21_PAD_CSI_D4),
- IMX_PINCTRL_PIN(MX21_PAD_CSI_MCLK),
- IMX_PINCTRL_PIN(MX21_PAD_CSI_PIXCLK),
- IMX_PINCTRL_PIN(MX21_PAD_CSI_D5),
- IMX_PINCTRL_PIN(MX21_PAD_CSI_D6),
- IMX_PINCTRL_PIN(MX21_PAD_CSI_D7),
- IMX_PINCTRL_PIN(MX21_PAD_CSI_VSYNC),
- IMX_PINCTRL_PIN(MX21_PAD_CSI_HSYNC),
- IMX_PINCTRL_PIN(MX21_PAD_USB_BYP),
- IMX_PINCTRL_PIN(MX21_PAD_USB_PWR),
- IMX_PINCTRL_PIN(MX21_PAD_USB_OC),
- IMX_PINCTRL_PIN(MX21_PAD_USBH_ON),
- IMX_PINCTRL_PIN(MX21_PAD_USBH1_FS),
- IMX_PINCTRL_PIN(MX21_PAD_USBH1_OE),
- IMX_PINCTRL_PIN(MX21_PAD_USBH1_TXDM),
- IMX_PINCTRL_PIN(MX21_PAD_USBH1_TXDP),
- IMX_PINCTRL_PIN(MX21_PAD_USBH1_RXDM),
- IMX_PINCTRL_PIN(MX21_PAD_USBH1_RXDP),
- IMX_PINCTRL_PIN(MX21_PAD_USBG_SDA),
- IMX_PINCTRL_PIN(MX21_PAD_USBG_SCL),
- IMX_PINCTRL_PIN(MX21_PAD_USBG_ON),
- IMX_PINCTRL_PIN(MX21_PAD_USBG_FS),
- IMX_PINCTRL_PIN(MX21_PAD_USBG_OE),
- IMX_PINCTRL_PIN(MX21_PAD_USBG_TXDM),
- IMX_PINCTRL_PIN(MX21_PAD_USBG_TXDP),
- IMX_PINCTRL_PIN(MX21_PAD_USBG_RXDM),
- IMX_PINCTRL_PIN(MX21_PAD_USBG_RXDP),
- IMX_PINCTRL_PIN(MX21_PAD_TOUT),
- IMX_PINCTRL_PIN(MX21_PAD_TIN),
- IMX_PINCTRL_PIN(MX21_PAD_SAP_FS),
- IMX_PINCTRL_PIN(MX21_PAD_SAP_RXD),
- IMX_PINCTRL_PIN(MX21_PAD_SAP_TXD),
- IMX_PINCTRL_PIN(MX21_PAD_SAP_CLK),
- IMX_PINCTRL_PIN(MX21_PAD_SSI1_FS),
- IMX_PINCTRL_PIN(MX21_PAD_SSI1_RXD),
- IMX_PINCTRL_PIN(MX21_PAD_SSI1_TXD),
- IMX_PINCTRL_PIN(MX21_PAD_SSI1_CLK),
- IMX_PINCTRL_PIN(MX21_PAD_SSI2_FS),
- IMX_PINCTRL_PIN(MX21_PAD_SSI2_RXD),
- IMX_PINCTRL_PIN(MX21_PAD_SSI2_TXD),
- IMX_PINCTRL_PIN(MX21_PAD_SSI2_CLK),
- IMX_PINCTRL_PIN(MX21_PAD_SSI3_FS),
- IMX_PINCTRL_PIN(MX21_PAD_SSI3_RXD),
- IMX_PINCTRL_PIN(MX21_PAD_SSI3_TXD),
- IMX_PINCTRL_PIN(MX21_PAD_SSI3_CLK),
- IMX_PINCTRL_PIN(MX21_PAD_I2C_DATA),
- IMX_PINCTRL_PIN(MX21_PAD_I2C_CLK),
- IMX_PINCTRL_PIN(MX21_PAD_CSPI2_SS2),
- IMX_PINCTRL_PIN(MX21_PAD_CSPI2_SS1),
- IMX_PINCTRL_PIN(MX21_PAD_CSPI2_SS0),
- IMX_PINCTRL_PIN(MX21_PAD_CSPI2_SCLK),
- IMX_PINCTRL_PIN(MX21_PAD_CSPI2_MISO),
- IMX_PINCTRL_PIN(MX21_PAD_CSPI2_MOSI),
- IMX_PINCTRL_PIN(MX21_PAD_CSPI1_RDY),
- IMX_PINCTRL_PIN(MX21_PAD_CSPI1_SS2),
- IMX_PINCTRL_PIN(MX21_PAD_CSPI1_SS1),
- IMX_PINCTRL_PIN(MX21_PAD_CSPI1_SS0),
- IMX_PINCTRL_PIN(MX21_PAD_CSPI1_SCLK),
- IMX_PINCTRL_PIN(MX21_PAD_CSPI1_MISO),
- IMX_PINCTRL_PIN(MX21_PAD_CSPI1_MOSI),
- IMX_PINCTRL_PIN(MX21_PAD_TEST_WB2),
- IMX_PINCTRL_PIN(MX21_PAD_TEST_WB1),
- IMX_PINCTRL_PIN(MX21_PAD_TEST_WB0),
- IMX_PINCTRL_PIN(MX21_PAD_UART2_CTS),
- IMX_PINCTRL_PIN(MX21_PAD_UART2_RTS),
- IMX_PINCTRL_PIN(MX21_PAD_PWMO),
- IMX_PINCTRL_PIN(MX21_PAD_UART2_TXD),
- IMX_PINCTRL_PIN(MX21_PAD_UART2_RXD),
- IMX_PINCTRL_PIN(MX21_PAD_UART3_TXD),
- IMX_PINCTRL_PIN(MX21_PAD_UART3_RXD),
- IMX_PINCTRL_PIN(MX21_PAD_UART3_CTS),
- IMX_PINCTRL_PIN(MX21_PAD_UART3_RTS),
- IMX_PINCTRL_PIN(MX21_PAD_UART1_TXD),
- IMX_PINCTRL_PIN(MX21_PAD_UART1_RXD),
- IMX_PINCTRL_PIN(MX21_PAD_UART1_CTS),
- IMX_PINCTRL_PIN(MX21_PAD_UART1_RTS),
- IMX_PINCTRL_PIN(MX21_PAD_RTCK),
- IMX_PINCTRL_PIN(MX21_PAD_RESET_OUT),
- IMX_PINCTRL_PIN(MX21_PAD_SD1_D0),
- IMX_PINCTRL_PIN(MX21_PAD_SD1_D1),
- IMX_PINCTRL_PIN(MX21_PAD_SD1_D2),
- IMX_PINCTRL_PIN(MX21_PAD_SD1_D3),
- IMX_PINCTRL_PIN(MX21_PAD_SD1_CMD),
- IMX_PINCTRL_PIN(MX21_PAD_SD1_CLK),
- IMX_PINCTRL_PIN(MX21_PAD_NFRB),
- IMX_PINCTRL_PIN(MX21_PAD_NFCE),
- IMX_PINCTRL_PIN(MX21_PAD_NFWP),
- IMX_PINCTRL_PIN(MX21_PAD_NFCLE),
- IMX_PINCTRL_PIN(MX21_PAD_NFALE),
- IMX_PINCTRL_PIN(MX21_PAD_NFRE),
- IMX_PINCTRL_PIN(MX21_PAD_NFWE),
- IMX_PINCTRL_PIN(MX21_PAD_NFIO0),
- IMX_PINCTRL_PIN(MX21_PAD_NFIO1),
- IMX_PINCTRL_PIN(MX21_PAD_NFIO2),
- IMX_PINCTRL_PIN(MX21_PAD_NFIO3),
- IMX_PINCTRL_PIN(MX21_PAD_NFIO4),
- IMX_PINCTRL_PIN(MX21_PAD_NFIO5),
- IMX_PINCTRL_PIN(MX21_PAD_NFIO6),
- IMX_PINCTRL_PIN(MX21_PAD_NFIO7),
- IMX_PINCTRL_PIN(MX21_PAD_CLKO),
- IMX_PINCTRL_PIN(MX21_PAD_RESERVED),
- IMX_PINCTRL_PIN(MX21_PAD_CS4),
- IMX_PINCTRL_PIN(MX21_PAD_CS5),
-};
-
-static struct imx1_pinctrl_soc_info imx21_pinctrl_info = {
- .pins = imx21_pinctrl_pads,
- .npins = ARRAY_SIZE(imx21_pinctrl_pads),
-};
-
-static int __init imx21_pinctrl_probe(struct platform_device *pdev)
-{
- return imx1_pinctrl_core_probe(pdev, &imx21_pinctrl_info);
-}
-
-static const struct of_device_id imx21_pinctrl_of_match[] = {
- { .compatible = "fsl,imx21-iomuxc", },
- { }
-};
-
-static struct platform_driver imx21_pinctrl_driver = {
- .driver = {
- .name = "imx21-pinctrl",
- .of_match_table = imx21_pinctrl_of_match,
- },
-};
-builtin_platform_driver_probe(imx21_pinctrl_driver, imx21_pinctrl_probe);
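The deleted imx21 driver above numbered pads with PAD_ID(port, pin) = port * 32 + pin, so a single pad index encodes both its port and its bit. A dependency-free round trip of that encoding; the PAD_PORT/PAD_PIN inverses are added here purely for illustration:

#define PAD_ID(port, pin)	((port) * 32 + (pin))
#define PAD_PORT(id)		((id) / 32)
#define PAD_PIN(id)		((id) % 32)

/* MX21_PAD_SD1_CLK was PAD_ID(PE, 23) with PE == 4, i.e. pad 151. */
_Static_assert(PAD_ID(4, 23) == 151, "pad encoding");
_Static_assert(PAD_PORT(151) == 4 && PAD_PIN(151) == 23, "round trip");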
diff --git a/drivers/pinctrl/intel/Kconfig b/drivers/pinctrl/intel/Kconfig
index 28e5f824ba45..fb1495bd77c4 100644
--- a/drivers/pinctrl/intel/Kconfig
+++ b/drivers/pinctrl/intel/Kconfig
@@ -55,6 +55,14 @@ config PINCTRL_INTEL
select GPIOLIB
select GPIOLIB_IRQCHIP
+config PINCTRL_ALDERLAKE
+ tristate "Intel Alder Lake pinctrl and GPIO driver"
+ depends on ACPI
+ select PINCTRL_INTEL
+ help
+ This pinctrl driver provides an interface that allows configuring
+ Intel Alder Lake PCH pins and using them as GPIOs.
+
config PINCTRL_BROXTON
tristate "Intel Broxton pinctrl and GPIO driver"
depends on ACPI
@@ -87,6 +95,14 @@ config PINCTRL_DENVERTON
This pinctrl driver provides an interface that allows configuring
Intel Denverton SoC pins and using them as GPIOs.
+config PINCTRL_ELKHARTLAKE
+ tristate "Intel Elkhart Lake SoC pinctrl and GPIO driver"
+ depends on ACPI
+ select PINCTRL_INTEL
+ help
+ This pinctrl driver provides an interface that allows configuring
+ Intel Elkhart Lake SoC pins and using them as GPIOs.
+
config PINCTRL_EMMITSBURG
tristate "Intel Emmitsburg pinctrl and GPIO driver"
depends on ACPI
@@ -119,6 +135,14 @@ config PINCTRL_JASPERLAKE
This pinctrl driver provides an interface that allows configuring
Intel Jasper Lake PCH pins and using them as GPIOs.
+config PINCTRL_LAKEFIELD
+ tristate "Intel Lakefield SoC pinctrl and GPIO driver"
+ depends on ACPI
+ select PINCTRL_INTEL
+ help
+ This pinctrl driver provides an interface that allows configuring
+ Intel Lakefield SoC pins and using them as GPIOs.
+
config PINCTRL_LEWISBURG
tristate "Intel Lewisburg pinctrl and GPIO driver"
depends on ACPI
@@ -143,4 +167,5 @@ config PINCTRL_TIGERLAKE
help
This pinctrl driver provides an interface that allows configuring
Intel Tiger Lake PCH pins and using them as GPIOs.
+
endif
diff --git a/drivers/pinctrl/intel/Makefile b/drivers/pinctrl/intel/Makefile
index 1c1c316f98b9..181ffcf34d62 100644
--- a/drivers/pinctrl/intel/Makefile
+++ b/drivers/pinctrl/intel/Makefile
@@ -6,14 +6,17 @@ obj-$(CONFIG_PINCTRL_CHERRYVIEW) += pinctrl-cherryview.o
obj-$(CONFIG_PINCTRL_LYNXPOINT) += pinctrl-lynxpoint.o
obj-$(CONFIG_PINCTRL_MERRIFIELD) += pinctrl-merrifield.o
obj-$(CONFIG_PINCTRL_INTEL) += pinctrl-intel.o
+obj-$(CONFIG_PINCTRL_ALDERLAKE) += pinctrl-alderlake.o
obj-$(CONFIG_PINCTRL_BROXTON) += pinctrl-broxton.o
obj-$(CONFIG_PINCTRL_CANNONLAKE) += pinctrl-cannonlake.o
obj-$(CONFIG_PINCTRL_CEDARFORK) += pinctrl-cedarfork.o
obj-$(CONFIG_PINCTRL_DENVERTON) += pinctrl-denverton.o
+obj-$(CONFIG_PINCTRL_ELKHARTLAKE) += pinctrl-elkhartlake.o
obj-$(CONFIG_PINCTRL_EMMITSBURG) += pinctrl-emmitsburg.o
obj-$(CONFIG_PINCTRL_GEMINILAKE) += pinctrl-geminilake.o
obj-$(CONFIG_PINCTRL_ICELAKE) += pinctrl-icelake.o
obj-$(CONFIG_PINCTRL_JASPERLAKE) += pinctrl-jasperlake.o
+obj-$(CONFIG_PINCTRL_LAKEFIELD) += pinctrl-lakefield.o
obj-$(CONFIG_PINCTRL_LEWISBURG) += pinctrl-lewisburg.o
obj-$(CONFIG_PINCTRL_SUNRISEPOINT) += pinctrl-sunrisepoint.o
obj-$(CONFIG_PINCTRL_TIGERLAKE) += pinctrl-tigerlake.o
diff --git a/drivers/pinctrl/intel/pinctrl-alderlake.c b/drivers/pinctrl/intel/pinctrl-alderlake.c
new file mode 100644
index 000000000000..efb664f12b5d
--- /dev/null
+++ b/drivers/pinctrl/intel/pinctrl-alderlake.c
@@ -0,0 +1,437 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Alder Lake PCH pinctrl/GPIO driver
+ *
+ * Copyright (C) 2020, Intel Corporation
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ */
+
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-intel.h"
+
+#define ADL_PAD_OWN 0x0a0
+#define ADL_PADCFGLOCK 0x110
+#define ADL_HOSTSW_OWN 0x150
+#define ADL_GPI_IS 0x200
+#define ADL_GPI_IE 0x220
+
+#define ADL_GPP(r, s, e, g) \
+ { \
+ .reg_num = (r), \
+ .base = (s), \
+ .size = ((e) - (s) + 1), \
+ .gpio_base = (g), \
+ }
+
+#define ADL_COMMUNITY(b, s, e, g) \
+ { \
+ .barno = (b), \
+ .padown_offset = ADL_PAD_OWN, \
+ .padcfglock_offset = ADL_PADCFGLOCK, \
+ .hostown_offset = ADL_HOSTSW_OWN, \
+ .is_offset = ADL_GPI_IS, \
+ .ie_offset = ADL_GPI_IE, \
+ .pin_base = (s), \
+ .npins = ((e) - (s) + 1), \
+ .gpps = (g), \
+ .ngpps = ARRAY_SIZE(g), \
+ }
+
+/* Alder Lake-S */
+static const struct pinctrl_pin_desc adls_pins[] = {
+ /* GPP_I */
+ PINCTRL_PIN(0, "EXT_PWR_GATEB"),
+ PINCTRL_PIN(1, "DDSP_HPD_1"),
+ PINCTRL_PIN(2, "DDSP_HPD_2"),
+ PINCTRL_PIN(3, "DDSP_HPD_3"),
+ PINCTRL_PIN(4, "DDSP_HPD_4"),
+ PINCTRL_PIN(5, "DDPB_CTRLCLK"),
+ PINCTRL_PIN(6, "DDPB_CTRLDATA"),
+ PINCTRL_PIN(7, "DDPC_CTRLCLK"),
+ PINCTRL_PIN(8, "DDPC_CTRLDATA"),
+ PINCTRL_PIN(9, "GSPI0_CS1B"),
+ PINCTRL_PIN(10, "GSPI1_CS1B"),
+ PINCTRL_PIN(11, "USB2_OCB_4"),
+ PINCTRL_PIN(12, "USB2_OCB_5"),
+ PINCTRL_PIN(13, "USB2_OCB_6"),
+ PINCTRL_PIN(14, "USB2_OCB_7"),
+ PINCTRL_PIN(15, "GSPI0_CS0B"),
+ PINCTRL_PIN(16, "GSPI0_CLK"),
+ PINCTRL_PIN(17, "GSPI0_MISO"),
+ PINCTRL_PIN(18, "GSPI0_MOSI"),
+ PINCTRL_PIN(19, "GSPI1_CS0B"),
+ PINCTRL_PIN(20, "GSPI1_CLK"),
+ PINCTRL_PIN(21, "GSPI1_MISO"),
+ PINCTRL_PIN(22, "GSPI1_MOSI"),
+ PINCTRL_PIN(23, "GSPI0_CLK_LOOPBK"),
+ PINCTRL_PIN(24, "GSPI1_CLK_LOOPBK"),
+ /* GPP_R */
+ PINCTRL_PIN(25, "HDA_BCLK"),
+ PINCTRL_PIN(26, "HDA_SYNC"),
+ PINCTRL_PIN(27, "HDA_SDO"),
+ PINCTRL_PIN(28, "HDA_SDI_0"),
+ PINCTRL_PIN(29, "HDA_RSTB"),
+ PINCTRL_PIN(30, "HDA_SDI_1"),
+ PINCTRL_PIN(31, "GPP_R_6"),
+ PINCTRL_PIN(32, "GPP_R_7"),
+ PINCTRL_PIN(33, "GPP_R_8"),
+ PINCTRL_PIN(34, "DDSP_HPD_A"),
+ PINCTRL_PIN(35, "DDSP_HPD_B"),
+ PINCTRL_PIN(36, "DDSP_HPD_C"),
+ PINCTRL_PIN(37, "ISH_SPI_CSB"),
+ PINCTRL_PIN(38, "ISH_SPI_CLK"),
+ PINCTRL_PIN(39, "ISH_SPI_MISO"),
+ PINCTRL_PIN(40, "ISH_SPI_MOSI"),
+ PINCTRL_PIN(41, "DDP1_CTRLCLK"),
+ PINCTRL_PIN(42, "DDP1_CTRLDATA"),
+ PINCTRL_PIN(43, "DDP2_CTRLCLK"),
+ PINCTRL_PIN(44, "DDP2_CTRLDATA"),
+ PINCTRL_PIN(45, "DDPA_CTRLCLK"),
+ PINCTRL_PIN(46, "DDPA_CTRLDATA"),
+ PINCTRL_PIN(47, "GSPI2_CLK_LOOPBK"),
+ /* GPP_J */
+ PINCTRL_PIN(48, "CNV_PA_BLANKING"),
+ PINCTRL_PIN(49, "CPU_C10_GATEB"),
+ PINCTRL_PIN(50, "CNV_BRI_DT"),
+ PINCTRL_PIN(51, "CNV_BRI_RSP"),
+ PINCTRL_PIN(52, "CNV_RGI_DT"),
+ PINCTRL_PIN(53, "CNV_RGI_RSP"),
+ PINCTRL_PIN(54, "CNV_MFUART2_RXD"),
+ PINCTRL_PIN(55, "CNV_MFUART2_TXD"),
+ PINCTRL_PIN(56, "SRCCLKREQB_16"),
+ PINCTRL_PIN(57, "SRCCLKREQB_17"),
+ PINCTRL_PIN(58, "BSSB_LS_RX"),
+ PINCTRL_PIN(59, "BSSB_LS_TX"),
+ /* vGPIO */
+ PINCTRL_PIN(60, "CNV_BTEN"),
+ PINCTRL_PIN(61, "CNV_BT_HOST_WAKEB"),
+ PINCTRL_PIN(62, "CNV_BT_IF_SELECT"),
+ PINCTRL_PIN(63, "vCNV_BT_UART_TXD"),
+ PINCTRL_PIN(64, "vCNV_BT_UART_RXD"),
+ PINCTRL_PIN(65, "vCNV_BT_UART_CTS_B"),
+ PINCTRL_PIN(66, "vCNV_BT_UART_RTS_B"),
+ PINCTRL_PIN(67, "vCNV_MFUART1_TXD"),
+ PINCTRL_PIN(68, "vCNV_MFUART1_RXD"),
+ PINCTRL_PIN(69, "vCNV_MFUART1_CTS_B"),
+ PINCTRL_PIN(70, "vCNV_MFUART1_RTS_B"),
+ PINCTRL_PIN(71, "vUART0_TXD"),
+ PINCTRL_PIN(72, "vUART0_RXD"),
+ PINCTRL_PIN(73, "vUART0_CTS_B"),
+ PINCTRL_PIN(74, "vUART0_RTS_B"),
+ PINCTRL_PIN(75, "vISH_UART0_TXD"),
+ PINCTRL_PIN(76, "vISH_UART0_RXD"),
+ PINCTRL_PIN(77, "vISH_UART0_CTS_B"),
+ PINCTRL_PIN(78, "vISH_UART0_RTS_B"),
+ PINCTRL_PIN(79, "vCNV_BT_I2S_BCLK"),
+ PINCTRL_PIN(80, "vCNV_BT_I2S_WS_SYNC"),
+ PINCTRL_PIN(81, "vCNV_BT_I2S_SDO"),
+ PINCTRL_PIN(82, "vCNV_BT_I2S_SDI"),
+ PINCTRL_PIN(83, "vI2S2_SCLK"),
+ PINCTRL_PIN(84, "vI2S2_SFRM"),
+ PINCTRL_PIN(85, "vI2S2_TXD"),
+ PINCTRL_PIN(86, "vI2S2_RXD"),
+ /* vGPIO_0 */
+ PINCTRL_PIN(87, "ESPI_USB_OCB_0"),
+ PINCTRL_PIN(88, "ESPI_USB_OCB_1"),
+ PINCTRL_PIN(89, "ESPI_USB_OCB_2"),
+ PINCTRL_PIN(90, "ESPI_USB_OCB_3"),
+ PINCTRL_PIN(91, "USB_CPU_OCB_0"),
+ PINCTRL_PIN(92, "USB_CPU_OCB_1"),
+ PINCTRL_PIN(93, "USB_CPU_OCB_2"),
+ PINCTRL_PIN(94, "USB_CPU_OCB_3"),
+ /* GPP_B */
+ PINCTRL_PIN(95, "PCIE_LNK_DOWN"),
+ PINCTRL_PIN(96, "ISH_UART0_RTSB"),
+ PINCTRL_PIN(97, "VRALERTB"),
+ PINCTRL_PIN(98, "CPU_GP_2"),
+ PINCTRL_PIN(99, "CPU_GP_3"),
+ PINCTRL_PIN(100, "SX_EXIT_HOLDOFFB"),
+ PINCTRL_PIN(101, "CLKOUT_48"),
+ PINCTRL_PIN(102, "ISH_GP_7"),
+ PINCTRL_PIN(103, "ISH_GP_0"),
+ PINCTRL_PIN(104, "ISH_GP_1"),
+ PINCTRL_PIN(105, "ISH_GP_2"),
+ PINCTRL_PIN(106, "I2S_MCLK"),
+ PINCTRL_PIN(107, "SLP_S0B"),
+ PINCTRL_PIN(108, "PLTRSTB"),
+ PINCTRL_PIN(109, "SPKR"),
+ PINCTRL_PIN(110, "ISH_GP_3"),
+ PINCTRL_PIN(111, "ISH_GP_4"),
+ PINCTRL_PIN(112, "ISH_GP_5"),
+ PINCTRL_PIN(113, "PMCALERTB"),
+ PINCTRL_PIN(114, "FUSA_DIAGTEST_EN"),
+ PINCTRL_PIN(115, "FUSA_DIAGTEST_MODE"),
+ PINCTRL_PIN(116, "GPP_B_21"),
+ PINCTRL_PIN(117, "GPP_B_22"),
+ PINCTRL_PIN(118, "SML1ALERTB"),
+ /* GPP_G */
+ PINCTRL_PIN(119, "GPP_G_0"),
+ PINCTRL_PIN(120, "GPP_G_1"),
+ PINCTRL_PIN(121, "DNX_FORCE_RELOAD"),
+ PINCTRL_PIN(122, "GMII_MDC_0"),
+ PINCTRL_PIN(123, "GMII_MDIO_0"),
+ PINCTRL_PIN(124, "SLP_DRAMB"),
+ PINCTRL_PIN(125, "GPP_G_6"),
+ PINCTRL_PIN(126, "GPP_G_7"),
+ /* GPP_H */
+ PINCTRL_PIN(127, "SRCCLKREQB_18"),
+ PINCTRL_PIN(128, "GPP_H_1"),
+ PINCTRL_PIN(129, "SRCCLKREQB_8"),
+ PINCTRL_PIN(130, "SRCCLKREQB_9"),
+ PINCTRL_PIN(131, "SRCCLKREQB_10"),
+ PINCTRL_PIN(132, "SRCCLKREQB_11"),
+ PINCTRL_PIN(133, "SRCCLKREQB_12"),
+ PINCTRL_PIN(134, "SRCCLKREQB_13"),
+ PINCTRL_PIN(135, "SRCCLKREQB_14"),
+ PINCTRL_PIN(136, "SRCCLKREQB_15"),
+ PINCTRL_PIN(137, "SML2CLK"),
+ PINCTRL_PIN(138, "SML2DATA"),
+ PINCTRL_PIN(139, "SML2ALERTB"),
+ PINCTRL_PIN(140, "SML3CLK"),
+ PINCTRL_PIN(141, "SML3DATA"),
+ PINCTRL_PIN(142, "SML3ALERTB"),
+ PINCTRL_PIN(143, "SML4CLK"),
+ PINCTRL_PIN(144, "SML4DATA"),
+ PINCTRL_PIN(145, "SML4ALERTB"),
+ PINCTRL_PIN(146, "ISH_I2C0_SDA"),
+ PINCTRL_PIN(147, "ISH_I2C0_SCL"),
+ PINCTRL_PIN(148, "ISH_I2C1_SDA"),
+ PINCTRL_PIN(149, "ISH_I2C1_SCL"),
+ PINCTRL_PIN(150, "TIME_SYNC_0"),
+ /* SPI0 */
+ PINCTRL_PIN(151, "SPI0_IO_2"),
+ PINCTRL_PIN(152, "SPI0_IO_3"),
+ PINCTRL_PIN(153, "SPI0_MOSI_IO_0"),
+ PINCTRL_PIN(154, "SPI0_MISO_IO_1"),
+ PINCTRL_PIN(155, "SPI0_TPM_CSB"),
+ PINCTRL_PIN(156, "SPI0_FLASH_0_CSB"),
+ PINCTRL_PIN(157, "SPI0_FLASH_1_CSB"),
+ PINCTRL_PIN(158, "SPI0_CLK"),
+ PINCTRL_PIN(159, "SPI0_CLK_LOOPBK"),
+ /* GPP_A */
+ PINCTRL_PIN(160, "ESPI_IO_0"),
+ PINCTRL_PIN(161, "ESPI_IO_1"),
+ PINCTRL_PIN(162, "ESPI_IO_2"),
+ PINCTRL_PIN(163, "ESPI_IO_3"),
+ PINCTRL_PIN(164, "ESPI_CS0B"),
+ PINCTRL_PIN(165, "ESPI_CLK"),
+ PINCTRL_PIN(166, "ESPI_RESETB"),
+ PINCTRL_PIN(167, "ESPI_CS1B"),
+ PINCTRL_PIN(168, "ESPI_CS2B"),
+ PINCTRL_PIN(169, "ESPI_CS3B"),
+ PINCTRL_PIN(170, "ESPI_ALERT0B"),
+ PINCTRL_PIN(171, "ESPI_ALERT1B"),
+ PINCTRL_PIN(172, "ESPI_ALERT2B"),
+ PINCTRL_PIN(173, "ESPI_ALERT3B"),
+ PINCTRL_PIN(174, "GPP_A_14"),
+ PINCTRL_PIN(175, "ESPI_CLK_LOOPBK"),
+ /* GPP_C */
+ PINCTRL_PIN(176, "SMBCLK"),
+ PINCTRL_PIN(177, "SMBDATA"),
+ PINCTRL_PIN(178, "SMBALERTB"),
+ PINCTRL_PIN(179, "ISH_UART0_RXD"),
+ PINCTRL_PIN(180, "ISH_UART0_TXD"),
+ PINCTRL_PIN(181, "SML0ALERTB"),
+ PINCTRL_PIN(182, "ISH_I2C2_SDA"),
+ PINCTRL_PIN(183, "ISH_I2C2_SCL"),
+ PINCTRL_PIN(184, "UART0_RXD"),
+ PINCTRL_PIN(185, "UART0_TXD"),
+ PINCTRL_PIN(186, "UART0_RTSB"),
+ PINCTRL_PIN(187, "UART0_CTSB"),
+ PINCTRL_PIN(188, "UART1_RXD"),
+ PINCTRL_PIN(189, "UART1_TXD"),
+ PINCTRL_PIN(190, "UART1_RTSB"),
+ PINCTRL_PIN(191, "UART1_CTSB"),
+ PINCTRL_PIN(192, "I2C0_SDA"),
+ PINCTRL_PIN(193, "I2C0_SCL"),
+ PINCTRL_PIN(194, "I2C1_SDA"),
+ PINCTRL_PIN(195, "I2C1_SCL"),
+ PINCTRL_PIN(196, "UART2_RXD"),
+ PINCTRL_PIN(197, "UART2_TXD"),
+ PINCTRL_PIN(198, "UART2_RTSB"),
+ PINCTRL_PIN(199, "UART2_CTSB"),
+ /* GPP_S */
+ PINCTRL_PIN(200, "SNDW1_CLK"),
+ PINCTRL_PIN(201, "SNDW1_DATA"),
+ PINCTRL_PIN(202, "SNDW2_CLK"),
+ PINCTRL_PIN(203, "SNDW2_DATA"),
+ PINCTRL_PIN(204, "SNDW3_CLK"),
+ PINCTRL_PIN(205, "SNDW3_DATA"),
+ PINCTRL_PIN(206, "SNDW4_CLK"),
+ PINCTRL_PIN(207, "SNDW4_DATA"),
+ /* GPP_E */
+ PINCTRL_PIN(208, "SATAXPCIE_0"),
+ PINCTRL_PIN(209, "SATAXPCIE_1"),
+ PINCTRL_PIN(210, "SATAXPCIE_2"),
+ PINCTRL_PIN(211, "CPU_GP_0"),
+ PINCTRL_PIN(212, "SATA_DEVSLP_0"),
+ PINCTRL_PIN(213, "SATA_DEVSLP_1"),
+ PINCTRL_PIN(214, "SATA_DEVSLP_2"),
+ PINCTRL_PIN(215, "CPU_GP_1"),
+ PINCTRL_PIN(216, "SATA_LEDB"),
+ PINCTRL_PIN(217, "USB2_OCB_0"),
+ PINCTRL_PIN(218, "USB2_OCB_1"),
+ PINCTRL_PIN(219, "USB2_OCB_2"),
+ PINCTRL_PIN(220, "USB2_OCB_3"),
+ PINCTRL_PIN(221, "SPI1_CSB"),
+ PINCTRL_PIN(222, "SPI1_CLK"),
+ PINCTRL_PIN(223, "SPI1_MISO_IO_1"),
+ PINCTRL_PIN(224, "SPI1_MOSI_IO_0"),
+ PINCTRL_PIN(225, "SPI1_IO_2"),
+ PINCTRL_PIN(226, "SPI1_IO_3"),
+ PINCTRL_PIN(227, "GPP_E_19"),
+ PINCTRL_PIN(228, "GPP_E_20"),
+ PINCTRL_PIN(229, "ISH_UART0_CTSB"),
+ PINCTRL_PIN(230, "SPI1_CLK_LOOPBK"),
+ /* GPP_K */
+ PINCTRL_PIN(231, "GSXDOUT"),
+ PINCTRL_PIN(232, "GSXSLOAD"),
+ PINCTRL_PIN(233, "GSXDIN"),
+ PINCTRL_PIN(234, "GSXSRESETB"),
+ PINCTRL_PIN(235, "GSXCLK"),
+ PINCTRL_PIN(236, "ADR_COMPLETE"),
+ PINCTRL_PIN(237, "GPP_K_6"),
+ PINCTRL_PIN(238, "GPP_K_7"),
+ PINCTRL_PIN(239, "CORE_VID_0"),
+ PINCTRL_PIN(240, "CORE_VID_1"),
+ PINCTRL_PIN(241, "GPP_K_10"),
+ PINCTRL_PIN(242, "GPP_K_11"),
+ PINCTRL_PIN(243, "SYS_PWROK"),
+ PINCTRL_PIN(244, "SYS_RESETB"),
+ PINCTRL_PIN(245, "MLK_RSTB"),
+ /* GPP_F */
+ PINCTRL_PIN(246, "SATAXPCIE_3"),
+ PINCTRL_PIN(247, "SATAXPCIE_4"),
+ PINCTRL_PIN(248, "SATAXPCIE_5"),
+ PINCTRL_PIN(249, "SATAXPCIE_6"),
+ PINCTRL_PIN(250, "SATAXPCIE_7"),
+ PINCTRL_PIN(251, "SATA_DEVSLP_3"),
+ PINCTRL_PIN(252, "SATA_DEVSLP_4"),
+ PINCTRL_PIN(253, "SATA_DEVSLP_5"),
+ PINCTRL_PIN(254, "SATA_DEVSLP_6"),
+ PINCTRL_PIN(255, "SATA_DEVSLP_7"),
+ PINCTRL_PIN(256, "SATA_SCLOCK"),
+ PINCTRL_PIN(257, "SATA_SLOAD"),
+ PINCTRL_PIN(258, "SATA_SDATAOUT1"),
+ PINCTRL_PIN(259, "SATA_SDATAOUT0"),
+ PINCTRL_PIN(260, "PS_ONB"),
+ PINCTRL_PIN(261, "M2_SKT2_CFG_0"),
+ PINCTRL_PIN(262, "M2_SKT2_CFG_1"),
+ PINCTRL_PIN(263, "M2_SKT2_CFG_2"),
+ PINCTRL_PIN(264, "M2_SKT2_CFG_3"),
+ PINCTRL_PIN(265, "L_VDDEN"),
+ PINCTRL_PIN(266, "L_BKLTEN"),
+ PINCTRL_PIN(267, "L_BKLTCTL"),
+ PINCTRL_PIN(268, "VNN_CTRL"),
+ PINCTRL_PIN(269, "GPP_F_23"),
+ /* GPP_D */
+ PINCTRL_PIN(270, "SRCCLKREQB_0"),
+ PINCTRL_PIN(271, "SRCCLKREQB_1"),
+ PINCTRL_PIN(272, "SRCCLKREQB_2"),
+ PINCTRL_PIN(273, "SRCCLKREQB_3"),
+ PINCTRL_PIN(274, "SML1CLK"),
+ PINCTRL_PIN(275, "I2S2_SFRM"),
+ PINCTRL_PIN(276, "I2S2_TXD"),
+ PINCTRL_PIN(277, "I2S2_RXD"),
+ PINCTRL_PIN(278, "I2S2_SCLK"),
+ PINCTRL_PIN(279, "SML0CLK"),
+ PINCTRL_PIN(280, "SML0DATA"),
+ PINCTRL_PIN(281, "SRCCLKREQB_4"),
+ PINCTRL_PIN(282, "SRCCLKREQB_5"),
+ PINCTRL_PIN(283, "SRCCLKREQB_6"),
+ PINCTRL_PIN(284, "SRCCLKREQB_7"),
+ PINCTRL_PIN(285, "SML1DATA"),
+ PINCTRL_PIN(286, "GSPI3_CS0B"),
+ PINCTRL_PIN(287, "GSPI3_CLK"),
+ PINCTRL_PIN(288, "GSPI3_MISO"),
+ PINCTRL_PIN(289, "GSPI3_MOSI"),
+ PINCTRL_PIN(290, "UART3_RXD"),
+ PINCTRL_PIN(291, "UART3_TXD"),
+ PINCTRL_PIN(292, "UART3_RTSB"),
+ PINCTRL_PIN(293, "UART3_CTSB"),
+ PINCTRL_PIN(294, "GSPI3_CLK_LOOPBK"),
+ /* JTAG */
+ PINCTRL_PIN(295, "JTAG_TDO"),
+ PINCTRL_PIN(296, "JTAGX"),
+ PINCTRL_PIN(297, "PRDYB"),
+ PINCTRL_PIN(298, "PREQB"),
+ PINCTRL_PIN(299, "JTAG_TDI"),
+ PINCTRL_PIN(300, "JTAG_TMS"),
+ PINCTRL_PIN(301, "JTAG_TCK"),
+ PINCTRL_PIN(302, "DBG_PMODE"),
+ PINCTRL_PIN(303, "CPU_TRSTB"),
+};
+
+static const struct intel_padgroup adls_community0_gpps[] = {
+ ADL_GPP(0, 0, 24, 0), /* GPP_I */
+ ADL_GPP(1, 25, 47, 32), /* GPP_R */
+ ADL_GPP(2, 48, 59, 64), /* GPP_J */
+ ADL_GPP(3, 60, 86, 96), /* vGPIO */
+ ADL_GPP(4, 87, 94, 128), /* vGPIO_0 */
+};
+
+static const struct intel_padgroup adls_community1_gpps[] = {
+ ADL_GPP(0, 95, 118, 160), /* GPP_B */
+ ADL_GPP(1, 119, 126, 192), /* GPP_G */
+ ADL_GPP(2, 127, 150, 224), /* GPP_H */
+};
+
+static const struct intel_padgroup adls_community3_gpps[] = {
+ ADL_GPP(0, 151, 159, INTEL_GPIO_BASE_NOMAP), /* SPI0 */
+ ADL_GPP(1, 160, 175, 256), /* GPP_A */
+ ADL_GPP(2, 176, 199, 288), /* GPP_C */
+};
+
+static const struct intel_padgroup adls_community4_gpps[] = {
+ ADL_GPP(0, 200, 207, 320), /* GPP_S */
+ ADL_GPP(1, 208, 230, 352), /* GPP_E */
+ ADL_GPP(2, 231, 245, 384), /* GPP_K */
+ ADL_GPP(3, 246, 269, 416), /* GPP_F */
+};
+
+static const struct intel_padgroup adls_community5_gpps[] = {
+ ADL_GPP(0, 270, 294, 448), /* GPP_D */
+ ADL_GPP(1, 295, 303, INTEL_GPIO_BASE_NOMAP), /* JTAG */
+};
+
+static const struct intel_community adls_communities[] = {
+ ADL_COMMUNITY(0, 0, 94, adls_community0_gpps),
+ ADL_COMMUNITY(1, 95, 150, adls_community1_gpps),
+ ADL_COMMUNITY(2, 151, 199, adls_community3_gpps),
+ ADL_COMMUNITY(3, 200, 269, adls_community4_gpps),
+ ADL_COMMUNITY(4, 270, 303, adls_community5_gpps),
+};
+
+static const struct intel_pinctrl_soc_data adls_soc_data = {
+ .pins = adls_pins,
+ .npins = ARRAY_SIZE(adls_pins),
+ .communities = adls_communities,
+ .ncommunities = ARRAY_SIZE(adls_communities),
+};
+
+static const struct acpi_device_id adl_pinctrl_acpi_match[] = {
+ { "INTC1056", (kernel_ulong_t)&adls_soc_data },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, adl_pinctrl_acpi_match);
+
+static INTEL_PINCTRL_PM_OPS(adl_pinctrl_pm_ops);
+
+static struct platform_driver adl_pinctrl_driver = {
+ .probe = intel_pinctrl_probe_by_hid,
+ .driver = {
+ .name = "alderlake-pinctrl",
+ .acpi_match_table = adl_pinctrl_acpi_match,
+ .pm = &adl_pinctrl_pm_ops,
+ },
+};
+module_platform_driver(adl_pinctrl_driver);
+
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_DESCRIPTION("Intel Alder Lake PCH pinctrl/GPIO driver");
+MODULE_LICENSE("GPL v2");
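The ADL_GPP() entries above describe inclusive pad ranges (size = e - s + 1) plus a GPIO base, with groups spaced 32 GPIOs apart and INTEL_GPIO_BASE_NOMAP marking groups that expose no GPIOs. A simplified, hypothetical lookup over such a table, just to show how a pad number maps to a GPIO number:

#include <stddef.h>

#define EXAMPLE_NOMAP	(-1)	/* stand-in for INTEL_GPIO_BASE_NOMAP */

struct example_gpp {
	int base;		/* first pad in the group */
	int size;		/* (e) - (s) + 1, as in ADL_GPP() */
	int gpio_base;		/* first GPIO number, or EXAMPLE_NOMAP */
};

static int example_pad_to_gpio(const struct example_gpp *gpps, size_t ngpps,
			       int pad)
{
	size_t i;

	for (i = 0; i < ngpps; i++) {
		const struct example_gpp *g = &gpps[i];

		if (pad < g->base || pad >= g->base + g->size)
			continue;
		if (g->gpio_base == EXAMPLE_NOMAP)
			return -1;	/* group not exposed as GPIOs */
		return g->gpio_base + (pad - g->base);
	}

	return -1;			/* pad not covered by any group */
}

/* With adls_community0_gpps above, pad 30 (GPP_R_5) maps to 32 + 5 = 37. */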
diff --git a/drivers/pinctrl/intel/pinctrl-elkhartlake.c b/drivers/pinctrl/intel/pinctrl-elkhartlake.c
new file mode 100644
index 000000000000..4702bdfa10e3
--- /dev/null
+++ b/drivers/pinctrl/intel/pinctrl-elkhartlake.c
@@ -0,0 +1,513 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Elkhart Lake PCH pinctrl/GPIO driver
+ *
+ * Copyright (C) 2019, Intel Corporation
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ */
+
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-intel.h"
+
+#define EHL_PAD_OWN 0x020
+#define EHL_PADCFGLOCK 0x080
+#define EHL_HOSTSW_OWN 0x0b0
+#define EHL_GPI_IS 0x100
+#define EHL_GPI_IE 0x120
+
+#define EHL_GPP(r, s, e) \
+ { \
+ .reg_num = (r), \
+ .base = (s), \
+ .size = ((e) - (s) + 1), \
+ }
+
+#define EHL_COMMUNITY(s, e, g) \
+ { \
+ .padown_offset = EHL_PAD_OWN, \
+ .padcfglock_offset = EHL_PADCFGLOCK, \
+ .hostown_offset = EHL_HOSTSW_OWN, \
+ .is_offset = EHL_GPI_IS, \
+ .ie_offset = EHL_GPI_IE, \
+ .pin_base = (s), \
+ .npins = ((e) - (s) + 1), \
+ .gpps = (g), \
+ .ngpps = ARRAY_SIZE(g), \
+ }
+
+/* Elkhart Lake */
+static const struct pinctrl_pin_desc ehl_community0_pins[] = {
+ /* GPP_B */
+ PINCTRL_PIN(0, "CORE_VID_0"),
+ PINCTRL_PIN(1, "CORE_VID_1"),
+ PINCTRL_PIN(2, "VRALERTB"),
+ PINCTRL_PIN(3, "CPU_GP_2"),
+ PINCTRL_PIN(4, "CPU_GP_3"),
+ PINCTRL_PIN(5, "OSE_I2C0_SCLK"),
+ PINCTRL_PIN(6, "OSE_I2C0_SDAT"),
+ PINCTRL_PIN(7, "OSE_I2C1_SCLK"),
+ PINCTRL_PIN(8, "OSE_I2C1_SDAT"),
+ PINCTRL_PIN(9, "I2C5_SDA"),
+ PINCTRL_PIN(10, "I2C5_SCL"),
+ PINCTRL_PIN(11, "PMCALERTB"),
+ PINCTRL_PIN(12, "SLP_S0B"),
+ PINCTRL_PIN(13, "PLTRSTB"),
+ PINCTRL_PIN(14, "SPKR"),
+ PINCTRL_PIN(15, "GSPI0_CS0B"),
+ PINCTRL_PIN(16, "GSPI0_CLK"),
+ PINCTRL_PIN(17, "GSPI0_MISO"),
+ PINCTRL_PIN(18, "GSPI0_MOSI"),
+ PINCTRL_PIN(19, "GSPI1_CS0B"),
+ PINCTRL_PIN(20, "GSPI1_CLK"),
+ PINCTRL_PIN(21, "GSPI1_MISO"),
+ PINCTRL_PIN(22, "GSPI1_MOSI"),
+ PINCTRL_PIN(23, "GPPC_B_23"),
+ PINCTRL_PIN(24, "GSPI0_CLK_LOOPBK"),
+ PINCTRL_PIN(25, "GSPI1_CLK_LOOPBK"),
+ /* GPP_T */
+ PINCTRL_PIN(26, "OSE_QEPA_2"),
+ PINCTRL_PIN(27, "OSE_QEPB_2"),
+ PINCTRL_PIN(28, "OSE_QEPI_2"),
+ PINCTRL_PIN(29, "GPPC_T_3"),
+ PINCTRL_PIN(30, "RGMII0_INT"),
+ PINCTRL_PIN(31, "RGMII0_RESETB"),
+ PINCTRL_PIN(32, "RGMII0_AUXTS"),
+ PINCTRL_PIN(33, "RGMII0_PPS"),
+ PINCTRL_PIN(34, "USB2_OCB_2"),
+ PINCTRL_PIN(35, "OSE_HSUART2_EN"),
+ PINCTRL_PIN(36, "OSE_HSUART2_RE"),
+ PINCTRL_PIN(37, "USB2_OCB_3"),
+ PINCTRL_PIN(38, "OSE_UART2_RXD"),
+ PINCTRL_PIN(39, "OSE_UART2_TXD"),
+ PINCTRL_PIN(40, "OSE_UART2_RTSB"),
+ PINCTRL_PIN(41, "OSE_UART2_CTSB"),
+ /* GPP_G */
+ PINCTRL_PIN(42, "SD3_CMD"),
+ PINCTRL_PIN(43, "SD3_D0"),
+ PINCTRL_PIN(44, "SD3_D1"),
+ PINCTRL_PIN(45, "SD3_D2"),
+ PINCTRL_PIN(46, "SD3_D3"),
+ PINCTRL_PIN(47, "SD3_CDB"),
+ PINCTRL_PIN(48, "SD3_CLK"),
+ PINCTRL_PIN(49, "I2S2_SCLK"),
+ PINCTRL_PIN(50, "I2S2_SFRM"),
+ PINCTRL_PIN(51, "I2S2_TXD"),
+ PINCTRL_PIN(52, "I2S2_RXD"),
+ PINCTRL_PIN(53, "I2S3_SCLK"),
+ PINCTRL_PIN(54, "I2S3_SFRM"),
+ PINCTRL_PIN(55, "I2S3_TXD"),
+ PINCTRL_PIN(56, "I2S3_RXD"),
+ PINCTRL_PIN(57, "ESPI_IO_0"),
+ PINCTRL_PIN(58, "ESPI_IO_1"),
+ PINCTRL_PIN(59, "ESPI_IO_2"),
+ PINCTRL_PIN(60, "ESPI_IO_3"),
+ PINCTRL_PIN(61, "I2S1_SCLK"),
+ PINCTRL_PIN(62, "ESPI_CSB"),
+ PINCTRL_PIN(63, "ESPI_CLK"),
+ PINCTRL_PIN(64, "ESPI_RESETB"),
+ PINCTRL_PIN(65, "SD3_WP"),
+ PINCTRL_PIN(66, "ESPI_CLK_LOOPBK"),
+};
+
+static const struct intel_padgroup ehl_community0_gpps[] = {
+ EHL_GPP(0, 0, 25), /* GPP_B */
+ EHL_GPP(1, 26, 41), /* GPP_T */
+ EHL_GPP(2, 42, 66), /* GPP_G */
+};
+
+static const struct intel_community ehl_community0[] = {
+ EHL_COMMUNITY(0, 66, ehl_community0_gpps),
+};
+
+static const struct intel_pinctrl_soc_data ehl_community0_soc_data = {
+ .uid = "0",
+ .pins = ehl_community0_pins,
+ .npins = ARRAY_SIZE(ehl_community0_pins),
+ .communities = ehl_community0,
+ .ncommunities = ARRAY_SIZE(ehl_community0),
+};
+
+static const struct pinctrl_pin_desc ehl_community1_pins[] = {
+ /* GPP_V */
+ PINCTRL_PIN(0, "EMMC_CMD"),
+ PINCTRL_PIN(1, "EMMC_DATA0"),
+ PINCTRL_PIN(2, "EMMC_DATA1"),
+ PINCTRL_PIN(3, "EMMC_DATA2"),
+ PINCTRL_PIN(4, "EMMC_DATA3"),
+ PINCTRL_PIN(5, "EMMC_DATA4"),
+ PINCTRL_PIN(6, "EMMC_DATA5"),
+ PINCTRL_PIN(7, "EMMC_DATA6"),
+ PINCTRL_PIN(8, "EMMC_DATA7"),
+ PINCTRL_PIN(9, "EMMC_RCLK"),
+ PINCTRL_PIN(10, "EMMC_CLK"),
+ PINCTRL_PIN(11, "EMMC_RESETB"),
+ PINCTRL_PIN(12, "OSE_TGPIO0"),
+ PINCTRL_PIN(13, "OSE_TGPIO1"),
+ PINCTRL_PIN(14, "OSE_TGPIO2"),
+ PINCTRL_PIN(15, "OSE_TGPIO3"),
+ /* GPP_H */
+ PINCTRL_PIN(16, "RGMII1_INT"),
+ PINCTRL_PIN(17, "RGMII1_RESETB"),
+ PINCTRL_PIN(18, "RGMII1_AUXTS"),
+ PINCTRL_PIN(19, "RGMII1_PPS"),
+ PINCTRL_PIN(20, "I2C2_SDA"),
+ PINCTRL_PIN(21, "I2C2_SCL"),
+ PINCTRL_PIN(22, "I2C3_SDA"),
+ PINCTRL_PIN(23, "I2C3_SCL"),
+ PINCTRL_PIN(24, "I2C4_SDA"),
+ PINCTRL_PIN(25, "I2C4_SCL"),
+ PINCTRL_PIN(26, "SRCCLKREQB_4"),
+ PINCTRL_PIN(27, "SRCCLKREQB_5"),
+ PINCTRL_PIN(28, "OSE_UART1_RXD"),
+ PINCTRL_PIN(29, "OSE_UART1_TXD"),
+ PINCTRL_PIN(30, "GPPC_H_14"),
+ PINCTRL_PIN(31, "OSE_UART1_CTSB"),
+ PINCTRL_PIN(32, "PCIE_LNK_DOWN"),
+ PINCTRL_PIN(33, "SD_PWR_EN_B"),
+ PINCTRL_PIN(34, "CPU_C10_GATEB"),
+ PINCTRL_PIN(35, "GPPC_H_19"),
+ PINCTRL_PIN(36, "OSE_PWM7"),
+ PINCTRL_PIN(37, "OSE_HSUART1_DE"),
+ PINCTRL_PIN(38, "OSE_HSUART1_RE"),
+ PINCTRL_PIN(39, "OSE_HSUART1_EN"),
+ /* GPP_D */
+ PINCTRL_PIN(40, "OSE_QEPA_0"),
+ PINCTRL_PIN(41, "OSE_QEPB_0"),
+ PINCTRL_PIN(42, "OSE_QEPI_0"),
+ PINCTRL_PIN(43, "OSE_PWM6"),
+ PINCTRL_PIN(44, "OSE_PWM2"),
+ PINCTRL_PIN(45, "SRCCLKREQB_0"),
+ PINCTRL_PIN(46, "SRCCLKREQB_1"),
+ PINCTRL_PIN(47, "SRCCLKREQB_2"),
+ PINCTRL_PIN(48, "SRCCLKREQB_3"),
+ PINCTRL_PIN(49, "OSE_SPI0_CSB"),
+ PINCTRL_PIN(50, "OSE_SPI0_SCLK"),
+ PINCTRL_PIN(51, "OSE_SPI0_MISO"),
+ PINCTRL_PIN(52, "OSE_SPI0_MOSI"),
+ PINCTRL_PIN(53, "OSE_QEPA_1"),
+ PINCTRL_PIN(54, "OSE_QEPB_1"),
+ PINCTRL_PIN(55, "OSE_PWM3"),
+ PINCTRL_PIN(56, "OSE_QEPI_1"),
+ PINCTRL_PIN(57, "OSE_PWM4"),
+ PINCTRL_PIN(58, "OSE_PWM5"),
+ PINCTRL_PIN(59, "I2S_MCLK1_OUT"),
+ PINCTRL_PIN(60, "GSPI2_CLK_LOOPBK"),
+ /* GPP_U */
+ PINCTRL_PIN(61, "RGMII2_INT"),
+ PINCTRL_PIN(62, "RGMII2_RESETB"),
+ PINCTRL_PIN(63, "RGMII2_PPS"),
+ PINCTRL_PIN(64, "RGMII2_AUXTS"),
+ PINCTRL_PIN(65, "ISI_SPIM_CS"),
+ PINCTRL_PIN(66, "ISI_SPIM_SCLK"),
+ PINCTRL_PIN(67, "ISI_SPIM_MISO"),
+ PINCTRL_PIN(68, "OSE_QEPA_3"),
+ PINCTRL_PIN(69, "ISI_SPIS_CS"),
+ PINCTRL_PIN(70, "ISI_SPIS_SCLK"),
+ PINCTRL_PIN(71, "ISI_SPIS_MISO"),
+ PINCTRL_PIN(72, "OSE_QEPB_3"),
+ PINCTRL_PIN(73, "ISI_CHX_OKNOK_0"),
+ PINCTRL_PIN(74, "ISI_CHX_OKNOK_1"),
+ PINCTRL_PIN(75, "ISI_CHX_RLY_SWTCH"),
+ PINCTRL_PIN(76, "ISI_CHX_PMIC_EN"),
+ PINCTRL_PIN(77, "ISI_OKNOK_0"),
+ PINCTRL_PIN(78, "ISI_OKNOK_1"),
+ PINCTRL_PIN(79, "ISI_ALERT"),
+ PINCTRL_PIN(80, "OSE_QEPI_3"),
+ PINCTRL_PIN(81, "GSPI3_CLK_LOOPBK"),
+ PINCTRL_PIN(82, "GSPI4_CLK_LOOPBK"),
+ PINCTRL_PIN(83, "GSPI5_CLK_LOOPBK"),
+ PINCTRL_PIN(84, "GSPI6_CLK_LOOPBK"),
+ /* vGPIO */
+ PINCTRL_PIN(85, "CNV_BTEN"),
+ PINCTRL_PIN(86, "CNV_BT_HOST_WAKEB"),
+ PINCTRL_PIN(87, "CNV_BT_IF_SELECT"),
+ PINCTRL_PIN(88, "vCNV_BT_UART_TXD"),
+ PINCTRL_PIN(89, "vCNV_BT_UART_RXD"),
+ PINCTRL_PIN(90, "vCNV_BT_UART_CTS_B"),
+ PINCTRL_PIN(91, "vCNV_BT_UART_RTS_B"),
+ PINCTRL_PIN(92, "vCNV_MFUART1_TXD"),
+ PINCTRL_PIN(93, "vCNV_MFUART1_RXD"),
+ PINCTRL_PIN(94, "vCNV_MFUART1_CTS_B"),
+ PINCTRL_PIN(95, "vCNV_MFUART1_RTS_B"),
+ PINCTRL_PIN(96, "vUART0_TXD"),
+ PINCTRL_PIN(97, "vUART0_RXD"),
+ PINCTRL_PIN(98, "vUART0_CTS_B"),
+ PINCTRL_PIN(99, "vUART0_RTS_B"),
+ PINCTRL_PIN(100, "vOSE_UART0_TXD"),
+ PINCTRL_PIN(101, "vOSE_UART0_RXD"),
+ PINCTRL_PIN(102, "vOSE_UART0_CTS_B"),
+ PINCTRL_PIN(103, "vOSE_UART0_RTS_B"),
+ PINCTRL_PIN(104, "vCNV_BT_I2S_BCLK"),
+ PINCTRL_PIN(105, "vCNV_BT_I2S_WS_SYNC"),
+ PINCTRL_PIN(106, "vCNV_BT_I2S_SDO"),
+ PINCTRL_PIN(107, "vCNV_BT_I2S_SDI"),
+ PINCTRL_PIN(108, "vI2S2_SCLK"),
+ PINCTRL_PIN(109, "vI2S2_SFRM"),
+ PINCTRL_PIN(110, "vI2S2_TXD"),
+ PINCTRL_PIN(111, "vI2S2_RXD"),
+ PINCTRL_PIN(112, "vSD3_CD_B"),
+};
+
+static const struct intel_padgroup ehl_community1_gpps[] = {
+ EHL_GPP(0, 0, 15), /* GPP_V */
+ EHL_GPP(1, 16, 39), /* GPP_H */
+ EHL_GPP(2, 40, 60), /* GPP_D */
+ EHL_GPP(3, 61, 84), /* GPP_U */
+ EHL_GPP(4, 85, 112), /* vGPIO */
+};
+
+static const struct intel_community ehl_community1[] = {
+ EHL_COMMUNITY(0, 112, ehl_community1_gpps),
+};
+
+static const struct intel_pinctrl_soc_data ehl_community1_soc_data = {
+ .uid = "1",
+ .pins = ehl_community1_pins,
+ .npins = ARRAY_SIZE(ehl_community1_pins),
+ .communities = ehl_community1,
+ .ncommunities = ARRAY_SIZE(ehl_community1),
+};
+
+static const struct pinctrl_pin_desc ehl_community3_pins[] = {
+ /* CPU */
+ PINCTRL_PIN(0, "HDACPU_SDI"),
+ PINCTRL_PIN(1, "HDACPU_SDO"),
+ PINCTRL_PIN(2, "HDACPU_BCLK"),
+ PINCTRL_PIN(3, "PM_SYNC"),
+ PINCTRL_PIN(4, "PECI"),
+ PINCTRL_PIN(5, "CPUPWRGD"),
+ PINCTRL_PIN(6, "THRMTRIPB"),
+ PINCTRL_PIN(7, "PLTRST_CPUB"),
+ PINCTRL_PIN(8, "PM_DOWN"),
+ PINCTRL_PIN(9, "TRIGGER_IN"),
+ PINCTRL_PIN(10, "TRIGGER_OUT"),
+ PINCTRL_PIN(11, "UFS_RESETB"),
+ PINCTRL_PIN(12, "CLKOUT_CPURTC"),
+ PINCTRL_PIN(13, "VCCST_OVERRIDE"),
+ PINCTRL_PIN(14, "C10_WAKE"),
+ PINCTRL_PIN(15, "PROCHOTB"),
+ PINCTRL_PIN(16, "CATERRB"),
+ /* GPP_S */
+ PINCTRL_PIN(17, "UFS_REF_CLK_0"),
+ PINCTRL_PIN(18, "UFS_REF_CLK_1"),
+ /* GPP_A */
+ PINCTRL_PIN(19, "RGMII0_TXDATA_3"),
+ PINCTRL_PIN(20, "RGMII0_TXDATA_2"),
+ PINCTRL_PIN(21, "RGMII0_TXDATA_1"),
+ PINCTRL_PIN(22, "RGMII0_TXDATA_0"),
+ PINCTRL_PIN(23, "RGMII0_TXCLK"),
+ PINCTRL_PIN(24, "RGMII0_TXCTL"),
+ PINCTRL_PIN(25, "RGMII0_RXCLK"),
+ PINCTRL_PIN(26, "RGMII0_RXDATA_3"),
+ PINCTRL_PIN(27, "RGMII0_RXDATA_2"),
+ PINCTRL_PIN(28, "RGMII0_RXDATA_1"),
+ PINCTRL_PIN(29, "RGMII0_RXDATA_0"),
+ PINCTRL_PIN(30, "RGMII1_TXDATA_3"),
+ PINCTRL_PIN(31, "RGMII1_TXDATA_2"),
+ PINCTRL_PIN(32, "RGMII1_TXDATA_1"),
+ PINCTRL_PIN(33, "RGMII1_TXDATA_0"),
+ PINCTRL_PIN(34, "RGMII1_TXCLK"),
+ PINCTRL_PIN(35, "RGMII1_TXCTL"),
+ PINCTRL_PIN(36, "RGMII1_RXCLK"),
+ PINCTRL_PIN(37, "RGMII1_RXCTL"),
+ PINCTRL_PIN(38, "RGMII1_RXDATA_3"),
+ PINCTRL_PIN(39, "RGMII1_RXDATA_2"),
+ PINCTRL_PIN(40, "RGMII1_RXDATA_1"),
+ PINCTRL_PIN(41, "RGMII1_RXDATA_0"),
+ PINCTRL_PIN(42, "RGMII0_RXCTL"),
+ /* vGPIO_3 */
+ PINCTRL_PIN(43, "ESPI_USB_OCB_0"),
+ PINCTRL_PIN(44, "ESPI_USB_OCB_1"),
+ PINCTRL_PIN(45, "ESPI_USB_OCB_2"),
+ PINCTRL_PIN(46, "ESPI_USB_OCB_3"),
+};
+
+static const struct intel_padgroup ehl_community3_gpps[] = {
+ EHL_GPP(0, 0, 16), /* CPU */
+ EHL_GPP(1, 17, 18), /* GPP_S */
+ EHL_GPP(2, 19, 42), /* GPP_A */
+ EHL_GPP(3, 43, 46), /* vGPIO_3 */
+};
+
+static const struct intel_community ehl_community3[] = {
+ EHL_COMMUNITY(0, 46, ehl_community3_gpps),
+};
+
+static const struct intel_pinctrl_soc_data ehl_community3_soc_data = {
+ .uid = "3",
+ .pins = ehl_community3_pins,
+ .npins = ARRAY_SIZE(ehl_community3_pins),
+ .communities = ehl_community3,
+ .ncommunities = ARRAY_SIZE(ehl_community3),
+};
+
+static const struct pinctrl_pin_desc ehl_community4_pins[] = {
+ /* GPP_C */
+ PINCTRL_PIN(0, "SMBCLK"),
+ PINCTRL_PIN(1, "SMBDATA"),
+ PINCTRL_PIN(2, "OSE_PWM0"),
+ PINCTRL_PIN(3, "RGMII0_MDC"),
+ PINCTRL_PIN(4, "RGMII0_MDIO"),
+ PINCTRL_PIN(5, "OSE_PWM1"),
+ PINCTRL_PIN(6, "RGMII1_MDC"),
+ PINCTRL_PIN(7, "RGMII1_MDIO"),
+ PINCTRL_PIN(8, "OSE_TGPIO4"),
+ PINCTRL_PIN(9, "OSE_HSUART0_EN"),
+ PINCTRL_PIN(10, "OSE_TGPIO5"),
+ PINCTRL_PIN(11, "OSE_HSUART0_RE"),
+ PINCTRL_PIN(12, "OSE_UART0_RXD"),
+ PINCTRL_PIN(13, "OSE_UART0_TXD"),
+ PINCTRL_PIN(14, "OSE_UART0_RTSB"),
+ PINCTRL_PIN(15, "OSE_UART0_CTSB"),
+ PINCTRL_PIN(16, "RGMII2_MDIO"),
+ PINCTRL_PIN(17, "RGMII2_MDC"),
+ PINCTRL_PIN(18, "OSE_I2C4_SDAT"),
+ PINCTRL_PIN(19, "OSE_I2C4_SCLK"),
+ PINCTRL_PIN(20, "OSE_UART4_RXD"),
+ PINCTRL_PIN(21, "OSE_UART4_TXD"),
+ PINCTRL_PIN(22, "OSE_UART4_RTSB"),
+ PINCTRL_PIN(23, "OSE_UART4_CTSB"),
+ /* GPP_F */
+ PINCTRL_PIN(24, "CNV_BRI_DT"),
+ PINCTRL_PIN(25, "CNV_BRI_RSP"),
+ PINCTRL_PIN(26, "CNV_RGI_DT"),
+ PINCTRL_PIN(27, "CNV_RGI_RSP"),
+ PINCTRL_PIN(28, "CNV_RF_RESET_B"),
+ PINCTRL_PIN(29, "EMMC_HIP_MON"),
+ PINCTRL_PIN(30, "CNV_PA_BLANKING"),
+ PINCTRL_PIN(31, "OSE_I2S1_SCLK"),
+ PINCTRL_PIN(32, "I2S_MCLK2_INOUT"),
+ PINCTRL_PIN(33, "BOOTMPC"),
+ PINCTRL_PIN(34, "OSE_I2S1_SFRM"),
+ PINCTRL_PIN(35, "GPPC_F_11"),
+ PINCTRL_PIN(36, "GSXDOUT"),
+ PINCTRL_PIN(37, "GSXSLOAD"),
+ PINCTRL_PIN(38, "GSXDIN"),
+ PINCTRL_PIN(39, "GSXSRESETB"),
+ PINCTRL_PIN(40, "GSXCLK"),
+ PINCTRL_PIN(41, "GPPC_F_17"),
+ PINCTRL_PIN(42, "OSE_I2S1_TXD"),
+ PINCTRL_PIN(43, "OSE_I2S1_RXD"),
+ PINCTRL_PIN(44, "EXT_PWR_GATEB"),
+ PINCTRL_PIN(45, "EXT_PWR_GATE2B"),
+ PINCTRL_PIN(46, "VNN_CTRL"),
+ PINCTRL_PIN(47, "V1P05_CTRL"),
+ PINCTRL_PIN(48, "GPPF_CLK_LOOPBACK"),
+ /* HVCMOS */
+ PINCTRL_PIN(49, "L_BKLTEN"),
+ PINCTRL_PIN(50, "L_BKLTCTL"),
+ PINCTRL_PIN(51, "L_VDDEN"),
+ PINCTRL_PIN(52, "SYS_PWROK"),
+ PINCTRL_PIN(53, "SYS_RESETB"),
+ PINCTRL_PIN(54, "MLK_RSTB"),
+ /* GPP_E */
+ PINCTRL_PIN(55, "SATA_LEDB"),
+ PINCTRL_PIN(56, "GPPC_E_1"),
+ PINCTRL_PIN(57, "GPPC_E_2"),
+ PINCTRL_PIN(58, "DDSP_HPD_B"),
+ PINCTRL_PIN(59, "SATA_DEVSLP_0"),
+ PINCTRL_PIN(60, "DDPB_CTRLDATA"),
+ PINCTRL_PIN(61, "GPPC_E_6"),
+ PINCTRL_PIN(62, "DDPB_CTRLCLK"),
+ PINCTRL_PIN(63, "GPPC_E_8"),
+ PINCTRL_PIN(64, "USB2_OCB_0"),
+ PINCTRL_PIN(65, "GPPC_E_10"),
+ PINCTRL_PIN(66, "GPPC_E_11"),
+ PINCTRL_PIN(67, "GPPC_E_12"),
+ PINCTRL_PIN(68, "GPPC_E_13"),
+ PINCTRL_PIN(69, "DDSP_HPD_A"),
+ PINCTRL_PIN(70, "OSE_I2S0_RXD"),
+ PINCTRL_PIN(71, "OSE_I2S0_TXD"),
+ PINCTRL_PIN(72, "DDSP_HPD_C"),
+ PINCTRL_PIN(73, "DDPA_CTRLDATA"),
+ PINCTRL_PIN(74, "DDPA_CTRLCLK"),
+ PINCTRL_PIN(75, "OSE_I2S0_SCLK"),
+ PINCTRL_PIN(76, "OSE_I2S0_SFRM"),
+ PINCTRL_PIN(77, "DDPC_CTRLDATA"),
+ PINCTRL_PIN(78, "DDPC_CTRLCLK"),
+ PINCTRL_PIN(79, "SPI1_CLK_LOOPBK"),
+};
+
+static const struct intel_padgroup ehl_community4_gpps[] = {
+ EHL_GPP(0, 0, 23), /* GPP_C */
+ EHL_GPP(1, 24, 48), /* GPP_F */
+ EHL_GPP(2, 49, 54), /* HVCMOS */
+ EHL_GPP(3, 55, 79), /* GPP_E */
+};
+
+static const struct intel_community ehl_community4[] = {
+ EHL_COMMUNITY(0, 79, ehl_community4_gpps),
+};
+
+static const struct intel_pinctrl_soc_data ehl_community4_soc_data = {
+ .uid = "4",
+ .pins = ehl_community4_pins,
+ .npins = ARRAY_SIZE(ehl_community4_pins),
+ .communities = ehl_community4,
+ .ncommunities = ARRAY_SIZE(ehl_community4),
+};
+
+static const struct pinctrl_pin_desc ehl_community5_pins[] = {
+ /* GPP_R */
+ PINCTRL_PIN(0, "HDA_BCLK"),
+ PINCTRL_PIN(1, "HDA_SYNC"),
+ PINCTRL_PIN(2, "HDA_SDO"),
+ PINCTRL_PIN(3, "HDA_SDI_0"),
+ PINCTRL_PIN(4, "HDA_RSTB"),
+ PINCTRL_PIN(5, "HDA_SDI_1"),
+ PINCTRL_PIN(6, "GPP_R_6"),
+ PINCTRL_PIN(7, "GPP_R_7"),
+};
+
+static const struct intel_padgroup ehl_community5_gpps[] = {
+ EHL_GPP(0, 0, 7), /* GPP_R */
+};
+
+static const struct intel_community ehl_community5[] = {
+ EHL_COMMUNITY(0, 7, ehl_community5_gpps),
+};
+
+static const struct intel_pinctrl_soc_data ehl_community5_soc_data = {
+ .uid = "5",
+ .pins = ehl_community5_pins,
+ .npins = ARRAY_SIZE(ehl_community5_pins),
+ .communities = ehl_community5,
+ .ncommunities = ARRAY_SIZE(ehl_community5),
+};
+
+static const struct intel_pinctrl_soc_data *ehl_soc_data_array[] = {
+ &ehl_community0_soc_data,
+ &ehl_community1_soc_data,
+ &ehl_community3_soc_data,
+ &ehl_community4_soc_data,
+ &ehl_community5_soc_data,
+ NULL
+};
+
+static const struct acpi_device_id ehl_pinctrl_acpi_match[] = {
+ { "INTC1020", (kernel_ulong_t)ehl_soc_data_array },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, ehl_pinctrl_acpi_match);
+
+static INTEL_PINCTRL_PM_OPS(ehl_pinctrl_pm_ops);
+
+static struct platform_driver ehl_pinctrl_driver = {
+ .probe = intel_pinctrl_probe_by_uid,
+ .driver = {
+ .name = "elkhartlake-pinctrl",
+ .acpi_match_table = ehl_pinctrl_acpi_match,
+ .pm = &ehl_pinctrl_pm_ops,
+ },
+};
+
+module_platform_driver(ehl_pinctrl_driver);
+
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_DESCRIPTION("Intel Elkhart Lake PCH pinctrl/GPIO driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/intel/pinctrl-lakefield.c b/drivers/pinctrl/intel/pinctrl-lakefield.c
new file mode 100644
index 000000000000..3c6283c4827f
--- /dev/null
+++ b/drivers/pinctrl/intel/pinctrl-lakefield.c
@@ -0,0 +1,375 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Lakefield PCH pinctrl/GPIO driver
+ *
+ * Copyright (C) 2020, Intel Corporation
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ */
+
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-intel.h"
+
+#define LKF_PAD_OWN 0x020
+#define LKF_PADCFGLOCK 0x070
+#define LKF_HOSTSW_OWN 0x090
+#define LKF_GPI_IS 0x100
+#define LKF_GPI_IE 0x110
+
+#define LKF_GPP(r, s, e, g) \
+ { \
+ .reg_num = (r), \
+ .base = (s), \
+ .size = ((e) - (s) + 1), \
+ .gpio_base = (g), \
+ }
+
+#define LKF_COMMUNITY(b, s, e, g) \
+ { \
+ .barno = (b), \
+ .padown_offset = LKF_PAD_OWN, \
+ .padcfglock_offset = LKF_PADCFGLOCK, \
+ .hostown_offset = LKF_HOSTSW_OWN, \
+ .is_offset = LKF_GPI_IS, \
+ .ie_offset = LKF_GPI_IE, \
+ .pin_base = (s), \
+ .npins = ((e) - (s) + 1), \
+ .gpps = (g), \
+ .ngpps = ARRAY_SIZE(g), \
+ }
+
+/* Lakefield */
+static const struct pinctrl_pin_desc lkf_pins[] = {
+ /* EAST */
+ PINCTRL_PIN(0, "MDSI_A_TE0"),
+ PINCTRL_PIN(1, "MDSI_A_TE1"),
+ PINCTRL_PIN(2, "PANEL0_AVDD_EN"),
+ PINCTRL_PIN(3, "PANEL0_BKLTEN"),
+ PINCTRL_PIN(4, "PANEL0_BKLTCTL"),
+ PINCTRL_PIN(5, "PANEL1_AVDD_EN"),
+ PINCTRL_PIN(6, "PANEL1_BKLTEN"),
+ PINCTRL_PIN(7, "PANEL1_BKLTCTL"),
+ PINCTRL_PIN(8, "THC0_SPI1_IO_0"),
+ PINCTRL_PIN(9, "THC0_SPI1_IO_1"),
+ PINCTRL_PIN(10, "THC0_SPI1_IO_2"),
+ PINCTRL_PIN(11, "THC0_SPI1_IO_3"),
+ PINCTRL_PIN(12, "THC0_SPI1_CSB"),
+ PINCTRL_PIN(13, "THC0_SPI1_CLK"),
+ PINCTRL_PIN(14, "THC0_SPI1_RESETB"),
+ PINCTRL_PIN(15, "THC0_SPI1_CLK_FB"),
+ PINCTRL_PIN(16, "SPI_TOUCH_CLK_FB"),
+ PINCTRL_PIN(17, "THC1_SPI2_IO_0"),
+ PINCTRL_PIN(18, "THC1_SPI2_IO_1"),
+ PINCTRL_PIN(19, "THC1_SPI2_IO_2"),
+ PINCTRL_PIN(20, "THC1_SPI2_IO_3"),
+ PINCTRL_PIN(21, "THC1_SPI2_CSB"),
+ PINCTRL_PIN(22, "THC1_SPI2_CLK"),
+ PINCTRL_PIN(23, "THC1_SPI2_RESETB"),
+ PINCTRL_PIN(24, "THC1_SPI2_CLK_FB"),
+ PINCTRL_PIN(25, "eSPI_IO_0"),
+ PINCTRL_PIN(26, "eSPI_IO_1"),
+ PINCTRL_PIN(27, "eSPI_IO_2"),
+ PINCTRL_PIN(28, "eSPI_IO_3"),
+ PINCTRL_PIN(29, "eSPI_CSB"),
+ PINCTRL_PIN(30, "eSPI_RESETB"),
+ PINCTRL_PIN(31, "eSPI_CLK"),
+ PINCTRL_PIN(32, "eSPI_CLK_FB"),
+ PINCTRL_PIN(33, "FAST_SPI0_IO_0"),
+ PINCTRL_PIN(34, "FAST_SPI0_IO_1"),
+ PINCTRL_PIN(35, "FAST_SPI0_IO_2"),
+ PINCTRL_PIN(36, "FAST_SPI0_IO_3"),
+ PINCTRL_PIN(37, "FAST_SPI0_CSB_0"),
+ PINCTRL_PIN(38, "FAST_SPI0_CSB_2"),
+ PINCTRL_PIN(39, "FAST_SPI0_CLK"),
+ PINCTRL_PIN(40, "FAST_SPI_CLK_FB"),
+ PINCTRL_PIN(41, "FAST_SPI0_CSB_1"),
+ PINCTRL_PIN(42, "ISH_GP_12"),
+ PINCTRL_PIN(43, "THC0_SPI1_INTB"),
+ PINCTRL_PIN(44, "THC1_SPI2_INTB"),
+ PINCTRL_PIN(45, "PANEL0_AVEE_EN"),
+ PINCTRL_PIN(46, "PANEL0_VIO_EN"),
+ PINCTRL_PIN(47, "PANEL1_AVEE_EN"),
+ PINCTRL_PIN(48, "PANEL1_VIO_EN"),
+ PINCTRL_PIN(49, "PANEL0_RESET"),
+ PINCTRL_PIN(50, "PANEL1_RESET"),
+ PINCTRL_PIN(51, "ISH_GP_15"),
+ PINCTRL_PIN(52, "ISH_GP_16"),
+ PINCTRL_PIN(53, "ISH_GP_17"),
+ PINCTRL_PIN(54, "ISH_GP_18"),
+ PINCTRL_PIN(55, "ISH_GP_19"),
+ PINCTRL_PIN(56, "ISH_GP_20"),
+ PINCTRL_PIN(57, "ISH_GP_21"),
+ PINCTRL_PIN(58, "ISH_GP_22"),
+ PINCTRL_PIN(59, "ISH_GP_23"),
+ /* NORTHWEST */
+ PINCTRL_PIN(60, "MCSI_GPIO_0"),
+ PINCTRL_PIN(61, "MCSI_GPIO_1"),
+ PINCTRL_PIN(62, "MCSI_GPIO_2"),
+ PINCTRL_PIN(63, "MCSI_GPIO_3"),
+ PINCTRL_PIN(64, "LPSS_I2C0_SDA"),
+ PINCTRL_PIN(65, "LPSS_I2C0_SCL"),
+ PINCTRL_PIN(66, "LPSS_I2C1_SDA"),
+ PINCTRL_PIN(67, "LPSS_I2C1_SCL"),
+ PINCTRL_PIN(68, "LPSS_I2C2_SDA"),
+ PINCTRL_PIN(69, "LPSS_I2C2_SCL"),
+ PINCTRL_PIN(70, "LPSS_I2C3_SDA"),
+ PINCTRL_PIN(71, "LPSS_I2C3_SCL"),
+ PINCTRL_PIN(72, "LPSS_I2C4_SDA"),
+ PINCTRL_PIN(73, "LPSS_I2C4_SCL"),
+ PINCTRL_PIN(74, "LPSS_I2C5_SDA"),
+ PINCTRL_PIN(75, "LPSS_I2C5_SCL"),
+ PINCTRL_PIN(76, "LPSS_I3C0_SDA"),
+ PINCTRL_PIN(77, "LPSS_I3C0_SCL"),
+ PINCTRL_PIN(78, "LPSS_I3C0_SCL_FB"),
+ PINCTRL_PIN(79, "LPSS_I3C1_SDA"),
+ PINCTRL_PIN(80, "LPSS_I3C1_SCL"),
+ PINCTRL_PIN(81, "LPSS_I3C1_SCL_FB"),
+ PINCTRL_PIN(82, "ISH_I2C0_SDA"),
+ PINCTRL_PIN(83, "ISH_I2C0_SCL"),
+ PINCTRL_PIN(84, "ISH_I2C1_SCL"),
+ PINCTRL_PIN(85, "ISH_I2C1_SDA"),
+ PINCTRL_PIN(86, "DBG_PMODE"),
+ PINCTRL_PIN(87, "BJTAG_TCK"),
+ PINCTRL_PIN(88, "BJTAG_TDI"),
+ PINCTRL_PIN(89, "BJTAGX"),
+ PINCTRL_PIN(90, "BPREQ_B"),
+ PINCTRL_PIN(91, "BJTAG_TMS"),
+ PINCTRL_PIN(92, "BPRDY_B"),
+ PINCTRL_PIN(93, "BJTAG_TDO"),
+ PINCTRL_PIN(94, "BJTAG_TRST_B_0"),
+ PINCTRL_PIN(95, "ISH_I3C0_SDA"),
+ PINCTRL_PIN(96, "ISH_I3C0_SCL"),
+ PINCTRL_PIN(97, "ISH_I3C0_SCL_FB"),
+ PINCTRL_PIN(98, "AVS_I2S_BCLK_0"),
+ PINCTRL_PIN(99, "AVS_I2S_MCLK_0"),
+ PINCTRL_PIN(100, "AVS_I2S_SFRM_0"),
+ PINCTRL_PIN(101, "AVS_I2S_RXD_0"),
+ PINCTRL_PIN(102, "AVS_I2S_TXD_0"),
+ PINCTRL_PIN(103, "AVS_I2S_BCLK_1"),
+ PINCTRL_PIN(104, "AVS_I2S_SFRM_1"),
+ PINCTRL_PIN(105, "AVS_I2S_RXD_1"),
+ PINCTRL_PIN(106, "AVS_I2S_TXD_1"),
+ PINCTRL_PIN(107, "AVS_I2S_BCLK_2"),
+ PINCTRL_PIN(108, "AVS_I2S_SFRM_2"),
+ PINCTRL_PIN(109, "AVS_I2S_RXD_2"),
+ PINCTRL_PIN(110, "AVS_I2S_TXD_2"),
+ PINCTRL_PIN(111, "AVS_I2S_BCLK_3"),
+ PINCTRL_PIN(112, "AVS_I2S_SFRM_3"),
+ PINCTRL_PIN(113, "AVS_I2S_RXD_3"),
+ PINCTRL_PIN(114, "AVS_I2S_TXD_3"),
+ PINCTRL_PIN(115, "AVS_I2S_BCLK_4"),
+ PINCTRL_PIN(116, "AVS_I2S_SFRM_4"),
+ PINCTRL_PIN(117, "AVS_I2S_RXD_4"),
+ PINCTRL_PIN(118, "AVS_I2S_TXD_4"),
+ PINCTRL_PIN(119, "AVS_I2S_SFRM_5"),
+ PINCTRL_PIN(120, "AVS_I2S_RXD_5"),
+ PINCTRL_PIN(121, "AVS_I2S_TXD_5"),
+ PINCTRL_PIN(122, "AVS_I2S_BCLK_5"),
+ PINCTRL_PIN(123, "AVS_SNDW_CLK_0"),
+ PINCTRL_PIN(124, "AVS_SNDW_DATA_0"),
+ PINCTRL_PIN(125, "AVS_SNDW_CLK_1"),
+ PINCTRL_PIN(126, "AVS_SNDW_DATA_1"),
+ PINCTRL_PIN(127, "AVS_SNDW_CLK_2"),
+ PINCTRL_PIN(128, "AVS_SNDW_DATA_2"),
+ PINCTRL_PIN(129, "AVS_SNDW_CLK_3"),
+ PINCTRL_PIN(130, "AVS_SNDW_DATA_3"),
+ PINCTRL_PIN(131, "VISA_PTI_CH0_D0_internal"),
+ PINCTRL_PIN(132, "VISA_PTI_CH0_D1_internal"),
+ PINCTRL_PIN(133, "VISA_PTI_CH0_D2_internal"),
+ PINCTRL_PIN(134, "VISA_PTI_CH0_D3_internal"),
+ PINCTRL_PIN(135, "VISA_PTI_CH0_D4_internal"),
+ PINCTRL_PIN(136, "VISA_PTI_CH0_D5_internal"),
+ PINCTRL_PIN(137, "VISA_PTI_CH0_D6_internal"),
+ PINCTRL_PIN(138, "VISA_PTI_CH0_D7_internal"),
+ PINCTRL_PIN(139, "VISA_PTI_CH0_CLK_internal"),
+ PINCTRL_PIN(140, "VISA_PTI_CH1_D0_internal"),
+ PINCTRL_PIN(141, "VISA_PTI_CH1_D1_internal"),
+ PINCTRL_PIN(142, "VISA_PTI_CH1_D2_internal"),
+ PINCTRL_PIN(143, "VISA_PTI_CH1_D3_internal"),
+ PINCTRL_PIN(144, "VISA_PTI_CH1_D4_internal"),
+ PINCTRL_PIN(145, "VISA_PTI_CH1_D5_internal"),
+ PINCTRL_PIN(146, "VISA_PTI_CH1_D6_internal"),
+ PINCTRL_PIN(147, "VISA_PTI_CH1_D7_internal"),
+ PINCTRL_PIN(148, "VISA_PTI_CH1_CLK_internal"),
+ /* WEST */
+ PINCTRL_PIN(149, "LPSS_UART0_TXD"),
+ PINCTRL_PIN(150, "LPSS_UART0_RXD"),
+ PINCTRL_PIN(151, "LPSS_UART0_RTS_B"),
+ PINCTRL_PIN(152, "LPSS_UART0_CTS_B"),
+ PINCTRL_PIN(153, "LPSS_UART1_RXD"),
+ PINCTRL_PIN(154, "LPSS_UART1_TXD"),
+ PINCTRL_PIN(155, "LPSS_UART1_RTS_B"),
+ PINCTRL_PIN(156, "LPSS_UART1_CTS_B"),
+ PINCTRL_PIN(157, "ISH_UART0_RXD"),
+ PINCTRL_PIN(158, "ISH_UART0_TXD"),
+ PINCTRL_PIN(159, "ISH_UART0_RTSB"),
+ PINCTRL_PIN(160, "ISH_UART0_CTSB"),
+ PINCTRL_PIN(161, "LPSS_SSP_0_CLK"),
+ PINCTRL_PIN(162, "LPSS_SSP_0_CLK_FB"),
+ PINCTRL_PIN(163, "LPSS_SSP_0_FS0"),
+ PINCTRL_PIN(164, "LPSS_SSP_0_FS1"),
+ PINCTRL_PIN(165, "LPSS_SSP_0_RXD"),
+ PINCTRL_PIN(166, "LPSS_SSP_0_TXD"),
+ PINCTRL_PIN(167, "ISH_UART1_RXD"),
+ PINCTRL_PIN(168, "ISH_UART1_TXD"),
+ PINCTRL_PIN(169, "ISH_UART1_RTSB"),
+ PINCTRL_PIN(170, "ISH_UART1_CTSB"),
+ PINCTRL_PIN(171, "LPSS_SSP_1_FS0"),
+ PINCTRL_PIN(172, "LPSS_SSP_1_FS1"),
+ PINCTRL_PIN(173, "LPSS_SSP_1_CLK"),
+ PINCTRL_PIN(174, "LPSS_SSP_1_CLK_FB"),
+ PINCTRL_PIN(175, "LPSS_SSP_1_RXD"),
+ PINCTRL_PIN(176, "LPSS_SSP_1_TXD"),
+ PINCTRL_PIN(177, "LPSS_SSP_2_CLK"),
+ PINCTRL_PIN(178, "LPSS_SSP_2_CLK_FB"),
+ PINCTRL_PIN(179, "LPSS_SSP_2_FS0"),
+ PINCTRL_PIN(180, "LPSS_SSP_2_FS1"),
+ PINCTRL_PIN(181, "LPSS_SSP_2_RXD"),
+ PINCTRL_PIN(182, "LPSS_SSP_2_TXD"),
+ PINCTRL_PIN(183, "ISH_SPI0_CSB0"),
+ PINCTRL_PIN(184, "ISH_SPI0_CSB1"),
+ PINCTRL_PIN(185, "ISH_SPI0_CLK"),
+ PINCTRL_PIN(186, "ISH_SPI0_MISO"),
+ PINCTRL_PIN(187, "ISH_SPI0_MOSI"),
+ PINCTRL_PIN(188, "ISH_GP_0"),
+ PINCTRL_PIN(189, "ISH_GP_1"),
+ PINCTRL_PIN(190, "ISH_GP_2"),
+ PINCTRL_PIN(191, "ISH_GP_13"),
+ PINCTRL_PIN(192, "ISH_GP_3"),
+ PINCTRL_PIN(193, "ISH_GP_4"),
+ PINCTRL_PIN(194, "ISH_GP_5"),
+ PINCTRL_PIN(195, "ISH_GP_6"),
+ PINCTRL_PIN(196, "ISH_GP_7"),
+ PINCTRL_PIN(197, "ISH_GP_8"),
+ PINCTRL_PIN(198, "ISH_GP_9"),
+ PINCTRL_PIN(199, "ISH_GP_10"),
+ PINCTRL_PIN(200, "ISH_GP_11"),
+ PINCTRL_PIN(201, "ISH_GP_14"),
+ PINCTRL_PIN(202, "ISH_GP_15"),
+ PINCTRL_PIN(203, "ISH_GP_22"),
+ PINCTRL_PIN(204, "ISH_GP_12"),
+ PINCTRL_PIN(205, "ISH_GP_30_USB_OC"),
+ PINCTRL_PIN(206, "LPDDRx_RESET0_n"),
+ PINCTRL_PIN(207, "UFS_RESET_B"),
+ PINCTRL_PIN(208, "UFS_REFCLK0"),
+ PINCTRL_PIN(209, "EMMC_SD_CLK"),
+ PINCTRL_PIN(210, "EMMC_SD_D0"),
+ PINCTRL_PIN(211, "EMMC_SD_D1"),
+ PINCTRL_PIN(212, "EMMC_SD_D2"),
+ PINCTRL_PIN(213, "EMMC_SD_D3"),
+ PINCTRL_PIN(214, "EMMC_D4"),
+ PINCTRL_PIN(215, "EMMC_D5"),
+ PINCTRL_PIN(216, "EMMC_D6"),
+ PINCTRL_PIN(217, "EMMC_D7"),
+ PINCTRL_PIN(218, "EMMC_SD_CMD"),
+ PINCTRL_PIN(219, "EMMC_RCLK"),
+ PINCTRL_PIN(220, "SDCARD_CLK_FB"),
+ PINCTRL_PIN(221, "SD_Virtual_GPIO"),
+ PINCTRL_PIN(222, "OSC_CLK_OUT_NFC"),
+ PINCTRL_PIN(223, "OSC_CLK_OUT_CAM_0"),
+ PINCTRL_PIN(224, "OSC_CLK_OUT_CAM_1"),
+ PINCTRL_PIN(225, "OSC_CLK_OUT_CAM_2"),
+ PINCTRL_PIN(226, "OSC_CLK_OUT_CAM_3"),
+ PINCTRL_PIN(227, "PCIe_LINKDOWN"),
+ PINCTRL_PIN(228, "NFC_CLK_REQ"),
+ PINCTRL_PIN(229, "PCIE_CLKREQ_N_DEV2"),
+ PINCTRL_PIN(230, "PCIE_CLKREQ_N_DEV3"),
+ PINCTRL_PIN(231, "PCIE_CLKREQ_N_DEV4"),
+ PINCTRL_PIN(232, "PCIE_CLKREQ_N_DEV1"),
+ PINCTRL_PIN(233, "PCIE_CLKREQ_N_DEV0"),
+ PINCTRL_PIN(234, "GMBUS_1_SCL"),
+ PINCTRL_PIN(235, "GMBUS_1_SDA"),
+ PINCTRL_PIN(236, "GMBUS_0_SCL"),
+ PINCTRL_PIN(237, "GMBUS_0_SDA"),
+ /* SOUTHEAST */
+ PINCTRL_PIN(238, "COMPUTE_PMIC_SVID_DATA"),
+ PINCTRL_PIN(239, "COMPUTE_PMIC_SVID_CLK"),
+ PINCTRL_PIN(240, "COMPUTE_PMIC_SVID_ALERT_B"),
+ PINCTRL_PIN(241, "ROP_PMIC_I2C_SCL"),
+ PINCTRL_PIN(242, "ROP_PMIC_I2C_SDA"),
+ PINCTRL_PIN(243, "ISH_TYPEC_I2C2_SDA"),
+ PINCTRL_PIN(244, "ISH_TYPEC_I2C2_SCL"),
+ PINCTRL_PIN(245, "COMPUTE_PMU_PROCHOT_B"),
+ PINCTRL_PIN(246, "PMU_CATERR_B"),
+ PINCTRL_PIN(247, "COMPUTE_PMIC_VR_READY"),
+ PINCTRL_PIN(248, "FORCE_FW_RELOAD"),
+ PINCTRL_PIN(249, "ROP_PMIC_IRQ_ISH_GPIO31_TPC_ALERT_B"),
+ PINCTRL_PIN(250, "ROP_PMIC_RESET_B"),
+ PINCTRL_PIN(251, "ROP_PMIC_STNBY_SLP_S0_B"),
+ PINCTRL_PIN(252, "ROP_PMIC_THERMTRIP_B"),
+ PINCTRL_PIN(253, "MODEM_CLKREQ"),
+ PINCTRL_PIN(254, "TPC0_BSSB_SBU1"),
+ PINCTRL_PIN(255, "TPC0_BSSB_SBU2"),
+ PINCTRL_PIN(256, "OSC_CLK_OUT_CAM_4"),
+ PINCTRL_PIN(257, "HPD1"),
+ PINCTRL_PIN(258, "HPD0"),
+ PINCTRL_PIN(259, "PMC_TIME_SYNC_0"),
+ PINCTRL_PIN(260, "PMC_TIME_SYNC_1"),
+ PINCTRL_PIN(261, "OSC_CLK_OUT_CAM_5"),
+ PINCTRL_PIN(262, "ISH_GP_20"),
+ PINCTRL_PIN(263, "ISH_GP_16"),
+ PINCTRL_PIN(264, "ISH_GP_17"),
+ PINCTRL_PIN(265, "ISH_GP_18"),
+ PINCTRL_PIN(266, "ISH_GP_19"),
+};
+
+static const struct intel_padgroup lkf_community0_gpps[] = {
+ LKF_GPP(0, 0, 31, 0), /* EAST_0 */
+ LKF_GPP(1, 32, 59, 32), /* EAST_1 */
+};
+
+static const struct intel_padgroup lkf_community1_gpps[] = {
+ LKF_GPP(0, 60, 91, 64), /* NORTHWEST_0 */
+ LKF_GPP(1, 92, 123, 96), /* NORTHWEST_1 */
+ LKF_GPP(2, 124, 148, 128), /* NORTHWEST_2 */
+};
+
+static const struct intel_padgroup lkf_community2_gpps[] = {
+ LKF_GPP(0, 149, 180, 160), /* WEST_0 */
+ LKF_GPP(1, 181, 212, 192), /* WEST_1 */
+ LKF_GPP(2, 213, 237, 224), /* WEST_2 */
+};
+
+static const struct intel_padgroup lkf_community3_gpps[] = {
+ LKF_GPP(0, 238, 266, 256), /* SOUTHEAST */
+};
+
+static const struct intel_community lkf_communities[] = {
+ LKF_COMMUNITY(0, 0, 59, lkf_community0_gpps), /* EAST */
+ LKF_COMMUNITY(1, 60, 148, lkf_community1_gpps), /* NORTHWEST */
+ LKF_COMMUNITY(2, 149, 237, lkf_community2_gpps), /* WEST */
+ LKF_COMMUNITY(3, 238, 266, lkf_community3_gpps), /* SOUTHEAST */
+};
+
+static const struct intel_pinctrl_soc_data lkf_soc_data = {
+ .pins = lkf_pins,
+ .npins = ARRAY_SIZE(lkf_pins),
+ .communities = lkf_communities,
+ .ncommunities = ARRAY_SIZE(lkf_communities),
+};
+
+static const struct acpi_device_id lkf_pinctrl_acpi_match[] = {
+ { "INT34C4", (kernel_ulong_t)&lkf_soc_data },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, lkf_pinctrl_acpi_match);
+
+static INTEL_PINCTRL_PM_OPS(lkf_pinctrl_pm_ops);
+
+static struct platform_driver lkf_pinctrl_driver = {
+ .probe = intel_pinctrl_probe_by_hid,
+ .driver = {
+ .name = "lakefield-pinctrl",
+ .acpi_match_table = lkf_pinctrl_acpi_match,
+ .pm = &lkf_pinctrl_pm_ops,
+ },
+};
+module_platform_driver(lkf_pinctrl_driver);
+
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_DESCRIPTION("Intel Lakefield PCH pinctrl/GPIO driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/intel/pinctrl-lynxpoint.c b/drivers/pinctrl/intel/pinctrl-lynxpoint.c
index 96589d01fe35..0a48ca46ab59 100644
--- a/drivers/pinctrl/intel/pinctrl-lynxpoint.c
+++ b/drivers/pinctrl/intel/pinctrl-lynxpoint.c
@@ -496,7 +496,7 @@ static int lp_pin_config_get(struct pinctrl_dev *pctldev, unsigned int pin,
enum pin_config_param param = pinconf_to_config_param(*config);
unsigned long flags;
u32 value, pull;
- u16 arg = 0;
+ u16 arg;
raw_spin_lock_irqsave(&lg->lock, flags);
value = ioread32(conf2);
@@ -506,8 +506,9 @@ static int lp_pin_config_get(struct pinctrl_dev *pctldev, unsigned int pin,
switch (param) {
case PIN_CONFIG_BIAS_DISABLE:
- if (pull)
+ if (pull != GPIWP_NONE)
return -EINVAL;
+ arg = 0;
break;
case PIN_CONFIG_BIAS_PULL_DOWN:
if (pull != GPIWP_DOWN)
@@ -550,6 +551,7 @@ static int lp_pin_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
switch (param) {
case PIN_CONFIG_BIAS_DISABLE:
value &= ~GPIWP_MASK;
+ value |= GPIWP_NONE;
break;
case PIN_CONFIG_BIAS_PULL_DOWN:
value &= ~GPIWP_MASK;
@@ -872,6 +874,7 @@ static int lp_gpio_probe(struct platform_device *pdev)
gc->direction_output = lp_gpio_direction_output;
gc->get = lp_gpio_get;
gc->set = lp_gpio_set;
+ gc->set_config = gpiochip_generic_config;
gc->get_direction = lp_gpio_get_direction;
gc->base = -1;
gc->ngpio = LP_NUM_GPIO;
@@ -967,13 +970,12 @@ static int __init lp_gpio_init(void)
{
return platform_driver_register(&lp_gpio_driver);
}
+subsys_initcall(lp_gpio_init);
static void __exit lp_gpio_exit(void)
{
platform_driver_unregister(&lp_gpio_driver);
}
-
-subsys_initcall(lp_gpio_init);
module_exit(lp_gpio_exit);
MODULE_AUTHOR("Mathias Nyman (Intel)");
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7622.c b/drivers/pinctrl/mediatek/pinctrl-mt7622.c
index 38c5e166fd0f..68eee881ee3d 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt7622.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt7622.c
@@ -501,12 +501,6 @@ static int mt7622_pwm_ch6_2_pins[] = { 81, };
static int mt7622_pwm_ch6_2_funcs[] = { 4, };
static int mt7622_pwm_ch6_3_pins[] = { 100, };
static int mt7622_pwm_ch6_3_funcs[] = { 0, };
-static int mt7622_pwm_ch7_0_pins[] = { 70, };
-static int mt7622_pwm_ch7_0_funcs[] = { 3, };
-static int mt7622_pwm_ch7_1_pins[] = { 82, };
-static int mt7622_pwm_ch7_1_funcs[] = { 4, };
-static int mt7622_pwm_ch7_2_pins[] = { 101, };
-static int mt7622_pwm_ch7_2_funcs[] = { 0, };
/* SD */
static int mt7622_sd_0_pins[] = { 16, 17, 18, 19, 20, 21, };
@@ -703,9 +697,6 @@ static const struct group_desc mt7622_groups[] = {
PINCTRL_PIN_GROUP("pwm_ch6_1", mt7622_pwm_ch6_1),
PINCTRL_PIN_GROUP("pwm_ch6_2", mt7622_pwm_ch6_2),
PINCTRL_PIN_GROUP("pwm_ch6_3", mt7622_pwm_ch6_3),
- PINCTRL_PIN_GROUP("pwm_ch7_0", mt7622_pwm_ch7_0),
- PINCTRL_PIN_GROUP("pwm_ch7_1", mt7622_pwm_ch7_1),
- PINCTRL_PIN_GROUP("pwm_ch7_2", mt7622_pwm_ch7_2),
PINCTRL_PIN_GROUP("sd_0", mt7622_sd_0),
PINCTRL_PIN_GROUP("sd_1", mt7622_sd_1),
PINCTRL_PIN_GROUP("snfi", mt7622_snfi),
@@ -802,9 +793,7 @@ static const char *mt7622_pwm_groups[] = { "pwm_ch1_0", "pwm_ch1_1",
"pwm_ch4_3", "pwm_ch5_0",
"pwm_ch5_1", "pwm_ch5_2",
"pwm_ch6_0", "pwm_ch6_1",
- "pwm_ch6_2", "pwm_ch6_3",
- "pwm_ch7_0", "pwm_ch7_1",
- "pwm_ch7_2", };
+ "pwm_ch6_2", "pwm_ch6_3", };
static const char *mt7622_sd_groups[] = { "sd_0", "sd_1", };
static const char *mt7622_spic_groups[] = { "spic0_0", "spic0_1", "spic1_0",
"spic1_1", "spic2_0",
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
index 7e950f5d62d0..72f17f26acd8 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
@@ -488,14 +488,8 @@ EXPORT_SYMBOL_GPL(mtk_pinconf_bias_get);
int mtk_pinconf_bias_disable_set_rev1(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc)
{
- int err;
-
- err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_PULLEN,
- MTK_DISABLE);
- if (err)
- return err;
-
- return 0;
+ return mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_PULLEN,
+ MTK_DISABLE);
}
EXPORT_SYMBOL_GPL(mtk_pinconf_bias_disable_set_rev1);
@@ -926,6 +920,10 @@ int mtk_pinconf_adv_pull_set(struct mtk_pinctrl *hw,
err = hw->soc->bias_set(hw, desc, pullup);
if (err)
return err;
+ } else if (hw->soc->bias_set_combo) {
+ err = hw->soc->bias_set_combo(hw, desc, pullup, arg);
+ if (err)
+ return err;
} else {
return -ENOTSUPP;
}
diff --git a/drivers/pinctrl/mediatek/pinctrl-paris.c b/drivers/pinctrl/mediatek/pinctrl-paris.c
index 623af4410b07..039ce9be19c5 100644
--- a/drivers/pinctrl/mediatek/pinctrl-paris.c
+++ b/drivers/pinctrl/mediatek/pinctrl-paris.c
@@ -247,13 +247,13 @@ static int mtk_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_SR, !!arg);
break;
case PIN_CONFIG_OUTPUT:
- err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR,
- MTK_OUTPUT);
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DO,
+ arg);
if (err)
goto err;
- err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DO,
- arg);
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR,
+ MTK_OUTPUT);
break;
case PIN_CONFIG_INPUT_SCHMITT:
case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
diff --git a/drivers/pinctrl/meson/Kconfig b/drivers/pinctrl/meson/Kconfig
index 3cb119105ddb..b2855e341a75 100644
--- a/drivers/pinctrl/meson/Kconfig
+++ b/drivers/pinctrl/meson/Kconfig
@@ -1,8 +1,9 @@
# SPDX-License-Identifier: GPL-2.0-only
menuconfig PINCTRL_MESON
- bool "Amlogic SoC pinctrl drivers"
+ tristate "Amlogic SoC pinctrl drivers"
depends on ARCH_MESON
depends on OF
+ default y
select PINMUX
select PINCONF
select GENERIC_PINCONF
@@ -25,37 +26,37 @@ config PINCTRL_MESON8B
default y
config PINCTRL_MESON_GXBB
- bool "Meson gxbb SoC pinctrl driver"
+ tristate "Meson gxbb SoC pinctrl driver"
depends on ARM64
select PINCTRL_MESON8_PMX
default y
config PINCTRL_MESON_GXL
- bool "Meson gxl SoC pinctrl driver"
+ tristate "Meson gxl SoC pinctrl driver"
depends on ARM64
select PINCTRL_MESON8_PMX
default y
config PINCTRL_MESON8_PMX
- bool
+ tristate
config PINCTRL_MESON_AXG
- bool "Meson axg Soc pinctrl driver"
+ tristate "Meson axg Soc pinctrl driver"
depends on ARM64
select PINCTRL_MESON_AXG_PMX
default y
config PINCTRL_MESON_AXG_PMX
- bool
+ tristate
config PINCTRL_MESON_G12A
- bool "Meson g12a Soc pinctrl driver"
+ tristate "Meson g12a Soc pinctrl driver"
depends on ARM64
select PINCTRL_MESON_AXG_PMX
default y
config PINCTRL_MESON_A1
- bool "Meson a1 Soc pinctrl driver"
+ tristate "Meson a1 Soc pinctrl driver"
depends on ARM64
select PINCTRL_MESON_AXG_PMX
default y
diff --git a/drivers/pinctrl/meson/pinctrl-meson-a1.c b/drivers/pinctrl/meson/pinctrl-meson-a1.c
index 8abf750eac7e..79f5d753d7e1 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-a1.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-a1.c
@@ -925,6 +925,7 @@ static const struct of_device_id meson_a1_pinctrl_dt_match[] = {
},
{ },
};
+MODULE_DEVICE_TABLE(of, meson_a1_pinctrl_dt_match);
static struct platform_driver meson_a1_pinctrl_driver = {
.probe = meson_pinctrl_probe,
@@ -934,4 +935,5 @@ static struct platform_driver meson_a1_pinctrl_driver = {
},
};
-builtin_platform_driver(meson_a1_pinctrl_driver);
+module_platform_driver(meson_a1_pinctrl_driver);
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/pinctrl/meson/pinctrl-meson-axg-pmx.c b/drivers/pinctrl/meson/pinctrl-meson-axg-pmx.c
index e8931d9cf863..80c43683c789 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-axg-pmx.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-axg-pmx.c
@@ -116,3 +116,6 @@ const struct pinmux_ops meson_axg_pmx_ops = {
.get_function_groups = meson_pmx_get_groups,
.gpio_request_enable = meson_axg_pmx_request_gpio,
};
+EXPORT_SYMBOL_GPL(meson_axg_pmx_ops);
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/pinctrl/meson/pinctrl-meson-axg.c b/drivers/pinctrl/meson/pinctrl-meson-axg.c
index 072765db93d7..7bfecdfba177 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-axg.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-axg.c
@@ -1080,6 +1080,7 @@ static const struct of_device_id meson_axg_pinctrl_dt_match[] = {
},
{ },
};
+MODULE_DEVICE_TABLE(of, meson_axg_pinctrl_dt_match);
static struct platform_driver meson_axg_pinctrl_driver = {
.probe = meson_pinctrl_probe,
@@ -1089,4 +1090,5 @@ static struct platform_driver meson_axg_pinctrl_driver = {
},
};
-builtin_platform_driver(meson_axg_pinctrl_driver);
+module_platform_driver(meson_axg_pinctrl_driver);
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/pinctrl/meson/pinctrl-meson-g12a.c b/drivers/pinctrl/meson/pinctrl-meson-g12a.c
index 41850e3c0091..cd9656b13836 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-g12a.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-g12a.c
@@ -1410,6 +1410,7 @@ static const struct of_device_id meson_g12a_pinctrl_dt_match[] = {
},
{ },
};
+MODULE_DEVICE_TABLE(of, meson_g12a_pinctrl_dt_match);
static struct platform_driver meson_g12a_pinctrl_driver = {
.probe = meson_pinctrl_probe,
@@ -1419,4 +1420,5 @@ static struct platform_driver meson_g12a_pinctrl_driver = {
},
};
-builtin_platform_driver(meson_g12a_pinctrl_driver);
+module_platform_driver(meson_g12a_pinctrl_driver);
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
index d130c635f74b..f51fc3939252 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
@@ -900,6 +900,7 @@ static const struct of_device_id meson_gxbb_pinctrl_dt_match[] = {
},
{ },
};
+MODULE_DEVICE_TABLE(of, meson_gxbb_pinctrl_dt_match);
static struct platform_driver meson_gxbb_pinctrl_driver = {
.probe = meson_pinctrl_probe,
@@ -908,4 +909,5 @@ static struct platform_driver meson_gxbb_pinctrl_driver = {
.of_match_table = meson_gxbb_pinctrl_dt_match,
},
};
-builtin_platform_driver(meson_gxbb_pinctrl_driver);
+module_platform_driver(meson_gxbb_pinctrl_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxl.c b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
index 32552d795bb2..51408996255b 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxl.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
@@ -861,6 +861,7 @@ static const struct of_device_id meson_gxl_pinctrl_dt_match[] = {
},
{ },
};
+MODULE_DEVICE_TABLE(of, meson_gxl_pinctrl_dt_match);
static struct platform_driver meson_gxl_pinctrl_driver = {
.probe = meson_pinctrl_probe,
@@ -869,4 +870,5 @@ static struct platform_driver meson_gxl_pinctrl_driver = {
.of_match_table = meson_gxl_pinctrl_dt_match,
},
};
-builtin_platform_driver(meson_gxl_pinctrl_driver);
+module_platform_driver(meson_gxl_pinctrl_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
index 20683cd072bb..49851444a6e3 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.c
+++ b/drivers/pinctrl/meson/pinctrl-meson.c
@@ -152,6 +152,7 @@ int meson_pmx_get_funcs_count(struct pinctrl_dev *pcdev)
return pc->data->num_funcs;
}
+EXPORT_SYMBOL_GPL(meson_pmx_get_funcs_count);
const char *meson_pmx_get_func_name(struct pinctrl_dev *pcdev,
unsigned selector)
@@ -160,6 +161,7 @@ const char *meson_pmx_get_func_name(struct pinctrl_dev *pcdev,
return pc->data->funcs[selector].name;
}
+EXPORT_SYMBOL_GPL(meson_pmx_get_func_name);
int meson_pmx_get_groups(struct pinctrl_dev *pcdev, unsigned selector,
const char * const **groups,
@@ -172,6 +174,7 @@ int meson_pmx_get_groups(struct pinctrl_dev *pcdev, unsigned selector,
return 0;
}
+EXPORT_SYMBOL_GPL(meson_pmx_get_groups);
static int meson_pinconf_set_gpio_bit(struct meson_pinctrl *pc,
unsigned int pin,
@@ -723,6 +726,7 @@ int meson8_aobus_parse_dt_extra(struct meson_pinctrl *pc)
return 0;
}
+EXPORT_SYMBOL_GPL(meson8_aobus_parse_dt_extra);
int meson_a1_parse_dt_extra(struct meson_pinctrl *pc)
{
@@ -732,6 +736,7 @@ int meson_a1_parse_dt_extra(struct meson_pinctrl *pc)
return 0;
}
+EXPORT_SYMBOL_GPL(meson_a1_parse_dt_extra);
int meson_pinctrl_probe(struct platform_device *pdev)
{
@@ -766,3 +771,6 @@ int meson_pinctrl_probe(struct platform_device *pdev)
return meson_gpiolib_register(pc);
}
+EXPORT_SYMBOL_GPL(meson_pinctrl_probe);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/meson/pinctrl-meson.h b/drivers/pinctrl/meson/pinctrl-meson.h
index f8b0ff9d419a..ff5372e0a475 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.h
+++ b/drivers/pinctrl/meson/pinctrl-meson.h
@@ -10,6 +10,7 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/types.h>
+#include <linux/module.h>
struct meson_pinctrl;
diff --git a/drivers/pinctrl/meson/pinctrl-meson8-pmx.c b/drivers/pinctrl/meson/pinctrl-meson8-pmx.c
index 66a908f9f13d..f767b6923f9f 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8-pmx.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8-pmx.c
@@ -100,3 +100,5 @@ const struct pinmux_ops meson8_pmx_ops = {
.get_function_groups = meson_pmx_get_groups,
.gpio_request_enable = meson8_pmx_request_gpio,
};
+EXPORT_SYMBOL_GPL(meson8_pmx_ops);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
index acad3887cc74..0b9b6cbfd10c 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
@@ -421,6 +421,8 @@ static const unsigned lcd_d0_d7_a_1_pins[] = {
/* D8 thru D11 often used as TVOUT lines */
static const unsigned lcd_d8_d11_a_1_pins[] = { DB8500_PIN_F4,
DB8500_PIN_E3, DB8500_PIN_E4, DB8500_PIN_D2 };
+static const unsigned lcd_d12_d15_a_1_pins[] = {
+ DB8500_PIN_C1, DB8500_PIN_D3, DB8500_PIN_C2, DB8500_PIN_D5 };
static const unsigned lcd_d12_d23_a_1_pins[] = {
DB8500_PIN_C1, DB8500_PIN_D3, DB8500_PIN_C2, DB8500_PIN_D5,
DB8500_PIN_C6, DB8500_PIN_B3, DB8500_PIN_C4, DB8500_PIN_E6,
@@ -535,6 +537,9 @@ static const unsigned lcd_b_1_pins[] = { DB8500_PIN_D17, DB8500_PIN_D16,
DB8500_PIN_A18, DB8500_PIN_C18, DB8500_PIN_B19, DB8500_PIN_B20,
DB8500_PIN_D21, DB8500_PIN_D20, DB8500_PIN_C20, DB8500_PIN_B21,
DB8500_PIN_C21, DB8500_PIN_A22, DB8500_PIN_B24, DB8500_PIN_C22 };
+static const unsigned lcd_d16_d23_b_1_pins[] = {
+ DB8500_PIN_D21, DB8500_PIN_D20, DB8500_PIN_C20, DB8500_PIN_B21,
+ DB8500_PIN_C21, DB8500_PIN_A22, DB8500_PIN_B24, DB8500_PIN_C22 };
static const unsigned ddrtrig_b_1_pins[] = { DB8500_PIN_AJ27 };
static const unsigned pwl_b_1_pins[] = { DB8500_PIN_AF25 };
static const unsigned spi1_b_1_pins[] = { DB8500_PIN_AG15, DB8500_PIN_AF13,
@@ -689,6 +694,7 @@ static const struct nmk_pingroup nmk_db8500_groups[] = {
DB8500_PIN_GROUP(lcdvsi1_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(lcd_d0_d7_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(lcd_d8_d11_a_1, NMK_GPIO_ALT_A),
+ DB8500_PIN_GROUP(lcd_d12_d15_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(lcd_d12_d23_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(kp_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(kpskaskb_a_1, NMK_GPIO_ALT_A),
@@ -741,6 +747,7 @@ static const struct nmk_pingroup nmk_db8500_groups[] = {
DB8500_PIN_GROUP(lcdaclk_b_1, NMK_GPIO_ALT_B),
DB8500_PIN_GROUP(lcda_b_1, NMK_GPIO_ALT_B),
DB8500_PIN_GROUP(lcd_b_1, NMK_GPIO_ALT_B),
+ DB8500_PIN_GROUP(lcd_d16_d23_b_1, NMK_GPIO_ALT_B),
DB8500_PIN_GROUP(ddrtrig_b_1, NMK_GPIO_ALT_B),
DB8500_PIN_GROUP(pwl_b_1, NMK_GPIO_ALT_B),
DB8500_PIN_GROUP(spi1_b_1, NMK_GPIO_ALT_B),
@@ -846,7 +853,8 @@ DB8500_FUNC_GROUPS(mc0, "mc0_a_1", "mc0_a_2", "mc0_dat47_a_1", "mc0dat31dir_a_1"
DB8500_FUNC_GROUPS(msp1, "msp1txrx_a_1", "msp1_a_1", "msp1txrx_b_1");
DB8500_FUNC_GROUPS(lcdb, "lcdb_a_1");
DB8500_FUNC_GROUPS(lcd, "lcdvsi0_a_1", "lcdvsi1_a_1", "lcd_d0_d7_a_1",
- "lcd_d8_d11_a_1", "lcd_d12_d23_a_1", "lcd_b_1");
+ "lcd_d8_d11_a_1", "lcd_d12_d15_a_1", "lcd_d12_d23_a_1", "lcd_b_1",
+ "lcd_d16_d23_b_1");
DB8500_FUNC_GROUPS(kp, "kp_a_1", "kp_a_2", "kp_b_1", "kp_b_2", "kp_c_1", "kp_oc1_1");
DB8500_FUNC_GROUPS(mc2, "mc2_a_1", "mc2rstn_c_1");
DB8500_FUNC_GROUPS(ssp1, "ssp1_a_1");
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index 657e35a75d84..abfe11c7b49f 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -948,7 +948,6 @@ static void nmk_gpio_dbg_show_one(struct seq_file *s,
(mode < 0) ? "unknown" : modes[mode]);
} else {
int irq = chip->to_irq(chip, offset);
- struct irq_desc *desc = irq_to_desc(irq);
const int pullidx = pull ? 1 : 0;
int val;
static const char * const pulls[] = {
@@ -969,8 +968,9 @@ static void nmk_gpio_dbg_show_one(struct seq_file *s,
* This races with request_irq(), set_irq_type(),
* and set_irq_wake() ... but those are "rare".
*/
- if (irq > 0 && desc && desc->action) {
+ if (irq > 0 && irq_has_action(irq)) {
char *trigger;
+ bool wake;
if (nmk_chip->edge_rising & BIT(offset))
trigger = "edge-rising";
@@ -979,10 +979,10 @@ static void nmk_gpio_dbg_show_one(struct seq_file *s,
else
trigger = "edge-undefined";
+ wake = !!(nmk_chip->real_wake & BIT(offset));
+
seq_printf(s, " irq-%d %s%s",
- irq, trigger,
- irqd_is_wakeup_set(&desc->irq_data)
- ? " wakeup" : "");
+ irq, trigger, wake ? " wakeup" : "");
}
}
clk_disable(nmk_chip->clk);
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 899c16c17b6d..2d4acf21117c 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -197,10 +197,16 @@ static int amd_gpio_set_config(struct gpio_chip *gc, unsigned offset,
static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
{
u32 pin_reg;
+ u32 db_cntrl;
unsigned long flags;
unsigned int bank, i, pin_num;
struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
+ bool tmr_out_unit;
+ unsigned int time;
+ unsigned int unit;
+ bool tmr_large;
+
char *level_trig;
char *active_level;
char *interrupt_enable;
@@ -214,6 +220,8 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
char *pull_down_enable;
char *output_value;
char *output_enable;
+ char debounce_value[40];
+ char *debounce_enable;
for (bank = 0; bank < gpio_dev->hwbank_num; bank++) {
seq_printf(s, "GPIO bank%d\t", bank);
@@ -327,13 +335,44 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
pin_sts = "input is low|";
}
+ db_cntrl = (DB_CNTRl_MASK << DB_CNTRL_OFF) & pin_reg;
+ if (db_cntrl) {
+ tmr_out_unit = pin_reg & BIT(DB_TMR_OUT_UNIT_OFF);
+ tmr_large = pin_reg & BIT(DB_TMR_LARGE_OFF);
+ time = pin_reg & DB_TMR_OUT_MASK;
+ if (tmr_large) {
+ if (tmr_out_unit)
+ unit = 62500;
+ else
+ unit = 15625;
+ } else {
+ if (tmr_out_unit)
+ unit = 244;
+ else
+ unit = 61;
+ }
+ if ((DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF) == db_cntrl)
+ debounce_enable = "debouncing filter (high and low) enabled|";
+ else if ((DB_TYPE_PRESERVE_LOW_GLITCH << DB_CNTRL_OFF) == db_cntrl)
+ debounce_enable = "debouncing filter (low) enabled|";
+ else
+ debounce_enable = "debouncing filter (high) enabled|";
+
+ snprintf(debounce_value, sizeof(debounce_value),
+ "debouncing timeout is %u (us)|", time * unit);
+ } else {
+ debounce_enable = "debouncing filter disabled|";
+ snprintf(debounce_value, sizeof(debounce_value), " ");
+ }
+
seq_printf(s, "%s %s %s %s %s %s\n"
- " %s %s %s %s %s %s %s 0x%x\n",
+ " %s %s %s %s %s %s %s %s %s 0x%x\n",
level_trig, active_level, interrupt_enable,
interrupt_mask, wake_cntrl0, wake_cntrl1,
wake_cntrl2, pin_sts, pull_up_sel,
pull_up_enable, pull_down_enable,
- output_value, output_enable, pin_reg);
+ output_value, output_enable,
+ debounce_enable, debounce_value, pin_reg);
}
}
}
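To make the unit selection above concrete, here is the same arithmetic as a self-contained helper (illustrative restatement, not part of the patch); per the "%u (us)" format string, the result is in microseconds.

#include <linux/types.h>

/* Restates the debounce decode from amd_gpio_dbg_show() above:
 * pick the timer unit from the two mode bits, then scale the raw
 * timer value. example_debounce_us() is a hypothetical name. */
static unsigned int example_debounce_us(bool tmr_large, bool tmr_out_unit,
					unsigned int time)
{
	unsigned int unit;

	if (tmr_large)
		unit = tmr_out_unit ? 62500 : 15625;
	else
		unit = tmr_out_unit ? 244 : 61;

	return time * unit;
}

/* e.g. example_debounce_us(false, true, 10) == 2440 us */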
diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
index 578b387100d9..36c6078b93b3 100644
--- a/drivers/pinctrl/pinctrl-at91-pio4.c
+++ b/drivers/pinctrl/pinctrl-at91-pio4.c
@@ -71,8 +71,15 @@
/* Custom pinconf parameters */
#define ATMEL_PIN_CONFIG_DRIVE_STRENGTH (PIN_CONFIG_END + 1)
+/**
+ * struct atmel_pioctrl_data - Atmel PIO controller (pinmux + gpio) data struct
+ * @nbanks: number of PIO banks
+ * @last_bank_count: number of lines in the last bank (can be less than
+ * the rest of the banks).
+ */
struct atmel_pioctrl_data {
unsigned nbanks;
+ unsigned last_bank_count;
};
struct atmel_group {
@@ -980,11 +987,13 @@ static const struct dev_pm_ops atmel_pctrl_pm_ops = {
* We can have up to 16 banks.
*/
static const struct atmel_pioctrl_data atmel_sama5d2_pioctrl_data = {
- .nbanks = 4,
+ .nbanks = 4,
+ .last_bank_count = ATMEL_PIO_NPINS_PER_BANK,
};
static const struct atmel_pioctrl_data microchip_sama7g5_pioctrl_data = {
- .nbanks = 5,
+ .nbanks = 5,
+ .last_bank_count = 8, /* sama7g5 has only PE0 to PE7 */
};
static const struct of_device_id atmel_pctrl_of_match[] = {
@@ -1025,6 +1034,11 @@ static int atmel_pinctrl_probe(struct platform_device *pdev)
atmel_pioctrl_data = match->data;
atmel_pioctrl->nbanks = atmel_pioctrl_data->nbanks;
atmel_pioctrl->npins = atmel_pioctrl->nbanks * ATMEL_PIO_NPINS_PER_BANK;
+ /* if the last bank has a limited number of pins, adjust accordingly */
+ if (atmel_pioctrl_data->last_bank_count != ATMEL_PIO_NPINS_PER_BANK) {
+ atmel_pioctrl->npins -= ATMEL_PIO_NPINS_PER_BANK;
+ atmel_pioctrl->npins += atmel_pioctrl_data->last_bank_count;
+ }
atmel_pioctrl->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(atmel_pioctrl->reg_base))
@@ -1127,8 +1141,8 @@ static int atmel_pinctrl_probe(struct platform_device *pdev)
return -EINVAL;
}
atmel_pioctrl->irqs[i] = res->start;
- irq_set_chained_handler(res->start, atmel_gpio_irq_handler);
- irq_set_handler_data(res->start, atmel_pioctrl);
+ irq_set_chained_handler_and_data(res->start,
+ atmel_gpio_irq_handler, atmel_pioctrl);
dev_dbg(dev, "bank %i: irq=%pr\n", i, res);
}
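A worked instance of the npins adjustment above, assuming ATMEL_PIO_NPINS_PER_BANK is 32 (an assumption; the constant's value is not shown in this hunk):

/*
 * sama5d2: 4 banks * 32    = 128 pins (last bank is full)
 * sama7g5: 5 * 32 - 32 + 8 = 136 pins (last bank is PE0..PE7 only)
 */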
diff --git a/drivers/pinctrl/pinctrl-falcon.c b/drivers/pinctrl/pinctrl-falcon.c
index 62c02b969327..7521a924dffb 100644
--- a/drivers/pinctrl/pinctrl-falcon.c
+++ b/drivers/pinctrl/pinctrl-falcon.c
@@ -431,24 +431,28 @@ static int pinctrl_falcon_probe(struct platform_device *pdev)
/* load and remap the pad resources of the different banks */
for_each_compatible_node(np, NULL, "lantiq,pad-falcon") {
- struct platform_device *ppdev = of_find_device_by_node(np);
const __be32 *bank = of_get_property(np, "lantiq,bank", NULL);
struct resource res;
+ struct platform_device *ppdev;
u32 avail;
int pins;
if (!of_device_is_available(np))
continue;
- if (!ppdev) {
- dev_err(&pdev->dev, "failed to find pad pdev\n");
- continue;
- }
if (!bank || *bank >= PORTS)
continue;
if (of_address_to_resource(np, 0, &res))
continue;
+
+ ppdev = of_find_device_by_node(np);
+ if (!ppdev) {
+ dev_err(&pdev->dev, "failed to find pad pdev\n");
+ continue;
+ }
+
falcon_info.clk[*bank] = clk_get(&ppdev->dev, NULL);
+ put_device(&ppdev->dev);
if (IS_ERR(falcon_info.clk[*bank])) {
dev_err(&ppdev->dev, "failed to get clock\n");
of_node_put(np);
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
index 621909b01deb..3ea163498647 100644
--- a/drivers/pinctrl/pinctrl-ingenic.c
+++ b/drivers/pinctrl/pinctrl-ingenic.c
@@ -37,11 +37,11 @@
#define JZ4740_GPIO_TRIG 0x70
#define JZ4740_GPIO_FLAG 0x80
-#define JZ4760_GPIO_INT 0x10
-#define JZ4760_GPIO_PAT1 0x30
-#define JZ4760_GPIO_PAT0 0x40
-#define JZ4760_GPIO_FLAG 0x50
-#define JZ4760_GPIO_PEN 0x70
+#define JZ4770_GPIO_INT 0x10
+#define JZ4770_GPIO_PAT1 0x30
+#define JZ4770_GPIO_PAT0 0x40
+#define JZ4770_GPIO_FLAG 0x50
+#define JZ4770_GPIO_PEN 0x70
#define X1830_GPIO_PEL 0x110
#define X1830_GPIO_PEH 0x120
@@ -134,61 +134,42 @@ static int jz4740_pwm_pwm5_pins[] = { 0x7c, };
static int jz4740_pwm_pwm6_pins[] = { 0x7e, };
static int jz4740_pwm_pwm7_pins[] = { 0x7f, };
-static int jz4740_mmc_1bit_funcs[] = { 0, 0, 0, };
-static int jz4740_mmc_4bit_funcs[] = { 0, 0, 0, };
-static int jz4740_uart0_data_funcs[] = { 1, 1, };
-static int jz4740_uart0_hwflow_funcs[] = { 1, 1, };
-static int jz4740_uart1_data_funcs[] = { 2, 2, };
-static int jz4740_lcd_8bit_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, };
-static int jz4740_lcd_16bit_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, };
-static int jz4740_lcd_18bit_funcs[] = { 0, 0, };
-static int jz4740_lcd_18bit_tft_funcs[] = { 0, 0, 0, 0, };
-static int jz4740_nand_cs1_funcs[] = { 0, };
-static int jz4740_nand_cs2_funcs[] = { 0, };
-static int jz4740_nand_cs3_funcs[] = { 0, };
-static int jz4740_nand_cs4_funcs[] = { 0, };
-static int jz4740_nand_fre_fwe_funcs[] = { 0, 0, };
-static int jz4740_pwm_pwm0_funcs[] = { 0, };
-static int jz4740_pwm_pwm1_funcs[] = { 0, };
-static int jz4740_pwm_pwm2_funcs[] = { 0, };
-static int jz4740_pwm_pwm3_funcs[] = { 0, };
-static int jz4740_pwm_pwm4_funcs[] = { 0, };
-static int jz4740_pwm_pwm5_funcs[] = { 0, };
-static int jz4740_pwm_pwm6_funcs[] = { 0, };
-static int jz4740_pwm_pwm7_funcs[] = { 0, };
-
-#define INGENIC_PIN_GROUP(name, id) \
+
+#define INGENIC_PIN_GROUP_FUNCS(name, id, funcs) \
{ \
name, \
id##_pins, \
ARRAY_SIZE(id##_pins), \
- id##_funcs, \
+ funcs, \
}
+#define INGENIC_PIN_GROUP(name, id, func) \
+ INGENIC_PIN_GROUP_FUNCS(name, id, (void *)(func))
+
static const struct group_desc jz4740_groups[] = {
- INGENIC_PIN_GROUP("mmc-1bit", jz4740_mmc_1bit),
- INGENIC_PIN_GROUP("mmc-4bit", jz4740_mmc_4bit),
- INGENIC_PIN_GROUP("uart0-data", jz4740_uart0_data),
- INGENIC_PIN_GROUP("uart0-hwflow", jz4740_uart0_hwflow),
- INGENIC_PIN_GROUP("uart1-data", jz4740_uart1_data),
- INGENIC_PIN_GROUP("lcd-8bit", jz4740_lcd_8bit),
- INGENIC_PIN_GROUP("lcd-16bit", jz4740_lcd_16bit),
- INGENIC_PIN_GROUP("lcd-18bit", jz4740_lcd_18bit),
- INGENIC_PIN_GROUP("lcd-18bit-tft", jz4740_lcd_18bit_tft),
+ INGENIC_PIN_GROUP("mmc-1bit", jz4740_mmc_1bit, 0),
+ INGENIC_PIN_GROUP("mmc-4bit", jz4740_mmc_4bit, 0),
+ INGENIC_PIN_GROUP("uart0-data", jz4740_uart0_data, 1),
+ INGENIC_PIN_GROUP("uart0-hwflow", jz4740_uart0_hwflow, 1),
+ INGENIC_PIN_GROUP("uart1-data", jz4740_uart1_data, 2),
+ INGENIC_PIN_GROUP("lcd-8bit", jz4740_lcd_8bit, 0),
+ INGENIC_PIN_GROUP("lcd-16bit", jz4740_lcd_16bit, 0),
+ INGENIC_PIN_GROUP("lcd-18bit", jz4740_lcd_18bit, 0),
+ INGENIC_PIN_GROUP("lcd-18bit-tft", jz4740_lcd_18bit_tft, 0),
{ "lcd-no-pins", },
- INGENIC_PIN_GROUP("nand-cs1", jz4740_nand_cs1),
- INGENIC_PIN_GROUP("nand-cs2", jz4740_nand_cs2),
- INGENIC_PIN_GROUP("nand-cs3", jz4740_nand_cs3),
- INGENIC_PIN_GROUP("nand-cs4", jz4740_nand_cs4),
- INGENIC_PIN_GROUP("nand-fre-fwe", jz4740_nand_fre_fwe),
- INGENIC_PIN_GROUP("pwm0", jz4740_pwm_pwm0),
- INGENIC_PIN_GROUP("pwm1", jz4740_pwm_pwm1),
- INGENIC_PIN_GROUP("pwm2", jz4740_pwm_pwm2),
- INGENIC_PIN_GROUP("pwm3", jz4740_pwm_pwm3),
- INGENIC_PIN_GROUP("pwm4", jz4740_pwm_pwm4),
- INGENIC_PIN_GROUP("pwm5", jz4740_pwm_pwm5),
- INGENIC_PIN_GROUP("pwm6", jz4740_pwm_pwm6),
- INGENIC_PIN_GROUP("pwm7", jz4740_pwm_pwm7),
+ INGENIC_PIN_GROUP("nand-cs1", jz4740_nand_cs1, 0),
+ INGENIC_PIN_GROUP("nand-cs2", jz4740_nand_cs2, 0),
+ INGENIC_PIN_GROUP("nand-cs3", jz4740_nand_cs3, 0),
+ INGENIC_PIN_GROUP("nand-cs4", jz4740_nand_cs4, 0),
+ INGENIC_PIN_GROUP("nand-fre-fwe", jz4740_nand_fre_fwe, 0),
+ INGENIC_PIN_GROUP("pwm0", jz4740_pwm_pwm0, 0),
+ INGENIC_PIN_GROUP("pwm1", jz4740_pwm_pwm1, 0),
+ INGENIC_PIN_GROUP("pwm2", jz4740_pwm_pwm2, 0),
+ INGENIC_PIN_GROUP("pwm3", jz4740_pwm_pwm3, 0),
+ INGENIC_PIN_GROUP("pwm4", jz4740_pwm_pwm4, 0),
+ INGENIC_PIN_GROUP("pwm5", jz4740_pwm_pwm5, 0),
+ INGENIC_PIN_GROUP("pwm6", jz4740_pwm_pwm6, 0),
+ INGENIC_PIN_GROUP("pwm7", jz4740_pwm_pwm7, 0),
};
static const char *jz4740_mmc_groups[] = { "mmc-1bit", "mmc-4bit", };
@@ -268,54 +249,33 @@ static int jz4725b_lcd_24bit_pins[] = { 0x76, 0x77, 0x78, 0x79, };
static int jz4725b_lcd_special_pins[] = { 0x76, 0x77, 0x78, 0x79, };
static int jz4725b_lcd_generic_pins[] = { 0x75, };
-static int jz4725b_mmc0_1bit_funcs[] = { 1, 1, 1, };
-static int jz4725b_mmc0_4bit_funcs[] = { 1, 0, 1, };
-static int jz4725b_mmc1_1bit_funcs[] = { 0, 0, 0, };
-static int jz4725b_mmc1_4bit_funcs[] = { 0, 0, 0, };
-static int jz4725b_uart_data_funcs[] = { 1, 1, };
-static int jz4725b_nand_cs1_funcs[] = { 0, };
-static int jz4725b_nand_cs2_funcs[] = { 0, };
-static int jz4725b_nand_cs3_funcs[] = { 0, };
-static int jz4725b_nand_cs4_funcs[] = { 0, };
-static int jz4725b_nand_cle_ale_funcs[] = { 0, 0, };
-static int jz4725b_nand_fre_fwe_funcs[] = { 0, 0, };
-static int jz4725b_pwm_pwm0_funcs[] = { 0, };
-static int jz4725b_pwm_pwm1_funcs[] = { 0, };
-static int jz4725b_pwm_pwm2_funcs[] = { 0, };
-static int jz4725b_pwm_pwm3_funcs[] = { 0, };
-static int jz4725b_pwm_pwm4_funcs[] = { 0, };
-static int jz4725b_pwm_pwm5_funcs[] = { 0, };
-static int jz4725b_lcd_8bit_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, };
-static int jz4725b_lcd_16bit_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, };
-static int jz4725b_lcd_18bit_funcs[] = { 0, 0, };
-static int jz4725b_lcd_24bit_funcs[] = { 1, 1, 1, 1, };
-static int jz4725b_lcd_special_funcs[] = { 0, 0, 0, 0, };
-static int jz4725b_lcd_generic_funcs[] = { 0, };
+static u8 jz4725b_mmc0_4bit_funcs[] = { 1, 0, 1, };
static const struct group_desc jz4725b_groups[] = {
- INGENIC_PIN_GROUP("mmc0-1bit", jz4725b_mmc0_1bit),
- INGENIC_PIN_GROUP("mmc0-4bit", jz4725b_mmc0_4bit),
- INGENIC_PIN_GROUP("mmc1-1bit", jz4725b_mmc1_1bit),
- INGENIC_PIN_GROUP("mmc1-4bit", jz4725b_mmc1_4bit),
- INGENIC_PIN_GROUP("uart-data", jz4725b_uart_data),
- INGENIC_PIN_GROUP("nand-cs1", jz4725b_nand_cs1),
- INGENIC_PIN_GROUP("nand-cs2", jz4725b_nand_cs2),
- INGENIC_PIN_GROUP("nand-cs3", jz4725b_nand_cs3),
- INGENIC_PIN_GROUP("nand-cs4", jz4725b_nand_cs4),
- INGENIC_PIN_GROUP("nand-cle-ale", jz4725b_nand_cle_ale),
- INGENIC_PIN_GROUP("nand-fre-fwe", jz4725b_nand_fre_fwe),
- INGENIC_PIN_GROUP("pwm0", jz4725b_pwm_pwm0),
- INGENIC_PIN_GROUP("pwm1", jz4725b_pwm_pwm1),
- INGENIC_PIN_GROUP("pwm2", jz4725b_pwm_pwm2),
- INGENIC_PIN_GROUP("pwm3", jz4725b_pwm_pwm3),
- INGENIC_PIN_GROUP("pwm4", jz4725b_pwm_pwm4),
- INGENIC_PIN_GROUP("pwm5", jz4725b_pwm_pwm5),
- INGENIC_PIN_GROUP("lcd-8bit", jz4725b_lcd_8bit),
- INGENIC_PIN_GROUP("lcd-16bit", jz4725b_lcd_16bit),
- INGENIC_PIN_GROUP("lcd-18bit", jz4725b_lcd_18bit),
- INGENIC_PIN_GROUP("lcd-24bit", jz4725b_lcd_24bit),
- INGENIC_PIN_GROUP("lcd-special", jz4725b_lcd_special),
- INGENIC_PIN_GROUP("lcd-generic", jz4725b_lcd_generic),
+ INGENIC_PIN_GROUP("mmc0-1bit", jz4725b_mmc0_1bit, 1),
+ INGENIC_PIN_GROUP_FUNCS("mmc0-4bit", jz4725b_mmc0_4bit,
+ jz4725b_mmc0_4bit_funcs),
+ INGENIC_PIN_GROUP("mmc1-1bit", jz4725b_mmc1_1bit, 0),
+ INGENIC_PIN_GROUP("mmc1-4bit", jz4725b_mmc1_4bit, 0),
+ INGENIC_PIN_GROUP("uart-data", jz4725b_uart_data, 1),
+ INGENIC_PIN_GROUP("nand-cs1", jz4725b_nand_cs1, 0),
+ INGENIC_PIN_GROUP("nand-cs2", jz4725b_nand_cs2, 0),
+ INGENIC_PIN_GROUP("nand-cs3", jz4725b_nand_cs3, 0),
+ INGENIC_PIN_GROUP("nand-cs4", jz4725b_nand_cs4, 0),
+ INGENIC_PIN_GROUP("nand-cle-ale", jz4725b_nand_cle_ale, 0),
+ INGENIC_PIN_GROUP("nand-fre-fwe", jz4725b_nand_fre_fwe, 0),
+ INGENIC_PIN_GROUP("pwm0", jz4725b_pwm_pwm0, 0),
+ INGENIC_PIN_GROUP("pwm1", jz4725b_pwm_pwm1, 0),
+ INGENIC_PIN_GROUP("pwm2", jz4725b_pwm_pwm2, 0),
+ INGENIC_PIN_GROUP("pwm3", jz4725b_pwm_pwm3, 0),
+ INGENIC_PIN_GROUP("pwm4", jz4725b_pwm_pwm4, 0),
+ INGENIC_PIN_GROUP("pwm5", jz4725b_pwm_pwm5, 0),
+ INGENIC_PIN_GROUP("lcd-8bit", jz4725b_lcd_8bit, 0),
+ INGENIC_PIN_GROUP("lcd-16bit", jz4725b_lcd_16bit, 0),
+ INGENIC_PIN_GROUP("lcd-18bit", jz4725b_lcd_18bit, 0),
+ INGENIC_PIN_GROUP("lcd-24bit", jz4725b_lcd_24bit, 1),
+ INGENIC_PIN_GROUP("lcd-special", jz4725b_lcd_special, 0),
+ INGENIC_PIN_GROUP("lcd-generic", jz4725b_lcd_generic, 0),
};
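
Here mmc0-4bit is the only jz4725b group whose pins mix functions ({ 1, 0, 1, }), so it alone keeps a table, now shrunk from int to u8. On the consumer side the driver must tell the two encodings apart; one plausible decode, continuing the sketch above and assuming function numbers never exceed 3 (at most four mux functions per pin), would be:

	static unsigned int group_pin_function(const struct ingenic_group *grp,
					       unsigned int idx)
	{
		uintptr_t data = (uintptr_t)grp->data;

		/*
		 * Values above 3 cannot be function numbers, so treat
		 * them as a real pointer to a per-pin u8 table.
		 */
		if (data > 3) {
			const uint8_t *funcs = grp->data;
			return funcs[idx];
		}

		return data;	/* one function shared by every pin */
	}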
static const char *jz4725b_mmc0_groups[] = { "mmc0-1bit", "mmc0-4bit", };
@@ -431,110 +391,61 @@ static int jz4760_pwm_pwm5_pins[] = { 0x85, };
static int jz4760_pwm_pwm6_pins[] = { 0x6a, };
static int jz4760_pwm_pwm7_pins[] = { 0x6b, };
-static int jz4760_uart0_data_funcs[] = { 0, 0, };
-static int jz4760_uart0_hwflow_funcs[] = { 0, 0, };
-static int jz4760_uart1_data_funcs[] = { 0, 0, };
-static int jz4760_uart1_hwflow_funcs[] = { 0, 0, };
-static int jz4760_uart2_data_funcs[] = { 0, 0, };
-static int jz4760_uart2_hwflow_funcs[] = { 0, 0, };
-static int jz4760_uart3_data_funcs[] = { 0, 1, };
-static int jz4760_uart3_hwflow_funcs[] = { 0, 0, };
-static int jz4760_mmc0_1bit_a_funcs[] = { 1, 1, 0, };
-static int jz4760_mmc0_4bit_a_funcs[] = { 1, 1, 1, };
-static int jz4760_mmc0_1bit_e_funcs[] = { 0, 0, 0, };
-static int jz4760_mmc0_4bit_e_funcs[] = { 0, 0, 0, };
-static int jz4760_mmc0_8bit_e_funcs[] = { 0, 0, 0, 0, };
-static int jz4760_mmc1_1bit_d_funcs[] = { 0, 0, 0, };
-static int jz4760_mmc1_4bit_d_funcs[] = { 0, 0, 0, };
-static int jz4760_mmc1_1bit_e_funcs[] = { 1, 1, 1, };
-static int jz4760_mmc1_4bit_e_funcs[] = { 1, 1, 1, };
-static int jz4760_mmc1_8bit_e_funcs[] = { 1, 1, 1, 1, };
-static int jz4760_mmc2_1bit_b_funcs[] = { 0, 0, 0, };
-static int jz4760_mmc2_4bit_b_funcs[] = { 0, 0, 0, };
-static int jz4760_mmc2_1bit_e_funcs[] = { 2, 2, 2, };
-static int jz4760_mmc2_4bit_e_funcs[] = { 2, 2, 2, };
-static int jz4760_mmc2_8bit_e_funcs[] = { 2, 2, 2, 2, };
-static int jz4760_nemc_8bit_data_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, };
-static int jz4760_nemc_16bit_data_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, };
-static int jz4760_nemc_cle_ale_funcs[] = { 0, 0, };
-static int jz4760_nemc_addr_funcs[] = { 0, 0, 0, 0, };
-static int jz4760_nemc_rd_we_funcs[] = { 0, 0, };
-static int jz4760_nemc_frd_fwe_funcs[] = { 0, 0, };
-static int jz4760_nemc_wait_funcs[] = { 0, };
-static int jz4760_nemc_cs1_funcs[] = { 0, };
-static int jz4760_nemc_cs2_funcs[] = { 0, };
-static int jz4760_nemc_cs3_funcs[] = { 0, };
-static int jz4760_nemc_cs4_funcs[] = { 0, };
-static int jz4760_nemc_cs5_funcs[] = { 0, };
-static int jz4760_nemc_cs6_funcs[] = { 0, };
-static int jz4760_i2c0_funcs[] = { 0, 0, };
-static int jz4760_i2c1_funcs[] = { 0, 0, };
-static int jz4760_cim_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, };
-static int jz4760_lcd_24bit_funcs[] = {
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0,
-};
-static int jz4760_pwm_pwm0_funcs[] = { 0, };
-static int jz4760_pwm_pwm1_funcs[] = { 0, };
-static int jz4760_pwm_pwm2_funcs[] = { 0, };
-static int jz4760_pwm_pwm3_funcs[] = { 0, };
-static int jz4760_pwm_pwm4_funcs[] = { 0, };
-static int jz4760_pwm_pwm5_funcs[] = { 0, };
-static int jz4760_pwm_pwm6_funcs[] = { 0, };
-static int jz4760_pwm_pwm7_funcs[] = { 0, };
+static u8 jz4760_uart3_data_funcs[] = { 0, 1, };
+static u8 jz4760_mmc0_1bit_a_funcs[] = { 1, 1, 0, };
static const struct group_desc jz4760_groups[] = {
- INGENIC_PIN_GROUP("uart0-data", jz4760_uart0_data),
- INGENIC_PIN_GROUP("uart0-hwflow", jz4760_uart0_hwflow),
- INGENIC_PIN_GROUP("uart1-data", jz4760_uart1_data),
- INGENIC_PIN_GROUP("uart1-hwflow", jz4760_uart1_hwflow),
- INGENIC_PIN_GROUP("uart2-data", jz4760_uart2_data),
- INGENIC_PIN_GROUP("uart2-hwflow", jz4760_uart2_hwflow),
- INGENIC_PIN_GROUP("uart3-data", jz4760_uart3_data),
- INGENIC_PIN_GROUP("uart3-hwflow", jz4760_uart3_hwflow),
- INGENIC_PIN_GROUP("mmc0-1bit-a", jz4760_mmc0_1bit_a),
- INGENIC_PIN_GROUP("mmc0-4bit-a", jz4760_mmc0_4bit_a),
- INGENIC_PIN_GROUP("mmc0-1bit-e", jz4760_mmc0_1bit_e),
- INGENIC_PIN_GROUP("mmc0-4bit-e", jz4760_mmc0_4bit_e),
- INGENIC_PIN_GROUP("mmc0-8bit-e", jz4760_mmc0_8bit_e),
- INGENIC_PIN_GROUP("mmc1-1bit-d", jz4760_mmc1_1bit_d),
- INGENIC_PIN_GROUP("mmc1-4bit-d", jz4760_mmc1_4bit_d),
- INGENIC_PIN_GROUP("mmc1-1bit-e", jz4760_mmc1_1bit_e),
- INGENIC_PIN_GROUP("mmc1-4bit-e", jz4760_mmc1_4bit_e),
- INGENIC_PIN_GROUP("mmc1-8bit-e", jz4760_mmc1_8bit_e),
- INGENIC_PIN_GROUP("mmc2-1bit-b", jz4760_mmc2_1bit_b),
- INGENIC_PIN_GROUP("mmc2-4bit-b", jz4760_mmc2_4bit_b),
- INGENIC_PIN_GROUP("mmc2-1bit-e", jz4760_mmc2_1bit_e),
- INGENIC_PIN_GROUP("mmc2-4bit-e", jz4760_mmc2_4bit_e),
- INGENIC_PIN_GROUP("mmc2-8bit-e", jz4760_mmc2_8bit_e),
- INGENIC_PIN_GROUP("nemc-8bit-data", jz4760_nemc_8bit_data),
- INGENIC_PIN_GROUP("nemc-16bit-data", jz4760_nemc_16bit_data),
- INGENIC_PIN_GROUP("nemc-cle-ale", jz4760_nemc_cle_ale),
- INGENIC_PIN_GROUP("nemc-addr", jz4760_nemc_addr),
- INGENIC_PIN_GROUP("nemc-rd-we", jz4760_nemc_rd_we),
- INGENIC_PIN_GROUP("nemc-frd-fwe", jz4760_nemc_frd_fwe),
- INGENIC_PIN_GROUP("nemc-wait", jz4760_nemc_wait),
- INGENIC_PIN_GROUP("nemc-cs1", jz4760_nemc_cs1),
- INGENIC_PIN_GROUP("nemc-cs2", jz4760_nemc_cs2),
- INGENIC_PIN_GROUP("nemc-cs3", jz4760_nemc_cs3),
- INGENIC_PIN_GROUP("nemc-cs4", jz4760_nemc_cs4),
- INGENIC_PIN_GROUP("nemc-cs5", jz4760_nemc_cs5),
- INGENIC_PIN_GROUP("nemc-cs6", jz4760_nemc_cs6),
- INGENIC_PIN_GROUP("i2c0-data", jz4760_i2c0),
- INGENIC_PIN_GROUP("i2c1-data", jz4760_i2c1),
- INGENIC_PIN_GROUP("cim-data", jz4760_cim),
- INGENIC_PIN_GROUP("lcd-24bit", jz4760_lcd_24bit),
+ INGENIC_PIN_GROUP("uart0-data", jz4760_uart0_data, 0),
+ INGENIC_PIN_GROUP("uart0-hwflow", jz4760_uart0_hwflow, 0),
+ INGENIC_PIN_GROUP("uart1-data", jz4760_uart1_data, 0),
+ INGENIC_PIN_GROUP("uart1-hwflow", jz4760_uart1_hwflow, 0),
+ INGENIC_PIN_GROUP("uart2-data", jz4760_uart2_data, 0),
+ INGENIC_PIN_GROUP("uart2-hwflow", jz4760_uart2_hwflow, 0),
+ INGENIC_PIN_GROUP_FUNCS("uart3-data", jz4760_uart3_data,
+ jz4760_uart3_data_funcs),
+ INGENIC_PIN_GROUP("uart3-hwflow", jz4760_uart3_hwflow, 0),
+ INGENIC_PIN_GROUP_FUNCS("mmc0-1bit-a", jz4760_mmc0_1bit_a,
+ jz4760_mmc0_1bit_a_funcs),
+ INGENIC_PIN_GROUP("mmc0-4bit-a", jz4760_mmc0_4bit_a, 1),
+ INGENIC_PIN_GROUP("mmc0-1bit-e", jz4760_mmc0_1bit_e, 0),
+ INGENIC_PIN_GROUP("mmc0-4bit-e", jz4760_mmc0_4bit_e, 0),
+ INGENIC_PIN_GROUP("mmc0-8bit-e", jz4760_mmc0_8bit_e, 0),
+ INGENIC_PIN_GROUP("mmc1-1bit-d", jz4760_mmc1_1bit_d, 0),
+ INGENIC_PIN_GROUP("mmc1-4bit-d", jz4760_mmc1_4bit_d, 0),
+ INGENIC_PIN_GROUP("mmc1-1bit-e", jz4760_mmc1_1bit_e, 1),
+ INGENIC_PIN_GROUP("mmc1-4bit-e", jz4760_mmc1_4bit_e, 1),
+ INGENIC_PIN_GROUP("mmc1-8bit-e", jz4760_mmc1_8bit_e, 1),
+ INGENIC_PIN_GROUP("mmc2-1bit-b", jz4760_mmc2_1bit_b, 0),
+ INGENIC_PIN_GROUP("mmc2-4bit-b", jz4760_mmc2_4bit_b, 0),
+ INGENIC_PIN_GROUP("mmc2-1bit-e", jz4760_mmc2_1bit_e, 2),
+ INGENIC_PIN_GROUP("mmc2-4bit-e", jz4760_mmc2_4bit_e, 2),
+ INGENIC_PIN_GROUP("mmc2-8bit-e", jz4760_mmc2_8bit_e, 2),
+ INGENIC_PIN_GROUP("nemc-8bit-data", jz4760_nemc_8bit_data, 0),
+ INGENIC_PIN_GROUP("nemc-16bit-data", jz4760_nemc_16bit_data, 0),
+ INGENIC_PIN_GROUP("nemc-cle-ale", jz4760_nemc_cle_ale, 0),
+ INGENIC_PIN_GROUP("nemc-addr", jz4760_nemc_addr, 0),
+ INGENIC_PIN_GROUP("nemc-rd-we", jz4760_nemc_rd_we, 0),
+ INGENIC_PIN_GROUP("nemc-frd-fwe", jz4760_nemc_frd_fwe, 0),
+ INGENIC_PIN_GROUP("nemc-wait", jz4760_nemc_wait, 0),
+ INGENIC_PIN_GROUP("nemc-cs1", jz4760_nemc_cs1, 0),
+ INGENIC_PIN_GROUP("nemc-cs2", jz4760_nemc_cs2, 0),
+ INGENIC_PIN_GROUP("nemc-cs3", jz4760_nemc_cs3, 0),
+ INGENIC_PIN_GROUP("nemc-cs4", jz4760_nemc_cs4, 0),
+ INGENIC_PIN_GROUP("nemc-cs5", jz4760_nemc_cs5, 0),
+ INGENIC_PIN_GROUP("nemc-cs6", jz4760_nemc_cs6, 0),
+ INGENIC_PIN_GROUP("i2c0-data", jz4760_i2c0, 0),
+ INGENIC_PIN_GROUP("i2c1-data", jz4760_i2c1, 0),
+ INGENIC_PIN_GROUP("cim-data", jz4760_cim, 0),
+ INGENIC_PIN_GROUP("lcd-24bit", jz4760_lcd_24bit, 0),
{ "lcd-no-pins", },
- INGENIC_PIN_GROUP("pwm0", jz4760_pwm_pwm0),
- INGENIC_PIN_GROUP("pwm1", jz4760_pwm_pwm1),
- INGENIC_PIN_GROUP("pwm2", jz4760_pwm_pwm2),
- INGENIC_PIN_GROUP("pwm3", jz4760_pwm_pwm3),
- INGENIC_PIN_GROUP("pwm4", jz4760_pwm_pwm4),
- INGENIC_PIN_GROUP("pwm5", jz4760_pwm_pwm5),
- INGENIC_PIN_GROUP("pwm6", jz4760_pwm_pwm6),
- INGENIC_PIN_GROUP("pwm7", jz4760_pwm_pwm7),
+ INGENIC_PIN_GROUP("pwm0", jz4760_pwm_pwm0, 0),
+ INGENIC_PIN_GROUP("pwm1", jz4760_pwm_pwm1, 0),
+ INGENIC_PIN_GROUP("pwm2", jz4760_pwm_pwm2, 0),
+ INGENIC_PIN_GROUP("pwm3", jz4760_pwm_pwm3, 0),
+ INGENIC_PIN_GROUP("pwm4", jz4760_pwm_pwm4, 0),
+ INGENIC_PIN_GROUP("pwm5", jz4760_pwm_pwm5, 0),
+ INGENIC_PIN_GROUP("pwm6", jz4760_pwm_pwm6, 0),
+ INGENIC_PIN_GROUP("pwm7", jz4760_pwm_pwm7, 0),
};
static const char *jz4760_uart0_groups[] = { "uart0-data", "uart0-hwflow", };
@@ -715,6 +626,10 @@ static int jz4770_cim_8bit_pins[] = {
static int jz4770_cim_12bit_pins[] = {
0x32, 0x33, 0xb0, 0xb1,
};
+static int jz4770_lcd_8bit_pins[] = {
+ 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x4c, 0x4d,
+ 0x48, 0x49, 0x52, 0x53,
+};
static int jz4770_lcd_24bit_pins[] = {
0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
@@ -735,200 +650,104 @@ static int jz4770_mac_rmii_pins[] = {
static int jz4770_mac_mii_pins[] = { 0xa7, 0xaf, };
static int jz4770_otg_pins[] = { 0x8a, };
-static int jz4770_uart0_data_funcs[] = { 0, 0, };
-static int jz4770_uart0_hwflow_funcs[] = { 0, 0, };
-static int jz4770_uart1_data_funcs[] = { 0, 0, };
-static int jz4770_uart1_hwflow_funcs[] = { 0, 0, };
-static int jz4770_uart2_data_funcs[] = { 0, 0, };
-static int jz4770_uart2_hwflow_funcs[] = { 0, 0, };
-static int jz4770_uart3_data_funcs[] = { 0, 1, };
-static int jz4770_uart3_hwflow_funcs[] = { 0, 0, };
-static int jz4770_ssi0_dt_a_funcs[] = { 2, };
-static int jz4770_ssi0_dt_b_funcs[] = { 1, };
-static int jz4770_ssi0_dt_d_funcs[] = { 1, };
-static int jz4770_ssi0_dt_e_funcs[] = { 0, };
-static int jz4770_ssi0_dr_a_funcs[] = { 1, };
-static int jz4770_ssi0_dr_b_funcs[] = { 1, };
-static int jz4770_ssi0_dr_d_funcs[] = { 1, };
-static int jz4770_ssi0_dr_e_funcs[] = { 0, };
-static int jz4770_ssi0_clk_a_funcs[] = { 2, };
-static int jz4770_ssi0_clk_b_funcs[] = { 1, };
-static int jz4770_ssi0_clk_d_funcs[] = { 1, };
-static int jz4770_ssi0_clk_e_funcs[] = { 0, };
-static int jz4770_ssi0_gpc_b_funcs[] = { 1, };
-static int jz4770_ssi0_gpc_d_funcs[] = { 1, };
-static int jz4770_ssi0_gpc_e_funcs[] = { 0, };
-static int jz4770_ssi0_ce0_a_funcs[] = { 2, };
-static int jz4770_ssi0_ce0_b_funcs[] = { 1, };
-static int jz4770_ssi0_ce0_d_funcs[] = { 1, };
-static int jz4770_ssi0_ce0_e_funcs[] = { 0, };
-static int jz4770_ssi0_ce1_b_funcs[] = { 1, };
-static int jz4770_ssi0_ce1_d_funcs[] = { 1, };
-static int jz4770_ssi0_ce1_e_funcs[] = { 0, };
-static int jz4770_ssi1_dt_b_funcs[] = { 2, };
-static int jz4770_ssi1_dt_d_funcs[] = { 2, };
-static int jz4770_ssi1_dt_e_funcs[] = { 1, };
-static int jz4770_ssi1_dr_b_funcs[] = { 2, };
-static int jz4770_ssi1_dr_d_funcs[] = { 2, };
-static int jz4770_ssi1_dr_e_funcs[] = { 1, };
-static int jz4770_ssi1_clk_b_funcs[] = { 2, };
-static int jz4770_ssi1_clk_d_funcs[] = { 2, };
-static int jz4770_ssi1_clk_e_funcs[] = { 1, };
-static int jz4770_ssi1_gpc_b_funcs[] = { 2, };
-static int jz4770_ssi1_gpc_d_funcs[] = { 2, };
-static int jz4770_ssi1_gpc_e_funcs[] = { 1, };
-static int jz4770_ssi1_ce0_b_funcs[] = { 2, };
-static int jz4770_ssi1_ce0_d_funcs[] = { 2, };
-static int jz4770_ssi1_ce0_e_funcs[] = { 1, };
-static int jz4770_ssi1_ce1_b_funcs[] = { 2, };
-static int jz4770_ssi1_ce1_d_funcs[] = { 2, };
-static int jz4770_ssi1_ce1_e_funcs[] = { 1, };
-static int jz4770_mmc0_1bit_a_funcs[] = { 1, 1, 0, };
-static int jz4770_mmc0_4bit_a_funcs[] = { 1, 1, 1, };
-static int jz4770_mmc0_1bit_e_funcs[] = { 0, 0, 0, };
-static int jz4770_mmc0_4bit_e_funcs[] = { 0, 0, 0, };
-static int jz4770_mmc0_8bit_e_funcs[] = { 0, 0, 0, 0, };
-static int jz4770_mmc1_1bit_d_funcs[] = { 0, 0, 0, };
-static int jz4770_mmc1_4bit_d_funcs[] = { 0, 0, 0, };
-static int jz4770_mmc1_1bit_e_funcs[] = { 1, 1, 1, };
-static int jz4770_mmc1_4bit_e_funcs[] = { 1, 1, 1, };
-static int jz4770_mmc1_8bit_e_funcs[] = { 1, 1, 1, 1, };
-static int jz4770_mmc2_1bit_b_funcs[] = { 0, 0, 0, };
-static int jz4770_mmc2_4bit_b_funcs[] = { 0, 0, 0, };
-static int jz4770_mmc2_1bit_e_funcs[] = { 2, 2, 2, };
-static int jz4770_mmc2_4bit_e_funcs[] = { 2, 2, 2, };
-static int jz4770_mmc2_8bit_e_funcs[] = { 2, 2, 2, 2, };
-static int jz4770_nemc_8bit_data_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, };
-static int jz4770_nemc_16bit_data_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, };
-static int jz4770_nemc_cle_ale_funcs[] = { 0, 0, };
-static int jz4770_nemc_addr_funcs[] = { 0, 0, 0, 0, };
-static int jz4770_nemc_rd_we_funcs[] = { 0, 0, };
-static int jz4770_nemc_frd_fwe_funcs[] = { 0, 0, };
-static int jz4770_nemc_wait_funcs[] = { 0, };
-static int jz4770_nemc_cs1_funcs[] = { 0, };
-static int jz4770_nemc_cs2_funcs[] = { 0, };
-static int jz4770_nemc_cs3_funcs[] = { 0, };
-static int jz4770_nemc_cs4_funcs[] = { 0, };
-static int jz4770_nemc_cs5_funcs[] = { 0, };
-static int jz4770_nemc_cs6_funcs[] = { 0, };
-static int jz4770_i2c0_funcs[] = { 0, 0, };
-static int jz4770_i2c1_funcs[] = { 0, 0, };
-static int jz4770_i2c2_funcs[] = { 2, 2, };
-static int jz4770_cim_8bit_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, };
-static int jz4770_cim_12bit_funcs[] = { 0, 0, 0, 0, };
-static int jz4770_lcd_24bit_funcs[] = {
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0,
-};
-static int jz4770_pwm_pwm0_funcs[] = { 0, };
-static int jz4770_pwm_pwm1_funcs[] = { 0, };
-static int jz4770_pwm_pwm2_funcs[] = { 0, };
-static int jz4770_pwm_pwm3_funcs[] = { 0, };
-static int jz4770_pwm_pwm4_funcs[] = { 0, };
-static int jz4770_pwm_pwm5_funcs[] = { 0, };
-static int jz4770_pwm_pwm6_funcs[] = { 0, };
-static int jz4770_pwm_pwm7_funcs[] = { 0, };
-static int jz4770_mac_rmii_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, };
-static int jz4770_mac_mii_funcs[] = { 0, 0, };
-static int jz4770_otg_funcs[] = { 0, };
-
static const struct group_desc jz4770_groups[] = {
- INGENIC_PIN_GROUP("uart0-data", jz4770_uart0_data),
- INGENIC_PIN_GROUP("uart0-hwflow", jz4770_uart0_hwflow),
- INGENIC_PIN_GROUP("uart1-data", jz4770_uart1_data),
- INGENIC_PIN_GROUP("uart1-hwflow", jz4770_uart1_hwflow),
- INGENIC_PIN_GROUP("uart2-data", jz4770_uart2_data),
- INGENIC_PIN_GROUP("uart2-hwflow", jz4770_uart2_hwflow),
- INGENIC_PIN_GROUP("uart3-data", jz4770_uart3_data),
- INGENIC_PIN_GROUP("uart3-hwflow", jz4770_uart3_hwflow),
- INGENIC_PIN_GROUP("ssi0-dt-a", jz4770_ssi0_dt_a),
- INGENIC_PIN_GROUP("ssi0-dt-b", jz4770_ssi0_dt_b),
- INGENIC_PIN_GROUP("ssi0-dt-d", jz4770_ssi0_dt_d),
- INGENIC_PIN_GROUP("ssi0-dt-e", jz4770_ssi0_dt_e),
- INGENIC_PIN_GROUP("ssi0-dr-a", jz4770_ssi0_dr_a),
- INGENIC_PIN_GROUP("ssi0-dr-b", jz4770_ssi0_dr_b),
- INGENIC_PIN_GROUP("ssi0-dr-d", jz4770_ssi0_dr_d),
- INGENIC_PIN_GROUP("ssi0-dr-e", jz4770_ssi0_dr_e),
- INGENIC_PIN_GROUP("ssi0-clk-a", jz4770_ssi0_clk_a),
- INGENIC_PIN_GROUP("ssi0-clk-b", jz4770_ssi0_clk_b),
- INGENIC_PIN_GROUP("ssi0-clk-d", jz4770_ssi0_clk_d),
- INGENIC_PIN_GROUP("ssi0-clk-e", jz4770_ssi0_clk_e),
- INGENIC_PIN_GROUP("ssi0-gpc-b", jz4770_ssi0_gpc_b),
- INGENIC_PIN_GROUP("ssi0-gpc-d", jz4770_ssi0_gpc_d),
- INGENIC_PIN_GROUP("ssi0-gpc-e", jz4770_ssi0_gpc_e),
- INGENIC_PIN_GROUP("ssi0-ce0-a", jz4770_ssi0_ce0_a),
- INGENIC_PIN_GROUP("ssi0-ce0-b", jz4770_ssi0_ce0_b),
- INGENIC_PIN_GROUP("ssi0-ce0-d", jz4770_ssi0_ce0_d),
- INGENIC_PIN_GROUP("ssi0-ce0-e", jz4770_ssi0_ce0_e),
- INGENIC_PIN_GROUP("ssi0-ce1-b", jz4770_ssi0_ce1_b),
- INGENIC_PIN_GROUP("ssi0-ce1-d", jz4770_ssi0_ce1_d),
- INGENIC_PIN_GROUP("ssi0-ce1-e", jz4770_ssi0_ce1_e),
- INGENIC_PIN_GROUP("ssi1-dt-b", jz4770_ssi1_dt_b),
- INGENIC_PIN_GROUP("ssi1-dt-d", jz4770_ssi1_dt_d),
- INGENIC_PIN_GROUP("ssi1-dt-e", jz4770_ssi1_dt_e),
- INGENIC_PIN_GROUP("ssi1-dr-b", jz4770_ssi1_dr_b),
- INGENIC_PIN_GROUP("ssi1-dr-d", jz4770_ssi1_dr_d),
- INGENIC_PIN_GROUP("ssi1-dr-e", jz4770_ssi1_dr_e),
- INGENIC_PIN_GROUP("ssi1-clk-b", jz4770_ssi1_clk_b),
- INGENIC_PIN_GROUP("ssi1-clk-d", jz4770_ssi1_clk_d),
- INGENIC_PIN_GROUP("ssi1-clk-e", jz4770_ssi1_clk_e),
- INGENIC_PIN_GROUP("ssi1-gpc-b", jz4770_ssi1_gpc_b),
- INGENIC_PIN_GROUP("ssi1-gpc-d", jz4770_ssi1_gpc_d),
- INGENIC_PIN_GROUP("ssi1-gpc-e", jz4770_ssi1_gpc_e),
- INGENIC_PIN_GROUP("ssi1-ce0-b", jz4770_ssi1_ce0_b),
- INGENIC_PIN_GROUP("ssi1-ce0-d", jz4770_ssi1_ce0_d),
- INGENIC_PIN_GROUP("ssi1-ce0-e", jz4770_ssi1_ce0_e),
- INGENIC_PIN_GROUP("ssi1-ce1-b", jz4770_ssi1_ce1_b),
- INGENIC_PIN_GROUP("ssi1-ce1-d", jz4770_ssi1_ce1_d),
- INGENIC_PIN_GROUP("ssi1-ce1-e", jz4770_ssi1_ce1_e),
- INGENIC_PIN_GROUP("mmc0-1bit-a", jz4770_mmc0_1bit_a),
- INGENIC_PIN_GROUP("mmc0-4bit-a", jz4770_mmc0_4bit_a),
- INGENIC_PIN_GROUP("mmc0-1bit-e", jz4770_mmc0_1bit_e),
- INGENIC_PIN_GROUP("mmc0-4bit-e", jz4770_mmc0_4bit_e),
- INGENIC_PIN_GROUP("mmc0-8bit-e", jz4770_mmc0_8bit_e),
- INGENIC_PIN_GROUP("mmc1-1bit-d", jz4770_mmc1_1bit_d),
- INGENIC_PIN_GROUP("mmc1-4bit-d", jz4770_mmc1_4bit_d),
- INGENIC_PIN_GROUP("mmc1-1bit-e", jz4770_mmc1_1bit_e),
- INGENIC_PIN_GROUP("mmc1-4bit-e", jz4770_mmc1_4bit_e),
- INGENIC_PIN_GROUP("mmc1-8bit-e", jz4770_mmc1_8bit_e),
- INGENIC_PIN_GROUP("mmc2-1bit-b", jz4770_mmc2_1bit_b),
- INGENIC_PIN_GROUP("mmc2-4bit-b", jz4770_mmc2_4bit_b),
- INGENIC_PIN_GROUP("mmc2-1bit-e", jz4770_mmc2_1bit_e),
- INGENIC_PIN_GROUP("mmc2-4bit-e", jz4770_mmc2_4bit_e),
- INGENIC_PIN_GROUP("mmc2-8bit-e", jz4770_mmc2_8bit_e),
- INGENIC_PIN_GROUP("nemc-8bit-data", jz4770_nemc_8bit_data),
- INGENIC_PIN_GROUP("nemc-16bit-data", jz4770_nemc_16bit_data),
- INGENIC_PIN_GROUP("nemc-cle-ale", jz4770_nemc_cle_ale),
- INGENIC_PIN_GROUP("nemc-addr", jz4770_nemc_addr),
- INGENIC_PIN_GROUP("nemc-rd-we", jz4770_nemc_rd_we),
- INGENIC_PIN_GROUP("nemc-frd-fwe", jz4770_nemc_frd_fwe),
- INGENIC_PIN_GROUP("nemc-wait", jz4770_nemc_wait),
- INGENIC_PIN_GROUP("nemc-cs1", jz4770_nemc_cs1),
- INGENIC_PIN_GROUP("nemc-cs2", jz4770_nemc_cs2),
- INGENIC_PIN_GROUP("nemc-cs3", jz4770_nemc_cs3),
- INGENIC_PIN_GROUP("nemc-cs4", jz4770_nemc_cs4),
- INGENIC_PIN_GROUP("nemc-cs5", jz4770_nemc_cs5),
- INGENIC_PIN_GROUP("nemc-cs6", jz4770_nemc_cs6),
- INGENIC_PIN_GROUP("i2c0-data", jz4770_i2c0),
- INGENIC_PIN_GROUP("i2c1-data", jz4770_i2c1),
- INGENIC_PIN_GROUP("i2c2-data", jz4770_i2c2),
- INGENIC_PIN_GROUP("cim-data-8bit", jz4770_cim_8bit),
- INGENIC_PIN_GROUP("cim-data-12bit", jz4770_cim_12bit),
- INGENIC_PIN_GROUP("lcd-24bit", jz4770_lcd_24bit),
+ INGENIC_PIN_GROUP("uart0-data", jz4770_uart0_data, 0),
+ INGENIC_PIN_GROUP("uart0-hwflow", jz4770_uart0_hwflow, 0),
+ INGENIC_PIN_GROUP("uart1-data", jz4770_uart1_data, 0),
+ INGENIC_PIN_GROUP("uart1-hwflow", jz4770_uart1_hwflow, 0),
+ INGENIC_PIN_GROUP("uart2-data", jz4770_uart2_data, 0),
+ INGENIC_PIN_GROUP("uart2-hwflow", jz4770_uart2_hwflow, 0),
+ INGENIC_PIN_GROUP_FUNCS("uart3-data", jz4770_uart3_data,
+ jz4760_uart3_data_funcs),
+ INGENIC_PIN_GROUP("uart3-hwflow", jz4770_uart3_hwflow, 0),
+ INGENIC_PIN_GROUP("ssi0-dt-a", jz4770_ssi0_dt_a, 2),
+ INGENIC_PIN_GROUP("ssi0-dt-b", jz4770_ssi0_dt_b, 1),
+ INGENIC_PIN_GROUP("ssi0-dt-d", jz4770_ssi0_dt_d, 1),
+ INGENIC_PIN_GROUP("ssi0-dt-e", jz4770_ssi0_dt_e, 0),
+ INGENIC_PIN_GROUP("ssi0-dr-a", jz4770_ssi0_dr_a, 1),
+ INGENIC_PIN_GROUP("ssi0-dr-b", jz4770_ssi0_dr_b, 1),
+ INGENIC_PIN_GROUP("ssi0-dr-d", jz4770_ssi0_dr_d, 1),
+ INGENIC_PIN_GROUP("ssi0-dr-e", jz4770_ssi0_dr_e, 0),
+ INGENIC_PIN_GROUP("ssi0-clk-a", jz4770_ssi0_clk_a, 2),
+ INGENIC_PIN_GROUP("ssi0-clk-b", jz4770_ssi0_clk_b, 1),
+ INGENIC_PIN_GROUP("ssi0-clk-d", jz4770_ssi0_clk_d, 1),
+ INGENIC_PIN_GROUP("ssi0-clk-e", jz4770_ssi0_clk_e, 0),
+ INGENIC_PIN_GROUP("ssi0-gpc-b", jz4770_ssi0_gpc_b, 1),
+ INGENIC_PIN_GROUP("ssi0-gpc-d", jz4770_ssi0_gpc_d, 1),
+ INGENIC_PIN_GROUP("ssi0-gpc-e", jz4770_ssi0_gpc_e, 0),
+ INGENIC_PIN_GROUP("ssi0-ce0-a", jz4770_ssi0_ce0_a, 2),
+ INGENIC_PIN_GROUP("ssi0-ce0-b", jz4770_ssi0_ce0_b, 1),
+ INGENIC_PIN_GROUP("ssi0-ce0-d", jz4770_ssi0_ce0_d, 1),
+ INGENIC_PIN_GROUP("ssi0-ce0-e", jz4770_ssi0_ce0_e, 0),
+ INGENIC_PIN_GROUP("ssi0-ce1-b", jz4770_ssi0_ce1_b, 1),
+ INGENIC_PIN_GROUP("ssi0-ce1-d", jz4770_ssi0_ce1_d, 1),
+ INGENIC_PIN_GROUP("ssi0-ce1-e", jz4770_ssi0_ce1_e, 0),
+ INGENIC_PIN_GROUP("ssi1-dt-b", jz4770_ssi1_dt_b, 2),
+ INGENIC_PIN_GROUP("ssi1-dt-d", jz4770_ssi1_dt_d, 2),
+ INGENIC_PIN_GROUP("ssi1-dt-e", jz4770_ssi1_dt_e, 1),
+ INGENIC_PIN_GROUP("ssi1-dr-b", jz4770_ssi1_dr_b, 2),
+ INGENIC_PIN_GROUP("ssi1-dr-d", jz4770_ssi1_dr_d, 2),
+ INGENIC_PIN_GROUP("ssi1-dr-e", jz4770_ssi1_dr_e, 1),
+ INGENIC_PIN_GROUP("ssi1-clk-b", jz4770_ssi1_clk_b, 2),
+ INGENIC_PIN_GROUP("ssi1-clk-d", jz4770_ssi1_clk_d, 2),
+ INGENIC_PIN_GROUP("ssi1-clk-e", jz4770_ssi1_clk_e, 1),
+ INGENIC_PIN_GROUP("ssi1-gpc-b", jz4770_ssi1_gpc_b, 2),
+ INGENIC_PIN_GROUP("ssi1-gpc-d", jz4770_ssi1_gpc_d, 2),
+ INGENIC_PIN_GROUP("ssi1-gpc-e", jz4770_ssi1_gpc_e, 1),
+ INGENIC_PIN_GROUP("ssi1-ce0-b", jz4770_ssi1_ce0_b, 2),
+ INGENIC_PIN_GROUP("ssi1-ce0-d", jz4770_ssi1_ce0_d, 2),
+ INGENIC_PIN_GROUP("ssi1-ce0-e", jz4770_ssi1_ce0_e, 1),
+ INGENIC_PIN_GROUP("ssi1-ce1-b", jz4770_ssi1_ce1_b, 2),
+ INGENIC_PIN_GROUP("ssi1-ce1-d", jz4770_ssi1_ce1_d, 2),
+ INGENIC_PIN_GROUP("ssi1-ce1-e", jz4770_ssi1_ce1_e, 1),
+ INGENIC_PIN_GROUP_FUNCS("mmc0-1bit-a", jz4770_mmc0_1bit_a,
+ jz4760_mmc0_1bit_a_funcs),
+ INGENIC_PIN_GROUP("mmc0-4bit-a", jz4770_mmc0_4bit_a, 1),
+ INGENIC_PIN_GROUP("mmc0-1bit-e", jz4770_mmc0_1bit_e, 0),
+ INGENIC_PIN_GROUP("mmc0-4bit-e", jz4770_mmc0_4bit_e, 0),
+ INGENIC_PIN_GROUP("mmc0-8bit-e", jz4770_mmc0_8bit_e, 0),
+ INGENIC_PIN_GROUP("mmc1-1bit-d", jz4770_mmc1_1bit_d, 0),
+ INGENIC_PIN_GROUP("mmc1-4bit-d", jz4770_mmc1_4bit_d, 0),
+ INGENIC_PIN_GROUP("mmc1-1bit-e", jz4770_mmc1_1bit_e, 1),
+ INGENIC_PIN_GROUP("mmc1-4bit-e", jz4770_mmc1_4bit_e, 1),
+ INGENIC_PIN_GROUP("mmc1-8bit-e", jz4770_mmc1_8bit_e, 1),
+ INGENIC_PIN_GROUP("mmc2-1bit-b", jz4770_mmc2_1bit_b, 0),
+ INGENIC_PIN_GROUP("mmc2-4bit-b", jz4770_mmc2_4bit_b, 0),
+ INGENIC_PIN_GROUP("mmc2-1bit-e", jz4770_mmc2_1bit_e, 2),
+ INGENIC_PIN_GROUP("mmc2-4bit-e", jz4770_mmc2_4bit_e, 2),
+ INGENIC_PIN_GROUP("mmc2-8bit-e", jz4770_mmc2_8bit_e, 2),
+ INGENIC_PIN_GROUP("nemc-8bit-data", jz4770_nemc_8bit_data, 0),
+ INGENIC_PIN_GROUP("nemc-16bit-data", jz4770_nemc_16bit_data, 0),
+ INGENIC_PIN_GROUP("nemc-cle-ale", jz4770_nemc_cle_ale, 0),
+ INGENIC_PIN_GROUP("nemc-addr", jz4770_nemc_addr, 0),
+ INGENIC_PIN_GROUP("nemc-rd-we", jz4770_nemc_rd_we, 0),
+ INGENIC_PIN_GROUP("nemc-frd-fwe", jz4770_nemc_frd_fwe, 0),
+ INGENIC_PIN_GROUP("nemc-wait", jz4770_nemc_wait, 0),
+ INGENIC_PIN_GROUP("nemc-cs1", jz4770_nemc_cs1, 0),
+ INGENIC_PIN_GROUP("nemc-cs2", jz4770_nemc_cs2, 0),
+ INGENIC_PIN_GROUP("nemc-cs3", jz4770_nemc_cs3, 0),
+ INGENIC_PIN_GROUP("nemc-cs4", jz4770_nemc_cs4, 0),
+ INGENIC_PIN_GROUP("nemc-cs5", jz4770_nemc_cs5, 0),
+ INGENIC_PIN_GROUP("nemc-cs6", jz4770_nemc_cs6, 0),
+ INGENIC_PIN_GROUP("i2c0-data", jz4770_i2c0, 0),
+ INGENIC_PIN_GROUP("i2c1-data", jz4770_i2c1, 0),
+ INGENIC_PIN_GROUP("i2c2-data", jz4770_i2c2, 2),
+ INGENIC_PIN_GROUP("cim-data-8bit", jz4770_cim_8bit, 0),
+ INGENIC_PIN_GROUP("cim-data-12bit", jz4770_cim_12bit, 0),
+ INGENIC_PIN_GROUP("lcd-8bit", jz4770_lcd_8bit, 0),
+ INGENIC_PIN_GROUP("lcd-24bit", jz4770_lcd_24bit, 0),
{ "lcd-no-pins", },
- INGENIC_PIN_GROUP("pwm0", jz4770_pwm_pwm0),
- INGENIC_PIN_GROUP("pwm1", jz4770_pwm_pwm1),
- INGENIC_PIN_GROUP("pwm2", jz4770_pwm_pwm2),
- INGENIC_PIN_GROUP("pwm3", jz4770_pwm_pwm3),
- INGENIC_PIN_GROUP("pwm4", jz4770_pwm_pwm4),
- INGENIC_PIN_GROUP("pwm5", jz4770_pwm_pwm5),
- INGENIC_PIN_GROUP("pwm6", jz4770_pwm_pwm6),
- INGENIC_PIN_GROUP("pwm7", jz4770_pwm_pwm7),
- INGENIC_PIN_GROUP("mac-rmii", jz4770_mac_rmii),
- INGENIC_PIN_GROUP("mac-mii", jz4770_mac_mii),
- INGENIC_PIN_GROUP("otg-vbus", jz4770_otg),
+ INGENIC_PIN_GROUP("pwm0", jz4770_pwm_pwm0, 0),
+ INGENIC_PIN_GROUP("pwm1", jz4770_pwm_pwm1, 0),
+ INGENIC_PIN_GROUP("pwm2", jz4770_pwm_pwm2, 0),
+ INGENIC_PIN_GROUP("pwm3", jz4770_pwm_pwm3, 0),
+ INGENIC_PIN_GROUP("pwm4", jz4770_pwm_pwm4, 0),
+ INGENIC_PIN_GROUP("pwm5", jz4770_pwm_pwm5, 0),
+ INGENIC_PIN_GROUP("pwm6", jz4770_pwm_pwm6, 0),
+ INGENIC_PIN_GROUP("pwm7", jz4770_pwm_pwm7, 0),
+ INGENIC_PIN_GROUP("mac-rmii", jz4770_mac_rmii, 0),
+ INGENIC_PIN_GROUP("mac-mii", jz4770_mac_mii, 0),
+ INGENIC_PIN_GROUP("otg-vbus", jz4770_otg, 0),
};
static const char *jz4770_uart0_groups[] = { "uart0-data", "uart0-hwflow", };
@@ -977,7 +796,9 @@ static const char *jz4770_i2c0_groups[] = { "i2c0-data", };
static const char *jz4770_i2c1_groups[] = { "i2c1-data", };
static const char *jz4770_i2c2_groups[] = { "i2c2-data", };
static const char *jz4770_cim_groups[] = { "cim-data-8bit", "cim-data-12bit", };
-static const char *jz4770_lcd_groups[] = { "lcd-24bit", "lcd-no-pins", };
+static const char *jz4770_lcd_groups[] = {
+ "lcd-8bit", "lcd-24bit", "lcd-no-pins",
+};
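
Note that this part of the patch is not purely mechanical: the jz4770 also gains a new lcd-8bit group, with the pin table added earlier in the hunk, a group entry added to jz4770_groups, and the group name appended here to jz4770_lcd_groups.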
static const char *jz4770_pwm0_groups[] = { "pwm0", };
static const char *jz4770_pwm1_groups[] = { "pwm1", };
static const char *jz4770_pwm2_groups[] = { "pwm2", };
@@ -1090,156 +911,115 @@ static int jz4780_i2s_clk_rx_pins[] = { 0x88, 0x89, };
static int jz4780_i2s_sysclk_pins[] = { 0x85, };
static int jz4780_hdmi_ddc_pins[] = { 0xb9, 0xb8, };
-static int jz4780_uart2_data_funcs[] = { 1, 1, };
-static int jz4780_uart2_hwflow_funcs[] = { 1, 1, };
-static int jz4780_uart4_data_funcs[] = { 2, 2, };
-static int jz4780_ssi0_dt_a_19_funcs[] = { 2, };
-static int jz4780_ssi0_dt_a_21_funcs[] = { 2, };
-static int jz4780_ssi0_dt_a_28_funcs[] = { 2, };
-static int jz4780_ssi0_dt_b_funcs[] = { 1, };
-static int jz4780_ssi0_dt_d_funcs[] = { 1, };
-static int jz4780_ssi0_dr_a_20_funcs[] = { 2, };
-static int jz4780_ssi0_dr_a_27_funcs[] = { 2, };
-static int jz4780_ssi0_dr_b_funcs[] = { 1, };
-static int jz4780_ssi0_dr_d_funcs[] = { 1, };
-static int jz4780_ssi0_clk_a_funcs[] = { 2, };
-static int jz4780_ssi0_clk_b_5_funcs[] = { 1, };
-static int jz4780_ssi0_clk_b_28_funcs[] = { 1, };
-static int jz4780_ssi0_clk_d_funcs[] = { 1, };
-static int jz4780_ssi0_gpc_b_funcs[] = { 1, };
-static int jz4780_ssi0_gpc_d_funcs[] = { 1, };
-static int jz4780_ssi0_ce0_a_23_funcs[] = { 2, };
-static int jz4780_ssi0_ce0_a_25_funcs[] = { 2, };
-static int jz4780_ssi0_ce0_b_funcs[] = { 1, };
-static int jz4780_ssi0_ce0_d_funcs[] = { 1, };
-static int jz4780_ssi0_ce1_b_funcs[] = { 1, };
-static int jz4780_ssi0_ce1_d_funcs[] = { 1, };
-static int jz4780_ssi1_dt_b_funcs[] = { 2, };
-static int jz4780_ssi1_dt_d_funcs[] = { 2, };
-static int jz4780_ssi1_dr_b_funcs[] = { 2, };
-static int jz4780_ssi1_dr_d_funcs[] = { 2, };
-static int jz4780_ssi1_clk_b_funcs[] = { 2, };
-static int jz4780_ssi1_clk_d_funcs[] = { 2, };
-static int jz4780_ssi1_gpc_b_funcs[] = { 2, };
-static int jz4780_ssi1_gpc_d_funcs[] = { 2, };
-static int jz4780_ssi1_ce0_b_funcs[] = { 2, };
-static int jz4780_ssi1_ce0_d_funcs[] = { 2, };
-static int jz4780_ssi1_ce1_b_funcs[] = { 2, };
-static int jz4780_ssi1_ce1_d_funcs[] = { 2, };
-static int jz4780_mmc0_8bit_a_funcs[] = { 1, 1, 1, 1, 1, };
-static int jz4780_i2c3_funcs[] = { 1, 1, };
-static int jz4780_i2c4_e_funcs[] = { 1, 1, };
-static int jz4780_i2c4_f_funcs[] = { 1, 1, };
-static int jz4780_i2s_data_tx_funcs[] = { 0, };
-static int jz4780_i2s_data_rx_funcs[] = { 0, };
-static int jz4780_i2s_clk_txrx_funcs[] = { 1, 0, };
-static int jz4780_i2s_clk_rx_funcs[] = { 1, 1, };
-static int jz4780_i2s_sysclk_funcs[] = { 2, };
-static int jz4780_hdmi_ddc_funcs[] = { 0, 0, };
+static u8 jz4780_i2s_clk_txrx_funcs[] = { 1, 0, };
static const struct group_desc jz4780_groups[] = {
- INGENIC_PIN_GROUP("uart0-data", jz4770_uart0_data),
- INGENIC_PIN_GROUP("uart0-hwflow", jz4770_uart0_hwflow),
- INGENIC_PIN_GROUP("uart1-data", jz4770_uart1_data),
- INGENIC_PIN_GROUP("uart1-hwflow", jz4770_uart1_hwflow),
- INGENIC_PIN_GROUP("uart2-data", jz4780_uart2_data),
- INGENIC_PIN_GROUP("uart2-hwflow", jz4780_uart2_hwflow),
- INGENIC_PIN_GROUP("uart3-data", jz4770_uart3_data),
- INGENIC_PIN_GROUP("uart3-hwflow", jz4770_uart3_hwflow),
- INGENIC_PIN_GROUP("uart4-data", jz4780_uart4_data),
- INGENIC_PIN_GROUP("ssi0-dt-a-19", jz4780_ssi0_dt_a_19),
- INGENIC_PIN_GROUP("ssi0-dt-a-21", jz4780_ssi0_dt_a_21),
- INGENIC_PIN_GROUP("ssi0-dt-a-28", jz4780_ssi0_dt_a_28),
- INGENIC_PIN_GROUP("ssi0-dt-b", jz4780_ssi0_dt_b),
- INGENIC_PIN_GROUP("ssi0-dt-d", jz4780_ssi0_dt_d),
- INGENIC_PIN_GROUP("ssi0-dt-e", jz4770_ssi0_dt_e),
- INGENIC_PIN_GROUP("ssi0-dr-a-20", jz4780_ssi0_dr_a_20),
- INGENIC_PIN_GROUP("ssi0-dr-a-27", jz4780_ssi0_dr_a_27),
- INGENIC_PIN_GROUP("ssi0-dr-b", jz4780_ssi0_dr_b),
- INGENIC_PIN_GROUP("ssi0-dr-d", jz4780_ssi0_dr_d),
- INGENIC_PIN_GROUP("ssi0-dr-e", jz4770_ssi0_dr_e),
- INGENIC_PIN_GROUP("ssi0-clk-a", jz4780_ssi0_clk_a),
- INGENIC_PIN_GROUP("ssi0-clk-b-5", jz4780_ssi0_clk_b_5),
- INGENIC_PIN_GROUP("ssi0-clk-b-28", jz4780_ssi0_clk_b_28),
- INGENIC_PIN_GROUP("ssi0-clk-d", jz4780_ssi0_clk_d),
- INGENIC_PIN_GROUP("ssi0-clk-e", jz4770_ssi0_clk_e),
- INGENIC_PIN_GROUP("ssi0-gpc-b", jz4780_ssi0_gpc_b),
- INGENIC_PIN_GROUP("ssi0-gpc-d", jz4780_ssi0_gpc_d),
- INGENIC_PIN_GROUP("ssi0-gpc-e", jz4770_ssi0_gpc_e),
- INGENIC_PIN_GROUP("ssi0-ce0-a-23", jz4780_ssi0_ce0_a_23),
- INGENIC_PIN_GROUP("ssi0-ce0-a-25", jz4780_ssi0_ce0_a_25),
- INGENIC_PIN_GROUP("ssi0-ce0-b", jz4780_ssi0_ce0_b),
- INGENIC_PIN_GROUP("ssi0-ce0-d", jz4780_ssi0_ce0_d),
- INGENIC_PIN_GROUP("ssi0-ce0-e", jz4770_ssi0_ce0_e),
- INGENIC_PIN_GROUP("ssi0-ce1-b", jz4780_ssi0_ce1_b),
- INGENIC_PIN_GROUP("ssi0-ce1-d", jz4780_ssi0_ce1_d),
- INGENIC_PIN_GROUP("ssi0-ce1-e", jz4770_ssi0_ce1_e),
- INGENIC_PIN_GROUP("ssi1-dt-b", jz4780_ssi1_dt_b),
- INGENIC_PIN_GROUP("ssi1-dt-d", jz4780_ssi1_dt_d),
- INGENIC_PIN_GROUP("ssi1-dt-e", jz4770_ssi1_dt_e),
- INGENIC_PIN_GROUP("ssi1-dr-b", jz4780_ssi1_dr_b),
- INGENIC_PIN_GROUP("ssi1-dr-d", jz4780_ssi1_dr_d),
- INGENIC_PIN_GROUP("ssi1-dr-e", jz4770_ssi1_dr_e),
- INGENIC_PIN_GROUP("ssi1-clk-b", jz4780_ssi1_clk_b),
- INGENIC_PIN_GROUP("ssi1-clk-d", jz4780_ssi1_clk_d),
- INGENIC_PIN_GROUP("ssi1-clk-e", jz4770_ssi1_clk_e),
- INGENIC_PIN_GROUP("ssi1-gpc-b", jz4780_ssi1_gpc_b),
- INGENIC_PIN_GROUP("ssi1-gpc-d", jz4780_ssi1_gpc_d),
- INGENIC_PIN_GROUP("ssi1-gpc-e", jz4770_ssi1_gpc_e),
- INGENIC_PIN_GROUP("ssi1-ce0-b", jz4780_ssi1_ce0_b),
- INGENIC_PIN_GROUP("ssi1-ce0-d", jz4780_ssi1_ce0_d),
- INGENIC_PIN_GROUP("ssi1-ce0-e", jz4770_ssi1_ce0_e),
- INGENIC_PIN_GROUP("ssi1-ce1-b", jz4780_ssi1_ce1_b),
- INGENIC_PIN_GROUP("ssi1-ce1-d", jz4780_ssi1_ce1_d),
- INGENIC_PIN_GROUP("ssi1-ce1-e", jz4770_ssi1_ce1_e),
- INGENIC_PIN_GROUP("mmc0-1bit-a", jz4770_mmc0_1bit_a),
- INGENIC_PIN_GROUP("mmc0-4bit-a", jz4770_mmc0_4bit_a),
- INGENIC_PIN_GROUP("mmc0-8bit-a", jz4780_mmc0_8bit_a),
- INGENIC_PIN_GROUP("mmc0-1bit-e", jz4770_mmc0_1bit_e),
- INGENIC_PIN_GROUP("mmc0-4bit-e", jz4770_mmc0_4bit_e),
- INGENIC_PIN_GROUP("mmc1-1bit-d", jz4770_mmc1_1bit_d),
- INGENIC_PIN_GROUP("mmc1-4bit-d", jz4770_mmc1_4bit_d),
- INGENIC_PIN_GROUP("mmc1-1bit-e", jz4770_mmc1_1bit_e),
- INGENIC_PIN_GROUP("mmc1-4bit-e", jz4770_mmc1_4bit_e),
- INGENIC_PIN_GROUP("mmc2-1bit-b", jz4770_mmc2_1bit_b),
- INGENIC_PIN_GROUP("mmc2-4bit-b", jz4770_mmc2_4bit_b),
- INGENIC_PIN_GROUP("mmc2-1bit-e", jz4770_mmc2_1bit_e),
- INGENIC_PIN_GROUP("mmc2-4bit-e", jz4770_mmc2_4bit_e),
- INGENIC_PIN_GROUP("nemc-data", jz4770_nemc_8bit_data),
- INGENIC_PIN_GROUP("nemc-cle-ale", jz4770_nemc_cle_ale),
- INGENIC_PIN_GROUP("nemc-addr", jz4770_nemc_addr),
- INGENIC_PIN_GROUP("nemc-rd-we", jz4770_nemc_rd_we),
- INGENIC_PIN_GROUP("nemc-frd-fwe", jz4770_nemc_frd_fwe),
- INGENIC_PIN_GROUP("nemc-wait", jz4770_nemc_wait),
- INGENIC_PIN_GROUP("nemc-cs1", jz4770_nemc_cs1),
- INGENIC_PIN_GROUP("nemc-cs2", jz4770_nemc_cs2),
- INGENIC_PIN_GROUP("nemc-cs3", jz4770_nemc_cs3),
- INGENIC_PIN_GROUP("nemc-cs4", jz4770_nemc_cs4),
- INGENIC_PIN_GROUP("nemc-cs5", jz4770_nemc_cs5),
- INGENIC_PIN_GROUP("nemc-cs6", jz4770_nemc_cs6),
- INGENIC_PIN_GROUP("i2c0-data", jz4770_i2c0),
- INGENIC_PIN_GROUP("i2c1-data", jz4770_i2c1),
- INGENIC_PIN_GROUP("i2c2-data", jz4770_i2c2),
- INGENIC_PIN_GROUP("i2c3-data", jz4780_i2c3),
- INGENIC_PIN_GROUP("i2c4-data-e", jz4780_i2c4_e),
- INGENIC_PIN_GROUP("i2c4-data-f", jz4780_i2c4_f),
- INGENIC_PIN_GROUP("i2s-data-tx", jz4780_i2s_data_tx),
- INGENIC_PIN_GROUP("i2s-data-rx", jz4780_i2s_data_rx),
- INGENIC_PIN_GROUP("i2s-clk-txrx", jz4780_i2s_clk_txrx),
- INGENIC_PIN_GROUP("i2s-clk-rx", jz4780_i2s_clk_rx),
- INGENIC_PIN_GROUP("i2s-sysclk", jz4780_i2s_sysclk),
- INGENIC_PIN_GROUP("hdmi-ddc", jz4780_hdmi_ddc),
- INGENIC_PIN_GROUP("cim-data", jz4770_cim_8bit),
- INGENIC_PIN_GROUP("lcd-24bit", jz4770_lcd_24bit),
+ INGENIC_PIN_GROUP("uart0-data", jz4770_uart0_data, 0),
+ INGENIC_PIN_GROUP("uart0-hwflow", jz4770_uart0_hwflow, 0),
+ INGENIC_PIN_GROUP("uart1-data", jz4770_uart1_data, 0),
+ INGENIC_PIN_GROUP("uart1-hwflow", jz4770_uart1_hwflow, 0),
+ INGENIC_PIN_GROUP("uart2-data", jz4780_uart2_data, 1),
+ INGENIC_PIN_GROUP("uart2-hwflow", jz4780_uart2_hwflow, 1),
+ INGENIC_PIN_GROUP_FUNCS("uart3-data", jz4770_uart3_data,
+ jz4760_uart3_data_funcs),
+ INGENIC_PIN_GROUP("uart3-hwflow", jz4770_uart3_hwflow, 0),
+ INGENIC_PIN_GROUP("uart4-data", jz4780_uart4_data, 2),
+ INGENIC_PIN_GROUP("ssi0-dt-a-19", jz4780_ssi0_dt_a_19, 2),
+ INGENIC_PIN_GROUP("ssi0-dt-a-21", jz4780_ssi0_dt_a_21, 2),
+ INGENIC_PIN_GROUP("ssi0-dt-a-28", jz4780_ssi0_dt_a_28, 2),
+ INGENIC_PIN_GROUP("ssi0-dt-b", jz4780_ssi0_dt_b, 1),
+ INGENIC_PIN_GROUP("ssi0-dt-d", jz4780_ssi0_dt_d, 1),
+ INGENIC_PIN_GROUP("ssi0-dt-e", jz4770_ssi0_dt_e, 0),
+ INGENIC_PIN_GROUP("ssi0-dr-a-20", jz4780_ssi0_dr_a_20, 2),
+ INGENIC_PIN_GROUP("ssi0-dr-a-27", jz4780_ssi0_dr_a_27, 2),
+ INGENIC_PIN_GROUP("ssi0-dr-b", jz4780_ssi0_dr_b, 1),
+ INGENIC_PIN_GROUP("ssi0-dr-d", jz4780_ssi0_dr_d, 1),
+ INGENIC_PIN_GROUP("ssi0-dr-e", jz4770_ssi0_dr_e, 0),
+ INGENIC_PIN_GROUP("ssi0-clk-a", jz4780_ssi0_clk_a, 2),
+ INGENIC_PIN_GROUP("ssi0-clk-b-5", jz4780_ssi0_clk_b_5, 1),
+ INGENIC_PIN_GROUP("ssi0-clk-b-28", jz4780_ssi0_clk_b_28, 1),
+ INGENIC_PIN_GROUP("ssi0-clk-d", jz4780_ssi0_clk_d, 1),
+ INGENIC_PIN_GROUP("ssi0-clk-e", jz4770_ssi0_clk_e, 0),
+ INGENIC_PIN_GROUP("ssi0-gpc-b", jz4780_ssi0_gpc_b, 1),
+ INGENIC_PIN_GROUP("ssi0-gpc-d", jz4780_ssi0_gpc_d, 1),
+ INGENIC_PIN_GROUP("ssi0-gpc-e", jz4770_ssi0_gpc_e, 0),
+ INGENIC_PIN_GROUP("ssi0-ce0-a-23", jz4780_ssi0_ce0_a_23, 2),
+ INGENIC_PIN_GROUP("ssi0-ce0-a-25", jz4780_ssi0_ce0_a_25, 2),
+ INGENIC_PIN_GROUP("ssi0-ce0-b", jz4780_ssi0_ce0_b, 1),
+ INGENIC_PIN_GROUP("ssi0-ce0-d", jz4780_ssi0_ce0_d, 1),
+ INGENIC_PIN_GROUP("ssi0-ce0-e", jz4770_ssi0_ce0_e, 0),
+ INGENIC_PIN_GROUP("ssi0-ce1-b", jz4780_ssi0_ce1_b, 1),
+ INGENIC_PIN_GROUP("ssi0-ce1-d", jz4780_ssi0_ce1_d, 1),
+ INGENIC_PIN_GROUP("ssi0-ce1-e", jz4770_ssi0_ce1_e, 0),
+ INGENIC_PIN_GROUP("ssi1-dt-b", jz4780_ssi1_dt_b, 2),
+ INGENIC_PIN_GROUP("ssi1-dt-d", jz4780_ssi1_dt_d, 2),
+ INGENIC_PIN_GROUP("ssi1-dt-e", jz4770_ssi1_dt_e, 1),
+ INGENIC_PIN_GROUP("ssi1-dr-b", jz4780_ssi1_dr_b, 2),
+ INGENIC_PIN_GROUP("ssi1-dr-d", jz4780_ssi1_dr_d, 2),
+ INGENIC_PIN_GROUP("ssi1-dr-e", jz4770_ssi1_dr_e, 1),
+ INGENIC_PIN_GROUP("ssi1-clk-b", jz4780_ssi1_clk_b, 2),
+ INGENIC_PIN_GROUP("ssi1-clk-d", jz4780_ssi1_clk_d, 2),
+ INGENIC_PIN_GROUP("ssi1-clk-e", jz4770_ssi1_clk_e, 1),
+ INGENIC_PIN_GROUP("ssi1-gpc-b", jz4780_ssi1_gpc_b, 2),
+ INGENIC_PIN_GROUP("ssi1-gpc-d", jz4780_ssi1_gpc_d, 2),
+ INGENIC_PIN_GROUP("ssi1-gpc-e", jz4770_ssi1_gpc_e, 1),
+ INGENIC_PIN_GROUP("ssi1-ce0-b", jz4780_ssi1_ce0_b, 2),
+ INGENIC_PIN_GROUP("ssi1-ce0-d", jz4780_ssi1_ce0_d, 2),
+ INGENIC_PIN_GROUP("ssi1-ce0-e", jz4770_ssi1_ce0_e, 1),
+ INGENIC_PIN_GROUP("ssi1-ce1-b", jz4780_ssi1_ce1_b, 2),
+ INGENIC_PIN_GROUP("ssi1-ce1-d", jz4780_ssi1_ce1_d, 2),
+ INGENIC_PIN_GROUP("ssi1-ce1-e", jz4770_ssi1_ce1_e, 1),
+ INGENIC_PIN_GROUP_FUNCS("mmc0-1bit-a", jz4770_mmc0_1bit_a,
+ jz4760_mmc0_1bit_a_funcs),
+ INGENIC_PIN_GROUP("mmc0-4bit-a", jz4770_mmc0_4bit_a, 1),
+ INGENIC_PIN_GROUP("mmc0-8bit-a", jz4780_mmc0_8bit_a, 1),
+ INGENIC_PIN_GROUP("mmc0-1bit-e", jz4770_mmc0_1bit_e, 0),
+ INGENIC_PIN_GROUP("mmc0-4bit-e", jz4770_mmc0_4bit_e, 0),
+ INGENIC_PIN_GROUP("mmc1-1bit-d", jz4770_mmc1_1bit_d, 0),
+ INGENIC_PIN_GROUP("mmc1-4bit-d", jz4770_mmc1_4bit_d, 0),
+ INGENIC_PIN_GROUP("mmc1-1bit-e", jz4770_mmc1_1bit_e, 1),
+ INGENIC_PIN_GROUP("mmc1-4bit-e", jz4770_mmc1_4bit_e, 1),
+ INGENIC_PIN_GROUP("mmc2-1bit-b", jz4770_mmc2_1bit_b, 0),
+ INGENIC_PIN_GROUP("mmc2-4bit-b", jz4770_mmc2_4bit_b, 0),
+ INGENIC_PIN_GROUP("mmc2-1bit-e", jz4770_mmc2_1bit_e, 2),
+ INGENIC_PIN_GROUP("mmc2-4bit-e", jz4770_mmc2_4bit_e, 2),
+ INGENIC_PIN_GROUP("nemc-data", jz4770_nemc_8bit_data, 0),
+ INGENIC_PIN_GROUP("nemc-cle-ale", jz4770_nemc_cle_ale, 0),
+ INGENIC_PIN_GROUP("nemc-addr", jz4770_nemc_addr, 0),
+ INGENIC_PIN_GROUP("nemc-rd-we", jz4770_nemc_rd_we, 0),
+ INGENIC_PIN_GROUP("nemc-frd-fwe", jz4770_nemc_frd_fwe, 0),
+ INGENIC_PIN_GROUP("nemc-wait", jz4770_nemc_wait, 0),
+ INGENIC_PIN_GROUP("nemc-cs1", jz4770_nemc_cs1, 0),
+ INGENIC_PIN_GROUP("nemc-cs2", jz4770_nemc_cs2, 0),
+ INGENIC_PIN_GROUP("nemc-cs3", jz4770_nemc_cs3, 0),
+ INGENIC_PIN_GROUP("nemc-cs4", jz4770_nemc_cs4, 0),
+ INGENIC_PIN_GROUP("nemc-cs5", jz4770_nemc_cs5, 0),
+ INGENIC_PIN_GROUP("nemc-cs6", jz4770_nemc_cs6, 0),
+ INGENIC_PIN_GROUP("i2c0-data", jz4770_i2c0, 0),
+ INGENIC_PIN_GROUP("i2c1-data", jz4770_i2c1, 0),
+ INGENIC_PIN_GROUP("i2c2-data", jz4770_i2c2, 2),
+ INGENIC_PIN_GROUP("i2c3-data", jz4780_i2c3, 1),
+ INGENIC_PIN_GROUP("i2c4-data-e", jz4780_i2c4_e, 1),
+ INGENIC_PIN_GROUP("i2c4-data-f", jz4780_i2c4_f, 1),
+ INGENIC_PIN_GROUP("i2s-data-tx", jz4780_i2s_data_tx, 0),
+ INGENIC_PIN_GROUP("i2s-data-rx", jz4780_i2s_data_rx, 0),
+ INGENIC_PIN_GROUP_FUNCS("i2s-clk-txrx", jz4780_i2s_clk_txrx,
+ jz4780_i2s_clk_txrx_funcs),
+ INGENIC_PIN_GROUP("i2s-clk-rx", jz4780_i2s_clk_rx, 1),
+ INGENIC_PIN_GROUP("i2s-sysclk", jz4780_i2s_sysclk, 2),
+ INGENIC_PIN_GROUP("hdmi-ddc", jz4780_hdmi_ddc, 0),
+ INGENIC_PIN_GROUP("cim-data", jz4770_cim_8bit, 0),
+ INGENIC_PIN_GROUP("cim-data-12bit", jz4770_cim_12bit, 0),
+ INGENIC_PIN_GROUP("lcd-24bit", jz4770_lcd_24bit, 0),
{ "lcd-no-pins", },
- INGENIC_PIN_GROUP("pwm0", jz4770_pwm_pwm0),
- INGENIC_PIN_GROUP("pwm1", jz4770_pwm_pwm1),
- INGENIC_PIN_GROUP("pwm2", jz4770_pwm_pwm2),
- INGENIC_PIN_GROUP("pwm3", jz4770_pwm_pwm3),
- INGENIC_PIN_GROUP("pwm4", jz4770_pwm_pwm4),
- INGENIC_PIN_GROUP("pwm5", jz4770_pwm_pwm5),
- INGENIC_PIN_GROUP("pwm6", jz4770_pwm_pwm6),
- INGENIC_PIN_GROUP("pwm7", jz4770_pwm_pwm7),
+ INGENIC_PIN_GROUP("pwm0", jz4770_pwm_pwm0, 0),
+ INGENIC_PIN_GROUP("pwm1", jz4770_pwm_pwm1, 0),
+ INGENIC_PIN_GROUP("pwm2", jz4770_pwm_pwm2, 0),
+ INGENIC_PIN_GROUP("pwm3", jz4770_pwm_pwm3, 0),
+ INGENIC_PIN_GROUP("pwm4", jz4770_pwm_pwm4, 0),
+ INGENIC_PIN_GROUP("pwm5", jz4770_pwm_pwm5, 0),
+ INGENIC_PIN_GROUP("pwm6", jz4770_pwm_pwm6, 0),
+ INGENIC_PIN_GROUP("pwm7", jz4770_pwm_pwm7, 0),
};
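
The conversion also trims the driver's static data: a shared-function group no longer needs any table, and the few surviving mixed tables become u8. For example, the removed 28-entry jz4770_lcd_24bit_funcs array occupied 112 bytes on a build with 4-byte int, whereas under the encoding sketched earlier the shared function 0 rides in the group's data pointer at no extra cost; likewise jz4780_i2s_clk_txrx_funcs drops from 8 bytes to 2.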
static const char *jz4780_uart2_groups[] = { "uart2-data", "uart2-hwflow", };
@@ -1411,119 +1191,61 @@ static int x1000_mac_pins[] = {
0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x26,
};
-static int x1000_uart0_data_funcs[] = { 0, 0, };
-static int x1000_uart0_hwflow_funcs[] = { 0, 0, };
-static int x1000_uart1_data_a_funcs[] = { 2, 2, };
-static int x1000_uart1_data_d_funcs[] = { 1, 1, };
-static int x1000_uart1_hwflow_funcs[] = { 1, 1, };
-static int x1000_uart2_data_a_funcs[] = { 2, 2, };
-static int x1000_uart2_data_d_funcs[] = { 0, 0, };
-static int x1000_sfc_funcs[] = { 1, 1, 1, 1, 1, 1, };
-static int x1000_ssi_dt_a_22_funcs[] = { 2, };
-static int x1000_ssi_dt_a_29_funcs[] = { 2, };
-static int x1000_ssi_dt_d_funcs[] = { 0, };
-static int x1000_ssi_dr_a_23_funcs[] = { 2, };
-static int x1000_ssi_dr_a_28_funcs[] = { 2, };
-static int x1000_ssi_dr_d_funcs[] = { 0, };
-static int x1000_ssi_clk_a_24_funcs[] = { 2, };
-static int x1000_ssi_clk_a_26_funcs[] = { 2, };
-static int x1000_ssi_clk_d_funcs[] = { 0, };
-static int x1000_ssi_gpc_a_20_funcs[] = { 2, };
-static int x1000_ssi_gpc_a_31_funcs[] = { 2, };
-static int x1000_ssi_ce0_a_25_funcs[] = { 2, };
-static int x1000_ssi_ce0_a_27_funcs[] = { 2, };
-static int x1000_ssi_ce0_d_funcs[] = { 0, };
-static int x1000_ssi_ce1_a_21_funcs[] = { 2, };
-static int x1000_ssi_ce1_a_30_funcs[] = { 2, };
-static int x1000_mmc0_1bit_funcs[] = { 1, 1, 1, };
-static int x1000_mmc0_4bit_funcs[] = { 1, 1, 1, };
-static int x1000_mmc0_8bit_funcs[] = { 1, 1, 1, 1, 1, };
-static int x1000_mmc1_1bit_funcs[] = { 0, 0, 0, };
-static int x1000_mmc1_4bit_funcs[] = { 0, 0, 0, };
-static int x1000_emc_8bit_data_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, };
-static int x1000_emc_16bit_data_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, };
-static int x1000_emc_addr_funcs[] = {
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-};
-static int x1000_emc_rd_we_funcs[] = { 0, 0, };
-static int x1000_emc_wait_funcs[] = { 0, };
-static int x1000_emc_cs1_funcs[] = { 0, };
-static int x1000_emc_cs2_funcs[] = { 0, };
-static int x1000_i2c0_funcs[] = { 0, 0, };
-static int x1000_i2c1_a_funcs[] = { 2, 2, };
-static int x1000_i2c1_c_funcs[] = { 0, 0, };
-static int x1000_i2c2_funcs[] = { 1, 1, };
-static int x1000_i2s_data_tx_funcs[] = { 1, };
-static int x1000_i2s_data_rx_funcs[] = { 1, };
-static int x1000_i2s_clk_txrx_funcs[] = { 1, 1, };
-static int x1000_i2s_sysclk_funcs[] = { 1, };
-static int x1000_cim_funcs[] = { 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, };
-static int x1000_lcd_8bit_funcs[] = {
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-};
-static int x1000_lcd_16bit_funcs[] = { 1, 1, 1, 1, 1, 1, 1, 1, };
-static int x1000_pwm_pwm0_funcs[] = { 0, };
-static int x1000_pwm_pwm1_funcs[] = { 1, };
-static int x1000_pwm_pwm2_funcs[] = { 1, };
-static int x1000_pwm_pwm3_funcs[] = { 2, };
-static int x1000_pwm_pwm4_funcs[] = { 0, };
-static int x1000_mac_funcs[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, };
-
static const struct group_desc x1000_groups[] = {
- INGENIC_PIN_GROUP("uart0-data", x1000_uart0_data),
- INGENIC_PIN_GROUP("uart0-hwflow", x1000_uart0_hwflow),
- INGENIC_PIN_GROUP("uart1-data-a", x1000_uart1_data_a),
- INGENIC_PIN_GROUP("uart1-data-d", x1000_uart1_data_d),
- INGENIC_PIN_GROUP("uart1-hwflow", x1000_uart1_hwflow),
- INGENIC_PIN_GROUP("uart2-data-a", x1000_uart2_data_a),
- INGENIC_PIN_GROUP("uart2-data-d", x1000_uart2_data_d),
- INGENIC_PIN_GROUP("sfc", x1000_sfc),
- INGENIC_PIN_GROUP("ssi-dt-a-22", x1000_ssi_dt_a_22),
- INGENIC_PIN_GROUP("ssi-dt-a-29", x1000_ssi_dt_a_29),
- INGENIC_PIN_GROUP("ssi-dt-d", x1000_ssi_dt_d),
- INGENIC_PIN_GROUP("ssi-dr-a-23", x1000_ssi_dr_a_23),
- INGENIC_PIN_GROUP("ssi-dr-a-28", x1000_ssi_dr_a_28),
- INGENIC_PIN_GROUP("ssi-dr-d", x1000_ssi_dr_d),
- INGENIC_PIN_GROUP("ssi-clk-a-24", x1000_ssi_clk_a_24),
- INGENIC_PIN_GROUP("ssi-clk-a-26", x1000_ssi_clk_a_26),
- INGENIC_PIN_GROUP("ssi-clk-d", x1000_ssi_clk_d),
- INGENIC_PIN_GROUP("ssi-gpc-a-20", x1000_ssi_gpc_a_20),
- INGENIC_PIN_GROUP("ssi-gpc-a-31", x1000_ssi_gpc_a_31),
- INGENIC_PIN_GROUP("ssi-ce0-a-25", x1000_ssi_ce0_a_25),
- INGENIC_PIN_GROUP("ssi-ce0-a-27", x1000_ssi_ce0_a_27),
- INGENIC_PIN_GROUP("ssi-ce0-d", x1000_ssi_ce0_d),
- INGENIC_PIN_GROUP("ssi-ce1-a-21", x1000_ssi_ce1_a_21),
- INGENIC_PIN_GROUP("ssi-ce1-a-30", x1000_ssi_ce1_a_30),
- INGENIC_PIN_GROUP("mmc0-1bit", x1000_mmc0_1bit),
- INGENIC_PIN_GROUP("mmc0-4bit", x1000_mmc0_4bit),
- INGENIC_PIN_GROUP("mmc0-8bit", x1000_mmc0_8bit),
- INGENIC_PIN_GROUP("mmc1-1bit", x1000_mmc1_1bit),
- INGENIC_PIN_GROUP("mmc1-4bit", x1000_mmc1_4bit),
- INGENIC_PIN_GROUP("emc-8bit-data", x1000_emc_8bit_data),
- INGENIC_PIN_GROUP("emc-16bit-data", x1000_emc_16bit_data),
- INGENIC_PIN_GROUP("emc-addr", x1000_emc_addr),
- INGENIC_PIN_GROUP("emc-rd-we", x1000_emc_rd_we),
- INGENIC_PIN_GROUP("emc-wait", x1000_emc_wait),
- INGENIC_PIN_GROUP("emc-cs1", x1000_emc_cs1),
- INGENIC_PIN_GROUP("emc-cs2", x1000_emc_cs2),
- INGENIC_PIN_GROUP("i2c0-data", x1000_i2c0),
- INGENIC_PIN_GROUP("i2c1-data-a", x1000_i2c1_a),
- INGENIC_PIN_GROUP("i2c1-data-c", x1000_i2c1_c),
- INGENIC_PIN_GROUP("i2c2-data", x1000_i2c2),
- INGENIC_PIN_GROUP("i2s-data-tx", x1000_i2s_data_tx),
- INGENIC_PIN_GROUP("i2s-data-rx", x1000_i2s_data_rx),
- INGENIC_PIN_GROUP("i2s-clk-txrx", x1000_i2s_clk_txrx),
- INGENIC_PIN_GROUP("i2s-sysclk", x1000_i2s_sysclk),
- INGENIC_PIN_GROUP("cim-data", x1000_cim),
- INGENIC_PIN_GROUP("lcd-8bit", x1000_lcd_8bit),
- INGENIC_PIN_GROUP("lcd-16bit", x1000_lcd_16bit),
+ INGENIC_PIN_GROUP("uart0-data", x1000_uart0_data, 0),
+ INGENIC_PIN_GROUP("uart0-hwflow", x1000_uart0_hwflow, 0),
+ INGENIC_PIN_GROUP("uart1-data-a", x1000_uart1_data_a, 2),
+ INGENIC_PIN_GROUP("uart1-data-d", x1000_uart1_data_d, 1),
+ INGENIC_PIN_GROUP("uart1-hwflow", x1000_uart1_hwflow, 1),
+ INGENIC_PIN_GROUP("uart2-data-a", x1000_uart2_data_a, 2),
+ INGENIC_PIN_GROUP("uart2-data-d", x1000_uart2_data_d, 0),
+ INGENIC_PIN_GROUP("sfc", x1000_sfc, 1),
+ INGENIC_PIN_GROUP("ssi-dt-a-22", x1000_ssi_dt_a_22, 2),
+ INGENIC_PIN_GROUP("ssi-dt-a-29", x1000_ssi_dt_a_29, 2),
+ INGENIC_PIN_GROUP("ssi-dt-d", x1000_ssi_dt_d, 0),
+ INGENIC_PIN_GROUP("ssi-dr-a-23", x1000_ssi_dr_a_23, 2),
+ INGENIC_PIN_GROUP("ssi-dr-a-28", x1000_ssi_dr_a_28, 2),
+ INGENIC_PIN_GROUP("ssi-dr-d", x1000_ssi_dr_d, 0),
+ INGENIC_PIN_GROUP("ssi-clk-a-24", x1000_ssi_clk_a_24, 2),
+ INGENIC_PIN_GROUP("ssi-clk-a-26", x1000_ssi_clk_a_26, 2),
+ INGENIC_PIN_GROUP("ssi-clk-d", x1000_ssi_clk_d, 0),
+ INGENIC_PIN_GROUP("ssi-gpc-a-20", x1000_ssi_gpc_a_20, 2),
+ INGENIC_PIN_GROUP("ssi-gpc-a-31", x1000_ssi_gpc_a_31, 2),
+ INGENIC_PIN_GROUP("ssi-ce0-a-25", x1000_ssi_ce0_a_25, 2),
+ INGENIC_PIN_GROUP("ssi-ce0-a-27", x1000_ssi_ce0_a_27, 2),
+ INGENIC_PIN_GROUP("ssi-ce0-d", x1000_ssi_ce0_d, 0),
+ INGENIC_PIN_GROUP("ssi-ce1-a-21", x1000_ssi_ce1_a_21, 2),
+ INGENIC_PIN_GROUP("ssi-ce1-a-30", x1000_ssi_ce1_a_30, 2),
+ INGENIC_PIN_GROUP("mmc0-1bit", x1000_mmc0_1bit, 1),
+ INGENIC_PIN_GROUP("mmc0-4bit", x1000_mmc0_4bit, 1),
+ INGENIC_PIN_GROUP("mmc0-8bit", x1000_mmc0_8bit, 1),
+ INGENIC_PIN_GROUP("mmc1-1bit", x1000_mmc1_1bit, 0),
+ INGENIC_PIN_GROUP("mmc1-4bit", x1000_mmc1_4bit, 0),
+ INGENIC_PIN_GROUP("emc-8bit-data", x1000_emc_8bit_data, 0),
+ INGENIC_PIN_GROUP("emc-16bit-data", x1000_emc_16bit_data, 0),
+ INGENIC_PIN_GROUP("emc-addr", x1000_emc_addr, 0),
+ INGENIC_PIN_GROUP("emc-rd-we", x1000_emc_rd_we, 0),
+ INGENIC_PIN_GROUP("emc-wait", x1000_emc_wait, 0),
+ INGENIC_PIN_GROUP("emc-cs1", x1000_emc_cs1, 0),
+ INGENIC_PIN_GROUP("emc-cs2", x1000_emc_cs2, 0),
+ INGENIC_PIN_GROUP("i2c0-data", x1000_i2c0, 0),
+ INGENIC_PIN_GROUP("i2c1-data-a", x1000_i2c1_a, 2),
+ INGENIC_PIN_GROUP("i2c1-data-c", x1000_i2c1_c, 0),
+ INGENIC_PIN_GROUP("i2c2-data", x1000_i2c2, 1),
+ INGENIC_PIN_GROUP("i2s-data-tx", x1000_i2s_data_tx, 1),
+ INGENIC_PIN_GROUP("i2s-data-rx", x1000_i2s_data_rx, 1),
+ INGENIC_PIN_GROUP("i2s-clk-txrx", x1000_i2s_clk_txrx, 1),
+ INGENIC_PIN_GROUP("i2s-sysclk", x1000_i2s_sysclk, 1),
+ INGENIC_PIN_GROUP("cim-data", x1000_cim, 2),
+ INGENIC_PIN_GROUP("lcd-8bit", x1000_lcd_8bit, 1),
+ INGENIC_PIN_GROUP("lcd-16bit", x1000_lcd_16bit, 1),
{ "lcd-no-pins", },
- INGENIC_PIN_GROUP("pwm0", x1000_pwm_pwm0),
- INGENIC_PIN_GROUP("pwm1", x1000_pwm_pwm1),
- INGENIC_PIN_GROUP("pwm2", x1000_pwm_pwm2),
- INGENIC_PIN_GROUP("pwm3", x1000_pwm_pwm3),
- INGENIC_PIN_GROUP("pwm4", x1000_pwm_pwm4),
- INGENIC_PIN_GROUP("mac", x1000_mac),
+ INGENIC_PIN_GROUP("pwm0", x1000_pwm_pwm0, 0),
+ INGENIC_PIN_GROUP("pwm1", x1000_pwm_pwm1, 1),
+ INGENIC_PIN_GROUP("pwm2", x1000_pwm_pwm2, 1),
+ INGENIC_PIN_GROUP("pwm3", x1000_pwm_pwm3, 2),
+ INGENIC_PIN_GROUP("pwm4", x1000_pwm_pwm4, 0),
+ INGENIC_PIN_GROUP("mac", x1000_mac, 1),
};
static const char *x1000_uart0_groups[] = { "uart0-data", "uart0-hwflow", };
@@ -1633,56 +1355,32 @@ static int x1500_pwm_pwm2_pins[] = { 0x5b, };
static int x1500_pwm_pwm3_pins[] = { 0x26, };
static int x1500_pwm_pwm4_pins[] = { 0x58, };
-static int x1500_uart0_data_funcs[] = { 0, 0, };
-static int x1500_uart0_hwflow_funcs[] = { 0, 0, };
-static int x1500_uart1_data_a_funcs[] = { 2, 2, };
-static int x1500_uart1_data_d_funcs[] = { 1, 1, };
-static int x1500_uart1_hwflow_funcs[] = { 1, 1, };
-static int x1500_uart2_data_a_funcs[] = { 2, 2, };
-static int x1500_uart2_data_d_funcs[] = { 0, 0, };
-static int x1500_mmc_1bit_funcs[] = { 1, 1, 1, };
-static int x1500_mmc_4bit_funcs[] = { 1, 1, 1, };
-static int x1500_i2c0_funcs[] = { 0, 0, };
-static int x1500_i2c1_a_funcs[] = { 2, 2, };
-static int x1500_i2c1_c_funcs[] = { 0, 0, };
-static int x1500_i2c2_funcs[] = { 1, 1, };
-static int x1500_i2s_data_tx_funcs[] = { 1, };
-static int x1500_i2s_data_rx_funcs[] = { 1, };
-static int x1500_i2s_clk_txrx_funcs[] = { 1, 1, };
-static int x1500_i2s_sysclk_funcs[] = { 1, };
-static int x1500_cim_funcs[] = { 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, };
-static int x1500_pwm_pwm0_funcs[] = { 0, };
-static int x1500_pwm_pwm1_funcs[] = { 1, };
-static int x1500_pwm_pwm2_funcs[] = { 1, };
-static int x1500_pwm_pwm3_funcs[] = { 2, };
-static int x1500_pwm_pwm4_funcs[] = { 0, };
-
static const struct group_desc x1500_groups[] = {
- INGENIC_PIN_GROUP("uart0-data", x1500_uart0_data),
- INGENIC_PIN_GROUP("uart0-hwflow", x1500_uart0_hwflow),
- INGENIC_PIN_GROUP("uart1-data-a", x1500_uart1_data_a),
- INGENIC_PIN_GROUP("uart1-data-d", x1500_uart1_data_d),
- INGENIC_PIN_GROUP("uart1-hwflow", x1500_uart1_hwflow),
- INGENIC_PIN_GROUP("uart2-data-a", x1500_uart2_data_a),
- INGENIC_PIN_GROUP("uart2-data-d", x1500_uart2_data_d),
- INGENIC_PIN_GROUP("sfc", x1000_sfc),
- INGENIC_PIN_GROUP("mmc-1bit", x1500_mmc_1bit),
- INGENIC_PIN_GROUP("mmc-4bit", x1500_mmc_4bit),
- INGENIC_PIN_GROUP("i2c0-data", x1500_i2c0),
- INGENIC_PIN_GROUP("i2c1-data-a", x1500_i2c1_a),
- INGENIC_PIN_GROUP("i2c1-data-c", x1500_i2c1_c),
- INGENIC_PIN_GROUP("i2c2-data", x1500_i2c2),
- INGENIC_PIN_GROUP("i2s-data-tx", x1500_i2s_data_tx),
- INGENIC_PIN_GROUP("i2s-data-rx", x1500_i2s_data_rx),
- INGENIC_PIN_GROUP("i2s-clk-txrx", x1500_i2s_clk_txrx),
- INGENIC_PIN_GROUP("i2s-sysclk", x1500_i2s_sysclk),
- INGENIC_PIN_GROUP("cim-data", x1500_cim),
+ INGENIC_PIN_GROUP("uart0-data", x1500_uart0_data, 0),
+ INGENIC_PIN_GROUP("uart0-hwflow", x1500_uart0_hwflow, 0),
+ INGENIC_PIN_GROUP("uart1-data-a", x1500_uart1_data_a, 2),
+ INGENIC_PIN_GROUP("uart1-data-d", x1500_uart1_data_d, 1),
+ INGENIC_PIN_GROUP("uart1-hwflow", x1500_uart1_hwflow, 1),
+ INGENIC_PIN_GROUP("uart2-data-a", x1500_uart2_data_a, 2),
+ INGENIC_PIN_GROUP("uart2-data-d", x1500_uart2_data_d, 0),
+ INGENIC_PIN_GROUP("sfc", x1000_sfc, 1),
+ INGENIC_PIN_GROUP("mmc-1bit", x1500_mmc_1bit, 1),
+ INGENIC_PIN_GROUP("mmc-4bit", x1500_mmc_4bit, 1),
+ INGENIC_PIN_GROUP("i2c0-data", x1500_i2c0, 0),
+ INGENIC_PIN_GROUP("i2c1-data-a", x1500_i2c1_a, 2),
+ INGENIC_PIN_GROUP("i2c1-data-c", x1500_i2c1_c, 0),
+ INGENIC_PIN_GROUP("i2c2-data", x1500_i2c2, 1),
+ INGENIC_PIN_GROUP("i2s-data-tx", x1500_i2s_data_tx, 1),
+ INGENIC_PIN_GROUP("i2s-data-rx", x1500_i2s_data_rx, 1),
+ INGENIC_PIN_GROUP("i2s-clk-txrx", x1500_i2s_clk_txrx, 1),
+ INGENIC_PIN_GROUP("i2s-sysclk", x1500_i2s_sysclk, 1),
+ INGENIC_PIN_GROUP("cim-data", x1500_cim, 2),
{ "lcd-no-pins", },
- INGENIC_PIN_GROUP("pwm0", x1500_pwm_pwm0),
- INGENIC_PIN_GROUP("pwm1", x1500_pwm_pwm1),
- INGENIC_PIN_GROUP("pwm2", x1500_pwm_pwm2),
- INGENIC_PIN_GROUP("pwm3", x1500_pwm_pwm3),
- INGENIC_PIN_GROUP("pwm4", x1500_pwm_pwm4),
+ INGENIC_PIN_GROUP("pwm0", x1500_pwm_pwm0, 0),
+ INGENIC_PIN_GROUP("pwm1", x1500_pwm_pwm1, 1),
+ INGENIC_PIN_GROUP("pwm2", x1500_pwm_pwm2, 1),
+ INGENIC_PIN_GROUP("pwm3", x1500_pwm_pwm3, 2),
+ INGENIC_PIN_GROUP("pwm4", x1500_pwm_pwm4, 0),
};
static const char *x1500_uart0_groups[] = { "uart0-data", "uart0-hwflow", };
@@ -1811,124 +1509,62 @@ static int x1830_mac_pins[] = {
0x29, 0x30, 0x2f, 0x28, 0x2e, 0x2d, 0x2a, 0x2b, 0x26, 0x27,
};
-static int x1830_uart0_data_funcs[] = { 0, 0, };
-static int x1830_uart0_hwflow_funcs[] = { 0, 0, };
-static int x1830_uart1_data_funcs[] = { 0, 0, };
-static int x1830_sfc_funcs[] = { 1, 1, 1, 1, 1, 1, };
-static int x1830_ssi0_dt_funcs[] = { 0, };
-static int x1830_ssi0_dr_funcs[] = { 0, };
-static int x1830_ssi0_clk_funcs[] = { 0, };
-static int x1830_ssi0_gpc_funcs[] = { 0, };
-static int x1830_ssi0_ce0_funcs[] = { 0, };
-static int x1830_ssi0_ce1_funcs[] = { 0, };
-static int x1830_ssi1_dt_c_funcs[] = { 1, };
-static int x1830_ssi1_dr_c_funcs[] = { 1, };
-static int x1830_ssi1_clk_c_funcs[] = { 1, };
-static int x1830_ssi1_gpc_c_funcs[] = { 1, };
-static int x1830_ssi1_ce0_c_funcs[] = { 1, };
-static int x1830_ssi1_ce1_c_funcs[] = { 1, };
-static int x1830_ssi1_dt_d_funcs[] = { 2, };
-static int x1830_ssi1_dr_d_funcs[] = { 2, };
-static int x1830_ssi1_clk_d_funcs[] = { 2, };
-static int x1830_ssi1_gpc_d_funcs[] = { 2, };
-static int x1830_ssi1_ce0_d_funcs[] = { 2, };
-static int x1830_ssi1_ce1_d_funcs[] = { 2, };
-static int x1830_mmc0_1bit_funcs[] = { 0, 0, 0, };
-static int x1830_mmc0_4bit_funcs[] = { 0, 0, 0, };
-static int x1830_mmc1_1bit_funcs[] = { 0, 0, 0, };
-static int x1830_mmc1_4bit_funcs[] = { 0, 0, 0, };
-static int x1830_i2c0_funcs[] = { 1, 1, };
-static int x1830_i2c1_funcs[] = { 0, 0, };
-static int x1830_i2c2_funcs[] = { 1, 1, };
-static int x1830_i2s_data_tx_funcs[] = { 0, };
-static int x1830_i2s_data_rx_funcs[] = { 0, };
-static int x1830_i2s_clk_txrx_funcs[] = { 0, 0, };
-static int x1830_i2s_clk_rx_funcs[] = { 0, 0, };
-static int x1830_i2s_sysclk_funcs[] = { 0, };
-static int x1830_lcd_rgb_18bit_funcs[] = {
- 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0,
-};
-static int x1830_lcd_slcd_8bit_funcs[] = {
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-};
-static int x1830_lcd_slcd_16bit_funcs[] = { 1, 1, 1, 1, 1, 1, 1, 1, };
-static int x1830_pwm_pwm0_b_funcs[] = { 0, };
-static int x1830_pwm_pwm0_c_funcs[] = { 1, };
-static int x1830_pwm_pwm1_b_funcs[] = { 0, };
-static int x1830_pwm_pwm1_c_funcs[] = { 1, };
-static int x1830_pwm_pwm2_c_8_funcs[] = { 0, };
-static int x1830_pwm_pwm2_c_13_funcs[] = { 1, };
-static int x1830_pwm_pwm3_c_9_funcs[] = { 0, };
-static int x1830_pwm_pwm3_c_14_funcs[] = { 1, };
-static int x1830_pwm_pwm4_c_15_funcs[] = { 1, };
-static int x1830_pwm_pwm4_c_25_funcs[] = { 0, };
-static int x1830_pwm_pwm5_c_16_funcs[] = { 1, };
-static int x1830_pwm_pwm5_c_26_funcs[] = { 0, };
-static int x1830_pwm_pwm6_c_17_funcs[] = { 1, };
-static int x1830_pwm_pwm6_c_27_funcs[] = { 0, };
-static int x1830_pwm_pwm7_c_18_funcs[] = { 1, };
-static int x1830_pwm_pwm7_c_28_funcs[] = { 0, };
-static int x1830_mac_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, };
-
static const struct group_desc x1830_groups[] = {
- INGENIC_PIN_GROUP("uart0-data", x1830_uart0_data),
- INGENIC_PIN_GROUP("uart0-hwflow", x1830_uart0_hwflow),
- INGENIC_PIN_GROUP("uart1-data", x1830_uart1_data),
- INGENIC_PIN_GROUP("sfc", x1830_sfc),
- INGENIC_PIN_GROUP("ssi0-dt", x1830_ssi0_dt),
- INGENIC_PIN_GROUP("ssi0-dr", x1830_ssi0_dr),
- INGENIC_PIN_GROUP("ssi0-clk", x1830_ssi0_clk),
- INGENIC_PIN_GROUP("ssi0-gpc", x1830_ssi0_gpc),
- INGENIC_PIN_GROUP("ssi0-ce0", x1830_ssi0_ce0),
- INGENIC_PIN_GROUP("ssi0-ce1", x1830_ssi0_ce1),
- INGENIC_PIN_GROUP("ssi1-dt-c", x1830_ssi1_dt_c),
- INGENIC_PIN_GROUP("ssi1-dr-c", x1830_ssi1_dr_c),
- INGENIC_PIN_GROUP("ssi1-clk-c", x1830_ssi1_clk_c),
- INGENIC_PIN_GROUP("ssi1-gpc-c", x1830_ssi1_gpc_c),
- INGENIC_PIN_GROUP("ssi1-ce0-c", x1830_ssi1_ce0_c),
- INGENIC_PIN_GROUP("ssi1-ce1-c", x1830_ssi1_ce1_c),
- INGENIC_PIN_GROUP("ssi1-dt-d", x1830_ssi1_dt_d),
- INGENIC_PIN_GROUP("ssi1-dr-d", x1830_ssi1_dr_d),
- INGENIC_PIN_GROUP("ssi1-clk-d", x1830_ssi1_clk_d),
- INGENIC_PIN_GROUP("ssi1-gpc-d", x1830_ssi1_gpc_d),
- INGENIC_PIN_GROUP("ssi1-ce0-d", x1830_ssi1_ce0_d),
- INGENIC_PIN_GROUP("ssi1-ce1-d", x1830_ssi1_ce1_d),
- INGENIC_PIN_GROUP("mmc0-1bit", x1830_mmc0_1bit),
- INGENIC_PIN_GROUP("mmc0-4bit", x1830_mmc0_4bit),
- INGENIC_PIN_GROUP("mmc1-1bit", x1830_mmc1_1bit),
- INGENIC_PIN_GROUP("mmc1-4bit", x1830_mmc1_4bit),
- INGENIC_PIN_GROUP("i2c0-data", x1830_i2c0),
- INGENIC_PIN_GROUP("i2c1-data", x1830_i2c1),
- INGENIC_PIN_GROUP("i2c2-data", x1830_i2c2),
- INGENIC_PIN_GROUP("i2s-data-tx", x1830_i2s_data_tx),
- INGENIC_PIN_GROUP("i2s-data-rx", x1830_i2s_data_rx),
- INGENIC_PIN_GROUP("i2s-clk-txrx", x1830_i2s_clk_txrx),
- INGENIC_PIN_GROUP("i2s-clk-rx", x1830_i2s_clk_rx),
- INGENIC_PIN_GROUP("i2s-sysclk", x1830_i2s_sysclk),
- INGENIC_PIN_GROUP("lcd-rgb-18bit", x1830_lcd_rgb_18bit),
- INGENIC_PIN_GROUP("lcd-slcd-8bit", x1830_lcd_slcd_8bit),
- INGENIC_PIN_GROUP("lcd-slcd-16bit", x1830_lcd_slcd_16bit),
+ INGENIC_PIN_GROUP("uart0-data", x1830_uart0_data, 0),
+ INGENIC_PIN_GROUP("uart0-hwflow", x1830_uart0_hwflow, 0),
+ INGENIC_PIN_GROUP("uart1-data", x1830_uart1_data, 0),
+ INGENIC_PIN_GROUP("sfc", x1830_sfc, 1),
+ INGENIC_PIN_GROUP("ssi0-dt", x1830_ssi0_dt, 0),
+ INGENIC_PIN_GROUP("ssi0-dr", x1830_ssi0_dr, 0),
+ INGENIC_PIN_GROUP("ssi0-clk", x1830_ssi0_clk, 0),
+ INGENIC_PIN_GROUP("ssi0-gpc", x1830_ssi0_gpc, 0),
+ INGENIC_PIN_GROUP("ssi0-ce0", x1830_ssi0_ce0, 0),
+ INGENIC_PIN_GROUP("ssi0-ce1", x1830_ssi0_ce1, 0),
+ INGENIC_PIN_GROUP("ssi1-dt-c", x1830_ssi1_dt_c, 1),
+ INGENIC_PIN_GROUP("ssi1-dr-c", x1830_ssi1_dr_c, 1),
+ INGENIC_PIN_GROUP("ssi1-clk-c", x1830_ssi1_clk_c, 1),
+ INGENIC_PIN_GROUP("ssi1-gpc-c", x1830_ssi1_gpc_c, 1),
+ INGENIC_PIN_GROUP("ssi1-ce0-c", x1830_ssi1_ce0_c, 1),
+ INGENIC_PIN_GROUP("ssi1-ce1-c", x1830_ssi1_ce1_c, 1),
+ INGENIC_PIN_GROUP("ssi1-dt-d", x1830_ssi1_dt_d, 2),
+ INGENIC_PIN_GROUP("ssi1-dr-d", x1830_ssi1_dr_d, 2),
+ INGENIC_PIN_GROUP("ssi1-clk-d", x1830_ssi1_clk_d, 2),
+ INGENIC_PIN_GROUP("ssi1-gpc-d", x1830_ssi1_gpc_d, 2),
+ INGENIC_PIN_GROUP("ssi1-ce0-d", x1830_ssi1_ce0_d, 2),
+ INGENIC_PIN_GROUP("ssi1-ce1-d", x1830_ssi1_ce1_d, 2),
+ INGENIC_PIN_GROUP("mmc0-1bit", x1830_mmc0_1bit, 0),
+ INGENIC_PIN_GROUP("mmc0-4bit", x1830_mmc0_4bit, 0),
+ INGENIC_PIN_GROUP("mmc1-1bit", x1830_mmc1_1bit, 0),
+ INGENIC_PIN_GROUP("mmc1-4bit", x1830_mmc1_4bit, 0),
+ INGENIC_PIN_GROUP("i2c0-data", x1830_i2c0, 1),
+ INGENIC_PIN_GROUP("i2c1-data", x1830_i2c1, 0),
+ INGENIC_PIN_GROUP("i2c2-data", x1830_i2c2, 1),
+ INGENIC_PIN_GROUP("i2s-data-tx", x1830_i2s_data_tx, 0),
+ INGENIC_PIN_GROUP("i2s-data-rx", x1830_i2s_data_rx, 0),
+ INGENIC_PIN_GROUP("i2s-clk-txrx", x1830_i2s_clk_txrx, 0),
+ INGENIC_PIN_GROUP("i2s-clk-rx", x1830_i2s_clk_rx, 0),
+ INGENIC_PIN_GROUP("i2s-sysclk", x1830_i2s_sysclk, 0),
+ INGENIC_PIN_GROUP("lcd-rgb-18bit", x1830_lcd_rgb_18bit, 0),
+ INGENIC_PIN_GROUP("lcd-slcd-8bit", x1830_lcd_slcd_8bit, 1),
+ INGENIC_PIN_GROUP("lcd-slcd-16bit", x1830_lcd_slcd_16bit, 1),
{ "lcd-no-pins", },
- INGENIC_PIN_GROUP("pwm0-b", x1830_pwm_pwm0_b),
- INGENIC_PIN_GROUP("pwm0-c", x1830_pwm_pwm0_c),
- INGENIC_PIN_GROUP("pwm1-b", x1830_pwm_pwm1_b),
- INGENIC_PIN_GROUP("pwm1-c", x1830_pwm_pwm1_c),
- INGENIC_PIN_GROUP("pwm2-c-8", x1830_pwm_pwm2_c_8),
- INGENIC_PIN_GROUP("pwm2-c-13", x1830_pwm_pwm2_c_13),
- INGENIC_PIN_GROUP("pwm3-c-9", x1830_pwm_pwm3_c_9),
- INGENIC_PIN_GROUP("pwm3-c-14", x1830_pwm_pwm3_c_14),
- INGENIC_PIN_GROUP("pwm4-c-15", x1830_pwm_pwm4_c_15),
- INGENIC_PIN_GROUP("pwm4-c-25", x1830_pwm_pwm4_c_25),
- INGENIC_PIN_GROUP("pwm5-c-16", x1830_pwm_pwm5_c_16),
- INGENIC_PIN_GROUP("pwm5-c-26", x1830_pwm_pwm5_c_26),
- INGENIC_PIN_GROUP("pwm6-c-17", x1830_pwm_pwm6_c_17),
- INGENIC_PIN_GROUP("pwm6-c-27", x1830_pwm_pwm6_c_27),
- INGENIC_PIN_GROUP("pwm7-c-18", x1830_pwm_pwm7_c_18),
- INGENIC_PIN_GROUP("pwm7-c-28", x1830_pwm_pwm7_c_28),
- INGENIC_PIN_GROUP("mac", x1830_mac),
+ INGENIC_PIN_GROUP("pwm0-b", x1830_pwm_pwm0_b, 0),
+ INGENIC_PIN_GROUP("pwm0-c", x1830_pwm_pwm0_c, 1),
+ INGENIC_PIN_GROUP("pwm1-b", x1830_pwm_pwm1_b, 0),
+ INGENIC_PIN_GROUP("pwm1-c", x1830_pwm_pwm1_c, 1),
+ INGENIC_PIN_GROUP("pwm2-c-8", x1830_pwm_pwm2_c_8, 0),
+ INGENIC_PIN_GROUP("pwm2-c-13", x1830_pwm_pwm2_c_13, 1),
+ INGENIC_PIN_GROUP("pwm3-c-9", x1830_pwm_pwm3_c_9, 0),
+ INGENIC_PIN_GROUP("pwm3-c-14", x1830_pwm_pwm3_c_14, 1),
+ INGENIC_PIN_GROUP("pwm4-c-15", x1830_pwm_pwm4_c_15, 1),
+ INGENIC_PIN_GROUP("pwm4-c-25", x1830_pwm_pwm4_c_25, 0),
+ INGENIC_PIN_GROUP("pwm5-c-16", x1830_pwm_pwm5_c_16, 1),
+ INGENIC_PIN_GROUP("pwm5-c-26", x1830_pwm_pwm5_c_26, 0),
+ INGENIC_PIN_GROUP("pwm6-c-17", x1830_pwm_pwm6_c_17, 1),
+ INGENIC_PIN_GROUP("pwm6-c-27", x1830_pwm_pwm6_c_27, 0),
+ INGENIC_PIN_GROUP("pwm7-c-18", x1830_pwm_pwm7_c_18, 1),
+ INGENIC_PIN_GROUP("pwm7-c-28", x1830_pwm_pwm7_c_28, 0),
+ INGENIC_PIN_GROUP("mac", x1830_mac, 0),
};
static const char *x1830_uart0_groups[] = { "uart0-data", "uart0-hwflow", };
@@ -2052,8 +1688,8 @@ static inline bool ingenic_gpio_get_value(struct ingenic_gpio_chip *jzgc,
static void ingenic_gpio_set_value(struct ingenic_gpio_chip *jzgc,
u8 offset, int value)
{
- if (jzgc->jzpc->info->version >= ID_JZ4760)
- ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_PAT0, offset, !!value);
+ if (jzgc->jzpc->info->version >= ID_JZ4770)
+ ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_PAT0, offset, !!value);
else
ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_DATA, offset, !!value);
}
@@ -2082,9 +1718,9 @@ static void irq_set_type(struct ingenic_gpio_chip *jzgc,
break;
}
- if (jzgc->jzpc->info->version >= ID_JZ4760) {
- reg1 = JZ4760_GPIO_PAT1;
- reg2 = JZ4760_GPIO_PAT0;
+ if (jzgc->jzpc->info->version >= ID_JZ4770) {
+ reg1 = JZ4770_GPIO_PAT1;
+ reg2 = JZ4770_GPIO_PAT0;
} else {
reg1 = JZ4740_GPIO_TRIG;
reg2 = JZ4740_GPIO_DIR;
@@ -2122,8 +1758,8 @@ static void ingenic_gpio_irq_enable(struct irq_data *irqd)
struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
int irq = irqd->hwirq;
- if (jzgc->jzpc->info->version >= ID_JZ4760)
- ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_INT, irq, true);
+ if (jzgc->jzpc->info->version >= ID_JZ4770)
+ ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_INT, irq, true);
else
ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, true);
@@ -2138,8 +1774,8 @@ static void ingenic_gpio_irq_disable(struct irq_data *irqd)
ingenic_gpio_irq_mask(irqd);
- if (jzgc->jzpc->info->version >= ID_JZ4760)
- ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_INT, irq, false);
+ if (jzgc->jzpc->info->version >= ID_JZ4770)
+ ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_INT, irq, false);
else
ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, false);
}
@@ -2163,8 +1799,8 @@ static void ingenic_gpio_irq_ack(struct irq_data *irqd)
irq_set_type(jzgc, irq, IRQ_TYPE_LEVEL_HIGH);
}
- if (jzgc->jzpc->info->version >= ID_JZ4760)
- ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_FLAG, irq, false);
+ if (jzgc->jzpc->info->version >= ID_JZ4770)
+ ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_FLAG, irq, false);
else
ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_DATA, irq, true);
}
@@ -2220,8 +1856,8 @@ static void ingenic_gpio_irq_handler(struct irq_desc *desc)
chained_irq_enter(irq_chip, desc);
- if (jzgc->jzpc->info->version >= ID_JZ4760)
- flag = ingenic_gpio_read_reg(jzgc, JZ4760_GPIO_FLAG);
+ if (jzgc->jzpc->info->version >= ID_JZ4770)
+ flag = ingenic_gpio_read_reg(jzgc, JZ4770_GPIO_FLAG);
else
flag = ingenic_gpio_read_reg(jzgc, JZ4740_GPIO_FLAG);
@@ -2302,9 +1938,9 @@ static int ingenic_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
struct ingenic_pinctrl *jzpc = jzgc->jzpc;
unsigned int pin = gc->base + offset;
- if (jzpc->info->version >= ID_JZ4760) {
- if (ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_INT) ||
- ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_PAT1))
+ if (jzpc->info->version >= ID_JZ4770) {
+ if (ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_INT) ||
+ ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_PAT1))
return GPIO_LINE_DIRECTION_IN;
return GPIO_LINE_DIRECTION_OUT;
}
@@ -2355,20 +1991,20 @@ static int ingenic_pinmux_set_pin_fn(struct ingenic_pinctrl *jzpc,
'A' + offt, idx, func);
if (jzpc->info->version >= ID_X1000) {
- ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_INT, false);
+ ingenic_shadow_config_pin(jzpc, pin, JZ4770_GPIO_INT, false);
ingenic_shadow_config_pin(jzpc, pin, GPIO_MSK, false);
- ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, func & 0x2);
- ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_PAT0, func & 0x1);
+ ingenic_shadow_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, func & 0x2);
+ ingenic_shadow_config_pin(jzpc, pin, JZ4770_GPIO_PAT0, func & 0x1);
ingenic_shadow_config_pin_load(jzpc, pin);
- } else if (jzpc->info->version >= ID_JZ4760) {
- ingenic_config_pin(jzpc, pin, JZ4760_GPIO_INT, false);
+ } else if (jzpc->info->version >= ID_JZ4770) {
+ ingenic_config_pin(jzpc, pin, JZ4770_GPIO_INT, false);
ingenic_config_pin(jzpc, pin, GPIO_MSK, false);
- ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, func & 0x2);
- ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT0, func & 0x1);
+ ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, func & 0x2);
+ ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT0, func & 0x1);
} else {
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_FUNC, true);
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_TRIG, func & 0x2);
- ingenic_config_pin(jzpc, pin, JZ4740_GPIO_SELECT, func > 0);
+ ingenic_config_pin(jzpc, pin, JZ4740_GPIO_SELECT, func & 0x1);
}
return 0;
@@ -2381,6 +2017,8 @@ static int ingenic_pinmux_set_mux(struct pinctrl_dev *pctldev,
struct function_desc *func;
struct group_desc *grp;
unsigned int i;
+ uintptr_t mode;
+ u8 *pin_modes;
func = pinmux_generic_get_function(pctldev, selector);
if (!func)
@@ -2393,10 +2031,15 @@ static int ingenic_pinmux_set_mux(struct pinctrl_dev *pctldev,
dev_dbg(pctldev->dev, "enable function %s group %s\n",
func->name, grp->name);
- for (i = 0; i < grp->num_pins; i++) {
- int *pin_modes = grp->data;
+ mode = (uintptr_t)grp->data;
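+ /*
+ * Illustrative note: the group data field now encodes either one
+ * shared function mode (a small integer, 0..3, stored directly in
+ * the pointer) or a pointer to a per-pin mode array for groups
+ * whose pins use different function numbers.
+ */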
+ if (mode <= 3) {
+ for (i = 0; i < grp->num_pins; i++)
+ ingenic_pinmux_set_pin_fn(jzpc, grp->pins[i], mode);
+ } else {
+ pin_modes = grp->data;
- ingenic_pinmux_set_pin_fn(jzpc, grp->pins[i], pin_modes[i]);
+ for (i = 0; i < grp->num_pins; i++)
+ ingenic_pinmux_set_pin_fn(jzpc, grp->pins[i], pin_modes[i]);
}
return 0;
@@ -2414,14 +2057,14 @@ static int ingenic_pinmux_gpio_set_direction(struct pinctrl_dev *pctldev,
'A' + offt, idx, input ? "in" : "out");
if (jzpc->info->version >= ID_X1000) {
- ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_INT, false);
+ ingenic_shadow_config_pin(jzpc, pin, JZ4770_GPIO_INT, false);
ingenic_shadow_config_pin(jzpc, pin, GPIO_MSK, true);
- ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, input);
+ ingenic_shadow_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, input);
ingenic_shadow_config_pin_load(jzpc, pin);
- } else if (jzpc->info->version >= ID_JZ4760) {
- ingenic_config_pin(jzpc, pin, JZ4760_GPIO_INT, false);
+ } else if (jzpc->info->version >= ID_JZ4770) {
+ ingenic_config_pin(jzpc, pin, JZ4770_GPIO_INT, false);
ingenic_config_pin(jzpc, pin, GPIO_MSK, true);
- ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, input);
+ ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, input);
} else {
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_SELECT, false);
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DIR, !input);
@@ -2448,8 +2091,8 @@ static int ingenic_pinconf_get(struct pinctrl_dev *pctldev,
unsigned int offt = pin / PINS_PER_GPIO_CHIP;
bool pull;
- if (jzpc->info->version >= ID_JZ4760)
- pull = !ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_PEN);
+ if (jzpc->info->version >= ID_JZ4770)
+ pull = !ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_PEN);
else
pull = !ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_PULL_DIS);
@@ -2498,8 +2141,8 @@ static void ingenic_set_bias(struct ingenic_pinctrl *jzpc,
REG_SET(X1830_GPIO_PEH), bias << idxh);
}
- } else if (jzpc->info->version >= ID_JZ4760) {
- ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PEN, !bias);
+ } else if (jzpc->info->version >= ID_JZ4770) {
+ ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PEN, !bias);
} else {
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_PULL_DIS, !bias);
}
@@ -2508,8 +2151,8 @@ static void ingenic_set_bias(struct ingenic_pinctrl *jzpc,
static void ingenic_set_output_level(struct ingenic_pinctrl *jzpc,
unsigned int pin, bool high)
{
- if (jzpc->info->version >= ID_JZ4760)
- ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT0, high);
+ if (jzpc->info->version >= ID_JZ4770)
+ ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT0, high);
else
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DATA, high);
}
diff --git a/drivers/pinctrl/pinctrl-microchip-sgpio.c b/drivers/pinctrl/pinctrl-microchip-sgpio.c
new file mode 100644
index 000000000000..f35edb0eac40
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-microchip-sgpio.c
@@ -0,0 +1,892 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Microsemi/Microchip SoCs serial gpio driver
+ *
+ * Author: Lars Povlsen <lars.povlsen@microchip.com>
+ *
+ * Copyright (c) 2020 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/gpio/driver.h>
+#include <linux/io.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+
+#include "core.h"
+#include "pinconf.h"
+
+#define SGPIO_BITS_PER_WORD 32
+#define SGPIO_MAX_BITS 4
+#define SGPIO_SRC_BITS 3 /* 3 bit wide field per pin */
+
+enum {
+ REG_INPUT_DATA,
+ REG_PORT_CONFIG,
+ REG_PORT_ENABLE,
+ REG_SIO_CONFIG,
+ REG_SIO_CLOCK,
+ REG_INT_POLARITY,
+ REG_INT_TRIGGER,
+ REG_INT_ACK,
+ REG_INT_ENABLE,
+ REG_INT_IDENT,
+ MAXREG
+};
+
+enum {
+ SGPIO_ARCH_LUTON,
+ SGPIO_ARCH_OCELOT,
+ SGPIO_ARCH_SPARX5,
+};
+
+enum {
+ SGPIO_FLAGS_HAS_IRQ = BIT(0),
+};
+
+struct sgpio_properties {
+ int arch;
+ int flags;
+ u8 regoff[MAXREG];
+};
+
+#define SGPIO_LUTON_AUTO_REPEAT BIT(5)
+#define SGPIO_LUTON_PORT_WIDTH GENMASK(3, 2)
+#define SGPIO_LUTON_CLK_FREQ GENMASK(11, 0)
+#define SGPIO_LUTON_BIT_SOURCE GENMASK(11, 0)
+
+#define SGPIO_OCELOT_AUTO_REPEAT BIT(10)
+#define SGPIO_OCELOT_PORT_WIDTH GENMASK(8, 7)
+#define SGPIO_OCELOT_CLK_FREQ GENMASK(19, 8)
+#define SGPIO_OCELOT_BIT_SOURCE GENMASK(23, 12)
+
+#define SGPIO_SPARX5_AUTO_REPEAT BIT(6)
+#define SGPIO_SPARX5_PORT_WIDTH GENMASK(4, 3)
+#define SGPIO_SPARX5_CLK_FREQ GENMASK(19, 8)
+#define SGPIO_SPARX5_BIT_SOURCE GENMASK(23, 12)
+
+#define SGPIO_MASTER_INTR_ENA BIT(0)
+
+#define SGPIO_INT_TRG_LEVEL 0
+#define SGPIO_INT_TRG_EDGE 1
+#define SGPIO_INT_TRG_EDGE_FALL 2
+#define SGPIO_INT_TRG_EDGE_RISE 3
+
+#define SGPIO_TRG_LEVEL_HIGH 0
+#define SGPIO_TRG_LEVEL_LOW 1
+
+static const struct sgpio_properties properties_luton = {
+ .arch = SGPIO_ARCH_LUTON,
+ .regoff = { 0x00, 0x09, 0x29, 0x2a, 0x2b },
+};
+
+static const struct sgpio_properties properties_ocelot = {
+ .arch = SGPIO_ARCH_OCELOT,
+ .regoff = { 0x00, 0x06, 0x26, 0x04, 0x05 },
+};
+
+static const struct sgpio_properties properties_sparx5 = {
+ .arch = SGPIO_ARCH_SPARX5,
+ .flags = SGPIO_FLAGS_HAS_IRQ,
+ .regoff = { 0x00, 0x06, 0x26, 0x04, 0x05, 0x2a, 0x32, 0x3a, 0x3e, 0x42 },
+};
+
+static const char * const functions[] = { "gpio" };
+
+struct sgpio_bank {
+ struct sgpio_priv *priv;
+ bool is_input;
+ struct gpio_chip gpio;
+ struct pinctrl_desc pctl_desc;
+};
+
+struct sgpio_priv {
+ struct device *dev;
+ struct sgpio_bank in;
+ struct sgpio_bank out;
+ u32 bitcount;
+ u32 ports;
+ u32 clock;
+ u32 __iomem *regs;
+ const struct sgpio_properties *properties;
+};
+
+struct sgpio_port_addr {
+ u8 port;
+ u8 bit;
+};
+
+static inline void sgpio_pin_to_addr(struct sgpio_priv *priv, int pin,
+ struct sgpio_port_addr *addr)
+{
+ addr->port = pin / priv->bitcount;
+ addr->bit = pin % priv->bitcount;
+}
+
+static inline int sgpio_addr_to_pin(struct sgpio_priv *priv, int port, int bit)
+{
+ return bit + port * priv->bitcount;
+}
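+
+/*
+ * Worked example (assuming bitcount = 4): pin 13 maps to port 3,
+ * bit 1, and sgpio_addr_to_pin(priv, 3, 1) returns 13 again.
+ */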
+
+static inline u32 sgpio_readl(struct sgpio_priv *priv, u32 rno, u32 off)
+{
+ u32 __iomem *reg = &priv->regs[priv->properties->regoff[rno] + off];
+
+ return readl(reg);
+}
+
+static inline void sgpio_writel(struct sgpio_priv *priv,
+ u32 val, u32 rno, u32 off)
+{
+ u32 __iomem *reg = &priv->regs[priv->properties->regoff[rno] + off];
+
+ writel(val, reg);
+}
+
+static inline void sgpio_clrsetbits(struct sgpio_priv *priv,
+ u32 rno, u32 off, u32 clear, u32 set)
+{
+ u32 __iomem *reg = &priv->regs[priv->properties->regoff[rno] + off];
+ u32 val = readl(reg);
+
+ val &= ~clear;
+ val |= set;
+
+ writel(val, reg);
+}
+
+static inline void sgpio_configure_bitstream(struct sgpio_priv *priv)
+{
+ int width = priv->bitcount - 1;
+ u32 clr, set;
+
+ switch (priv->properties->arch) {
+ case SGPIO_ARCH_LUTON:
+ clr = SGPIO_LUTON_PORT_WIDTH;
+ set = SGPIO_LUTON_AUTO_REPEAT |
+ FIELD_PREP(SGPIO_LUTON_PORT_WIDTH, width);
+ break;
+ case SGPIO_ARCH_OCELOT:
+ clr = SGPIO_OCELOT_PORT_WIDTH;
+ set = SGPIO_OCELOT_AUTO_REPEAT |
+ FIELD_PREP(SGPIO_OCELOT_PORT_WIDTH, width);
+ break;
+ case SGPIO_ARCH_SPARX5:
+ clr = SGPIO_SPARX5_PORT_WIDTH;
+ set = SGPIO_SPARX5_AUTO_REPEAT |
+ FIELD_PREP(SGPIO_SPARX5_PORT_WIDTH, width);
+ break;
+ default:
+ return;
+ }
+ sgpio_clrsetbits(priv, REG_SIO_CONFIG, 0, clr, set);
+}
+
+static inline void sgpio_configure_clock(struct sgpio_priv *priv, u32 clkfrq)
+{
+ u32 clr, set;
+
+ switch (priv->properties->arch) {
+ case SGPIO_ARCH_LUTON:
+ clr = SGPIO_LUTON_CLK_FREQ;
+ set = FIELD_PREP(SGPIO_LUTON_CLK_FREQ, clkfrq);
+ break;
+ case SGPIO_ARCH_OCELOT:
+ clr = SGPIO_OCELOT_CLK_FREQ;
+ set = FIELD_PREP(SGPIO_OCELOT_CLK_FREQ, clkfrq);
+ break;
+ case SGPIO_ARCH_SPARX5:
+ clr = SGPIO_SPARX5_CLK_FREQ;
+ set = FIELD_PREP(SGPIO_SPARX5_CLK_FREQ, clkfrq);
+ break;
+ default:
+ return;
+ }
+ sgpio_clrsetbits(priv, REG_SIO_CLOCK, 0, clr, set);
+}
+
+static void sgpio_output_set(struct sgpio_priv *priv,
+ struct sgpio_port_addr *addr,
+ int value)
+{
+ unsigned int bit = SGPIO_SRC_BITS * addr->bit;
+ u32 clr, set;
+
+ switch (priv->properties->arch) {
+ case SGPIO_ARCH_LUTON:
+ clr = FIELD_PREP(SGPIO_LUTON_BIT_SOURCE, BIT(bit));
+ set = FIELD_PREP(SGPIO_LUTON_BIT_SOURCE, value << bit);
+ break;
+ case SGPIO_ARCH_OCELOT:
+ clr = FIELD_PREP(SGPIO_OCELOT_BIT_SOURCE, BIT(bit));
+ set = FIELD_PREP(SGPIO_OCELOT_BIT_SOURCE, value << bit);
+ break;
+ case SGPIO_ARCH_SPARX5:
+ clr = FIELD_PREP(SGPIO_SPARX5_BIT_SOURCE, BIT(bit));
+ set = FIELD_PREP(SGPIO_SPARX5_BIT_SOURCE, value << bit);
+ break;
+ default:
+ return;
+ }
+ sgpio_clrsetbits(priv, REG_PORT_CONFIG, addr->port, clr, set);
+}
+
+static int sgpio_output_get(struct sgpio_priv *priv,
+ struct sgpio_port_addr *addr)
+{
+ u32 val, portval = sgpio_readl(priv, REG_PORT_CONFIG, addr->port);
+ unsigned int bit = SGPIO_SRC_BITS * addr->bit;
+
+ switch (priv->properties->arch) {
+ case SGPIO_ARCH_LUTON:
+ val = FIELD_GET(SGPIO_LUTON_BIT_SOURCE, portval);
+ break;
+ case SGPIO_ARCH_OCELOT:
+ val = FIELD_GET(SGPIO_OCELOT_BIT_SOURCE, portval);
+ break;
+ case SGPIO_ARCH_SPARX5:
+ val = FIELD_GET(SGPIO_SPARX5_BIT_SOURCE, portval);
+ break;
+ default:
+ val = 0;
+ break;
+ }
+ return !!(val & BIT(bit));
+}
+
+static int sgpio_input_get(struct sgpio_priv *priv,
+ struct sgpio_port_addr *addr)
+{
+ return !!(sgpio_readl(priv, REG_INPUT_DATA, addr->bit) & BIT(addr->port));
+}
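+
+/*
+ * Note: the input data registers are indexed by bit position and hold
+ * one bit per port - the transpose of the per-port layout used by
+ * sgpio_output_set() and sgpio_output_get() above.
+ */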
+
+static int sgpio_pinconf_get(struct pinctrl_dev *pctldev,
+ unsigned int pin, unsigned long *config)
+{
+ struct sgpio_bank *bank = pinctrl_dev_get_drvdata(pctldev);
+ u32 param = pinconf_to_config_param(*config);
+ struct sgpio_priv *priv = bank->priv;
+ struct sgpio_port_addr addr;
+ int val;
+
+ sgpio_pin_to_addr(priv, pin, &addr);
+
+ switch (param) {
+ case PIN_CONFIG_INPUT_ENABLE:
+ val = bank->is_input;
+ break;
+
+ case PIN_CONFIG_OUTPUT_ENABLE:
+ val = !bank->is_input;
+ break;
+
+ case PIN_CONFIG_OUTPUT:
+ if (bank->is_input)
+ return -EINVAL;
+ val = sgpio_output_get(priv, &addr);
+ break;
+
+ default:
+ return -ENOTSUPP;
+ }
+
+ *config = pinconf_to_config_packed(param, val);
+
+ return 0;
+}
+
+static int sgpio_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *configs, unsigned int num_configs)
+{
+ struct sgpio_bank *bank = pinctrl_dev_get_drvdata(pctldev);
+ struct sgpio_priv *priv = bank->priv;
+ struct sgpio_port_addr addr;
+ int cfg, err = 0;
+ u32 param, arg;
+
+ sgpio_pin_to_addr(priv, pin, &addr);
+
+ for (cfg = 0; cfg < num_configs; cfg++) {
+ param = pinconf_to_config_param(configs[cfg]);
+ arg = pinconf_to_config_argument(configs[cfg]);
+
+ switch (param) {
+ case PIN_CONFIG_OUTPUT:
+ if (bank->is_input)
+ return -EINVAL;
+ sgpio_output_set(priv, &addr, arg);
+ break;
+
+ default:
+ err = -ENOTSUPP;
+ }
+ }
+
+ return err;
+}
+
+static const struct pinconf_ops sgpio_confops = {
+ .is_generic = true,
+ .pin_config_get = sgpio_pinconf_get,
+ .pin_config_set = sgpio_pinconf_set,
+ .pin_config_config_dbg_show = pinconf_generic_dump_config,
+};
+
+static int sgpio_get_functions_count(struct pinctrl_dev *pctldev)
+{
+ return 1;
+}
+
+static const char *sgpio_get_function_name(struct pinctrl_dev *pctldev,
+ unsigned int function)
+{
+ return functions[0];
+}
+
+static int sgpio_get_function_groups(struct pinctrl_dev *pctldev,
+ unsigned int function,
+ const char *const **groups,
+ unsigned *const num_groups)
+{
+ *groups = functions;
+ *num_groups = ARRAY_SIZE(functions);
+
+ return 0;
+}
+
+static int sgpio_pinmux_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int selector, unsigned int group)
+{
+ return 0;
+}
+
+static int sgpio_gpio_set_direction(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int pin, bool input)
+{
+ struct sgpio_bank *bank = pinctrl_dev_get_drvdata(pctldev);
+
+ return (input == bank->is_input) ? 0 : -EINVAL;
+}
+
+static int sgpio_gpio_request_enable(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int offset)
+{
+ struct sgpio_bank *bank = pinctrl_dev_get_drvdata(pctldev);
+ struct sgpio_priv *priv = bank->priv;
+ struct sgpio_port_addr addr;
+
+ sgpio_pin_to_addr(priv, offset, &addr);
+
+ if ((priv->ports & BIT(addr.port)) == 0) {
+ dev_warn(priv->dev, "Request port %d.%d: Port is not enabled\n",
+ addr.port, addr.bit);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct pinmux_ops sgpio_pmx_ops = {
+ .get_functions_count = sgpio_get_functions_count,
+ .get_function_name = sgpio_get_function_name,
+ .get_function_groups = sgpio_get_function_groups,
+ .set_mux = sgpio_pinmux_set_mux,
+ .gpio_set_direction = sgpio_gpio_set_direction,
+ .gpio_request_enable = sgpio_gpio_request_enable,
+};
+
+static int sgpio_pctl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct sgpio_bank *bank = pinctrl_dev_get_drvdata(pctldev);
+
+ return bank->pctl_desc.npins;
+}
+
+static const char *sgpio_pctl_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int group)
+{
+ struct sgpio_bank *bank = pinctrl_dev_get_drvdata(pctldev);
+
+ return bank->pctl_desc.pins[group].name;
+}
+
+static int sgpio_pctl_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned int group,
+ const unsigned int **pins,
+ unsigned int *num_pins)
+{
+ struct sgpio_bank *bank = pinctrl_dev_get_drvdata(pctldev);
+
+ *pins = &bank->pctl_desc.pins[group].number;
+ *num_pins = 1;
+
+ return 0;
+}
+
+static const struct pinctrl_ops sgpio_pctl_ops = {
+ .get_groups_count = sgpio_pctl_get_groups_count,
+ .get_group_name = sgpio_pctl_get_group_name,
+ .get_group_pins = sgpio_pctl_get_group_pins,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
+ .dt_free_map = pinconf_generic_dt_free_map,
+};
+
+static int microchip_sgpio_direction_input(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct sgpio_bank *bank = gpiochip_get_data(gc);
+
+ /* Fixed-position function */
+ return bank->is_input ? 0 : -EINVAL;
+}
+
+static int microchip_sgpio_direction_output(struct gpio_chip *gc,
+ unsigned int gpio, int value)
+{
+ struct sgpio_bank *bank = gpiochip_get_data(gc);
+ struct sgpio_priv *priv = bank->priv;
+ struct sgpio_port_addr addr;
+
+ /* Fixed-position function */
+ if (bank->is_input)
+ return -EINVAL;
+
+ sgpio_pin_to_addr(priv, gpio, &addr);
+
+ sgpio_output_set(priv, &addr, value);
+
+ return 0;
+}
+
+static int microchip_sgpio_get_direction(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct sgpio_bank *bank = gpiochip_get_data(gc);
+
+ return bank->is_input ? GPIO_LINE_DIRECTION_IN : GPIO_LINE_DIRECTION_OUT;
+}
+
+static void microchip_sgpio_set_value(struct gpio_chip *gc,
+ unsigned int gpio, int value)
+{
+ microchip_sgpio_direction_output(gc, gpio, value);
+}
+
+static int microchip_sgpio_get_value(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct sgpio_bank *bank = gpiochip_get_data(gc);
+ struct sgpio_priv *priv = bank->priv;
+ struct sgpio_port_addr addr;
+
+ sgpio_pin_to_addr(priv, gpio, &addr);
+
+ return bank->is_input ? sgpio_input_get(priv, &addr) : sgpio_output_get(priv, &addr);
+}
+
+static int microchip_sgpio_of_xlate(struct gpio_chip *gc,
+ const struct of_phandle_args *gpiospec,
+ u32 *flags)
+{
+ struct sgpio_bank *bank = gpiochip_get_data(gc);
+ struct sgpio_priv *priv = bank->priv;
+ int pin;
+
+ /*
+ * Note that an SGPIO pin is defined by *2* numbers, a port
+ * number between 0 and 31, and a bit index, 0 to 3.
+ */
+ if (gpiospec->args[0] >= SGPIO_BITS_PER_WORD ||
+ gpiospec->args[1] >= priv->bitcount)
+ return -EINVAL;
+
+ pin = sgpio_addr_to_pin(priv, gpiospec->args[0], gpiospec->args[1]);
+
+ if (pin >= gc->ngpio)
+ return -EINVAL;
+
+ if (flags)
+ *flags = gpiospec->args[2];
+
+ return pin;
+}
+
+static int microchip_sgpio_get_ports(struct sgpio_priv *priv)
+{
+ const char *range_property_name = "microchip,sgpio-port-ranges";
+ struct device *dev = priv->dev;
+ u32 range_params[64];
+ int i, nranges, ret;
+
+ /* Calculate port mask */
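+ /*
+ * The property holds start/end pairs, e.g. (illustrative)
+ * microchip,sgpio-port-ranges = <0 20>, <24 31> to enable
+ * ports 0-20 and 24-31.
+ */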
+ nranges = device_property_count_u32(dev, range_property_name);
+ if (nranges < 2 || nranges % 2 || nranges > ARRAY_SIZE(range_params)) {
+ dev_err(dev, "%s port range: '%s' property\n",
+ nranges == -EINVAL ? "Missing" : "Invalid",
+ range_property_name);
+ return -EINVAL;
+ }
+
+ ret = device_property_read_u32_array(dev, range_property_name,
+ range_params, nranges);
+ if (ret) {
+ dev_err(dev, "failed to parse '%s' property: %d\n",
+ range_property_name, ret);
+ return ret;
+ }
+ for (i = 0; i < nranges; i += 2) {
+ int start, end;
+
+ start = range_params[i];
+ end = range_params[i + 1];
+ if (start > end || end >= SGPIO_BITS_PER_WORD) {
+ dev_err(dev, "Ill-formed port-range [%d:%d]\n",
+ start, end);
+ return -EINVAL;
+ }
+ priv->ports |= GENMASK(end, start);
+ }
+
+ return 0;
+}
+
+static void microchip_sgpio_irq_settype(struct irq_data *data,
+ int type,
+ int polarity)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct sgpio_bank *bank = gpiochip_get_data(chip);
+ unsigned int gpio = irqd_to_hwirq(data);
+ struct sgpio_port_addr addr;
+ u32 ena;
+
+ sgpio_pin_to_addr(bank->priv, gpio, &addr);
+
+ /* Disable interrupt while changing type */
+ ena = sgpio_readl(bank->priv, REG_INT_ENABLE, addr.bit);
+ sgpio_writel(bank->priv, ena & ~BIT(addr.port), REG_INT_ENABLE, addr.bit);
+
+ /* The 2-bit type value is spread over two register sets: low bit, then high bit */
+ sgpio_clrsetbits(bank->priv, REG_INT_TRIGGER, addr.bit,
+ BIT(addr.port), (!!(type & 0x1)) << addr.port);
+ sgpio_clrsetbits(bank->priv, REG_INT_TRIGGER + SGPIO_MAX_BITS, addr.bit,
+ BIT(addr.port), (!!(type & 0x2)) << addr.port);
+
+ if (type == SGPIO_INT_TRG_LEVEL)
+ sgpio_clrsetbits(bank->priv, REG_INT_POLARITY, addr.bit,
+ BIT(addr.port), polarity << addr.port);
+
+ /* Possibly re-enable interrupts */
+ sgpio_writel(bank->priv, ena, REG_INT_ENABLE, addr.bit);
+}
+
+static void microchip_sgpio_irq_setreg(struct irq_data *data,
+ int reg,
+ bool clear)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct sgpio_bank *bank = gpiochip_get_data(chip);
+ unsigned int gpio = irqd_to_hwirq(data);
+ struct sgpio_port_addr addr;
+
+ sgpio_pin_to_addr(bank->priv, gpio, &addr);
+
+ if (clear)
+ sgpio_clrsetbits(bank->priv, reg, addr.bit, BIT(addr.port), 0);
+ else
+ sgpio_clrsetbits(bank->priv, reg, addr.bit, 0, BIT(addr.port));
+}
+
+static void microchip_sgpio_irq_mask(struct irq_data *data)
+{
+ microchip_sgpio_irq_setreg(data, REG_INT_ENABLE, true);
+}
+
+static void microchip_sgpio_irq_unmask(struct irq_data *data)
+{
+ microchip_sgpio_irq_setreg(data, REG_INT_ENABLE, false);
+}
+
+static void microchip_sgpio_irq_ack(struct irq_data *data)
+{
+ microchip_sgpio_irq_setreg(data, REG_INT_ACK, false);
+}
+
+static int microchip_sgpio_irq_set_type(struct irq_data *data, unsigned int type)
+{
+ type &= IRQ_TYPE_SENSE_MASK;
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_BOTH:
+ irq_set_handler_locked(data, handle_edge_irq);
+ microchip_sgpio_irq_settype(data, SGPIO_INT_TRG_EDGE, 0);
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ irq_set_handler_locked(data, handle_edge_irq);
+ microchip_sgpio_irq_settype(data, SGPIO_INT_TRG_EDGE_RISE, 0);
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ irq_set_handler_locked(data, handle_edge_irq);
+ microchip_sgpio_irq_settype(data, SGPIO_INT_TRG_EDGE_FALL, 0);
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ irq_set_handler_locked(data, handle_level_irq);
+ microchip_sgpio_irq_settype(data, SGPIO_INT_TRG_LEVEL, SGPIO_TRG_LEVEL_HIGH);
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ irq_set_handler_locked(data, handle_level_irq);
+ microchip_sgpio_irq_settype(data, SGPIO_INT_TRG_LEVEL, SGPIO_TRG_LEVEL_LOW);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct irq_chip microchip_sgpio_irqchip = {
+ .name = "gpio",
+ .irq_mask = microchip_sgpio_irq_mask,
+ .irq_ack = microchip_sgpio_irq_ack,
+ .irq_unmask = microchip_sgpio_irq_unmask,
+ .irq_set_type = microchip_sgpio_irq_set_type,
+};
+
+static void sgpio_irq_handler(struct irq_desc *desc)
+{
+ struct irq_chip *parent_chip = irq_desc_get_chip(desc);
+ struct gpio_chip *chip = irq_desc_get_handler_data(desc);
+ struct sgpio_bank *bank = gpiochip_get_data(chip);
+ struct sgpio_priv *priv = bank->priv;
+ int bit, port, gpio;
+ long val;
+
+ for (bit = 0; bit < priv->bitcount; bit++) {
+ val = sgpio_readl(priv, REG_INT_IDENT, bit);
+ if (!val)
+ continue;
+
+ chained_irq_enter(parent_chip, desc);
+
+ for_each_set_bit(port, &val, SGPIO_BITS_PER_WORD) {
+ gpio = sgpio_addr_to_pin(priv, port, bit);
+ generic_handle_irq(irq_linear_revmap(chip->irq.domain, gpio));
+ }
+
+ chained_irq_exit(parent_chip, desc);
+ }
+}
+
+static int microchip_sgpio_register_bank(struct device *dev,
+ struct sgpio_priv *priv,
+ struct fwnode_handle *fwnode,
+ int bankno)
+{
+ struct pinctrl_pin_desc *pins;
+ struct pinctrl_desc *pctl_desc;
+ struct pinctrl_dev *pctldev;
+ struct sgpio_bank *bank;
+ struct gpio_chip *gc;
+ u32 ngpios;
+ int i, ret;
+
+ /* Get overall bank struct */
+ bank = (bankno == 0) ? &priv->in : &priv->out;
+ bank->priv = priv;
+
+ if (fwnode_property_read_u32(fwnode, "ngpios", &ngpios)) {
+ dev_info(dev, "failed to get number of gpios for bank%d\n",
+ bankno);
+ ngpios = 64;
+ }
+
+ priv->bitcount = ngpios / SGPIO_BITS_PER_WORD;
+ if (priv->bitcount > SGPIO_MAX_BITS) {
+ dev_err(dev, "Bit width exceeds maximum (%d)\n",
+ SGPIO_MAX_BITS);
+ return -EINVAL;
+ }
+
+ pctl_desc = &bank->pctl_desc;
+ pctl_desc->name = devm_kasprintf(dev, GFP_KERNEL, "%s-%sput",
+ dev_name(dev),
+ bank->is_input ? "in" : "out");
+ pctl_desc->pctlops = &sgpio_pctl_ops;
+ pctl_desc->pmxops = &sgpio_pmx_ops;
+ pctl_desc->confops = &sgpio_confops;
+ pctl_desc->owner = THIS_MODULE;
+
+ pins = devm_kcalloc(dev, ngpios, sizeof(*pins), GFP_KERNEL);
+ if (!pins)
+ return -ENOMEM;
+
+ pctl_desc->npins = ngpios;
+ pctl_desc->pins = pins;
+
+ for (i = 0; i < ngpios; i++) {
+ struct sgpio_port_addr addr;
+
+ sgpio_pin_to_addr(priv, i, &addr);
+
+ pins[i].number = i;
+ pins[i].name = devm_kasprintf(dev, GFP_KERNEL,
+ "SGPIO_%c_p%db%d",
+ bank->is_input ? 'I' : 'O',
+ addr.port, addr.bit);
+ if (!pins[i].name)
+ return -ENOMEM;
+ }
+
+ pctldev = devm_pinctrl_register(dev, pctl_desc, bank);
+ if (IS_ERR(pctldev))
+ return dev_err_probe(dev, PTR_ERR(pctldev), "Failed to register pinctrl\n");
+
+ gc = &bank->gpio;
+ gc->label = pctl_desc->name;
+ gc->parent = dev;
+ gc->of_node = to_of_node(fwnode);
+ gc->owner = THIS_MODULE;
+ gc->get_direction = microchip_sgpio_get_direction;
+ gc->direction_input = microchip_sgpio_direction_input;
+ gc->direction_output = microchip_sgpio_direction_output;
+ gc->get = microchip_sgpio_get_value;
+ gc->set = microchip_sgpio_set_value;
+ gc->request = gpiochip_generic_request;
+ gc->free = gpiochip_generic_free;
+ gc->of_xlate = microchip_sgpio_of_xlate;
+ gc->of_gpio_n_cells = 3;
+ gc->base = -1;
+ gc->ngpio = ngpios;
+
+ if (bank->is_input && priv->properties->flags & SGPIO_FLAGS_HAS_IRQ) {
+ int irq = fwnode_irq_get(fwnode, 0);
+
+ if (irq > 0) {
+ struct gpio_irq_chip *girq = &gc->irq;
+
+ girq->chip = devm_kmemdup(dev, &microchip_sgpio_irqchip,
+ sizeof(microchip_sgpio_irqchip),
+ GFP_KERNEL);
+ if (!girq->chip)
+ return -ENOMEM;
+ girq->parent_handler = sgpio_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(dev, 1,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->parents[0] = irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_bad_irq;
+
+ /* Disable all individual pins */
+ for (i = 0; i < SGPIO_MAX_BITS; i++)
+ sgpio_writel(priv, 0, REG_INT_ENABLE, i);
+ /* Master enable */
+ sgpio_clrsetbits(priv, REG_SIO_CONFIG, 0, 0, SGPIO_MASTER_INTR_ENA);
+ }
+ }
+
+ ret = devm_gpiochip_add_data(dev, gc, bank);
+ if (ret)
+ dev_err(dev, "Failed to register: ret %d\n", ret);
+
+ return ret;
+}
+
+static int microchip_sgpio_probe(struct platform_device *pdev)
+{
+ int div_clock = 0, ret, port, i, nbanks;
+ struct device *dev = &pdev->dev;
+ struct fwnode_handle *fwnode;
+ struct sgpio_priv *priv;
+ struct clk *clk;
+ u32 val;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+
+ clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk), "Failed to get clock\n");
+
+ div_clock = clk_get_rate(clk);
+ if (device_property_read_u32(dev, "bus-frequency", &priv->clock))
+ priv->clock = 12500000;
+ if (priv->clock == 0 || priv->clock > (div_clock / 2)) {
+ dev_err(dev, "Invalid frequency %d\n", priv->clock);
+ return -EINVAL;
+ }
+
+ priv->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->regs))
+ return PTR_ERR(priv->regs);
+ priv->properties = device_get_match_data(dev);
+ priv->in.is_input = true;
+
+ /* Get rest of device properties */
+ ret = microchip_sgpio_get_ports(priv);
+ if (ret)
+ return ret;
+
+ nbanks = device_get_child_node_count(dev);
+ if (nbanks != 2) {
+ dev_err(dev, "Must have 2 banks (have %d)\n", nbanks);
+ return -EINVAL;
+ }
+
+ i = 0;
+ device_for_each_child_node(dev, fwnode) {
+ ret = microchip_sgpio_register_bank(dev, priv, fwnode, i++);
+ if (ret)
+ return ret;
+ }
+
+ if (priv->in.gpio.ngpio != priv->out.gpio.ngpio) {
+ dev_err(dev, "Banks must have same GPIO count\n");
+ return -ERANGE;
+ }
+
+ sgpio_configure_bitstream(priv);
+
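+ /*
+ * The serial clock is the parent clock divided by this value;
+ * illustrative: a 250 MHz parent with the default 12.5 MHz
+ * bus-frequency gives a divider of 20.
+ */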
+ val = max(2U, div_clock / priv->clock);
+ sgpio_configure_clock(priv, val);
+
+ for (port = 0; port < SGPIO_BITS_PER_WORD; port++)
+ sgpio_writel(priv, 0, REG_PORT_CONFIG, port);
+ sgpio_writel(priv, priv->ports, REG_PORT_ENABLE, 0);
+
+ return 0;
+}
+
+static const struct of_device_id microchip_sgpio_gpio_of_match[] = {
+ {
+ .compatible = "microchip,sparx5-sgpio",
+ .data = &properties_sparx5,
+ }, {
+ .compatible = "mscc,luton-sgpio",
+ .data = &properties_luton,
+ }, {
+ .compatible = "mscc,ocelot-sgpio",
+ .data = &properties_ocelot,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver microchip_sgpio_pinctrl_driver = {
+ .driver = {
+ .name = "pinctrl-microchip-sgpio",
+ .of_match_table = microchip_sgpio_gpio_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = microchip_sgpio_probe,
+};
+builtin_platform_driver(microchip_sgpio_pinctrl_driver);
diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c
index a4a1b00f7f0d..2fd18e356d0c 100644
--- a/drivers/pinctrl/pinctrl-ocelot.c
+++ b/drivers/pinctrl/pinctrl-ocelot.c
@@ -158,6 +158,170 @@ struct ocelot_pinctrl {
u8 stride;
};
+#define LUTON_P(p, f0, f1) \
+static struct ocelot_pin_caps luton_pin_##p = { \
+ .pin = p, \
+ .functions = { \
+ FUNC_GPIO, FUNC_##f0, FUNC_##f1, FUNC_NONE, \
+ }, \
+}
+
+LUTON_P(0, SG0, NONE);
+LUTON_P(1, SG0, NONE);
+LUTON_P(2, SG0, NONE);
+LUTON_P(3, SG0, NONE);
+LUTON_P(4, TACHO, NONE);
+LUTON_P(5, TWI, PHY_LED);
+LUTON_P(6, TWI, PHY_LED);
+LUTON_P(7, NONE, PHY_LED);
+LUTON_P(8, EXT_IRQ, PHY_LED);
+LUTON_P(9, EXT_IRQ, PHY_LED);
+LUTON_P(10, SFP, PHY_LED);
+LUTON_P(11, SFP, PHY_LED);
+LUTON_P(12, SFP, PHY_LED);
+LUTON_P(13, SFP, PHY_LED);
+LUTON_P(14, SI, PHY_LED);
+LUTON_P(15, SI, PHY_LED);
+LUTON_P(16, SI, PHY_LED);
+LUTON_P(17, SFP, PHY_LED);
+LUTON_P(18, SFP, PHY_LED);
+LUTON_P(19, SFP, PHY_LED);
+LUTON_P(20, SFP, PHY_LED);
+LUTON_P(21, SFP, PHY_LED);
+LUTON_P(22, SFP, PHY_LED);
+LUTON_P(23, SFP, PHY_LED);
+LUTON_P(24, SFP, PHY_LED);
+LUTON_P(25, SFP, PHY_LED);
+LUTON_P(26, SFP, PHY_LED);
+LUTON_P(27, SFP, PHY_LED);
+LUTON_P(28, SFP, PHY_LED);
+LUTON_P(29, PWM, NONE);
+LUTON_P(30, UART, NONE);
+LUTON_P(31, UART, NONE);
+
+#define LUTON_PIN(n) { \
+ .number = n, \
+ .name = "GPIO_"#n, \
+ .drv_data = &luton_pin_##n \
+}
+
+static const struct pinctrl_pin_desc luton_pins[] = {
+ LUTON_PIN(0),
+ LUTON_PIN(1),
+ LUTON_PIN(2),
+ LUTON_PIN(3),
+ LUTON_PIN(4),
+ LUTON_PIN(5),
+ LUTON_PIN(6),
+ LUTON_PIN(7),
+ LUTON_PIN(8),
+ LUTON_PIN(9),
+ LUTON_PIN(10),
+ LUTON_PIN(11),
+ LUTON_PIN(12),
+ LUTON_PIN(13),
+ LUTON_PIN(14),
+ LUTON_PIN(15),
+ LUTON_PIN(16),
+ LUTON_PIN(17),
+ LUTON_PIN(18),
+ LUTON_PIN(19),
+ LUTON_PIN(20),
+ LUTON_PIN(21),
+ LUTON_PIN(22),
+ LUTON_PIN(23),
+ LUTON_PIN(24),
+ LUTON_PIN(25),
+ LUTON_PIN(26),
+ LUTON_PIN(27),
+ LUTON_PIN(28),
+ LUTON_PIN(29),
+ LUTON_PIN(30),
+ LUTON_PIN(31),
+};
+
+#define SERVAL_P(p, f0, f1, f2) \
+static struct ocelot_pin_caps serval_pin_##p = { \
+ .pin = p, \
+ .functions = { \
+ FUNC_GPIO, FUNC_##f0, FUNC_##f1, FUNC_##f2, \
+ }, \
+}
+
+SERVAL_P(0, SG0, NONE, NONE);
+SERVAL_P(1, SG0, NONE, NONE);
+SERVAL_P(2, SG0, NONE, NONE);
+SERVAL_P(3, SG0, NONE, NONE);
+SERVAL_P(4, TACHO, NONE, NONE);
+SERVAL_P(5, PWM, NONE, NONE);
+SERVAL_P(6, TWI, NONE, NONE);
+SERVAL_P(7, TWI, NONE, NONE);
+SERVAL_P(8, SI, NONE, NONE);
+SERVAL_P(9, SI, MD, NONE);
+SERVAL_P(10, SI, MD, NONE);
+SERVAL_P(11, SFP, MD, TWI_SCL_M);
+SERVAL_P(12, SFP, MD, TWI_SCL_M);
+SERVAL_P(13, SFP, UART2, TWI_SCL_M);
+SERVAL_P(14, SFP, UART2, TWI_SCL_M);
+SERVAL_P(15, SFP, PTP0, TWI_SCL_M);
+SERVAL_P(16, SFP, PTP0, TWI_SCL_M);
+SERVAL_P(17, SFP, PCI_WAKE, TWI_SCL_M);
+SERVAL_P(18, SFP, NONE, TWI_SCL_M);
+SERVAL_P(19, SFP, NONE, TWI_SCL_M);
+SERVAL_P(20, SFP, NONE, TWI_SCL_M);
+SERVAL_P(21, SFP, NONE, TWI_SCL_M);
+SERVAL_P(22, NONE, NONE, NONE);
+SERVAL_P(23, NONE, NONE, NONE);
+SERVAL_P(24, NONE, NONE, NONE);
+SERVAL_P(25, NONE, NONE, NONE);
+SERVAL_P(26, UART, NONE, NONE);
+SERVAL_P(27, UART, NONE, NONE);
+SERVAL_P(28, IRQ0, NONE, NONE);
+SERVAL_P(29, IRQ1, NONE, NONE);
+SERVAL_P(30, PTP0, NONE, NONE);
+SERVAL_P(31, PTP0, NONE, NONE);
+
+#define SERVAL_PIN(n) { \
+ .number = n, \
+ .name = "GPIO_"#n, \
+ .drv_data = &serval_pin_##n \
+}
+
+static const struct pinctrl_pin_desc serval_pins[] = {
+ SERVAL_PIN(0),
+ SERVAL_PIN(1),
+ SERVAL_PIN(2),
+ SERVAL_PIN(3),
+ SERVAL_PIN(4),
+ SERVAL_PIN(5),
+ SERVAL_PIN(6),
+ SERVAL_PIN(7),
+ SERVAL_PIN(8),
+ SERVAL_PIN(9),
+ SERVAL_PIN(10),
+ SERVAL_PIN(11),
+ SERVAL_PIN(12),
+ SERVAL_PIN(13),
+ SERVAL_PIN(14),
+ SERVAL_PIN(15),
+ SERVAL_PIN(16),
+ SERVAL_PIN(17),
+ SERVAL_PIN(18),
+ SERVAL_PIN(19),
+ SERVAL_PIN(20),
+ SERVAL_PIN(21),
+ SERVAL_PIN(22),
+ SERVAL_PIN(23),
+ SERVAL_PIN(24),
+ SERVAL_PIN(25),
+ SERVAL_PIN(26),
+ SERVAL_PIN(27),
+ SERVAL_PIN(28),
+ SERVAL_PIN(29),
+ SERVAL_PIN(30),
+ SERVAL_PIN(31),
+};
+
#define OCELOT_P(p, f0, f1, f2) \
static struct ocelot_pin_caps ocelot_pin_##p = { \
.pin = p, \
@@ -729,7 +893,7 @@ static int ocelot_pinconf_get(struct pinctrl_dev *pctldev,
if (err)
return err;
if (param == PIN_CONFIG_BIAS_DISABLE)
- val = (val == 0 ? true : false);
+ val = (val == 0);
else if (param == PIN_CONFIG_BIAS_PULL_DOWN)
val = (val & BIAS_PD_BIT ? true : false);
else /* PIN_CONFIG_BIAS_PULL_UP */
@@ -868,6 +1032,24 @@ static const struct pinctrl_ops ocelot_pctl_ops = {
.dt_free_map = pinconf_generic_dt_free_map,
};
+static struct pinctrl_desc luton_desc = {
+ .name = "luton-pinctrl",
+ .pins = luton_pins,
+ .npins = ARRAY_SIZE(luton_pins),
+ .pctlops = &ocelot_pctl_ops,
+ .pmxops = &ocelot_pmx_ops,
+ .owner = THIS_MODULE,
+};
+
+static struct pinctrl_desc serval_desc = {
+ .name = "serval-pinctrl",
+ .pins = serval_pins,
+ .npins = ARRAY_SIZE(serval_pins),
+ .pctlops = &ocelot_pctl_ops,
+ .pmxops = &ocelot_pmx_ops,
+ .owner = THIS_MODULE,
+};
+
static struct pinctrl_desc ocelot_desc = {
.name = "ocelot-pinctrl",
.pins = ocelot_pins,
@@ -1151,6 +1333,8 @@ static int ocelot_gpiochip_register(struct platform_device *pdev,
}
static const struct of_device_id ocelot_pinctrl_of_match[] = {
+ { .compatible = "mscc,luton-pinctrl", .data = &luton_desc },
+ { .compatible = "mscc,serval-pinctrl", .data = &serval_desc },
{ .compatible = "mscc,ocelot-pinctrl", .data = &ocelot_desc },
{ .compatible = "mscc,jaguar2-pinctrl", .data = &jaguar2_desc },
{ .compatible = "microchip,sparx5-pinctrl", .data = &sparx5_desc },
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index 5fe7b8aaf69d..a003776506d0 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -2,7 +2,8 @@
if (ARCH_QCOM || COMPILE_TEST)
config PINCTRL_MSM
- bool
+ tristate "Qualcomm core pin controller driver"
+ depends on QCOM_SCM || !QCOM_SCM #if QCOM_SCM=m this can't be =y
select PINMUX
select PINCONF
select GENERIC_PINCONF
@@ -13,7 +14,7 @@ config PINCTRL_MSM
config PINCTRL_APQ8064
tristate "Qualcomm APQ8064 pin controller driver"
depends on GPIOLIB && OF
- select PINCTRL_MSM
+ depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm TLMM block found in the Qualcomm APQ8064 platform.
@@ -21,7 +22,7 @@ config PINCTRL_APQ8064
config PINCTRL_APQ8084
tristate "Qualcomm APQ8084 pin controller driver"
depends on GPIOLIB && OF
- select PINCTRL_MSM
+ depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm TLMM block found in the Qualcomm APQ8084 platform.
@@ -29,7 +30,7 @@ config PINCTRL_APQ8084
config PINCTRL_IPQ4019
tristate "Qualcomm IPQ4019 pin controller driver"
depends on GPIOLIB && OF
- select PINCTRL_MSM
+ depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm TLMM block found in the Qualcomm IPQ4019 platform.
@@ -37,7 +38,7 @@ config PINCTRL_IPQ4019
config PINCTRL_IPQ8064
tristate "Qualcomm IPQ8064 pin controller driver"
depends on GPIOLIB && OF
- select PINCTRL_MSM
+ depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm TLMM block found in the Qualcomm IPQ8064 platform.
@@ -45,7 +46,7 @@ config PINCTRL_IPQ8064
config PINCTRL_IPQ8074
tristate "Qualcomm Technologies, Inc. IPQ8074 pin controller driver"
depends on GPIOLIB && OF
- select PINCTRL_MSM
+ depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for
the Qualcomm Technologies Inc. TLMM block found on the
@@ -55,7 +56,7 @@ config PINCTRL_IPQ8074
config PINCTRL_IPQ6018
tristate "Qualcomm Technologies, Inc. IPQ6018 pin controller driver"
depends on GPIOLIB && OF
- select PINCTRL_MSM
+ depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for
the Qualcomm Technologies Inc. TLMM block found on the
@@ -65,7 +66,7 @@ config PINCTRL_IPQ6018
config PINCTRL_MSM8226
tristate "Qualcomm 8226 pin controller driver"
depends on GPIOLIB && OF
- select PINCTRL_MSM
+ depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm Technologies Inc TLMM block found on the Qualcomm
@@ -74,7 +75,7 @@ config PINCTRL_MSM8226
config PINCTRL_MSM8660
tristate "Qualcomm 8660 pin controller driver"
depends on GPIOLIB && OF
- select PINCTRL_MSM
+ depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm TLMM block found in the Qualcomm 8660 platform.
@@ -82,7 +83,7 @@ config PINCTRL_MSM8660
config PINCTRL_MSM8960
tristate "Qualcomm 8960 pin controller driver"
depends on GPIOLIB && OF
- select PINCTRL_MSM
+ depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm TLMM block found in the Qualcomm 8960 platform.
@@ -90,7 +91,7 @@ config PINCTRL_MSM8960
config PINCTRL_MDM9615
tristate "Qualcomm 9615 pin controller driver"
depends on GPIOLIB && OF
- select PINCTRL_MSM
+ depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm TLMM block found in the Qualcomm 9615 platform.
@@ -98,7 +99,7 @@ config PINCTRL_MDM9615
config PINCTRL_MSM8X74
tristate "Qualcomm 8x74 pin controller driver"
depends on GPIOLIB && OF
- select PINCTRL_MSM
+ depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm TLMM block found in the Qualcomm 8974 platform.
@@ -106,15 +107,25 @@ config PINCTRL_MSM8X74
config PINCTRL_MSM8916
tristate "Qualcomm 8916 pin controller driver"
depends on GPIOLIB && OF
- select PINCTRL_MSM
+ depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm TLMM block found on the Qualcomm 8916 platform.
+config PINCTRL_MSM8953
+ tristate "Qualcomm 8953 pin controller driver"
+ depends on GPIOLIB && OF
+ depends on PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm TLMM block found on the Qualcomm MSM8953 platform.
+ The Qualcomm APQ8053, SDM450, SDM632 platforms are also
+ supported by this driver.
+
config PINCTRL_MSM8976
tristate "Qualcomm 8976 pin controller driver"
depends on GPIOLIB && OF
- select PINCTRL_MSM
+ depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm TLMM block found on the Qualcomm MSM8976 platform.
@@ -124,7 +135,7 @@ config PINCTRL_MSM8976
config PINCTRL_MSM8994
tristate "Qualcomm 8994 pin controller driver"
depends on GPIOLIB && OF
- select PINCTRL_MSM
+ depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm TLMM block found in the Qualcomm 8994 platform. The
@@ -133,7 +144,7 @@ config PINCTRL_MSM8994
config PINCTRL_MSM8996
tristate "Qualcomm MSM8996 pin controller driver"
depends on GPIOLIB && OF
- select PINCTRL_MSM
+ depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm TLMM block found in the Qualcomm MSM8996 platform.
@@ -141,7 +152,7 @@ config PINCTRL_MSM8996
config PINCTRL_MSM8998
tristate "Qualcomm MSM8998 pin controller driver"
depends on GPIOLIB && OF
- select PINCTRL_MSM
+ depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm TLMM block found in the Qualcomm MSM8998 platform.
@@ -149,7 +160,7 @@ config PINCTRL_MSM8998
config PINCTRL_QCS404
tristate "Qualcomm QCS404 pin controller driver"
depends on GPIOLIB && OF
- select PINCTRL_MSM
+ depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
TLMM block found in the Qualcomm QCS404 platform.
@@ -157,7 +168,7 @@ config PINCTRL_QCS404
config PINCTRL_QDF2XXX
tristate "Qualcomm Technologies QDF2xxx pin controller driver"
depends on GPIOLIB && ACPI
- select PINCTRL_MSM
+ depends on PINCTRL_MSM
help
This is the GPIO driver for the TLMM block found on the
Qualcomm Technologies QDF2xxx SOCs.
@@ -194,16 +205,25 @@ config PINCTRL_QCOM_SSBI_PMIC
config PINCTRL_SC7180
tristate "Qualcomm Technologies Inc SC7180 pin controller driver"
depends on GPIOLIB && OF
- select PINCTRL_MSM
+ depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm Technologies Inc TLMM block found on the Qualcomm
Technologies Inc SC7180 platform.
+config PINCTRL_SC7280
+ tristate "Qualcomm Technologies Inc SC7280 pin controller driver"
+ depends on GPIOLIB && OF
+ depends on PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm Technologies Inc TLMM block found on the Qualcomm
+ Technologies Inc SC7280 platform.
+
config PINCTRL_SDM660
tristate "Qualcomm Technologies Inc SDM660 pin controller driver"
depends on GPIOLIB && OF
- select PINCTRL_MSM
+ depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm Technologies Inc TLMM block found on the Qualcomm
@@ -212,16 +232,25 @@ config PINCTRL_SDM660
config PINCTRL_SDM845
tristate "Qualcomm Technologies Inc SDM845 pin controller driver"
depends on GPIOLIB && (OF || ACPI)
- select PINCTRL_MSM
+ depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm Technologies Inc TLMM block found on the Qualcomm
Technologies Inc SDM845 platform.
+config PINCTRL_SDX55
+ tristate "Qualcomm Technologies Inc SDX55 pin controller driver"
+ depends on GPIOLIB && OF
+ depends on PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm Technologies Inc TLMM block found on the Qualcomm
+ Technologies Inc SDX55 platform.
+
config PINCTRL_SM8150
tristate "Qualcomm Technologies Inc SM8150 pin controller driver"
depends on GPIOLIB && OF
- select PINCTRL_MSM
+ depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm Technologies Inc TLMM block found on the Qualcomm
@@ -230,10 +259,21 @@ config PINCTRL_SM8150
config PINCTRL_SM8250
tristate "Qualcomm Technologies Inc SM8250 pin controller driver"
depends on GPIOLIB && OF
- select PINCTRL_MSM
+ depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm Technologies Inc TLMM block found on the Qualcomm
Technologies Inc SM8250 platform.
+config PINCTRL_LPASS_LPI
+ tristate "Qualcomm Technologies Inc LPASS LPI pin controller driver"
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
+ depends on GPIOLIB
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm Technologies Inc LPASS (Low Power Audio SubSystem) LPI
+ (Low Power Island) found on the Qualcomm Technologies Inc SoCs.
+
endif
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index 9e3d9c91a444..91875a3f5ac4 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_PINCTRL_MSM8660) += pinctrl-msm8660.o
obj-$(CONFIG_PINCTRL_MSM8960) += pinctrl-msm8960.o
obj-$(CONFIG_PINCTRL_MSM8X74) += pinctrl-msm8x74.o
obj-$(CONFIG_PINCTRL_MSM8916) += pinctrl-msm8916.o
+obj-$(CONFIG_PINCTRL_MSM8953) += pinctrl-msm8953.o
obj-$(CONFIG_PINCTRL_MSM8976) += pinctrl-msm8976.o
obj-$(CONFIG_PINCTRL_MSM8994) += pinctrl-msm8994.o
obj-$(CONFIG_PINCTRL_MSM8996) += pinctrl-msm8996.o
@@ -24,7 +25,10 @@ obj-$(CONFIG_PINCTRL_QCOM_SPMI_PMIC) += pinctrl-spmi-mpp.o
obj-$(CONFIG_PINCTRL_QCOM_SSBI_PMIC) += pinctrl-ssbi-gpio.o
obj-$(CONFIG_PINCTRL_QCOM_SSBI_PMIC) += pinctrl-ssbi-mpp.o
obj-$(CONFIG_PINCTRL_SC7180) += pinctrl-sc7180.o
+obj-$(CONFIG_PINCTRL_SC7280) += pinctrl-sc7280.o
obj-$(CONFIG_PINCTRL_SDM660) += pinctrl-sdm660.o
obj-$(CONFIG_PINCTRL_SDM845) += pinctrl-sdm845.o
+obj-$(CONFIG_PINCTRL_SDX55) += pinctrl-sdx55.o
obj-$(CONFIG_PINCTRL_SM8150) += pinctrl-sm8150.o
obj-$(CONFIG_PINCTRL_SM8250) += pinctrl-sm8250.o
+obj-$(CONFIG_PINCTRL_LPASS_LPI) += pinctrl-lpass-lpi.o
diff --git a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
new file mode 100644
index 000000000000..369ee20a7ea9
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
@@ -0,0 +1,695 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2020 Linaro Ltd.
+ */
+
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/gpio/driver.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include "../core.h"
+#include "../pinctrl-utils.h"
+
+#define LPI_SLEW_RATE_CTL_REG 0xa000
+#define LPI_TLMM_REG_OFFSET 0x1000
+#define LPI_SLEW_RATE_MAX 0x03
+#define LPI_SLEW_BITS_SIZE 0x02
+#define LPI_SLEW_RATE_MASK GENMASK(1, 0)
+#define LPI_GPIO_CFG_REG 0x00
+#define LPI_GPIO_PULL_MASK GENMASK(1, 0)
+#define LPI_GPIO_FUNCTION_MASK GENMASK(5, 2)
+#define LPI_GPIO_OUT_STRENGTH_MASK GENMASK(8, 6)
+#define LPI_GPIO_OE_MASK BIT(9)
+#define LPI_GPIO_VALUE_REG 0x04
+#define LPI_GPIO_VALUE_IN_MASK BIT(0)
+#define LPI_GPIO_VALUE_OUT_MASK BIT(1)
+
+#define LPI_GPIO_BIAS_DISABLE 0x0
+#define LPI_GPIO_PULL_DOWN 0x1
+#define LPI_GPIO_KEEPER 0x2
+#define LPI_GPIO_PULL_UP 0x3
+#define LPI_GPIO_DS_TO_VAL(v) ((v) / 2 - 1)
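+/* Illustrative mapping: drive strength in mA to field value: 2 -> 0, 4 -> 1, ... 16 -> 7 */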
+#define NO_SLEW -1
+
+#define LPI_FUNCTION(fname) \
+ [LPI_MUX_##fname] = { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
+
+#define LPI_PINGROUP(id, soff, f1, f2, f3, f4) \
+ { \
+ .name = "gpio" #id, \
+ .pins = gpio##id##_pins, \
+ .pin = id, \
+ .slew_offset = soff, \
+ .npins = ARRAY_SIZE(gpio##id##_pins), \
+ .funcs = (int[]){ \
+ LPI_MUX_gpio, \
+ LPI_MUX_##f1, \
+ LPI_MUX_##f2, \
+ LPI_MUX_##f3, \
+ LPI_MUX_##f4, \
+ }, \
+ .nfuncs = 5, \
+ }
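+
+/*
+ * Illustrative expansion: LPI_PINGROUP(0, 0, swr_tx_clk, qua_mi2s_sclk, _, _)
+ * defines group "gpio0" with slew-register bit offset 0 and the
+ * function list { gpio, swr_tx_clk, qua_mi2s_sclk, _, _ }.
+ */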
+
+struct lpi_pingroup {
+ const char *name;
+ const unsigned int *pins;
+ unsigned int npins;
+ unsigned int pin;
+ /* Bit offset in slew register for SoundWire pins only */
+ int slew_offset;
+ unsigned int *funcs;
+ unsigned int nfuncs;
+};
+
+struct lpi_function {
+ const char *name;
+ const char * const *groups;
+ unsigned int ngroups;
+};
+
+struct lpi_pinctrl_variant_data {
+ const struct pinctrl_pin_desc *pins;
+ int npins;
+ const struct lpi_pingroup *groups;
+ int ngroups;
+ const struct lpi_function *functions;
+ int nfunctions;
+};
+
+#define MAX_LPI_NUM_CLKS 2
+
+struct lpi_pinctrl {
+ struct device *dev;
+ struct pinctrl_dev *ctrl;
+ struct gpio_chip chip;
+ struct pinctrl_desc desc;
+ char __iomem *tlmm_base;
+ char __iomem *slew_base;
+ struct clk_bulk_data clks[MAX_LPI_NUM_CLKS];
+ struct mutex slew_access_lock;
+ const struct lpi_pinctrl_variant_data *data;
+};
+
+/* sm8250 variant specific data */
+static const struct pinctrl_pin_desc sm8250_lpi_pins[] = {
+ PINCTRL_PIN(0, "gpio0"),
+ PINCTRL_PIN(1, "gpio1"),
+ PINCTRL_PIN(2, "gpio2"),
+ PINCTRL_PIN(3, "gpio3"),
+ PINCTRL_PIN(4, "gpio4"),
+ PINCTRL_PIN(5, "gpio5"),
+ PINCTRL_PIN(6, "gpio6"),
+ PINCTRL_PIN(7, "gpio7"),
+ PINCTRL_PIN(8, "gpio8"),
+ PINCTRL_PIN(9, "gpio9"),
+ PINCTRL_PIN(10, "gpio10"),
+ PINCTRL_PIN(11, "gpio11"),
+ PINCTRL_PIN(12, "gpio12"),
+ PINCTRL_PIN(13, "gpio13"),
+};
+
+enum sm8250_lpi_functions {
+ LPI_MUX_dmic1_clk,
+ LPI_MUX_dmic1_data,
+ LPI_MUX_dmic2_clk,
+ LPI_MUX_dmic2_data,
+ LPI_MUX_dmic3_clk,
+ LPI_MUX_dmic3_data,
+ LPI_MUX_i2s1_clk,
+ LPI_MUX_i2s1_data,
+ LPI_MUX_i2s1_ws,
+ LPI_MUX_i2s2_clk,
+ LPI_MUX_i2s2_data,
+ LPI_MUX_i2s2_ws,
+ LPI_MUX_qua_mi2s_data,
+ LPI_MUX_qua_mi2s_sclk,
+ LPI_MUX_qua_mi2s_ws,
+ LPI_MUX_swr_rx_clk,
+ LPI_MUX_swr_rx_data,
+ LPI_MUX_swr_tx_clk,
+ LPI_MUX_swr_tx_data,
+ LPI_MUX_wsa_swr_clk,
+ LPI_MUX_wsa_swr_data,
+ LPI_MUX_gpio,
+ LPI_MUX__,
+};
+
+static const unsigned int gpio0_pins[] = { 0 };
+static const unsigned int gpio1_pins[] = { 1 };
+static const unsigned int gpio2_pins[] = { 2 };
+static const unsigned int gpio3_pins[] = { 3 };
+static const unsigned int gpio4_pins[] = { 4 };
+static const unsigned int gpio5_pins[] = { 5 };
+static const unsigned int gpio6_pins[] = { 6 };
+static const unsigned int gpio7_pins[] = { 7 };
+static const unsigned int gpio8_pins[] = { 8 };
+static const unsigned int gpio9_pins[] = { 9 };
+static const unsigned int gpio10_pins[] = { 10 };
+static const unsigned int gpio11_pins[] = { 11 };
+static const unsigned int gpio12_pins[] = { 12 };
+static const unsigned int gpio13_pins[] = { 13 };
+static const char * const swr_tx_clk_groups[] = { "gpio0" };
+static const char * const swr_tx_data_groups[] = { "gpio1", "gpio2", "gpio5" };
+static const char * const swr_rx_clk_groups[] = { "gpio3" };
+static const char * const swr_rx_data_groups[] = { "gpio4", "gpio5" };
+static const char * const dmic1_clk_groups[] = { "gpio6" };
+static const char * const dmic1_data_groups[] = { "gpio7" };
+static const char * const dmic2_clk_groups[] = { "gpio8" };
+static const char * const dmic2_data_groups[] = { "gpio9" };
+static const char * const i2s2_clk_groups[] = { "gpio10" };
+static const char * const i2s2_ws_groups[] = { "gpio11" };
+static const char * const dmic3_clk_groups[] = { "gpio12" };
+static const char * const dmic3_data_groups[] = { "gpio13" };
+static const char * const qua_mi2s_sclk_groups[] = { "gpio0" };
+static const char * const qua_mi2s_ws_groups[] = { "gpio1" };
+static const char * const qua_mi2s_data_groups[] = { "gpio2", "gpio3", "gpio4" };
+static const char * const i2s1_clk_groups[] = { "gpio6" };
+static const char * const i2s1_ws_groups[] = { "gpio7" };
+static const char * const i2s1_data_groups[] = { "gpio8", "gpio9" };
+static const char * const wsa_swr_clk_groups[] = { "gpio10" };
+static const char * const wsa_swr_data_groups[] = { "gpio11" };
+static const char * const i2s2_data_groups[] = { "gpio12", "gpio13" };
+
+static const struct lpi_pingroup sm8250_groups[] = {
+ LPI_PINGROUP(0, 0, swr_tx_clk, qua_mi2s_sclk, _, _),
+ LPI_PINGROUP(1, 2, swr_tx_data, qua_mi2s_ws, _, _),
+ LPI_PINGROUP(2, 4, swr_tx_data, qua_mi2s_data, _, _),
+ LPI_PINGROUP(3, 8, swr_rx_clk, qua_mi2s_data, _, _),
+ LPI_PINGROUP(4, 10, swr_rx_data, qua_mi2s_data, _, _),
+ LPI_PINGROUP(5, 12, swr_tx_data, swr_rx_data, _, _),
+ LPI_PINGROUP(6, NO_SLEW, dmic1_clk, i2s1_clk, _, _),
+ LPI_PINGROUP(7, NO_SLEW, dmic1_data, i2s1_ws, _, _),
+ LPI_PINGROUP(8, NO_SLEW, dmic2_clk, i2s1_data, _, _),
+ LPI_PINGROUP(9, NO_SLEW, dmic2_data, i2s1_data, _, _),
+ LPI_PINGROUP(10, 16, i2s2_clk, wsa_swr_clk, _, _),
+ LPI_PINGROUP(11, 18, i2s2_ws, wsa_swr_data, _, _),
+ LPI_PINGROUP(12, NO_SLEW, dmic3_clk, i2s2_data, _, _),
+ LPI_PINGROUP(13, NO_SLEW, dmic3_data, i2s2_data, _, _),
+};
+
+static const struct lpi_function sm8250_functions[] = {
+ LPI_FUNCTION(dmic1_clk),
+ LPI_FUNCTION(dmic1_data),
+ LPI_FUNCTION(dmic2_clk),
+ LPI_FUNCTION(dmic2_data),
+ LPI_FUNCTION(dmic3_clk),
+ LPI_FUNCTION(dmic3_data),
+ LPI_FUNCTION(i2s1_clk),
+ LPI_FUNCTION(i2s1_data),
+ LPI_FUNCTION(i2s1_ws),
+ LPI_FUNCTION(i2s2_clk),
+ LPI_FUNCTION(i2s2_data),
+ LPI_FUNCTION(i2s2_ws),
+ LPI_FUNCTION(qua_mi2s_data),
+ LPI_FUNCTION(qua_mi2s_sclk),
+ LPI_FUNCTION(qua_mi2s_ws),
+ LPI_FUNCTION(swr_rx_clk),
+ LPI_FUNCTION(swr_rx_data),
+ LPI_FUNCTION(swr_tx_clk),
+ LPI_FUNCTION(swr_tx_data),
+ LPI_FUNCTION(wsa_swr_clk),
+ LPI_FUNCTION(wsa_swr_data),
+};
+
+static struct lpi_pinctrl_variant_data sm8250_lpi_data = {
+ .pins = sm8250_lpi_pins,
+ .npins = ARRAY_SIZE(sm8250_lpi_pins),
+ .groups = sm8250_groups,
+ .ngroups = ARRAY_SIZE(sm8250_groups),
+ .functions = sm8250_functions,
+ .nfunctions = ARRAY_SIZE(sm8250_functions),
+};
+
+static int lpi_gpio_read(struct lpi_pinctrl *state, unsigned int pin,
+ unsigned int addr)
+{
+ return ioread32(state->tlmm_base + LPI_TLMM_REG_OFFSET * pin + addr);
+}
+
+static int lpi_gpio_write(struct lpi_pinctrl *state, unsigned int pin,
+ unsigned int addr, unsigned int val)
+{
+ iowrite32(val, state->tlmm_base + LPI_TLMM_REG_OFFSET * pin + addr);
+
+ return 0;
+}
+
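+/*
+ * Worked example (illustrative): each pin owns an 0x1000-byte register
+ * window, so pin 3's config register sits at tlmm_base + 0x3000 and its
+ * value register at tlmm_base + 0x3004.
+ */
+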
+static int lpi_gpio_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct lpi_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctrl->data->ngroups;
+}
+
+static const char *lpi_gpio_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int group)
+{
+ struct lpi_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctrl->data->groups[group].name;
+}
+
+static int lpi_gpio_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned int group,
+ const unsigned int **pins,
+ unsigned int *num_pins)
+{
+ struct lpi_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ *pins = pctrl->data->groups[group].pins;
+ *num_pins = pctrl->data->groups[group].npins;
+
+ return 0;
+}
+
+static const struct pinctrl_ops lpi_gpio_pinctrl_ops = {
+ .get_groups_count = lpi_gpio_get_groups_count,
+ .get_group_name = lpi_gpio_get_group_name,
+ .get_group_pins = lpi_gpio_get_group_pins,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_group,
+ .dt_free_map = pinctrl_utils_free_map,
+};
+
+static int lpi_gpio_get_functions_count(struct pinctrl_dev *pctldev)
+{
+ struct lpi_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctrl->data->nfunctions;
+}
+
+static const char *lpi_gpio_get_function_name(struct pinctrl_dev *pctldev,
+ unsigned int function)
+{
+ struct lpi_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctrl->data->functions[function].name;
+}
+
+static int lpi_gpio_get_function_groups(struct pinctrl_dev *pctldev,
+ unsigned int function,
+ const char *const **groups,
+ unsigned *const num_qgroups)
+{
+ struct lpi_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ *groups = pctrl->data->functions[function].groups;
+ *num_qgroups = pctrl->data->functions[function].ngroups;
+
+ return 0;
+}
+
+static int lpi_gpio_set_mux(struct pinctrl_dev *pctldev, unsigned int function,
+ unsigned int group_num)
+{
+ struct lpi_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ const struct lpi_pingroup *g = &pctrl->data->groups[group_num];
+ u32 val;
+ int i, pin = g->pin;
+
+ for (i = 0; i < g->nfuncs; i++) {
+ if (g->funcs[i] == function)
+ break;
+ }
+
+ if (WARN_ON(i == g->nfuncs))
+ return -EINVAL;
+
+ val = lpi_gpio_read(pctrl, pin, LPI_GPIO_CFG_REG);
+ u32p_replace_bits(&val, i, LPI_GPIO_FUNCTION_MASK);
+ lpi_gpio_write(pctrl, pin, LPI_GPIO_CFG_REG, val);
+
+ return 0;
+}
+
+static const struct pinmux_ops lpi_gpio_pinmux_ops = {
+ .get_functions_count = lpi_gpio_get_functions_count,
+ .get_function_name = lpi_gpio_get_function_name,
+ .get_function_groups = lpi_gpio_get_function_groups,
+ .set_mux = lpi_gpio_set_mux,
+};
+
+static int lpi_config_get(struct pinctrl_dev *pctldev,
+ unsigned int pin, unsigned long *config)
+{
+ unsigned int param = pinconf_to_config_param(*config);
+ struct lpi_pinctrl *state = dev_get_drvdata(pctldev->dev);
+ unsigned int arg = 0;
+ int is_out;
+ int pull;
+ u32 ctl_reg;
+
+ ctl_reg = lpi_gpio_read(state, pin, LPI_GPIO_CFG_REG);
+ is_out = ctl_reg & LPI_GPIO_OE_MASK;
+ pull = FIELD_GET(LPI_GPIO_PULL_MASK, ctl_reg);
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ if (pull == LPI_GPIO_BIAS_DISABLE)
+ arg = 1;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ if (pull == LPI_GPIO_PULL_DOWN)
+ arg = 1;
+ break;
+ case PIN_CONFIG_BIAS_BUS_HOLD:
+ if (pull == LPI_GPIO_KEEPER)
+ arg = 1;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ if (pull == LPI_GPIO_PULL_UP)
+ arg = 1;
+ break;
+ case PIN_CONFIG_INPUT_ENABLE:
+ case PIN_CONFIG_OUTPUT:
+ if (is_out)
+ arg = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *config = pinconf_to_config_packed(param, arg);
+ return 0;
+}
+
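+/*
+ * Illustrative note: the packed config format keeps the generic parameter
+ * in the low bits of the unsigned long and the argument above it, so the
+ * value written to *config here round-trips cleanly through
+ * pinconf_to_config_param() and pinconf_to_config_argument().
+ */
+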
+static int lpi_config_set(struct pinctrl_dev *pctldev, unsigned int group,
+ unsigned long *configs, unsigned int nconfs)
+{
+ struct lpi_pinctrl *pctrl = dev_get_drvdata(pctldev->dev);
+ unsigned int param, arg;
+ /* Defaults so the CFG update below never writes uninitialized values */
+ unsigned int pullup = LPI_GPIO_BIAS_DISABLE, strength = 2;
+ bool value, output_enabled = false;
+ const struct lpi_pingroup *g;
+ unsigned long sval;
+ int i, slew_offset;
+ u32 val;
+
+ g = &pctrl->data->groups[group];
+ for (i = 0; i < nconfs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ arg = pinconf_to_config_argument(configs[i]);
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ pullup = LPI_GPIO_BIAS_DISABLE;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ pullup = LPI_GPIO_PULL_DOWN;
+ break;
+ case PIN_CONFIG_BIAS_BUS_HOLD:
+ pullup = LPI_GPIO_KEEPER;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ pullup = LPI_GPIO_PULL_UP;
+ break;
+ case PIN_CONFIG_INPUT_ENABLE:
+ output_enabled = false;
+ break;
+ case PIN_CONFIG_OUTPUT:
+ output_enabled = true;
+ value = arg;
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ strength = arg;
+ break;
+ case PIN_CONFIG_SLEW_RATE:
+ if (arg > LPI_SLEW_RATE_MAX) {
+ dev_err(pctldev->dev, "invalid slew rate %u for pin: %d\n",
+ arg, group);
+ return -EINVAL;
+ }
+
+ slew_offset = g->slew_offset;
+ if (slew_offset == NO_SLEW)
+ break;
+
+ mutex_lock(&pctrl->slew_access_lock);
+
+ sval = ioread32(pctrl->slew_base + LPI_SLEW_RATE_CTL_REG);
+ sval &= ~(LPI_SLEW_RATE_MASK << slew_offset);
+ sval |= arg << slew_offset;
+ iowrite32(sval, pctrl->slew_base + LPI_SLEW_RATE_CTL_REG);
+
+ mutex_unlock(&pctrl->slew_access_lock);
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ val = lpi_gpio_read(pctrl, group, LPI_GPIO_CFG_REG);
+
+ u32p_replace_bits(&val, pullup, LPI_GPIO_PULL_MASK);
+ u32p_replace_bits(&val, LPI_GPIO_DS_TO_VAL(strength),
+ LPI_GPIO_OUT_STRENGTH_MASK);
+ u32p_replace_bits(&val, output_enabled, LPI_GPIO_OE_MASK);
+
+ lpi_gpio_write(pctrl, group, LPI_GPIO_CFG_REG, val);
+
+ if (output_enabled) {
+ val = u32_encode_bits(value ? 1 : 0, LPI_GPIO_VALUE_OUT_MASK);
+ lpi_gpio_write(pctrl, group, LPI_GPIO_VALUE_REG, val);
+ }
+
+ return 0;
+}
+
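+/*
+ * Worked example (illustrative): gpio2 has slew_offset 4, so programming a
+ * slew rate of 2 on it clears LPI_SLEW_RATE_MASK << 4 (bits 5:4) in the
+ * register at slew_base + LPI_SLEW_RATE_CTL_REG and then ORs in 2 << 4,
+ * leaving every other pin's slew field untouched.
+ */
+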
+static const struct pinconf_ops lpi_gpio_pinconf_ops = {
+ .is_generic = true,
+ .pin_config_group_get = lpi_config_get,
+ .pin_config_group_set = lpi_config_set,
+};
+
+static int lpi_gpio_direction_input(struct gpio_chip *chip, unsigned int pin)
+{
+ struct lpi_pinctrl *state = gpiochip_get_data(chip);
+ unsigned long config;
+
+ config = pinconf_to_config_packed(PIN_CONFIG_INPUT_ENABLE, 1);
+
+ return lpi_config_set(state->ctrl, pin, &config, 1);
+}
+
+static int lpi_gpio_direction_output(struct gpio_chip *chip,
+ unsigned int pin, int val)
+{
+ struct lpi_pinctrl *state = gpiochip_get_data(chip);
+ unsigned long config;
+
+ config = pinconf_to_config_packed(PIN_CONFIG_OUTPUT, val);
+
+ return lpi_config_set(state->ctrl, pin, &config, 1);
+}
+
+static int lpi_gpio_get(struct gpio_chip *chip, unsigned int pin)
+{
+ struct lpi_pinctrl *state = gpiochip_get_data(chip);
+
+ return lpi_gpio_read(state, pin, LPI_GPIO_VALUE_REG) &
+ LPI_GPIO_VALUE_IN_MASK;
+}
+
+static void lpi_gpio_set(struct gpio_chip *chip, unsigned int pin, int value)
+{
+ struct lpi_pinctrl *state = gpiochip_get_data(chip);
+ unsigned long config;
+
+ config = pinconf_to_config_packed(PIN_CONFIG_OUTPUT, value);
+
+ lpi_config_set(state->ctrl, pin, &config, 1);
+}
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/seq_file.h>
+
+static unsigned int lpi_regval_to_drive(u32 val)
+{
+ return (val + 1) * 2;
+}
+
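+/*
+ * Illustrative: this is the inverse of LPI_GPIO_DS_TO_VAL() above; the
+ * register values 0..7 decode to 2, 4, 6, 8, 10, 12, 14 and 16 mA.
+ */
+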
+static void lpi_gpio_dbg_show_one(struct seq_file *s,
+ struct pinctrl_dev *pctldev,
+ struct gpio_chip *chip,
+ unsigned int offset,
+ unsigned int gpio)
+{
+ struct lpi_pinctrl *state = gpiochip_get_data(chip);
+ struct pinctrl_pin_desc pindesc;
+ unsigned int func;
+ int is_out;
+ int drive;
+ int pull;
+ u32 ctl_reg;
+
+ static const char * const pulls[] = {
+ "no pull",
+ "pull down",
+ "keeper",
+ "pull up"
+ };
+
+ pctldev = pctldev ? : state->ctrl;
+ pindesc = pctldev->desc->pins[offset];
+ ctl_reg = lpi_gpio_read(state, offset, LPI_GPIO_CFG_REG);
+ is_out = ctl_reg & LPI_GPIO_OE_MASK;
+
+ func = FIELD_GET(LPI_GPIO_FUNCTION_MASK, ctl_reg);
+ drive = FIELD_GET(LPI_GPIO_OUT_STRENGTH_MASK, ctl_reg);
+ pull = FIELD_GET(LPI_GPIO_PULL_MASK, ctl_reg);
+
+ seq_printf(s, " %-8s: %-3s %d", pindesc.name, is_out ? "out" : "in", func);
+ seq_printf(s, " %dmA", lpi_regval_to_drive(drive));
+ seq_printf(s, " %s", pulls[pull]);
+}
+
+static void lpi_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
+{
+ unsigned int gpio = chip->base;
+ unsigned int i;
+
+ for (i = 0; i < chip->ngpio; i++, gpio++) {
+ lpi_gpio_dbg_show_one(s, NULL, chip, i, gpio);
+ seq_puts(s, "\n");
+ }
+}
+
+#else
+#define lpi_gpio_dbg_show NULL
+#endif
+
+static const struct gpio_chip lpi_gpio_template = {
+ .direction_input = lpi_gpio_direction_input,
+ .direction_output = lpi_gpio_direction_output,
+ .get = lpi_gpio_get,
+ .set = lpi_gpio_set,
+ .request = gpiochip_generic_request,
+ .free = gpiochip_generic_free,
+ .dbg_show = lpi_gpio_dbg_show,
+};
+
+static int lpi_pinctrl_probe(struct platform_device *pdev)
+{
+ const struct lpi_pinctrl_variant_data *data;
+ struct device *dev = &pdev->dev;
+ struct lpi_pinctrl *pctrl;
+ int ret;
+
+ pctrl = devm_kzalloc(dev, sizeof(*pctrl), GFP_KERNEL);
+ if (!pctrl)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, pctrl);
+
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -EINVAL;
+
+ pctrl->data = data;
+ pctrl->dev = &pdev->dev;
+
+ pctrl->clks[0].id = "core";
+ pctrl->clks[1].id = "audio";
+
+ pctrl->tlmm_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(pctrl->tlmm_base))
+ return dev_err_probe(dev, PTR_ERR(pctrl->tlmm_base),
+ "TLMM resource not provided\n");
+
+ pctrl->slew_base = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(pctrl->slew_base))
+ return dev_err_probe(dev, PTR_ERR(pctrl->slew_base),
+ "Slew resource not provided\n");
+
+ ret = devm_clk_bulk_get(dev, MAX_LPI_NUM_CLKS, pctrl->clks);
+ if (ret)
+ return dev_err_probe(dev, ret, "Can't get clocks\n");
+
+ ret = clk_bulk_prepare_enable(MAX_LPI_NUM_CLKS, pctrl->clks);
+ if (ret)
+ return dev_err_probe(dev, ret, "Can't enable clocks\n");
+
+ pctrl->desc.pctlops = &lpi_gpio_pinctrl_ops;
+ pctrl->desc.pmxops = &lpi_gpio_pinmux_ops;
+ pctrl->desc.confops = &lpi_gpio_pinconf_ops;
+ pctrl->desc.owner = THIS_MODULE;
+ pctrl->desc.name = dev_name(dev);
+ pctrl->desc.pins = data->pins;
+ pctrl->desc.npins = data->npins;
+ pctrl->chip = lpi_gpio_template;
+ pctrl->chip.parent = dev;
+ pctrl->chip.base = -1;
+ pctrl->chip.ngpio = data->npins;
+ pctrl->chip.label = dev_name(dev);
+ pctrl->chip.of_gpio_n_cells = 2;
+ pctrl->chip.can_sleep = false;
+
+ mutex_init(&pctrl->slew_access_lock);
+
+ pctrl->ctrl = devm_pinctrl_register(dev, &pctrl->desc, pctrl);
+ if (IS_ERR(pctrl->ctrl)) {
+ ret = PTR_ERR(pctrl->ctrl);
+ dev_err(dev, "failed to add pin controller\n");
+ goto err_pinctrl;
+ }
+
+ ret = devm_gpiochip_add_data(dev, &pctrl->chip, pctrl);
+ if (ret) {
+ dev_err(pctrl->dev, "can't add gpio chip\n");
+ goto err_pinctrl;
+ }
+
+ return 0;
+
+err_pinctrl:
+ mutex_destroy(&pctrl->slew_access_lock);
+ clk_bulk_disable_unprepare(MAX_LPI_NUM_CLKS, pctrl->clks);
+
+ return ret;
+}
+
+static int lpi_pinctrl_remove(struct platform_device *pdev)
+{
+ struct lpi_pinctrl *pctrl = platform_get_drvdata(pdev);
+
+ mutex_destroy(&pctrl->slew_access_lock);
+ clk_bulk_disable_unprepare(MAX_LPI_NUM_CLKS, pctrl->clks);
+
+ return 0;
+}
+
+static const struct of_device_id lpi_pinctrl_of_match[] = {
+ {
+ .compatible = "qcom,sm8250-lpass-lpi-pinctrl",
+ .data = &sm8250_lpi_data,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, lpi_pinctrl_of_match);
+
+static struct platform_driver lpi_pinctrl_driver = {
+ .driver = {
+ .name = "qcom-lpass-lpi-pinctrl",
+ .of_match_table = lpi_pinctrl_of_match,
+ },
+ .probe = lpi_pinctrl_probe,
+ .remove = lpi_pinctrl_remove,
+};
+
+module_platform_driver(lpi_pinctrl_driver);
+MODULE_DESCRIPTION("QTI LPI GPIO pin control driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 77a25bdf0da7..d70caecd21d2 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -51,6 +51,7 @@
* @dual_edge_irqs: Bitmap of irqs that need sw emulated dual edge
* detection.
* @skip_wake_irqs: Skip IRQs that are handled by wakeup interrupt controller
+ * @disabled_for_mux: These IRQs were disabled because we muxed away.
* @soc: Reference to soc_data of platform specific data.
* @regs: Base addresses for the TLMM tiles.
* @phys_base: Physical base address
@@ -72,6 +73,7 @@ struct msm_pinctrl {
DECLARE_BITMAP(dual_edge_irqs, MAX_NR_GPIO);
DECLARE_BITMAP(enabled_irqs, MAX_NR_GPIO);
DECLARE_BITMAP(skip_wake_irqs, MAX_NR_GPIO);
+ DECLARE_BITMAP(disabled_for_mux, MAX_NR_GPIO);
const struct msm_pinctrl_soc_data *soc;
void __iomem *regs[MAX_NR_TILES];
@@ -96,6 +98,14 @@ MSM_ACCESSOR(intr_cfg)
MSM_ACCESSOR(intr_status)
MSM_ACCESSOR(intr_target)
+static void msm_ack_intr_status(struct msm_pinctrl *pctrl,
+ const struct msm_pingroup *g)
+{
+ u32 val = g->intr_ack_high ? BIT(g->intr_status_bit) : 0;
+
+ msm_writel_intr_status(val, pctrl, g);
+}
+
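+/*
+ * Illustrative note: intr_ack_high captures both TLMM ack conventions in
+ * one helper: parts that ack on 1 get BIT(intr_status_bit) written, parts
+ * that ack on 0 get 0, so callers no longer open-code the register update.
+ */
+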
static int msm_get_groups_count(struct pinctrl_dev *pctldev)
{
struct msm_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
@@ -171,6 +181,10 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev,
unsigned group)
{
struct msm_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct gpio_chip *gc = &pctrl->chip;
+ unsigned int irq = irq_find_mapping(gc->irq.domain, group);
+ struct irq_data *d = irq_get_irq_data(irq);
+ unsigned int gpio_func = pctrl->soc->gpio_func;
const struct msm_pingroup *g;
unsigned long flags;
u32 val, mask;
@@ -187,6 +201,20 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev,
if (WARN_ON(i == g->nfuncs))
return -EINVAL;
+ /*
+ * If a GPIO interrupt is set up on this pin then we need special
+ * handling: the interrupt detection logic will still see the pin
+ * twiddle even when we're muxed away.
+ *
+ * When we see a pin with an interrupt set up on it, we disable
+ * (mask) interrupts on it when we mux away and re-enable them when
+ * we mux back. Note that disable_irq() refcounts, so interrupts
+ * stay disabled as long as at least one disable_irq() call is
+ * outstanding.
+ */
+ if (d && i != gpio_func &&
+ !test_and_set_bit(d->hwirq, pctrl->disabled_for_mux))
+ disable_irq(irq);
+
raw_spin_lock_irqsave(&pctrl->lock, flags);
val = msm_readl_ctl(pctrl, g);
@@ -196,6 +224,20 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev,
raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ if (d && i == gpio_func &&
+ test_and_clear_bit(d->hwirq, pctrl->disabled_for_mux)) {
+ /*
+ * Clear any interrupt that latched while the pin was muxed away,
+ * since we only masked it rather than disabling detection.
+ */
+ if (d->parent_data && test_bit(d->hwirq, pctrl->skip_wake_irqs))
+ irq_chip_set_parent_state(d, IRQCHIP_STATE_PENDING, false);
+ else
+ msm_ack_intr_status(pctrl, g);
+
+ enable_irq(irq);
+ }
+
return 0;
}
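The two hunks above form a small handshake around the disabled_for_mux bitmap. A minimal sketch of the invariant (the muxing_away_from_gpio / muxing_back_to_gpio flags are simplifications for illustration, not identifiers from this patch): each hwirq holds at most one outstanding disable_irq() reference, taken on the first mux-away and dropped on the mux-back.

	/* Sketch only: one disable_irq() reference per hwirq. */
	if (muxing_away_from_gpio &&
	    !test_and_set_bit(d->hwirq, pctrl->disabled_for_mux))
		disable_irq(irq);	/* first mux-away takes the reference */
	if (muxing_back_to_gpio &&
	    test_and_clear_bit(d->hwirq, pctrl->disabled_for_mux))
		enable_irq(irq);	/* dropped after acking stale status */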
@@ -210,8 +252,7 @@ static int msm_pinmux_request_gpio(struct pinctrl_dev *pctldev,
if (!g->nfuncs)
return 0;
- /* For now assume function 0 is GPIO because it always is */
- return msm_pinmux_set_mux(pctldev, g->funcs[0], offset);
+ return msm_pinmux_set_mux(pctldev, g->funcs[pctrl->soc->gpio_func], offset);
}
static const struct pinmux_ops msm_pinmux_ops = {
@@ -774,7 +815,7 @@ static void msm_gpio_irq_mask(struct irq_data *d)
raw_spin_unlock_irqrestore(&pctrl->lock, flags);
}
-static void msm_gpio_irq_clear_unmask(struct irq_data *d, bool status_clear)
+static void msm_gpio_irq_unmask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
@@ -792,17 +833,6 @@ static void msm_gpio_irq_clear_unmask(struct irq_data *d, bool status_clear)
raw_spin_lock_irqsave(&pctrl->lock, flags);
- if (status_clear) {
- /*
- * clear the interrupt status bit before unmask to avoid
- * any erroneous interrupts that would have got latched
- * when the interrupt is not in use.
- */
- val = msm_readl_intr_status(pctrl, g);
- val &= ~BIT(g->intr_status_bit);
- msm_writel_intr_status(val, pctrl, g);
- }
-
val = msm_readl_intr_cfg(pctrl, g);
val |= BIT(g->intr_raw_status_bit);
val |= BIT(g->intr_enable_bit);
@@ -822,7 +852,7 @@ static void msm_gpio_irq_enable(struct irq_data *d)
irq_chip_enable_parent(d);
if (!test_bit(d->hwirq, pctrl->skip_wake_irqs))
- msm_gpio_irq_clear_unmask(d, true);
+ msm_gpio_irq_unmask(d);
}
static void msm_gpio_irq_disable(struct irq_data *d)
@@ -837,11 +867,6 @@ static void msm_gpio_irq_disable(struct irq_data *d)
msm_gpio_irq_mask(d);
}
-static void msm_gpio_irq_unmask(struct irq_data *d)
-{
- msm_gpio_irq_clear_unmask(d, false);
-}
-
/**
* msm_gpio_update_dual_edge_parent() - Prime next edge for IRQs handled by parent.
* @d: The irq data.
@@ -894,7 +919,6 @@ static void msm_gpio_irq_ack(struct irq_data *d)
struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
const struct msm_pingroup *g;
unsigned long flags;
- u32 val;
if (test_bit(d->hwirq, pctrl->skip_wake_irqs)) {
if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
@@ -906,12 +930,7 @@ static void msm_gpio_irq_ack(struct irq_data *d)
raw_spin_lock_irqsave(&pctrl->lock, flags);
- val = msm_readl_intr_status(pctrl, g);
- if (g->intr_ack_high)
- val |= BIT(g->intr_status_bit);
- else
- val &= ~BIT(g->intr_status_bit);
- msm_writel_intr_status(val, pctrl, g);
+ msm_ack_intr_status(pctrl, g);
if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
msm_gpio_update_dual_edge_pos(pctrl, g, d);
@@ -936,6 +955,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
const struct msm_pingroup *g;
unsigned long flags;
+ bool was_enabled;
u32 val;
if (msm_gpio_needs_dual_edge_parent_workaround(d, type)) {
@@ -997,6 +1017,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
* could cause the INTR_STATUS to be set for EDGE interrupts.
*/
val = msm_readl_intr_cfg(pctrl, g);
+ was_enabled = val & BIT(g->intr_raw_status_bit);
val |= BIT(g->intr_raw_status_bit);
if (g->intr_detection_width == 2) {
val &= ~(3 << g->intr_detection_bit);
@@ -1046,6 +1067,14 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
}
msm_writel_intr_cfg(val, pctrl, g);
+ /*
+ * The first time we set RAW_STATUS_EN it could trigger an interrupt.
+ * Clear the interrupt. This is safe because we have
+ * IRQCHIP_SET_TYPE_MASKED.
+ */
+ if (!was_enabled)
+ msm_ack_intr_status(pctrl, g);
+
if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
msm_gpio_update_dual_edge_pos(pctrl, g, d);
@@ -1099,16 +1128,11 @@ static int msm_gpio_irq_reqres(struct irq_data *d)
}
/*
- * Clear the interrupt that may be pending before we enable
- * the line.
- * This is especially a problem with the GPIOs routed to the
- * PDC. These GPIOs are direct-connect interrupts to the GIC.
- * Disabling the interrupt line at the PDC does not prevent
- * the interrupt from being latched at the GIC. The state at
- * GIC needs to be cleared before enabling.
+ * The disable / clear-enable workaround we do in msm_pinmux_set_mux()
+ * only works if disable is not lazy, since we only clear the bogus
+ * interrupt in hardware. Explicitly mark the interrupt as UNLAZY.
*/
- if (d->parent_data && test_bit(d->hwirq, pctrl->skip_wake_irqs))
- irq_chip_set_parent_state(d, IRQCHIP_STATE_PENDING, 0);
+ irq_set_status_flags(d->irq, IRQ_DISABLE_UNLAZY);
return 0;
out:
@@ -1449,3 +1473,5 @@ int msm_pinctrl_remove(struct platform_device *pdev)
}
EXPORT_SYMBOL(msm_pinctrl_remove);
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. TLMM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.h b/drivers/pinctrl/qcom/pinctrl-msm.h
index 333f99243c43..e31a5167c91e 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.h
+++ b/drivers/pinctrl/qcom/pinctrl-msm.h
@@ -118,6 +118,7 @@ struct msm_gpio_wakeirq_map {
* @wakeirq_dual_edge_errata: If true then GPIOs using the wakeirq_map need
* to be aware that their parent can't handle dual
* edge interrupts.
+ * @gpio_func: Which function number is GPIO (usually 0).
*/
struct msm_pinctrl_soc_data {
const struct pinctrl_pin_desc *pins;
@@ -134,6 +135,7 @@ struct msm_pinctrl_soc_data {
const struct msm_gpio_wakeirq_map *wakeirq_map;
unsigned int nwakeirq_map;
bool wakeirq_dual_edge_errata;
+ unsigned int gpio_func;
};
extern const struct dev_pm_ops msm_pinctrl_dev_pm_ops;
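For reference, a hypothetical SoC variant file would opt into a non-zero GPIO function roughly like this (example_soc_data, example_pins and the field subset shown are placeholders for illustration, not taken from this patch):

	static const struct msm_pinctrl_soc_data example_soc_data = {
		.pins = example_pins,
		.npins = ARRAY_SIZE(example_pins),
		.gpio_func = 1,	/* GPIO mode is mux value 1 on this SoC */
	};

Variants that leave .gpio_func unset keep the historical function-0 behaviour, since the field defaults to zero.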
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8953.c b/drivers/pinctrl/qcom/pinctrl-msm8953.c
new file mode 100644
index 000000000000..e0c939ff3d54
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-msm8953.c
@@ -0,0 +1,1844 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020, The Linux Foundation. All rights reserved.
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-msm.h"
+
+#define FUNCTION(fname) \
+ [msm_mux_##fname] = { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
+
+#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9) \
+ { \
+ .name = "gpio" #id, \
+ .pins = gpio##id##_pins, \
+ .npins = ARRAY_SIZE(gpio##id##_pins), \
+ .funcs = (int[]){ \
+ msm_mux_gpio, /* gpio mode */ \
+ msm_mux_##f1, \
+ msm_mux_##f2, \
+ msm_mux_##f3, \
+ msm_mux_##f4, \
+ msm_mux_##f5, \
+ msm_mux_##f6, \
+ msm_mux_##f7, \
+ msm_mux_##f8, \
+ msm_mux_##f9 \
+ }, \
+ .nfuncs = 10, \
+ .ctl_reg = 0x1000 * id, \
+ .io_reg = 0x4 + 0x1000 * id, \
+ .intr_cfg_reg = 0x8 + 0x1000 * id, \
+ .intr_status_reg = 0xc + 0x1000 * id, \
+ .intr_target_reg = 0x8 + 0x1000 * id, \
+ .mux_bit = 2, \
+ .pull_bit = 0, \
+ .drv_bit = 6, \
+ .oe_bit = 9, \
+ .in_bit = 0, \
+ .out_bit = 1, \
+ .intr_enable_bit = 0, \
+ .intr_status_bit = 0, \
+ .intr_target_bit = 5, \
+ .intr_target_kpss_val = 4, \
+ .intr_raw_status_bit = 4, \
+ .intr_polarity_bit = 1, \
+ .intr_detection_bit = 2, \
+ .intr_detection_width = 2, \
+ }
+
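+/*
+ * Worked example (illustrative): for gpio5 the macro above yields
+ * ctl_reg = 0x5000, io_reg = 0x5004, intr_cfg_reg = 0x5008 and
+ * intr_status_reg = 0x500c; the interrupt target bits live in the same
+ * register as intr_cfg, hence intr_target_reg = 0x5008 as well.
+ */
+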
+#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = ctl, \
+ .io_reg = 0, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = pull, \
+ .drv_bit = drv, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = -1, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+
+static const struct pinctrl_pin_desc msm8953_pins[] = {
+ PINCTRL_PIN(0, "GPIO_0"),
+ PINCTRL_PIN(1, "GPIO_1"),
+ PINCTRL_PIN(2, "GPIO_2"),
+ PINCTRL_PIN(3, "GPIO_3"),
+ PINCTRL_PIN(4, "GPIO_4"),
+ PINCTRL_PIN(5, "GPIO_5"),
+ PINCTRL_PIN(6, "GPIO_6"),
+ PINCTRL_PIN(7, "GPIO_7"),
+ PINCTRL_PIN(8, "GPIO_8"),
+ PINCTRL_PIN(9, "GPIO_9"),
+ PINCTRL_PIN(10, "GPIO_10"),
+ PINCTRL_PIN(11, "GPIO_11"),
+ PINCTRL_PIN(12, "GPIO_12"),
+ PINCTRL_PIN(13, "GPIO_13"),
+ PINCTRL_PIN(14, "GPIO_14"),
+ PINCTRL_PIN(15, "GPIO_15"),
+ PINCTRL_PIN(16, "GPIO_16"),
+ PINCTRL_PIN(17, "GPIO_17"),
+ PINCTRL_PIN(18, "GPIO_18"),
+ PINCTRL_PIN(19, "GPIO_19"),
+ PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(21, "GPIO_21"),
+ PINCTRL_PIN(22, "GPIO_22"),
+ PINCTRL_PIN(23, "GPIO_23"),
+ PINCTRL_PIN(24, "GPIO_24"),
+ PINCTRL_PIN(25, "GPIO_25"),
+ PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(27, "GPIO_27"),
+ PINCTRL_PIN(28, "GPIO_28"),
+ PINCTRL_PIN(29, "GPIO_29"),
+ PINCTRL_PIN(30, "GPIO_30"),
+ PINCTRL_PIN(31, "GPIO_31"),
+ PINCTRL_PIN(32, "GPIO_32"),
+ PINCTRL_PIN(33, "GPIO_33"),
+ PINCTRL_PIN(34, "GPIO_34"),
+ PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(36, "GPIO_36"),
+ PINCTRL_PIN(37, "GPIO_37"),
+ PINCTRL_PIN(38, "GPIO_38"),
+ PINCTRL_PIN(39, "GPIO_39"),
+ PINCTRL_PIN(40, "GPIO_40"),
+ PINCTRL_PIN(41, "GPIO_41"),
+ PINCTRL_PIN(42, "GPIO_42"),
+ PINCTRL_PIN(43, "GPIO_43"),
+ PINCTRL_PIN(44, "GPIO_44"),
+ PINCTRL_PIN(45, "GPIO_45"),
+ PINCTRL_PIN(46, "GPIO_46"),
+ PINCTRL_PIN(47, "GPIO_47"),
+ PINCTRL_PIN(48, "GPIO_48"),
+ PINCTRL_PIN(49, "GPIO_49"),
+ PINCTRL_PIN(50, "GPIO_50"),
+ PINCTRL_PIN(51, "GPIO_51"),
+ PINCTRL_PIN(52, "GPIO_52"),
+ PINCTRL_PIN(53, "GPIO_53"),
+ PINCTRL_PIN(54, "GPIO_54"),
+ PINCTRL_PIN(55, "GPIO_55"),
+ PINCTRL_PIN(56, "GPIO_56"),
+ PINCTRL_PIN(57, "GPIO_57"),
+ PINCTRL_PIN(58, "GPIO_58"),
+ PINCTRL_PIN(59, "GPIO_59"),
+ PINCTRL_PIN(60, "GPIO_60"),
+ PINCTRL_PIN(61, "GPIO_61"),
+ PINCTRL_PIN(62, "GPIO_62"),
+ PINCTRL_PIN(63, "GPIO_63"),
+ PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
+ PINCTRL_PIN(68, "GPIO_68"),
+ PINCTRL_PIN(69, "GPIO_69"),
+ PINCTRL_PIN(70, "GPIO_70"),
+ PINCTRL_PIN(71, "GPIO_71"),
+ PINCTRL_PIN(72, "GPIO_72"),
+ PINCTRL_PIN(73, "GPIO_73"),
+ PINCTRL_PIN(74, "GPIO_74"),
+ PINCTRL_PIN(75, "GPIO_75"),
+ PINCTRL_PIN(76, "GPIO_76"),
+ PINCTRL_PIN(77, "GPIO_77"),
+ PINCTRL_PIN(78, "GPIO_78"),
+ PINCTRL_PIN(79, "GPIO_79"),
+ PINCTRL_PIN(80, "GPIO_80"),
+ PINCTRL_PIN(81, "GPIO_81"),
+ PINCTRL_PIN(82, "GPIO_82"),
+ PINCTRL_PIN(83, "GPIO_83"),
+ PINCTRL_PIN(84, "GPIO_84"),
+ PINCTRL_PIN(85, "GPIO_85"),
+ PINCTRL_PIN(86, "GPIO_86"),
+ PINCTRL_PIN(87, "GPIO_87"),
+ PINCTRL_PIN(88, "GPIO_88"),
+ PINCTRL_PIN(89, "GPIO_89"),
+ PINCTRL_PIN(90, "GPIO_90"),
+ PINCTRL_PIN(91, "GPIO_91"),
+ PINCTRL_PIN(92, "GPIO_92"),
+ PINCTRL_PIN(93, "GPIO_93"),
+ PINCTRL_PIN(94, "GPIO_94"),
+ PINCTRL_PIN(95, "GPIO_95"),
+ PINCTRL_PIN(96, "GPIO_96"),
+ PINCTRL_PIN(97, "GPIO_97"),
+ PINCTRL_PIN(98, "GPIO_98"),
+ PINCTRL_PIN(99, "GPIO_99"),
+ PINCTRL_PIN(100, "GPIO_100"),
+ PINCTRL_PIN(101, "GPIO_101"),
+ PINCTRL_PIN(102, "GPIO_102"),
+ PINCTRL_PIN(103, "GPIO_103"),
+ PINCTRL_PIN(104, "GPIO_104"),
+ PINCTRL_PIN(105, "GPIO_105"),
+ PINCTRL_PIN(106, "GPIO_106"),
+ PINCTRL_PIN(107, "GPIO_107"),
+ PINCTRL_PIN(108, "GPIO_108"),
+ PINCTRL_PIN(109, "GPIO_109"),
+ PINCTRL_PIN(110, "GPIO_110"),
+ PINCTRL_PIN(111, "GPIO_111"),
+ PINCTRL_PIN(112, "GPIO_112"),
+ PINCTRL_PIN(113, "GPIO_113"),
+ PINCTRL_PIN(114, "GPIO_114"),
+ PINCTRL_PIN(115, "GPIO_115"),
+ PINCTRL_PIN(116, "GPIO_116"),
+ PINCTRL_PIN(117, "GPIO_117"),
+ PINCTRL_PIN(118, "GPIO_118"),
+ PINCTRL_PIN(119, "GPIO_119"),
+ PINCTRL_PIN(120, "GPIO_120"),
+ PINCTRL_PIN(121, "GPIO_121"),
+ PINCTRL_PIN(122, "GPIO_122"),
+ PINCTRL_PIN(123, "GPIO_123"),
+ PINCTRL_PIN(124, "GPIO_124"),
+ PINCTRL_PIN(125, "GPIO_125"),
+ PINCTRL_PIN(126, "GPIO_126"),
+ PINCTRL_PIN(127, "GPIO_127"),
+ PINCTRL_PIN(128, "GPIO_128"),
+ PINCTRL_PIN(129, "GPIO_129"),
+ PINCTRL_PIN(130, "GPIO_130"),
+ PINCTRL_PIN(131, "GPIO_131"),
+ PINCTRL_PIN(132, "GPIO_132"),
+ PINCTRL_PIN(133, "GPIO_133"),
+ PINCTRL_PIN(134, "GPIO_134"),
+ PINCTRL_PIN(135, "GPIO_135"),
+ PINCTRL_PIN(136, "GPIO_136"),
+ PINCTRL_PIN(137, "GPIO_137"),
+ PINCTRL_PIN(138, "GPIO_138"),
+ PINCTRL_PIN(139, "GPIO_139"),
+ PINCTRL_PIN(140, "GPIO_140"),
+ PINCTRL_PIN(141, "GPIO_141"),
+ PINCTRL_PIN(142, "SDC1_CLK"),
+ PINCTRL_PIN(143, "SDC1_CMD"),
+ PINCTRL_PIN(144, "SDC1_DATA"),
+ PINCTRL_PIN(145, "SDC1_RCLK"),
+ PINCTRL_PIN(146, "SDC2_CLK"),
+ PINCTRL_PIN(147, "SDC2_CMD"),
+ PINCTRL_PIN(148, "SDC2_DATA"),
+ PINCTRL_PIN(149, "QDSD_CLK"),
+ PINCTRL_PIN(150, "QDSD_CMD"),
+ PINCTRL_PIN(151, "QDSD_DATA0"),
+ PINCTRL_PIN(152, "QDSD_DATA1"),
+ PINCTRL_PIN(153, "QDSD_DATA2"),
+ PINCTRL_PIN(154, "QDSD_DATA3"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) \
+ static const unsigned int gpio##pin##_pins[] = { pin }
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+DECLARE_MSM_GPIO_PINS(108);
+DECLARE_MSM_GPIO_PINS(109);
+DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+DECLARE_MSM_GPIO_PINS(113);
+DECLARE_MSM_GPIO_PINS(114);
+DECLARE_MSM_GPIO_PINS(115);
+DECLARE_MSM_GPIO_PINS(116);
+DECLARE_MSM_GPIO_PINS(117);
+DECLARE_MSM_GPIO_PINS(118);
+DECLARE_MSM_GPIO_PINS(119);
+DECLARE_MSM_GPIO_PINS(120);
+DECLARE_MSM_GPIO_PINS(121);
+DECLARE_MSM_GPIO_PINS(122);
+DECLARE_MSM_GPIO_PINS(123);
+DECLARE_MSM_GPIO_PINS(124);
+DECLARE_MSM_GPIO_PINS(125);
+DECLARE_MSM_GPIO_PINS(126);
+DECLARE_MSM_GPIO_PINS(127);
+DECLARE_MSM_GPIO_PINS(128);
+DECLARE_MSM_GPIO_PINS(129);
+DECLARE_MSM_GPIO_PINS(130);
+DECLARE_MSM_GPIO_PINS(131);
+DECLARE_MSM_GPIO_PINS(132);
+DECLARE_MSM_GPIO_PINS(133);
+DECLARE_MSM_GPIO_PINS(134);
+DECLARE_MSM_GPIO_PINS(135);
+DECLARE_MSM_GPIO_PINS(136);
+DECLARE_MSM_GPIO_PINS(137);
+DECLARE_MSM_GPIO_PINS(138);
+DECLARE_MSM_GPIO_PINS(139);
+DECLARE_MSM_GPIO_PINS(140);
+DECLARE_MSM_GPIO_PINS(141);
+
+static const unsigned int sdc1_clk_pins[] = { 142 };
+static const unsigned int sdc1_cmd_pins[] = { 143 };
+static const unsigned int sdc1_data_pins[] = { 144 };
+static const unsigned int sdc1_rclk_pins[] = { 145 };
+static const unsigned int sdc2_clk_pins[] = { 146 };
+static const unsigned int sdc2_cmd_pins[] = { 147 };
+static const unsigned int sdc2_data_pins[] = { 148 };
+static const unsigned int qdsd_clk_pins[] = { 149 };
+static const unsigned int qdsd_cmd_pins[] = { 150 };
+static const unsigned int qdsd_data0_pins[] = { 151 };
+static const unsigned int qdsd_data1_pins[] = { 152 };
+static const unsigned int qdsd_data2_pins[] = { 153 };
+static const unsigned int qdsd_data3_pins[] = { 154 };
+
+enum msm8953_functions {
+ msm_mux_accel_int,
+ msm_mux_adsp_ext,
+ msm_mux_alsp_int,
+ msm_mux_atest_bbrx0,
+ msm_mux_atest_bbrx1,
+ msm_mux_atest_char,
+ msm_mux_atest_char0,
+ msm_mux_atest_char1,
+ msm_mux_atest_char2,
+ msm_mux_atest_char3,
+ msm_mux_atest_gpsadc_dtest0_native,
+ msm_mux_atest_gpsadc_dtest1_native,
+ msm_mux_atest_tsens,
+ msm_mux_atest_wlan0,
+ msm_mux_atest_wlan1,
+ msm_mux_bimc_dte0,
+ msm_mux_bimc_dte1,
+ msm_mux_blsp1_spi,
+ msm_mux_blsp3_spi,
+ msm_mux_blsp6_spi,
+ msm_mux_blsp7_spi,
+ msm_mux_blsp_i2c1,
+ msm_mux_blsp_i2c2,
+ msm_mux_blsp_i2c3,
+ msm_mux_blsp_i2c4,
+ msm_mux_blsp_i2c5,
+ msm_mux_blsp_i2c6,
+ msm_mux_blsp_i2c7,
+ msm_mux_blsp_i2c8,
+ msm_mux_blsp_spi1,
+ msm_mux_blsp_spi2,
+ msm_mux_blsp_spi3,
+ msm_mux_blsp_spi4,
+ msm_mux_blsp_spi5,
+ msm_mux_blsp_spi6,
+ msm_mux_blsp_spi7,
+ msm_mux_blsp_spi8,
+ msm_mux_blsp_uart2,
+ msm_mux_blsp_uart4,
+ msm_mux_blsp_uart5,
+ msm_mux_blsp_uart6,
+ msm_mux_cam0_ldo,
+ msm_mux_cam1_ldo,
+ msm_mux_cam1_rst,
+ msm_mux_cam1_standby,
+ msm_mux_cam2_rst,
+ msm_mux_cam2_standby,
+ msm_mux_cam3_rst,
+ msm_mux_cam3_standby,
+ msm_mux_cam_irq,
+ msm_mux_cam_mclk,
+ msm_mux_cap_int,
+ msm_mux_cci_async,
+ msm_mux_cci_i2c,
+ msm_mux_cci_timer0,
+ msm_mux_cci_timer1,
+ msm_mux_cci_timer2,
+ msm_mux_cci_timer3,
+ msm_mux_cci_timer4,
+ msm_mux_cdc_pdm0,
+ msm_mux_codec_int1,
+ msm_mux_codec_int2,
+ msm_mux_codec_reset,
+ msm_mux_cri_trng,
+ msm_mux_cri_trng0,
+ msm_mux_cri_trng1,
+ msm_mux_dac_calib0,
+ msm_mux_dac_calib1,
+ msm_mux_dac_calib2,
+ msm_mux_dac_calib3,
+ msm_mux_dac_calib4,
+ msm_mux_dac_calib5,
+ msm_mux_dac_calib6,
+ msm_mux_dac_calib7,
+ msm_mux_dac_calib8,
+ msm_mux_dac_calib9,
+ msm_mux_dac_calib10,
+ msm_mux_dac_calib11,
+ msm_mux_dac_calib12,
+ msm_mux_dac_calib13,
+ msm_mux_dac_calib14,
+ msm_mux_dac_calib15,
+ msm_mux_dac_calib16,
+ msm_mux_dac_calib17,
+ msm_mux_dac_calib18,
+ msm_mux_dac_calib19,
+ msm_mux_dac_calib20,
+ msm_mux_dac_calib21,
+ msm_mux_dac_calib22,
+ msm_mux_dac_calib23,
+ msm_mux_dac_calib24,
+ msm_mux_dac_calib25,
+ msm_mux_dbg_out,
+ msm_mux_ddr_bist,
+ msm_mux_dmic0_clk,
+ msm_mux_dmic0_data,
+ msm_mux_ebi_cdc,
+ msm_mux_ebi_ch0,
+ msm_mux_ext_lpass,
+ msm_mux_flash_strobe,
+ msm_mux_fp_int,
+ msm_mux_gcc_gp1_clk_a,
+ msm_mux_gcc_gp1_clk_b,
+ msm_mux_gcc_gp2_clk_a,
+ msm_mux_gcc_gp2_clk_b,
+ msm_mux_gcc_gp3_clk_a,
+ msm_mux_gcc_gp3_clk_b,
+ msm_mux_gcc_plltest,
+ msm_mux_gcc_tlmm,
+ msm_mux_gpio,
+ msm_mux_gsm0_tx,
+ msm_mux_gsm1_tx,
+ msm_mux_gyro_int,
+ msm_mux_hall_int,
+ msm_mux_hdmi_int,
+ msm_mux_key_focus,
+ msm_mux_key_home,
+ msm_mux_key_snapshot,
+ msm_mux_key_volp,
+ msm_mux_ldo_en,
+ msm_mux_ldo_update,
+ msm_mux_lpass_slimbus,
+ msm_mux_lpass_slimbus0,
+ msm_mux_lpass_slimbus1,
+ msm_mux_m_voc,
+ msm_mux_mag_int,
+ msm_mux_mdp_vsync,
+ msm_mux_mipi_dsi0,
+ msm_mux_modem_tsync,
+ msm_mux_mss_lte,
+ msm_mux_nav_pps,
+ msm_mux_nav_pps_in_a,
+ msm_mux_nav_pps_in_b,
+ msm_mux_nav_tsync,
+ msm_mux_nfc_disable,
+ msm_mux_nfc_dwl,
+ msm_mux_nfc_irq,
+ msm_mux_ois_sync,
+ msm_mux_pa_indicator,
+ msm_mux_pbs0,
+ msm_mux_pbs1,
+ msm_mux_pbs2,
+ msm_mux_pressure_int,
+ msm_mux_pri_mi2s,
+ msm_mux_pri_mi2s_mclk_a,
+ msm_mux_pri_mi2s_mclk_b,
+ msm_mux_pri_mi2s_ws,
+ msm_mux_prng_rosc,
+ msm_mux_pwr_crypto_enabled_a,
+ msm_mux_pwr_crypto_enabled_b,
+ msm_mux_pwr_down,
+ msm_mux_pwr_modem_enabled_a,
+ msm_mux_pwr_modem_enabled_b,
+ msm_mux_pwr_nav_enabled_a,
+ msm_mux_pwr_nav_enabled_b,
+ msm_mux_qdss_cti_trig_in_a0,
+ msm_mux_qdss_cti_trig_in_a1,
+ msm_mux_qdss_cti_trig_in_b0,
+ msm_mux_qdss_cti_trig_in_b1,
+ msm_mux_qdss_cti_trig_out_a0,
+ msm_mux_qdss_cti_trig_out_a1,
+ msm_mux_qdss_cti_trig_out_b0,
+ msm_mux_qdss_cti_trig_out_b1,
+ msm_mux_qdss_traceclk_a,
+ msm_mux_qdss_traceclk_b,
+ msm_mux_qdss_tracectl_a,
+ msm_mux_qdss_tracectl_b,
+ msm_mux_qdss_tracedata_a,
+ msm_mux_qdss_tracedata_b,
+ msm_mux_sd_write,
+ msm_mux_sdcard_det,
+ msm_mux_sec_mi2s,
+ msm_mux_sec_mi2s_mclk_a,
+ msm_mux_sec_mi2s_mclk_b,
+ msm_mux_smb_int,
+ msm_mux_ss_switch,
+ msm_mux_ssbi_wtr1,
+ msm_mux_ts_resout,
+ msm_mux_ts_sample,
+ msm_mux_ts_xvdd,
+ msm_mux_tsens_max,
+ msm_mux_uim1_clk,
+ msm_mux_uim1_data,
+ msm_mux_uim1_present,
+ msm_mux_uim1_reset,
+ msm_mux_uim2_clk,
+ msm_mux_uim2_data,
+ msm_mux_uim2_present,
+ msm_mux_uim2_reset,
+ msm_mux_uim_batt,
+ msm_mux_us_emitter,
+ msm_mux_us_euro,
+ msm_mux_wcss_bt,
+ msm_mux_wcss_fm,
+ msm_mux_wcss_wlan,
+ msm_mux_wcss_wlan0,
+ msm_mux_wcss_wlan1,
+ msm_mux_wcss_wlan2,
+ msm_mux_wsa_en,
+ msm_mux_wsa_io,
+ msm_mux_wsa_irq,
+ msm_mux__,
+};
+
+static const char * const accel_int_groups[] = {
+ "gpio42",
+};
+
+static const char * const adsp_ext_groups[] = {
+ "gpio1",
+};
+
+static const char * const alsp_int_groups[] = {
+ "gpio43",
+};
+
+static const char * const atest_bbrx0_groups[] = {
+ "gpio17",
+};
+
+static const char * const atest_bbrx1_groups[] = {
+ "gpio16",
+};
+
+static const char * const atest_char0_groups[] = {
+ "gpio68",
+};
+
+static const char * const atest_char1_groups[] = {
+ "gpio67",
+};
+
+static const char * const atest_char2_groups[] = {
+ "gpio75",
+};
+
+static const char * const atest_char3_groups[] = {
+ "gpio63",
+};
+
+static const char * const atest_char_groups[] = {
+ "gpio120",
+};
+
+static const char * const atest_gpsadc_dtest0_native_groups[] = {
+ "gpio7",
+};
+
+static const char * const atest_gpsadc_dtest1_native_groups[] = {
+ "gpio18",
+};
+
+static const char * const atest_tsens_groups[] = {
+ "gpio120",
+};
+
+static const char * const atest_wlan0_groups[] = {
+ "gpio22",
+};
+
+static const char * const atest_wlan1_groups[] = {
+ "gpio23",
+};
+
+static const char * const bimc_dte0_groups[] = {
+ "gpio63", "gpio65",
+};
+
+static const char * const bimc_dte1_groups[] = {
+ "gpio121", "gpio122",
+};
+
+static const char * const blsp1_spi_groups[] = {
+ "gpio35", "gpio36",
+};
+
+static const char * const blsp3_spi_groups[] = {
+ "gpio41", "gpio50",
+};
+
+static const char * const blsp6_spi_groups[] = {
+ "gpio47", "gpio48",
+};
+
+static const char * const blsp7_spi_groups[] = {
+ "gpio89", "gpio90",
+};
+
+static const char * const blsp_i2c1_groups[] = {
+ "gpio2", "gpio3",
+};
+
+static const char * const blsp_i2c2_groups[] = {
+ "gpio6", "gpio7",
+};
+
+static const char * const blsp_i2c3_groups[] = {
+ "gpio10", "gpio11",
+};
+
+static const char * const blsp_i2c4_groups[] = {
+ "gpio14", "gpio15",
+};
+
+static const char * const blsp_i2c5_groups[] = {
+ "gpio18", "gpio19",
+};
+
+static const char * const blsp_i2c6_groups[] = {
+ "gpio22", "gpio23",
+};
+
+static const char * const blsp_i2c7_groups[] = {
+ "gpio135", "gpio136",
+};
+
+static const char * const blsp_i2c8_groups[] = {
+ "gpio98", "gpio99",
+};
+
+static const char * const blsp_spi1_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3",
+};
+
+static const char * const blsp_spi2_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7",
+};
+
+static const char * const blsp_spi3_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11",
+};
+
+static const char * const blsp_spi4_groups[] = {
+ "gpio12", "gpio13", "gpio14", "gpio15",
+};
+
+static const char * const blsp_spi5_groups[] = {
+ "gpio16", "gpio17", "gpio18", "gpio19",
+};
+
+static const char * const blsp_spi6_groups[] = {
+ "gpio20", "gpio21", "gpio22", "gpio23",
+};
+
+static const char * const blsp_spi7_groups[] = {
+ "gpio135", "gpio136", "gpio137", "gpio138",
+};
+
+static const char * const blsp_spi8_groups[] = {
+ "gpio96", "gpio97", "gpio98", "gpio99",
+};
+
+static const char * const blsp_uart2_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7",
+};
+
+static const char * const blsp_uart4_groups[] = {
+ "gpio12", "gpio13", "gpio14", "gpio15",
+};
+
+static const char * const blsp_uart5_groups[] = {
+ "gpio16", "gpio17", "gpio18", "gpio19",
+};
+
+static const char * const blsp_uart6_groups[] = {
+ "gpio20", "gpio21", "gpio22", "gpio23",
+};
+
+static const char * const cam0_ldo_groups[] = {
+ "gpio50",
+};
+
+static const char * const cam1_ldo_groups[] = {
+ "gpio134",
+};
+
+static const char * const cam1_rst_groups[] = {
+ "gpio40",
+};
+
+static const char * const cam1_standby_groups[] = {
+ "gpio39",
+};
+
+static const char * const cam2_rst_groups[] = {
+ "gpio129",
+};
+
+static const char * const cam2_standby_groups[] = {
+ "gpio130",
+};
+
+static const char * const cam3_rst_groups[] = {
+ "gpio131",
+};
+
+static const char * const cam3_standby_groups[] = {
+ "gpio132",
+};
+
+static const char * const cam_irq_groups[] = {
+ "gpio35",
+};
+
+static const char * const cam_mclk_groups[] = {
+ "gpio26", "gpio27", "gpio28", "gpio128",
+};
+
+static const char * const cap_int_groups[] = {
+ "gpio13",
+};
+
+static const char * const cci_async_groups[] = {
+ "gpio38",
+};
+
+static const char * const cci_i2c_groups[] = {
+ "gpio29", "gpio30", "gpio31", "gpio32",
+};
+
+static const char * const cci_timer0_groups[] = {
+ "gpio33",
+};
+
+static const char * const cci_timer1_groups[] = {
+ "gpio34",
+};
+
+static const char * const cci_timer2_groups[] = {
+ "gpio35",
+};
+
+static const char * const cci_timer3_groups[] = {
+ "gpio36",
+};
+
+static const char * const cci_timer4_groups[] = {
+ "gpio41",
+};
+
+static const char * const cdc_pdm0_groups[] = {
+ "gpio67", "gpio68", "gpio69", "gpio70", "gpio71", "gpio72", "gpio73",
+ "gpio74",
+};
+
+static const char * const codec_int1_groups[] = {
+ "gpio73",
+};
+
+static const char * const codec_int2_groups[] = {
+ "gpio74",
+};
+
+static const char * const codec_reset_groups[] = {
+ "gpio67",
+};
+
+static const char * const cri_trng0_groups[] = {
+ "gpio85",
+};
+
+static const char * const cri_trng1_groups[] = {
+ "gpio86",
+};
+
+static const char * const cri_trng_groups[] = {
+ "gpio87",
+};
+
+static const char * const dac_calib0_groups[] = {
+ "gpio4",
+};
+
+static const char * const dac_calib1_groups[] = {
+ "gpio12",
+};
+
+static const char * const dac_calib2_groups[] = {
+ "gpio13",
+};
+
+static const char * const dac_calib3_groups[] = {
+ "gpio28",
+};
+
+static const char * const dac_calib4_groups[] = {
+ "gpio29",
+};
+
+static const char * const dac_calib5_groups[] = {
+ "gpio39",
+};
+
+static const char * const dac_calib6_groups[] = {
+ "gpio40",
+};
+
+static const char * const dac_calib7_groups[] = {
+ "gpio41",
+};
+
+static const char * const dac_calib8_groups[] = {
+ "gpio42",
+};
+
+static const char * const dac_calib9_groups[] = {
+ "gpio43",
+};
+
+static const char * const dac_calib10_groups[] = {
+ "gpio44",
+};
+
+static const char * const dac_calib11_groups[] = {
+ "gpio45",
+};
+
+static const char * const dac_calib12_groups[] = {
+ "gpio46",
+};
+
+static const char * const dac_calib13_groups[] = {
+ "gpio47",
+};
+
+static const char * const dac_calib14_groups[] = {
+ "gpio48",
+};
+
+static const char * const dac_calib15_groups[] = {
+ "gpio20",
+};
+
+static const char * const dac_calib16_groups[] = {
+ "gpio21",
+};
+
+static const char * const dac_calib17_groups[] = {
+ "gpio67",
+};
+
+static const char * const dac_calib18_groups[] = {
+ "gpio115",
+};
+
+static const char * const dac_calib19_groups[] = {
+ "gpio30",
+};
+
+static const char * const dac_calib20_groups[] = {
+ "gpio128",
+};
+
+static const char * const dac_calib21_groups[] = {
+ "gpio129",
+};
+
+static const char * const dac_calib22_groups[] = {
+ "gpio130",
+};
+
+static const char * const dac_calib23_groups[] = {
+ "gpio131",
+};
+
+static const char * const dac_calib24_groups[] = {
+ "gpio132",
+};
+
+static const char * const dac_calib25_groups[] = {
+ "gpio133",
+};
+
+static const char * const dbg_out_groups[] = {
+ "gpio63",
+};
+
+static const char * const ddr_bist_groups[] = {
+ "gpio129", "gpio130", "gpio131", "gpio132",
+};
+
+static const char * const dmic0_clk_groups[] = {
+ "gpio89",
+};
+
+static const char * const dmic0_data_groups[] = {
+ "gpio90",
+};
+
+static const char * const ebi_cdc_groups[] = {
+ "gpio67", "gpio69", "gpio118", "gpio119", "gpio120", "gpio123",
+};
+
+static const char * const ebi_ch0_groups[] = {
+ "gpio75",
+};
+
+static const char * const ext_lpass_groups[] = {
+ "gpio81",
+};
+
+static const char * const flash_strobe_groups[] = {
+ "gpio33", "gpio34",
+};
+
+static const char * const fp_int_groups[] = {
+ "gpio48",
+};
+
+static const char * const gcc_gp1_clk_a_groups[] = {
+ "gpio42",
+};
+
+static const char * const gcc_gp1_clk_b_groups[] = {
+ "gpio6", "gpio41",
+};
+
+static const char * const gcc_gp2_clk_a_groups[] = {
+ "gpio43",
+};
+
+static const char * const gcc_gp2_clk_b_groups[] = {
+ "gpio10",
+};
+
+static const char * const gcc_gp3_clk_a_groups[] = {
+ "gpio44",
+};
+
+static const char * const gcc_gp3_clk_b_groups[] = {
+ "gpio11",
+};
+
+static const char * const gcc_plltest_groups[] = {
+ "gpio98", "gpio99",
+};
+
+static const char * const gcc_tlmm_groups[] = {
+ "gpio87",
+};
+
+static const char * const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+ "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+ "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+ "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+ "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+ "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+ "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+ "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+ "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+ "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+ "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
+ "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
+ "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104",
+ "gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110",
+ "gpio111", "gpio112", "gpio113", "gpio114", "gpio115", "gpio116",
+ "gpio117", "gpio118", "gpio119", "gpio120", "gpio121", "gpio122",
+ "gpio123", "gpio124", "gpio125", "gpio126", "gpio127", "gpio128",
+ "gpio129", "gpio130", "gpio131", "gpio132", "gpio133", "gpio134",
+ "gpio135", "gpio136", "gpio137", "gpio138", "gpio139", "gpio140",
+ "gpio141",
+};
+
+static const char * const gsm0_tx_groups[] = {
+ "gpio117",
+};
+
+static const char * const gsm1_tx_groups[] = {
+ "gpio115",
+};
+
+static const char * const gyro_int_groups[] = {
+ "gpio45",
+};
+
+static const char * const hall_int_groups[] = {
+ "gpio12",
+};
+
+static const char * const hdmi_int_groups[] = {
+ "gpio90",
+};
+
+static const char * const key_focus_groups[] = {
+ "gpio87",
+};
+
+static const char * const key_home_groups[] = {
+ "gpio88",
+};
+
+static const char * const key_snapshot_groups[] = {
+ "gpio86",
+};
+
+static const char * const key_volp_groups[] = {
+ "gpio85",
+};
+
+static const char * const ldo_en_groups[] = {
+ "gpio5",
+};
+
+static const char * const ldo_update_groups[] = {
+ "gpio4",
+};
+
+static const char * const lpass_slimbus0_groups[] = {
+ "gpio71",
+};
+
+static const char * const lpass_slimbus1_groups[] = {
+ "gpio72",
+};
+
+static const char * const lpass_slimbus_groups[] = {
+ "gpio70",
+};
+
+static const char * const m_voc_groups[] = {
+ "gpio17", "gpio21",
+};
+
+static const char * const mag_int_groups[] = {
+ "gpio44",
+};
+
+static const char * const mdp_vsync_groups[] = {
+ "gpio24", "gpio25",
+};
+
+static const char * const mipi_dsi0_groups[] = {
+ "gpio61",
+};
+
+static const char * const modem_tsync_groups[] = {
+ "gpio113",
+};
+
+static const char * const mss_lte_groups[] = {
+ "gpio82", "gpio83",
+};
+
+static const char * const nav_pps_groups[] = {
+ "gpio113",
+};
+
+static const char * const nav_pps_in_a_groups[] = {
+ "gpio111",
+};
+
+static const char * const nav_pps_in_b_groups[] = {
+ "gpio113",
+};
+
+static const char * const nav_tsync_groups[] = {
+ "gpio113",
+};
+
+static const char * const nfc_disable_groups[] = {
+ "gpio16",
+};
+
+static const char * const nfc_dwl_groups[] = {
+ "gpio62",
+};
+
+static const char * const nfc_irq_groups[] = {
+ "gpio17",
+};
+
+static const char * const ois_sync_groups[] = {
+ "gpio36",
+};
+
+static const char * const pa_indicator_groups[] = {
+ "gpio112",
+};
+
+static const char * const pbs0_groups[] = {
+ "gpio85",
+};
+
+static const char * const pbs1_groups[] = {
+ "gpio86",
+};
+
+static const char * const pbs2_groups[] = {
+ "gpio87",
+};
+
+static const char * const pressure_int_groups[] = {
+ "gpio46",
+};
+
+static const char * const pri_mi2s_groups[] = {
+ "gpio66", "gpio88", "gpio91", "gpio93", "gpio94", "gpio95",
+};
+
+static const char * const pri_mi2s_mclk_a_groups[] = {
+ "gpio25",
+};
+
+static const char * const pri_mi2s_mclk_b_groups[] = {
+ "gpio69",
+};
+
+static const char * const pri_mi2s_ws_groups[] = {
+ "gpio92",
+};
+
+static const char * const prng_rosc_groups[] = {
+ "gpio2",
+};
+
+static const char * const pwr_crypto_enabled_a_groups[] = {
+ "gpio36",
+};
+
+static const char * const pwr_crypto_enabled_b_groups[] = {
+ "gpio13",
+};
+
+static const char * const pwr_down_groups[] = {
+ "gpio89",
+};
+
+static const char * const pwr_modem_enabled_a_groups[] = {
+ "gpio29",
+};
+
+static const char * const pwr_modem_enabled_b_groups[] = {
+ "gpio9",
+};
+
+static const char * const pwr_nav_enabled_a_groups[] = {
+ "gpio35",
+};
+
+static const char * const pwr_nav_enabled_b_groups[] = {
+ "gpio12",
+};
+
+static const char * const qdss_cti_trig_in_a0_groups[] = {
+ "gpio17",
+};
+
+static const char * const qdss_cti_trig_in_a1_groups[] = {
+ "gpio91",
+};
+
+static const char * const qdss_cti_trig_in_b0_groups[] = {
+ "gpio21",
+};
+
+static const char * const qdss_cti_trig_in_b1_groups[] = {
+ "gpio48",
+};
+
+static const char * const qdss_cti_trig_out_a0_groups[] = {
+ "gpio41",
+};
+
+static const char * const qdss_cti_trig_out_a1_groups[] = {
+ "gpio3",
+};
+
+static const char * const qdss_cti_trig_out_b0_groups[] = {
+ "gpio2",
+};
+
+static const char * const qdss_cti_trig_out_b1_groups[] = {
+ "gpio25",
+};
+
+static const char * const qdss_traceclk_a_groups[] = {
+ "gpio16",
+};
+
+static const char * const qdss_traceclk_b_groups[] = {
+ "gpio22",
+};
+
+static const char * const qdss_tracectl_a_groups[] = {
+ "gpio18",
+};
+
+static const char * const qdss_tracectl_b_groups[] = {
+ "gpio20",
+};
+
+static const char * const qdss_tracedata_a_groups[] = {
+ "gpio19", "gpio26", "gpio27", "gpio28", "gpio29", "gpio30", "gpio31",
+ "gpio32", "gpio33", "gpio34", "gpio35", "gpio36", "gpio38", "gpio39",
+ "gpio40", "gpio50",
+};
+
+static const char * const qdss_tracedata_b_groups[] = {
+ "gpio8", "gpio9", "gpio12", "gpio13", "gpio23", "gpio42", "gpio43",
+ "gpio44", "gpio45", "gpio46", "gpio47", "gpio66", "gpio86", "gpio87",
+ "gpio88", "gpio92",
+};
+
+static const char * const sd_write_groups[] = {
+ "gpio50",
+};
+
+static const char * const sdcard_det_groups[] = {
+ "gpio133",
+};
+
+static const char * const sec_mi2s_groups[] = {
+ "gpio135", "gpio136", "gpio137", "gpio138",
+};
+
+static const char * const sec_mi2s_mclk_a_groups[] = {
+ "gpio25",
+};
+
+static const char * const sec_mi2s_mclk_b_groups[] = {
+ "gpio66",
+};
+
+static const char * const smb_int_groups[] = {
+ "gpio1",
+};
+
+static const char * const ss_switch_groups[] = {
+ "gpio139",
+};
+
+static const char * const ssbi_wtr1_groups[] = {
+ "gpio114", "gpio123",
+};
+
+static const char * const ts_resout_groups[] = {
+ "gpio64",
+};
+
+static const char * const ts_sample_groups[] = {
+ "gpio65",
+};
+
+static const char * const ts_xvdd_groups[] = {
+ "gpio60",
+};
+
+static const char * const tsens_max_groups[] = {
+ "gpio139",
+};
+
+static const char * const uim1_clk_groups[] = {
+ "gpio52",
+};
+
+static const char * const uim1_data_groups[] = {
+ "gpio51",
+};
+
+static const char * const uim1_present_groups[] = {
+ "gpio54",
+};
+
+static const char * const uim1_reset_groups[] = {
+ "gpio53",
+};
+
+static const char * const uim2_clk_groups[] = {
+ "gpio56",
+};
+
+static const char * const uim2_data_groups[] = {
+ "gpio55",
+};
+
+static const char * const uim2_present_groups[] = {
+ "gpio58",
+};
+
+static const char * const uim2_reset_groups[] = {
+ "gpio57",
+};
+
+static const char * const uim_batt_groups[] = {
+ "gpio49",
+};
+
+static const char * const us_emitter_groups[] = {
+ "gpio68",
+};
+
+static const char * const us_euro_groups[] = {
+ "gpio63",
+};
+
+static const char * const wcss_bt_groups[] = {
+ "gpio75", "gpio83", "gpio84",
+};
+
+static const char * const wcss_fm_groups[] = {
+ "gpio81", "gpio82",
+};
+
+static const char * const wcss_wlan0_groups[] = {
+ "gpio78",
+};
+
+static const char * const wcss_wlan1_groups[] = {
+ "gpio77",
+};
+
+static const char * const wcss_wlan2_groups[] = {
+ "gpio76",
+};
+
+static const char * const wcss_wlan_groups[] = {
+ "gpio79", "gpio80",
+};
+
+static const char * const wsa_en_groups[] = {
+ "gpio96",
+};
+
+static const char * const wsa_io_groups[] = {
+ "gpio94", "gpio95",
+};
+
+static const char * const wsa_irq_groups[] = {
+ "gpio97",
+};
+
+static const struct msm_function msm8953_functions[] = {
+ FUNCTION(accel_int),
+ FUNCTION(adsp_ext),
+ FUNCTION(alsp_int),
+ FUNCTION(atest_bbrx0),
+ FUNCTION(atest_bbrx1),
+ FUNCTION(atest_char),
+ FUNCTION(atest_char0),
+ FUNCTION(atest_char1),
+ FUNCTION(atest_char2),
+ FUNCTION(atest_char3),
+ FUNCTION(atest_gpsadc_dtest0_native),
+ FUNCTION(atest_gpsadc_dtest1_native),
+ FUNCTION(atest_tsens),
+ FUNCTION(atest_wlan0),
+ FUNCTION(atest_wlan1),
+ FUNCTION(bimc_dte0),
+ FUNCTION(bimc_dte1),
+ FUNCTION(blsp1_spi),
+ FUNCTION(blsp3_spi),
+ FUNCTION(blsp6_spi),
+ FUNCTION(blsp7_spi),
+ FUNCTION(blsp_i2c1),
+ FUNCTION(blsp_i2c2),
+ FUNCTION(blsp_i2c3),
+ FUNCTION(blsp_i2c4),
+ FUNCTION(blsp_i2c5),
+ FUNCTION(blsp_i2c6),
+ FUNCTION(blsp_i2c7),
+ FUNCTION(blsp_i2c8),
+ FUNCTION(blsp_spi1),
+ FUNCTION(blsp_spi2),
+ FUNCTION(blsp_spi3),
+ FUNCTION(blsp_spi4),
+ FUNCTION(blsp_spi5),
+ FUNCTION(blsp_spi6),
+ FUNCTION(blsp_spi7),
+ FUNCTION(blsp_spi8),
+ FUNCTION(blsp_uart2),
+ FUNCTION(blsp_uart4),
+ FUNCTION(blsp_uart5),
+ FUNCTION(blsp_uart6),
+ FUNCTION(cam0_ldo),
+ FUNCTION(cam1_ldo),
+ FUNCTION(cam1_rst),
+ FUNCTION(cam1_standby),
+ FUNCTION(cam2_rst),
+ FUNCTION(cam2_standby),
+ FUNCTION(cam3_rst),
+ FUNCTION(cam3_standby),
+ FUNCTION(cam_irq),
+ FUNCTION(cam_mclk),
+ FUNCTION(cap_int),
+ FUNCTION(cci_async),
+ FUNCTION(cci_i2c),
+ FUNCTION(cci_timer0),
+ FUNCTION(cci_timer1),
+ FUNCTION(cci_timer2),
+ FUNCTION(cci_timer3),
+ FUNCTION(cci_timer4),
+ FUNCTION(cdc_pdm0),
+ FUNCTION(codec_int1),
+ FUNCTION(codec_int2),
+ FUNCTION(codec_reset),
+ FUNCTION(cri_trng),
+ FUNCTION(cri_trng0),
+ FUNCTION(cri_trng1),
+ FUNCTION(dac_calib0),
+ FUNCTION(dac_calib1),
+ FUNCTION(dac_calib10),
+ FUNCTION(dac_calib11),
+ FUNCTION(dac_calib12),
+ FUNCTION(dac_calib13),
+ FUNCTION(dac_calib14),
+ FUNCTION(dac_calib15),
+ FUNCTION(dac_calib16),
+ FUNCTION(dac_calib17),
+ FUNCTION(dac_calib18),
+ FUNCTION(dac_calib19),
+ FUNCTION(dac_calib2),
+ FUNCTION(dac_calib20),
+ FUNCTION(dac_calib21),
+ FUNCTION(dac_calib22),
+ FUNCTION(dac_calib23),
+ FUNCTION(dac_calib24),
+ FUNCTION(dac_calib25),
+ FUNCTION(dac_calib3),
+ FUNCTION(dac_calib4),
+ FUNCTION(dac_calib5),
+ FUNCTION(dac_calib6),
+ FUNCTION(dac_calib7),
+ FUNCTION(dac_calib8),
+ FUNCTION(dac_calib9),
+ FUNCTION(dbg_out),
+ FUNCTION(ddr_bist),
+ FUNCTION(dmic0_clk),
+ FUNCTION(dmic0_data),
+ FUNCTION(ebi_cdc),
+ FUNCTION(ebi_ch0),
+ FUNCTION(ext_lpass),
+ FUNCTION(flash_strobe),
+ FUNCTION(fp_int),
+ FUNCTION(gcc_gp1_clk_a),
+ FUNCTION(gcc_gp1_clk_b),
+ FUNCTION(gcc_gp2_clk_a),
+ FUNCTION(gcc_gp2_clk_b),
+ FUNCTION(gcc_gp3_clk_a),
+ FUNCTION(gcc_gp3_clk_b),
+ FUNCTION(gcc_plltest),
+ FUNCTION(gcc_tlmm),
+ FUNCTION(gpio),
+ FUNCTION(gsm0_tx),
+ FUNCTION(gsm1_tx),
+ FUNCTION(gyro_int),
+ FUNCTION(hall_int),
+ FUNCTION(hdmi_int),
+ FUNCTION(key_focus),
+ FUNCTION(key_home),
+ FUNCTION(key_snapshot),
+ FUNCTION(key_volp),
+ FUNCTION(ldo_en),
+ FUNCTION(ldo_update),
+ FUNCTION(lpass_slimbus),
+ FUNCTION(lpass_slimbus0),
+ FUNCTION(lpass_slimbus1),
+ FUNCTION(m_voc),
+ FUNCTION(mag_int),
+ FUNCTION(mdp_vsync),
+ FUNCTION(mipi_dsi0),
+ FUNCTION(modem_tsync),
+ FUNCTION(mss_lte),
+ FUNCTION(nav_pps),
+ FUNCTION(nav_pps_in_a),
+ FUNCTION(nav_pps_in_b),
+ FUNCTION(nav_tsync),
+ FUNCTION(nfc_disable),
+ FUNCTION(nfc_dwl),
+ FUNCTION(nfc_irq),
+ FUNCTION(ois_sync),
+ FUNCTION(pa_indicator),
+ FUNCTION(pbs0),
+ FUNCTION(pbs1),
+ FUNCTION(pbs2),
+ FUNCTION(pressure_int),
+ FUNCTION(pri_mi2s),
+ FUNCTION(pri_mi2s_mclk_a),
+ FUNCTION(pri_mi2s_mclk_b),
+ FUNCTION(pri_mi2s_ws),
+ FUNCTION(prng_rosc),
+ FUNCTION(pwr_crypto_enabled_a),
+ FUNCTION(pwr_crypto_enabled_b),
+ FUNCTION(pwr_down),
+ FUNCTION(pwr_modem_enabled_a),
+ FUNCTION(pwr_modem_enabled_b),
+ FUNCTION(pwr_nav_enabled_a),
+ FUNCTION(pwr_nav_enabled_b),
+ FUNCTION(qdss_cti_trig_in_a0),
+ FUNCTION(qdss_cti_trig_in_a1),
+ FUNCTION(qdss_cti_trig_in_b0),
+ FUNCTION(qdss_cti_trig_in_b1),
+ FUNCTION(qdss_cti_trig_out_a0),
+ FUNCTION(qdss_cti_trig_out_a1),
+ FUNCTION(qdss_cti_trig_out_b0),
+ FUNCTION(qdss_cti_trig_out_b1),
+ FUNCTION(qdss_traceclk_a),
+ FUNCTION(qdss_traceclk_b),
+ FUNCTION(qdss_tracectl_a),
+ FUNCTION(qdss_tracectl_b),
+ FUNCTION(qdss_tracedata_a),
+ FUNCTION(qdss_tracedata_b),
+ FUNCTION(sd_write),
+ FUNCTION(sdcard_det),
+ FUNCTION(sec_mi2s),
+ FUNCTION(sec_mi2s_mclk_a),
+ FUNCTION(sec_mi2s_mclk_b),
+ FUNCTION(smb_int),
+ FUNCTION(ss_switch),
+ FUNCTION(ssbi_wtr1),
+ FUNCTION(ts_resout),
+ FUNCTION(ts_sample),
+ FUNCTION(ts_xvdd),
+ FUNCTION(tsens_max),
+ FUNCTION(uim1_clk),
+ FUNCTION(uim1_data),
+ FUNCTION(uim1_present),
+ FUNCTION(uim1_reset),
+ FUNCTION(uim2_clk),
+ FUNCTION(uim2_data),
+ FUNCTION(uim2_present),
+ FUNCTION(uim2_reset),
+ FUNCTION(uim_batt),
+ FUNCTION(us_emitter),
+ FUNCTION(us_euro),
+ FUNCTION(wcss_bt),
+ FUNCTION(wcss_fm),
+ FUNCTION(wcss_wlan),
+ FUNCTION(wcss_wlan0),
+ FUNCTION(wcss_wlan1),
+ FUNCTION(wcss_wlan2),
+ FUNCTION(wsa_en),
+ FUNCTION(wsa_io),
+ FUNCTION(wsa_irq),
+};
+
+static const struct msm_pingroup msm8953_groups[] = {
+ PINGROUP(0, blsp_spi1, _, _, _, _, _, _, _, _),
+ PINGROUP(1, blsp_spi1, adsp_ext, _, _, _, _, _, _, _),
+ PINGROUP(2, blsp_spi1, blsp_i2c1, prng_rosc, _, _, _, qdss_cti_trig_out_b0, _, _),
+ PINGROUP(3, blsp_spi1, blsp_i2c1, _, _, _, qdss_cti_trig_out_a1, _, _, _),
+ PINGROUP(4, blsp_spi2, blsp_uart2, ldo_update, _, dac_calib0, _, _, _, _),
+ PINGROUP(5, blsp_spi2, blsp_uart2, ldo_en, _, _, _, _, _, _),
+ PINGROUP(6, blsp_spi2, blsp_uart2, blsp_i2c2, gcc_gp1_clk_b, _, _, _, _, _),
+ PINGROUP(7, blsp_spi2, blsp_uart2, blsp_i2c2, _, atest_gpsadc_dtest0_native, _, _, _, _),
+ PINGROUP(8, blsp_spi3, _, _, qdss_tracedata_b, _, _, _, _, _),
+ PINGROUP(9, blsp_spi3, pwr_modem_enabled_b, _, _, qdss_tracedata_b, _, _, _, _),
+ PINGROUP(10, blsp_spi3, blsp_i2c3, gcc_gp2_clk_b, _, _, _, _, _, _),
+ PINGROUP(11, blsp_spi3, blsp_i2c3, gcc_gp3_clk_b, _, _, _, _, _, _),
+ PINGROUP(12, blsp_spi4, blsp_uart4, pwr_nav_enabled_b, _, _,
+ qdss_tracedata_b, _, dac_calib1, _),
+ PINGROUP(13, blsp_spi4, blsp_uart4, pwr_crypto_enabled_b, _, _, _,
+ qdss_tracedata_b, _, dac_calib2),
+ PINGROUP(14, blsp_spi4, blsp_uart4, blsp_i2c4, _, _, _, _, _, _),
+ PINGROUP(15, blsp_spi4, blsp_uart4, blsp_i2c4, _, _, _, _, _, _),
+ PINGROUP(16, blsp_spi5, blsp_uart5, _, _, qdss_traceclk_a, _, atest_bbrx1, _, _),
+ PINGROUP(17, blsp_spi5, blsp_uart5, m_voc, qdss_cti_trig_in_a0, _, atest_bbrx0, _, _, _),
+ PINGROUP(18, blsp_spi5, blsp_uart5, blsp_i2c5,
+ qdss_tracectl_a, _, atest_gpsadc_dtest1_native, _, _, _),
+ PINGROUP(19, blsp_spi5, blsp_uart5, blsp_i2c5, qdss_tracedata_a, _, _, _, _, _),
+ PINGROUP(20, blsp_spi6, blsp_uart6, _, _, _, qdss_tracectl_b, _, dac_calib15, _),
+ PINGROUP(21, blsp_spi6, blsp_uart6, m_voc, _, _, _, qdss_cti_trig_in_b0, _, dac_calib16),
+ PINGROUP(22, blsp_spi6, blsp_uart6, blsp_i2c6, qdss_traceclk_b, _, atest_wlan0, _, _, _),
+ PINGROUP(23, blsp_spi6, blsp_uart6, blsp_i2c6, qdss_tracedata_b, _, atest_wlan1, _, _, _),
+ PINGROUP(24, mdp_vsync, _, _, _, _, _, _, _, _),
+ PINGROUP(25, mdp_vsync, pri_mi2s_mclk_a, sec_mi2s_mclk_a,
+ qdss_cti_trig_out_b1, _, _, _, _, _),
+ PINGROUP(26, cam_mclk, _, _, _, qdss_tracedata_a, _, _, _, _),
+ PINGROUP(27, cam_mclk, _, _, _, qdss_tracedata_a, _, _, _, _),
+ PINGROUP(28, cam_mclk, _, _, _, qdss_tracedata_a, _, dac_calib3, _, _),
+ PINGROUP(29, cci_i2c, pwr_modem_enabled_a, _, _, _, qdss_tracedata_a, _, dac_calib4, _),
+ PINGROUP(30, cci_i2c, _, _, _, qdss_tracedata_a, _, dac_calib19, _, _),
+ PINGROUP(31, cci_i2c, _, _, _, qdss_tracedata_a, _, _, _, _),
+ PINGROUP(32, cci_i2c, _, _, _, qdss_tracedata_a, _, _, _, _),
+ PINGROUP(33, cci_timer0, _, _, _, _, qdss_tracedata_a, _, _, _),
+ PINGROUP(34, cci_timer1, _, _, _, _, qdss_tracedata_a, _, _, _),
+ PINGROUP(35, cci_timer2, blsp1_spi, pwr_nav_enabled_a, _, _, _, qdss_tracedata_a, _, _),
+ PINGROUP(36, cci_timer3, blsp1_spi, _, pwr_crypto_enabled_a, _, _, _, qdss_tracedata_a, _),
+ PINGROUP(37, _, _, _, _, _, _, _, _, _),
+ PINGROUP(38, cci_async, _, qdss_tracedata_a, _, _, _, _, _, _),
+ PINGROUP(39, _, _, _, qdss_tracedata_a, _, dac_calib5, _, _, _),
+ PINGROUP(40, _, _, qdss_tracedata_a, _, dac_calib6, _, _, _, _),
+ PINGROUP(41, cci_timer4, blsp3_spi, gcc_gp1_clk_b, _, _,
+ qdss_cti_trig_out_a0, _, dac_calib7, _),
+ PINGROUP(42, gcc_gp1_clk_a, qdss_tracedata_b, _, dac_calib8, _, _, _, _, _),
+ PINGROUP(43, gcc_gp2_clk_a, qdss_tracedata_b, _, dac_calib9, _, _, _, _, _),
+ PINGROUP(44, gcc_gp3_clk_a, qdss_tracedata_b, _, dac_calib10, _, _, _, _, _),
+ PINGROUP(45, _, qdss_tracedata_b, _, dac_calib11, _, _, _, _, _),
+ PINGROUP(46, qdss_tracedata_b, _, dac_calib12, _, _, _, _, _, _),
+ PINGROUP(47, blsp6_spi, qdss_tracedata_b, _, dac_calib13, _, _, _, _, _),
+ PINGROUP(48, blsp6_spi, _, qdss_cti_trig_in_b1, _, dac_calib14, _, _, _, _),
+ PINGROUP(49, uim_batt, _, _, _, _, _, _, _, _),
+ PINGROUP(50, blsp3_spi, sd_write, _, _, _, qdss_tracedata_a, _, _, _),
+ PINGROUP(51, uim1_data, _, _, _, _, _, _, _, _),
+ PINGROUP(52, uim1_clk, _, _, _, _, _, _, _, _),
+ PINGROUP(53, uim1_reset, _, _, _, _, _, _, _, _),
+ PINGROUP(54, uim1_present, _, _, _, _, _, _, _, _),
+ PINGROUP(55, uim2_data, _, _, _, _, _, _, _, _),
+ PINGROUP(56, uim2_clk, _, _, _, _, _, _, _, _),
+ PINGROUP(57, uim2_reset, _, _, _, _, _, _, _, _),
+ PINGROUP(58, uim2_present, _, _, _, _, _, _, _, _),
+ PINGROUP(59, _, _, _, _, _, _, _, _, _),
+ PINGROUP(60, _, _, _, _, _, _, _, _, _),
+ PINGROUP(61, _, _, _, _, _, _, _, _, _),
+ PINGROUP(62, _, _, _, _, _, _, _, _, _),
+ PINGROUP(63, atest_char3, dbg_out, bimc_dte0, _, _, _, _, _, _),
+ PINGROUP(64, _, _, _, _, _, _, _, _, _),
+ PINGROUP(65, bimc_dte0, _, _, _, _, _, _, _, _),
+ PINGROUP(66, sec_mi2s_mclk_b, pri_mi2s, _, qdss_tracedata_b, _, _, _, _, _),
+ PINGROUP(67, cdc_pdm0, atest_char1, ebi_cdc, _, dac_calib17, _, _, _, _),
+ PINGROUP(68, cdc_pdm0, atest_char0, _, _, _, _, _, _, _),
+ PINGROUP(69, cdc_pdm0, pri_mi2s_mclk_b, ebi_cdc, _, _, _, _, _, _),
+ PINGROUP(70, lpass_slimbus, cdc_pdm0, _, _, _, _, _, _, _),
+ PINGROUP(71, lpass_slimbus0, cdc_pdm0, _, _, _, _, _, _, _),
+ PINGROUP(72, lpass_slimbus1, cdc_pdm0, _, _, _, _, _, _, _),
+ PINGROUP(73, cdc_pdm0, _, _, _, _, _, _, _, _),
+ PINGROUP(74, cdc_pdm0, _, _, _, _, _, _, _, _),
+ PINGROUP(75, wcss_bt, atest_char2, _, ebi_ch0, _, _, _, _, _),
+ PINGROUP(76, wcss_wlan2, _, _, _, _, _, _, _, _),
+ PINGROUP(77, wcss_wlan1, _, _, _, _, _, _, _, _),
+ PINGROUP(78, wcss_wlan0, _, _, _, _, _, _, _, _),
+ PINGROUP(79, wcss_wlan, _, _, _, _, _, _, _, _),
+ PINGROUP(80, wcss_wlan, _, _, _, _, _, _, _, _),
+ PINGROUP(81, wcss_fm, ext_lpass, _, _, _, _, _, _, _),
+ PINGROUP(82, wcss_fm, mss_lte, _, _, _, _, _, _, _),
+ PINGROUP(83, wcss_bt, mss_lte, _, _, _, _, _, _, _),
+ PINGROUP(84, wcss_bt, _, _, _, _, _, _, _, _),
+ PINGROUP(85, pbs0, cri_trng0, _, _, _, _, _, _, _),
+ PINGROUP(86, pbs1, cri_trng1, qdss_tracedata_b, _, _, _, _, _, _),
+ PINGROUP(87, pbs2, cri_trng, qdss_tracedata_b, gcc_tlmm, _, _, _, _, _),
+ PINGROUP(88, pri_mi2s, _, _, _, qdss_tracedata_b, _, _, _, _),
+ PINGROUP(89, dmic0_clk, blsp7_spi, _, _, _, _, _, _, _),
+ PINGROUP(90, dmic0_data, blsp7_spi, _, _, _, _, _, _, _),
+ PINGROUP(91, pri_mi2s, _, _, _, qdss_cti_trig_in_a1, _, _, _, _),
+ PINGROUP(92, pri_mi2s_ws, _, _, _, qdss_tracedata_b, _, _, _, _),
+ PINGROUP(93, pri_mi2s, _, _, _, _, _, _, _, _),
+ PINGROUP(94, wsa_io, pri_mi2s, _, _, _, _, _, _, _),
+ PINGROUP(95, wsa_io, pri_mi2s, _, _, _, _, _, _, _),
+ PINGROUP(96, blsp_spi8, _, _, _, _, _, _, _, _),
+ PINGROUP(97, blsp_spi8, _, _, _, _, _, _, _, _),
+ PINGROUP(98, blsp_i2c8, blsp_spi8, gcc_plltest, _, _, _, _, _, _),
+ PINGROUP(99, blsp_i2c8, blsp_spi8, gcc_plltest, _, _, _, _, _, _),
+ PINGROUP(100, _, _, _, _, _, _, _, _, _),
+ PINGROUP(101, _, _, _, _, _, _, _, _, _),
+ PINGROUP(102, _, _, _, _, _, _, _, _, _),
+ PINGROUP(103, _, _, _, _, _, _, _, _, _),
+ PINGROUP(104, _, _, _, _, _, _, _, _, _),
+ PINGROUP(105, _, _, _, _, _, _, _, _, _),
+ PINGROUP(106, _, _, _, _, _, _, _, _, _),
+ PINGROUP(107, _, _, _, _, _, _, _, _, _),
+ PINGROUP(108, _, _, _, _, _, _, _, _, _),
+ PINGROUP(109, _, _, _, _, _, _, _, _, _),
+ PINGROUP(110, _, _, _, _, _, _, _, _, _),
+ PINGROUP(111, _, _, nav_pps_in_a, _, _, _, _, _, _),
+ PINGROUP(112, _, pa_indicator, _, _, _, _, _, _, _),
+ PINGROUP(113, _, nav_pps_in_b, nav_pps, modem_tsync, nav_tsync, _, _, _, _),
+ PINGROUP(114, _, ssbi_wtr1, _, _, _, _, _, _, _),
+ PINGROUP(115, _, gsm1_tx, _, dac_calib18, _, _, _, _, _),
+ PINGROUP(116, _, _, _, _, _, _, _, _, _),
+ PINGROUP(117, gsm0_tx, _, _, _, _, _, _, _, _),
+ PINGROUP(118, _, ebi_cdc, _, _, _, _, _, _, _),
+ PINGROUP(119, _, ebi_cdc, _, _, _, _, _, _, _),
+ PINGROUP(120, _, atest_char, ebi_cdc, _, atest_tsens, _, _, _, _),
+ PINGROUP(121, _, _, _, bimc_dte1, _, _, _, _, _),
+ PINGROUP(122, _, _, _, bimc_dte1, _, _, _, _, _),
+ PINGROUP(123, _, ssbi_wtr1, ebi_cdc, _, _, _, _, _, _),
+ PINGROUP(124, _, _, _, _, _, _, _, _, _),
+ PINGROUP(125, _, _, _, _, _, _, _, _, _),
+ PINGROUP(126, _, _, _, _, _, _, _, _, _),
+ PINGROUP(127, _, _, _, _, _, _, _, _, _),
+ PINGROUP(128, cam_mclk, _, dac_calib20, _, _, _, _, _, _),
+ PINGROUP(129, ddr_bist, _, dac_calib21, _, _, _, _, _, _),
+ PINGROUP(130, ddr_bist, _, dac_calib22, _, _, _, _, _, _),
+ PINGROUP(131, ddr_bist, _, dac_calib23, _, _, _, _, _, _),
+ PINGROUP(132, ddr_bist, _, dac_calib24, _, _, _, _, _, _),
+ PINGROUP(133, _, dac_calib25, _, _, _, _, _, _, _),
+ PINGROUP(134, _, _, _, _, _, _, _, _, _),
+ PINGROUP(135, sec_mi2s, blsp_spi7, blsp_i2c7, _, _, _, _, _, _),
+ PINGROUP(136, sec_mi2s, blsp_spi7, blsp_i2c7, _, _, _, _, _, _),
+ PINGROUP(137, sec_mi2s, blsp_spi7, _, _, _, _, _, _, _),
+ PINGROUP(138, sec_mi2s, blsp_spi7, _, _, _, _, _, _, _),
+ PINGROUP(139, tsens_max, _, _, _, _, _, _, _, _),
+ PINGROUP(140, _, _, _, _, _, _, _, _, _),
+ PINGROUP(141, _, _, _, _, _, _, _, _, _),
+ SDC_QDSD_PINGROUP(qdsd_clk, 0x19c000, 3, 0),
+ SDC_QDSD_PINGROUP(qdsd_cmd, 0x19c000, 8, 5),
+ SDC_QDSD_PINGROUP(qdsd_data0, 0x19c000, 13, 10),
+ SDC_QDSD_PINGROUP(qdsd_data1, 0x19c000, 18, 15),
+ SDC_QDSD_PINGROUP(qdsd_data2, 0x19c000, 23, 20),
+ SDC_QDSD_PINGROUP(qdsd_data3, 0x19c000, 28, 25),
+ SDC_QDSD_PINGROUP(sdc1_clk, 0x10a000, 13, 6),
+ SDC_QDSD_PINGROUP(sdc1_cmd, 0x10a000, 11, 3),
+ SDC_QDSD_PINGROUP(sdc1_data, 0x10a000, 9, 0),
+ SDC_QDSD_PINGROUP(sdc1_rclk, 0x10a000, 15, 0),
+ SDC_QDSD_PINGROUP(sdc2_clk, 0x109000, 14, 6),
+ SDC_QDSD_PINGROUP(sdc2_cmd, 0x109000, 11, 3),
+ SDC_QDSD_PINGROUP(sdc2_data, 0x109000, 9, 0),
+};
+
+static const struct msm_pinctrl_soc_data msm8953_pinctrl = {
+ .pins = msm8953_pins,
+ .npins = ARRAY_SIZE(msm8953_pins),
+ .functions = msm8953_functions,
+ .nfunctions = ARRAY_SIZE(msm8953_functions),
+ .groups = msm8953_groups,
+ .ngroups = ARRAY_SIZE(msm8953_groups),
+ .ngpios = 142,
+};
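+
+/*
+ * Note: ngpios covers only the 142 TLMM GPIOs (gpio0..gpio141); the
+ * SDC/QDSD pad groups above are additional non-GPIO groups that carry
+ * pull and drive-strength control but cannot be requested as GPIOs.
+ */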
+
+static int msm8953_pinctrl_probe(struct platform_device *pdev)
+{
+ return msm_pinctrl_probe(pdev, &msm8953_pinctrl);
+}
+
+static const struct of_device_id msm8953_pinctrl_of_match[] = {
+ { .compatible = "qcom,msm8953-pinctrl", },
+ { },
+};
+
+static struct platform_driver msm8953_pinctrl_driver = {
+ .driver = {
+ .name = "msm8953-pinctrl",
+ .of_match_table = msm8953_pinctrl_of_match,
+ },
+ .probe = msm8953_pinctrl_probe,
+ .remove = msm_pinctrl_remove,
+};
+
+static int __init msm8953_pinctrl_init(void)
+{
+ return platform_driver_register(&msm8953_pinctrl_driver);
+}
+arch_initcall(msm8953_pinctrl_init);
+
+static void __exit msm8953_pinctrl_exit(void)
+{
+ platform_driver_unregister(&msm8953_pinctrl_driver);
+}
+module_exit(msm8953_pinctrl_exit);
+
+MODULE_DESCRIPTION("QTI msm8953 pinctrl driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, msm8953_pinctrl_of_match);
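+
+/*
+ * Usage sketch (illustrative only; the unit address, reg size and
+ * interrupt below are assumptions, not taken from this patch): a board
+ * DT would instantiate the TLMM with the compatible string registered
+ * above, e.g.:
+ *
+ *	tlmm: pinctrl@1000000 {
+ *		compatible = "qcom,msm8953-pinctrl";
+ *		reg = <0x1000000 0x300000>;
+ *		interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+ *		gpio-controller;
+ *		#gpio-cells = <2>;
+ *		interrupt-controller;
+ *		#interrupt-cells = <2>;
+ *	};
+ */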
diff --git a/drivers/pinctrl/qcom/pinctrl-sc7280.c b/drivers/pinctrl/qcom/pinctrl-sc7280.c
new file mode 100644
index 000000000000..8daccd530285
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-sc7280.c
@@ -0,0 +1,1495 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-msm.h"
+
+#define FUNCTION(fname) \
+ [msm_mux_##fname] = { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
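+
+/*
+ * For reference, FUNCTION(qup00) expands to:
+ *
+ *	[msm_mux_qup00] = {
+ *		.name = "qup00",
+ *		.groups = qup00_groups,
+ *		.ngroups = ARRAY_SIZE(qup00_groups),
+ *	}
+ *
+ * so each msm_function entry is indexed by its enum value and points at
+ * the list of pin groups that can mux to that function.
+ */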
+
+#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9) \
+ { \
+ .name = "gpio" #id, \
+ .pins = gpio##id##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(gpio##id##_pins), \
+ .funcs = (int[]){ \
+ msm_mux_gpio, /* gpio mode */ \
+ msm_mux_##f1, \
+ msm_mux_##f2, \
+ msm_mux_##f3, \
+ msm_mux_##f4, \
+ msm_mux_##f5, \
+ msm_mux_##f6, \
+ msm_mux_##f7, \
+ msm_mux_##f8, \
+ msm_mux_##f9 \
+ }, \
+ .nfuncs = 10, \
+ .ctl_reg = 0x1000 * id, \
+ .io_reg = 0x1000 * id + 0x4, \
+ .intr_cfg_reg = 0x1000 * id + 0x8, \
+ .intr_status_reg = 0x1000 * id + 0xc, \
+ .intr_target_reg = 0x1000 * id + 0x8, \
+ .mux_bit = 2, \
+ .pull_bit = 0, \
+ .drv_bit = 6, \
+ .oe_bit = 9, \
+ .in_bit = 0, \
+ .out_bit = 1, \
+ .intr_enable_bit = 0, \
+ .intr_status_bit = 0, \
+ .intr_target_bit = 5, \
+ .intr_target_kpss_val = 3, \
+ .intr_raw_status_bit = 4, \
+ .intr_polarity_bit = 1, \
+ .intr_detection_bit = 2, \
+ .intr_detection_width = 2, \
+ }
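+
+/*
+ * Each TLMM GPIO occupies a 0x1000-byte register window, so for example
+ * PINGROUP(5, ...) resolves to ctl_reg = 0x5000, io_reg = 0x5004,
+ * intr_cfg_reg = 0x5008 and intr_status_reg = 0x500c. Note that
+ * intr_target_reg shares the 0x8 offset with intr_cfg_reg: the wakeup
+ * target field lives in the same configuration register, at bit 5.
+ */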
+
+#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = ctl, \
+ .io_reg = 0, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = pull, \
+ .drv_bit = drv, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = -1, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
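+
+/*
+ * The SDC/QDSD pads expose only pull and drive-strength control through
+ * a single shared control register; the -1 sentinels tell the common
+ * pinctrl-msm code that function muxing, GPIO I/O and interrupts are
+ * not supported on these groups.
+ */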
+
+#define UFS_RESET(pg_name, offset) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = offset, \
+ .io_reg = offset + 0x4, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = 3, \
+ .drv_bit = 0, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = 0, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
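+
+/*
+ * UFS_RESET is a dedicated output-only pad: it has its own ctl/io
+ * register pair (pull at bit 3 and drive strength at bit 0 of ctl_reg,
+ * output value at bit 0 of io_reg) but, like the SDC pads, no function
+ * muxing, no input path and no interrupt support.
+ */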
+
+static const struct pinctrl_pin_desc sc7280_pins[] = {
+ PINCTRL_PIN(0, "GPIO_0"),
+ PINCTRL_PIN(1, "GPIO_1"),
+ PINCTRL_PIN(2, "GPIO_2"),
+ PINCTRL_PIN(3, "GPIO_3"),
+ PINCTRL_PIN(4, "GPIO_4"),
+ PINCTRL_PIN(5, "GPIO_5"),
+ PINCTRL_PIN(6, "GPIO_6"),
+ PINCTRL_PIN(7, "GPIO_7"),
+ PINCTRL_PIN(8, "GPIO_8"),
+ PINCTRL_PIN(9, "GPIO_9"),
+ PINCTRL_PIN(10, "GPIO_10"),
+ PINCTRL_PIN(11, "GPIO_11"),
+ PINCTRL_PIN(12, "GPIO_12"),
+ PINCTRL_PIN(13, "GPIO_13"),
+ PINCTRL_PIN(14, "GPIO_14"),
+ PINCTRL_PIN(15, "GPIO_15"),
+ PINCTRL_PIN(16, "GPIO_16"),
+ PINCTRL_PIN(17, "GPIO_17"),
+ PINCTRL_PIN(18, "GPIO_18"),
+ PINCTRL_PIN(19, "GPIO_19"),
+ PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(21, "GPIO_21"),
+ PINCTRL_PIN(22, "GPIO_22"),
+ PINCTRL_PIN(23, "GPIO_23"),
+ PINCTRL_PIN(24, "GPIO_24"),
+ PINCTRL_PIN(25, "GPIO_25"),
+ PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(27, "GPIO_27"),
+ PINCTRL_PIN(28, "GPIO_28"),
+ PINCTRL_PIN(29, "GPIO_29"),
+ PINCTRL_PIN(30, "GPIO_30"),
+ PINCTRL_PIN(31, "GPIO_31"),
+ PINCTRL_PIN(32, "GPIO_32"),
+ PINCTRL_PIN(33, "GPIO_33"),
+ PINCTRL_PIN(34, "GPIO_34"),
+ PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(36, "GPIO_36"),
+ PINCTRL_PIN(37, "GPIO_37"),
+ PINCTRL_PIN(38, "GPIO_38"),
+ PINCTRL_PIN(39, "GPIO_39"),
+ PINCTRL_PIN(40, "GPIO_40"),
+ PINCTRL_PIN(41, "GPIO_41"),
+ PINCTRL_PIN(42, "GPIO_42"),
+ PINCTRL_PIN(43, "GPIO_43"),
+ PINCTRL_PIN(44, "GPIO_44"),
+ PINCTRL_PIN(45, "GPIO_45"),
+ PINCTRL_PIN(46, "GPIO_46"),
+ PINCTRL_PIN(47, "GPIO_47"),
+ PINCTRL_PIN(48, "GPIO_48"),
+ PINCTRL_PIN(49, "GPIO_49"),
+ PINCTRL_PIN(50, "GPIO_50"),
+ PINCTRL_PIN(51, "GPIO_51"),
+ PINCTRL_PIN(52, "GPIO_52"),
+ PINCTRL_PIN(53, "GPIO_53"),
+ PINCTRL_PIN(54, "GPIO_54"),
+ PINCTRL_PIN(55, "GPIO_55"),
+ PINCTRL_PIN(56, "GPIO_56"),
+ PINCTRL_PIN(57, "GPIO_57"),
+ PINCTRL_PIN(58, "GPIO_58"),
+ PINCTRL_PIN(59, "GPIO_59"),
+ PINCTRL_PIN(60, "GPIO_60"),
+ PINCTRL_PIN(61, "GPIO_61"),
+ PINCTRL_PIN(62, "GPIO_62"),
+ PINCTRL_PIN(63, "GPIO_63"),
+ PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
+ PINCTRL_PIN(68, "GPIO_68"),
+ PINCTRL_PIN(69, "GPIO_69"),
+ PINCTRL_PIN(70, "GPIO_70"),
+ PINCTRL_PIN(71, "GPIO_71"),
+ PINCTRL_PIN(72, "GPIO_72"),
+ PINCTRL_PIN(73, "GPIO_73"),
+ PINCTRL_PIN(74, "GPIO_74"),
+ PINCTRL_PIN(75, "GPIO_75"),
+ PINCTRL_PIN(76, "GPIO_76"),
+ PINCTRL_PIN(77, "GPIO_77"),
+ PINCTRL_PIN(78, "GPIO_78"),
+ PINCTRL_PIN(79, "GPIO_79"),
+ PINCTRL_PIN(80, "GPIO_80"),
+ PINCTRL_PIN(81, "GPIO_81"),
+ PINCTRL_PIN(82, "GPIO_82"),
+ PINCTRL_PIN(83, "GPIO_83"),
+ PINCTRL_PIN(84, "GPIO_84"),
+ PINCTRL_PIN(85, "GPIO_85"),
+ PINCTRL_PIN(86, "GPIO_86"),
+ PINCTRL_PIN(87, "GPIO_87"),
+ PINCTRL_PIN(88, "GPIO_88"),
+ PINCTRL_PIN(89, "GPIO_89"),
+ PINCTRL_PIN(90, "GPIO_90"),
+ PINCTRL_PIN(91, "GPIO_91"),
+ PINCTRL_PIN(92, "GPIO_92"),
+ PINCTRL_PIN(93, "GPIO_93"),
+ PINCTRL_PIN(94, "GPIO_94"),
+ PINCTRL_PIN(95, "GPIO_95"),
+ PINCTRL_PIN(96, "GPIO_96"),
+ PINCTRL_PIN(97, "GPIO_97"),
+ PINCTRL_PIN(98, "GPIO_98"),
+ PINCTRL_PIN(99, "GPIO_99"),
+ PINCTRL_PIN(100, "GPIO_100"),
+ PINCTRL_PIN(101, "GPIO_101"),
+ PINCTRL_PIN(102, "GPIO_102"),
+ PINCTRL_PIN(103, "GPIO_103"),
+ PINCTRL_PIN(104, "GPIO_104"),
+ PINCTRL_PIN(105, "GPIO_105"),
+ PINCTRL_PIN(106, "GPIO_106"),
+ PINCTRL_PIN(107, "GPIO_107"),
+ PINCTRL_PIN(108, "GPIO_108"),
+ PINCTRL_PIN(109, "GPIO_109"),
+ PINCTRL_PIN(110, "GPIO_110"),
+ PINCTRL_PIN(111, "GPIO_111"),
+ PINCTRL_PIN(112, "GPIO_112"),
+ PINCTRL_PIN(113, "GPIO_113"),
+ PINCTRL_PIN(114, "GPIO_114"),
+ PINCTRL_PIN(115, "GPIO_115"),
+ PINCTRL_PIN(116, "GPIO_116"),
+ PINCTRL_PIN(117, "GPIO_117"),
+ PINCTRL_PIN(118, "GPIO_118"),
+ PINCTRL_PIN(119, "GPIO_119"),
+ PINCTRL_PIN(120, "GPIO_120"),
+ PINCTRL_PIN(121, "GPIO_121"),
+ PINCTRL_PIN(122, "GPIO_122"),
+ PINCTRL_PIN(123, "GPIO_123"),
+ PINCTRL_PIN(124, "GPIO_124"),
+ PINCTRL_PIN(125, "GPIO_125"),
+ PINCTRL_PIN(126, "GPIO_126"),
+ PINCTRL_PIN(127, "GPIO_127"),
+ PINCTRL_PIN(128, "GPIO_128"),
+ PINCTRL_PIN(129, "GPIO_129"),
+ PINCTRL_PIN(130, "GPIO_130"),
+ PINCTRL_PIN(131, "GPIO_131"),
+ PINCTRL_PIN(132, "GPIO_132"),
+ PINCTRL_PIN(133, "GPIO_133"),
+ PINCTRL_PIN(134, "GPIO_134"),
+ PINCTRL_PIN(135, "GPIO_135"),
+ PINCTRL_PIN(136, "GPIO_136"),
+ PINCTRL_PIN(137, "GPIO_137"),
+ PINCTRL_PIN(138, "GPIO_138"),
+ PINCTRL_PIN(139, "GPIO_139"),
+ PINCTRL_PIN(140, "GPIO_140"),
+ PINCTRL_PIN(141, "GPIO_141"),
+ PINCTRL_PIN(142, "GPIO_142"),
+ PINCTRL_PIN(143, "GPIO_143"),
+ PINCTRL_PIN(144, "GPIO_144"),
+ PINCTRL_PIN(145, "GPIO_145"),
+ PINCTRL_PIN(146, "GPIO_146"),
+ PINCTRL_PIN(147, "GPIO_147"),
+ PINCTRL_PIN(148, "GPIO_148"),
+ PINCTRL_PIN(149, "GPIO_149"),
+ PINCTRL_PIN(150, "GPIO_150"),
+ PINCTRL_PIN(151, "GPIO_151"),
+ PINCTRL_PIN(152, "GPIO_152"),
+ PINCTRL_PIN(153, "GPIO_153"),
+ PINCTRL_PIN(154, "GPIO_154"),
+ PINCTRL_PIN(155, "GPIO_155"),
+ PINCTRL_PIN(156, "GPIO_156"),
+ PINCTRL_PIN(157, "GPIO_157"),
+ PINCTRL_PIN(158, "GPIO_158"),
+ PINCTRL_PIN(159, "GPIO_159"),
+ PINCTRL_PIN(160, "GPIO_160"),
+ PINCTRL_PIN(161, "GPIO_161"),
+ PINCTRL_PIN(162, "GPIO_162"),
+ PINCTRL_PIN(163, "GPIO_163"),
+ PINCTRL_PIN(164, "GPIO_164"),
+ PINCTRL_PIN(165, "GPIO_165"),
+ PINCTRL_PIN(166, "GPIO_166"),
+ PINCTRL_PIN(167, "GPIO_167"),
+ PINCTRL_PIN(168, "GPIO_168"),
+ PINCTRL_PIN(169, "GPIO_169"),
+ PINCTRL_PIN(170, "GPIO_170"),
+ PINCTRL_PIN(171, "GPIO_171"),
+ PINCTRL_PIN(172, "GPIO_172"),
+ PINCTRL_PIN(173, "GPIO_173"),
+ PINCTRL_PIN(174, "GPIO_174"),
+ PINCTRL_PIN(175, "UFS_RESET"),
+ PINCTRL_PIN(176, "SDC1_RCLK"),
+ PINCTRL_PIN(177, "SDC1_CLK"),
+ PINCTRL_PIN(178, "SDC1_CMD"),
+ PINCTRL_PIN(179, "SDC1_DATA"),
+ PINCTRL_PIN(180, "SDC2_CLK"),
+ PINCTRL_PIN(181, "SDC2_CMD"),
+ PINCTRL_PIN(182, "SDC2_DATA"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) \
+ static const unsigned int gpio##pin##_pins[] = { pin }
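+/*
+ * e.g. DECLARE_MSM_GPIO_PINS(0) expands to:
+ *	static const unsigned int gpio0_pins[] = { 0 };
+ * giving every GPIO a one-element pin array for its PINGROUP entry.
+ */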
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+DECLARE_MSM_GPIO_PINS(108);
+DECLARE_MSM_GPIO_PINS(109);
+DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+DECLARE_MSM_GPIO_PINS(113);
+DECLARE_MSM_GPIO_PINS(114);
+DECLARE_MSM_GPIO_PINS(115);
+DECLARE_MSM_GPIO_PINS(116);
+DECLARE_MSM_GPIO_PINS(117);
+DECLARE_MSM_GPIO_PINS(118);
+DECLARE_MSM_GPIO_PINS(119);
+DECLARE_MSM_GPIO_PINS(120);
+DECLARE_MSM_GPIO_PINS(121);
+DECLARE_MSM_GPIO_PINS(122);
+DECLARE_MSM_GPIO_PINS(123);
+DECLARE_MSM_GPIO_PINS(124);
+DECLARE_MSM_GPIO_PINS(125);
+DECLARE_MSM_GPIO_PINS(126);
+DECLARE_MSM_GPIO_PINS(127);
+DECLARE_MSM_GPIO_PINS(128);
+DECLARE_MSM_GPIO_PINS(129);
+DECLARE_MSM_GPIO_PINS(130);
+DECLARE_MSM_GPIO_PINS(131);
+DECLARE_MSM_GPIO_PINS(132);
+DECLARE_MSM_GPIO_PINS(133);
+DECLARE_MSM_GPIO_PINS(134);
+DECLARE_MSM_GPIO_PINS(135);
+DECLARE_MSM_GPIO_PINS(136);
+DECLARE_MSM_GPIO_PINS(137);
+DECLARE_MSM_GPIO_PINS(138);
+DECLARE_MSM_GPIO_PINS(139);
+DECLARE_MSM_GPIO_PINS(140);
+DECLARE_MSM_GPIO_PINS(141);
+DECLARE_MSM_GPIO_PINS(142);
+DECLARE_MSM_GPIO_PINS(143);
+DECLARE_MSM_GPIO_PINS(144);
+DECLARE_MSM_GPIO_PINS(145);
+DECLARE_MSM_GPIO_PINS(146);
+DECLARE_MSM_GPIO_PINS(147);
+DECLARE_MSM_GPIO_PINS(148);
+DECLARE_MSM_GPIO_PINS(149);
+DECLARE_MSM_GPIO_PINS(150);
+DECLARE_MSM_GPIO_PINS(151);
+DECLARE_MSM_GPIO_PINS(152);
+DECLARE_MSM_GPIO_PINS(153);
+DECLARE_MSM_GPIO_PINS(154);
+DECLARE_MSM_GPIO_PINS(155);
+DECLARE_MSM_GPIO_PINS(156);
+DECLARE_MSM_GPIO_PINS(157);
+DECLARE_MSM_GPIO_PINS(158);
+DECLARE_MSM_GPIO_PINS(159);
+DECLARE_MSM_GPIO_PINS(160);
+DECLARE_MSM_GPIO_PINS(161);
+DECLARE_MSM_GPIO_PINS(162);
+DECLARE_MSM_GPIO_PINS(163);
+DECLARE_MSM_GPIO_PINS(164);
+DECLARE_MSM_GPIO_PINS(165);
+DECLARE_MSM_GPIO_PINS(166);
+DECLARE_MSM_GPIO_PINS(167);
+DECLARE_MSM_GPIO_PINS(168);
+DECLARE_MSM_GPIO_PINS(169);
+DECLARE_MSM_GPIO_PINS(170);
+DECLARE_MSM_GPIO_PINS(171);
+DECLARE_MSM_GPIO_PINS(172);
+DECLARE_MSM_GPIO_PINS(173);
+DECLARE_MSM_GPIO_PINS(174);
+
+static const unsigned int ufs_reset_pins[] = { 175 };
+static const unsigned int sdc1_rclk_pins[] = { 176 };
+static const unsigned int sdc1_clk_pins[] = { 177 };
+static const unsigned int sdc1_cmd_pins[] = { 178 };
+static const unsigned int sdc1_data_pins[] = { 179 };
+static const unsigned int sdc2_clk_pins[] = { 180 };
+static const unsigned int sdc2_cmd_pins[] = { 181 };
+static const unsigned int sdc2_data_pins[] = { 182 };
+
+enum sc7280_functions {
+ msm_mux_atest_char,
+ msm_mux_atest_char0,
+ msm_mux_atest_char1,
+ msm_mux_atest_char2,
+ msm_mux_atest_char3,
+ msm_mux_atest_usb0,
+ msm_mux_atest_usb00,
+ msm_mux_atest_usb01,
+ msm_mux_atest_usb02,
+ msm_mux_atest_usb03,
+ msm_mux_atest_usb1,
+ msm_mux_atest_usb10,
+ msm_mux_atest_usb11,
+ msm_mux_atest_usb12,
+ msm_mux_atest_usb13,
+ msm_mux_audio_ref,
+ msm_mux_cam_mclk,
+ msm_mux_cci_async,
+ msm_mux_cci_i2c,
+ msm_mux_cci_timer0,
+ msm_mux_cci_timer1,
+ msm_mux_cci_timer2,
+ msm_mux_cci_timer3,
+ msm_mux_cci_timer4,
+ msm_mux_cmu_rng0,
+ msm_mux_cmu_rng1,
+ msm_mux_cmu_rng2,
+ msm_mux_cmu_rng3,
+ msm_mux_coex_uart1,
+ msm_mux_cri_trng,
+ msm_mux_cri_trng0,
+ msm_mux_cri_trng1,
+ msm_mux_dbg_out,
+ msm_mux_ddr_bist,
+ msm_mux_ddr_pxi0,
+ msm_mux_ddr_pxi1,
+ msm_mux_dp_hot,
+ msm_mux_dp_lcd,
+ msm_mux_edp_hot,
+ msm_mux_edp_lcd,
+ msm_mux_gcc_gp1,
+ msm_mux_gcc_gp2,
+ msm_mux_gcc_gp3,
+ msm_mux_gpio,
+ msm_mux_host2wlan_sol,
+ msm_mux_ibi_i3c,
+ msm_mux_jitter_bist,
+ msm_mux_lpass_slimbus,
+ msm_mux_mdp_vsync,
+ msm_mux_mdp_vsync0,
+ msm_mux_mdp_vsync1,
+ msm_mux_mdp_vsync2,
+ msm_mux_mdp_vsync3,
+ msm_mux_mdp_vsync4,
+ msm_mux_mdp_vsync5,
+ msm_mux_mi2s0_data0,
+ msm_mux_mi2s0_data1,
+ msm_mux_mi2s0_sck,
+ msm_mux_mi2s0_ws,
+ msm_mux_mi2s1_data0,
+ msm_mux_mi2s1_data1,
+ msm_mux_mi2s1_sck,
+ msm_mux_mi2s1_ws,
+ msm_mux_mi2s2_data0,
+ msm_mux_mi2s2_data1,
+ msm_mux_mi2s2_sck,
+ msm_mux_mi2s2_ws,
+ msm_mux_mss_grfc0,
+ msm_mux_mss_grfc1,
+ msm_mux_mss_grfc10,
+ msm_mux_mss_grfc11,
+ msm_mux_mss_grfc12,
+ msm_mux_mss_grfc2,
+ msm_mux_mss_grfc3,
+ msm_mux_mss_grfc4,
+ msm_mux_mss_grfc5,
+ msm_mux_mss_grfc6,
+ msm_mux_mss_grfc7,
+ msm_mux_mss_grfc8,
+ msm_mux_mss_grfc9,
+ msm_mux_nav_gpio0,
+ msm_mux_nav_gpio1,
+ msm_mux_nav_gpio2,
+ msm_mux_pa_indicator,
+ msm_mux_pcie0_clkreqn,
+ msm_mux_pcie1_clkreqn,
+ msm_mux_phase_flag,
+ msm_mux_pll_bist,
+ msm_mux_pll_bypassnl,
+ msm_mux_pll_clk,
+ msm_mux_pll_reset,
+ msm_mux_pri_mi2s,
+ msm_mux_prng_rosc,
+ msm_mux_qdss,
+ msm_mux_qdss_cti,
+ msm_mux_qlink0_enable,
+ msm_mux_qlink0_request,
+ msm_mux_qlink0_wmss,
+ msm_mux_qlink1_enable,
+ msm_mux_qlink1_request,
+ msm_mux_qlink1_wmss,
+ msm_mux_qspi_clk,
+ msm_mux_qspi_cs,
+ msm_mux_qspi_data,
+ msm_mux_qup00,
+ msm_mux_qup01,
+ msm_mux_qup02,
+ msm_mux_qup03,
+ msm_mux_qup04,
+ msm_mux_qup05,
+ msm_mux_qup06,
+ msm_mux_qup07,
+ msm_mux_qup10,
+ msm_mux_qup11,
+ msm_mux_qup12,
+ msm_mux_qup13,
+ msm_mux_qup14,
+ msm_mux_qup15,
+ msm_mux_qup16,
+ msm_mux_qup17,
+ msm_mux_sd_write,
+ msm_mux_sdc40,
+ msm_mux_sdc41,
+ msm_mux_sdc42,
+ msm_mux_sdc43,
+ msm_mux_sdc4_clk,
+ msm_mux_sdc4_cmd,
+ msm_mux_sec_mi2s,
+ msm_mux_tb_trig,
+ msm_mux_tgu_ch0,
+ msm_mux_tgu_ch1,
+ msm_mux_tsense_pwm1,
+ msm_mux_tsense_pwm2,
+ msm_mux_uim0_clk,
+ msm_mux_uim0_data,
+ msm_mux_uim0_present,
+ msm_mux_uim0_reset,
+ msm_mux_uim1_clk,
+ msm_mux_uim1_data,
+ msm_mux_uim1_present,
+ msm_mux_uim1_reset,
+ msm_mux_usb2phy_ac,
+ msm_mux_usb_phy,
+ msm_mux_vfr_0,
+ msm_mux_vfr_1,
+ msm_mux_vsense_trigger,
+ msm_mux__,
+};
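+
+/*
+ * msm_mux__ is the trailing "no function" sentinel: the bare `_`
+ * arguments in the PINGROUP() entries below paste into msm_mux__, so
+ * unused mux slots all resolve to this placeholder.
+ */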
+
+static const char * const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+ "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+ "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+ "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+ "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+ "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+ "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+ "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+ "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+ "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+ "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
+ "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
+ "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104",
+ "gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110",
+ "gpio111", "gpio112", "gpio113", "gpio114", "gpio115", "gpio116",
+ "gpio117", "gpio118", "gpio119", "gpio120", "gpio121", "gpio122",
+ "gpio123", "gpio124", "gpio125", "gpio126", "gpio127", "gpio128",
+ "gpio129", "gpio130", "gpio131", "gpio132", "gpio133", "gpio134",
+ "gpio135", "gpio136", "gpio137", "gpio138", "gpio139", "gpio140",
+ "gpio141", "gpio142", "gpio143", "gpio144", "gpio145", "gpio146",
+ "gpio147", "gpio148", "gpio149", "gpio150", "gpio151", "gpio152",
+ "gpio153", "gpio154", "gpio155", "gpio156", "gpio157", "gpio158",
+ "gpio159", "gpio160", "gpio161", "gpio162", "gpio163", "gpio164",
+ "gpio165", "gpio166", "gpio167", "gpio168", "gpio169", "gpio170",
+ "gpio171", "gpio172", "gpio173", "gpio174",
+};
+static const char * const atest_char_groups[] = {
+ "gpio81",
+};
+static const char * const atest_char0_groups[] = {
+ "gpio77",
+};
+static const char * const atest_char1_groups[] = {
+ "gpio78",
+};
+static const char * const atest_char2_groups[] = {
+ "gpio79",
+};
+static const char * const atest_char3_groups[] = {
+ "gpio80",
+};
+static const char * const atest_usb0_groups[] = {
+ "gpio107",
+};
+static const char * const atest_usb00_groups[] = {
+ "gpio106",
+};
+static const char * const atest_usb01_groups[] = {
+ "gpio105",
+};
+static const char * const atest_usb02_groups[] = {
+ "gpio104",
+};
+static const char * const atest_usb03_groups[] = {
+ "gpio103",
+};
+static const char * const atest_usb1_groups[] = {
+ "gpio81",
+};
+static const char * const atest_usb10_groups[] = {
+ "gpio80",
+};
+static const char * const atest_usb11_groups[] = {
+ "gpio79",
+};
+static const char * const atest_usb12_groups[] = {
+ "gpio78",
+};
+static const char * const atest_usb13_groups[] = {
+ "gpio77",
+};
+static const char * const audio_ref_groups[] = {
+ "gpio105",
+};
+static const char * const cam_mclk_groups[] = {
+ "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio93",
+};
+static const char * const cci_async_groups[] = {
+ "gpio78", "gpio79", "gpio93",
+};
+static const char * const cci_i2c_groups[] = {
+ "gpio69", "gpio70", "gpio71", "gpio72", "gpio73", "gpio74", "gpio75",
+ "gpio76",
+};
+static const char * const cci_timer0_groups[] = {
+ "gpio20",
+};
+static const char * const cci_timer1_groups[] = {
+ "gpio21",
+};
+static const char * const cci_timer2_groups[] = {
+ "gpio77",
+};
+static const char * const cci_timer3_groups[] = {
+ "gpio78",
+};
+static const char * const cci_timer4_groups[] = {
+ "gpio79",
+};
+static const char * const cmu_rng0_groups[] = {
+ "gpio120",
+};
+static const char * const cmu_rng1_groups[] = {
+ "gpio119",
+};
+static const char * const cmu_rng2_groups[] = {
+ "gpio118",
+};
+static const char * const cmu_rng3_groups[] = {
+ "gpio117",
+};
+static const char * const coex_uart1_groups[] = {
+ "gpio127", "gpio128",
+};
+static const char * const cri_trng_groups[] = {
+ "gpio124",
+};
+static const char * const cri_trng0_groups[] = {
+ "gpio121",
+};
+static const char * const cri_trng1_groups[] = {
+ "gpio122",
+};
+static const char * const dbg_out_groups[] = {
+ "gpio38",
+};
+static const char * const ddr_bist_groups[] = {
+ "gpio56", "gpio57", "gpio58", "gpio59",
+};
+static const char * const ddr_pxi0_groups[] = {
+ "gpio14", "gpio15",
+};
+static const char * const ddr_pxi1_groups[] = {
+ "gpio12", "gpio13",
+};
+static const char * const dp_hot_groups[] = {
+ "gpio47",
+};
+static const char * const dp_lcd_groups[] = {
+ "gpio81",
+};
+static const char * const edp_hot_groups[] = {
+ "gpio60",
+};
+static const char * const edp_lcd_groups[] = {
+ "gpio46",
+};
+static const char * const gcc_gp1_groups[] = {
+ "gpio76", "gpio105",
+};
+static const char * const gcc_gp2_groups[] = {
+ "gpio77", "gpio106",
+};
+static const char * const gcc_gp3_groups[] = {
+ "gpio78", "gpio107",
+};
+static const char * const host2wlan_sol_groups[] = {
+ "gpio26",
+};
+static const char * const ibi_i3c_groups[] = {
+ "gpio0", "gpio1", "gpio4", "gpio5", "gpio36", "gpio37",
+};
+static const char * const jitter_bist_groups[] = {
+ "gpio79",
+};
+static const char * const lpass_slimbus_groups[] = {
+ "gpio94", "gpio95",
+};
+static const char * const mdp_vsync_groups[] = {
+ "gpio14", "gpio16", "gpio79", "gpio80", "gpio81",
+};
+static const char * const mdp_vsync0_groups[] = {
+ "gpio80",
+};
+static const char * const mdp_vsync1_groups[] = {
+ "gpio80",
+};
+static const char * const mdp_vsync2_groups[] = {
+ "gpio81",
+};
+static const char * const mdp_vsync3_groups[] = {
+ "gpio81",
+};
+static const char * const mdp_vsync4_groups[] = {
+ "gpio80",
+};
+static const char * const mdp_vsync5_groups[] = {
+ "gpio81",
+};
+static const char * const mi2s0_data0_groups[] = {
+ "gpio98",
+};
+static const char * const mi2s0_data1_groups[] = {
+ "gpio99",
+};
+static const char * const mi2s0_sck_groups[] = {
+ "gpio97",
+};
+static const char * const mi2s0_ws_groups[] = {
+ "gpio100",
+};
+static const char * const mi2s1_data0_groups[] = {
+ "gpio107",
+};
+static const char * const mi2s1_data1_groups[] = {
+ "gpio105",
+};
+static const char * const mi2s1_sck_groups[] = {
+ "gpio106",
+};
+static const char * const mi2s1_ws_groups[] = {
+ "gpio108",
+};
+static const char * const mi2s2_data0_groups[] = {
+ "gpio102",
+};
+static const char * const mi2s2_data1_groups[] = {
+ "gpio104",
+};
+static const char * const mi2s2_sck_groups[] = {
+ "gpio101",
+};
+static const char * const mi2s2_ws_groups[] = {
+ "gpio103",
+};
+static const char * const mss_grfc0_groups[] = {
+ "gpio117", "gpio132",
+};
+static const char * const mss_grfc1_groups[] = {
+ "gpio118",
+};
+static const char * const mss_grfc10_groups[] = {
+ "gpio127",
+};
+static const char * const mss_grfc11_groups[] = {
+ "gpio128",
+};
+static const char * const mss_grfc12_groups[] = {
+ "gpio131",
+};
+static const char * const mss_grfc2_groups[] = {
+ "gpio119",
+};
+static const char * const mss_grfc3_groups[] = {
+ "gpio120",
+};
+static const char * const mss_grfc4_groups[] = {
+ "gpio121",
+};
+static const char * const mss_grfc5_groups[] = {
+ "gpio122",
+};
+static const char * const mss_grfc6_groups[] = {
+ "gpio123",
+};
+static const char * const mss_grfc7_groups[] = {
+ "gpio124",
+};
+static const char * const mss_grfc8_groups[] = {
+ "gpio125",
+};
+static const char * const mss_grfc9_groups[] = {
+ "gpio126",
+};
+static const char * const nav_gpio0_groups[] = {
+ "gpio129",
+};
+static const char * const nav_gpio1_groups[] = {
+ "gpio130",
+};
+static const char * const nav_gpio2_groups[] = {
+ "gpio131",
+};
+static const char * const pa_indicator_groups[] = {
+ "gpio131",
+};
+static const char * const pcie0_clkreqn_groups[] = {
+ "gpio88",
+};
+static const char * const pcie1_clkreqn_groups[] = {
+ "gpio79",
+};
+static const char * const phase_flag_groups[] = {
+ "gpio12", "gpio13", "gpio14", "gpio15", "gpio16",
+ "gpio17", "gpio18", "gpio19", "gpio56", "gpio57",
+ "gpio58", "gpio59", "gpio60", "gpio61", "gpio62",
+ "gpio63", "gpio117", "gpio118", "gpio119", "gpio120",
+ "gpio121", "gpio122", "gpio123", "gpio124", "gpio125",
+ "gpio126", "gpio127", "gpio128", "gpio129", "gpio130",
+ "gpio131", "gpio132",
+};
+static const char * const pll_bist_groups[] = {
+ "gpio80",
+};
+static const char * const pll_bypassnl_groups[] = {
+ "gpio66",
+};
+static const char * const pll_clk_groups[] = {
+ "gpio140",
+};
+static const char * const pll_reset_groups[] = {
+ "gpio67",
+};
+static const char * const pri_mi2s_groups[] = {
+ "gpio96",
+};
+static const char * const prng_rosc_groups[] = {
+ "gpio123",
+};
+static const char * const qdss_groups[] = {
+ "gpio2", "gpio3", "gpio8", "gpio9", "gpio10",
+ "gpio11", "gpio12", "gpio13", "gpio20", "gpio21",
+ "gpio22", "gpio23", "gpio24", "gpio25", "gpio26",
+ "gpio27", "gpio28", "gpio29", "gpio58", "gpio59",
+ "gpio101", "gpio102", "gpio103", "gpio104", "gpio105",
+ "gpio106", "gpio107", "gpio108", "gpio150", "gpio151",
+ "gpio152", "gpio153", "gpio171", "gpio172", "gpio173",
+ "gpio174",
+};
+static const char * const qdss_cti_groups[] = {
+ "gpio15", "gpio16", "gpio18", "gpio19", "gpio156", "gpio157",
+ "gpio165", "gpio166",
+};
+static const char * const qlink0_enable_groups[] = {
+ "gpio134",
+};
+static const char * const qlink0_request_groups[] = {
+ "gpio133",
+};
+static const char * const qlink0_wmss_groups[] = {
+ "gpio135",
+};
+static const char * const qlink1_enable_groups[] = {
+ "gpio137",
+};
+static const char * const qlink1_request_groups[] = {
+ "gpio136",
+};
+static const char * const qlink1_wmss_groups[] = {
+ "gpio138",
+};
+static const char * const qspi_clk_groups[] = {
+ "gpio14",
+};
+static const char * const qspi_cs_groups[] = {
+ "gpio15", "gpio19",
+};
+static const char * const qspi_data_groups[] = {
+ "gpio12", "gpio13", "gpio16", "gpio17",
+};
+static const char * const qup00_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3",
+};
+static const char * const qup01_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7",
+};
+static const char * const qup02_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11",
+};
+static const char * const qup03_groups[] = {
+ "gpio12", "gpio13", "gpio14", "gpio15",
+};
+static const char * const qup04_groups[] = {
+ "gpio16", "gpio17", "gpio18", "gpio19",
+};
+static const char * const qup05_groups[] = {
+ "gpio20", "gpio21", "gpio22", "gpio23",
+};
+static const char * const qup06_groups[] = {
+ "gpio24", "gpio25", "gpio26", "gpio27",
+};
+static const char * const qup07_groups[] = {
+ "gpio2", "gpio3", "gpio6", "gpio28", "gpio29", "gpio30", "gpio31",
+};
+static const char * const qup10_groups[] = {
+ "gpio32", "gpio33", "gpio34", "gpio35",
+};
+static const char * const qup11_groups[] = {
+ "gpio36", "gpio37", "gpio38", "gpio39",
+};
+static const char * const qup12_groups[] = {
+ "gpio40", "gpio41", "gpio42", "gpio43",
+};
+static const char * const qup13_groups[] = {
+ "gpio44", "gpio45", "gpio46", "gpio47",
+};
+static const char * const qup14_groups[] = {
+ "gpio38", "gpio48", "gpio49", "gpio50", "gpio51", "gpio54", "gpio55",
+};
+static const char * const qup15_groups[] = {
+ "gpio52", "gpio53", "gpio54", "gpio55",
+};
+static const char * const qup16_groups[] = {
+ "gpio50", "gpio56", "gpio57", "gpio58", "gpio59", "gpio62", "gpio63",
+};
+static const char * const qup17_groups[] = {
+ "gpio60", "gpio61", "gpio62", "gpio63",
+};
+static const char * const sd_write_groups[] = {
+ "gpio61",
+};
+static const char * const sdc40_groups[] = {
+ "gpio12",
+};
+static const char * const sdc41_groups[] = {
+ "gpio13",
+};
+static const char * const sdc42_groups[] = {
+ "gpio16",
+};
+static const char * const sdc43_groups[] = {
+ "gpio17",
+};
+static const char * const sdc4_clk_groups[] = {
+ "gpio14",
+};
+static const char * const sdc4_cmd_groups[] = {
+ "gpio19",
+};
+static const char * const sec_mi2s_groups[] = {
+ "gpio105",
+};
+static const char * const tb_trig_groups[] = {
+ "gpio12", "gpio13", "gpio15",
+};
+static const char * const tgu_ch0_groups[] = {
+ "gpio65",
+};
+static const char * const tgu_ch1_groups[] = {
+ "gpio66",
+};
+static const char * const tsense_pwm1_groups[] = {
+ "gpio61",
+};
+static const char * const tsense_pwm2_groups[] = {
+ "gpio61",
+};
+static const char * const uim0_clk_groups[] = {
+ "gpio114",
+};
+static const char * const uim0_data_groups[] = {
+ "gpio113",
+};
+static const char * const uim0_present_groups[] = {
+ "gpio116",
+};
+static const char * const uim0_reset_groups[] = {
+ "gpio115",
+};
+static const char * const uim1_clk_groups[] = {
+ "gpio110",
+};
+static const char * const uim1_data_groups[] = {
+ "gpio109",
+};
+static const char * const uim1_present_groups[] = {
+ "gpio112",
+};
+static const char * const uim1_reset_groups[] = {
+ "gpio111",
+};
+static const char * const usb2phy_ac_groups[] = {
+ "gpio84", "gpio85",
+};
+static const char * const usb_phy_groups[] = {
+ "gpio140",
+};
+static const char * const vfr_0_groups[] = {
+ "gpio80",
+};
+static const char * const vfr_1_groups[] = {
+ "gpio103",
+};
+static const char * const vsense_trigger_groups[] = {
+ "gpio100",
+};
+
+static const struct msm_function sc7280_functions[] = {
+ FUNCTION(atest_char),
+ FUNCTION(atest_char0),
+ FUNCTION(atest_char1),
+ FUNCTION(atest_char2),
+ FUNCTION(atest_char3),
+ FUNCTION(atest_usb0),
+ FUNCTION(atest_usb00),
+ FUNCTION(atest_usb01),
+ FUNCTION(atest_usb02),
+ FUNCTION(atest_usb03),
+ FUNCTION(atest_usb1),
+ FUNCTION(atest_usb10),
+ FUNCTION(atest_usb11),
+ FUNCTION(atest_usb12),
+ FUNCTION(atest_usb13),
+ FUNCTION(audio_ref),
+ FUNCTION(cam_mclk),
+ FUNCTION(cci_async),
+ FUNCTION(cci_i2c),
+ FUNCTION(cci_timer0),
+ FUNCTION(cci_timer1),
+ FUNCTION(cci_timer2),
+ FUNCTION(cci_timer3),
+ FUNCTION(cci_timer4),
+ FUNCTION(cmu_rng0),
+ FUNCTION(cmu_rng1),
+ FUNCTION(cmu_rng2),
+ FUNCTION(cmu_rng3),
+ FUNCTION(coex_uart1),
+ FUNCTION(cri_trng),
+ FUNCTION(cri_trng0),
+ FUNCTION(cri_trng1),
+ FUNCTION(dbg_out),
+ FUNCTION(ddr_bist),
+ FUNCTION(ddr_pxi0),
+ FUNCTION(ddr_pxi1),
+ FUNCTION(dp_hot),
+ FUNCTION(dp_lcd),
+ FUNCTION(edp_hot),
+ FUNCTION(edp_lcd),
+ FUNCTION(gcc_gp1),
+ FUNCTION(gcc_gp2),
+ FUNCTION(gcc_gp3),
+ FUNCTION(gpio),
+ FUNCTION(host2wlan_sol),
+ FUNCTION(ibi_i3c),
+ FUNCTION(jitter_bist),
+ FUNCTION(lpass_slimbus),
+ FUNCTION(mdp_vsync),
+ FUNCTION(mdp_vsync0),
+ FUNCTION(mdp_vsync1),
+ FUNCTION(mdp_vsync2),
+ FUNCTION(mdp_vsync3),
+ FUNCTION(mdp_vsync4),
+ FUNCTION(mdp_vsync5),
+ FUNCTION(mi2s0_data0),
+ FUNCTION(mi2s0_data1),
+ FUNCTION(mi2s0_sck),
+ FUNCTION(mi2s0_ws),
+ FUNCTION(mi2s1_data0),
+ FUNCTION(mi2s1_data1),
+ FUNCTION(mi2s1_sck),
+ FUNCTION(mi2s1_ws),
+ FUNCTION(mi2s2_data0),
+ FUNCTION(mi2s2_data1),
+ FUNCTION(mi2s2_sck),
+ FUNCTION(mi2s2_ws),
+ FUNCTION(mss_grfc0),
+ FUNCTION(mss_grfc1),
+ FUNCTION(mss_grfc10),
+ FUNCTION(mss_grfc11),
+ FUNCTION(mss_grfc12),
+ FUNCTION(mss_grfc2),
+ FUNCTION(mss_grfc3),
+ FUNCTION(mss_grfc4),
+ FUNCTION(mss_grfc5),
+ FUNCTION(mss_grfc6),
+ FUNCTION(mss_grfc7),
+ FUNCTION(mss_grfc8),
+ FUNCTION(mss_grfc9),
+ FUNCTION(nav_gpio0),
+ FUNCTION(nav_gpio1),
+ FUNCTION(nav_gpio2),
+ FUNCTION(pa_indicator),
+ FUNCTION(pcie0_clkreqn),
+ FUNCTION(pcie1_clkreqn),
+ FUNCTION(phase_flag),
+ FUNCTION(pll_bist),
+ FUNCTION(pll_bypassnl),
+ FUNCTION(pll_clk),
+ FUNCTION(pll_reset),
+ FUNCTION(pri_mi2s),
+ FUNCTION(prng_rosc),
+ FUNCTION(qdss),
+ FUNCTION(qdss_cti),
+ FUNCTION(qlink0_enable),
+ FUNCTION(qlink0_request),
+ FUNCTION(qlink0_wmss),
+ FUNCTION(qlink1_enable),
+ FUNCTION(qlink1_request),
+ FUNCTION(qlink1_wmss),
+ FUNCTION(qspi_clk),
+ FUNCTION(qspi_cs),
+ FUNCTION(qspi_data),
+ FUNCTION(qup00),
+ FUNCTION(qup01),
+ FUNCTION(qup02),
+ FUNCTION(qup03),
+ FUNCTION(qup04),
+ FUNCTION(qup05),
+ FUNCTION(qup06),
+ FUNCTION(qup07),
+ FUNCTION(qup10),
+ FUNCTION(qup11),
+ FUNCTION(qup12),
+ FUNCTION(qup13),
+ FUNCTION(qup14),
+ FUNCTION(qup15),
+ FUNCTION(qup16),
+ FUNCTION(qup17),
+ FUNCTION(sdc40),
+ FUNCTION(sdc41),
+ FUNCTION(sdc42),
+ FUNCTION(sdc43),
+ FUNCTION(sdc4_clk),
+ FUNCTION(sdc4_cmd),
+ FUNCTION(sd_write),
+ FUNCTION(sec_mi2s),
+ FUNCTION(tb_trig),
+ FUNCTION(tgu_ch0),
+ FUNCTION(tgu_ch1),
+ FUNCTION(tsense_pwm1),
+ FUNCTION(tsense_pwm2),
+ FUNCTION(uim0_clk),
+ FUNCTION(uim0_data),
+ FUNCTION(uim0_present),
+ FUNCTION(uim0_reset),
+ FUNCTION(uim1_clk),
+ FUNCTION(uim1_data),
+ FUNCTION(uim1_present),
+ FUNCTION(uim1_reset),
+ FUNCTION(usb2phy_ac),
+ FUNCTION(usb_phy),
+ FUNCTION(vfr_0),
+ FUNCTION(vfr_1),
+ FUNCTION(vsense_trigger),
+};
+
+/*
+ * Every pin is maintained as a single group, and a missing or
+ * non-existent pin is maintained as a dummy group to keep the pin group
+ * index in sync with the pin descriptors registered with the pinctrl
+ * core. Clients cannot request these dummy pin groups.
+ */
+static const struct msm_pingroup sc7280_groups[] = {
+ [0] = PINGROUP(0, qup00, ibi_i3c, _, _, _, _, _, _, _),
+ [1] = PINGROUP(1, qup00, ibi_i3c, _, _, _, _, _, _, _),
+ [2] = PINGROUP(2, qup00, qup07, _, qdss, _, _, _, _, _),
+ [3] = PINGROUP(3, qup00, qup07, _, qdss, _, _, _, _, _),
+ [4] = PINGROUP(4, qup01, ibi_i3c, _, _, _, _, _, _, _),
+ [5] = PINGROUP(5, qup01, ibi_i3c, _, _, _, _, _, _, _),
+ [6] = PINGROUP(6, qup01, qup07, _, _, _, _, _, _, _),
+ [7] = PINGROUP(7, qup01, _, _, _, _, _, _, _, _),
+ [8] = PINGROUP(8, qup02, _, qdss, _, _, _, _, _, _),
+ [9] = PINGROUP(9, qup02, _, qdss, _, _, _, _, _, _),
+ [10] = PINGROUP(10, qup02, _, qdss, _, _, _, _, _, _),
+ [11] = PINGROUP(11, qup02, _, qdss, _, _, _, _, _, _),
+ [12] = PINGROUP(12, qup03, qspi_data, sdc40, tb_trig, phase_flag, qdss, ddr_pxi1, _, _),
+ [13] = PINGROUP(13, qup03, qspi_data, sdc41, tb_trig, phase_flag, qdss, ddr_pxi1, _, _),
+ [14] = PINGROUP(14, qup03, qspi_clk, sdc4_clk, mdp_vsync, phase_flag, ddr_pxi0, _, _, _),
+ [15] = PINGROUP(15, qup03, qspi_cs, tb_trig, phase_flag, qdss_cti, ddr_pxi0, _, _, _),
+ [16] = PINGROUP(16, qup04, qspi_data, sdc42, mdp_vsync, phase_flag, qdss_cti, _, _, _),
+ [17] = PINGROUP(17, qup04, qspi_data, sdc43, _, phase_flag, _, _, _, _),
+ [18] = PINGROUP(18, qup04, _, phase_flag, qdss_cti, _, _, _, _, _),
+ [19] = PINGROUP(19, qup04, qspi_cs, sdc4_cmd, _, phase_flag, qdss_cti, _, _, _),
+ [20] = PINGROUP(20, qup05, cci_timer0, _, qdss, _, _, _, _, _),
+ [21] = PINGROUP(21, qup05, cci_timer1, _, qdss, _, _, _, _, _),
+ [22] = PINGROUP(22, qup05, _, qdss, _, _, _, _, _, _),
+ [23] = PINGROUP(23, qup05, _, qdss, _, _, _, _, _, _),
+ [24] = PINGROUP(24, qup06, _, qdss, _, _, _, _, _, _),
+ [25] = PINGROUP(25, qup06, _, qdss, _, _, _, _, _, _),
+ [26] = PINGROUP(26, qup06, host2wlan_sol, _, qdss, _, _, _, _, _),
+ [27] = PINGROUP(27, qup06, _, qdss, _, _, _, _, _, _),
+ [28] = PINGROUP(28, qup07, _, qdss, _, _, _, _, _, _),
+ [29] = PINGROUP(29, qup07, qdss, _, _, _, _, _, _, _),
+ [30] = PINGROUP(30, qup07, _, _, _, _, _, _, _, _),
+ [31] = PINGROUP(31, qup07, _, _, _, _, _, _, _, _),
+ [32] = PINGROUP(32, qup10, _, _, _, _, _, _, _, _),
+ [33] = PINGROUP(33, qup10, _, _, _, _, _, _, _, _),
+ [34] = PINGROUP(34, qup10, _, _, _, _, _, _, _, _),
+ [35] = PINGROUP(35, qup10, _, _, _, _, _, _, _, _),
+ [36] = PINGROUP(36, qup11, ibi_i3c, _, _, _, _, _, _, _),
+ [37] = PINGROUP(37, qup11, ibi_i3c, _, _, _, _, _, _, _),
+ [38] = PINGROUP(38, qup11, qup14, dbg_out, _, _, _, _, _, _),
+ [39] = PINGROUP(39, qup11, _, _, _, _, _, _, _, _),
+ [40] = PINGROUP(40, qup12, _, _, _, _, _, _, _, _),
+ [41] = PINGROUP(41, qup12, _, _, _, _, _, _, _, _),
+ [42] = PINGROUP(42, qup12, _, _, _, _, _, _, _, _),
+ [43] = PINGROUP(43, qup12, _, _, _, _, _, _, _, _),
+ [44] = PINGROUP(44, qup13, _, _, _, _, _, _, _, _),
+ [45] = PINGROUP(45, qup13, _, _, _, _, _, _, _, _),
+ [46] = PINGROUP(46, qup13, edp_lcd, _, _, _, _, _, _, _),
+ [47] = PINGROUP(47, qup13, dp_hot, _, _, _, _, _, _, _),
+ [48] = PINGROUP(48, qup14, _, _, _, _, _, _, _, _),
+ [49] = PINGROUP(49, qup14, _, _, _, _, _, _, _, _),
+ [50] = PINGROUP(50, qup14, qup16, _, _, _, _, _, _, _),
+ [51] = PINGROUP(51, qup14, _, _, _, _, _, _, _, _),
+ [52] = PINGROUP(52, qup15, _, _, _, _, _, _, _, _),
+ [53] = PINGROUP(53, qup15, _, _, _, _, _, _, _, _),
+ [54] = PINGROUP(54, qup15, qup14, _, _, _, _, _, _, _),
+ [55] = PINGROUP(55, qup15, qup14, _, _, _, _, _, _, _),
+ [56] = PINGROUP(56, qup16, ddr_bist, phase_flag, _, _, _, _, _, _),
+ [57] = PINGROUP(57, qup16, ddr_bist, phase_flag, _, _, _, _, _, _),
+ [58] = PINGROUP(58, qup16, ddr_bist, phase_flag, qdss, _, _, _, _, _),
+ [59] = PINGROUP(59, qup16, ddr_bist, phase_flag, qdss, _, _, _, _, _),
+ [60] = PINGROUP(60, qup17, edp_hot, _, phase_flag, _, _, _, _, _),
+ [61] = PINGROUP(61, qup17, sd_write, phase_flag, tsense_pwm1, tsense_pwm2, _, _, _, _),
+ [62] = PINGROUP(62, qup17, qup16, phase_flag, _, _, _, _, _, _),
+ [63] = PINGROUP(63, qup17, qup16, phase_flag, _, _, _, _, _, _),
+ [64] = PINGROUP(64, cam_mclk, _, _, _, _, _, _, _, _),
+ [65] = PINGROUP(65, cam_mclk, tgu_ch0, _, _, _, _, _, _, _),
+ [66] = PINGROUP(66, cam_mclk, pll_bypassnl, tgu_ch1, _, _, _, _, _, _),
+ [67] = PINGROUP(67, cam_mclk, pll_reset, _, _, _, _, _, _, _),
+ [68] = PINGROUP(68, cam_mclk, _, _, _, _, _, _, _, _),
+ [69] = PINGROUP(69, cci_i2c, _, _, _, _, _, _, _, _),
+ [70] = PINGROUP(70, cci_i2c, _, _, _, _, _, _, _, _),
+ [71] = PINGROUP(71, cci_i2c, _, _, _, _, _, _, _, _),
+ [72] = PINGROUP(72, cci_i2c, _, _, _, _, _, _, _, _),
+ [73] = PINGROUP(73, cci_i2c, _, _, _, _, _, _, _, _),
+ [74] = PINGROUP(74, cci_i2c, _, _, _, _, _, _, _, _),
+ [75] = PINGROUP(75, cci_i2c, _, _, _, _, _, _, _, _),
+ [76] = PINGROUP(76, cci_i2c, gcc_gp1, _, _, _, _, _, _, _),
+ [77] = PINGROUP(77, cci_timer2, gcc_gp2, _, atest_usb13, atest_char0, _, _, _, _),
+ [78] = PINGROUP(78, cci_timer3, cci_async, gcc_gp3, _, atest_usb12, atest_char1, _, _, _),
+ [79] = PINGROUP(79, cci_timer4, cci_async, pcie1_clkreqn, mdp_vsync, jitter_bist, atest_usb11, atest_char2, _, _),
+ [80] = PINGROUP(80, mdp_vsync, vfr_0, mdp_vsync0, mdp_vsync1, mdp_vsync4, pll_bist, atest_usb10, atest_char3, _),
+ [81] = PINGROUP(81, mdp_vsync, dp_lcd, mdp_vsync2, mdp_vsync3, mdp_vsync5, atest_usb1, atest_char, _, _),
+ [82] = PINGROUP(82, _, _, _, _, _, _, _, _, _),
+ [83] = PINGROUP(83, _, _, _, _, _, _, _, _, _),
+ [84] = PINGROUP(84, usb2phy_ac, _, _, _, _, _, _, _, _),
+ [85] = PINGROUP(85, usb2phy_ac, _, _, _, _, _, _, _, _),
+ [86] = PINGROUP(86, _, _, _, _, _, _, _, _, _),
+ [87] = PINGROUP(87, _, _, _, _, _, _, _, _, _),
+ [88] = PINGROUP(88, pcie0_clkreqn, _, _, _, _, _, _, _, _),
+ [89] = PINGROUP(89, _, _, _, _, _, _, _, _, _),
+ [90] = PINGROUP(90, _, _, _, _, _, _, _, _, _),
+ [91] = PINGROUP(91, _, _, _, _, _, _, _, _, _),
+ [92] = PINGROUP(92, _, _, _, _, _, _, _, _, _),
+ [93] = PINGROUP(93, cam_mclk, cci_async, _, _, _, _, _, _, _),
+ [94] = PINGROUP(94, lpass_slimbus, _, _, _, _, _, _, _, _),
+ [95] = PINGROUP(95, lpass_slimbus, _, _, _, _, _, _, _, _),
+ [96] = PINGROUP(96, pri_mi2s, _, _, _, _, _, _, _, _),
+ [97] = PINGROUP(97, mi2s0_sck, _, _, _, _, _, _, _, _),
+ [98] = PINGROUP(98, mi2s0_data0, _, _, _, _, _, _, _, _),
+ [99] = PINGROUP(99, mi2s0_data1, _, _, _, _, _, _, _, _),
+ [100] = PINGROUP(100, mi2s0_ws, _, vsense_trigger, _, _, _, _, _, _),
+ [101] = PINGROUP(101, mi2s2_sck, _, qdss, _, _, _, _, _, _),
+ [102] = PINGROUP(102, mi2s2_data0, _, _, qdss, _, _, _, _, _),
+ [103] = PINGROUP(103, mi2s2_ws, vfr_1, _, _, qdss, _, atest_usb03, _, _),
+ [104] = PINGROUP(104, mi2s2_data1, _, _, qdss, _, atest_usb02, _, _, _),
+ [105] = PINGROUP(105, sec_mi2s, mi2s1_data1, audio_ref, gcc_gp1, _, qdss, atest_usb01, _, _),
+ [106] = PINGROUP(106, mi2s1_sck, gcc_gp2, _, qdss, atest_usb00, _, _, _, _),
+ [107] = PINGROUP(107, mi2s1_data0, gcc_gp3, _, qdss, atest_usb0, _, _, _, _),
+ [108] = PINGROUP(108, mi2s1_ws, _, qdss, _, _, _, _, _, _),
+ [109] = PINGROUP(109, uim1_data, _, _, _, _, _, _, _, _),
+ [110] = PINGROUP(110, uim1_clk, _, _, _, _, _, _, _, _),
+ [111] = PINGROUP(111, uim1_reset, _, _, _, _, _, _, _, _),
+ [112] = PINGROUP(112, uim1_present, _, _, _, _, _, _, _, _),
+ [113] = PINGROUP(113, uim0_data, _, _, _, _, _, _, _, _),
+ [114] = PINGROUP(114, uim0_clk, _, _, _, _, _, _, _, _),
+ [115] = PINGROUP(115, uim0_reset, _, _, _, _, _, _, _, _),
+ [116] = PINGROUP(116, uim0_present, _, _, _, _, _, _, _, _),
+ [117] = PINGROUP(117, _, mss_grfc0, cmu_rng3, phase_flag, _, _, _, _, _),
+ [118] = PINGROUP(118, _, mss_grfc1, cmu_rng2, phase_flag, _, _, _, _, _),
+ [119] = PINGROUP(119, _, mss_grfc2, cmu_rng1, phase_flag, _, _, _, _, _),
+ [120] = PINGROUP(120, _, mss_grfc3, cmu_rng0, phase_flag, _, _, _, _, _),
+ [121] = PINGROUP(121, _, mss_grfc4, cri_trng0, phase_flag, _, _, _, _, _),
+ [122] = PINGROUP(122, _, mss_grfc5, cri_trng1, phase_flag, _, _, _, _, _),
+ [123] = PINGROUP(123, _, mss_grfc6, prng_rosc, phase_flag, _, _, _, _, _),
+ [124] = PINGROUP(124, _, mss_grfc7, cri_trng, phase_flag, _, _, _, _, _),
+ [125] = PINGROUP(125, _, mss_grfc8, phase_flag, _, _, _, _, _, _),
+ [126] = PINGROUP(126, _, mss_grfc9, phase_flag, _, _, _, _, _, _),
+ [127] = PINGROUP(127, coex_uart1, mss_grfc10, phase_flag, _, _, _, _, _, _),
+ [128] = PINGROUP(128, coex_uart1, mss_grfc11, phase_flag, _, _, _, _, _, _),
+ [129] = PINGROUP(129, nav_gpio0, phase_flag, _, _, _, _, _, _, _),
+ [130] = PINGROUP(130, nav_gpio1, phase_flag, _, _, _, _, _, _, _),
+ [131] = PINGROUP(131, mss_grfc12, nav_gpio2, pa_indicator, phase_flag, _, _, _, _, _),
+ [132] = PINGROUP(132, mss_grfc0, phase_flag, _, _, _, _, _, _, _),
+ [133] = PINGROUP(133, qlink0_request, _, _, _, _, _, _, _, _),
+ [134] = PINGROUP(134, qlink0_enable, _, _, _, _, _, _, _, _),
+ [135] = PINGROUP(135, qlink0_wmss, _, _, _, _, _, _, _, _),
+ [136] = PINGROUP(136, qlink1_request, _, _, _, _, _, _, _, _),
+ [137] = PINGROUP(137, qlink1_enable, _, _, _, _, _, _, _, _),
+ [138] = PINGROUP(138, qlink1_wmss, _, _, _, _, _, _, _, _),
+ [139] = PINGROUP(139, _, _, _, _, _, _, _, _, _),
+ [140] = PINGROUP(140, usb_phy, pll_clk, _, _, _, _, _, _, _),
+ [141] = PINGROUP(141, _, _, _, _, _, _, _, _, _),
+ [142] = PINGROUP(142, _, _, _, _, _, _, _, _, _),
+ [143] = PINGROUP(143, _, _, _, _, _, _, _, _, _),
+ [144] = PINGROUP(144, _, _, _, _, _, _, _, _, _),
+ [145] = PINGROUP(145, _, _, _, _, _, _, _, _, _),
+ [146] = PINGROUP(146, _, _, _, _, _, _, _, _, _),
+ [147] = PINGROUP(147, _, _, _, _, _, _, _, _, _),
+ [148] = PINGROUP(148, _, _, _, _, _, _, _, _, _),
+ [149] = PINGROUP(149, _, _, _, _, _, _, _, _, _),
+ [150] = PINGROUP(150, qdss, _, _, _, _, _, _, _, _),
+ [151] = PINGROUP(151, qdss, _, _, _, _, _, _, _, _),
+ [152] = PINGROUP(152, qdss, _, _, _, _, _, _, _, _),
+ [153] = PINGROUP(153, qdss, _, _, _, _, _, _, _, _),
+ [154] = PINGROUP(154, _, _, _, _, _, _, _, _, _),
+ [155] = PINGROUP(155, _, _, _, _, _, _, _, _, _),
+ [156] = PINGROUP(156, qdss_cti, _, _, _, _, _, _, _, _),
+ [157] = PINGROUP(157, qdss_cti, _, _, _, _, _, _, _, _),
+ [158] = PINGROUP(158, _, _, _, _, _, _, _, _, _),
+ [159] = PINGROUP(159, _, _, _, _, _, _, _, _, _),
+ [160] = PINGROUP(160, _, _, _, _, _, _, _, _, _),
+ [161] = PINGROUP(161, _, _, _, _, _, _, _, _, _),
+ [162] = PINGROUP(162, _, _, _, _, _, _, _, _, _),
+ [163] = PINGROUP(163, _, _, _, _, _, _, _, _, _),
+ [164] = PINGROUP(164, _, _, _, _, _, _, _, _, _),
+ [165] = PINGROUP(165, qdss_cti, _, _, _, _, _, _, _, _),
+ [166] = PINGROUP(166, qdss_cti, _, _, _, _, _, _, _, _),
+ [167] = PINGROUP(167, _, _, _, _, _, _, _, _, _),
+ [168] = PINGROUP(168, _, _, _, _, _, _, _, _, _),
+ [169] = PINGROUP(169, _, _, _, _, _, _, _, _, _),
+ [170] = PINGROUP(170, _, _, _, _, _, _, _, _, _),
+ [171] = PINGROUP(171, qdss, _, _, _, _, _, _, _, _),
+ [172] = PINGROUP(172, qdss, _, _, _, _, _, _, _, _),
+ [173] = PINGROUP(173, qdss, _, _, _, _, _, _, _, _),
+ [174] = PINGROUP(174, qdss, _, _, _, _, _, _, _, _),
+ [175] = UFS_RESET(ufs_reset, 0x1be000),
+ [176] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x1b3000, 15, 0),
+ [177] = SDC_QDSD_PINGROUP(sdc1_clk, 0x1b3000, 13, 6),
+ [178] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x1b3000, 11, 3),
+ [179] = SDC_QDSD_PINGROUP(sdc1_data, 0x1b3000, 9, 0),
+ [180] = SDC_QDSD_PINGROUP(sdc2_clk, 0x1b4000, 14, 6),
+ [181] = SDC_QDSD_PINGROUP(sdc2_cmd, 0x1b4000, 11, 3),
+ [182] = SDC_QDSD_PINGROUP(sdc2_data, 0x1b4000, 9, 0),
+};
+
+static const struct msm_pinctrl_soc_data sc7280_pinctrl = {
+ .pins = sc7280_pins,
+ .npins = ARRAY_SIZE(sc7280_pins),
+ .functions = sc7280_functions,
+ .nfunctions = ARRAY_SIZE(sc7280_functions),
+ .groups = sc7280_groups,
+ .ngroups = ARRAY_SIZE(sc7280_groups),
+ .ngpios = 176,
+};
+
+static int sc7280_pinctrl_probe(struct platform_device *pdev)
+{
+ return msm_pinctrl_probe(pdev, &sc7280_pinctrl);
+}
+
+static const struct of_device_id sc7280_pinctrl_of_match[] = {
+ { .compatible = "qcom,sc7280-pinctrl", },
+ { },
+};
+
+static struct platform_driver sc7280_pinctrl_driver = {
+ .driver = {
+ .name = "sc7280-pinctrl",
+ .of_match_table = sc7280_pinctrl_of_match,
+ },
+ .probe = sc7280_pinctrl_probe,
+ .remove = msm_pinctrl_remove,
+};
+
+static int __init sc7280_pinctrl_init(void)
+{
+ return platform_driver_register(&sc7280_pinctrl_driver);
+}
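+/*
+ * Registered at arch_initcall time so pin configuration is in place
+ * before dependent device drivers probe.
+ */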
+arch_initcall(sc7280_pinctrl_init);
+
+static void __exit sc7280_pinctrl_exit(void)
+{
+ platform_driver_unregister(&sc7280_pinctrl_driver);
+}
+module_exit(sc7280_pinctrl_exit);
+
+MODULE_DESCRIPTION("QTI sc7280 pinctrl driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, sc7280_pinctrl_of_match);
diff --git a/drivers/pinctrl/qcom/pinctrl-sdx55.c b/drivers/pinctrl/qcom/pinctrl-sdx55.c
new file mode 100644
index 000000000000..2b5b0e2b03ad
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-sdx55.c
@@ -0,0 +1,1018 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-msm.h"
+
+#define FUNCTION(fname) \
+ [msm_mux_##fname] = { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
+
+#define REG_SIZE 0x1000
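+/*
+ * Each TLMM pin owns a REG_SIZE-byte register window; PINGROUP() below
+ * derives the ctl/io/intr_cfg/intr_status offsets from that stride.
+ */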
+
+#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9) \
+ { \
+ .name = "gpio" #id, \
+ .pins = gpio##id##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(gpio##id##_pins), \
+ .funcs = (int[]){ \
+ msm_mux_gpio, /* gpio mode */ \
+ msm_mux_##f1, \
+ msm_mux_##f2, \
+ msm_mux_##f3, \
+ msm_mux_##f4, \
+ msm_mux_##f5, \
+ msm_mux_##f6, \
+ msm_mux_##f7, \
+ msm_mux_##f8, \
+ msm_mux_##f9 \
+ }, \
+ .nfuncs = 10, \
+ .ctl_reg = REG_SIZE * id, \
+ .io_reg = 0x4 + REG_SIZE * id, \
+ .intr_cfg_reg = 0x8 + REG_SIZE * id, \
+ .intr_status_reg = 0xc + REG_SIZE * id, \
+ .intr_target_reg = 0x8 + REG_SIZE * id, \
+ .mux_bit = 2, \
+ .pull_bit = 0, \
+ .drv_bit = 6, \
+ .oe_bit = 9, \
+ .in_bit = 0, \
+ .out_bit = 1, \
+ .intr_enable_bit = 0, \
+ .intr_status_bit = 0, \
+ .intr_target_bit = 5, \
+ .intr_target_kpss_val = 3, \
+ .intr_raw_status_bit = 4, \
+ .intr_polarity_bit = 1, \
+ .intr_detection_bit = 2, \
+ .intr_detection_width = 2, \
+ }
+
+#define SDC_PINGROUP(pg_name, ctl, pull, drv) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = ctl, \
+ .io_reg = 0, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = pull, \
+ .drv_bit = drv, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = -1, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
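+/*
+ * SDC pads have no function mux, GPIO I/O or interrupt registers, so the
+ * corresponding fields are set to -1; only pull and drive strength apply.
+ */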
+
+static const struct pinctrl_pin_desc sdx55_pins[] = {
+ PINCTRL_PIN(0, "GPIO_0"),
+ PINCTRL_PIN(1, "GPIO_1"),
+ PINCTRL_PIN(2, "GPIO_2"),
+ PINCTRL_PIN(3, "GPIO_3"),
+ PINCTRL_PIN(4, "GPIO_4"),
+ PINCTRL_PIN(5, "GPIO_5"),
+ PINCTRL_PIN(6, "GPIO_6"),
+ PINCTRL_PIN(7, "GPIO_7"),
+ PINCTRL_PIN(8, "GPIO_8"),
+ PINCTRL_PIN(9, "GPIO_9"),
+ PINCTRL_PIN(10, "GPIO_10"),
+ PINCTRL_PIN(11, "GPIO_11"),
+ PINCTRL_PIN(12, "GPIO_12"),
+ PINCTRL_PIN(13, "GPIO_13"),
+ PINCTRL_PIN(14, "GPIO_14"),
+ PINCTRL_PIN(15, "GPIO_15"),
+ PINCTRL_PIN(16, "GPIO_16"),
+ PINCTRL_PIN(17, "GPIO_17"),
+ PINCTRL_PIN(18, "GPIO_18"),
+ PINCTRL_PIN(19, "GPIO_19"),
+ PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(21, "GPIO_21"),
+ PINCTRL_PIN(22, "GPIO_22"),
+ PINCTRL_PIN(23, "GPIO_23"),
+ PINCTRL_PIN(24, "GPIO_24"),
+ PINCTRL_PIN(25, "GPIO_25"),
+ PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(27, "GPIO_27"),
+ PINCTRL_PIN(28, "GPIO_28"),
+ PINCTRL_PIN(29, "GPIO_29"),
+ PINCTRL_PIN(30, "GPIO_30"),
+ PINCTRL_PIN(31, "GPIO_31"),
+ PINCTRL_PIN(32, "GPIO_32"),
+ PINCTRL_PIN(33, "GPIO_33"),
+ PINCTRL_PIN(34, "GPIO_34"),
+ PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(36, "GPIO_36"),
+ PINCTRL_PIN(37, "GPIO_37"),
+ PINCTRL_PIN(38, "GPIO_38"),
+ PINCTRL_PIN(39, "GPIO_39"),
+ PINCTRL_PIN(40, "GPIO_40"),
+ PINCTRL_PIN(41, "GPIO_41"),
+ PINCTRL_PIN(42, "GPIO_42"),
+ PINCTRL_PIN(43, "GPIO_43"),
+ PINCTRL_PIN(44, "GPIO_44"),
+ PINCTRL_PIN(45, "GPIO_45"),
+ PINCTRL_PIN(46, "GPIO_46"),
+ PINCTRL_PIN(47, "GPIO_47"),
+ PINCTRL_PIN(48, "GPIO_48"),
+ PINCTRL_PIN(49, "GPIO_49"),
+ PINCTRL_PIN(50, "GPIO_50"),
+ PINCTRL_PIN(51, "GPIO_51"),
+ PINCTRL_PIN(52, "GPIO_52"),
+ PINCTRL_PIN(53, "GPIO_53"),
+ PINCTRL_PIN(54, "GPIO_54"),
+ PINCTRL_PIN(55, "GPIO_55"),
+ PINCTRL_PIN(56, "GPIO_56"),
+ PINCTRL_PIN(57, "GPIO_57"),
+ PINCTRL_PIN(58, "GPIO_58"),
+ PINCTRL_PIN(59, "GPIO_59"),
+ PINCTRL_PIN(60, "GPIO_60"),
+ PINCTRL_PIN(61, "GPIO_61"),
+ PINCTRL_PIN(62, "GPIO_62"),
+ PINCTRL_PIN(63, "GPIO_63"),
+ PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
+ PINCTRL_PIN(68, "GPIO_68"),
+ PINCTRL_PIN(69, "GPIO_69"),
+ PINCTRL_PIN(70, "GPIO_70"),
+ PINCTRL_PIN(71, "GPIO_71"),
+ PINCTRL_PIN(72, "GPIO_72"),
+ PINCTRL_PIN(73, "GPIO_73"),
+ PINCTRL_PIN(74, "GPIO_74"),
+ PINCTRL_PIN(75, "GPIO_75"),
+ PINCTRL_PIN(76, "GPIO_76"),
+ PINCTRL_PIN(77, "GPIO_77"),
+ PINCTRL_PIN(78, "GPIO_78"),
+ PINCTRL_PIN(79, "GPIO_79"),
+ PINCTRL_PIN(80, "GPIO_80"),
+ PINCTRL_PIN(81, "GPIO_81"),
+ PINCTRL_PIN(82, "GPIO_82"),
+ PINCTRL_PIN(83, "GPIO_83"),
+ PINCTRL_PIN(84, "GPIO_84"),
+ PINCTRL_PIN(85, "GPIO_85"),
+ PINCTRL_PIN(86, "GPIO_86"),
+ PINCTRL_PIN(87, "GPIO_87"),
+ PINCTRL_PIN(88, "GPIO_88"),
+ PINCTRL_PIN(89, "GPIO_89"),
+ PINCTRL_PIN(90, "GPIO_90"),
+ PINCTRL_PIN(91, "GPIO_91"),
+ PINCTRL_PIN(92, "GPIO_92"),
+ PINCTRL_PIN(93, "GPIO_93"),
+ PINCTRL_PIN(94, "GPIO_94"),
+ PINCTRL_PIN(95, "GPIO_95"),
+ PINCTRL_PIN(96, "GPIO_96"),
+ PINCTRL_PIN(97, "GPIO_97"),
+ PINCTRL_PIN(98, "GPIO_98"),
+ PINCTRL_PIN(99, "GPIO_99"),
+ PINCTRL_PIN(100, "GPIO_100"),
+ PINCTRL_PIN(101, "GPIO_101"),
+ PINCTRL_PIN(102, "GPIO_102"),
+ PINCTRL_PIN(103, "GPIO_103"),
+ PINCTRL_PIN(104, "GPIO_104"),
+ PINCTRL_PIN(105, "GPIO_105"),
+ PINCTRL_PIN(106, "GPIO_106"),
+ PINCTRL_PIN(107, "GPIO_107"),
+ PINCTRL_PIN(108, "SDC1_RCLK"),
+ PINCTRL_PIN(109, "SDC1_CLK"),
+ PINCTRL_PIN(110, "SDC1_CMD"),
+ PINCTRL_PIN(111, "SDC1_DATA"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) \
+ static const unsigned int gpio##pin##_pins[] = { pin }
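+/* One single-pin array per GPIO, consumed by the PINGROUP() entries below. */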
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+
+static const unsigned int sdc1_rclk_pins[] = { 108 };
+static const unsigned int sdc1_clk_pins[] = { 109 };
+static const unsigned int sdc1_cmd_pins[] = { 110 };
+static const unsigned int sdc1_data_pins[] = { 111 };
+
+enum sdx55_functions {
+ msm_mux_adsp_ext,
+ msm_mux_atest,
+ msm_mux_audio_ref,
+ msm_mux_bimc_dte0,
+ msm_mux_bimc_dte1,
+ msm_mux_blsp_i2c1,
+ msm_mux_blsp_i2c2,
+ msm_mux_blsp_i2c3,
+ msm_mux_blsp_i2c4,
+ msm_mux_blsp_spi1,
+ msm_mux_blsp_spi2,
+ msm_mux_blsp_spi3,
+ msm_mux_blsp_spi4,
+ msm_mux_blsp_uart1,
+ msm_mux_blsp_uart2,
+ msm_mux_blsp_uart3,
+ msm_mux_blsp_uart4,
+ msm_mux_char_exec,
+ msm_mux_coex_uart,
+ msm_mux_coex_uart2,
+ msm_mux_cri_trng,
+ msm_mux_cri_trng0,
+ msm_mux_cri_trng1,
+ msm_mux_dbg_out,
+ msm_mux_ddr_bist,
+ msm_mux_ddr_pxi0,
+ msm_mux_ebi0_wrcdc,
+ msm_mux_ebi2_a,
+ msm_mux_ebi2_lcd,
+ msm_mux_emac_gcc0,
+ msm_mux_emac_gcc1,
+ msm_mux_emac_pps0,
+ msm_mux_emac_pps1,
+ msm_mux_ext_dbg,
+ msm_mux_gcc_gp1,
+ msm_mux_gcc_gp2,
+ msm_mux_gcc_gp3,
+ msm_mux_gcc_plltest,
+ msm_mux_gpio,
+ msm_mux_i2s_mclk,
+ msm_mux_jitter_bist,
+ msm_mux_ldo_en,
+ msm_mux_ldo_update,
+ msm_mux_mgpi_clk,
+ msm_mux_m_voc,
+ msm_mux_native_char,
+ msm_mux_native_char0,
+ msm_mux_native_char1,
+ msm_mux_native_char2,
+ msm_mux_native_char3,
+ msm_mux_native_tsens,
+ msm_mux_native_tsense,
+ msm_mux_nav_gpio,
+ msm_mux_pa_indicator,
+ msm_mux_pcie_clkreq,
+ msm_mux_pci_e,
+ msm_mux_pll_bist,
+ msm_mux_pll_ref,
+ msm_mux_pll_test,
+ msm_mux_pri_mi2s,
+ msm_mux_prng_rosc,
+ msm_mux_qdss_cti,
+ msm_mux_qdss_gpio,
+ msm_mux_qdss_stm,
+ msm_mux_qlink0_en,
+ msm_mux_qlink0_req,
+ msm_mux_qlink0_wmss,
+ msm_mux_qlink1_en,
+ msm_mux_qlink1_req,
+ msm_mux_qlink1_wmss,
+ msm_mux_spmi_coex,
+ msm_mux_sec_mi2s,
+ msm_mux_spmi_vgi,
+ msm_mux_tgu_ch0,
+ msm_mux_uim1_clk,
+ msm_mux_uim1_data,
+ msm_mux_uim1_present,
+ msm_mux_uim1_reset,
+ msm_mux_uim2_clk,
+ msm_mux_uim2_data,
+ msm_mux_uim2_present,
+ msm_mux_uim2_reset,
+ msm_mux_usb2phy_ac,
+ msm_mux_vsense_trigger,
+ msm_mux__,
+};
+
+static const char * const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+ "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+ "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+ "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+ "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+ "gpio50", "gpio51", "gpio52", "gpio52", "gpio53", "gpio53", "gpio54",
+ "gpio55", "gpio56", "gpio57", "gpio58", "gpio59", "gpio60", "gpio61",
+ "gpio62", "gpio63", "gpio64", "gpio65", "gpio66", "gpio67", "gpio68",
+ "gpio69", "gpio70", "gpio71", "gpio72", "gpio73", "gpio74", "gpio75",
+ "gpio76", "gpio77", "gpio78", "gpio79", "gpio80", "gpio81", "gpio82",
+ "gpio83", "gpio84", "gpio85", "gpio86", "gpio87", "gpio88", "gpio89",
+ "gpio90", "gpio91", "gpio92", "gpio93", "gpio94", "gpio95", "gpio96",
+ "gpio97", "gpio98", "gpio99", "gpio100", "gpio101", "gpio102",
+ "gpio103", "gpio104", "gpio105", "gpio106", "gpio107",
+};
+
+static const char * const qdss_stm_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7", "gpio12", "gpio13",
+ "gpio14", "gpio15", "gpio16", "gpio17", "gpio18", "gpio19" "gpio20", "gpio21", "gpio22",
+ "gpio23", "gpio44", "gpio45", "gpio52", "gpio53", "gpio56", "gpio57", "gpio61", "gpio62",
+ "gpio63", "gpio64", "gpio65", "gpio66",
+};
+
+static const char * const ddr_pxi0_groups[] = {
+ "gpio45", "gpio46",
+};
+
+static const char * const m_voc_groups[] = {
+ "gpio46", "gpio48", "gpio49", "gpio59", "gpio60",
+};
+
+static const char * const ddr_bist_groups[] = {
+ "gpio46", "gpio47", "gpio48", "gpio49",
+};
+
+static const char * const blsp_spi1_groups[] = {
+ "gpio52", "gpio62", "gpio71", "gpio80", "gpio81", "gpio82", "gpio83",
+};
+
+static const char * const pci_e_groups[] = {
+ "gpio53",
+};
+
+static const char * const tgu_ch0_groups[] = {
+ "gpio55",
+};
+
+static const char * const pcie_clkreq_groups[] = {
+ "gpio56",
+};
+
+static const char * const mgpi_clk_groups[] = {
+ "gpio61", "gpio71",
+};
+
+static const char * const i2s_mclk_groups[] = {
+ "gpio62",
+};
+
+static const char * const audio_ref_groups[] = {
+ "gpio62",
+};
+
+static const char * const ldo_update_groups[] = {
+ "gpio62",
+};
+
+static const char * const atest_groups[] = {
+ "gpio63", "gpio64", "gpio65", "gpio66", "gpio67",
+};
+
+static const char * const uim1_data_groups[] = {
+ "gpio67",
+};
+
+static const char * const uim1_present_groups[] = {
+ "gpio68",
+};
+
+static const char * const uim1_reset_groups[] = {
+ "gpio69",
+};
+
+static const char * const uim1_clk_groups[] = {
+ "gpio70",
+};
+
+static const char * const qlink1_en_groups[] = {
+ "gpio72",
+};
+
+static const char * const qlink1_req_groups[] = {
+ "gpio73",
+};
+
+static const char * const qlink1_wmss_groups[] = {
+ "gpio74",
+};
+
+static const char * const coex_uart2_groups[] = {
+ "gpio75", "gpio76",
+};
+
+static const char * const spmi_vgi_groups[] = {
+ "gpio78", "gpio79",
+};
+
+static const char * const gcc_plltest_groups[] = {
+ "gpio81", "gpio82",
+};
+
+static const char * const usb2phy_ac_groups[] = {
+ "gpio93",
+};
+
+static const char * const emac_pps1_groups[] = {
+ "gpio95",
+};
+
+static const char * const emac_pps0_groups[] = {
+ "gpio106",
+};
+
+static const char * const uim2_data_groups[] = {
+ "gpio0",
+};
+
+static const char * const ebi0_wrcdc_groups[] = {
+ "gpio0", "gpio2",
+};
+
+static const char * const uim2_present_groups[] = {
+ "gpio1",
+};
+
+static const char * const blsp_uart1_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio20", "gpio21", "gpio22",
+ "gpio23",
+};
+
+static const char * const uim2_reset_groups[] = {
+ "gpio2",
+};
+
+static const char * const blsp_i2c1_groups[] = {
+ "gpio2", "gpio3", "gpio82", "gpio83",
+};
+
+static const char * const uim2_clk_groups[] = {
+ "gpio3",
+};
+
+static const char * const blsp_spi2_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7", "gpio52", "gpio62", "gpio71",
+};
+
+static const char * const blsp_uart2_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7", "gpio63", "gpio64", "gpio65",
+ "gpio66",
+};
+
+static const char * const blsp_i2c2_groups[] = {
+ "gpio6", "gpio7", "gpio65", "gpio66",
+};
+
+static const char * const char_exec_groups[] = {
+ "gpio6", "gpio7",
+};
+
+static const char * const pri_mi2s_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+ "gpio15",
+};
+
+static const char * const blsp_spi3_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio52", "gpio62", "gpio71",
+};
+
+static const char * const blsp_uart3_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11",
+};
+
+static const char * const ext_dbg_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11",
+};
+
+static const char * const ldo_en_groups[] = {
+ "gpio8",
+};
+
+static const char * const blsp_i2c3_groups[] = {
+ "gpio10", "gpio11",
+};
+
+static const char * const gcc_gp3_groups[] = {
+ "gpio11",
+};
+
+static const char * const emac_gcc1_groups[] = {
+ "gpio14",
+};
+
+static const char * const bimc_dte0_groups[] = {
+ "gpio14", "gpio59",
+};
+
+static const char * const native_tsens_groups[] = {
+ "gpio14",
+};
+
+static const char * const vsense_trigger_groups[] = {
+ "gpio14",
+};
+
+static const char * const emac_gcc0_groups[] = {
+ "gpio15",
+};
+
+static const char * const bimc_dte1_groups[] = {
+ "gpio15", "gpio61",
+};
+
+static const char * const sec_mi2s_groups[] = {
+ "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21", "gpio22",
+ "gpio23",
+};
+
+static const char * const blsp_spi4_groups[] = {
+ "gpio16", "gpio17", "gpio18", "gpio19", "gpio52", "gpio62", "gpio71",
+};
+
+static const char * const blsp_uart4_groups[] = {
+ "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21", "gpio22",
+ "gpio23",
+};
+
+static const char * const qdss_cti_groups[] = {
+ "gpio16", "gpio16", "gpio17", "gpio17", "gpio22", "gpio22", "gpio23",
+ "gpio23", "gpio54", "gpio54", "gpio55", "gpio55", "gpio59", "gpio60",
+ "gpio94", "gpio94", "gpio95", "gpio95",
+};
+
+static const char * const blsp_i2c4_groups[] = {
+ "gpio18", "gpio19", "gpio78", "gpio79",
+};
+
+static const char * const gcc_gp1_groups[] = {
+ "gpio18",
+};
+
+static const char * const jitter_bist_groups[] = {
+ "gpio19",
+};
+
+static const char * const gcc_gp2_groups[] = {
+ "gpio19",
+};
+
+static const char * const ebi2_a_groups[] = {
+ "gpio20",
+};
+
+static const char * const ebi2_lcd_groups[] = {
+ "gpio21", "gpio22", "gpio23",
+};
+
+static const char * const pll_bist_groups[] = {
+ "gpio22",
+};
+
+static const char * const adsp_ext_groups[] = {
+ "gpio24", "gpio25",
+};
+
+static const char * const native_char_groups[] = {
+ "gpio26",
+};
+
+static const char * const qlink0_wmss_groups[] = {
+ "gpio28",
+};
+
+static const char * const native_char3_groups[] = {
+ "gpio28",
+};
+
+static const char * const native_char2_groups[] = {
+ "gpio29",
+};
+
+static const char * const native_tsense_groups[] = {
+ "gpio29",
+};
+
+static const char * const nav_gpio_groups[] = {
+ "gpio31", "gpio32", "gpio76",
+};
+
+static const char * const pll_ref_groups[] = {
+ "gpio32",
+};
+
+static const char * const pa_indicator_groups[] = {
+ "gpio33",
+};
+
+static const char * const native_char0_groups[] = {
+ "gpio33",
+};
+
+static const char * const qlink0_en_groups[] = {
+ "gpio34",
+};
+
+static const char * const qlink0_req_groups[] = {
+ "gpio35",
+};
+
+static const char * const pll_test_groups[] = {
+ "gpio35",
+};
+
+static const char * const cri_trng_groups[] = {
+ "gpio36",
+};
+
+static const char * const dbg_out_groups[] = {
+ "gpio36",
+};
+
+static const char * const prng_rosc_groups[] = {
+ "gpio38",
+};
+
+static const char * const cri_trng0_groups[] = {
+ "gpio40",
+};
+
+static const char * const cri_trng1_groups[] = {
+ "gpio41",
+};
+
+static const char * const qdss_gpio_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio12", "gpio13", "gpio14", "gpio15", "gpio16", "gpio17", "gpio18", "gpio19",
+ "gpio42", "gpio61", "gpio63", "gpio64", "gpio65", "gpio66",
+};
+
+static const char * const native_char1_groups[] = {
+ "gpio42",
+};
+
+static const char * const coex_uart_groups[] = {
+ "gpio44", "gpio45",
+};
+
+static const char * const spmi_coex_groups[] = {
+ "gpio44", "gpio45",
+};
+
+static const struct msm_function sdx55_functions[] = {
+ FUNCTION(adsp_ext),
+ FUNCTION(atest),
+ FUNCTION(audio_ref),
+ FUNCTION(bimc_dte0),
+ FUNCTION(bimc_dte1),
+ FUNCTION(blsp_i2c1),
+ FUNCTION(blsp_i2c2),
+ FUNCTION(blsp_i2c3),
+ FUNCTION(blsp_i2c4),
+ FUNCTION(blsp_spi1),
+ FUNCTION(blsp_spi2),
+ FUNCTION(blsp_spi3),
+ FUNCTION(blsp_spi4),
+ FUNCTION(blsp_uart1),
+ FUNCTION(blsp_uart2),
+ FUNCTION(blsp_uart3),
+ FUNCTION(blsp_uart4),
+ FUNCTION(char_exec),
+ FUNCTION(coex_uart),
+ FUNCTION(coex_uart2),
+ FUNCTION(cri_trng),
+ FUNCTION(cri_trng0),
+ FUNCTION(cri_trng1),
+ FUNCTION(dbg_out),
+ FUNCTION(ddr_bist),
+ FUNCTION(ddr_pxi0),
+ FUNCTION(ebi0_wrcdc),
+ FUNCTION(ebi2_a),
+ FUNCTION(ebi2_lcd),
+ FUNCTION(emac_gcc0),
+ FUNCTION(emac_gcc1),
+ FUNCTION(emac_pps0),
+ FUNCTION(emac_pps1),
+ FUNCTION(ext_dbg),
+ FUNCTION(gcc_gp1),
+ FUNCTION(gcc_gp2),
+ FUNCTION(gcc_gp3),
+ FUNCTION(gcc_plltest),
+ FUNCTION(gpio),
+ FUNCTION(i2s_mclk),
+ FUNCTION(jitter_bist),
+ FUNCTION(ldo_en),
+ FUNCTION(ldo_update),
+ FUNCTION(mgpi_clk),
+ FUNCTION(m_voc),
+ FUNCTION(native_char),
+ FUNCTION(native_char0),
+ FUNCTION(native_char1),
+ FUNCTION(native_char2),
+ FUNCTION(native_char3),
+ FUNCTION(native_tsens),
+ FUNCTION(native_tsense),
+ FUNCTION(nav_gpio),
+ FUNCTION(pa_indicator),
+ FUNCTION(pcie_clkreq),
+ FUNCTION(pci_e),
+ FUNCTION(pll_bist),
+ FUNCTION(pll_ref),
+ FUNCTION(pll_test),
+ FUNCTION(pri_mi2s),
+ FUNCTION(prng_rosc),
+ FUNCTION(qdss_cti),
+ FUNCTION(qdss_gpio),
+ FUNCTION(qdss_stm),
+ FUNCTION(qlink0_en),
+ FUNCTION(qlink0_req),
+ FUNCTION(qlink0_wmss),
+ FUNCTION(qlink1_en),
+ FUNCTION(qlink1_req),
+ FUNCTION(qlink1_wmss),
+ FUNCTION(spmi_coex),
+ FUNCTION(sec_mi2s),
+ FUNCTION(spmi_vgi),
+ FUNCTION(tgu_ch0),
+ FUNCTION(uim1_clk),
+ FUNCTION(uim1_data),
+ FUNCTION(uim1_present),
+ FUNCTION(uim1_reset),
+ FUNCTION(uim2_clk),
+ FUNCTION(uim2_data),
+ FUNCTION(uim2_present),
+ FUNCTION(uim2_reset),
+ FUNCTION(usb2phy_ac),
+ FUNCTION(vsense_trigger),
+};
+
+/*
+ * Every pin is maintained as a single group; a missing or non-existent pin
+ * is kept as a dummy group to keep the pin group index in sync with the
+ * pin descriptors registered with the pinctrl core. Clients cannot request
+ * these dummy pin groups.
+ */
+static const struct msm_pingroup sdx55_groups[] = {
+ [0] = PINGROUP(0, uim2_data, blsp_uart1, qdss_stm, ebi0_wrcdc, _, _, _, _, _),
+ [1] = PINGROUP(1, uim2_present, blsp_uart1, qdss_stm, _, _, _, _, _, _),
+ [2] = PINGROUP(2, uim2_reset, blsp_uart1, blsp_i2c1, qdss_stm, ebi0_wrcdc, _, _, _, _),
+ [3] = PINGROUP(3, uim2_clk, blsp_uart1, blsp_i2c1, qdss_stm, _, _, _, _, _),
+ [4] = PINGROUP(4, blsp_spi2, blsp_uart2, _, qdss_stm, qdss_gpio, _, _, _, _),
+ [5] = PINGROUP(5, blsp_spi2, blsp_uart2, _, qdss_stm, qdss_gpio, _, _, _, _),
+ [6] = PINGROUP(6, blsp_spi2, blsp_uart2, blsp_i2c2, char_exec, _, qdss_stm, qdss_gpio, _, _),
+ [7] = PINGROUP(7, blsp_spi2, blsp_uart2, blsp_i2c2, char_exec, _, qdss_stm, qdss_gpio, _, _),
+ [8] = PINGROUP(8, pri_mi2s, blsp_spi3, blsp_uart3, ext_dbg, ldo_en, _, _, _, _),
+ [9] = PINGROUP(9, pri_mi2s, blsp_spi3, blsp_uart3, ext_dbg, _, _, _, _, _),
+ [10] = PINGROUP(10, pri_mi2s, blsp_spi3, blsp_uart3, blsp_i2c3, ext_dbg, _, _, _, _),
+ [11] = PINGROUP(11, pri_mi2s, blsp_spi3, blsp_uart3, blsp_i2c3, ext_dbg, gcc_gp3, _, _, _),
+ [12] = PINGROUP(12, pri_mi2s, _, qdss_stm, qdss_gpio, _, _, _, _, _),
+ [13] = PINGROUP(13, pri_mi2s, _, qdss_stm, qdss_gpio, _, _, _, _, _),
+ [14] = PINGROUP(14, pri_mi2s, emac_gcc1, _, _, qdss_stm, qdss_gpio, bimc_dte0, native_tsens, vsense_trigger),
+ [15] = PINGROUP(15, pri_mi2s, emac_gcc0, _, _, qdss_stm, qdss_gpio, bimc_dte1, _, _),
+ [16] = PINGROUP(16, sec_mi2s, blsp_spi4, blsp_uart4, qdss_cti, qdss_cti, _, _, qdss_stm, qdss_gpio),
+ [17] = PINGROUP(17, sec_mi2s, blsp_spi4, blsp_uart4, qdss_cti, qdss_cti, _, qdss_stm, qdss_gpio, _),
+ [18] = PINGROUP(18, sec_mi2s, blsp_spi4, blsp_uart4, blsp_i2c4, gcc_gp1, qdss_stm, qdss_gpio, _, _),
+ [19] = PINGROUP(19, sec_mi2s, blsp_spi4, blsp_uart4, blsp_i2c4, jitter_bist, gcc_gp2, _, qdss_stm, qdss_gpio),
+ [20] = PINGROUP(20, sec_mi2s, ebi2_a, blsp_uart1, blsp_uart4, qdss_stm, _, _, _, _),
+ [21] = PINGROUP(21, sec_mi2s, ebi2_lcd, blsp_uart1, blsp_uart4, _, qdss_stm, _, _, _),
+ [22] = PINGROUP(22, sec_mi2s, ebi2_lcd, blsp_uart1, qdss_cti, qdss_cti, blsp_uart4, pll_bist, _, qdss_stm),
+ [23] = PINGROUP(23, sec_mi2s, ebi2_lcd, qdss_cti, qdss_cti, blsp_uart1, blsp_uart4, qdss_stm, _, _),
+ [24] = PINGROUP(24, adsp_ext, _, _, _, _, _, _, _, _),
+ [25] = PINGROUP(25, adsp_ext, _, _, _, _, _, _, _, _),
+ [26] = PINGROUP(26, _, _, _, native_char, _, _, _, _, _),
+ [27] = PINGROUP(27, _, _, _, _, _, _, _, _, _),
+ [28] = PINGROUP(28, qlink0_wmss, _, native_char3, _, _, _, _, _, _),
+ [29] = PINGROUP(29, _, _, _, native_char2, native_tsense, _, _, _, _),
+ [30] = PINGROUP(30, _, _, _, _, _, _, _, _, _),
+ [31] = PINGROUP(31, nav_gpio, _, _, _, _, _, _, _, _),
+ [32] = PINGROUP(32, nav_gpio, pll_ref, _, _, _, _, _, _, _),
+ [33] = PINGROUP(33, _, pa_indicator, native_char0, _, _, _, _, _, _),
+ [34] = PINGROUP(34, qlink0_en, _, _, _, _, _, _, _, _),
+ [35] = PINGROUP(35, qlink0_req, pll_test, _, _, _, _, _, _, _),
+ [36] = PINGROUP(36, _, _, cri_trng, dbg_out, _, _, _, _, _),
+ [37] = PINGROUP(37, _, _, _, _, _, _, _, _, _),
+ [38] = PINGROUP(38, _, _, prng_rosc, _, _, _, _, _, _),
+ [39] = PINGROUP(39, _, _, _, _, _, _, _, _, _),
+ [40] = PINGROUP(40, _, _, cri_trng0, _, _, _, _, _, _),
+ [41] = PINGROUP(41, _, _, cri_trng1, _, _, _, _, _, _),
+ [42] = PINGROUP(42, _, qdss_gpio, native_char1, _, _, _, _, _, _),
+ [43] = PINGROUP(43, _, _, _, _, _, _, _, _, _),
+ [44] = PINGROUP(44, coex_uart, spmi_coex, _, qdss_stm, _, _, _, _, _),
+ [45] = PINGROUP(45, coex_uart, spmi_coex, qdss_stm, ddr_pxi0, _, _, _, _, _),
+ [46] = PINGROUP(46, m_voc, ddr_bist, ddr_pxi0, _, _, _, _, _, _),
+ [47] = PINGROUP(47, ddr_bist, _, _, _, _, _, _, _, _),
+ [48] = PINGROUP(48, m_voc, ddr_bist, _, _, _, _, _, _, _),
+ [49] = PINGROUP(49, m_voc, ddr_bist, _, _, _, _, _, _, _),
+ [50] = PINGROUP(50, _, _, _, _, _, _, _, _, _),
+ [51] = PINGROUP(51, _, _, _, _, _, _, _, _, _),
+ [52] = PINGROUP(52, blsp_spi2, blsp_spi1, blsp_spi3, blsp_spi4, _, _, qdss_stm, _, _),
+ [53] = PINGROUP(53, pci_e, _, _, qdss_stm, _, _, _, _, _),
+ [54] = PINGROUP(54, qdss_cti, qdss_cti, _, _, _, _, _, _, _),
+ [55] = PINGROUP(55, qdss_cti, qdss_cti, tgu_ch0, _, _, _, _, _, _),
+ [56] = PINGROUP(56, pcie_clkreq, _, qdss_stm, _, _, _, _, _, _),
+ [57] = PINGROUP(57, _, qdss_stm, _, _, _, _, _, _, _),
+ [58] = PINGROUP(58, _, _, _, _, _, _, _, _, _),
+ [59] = PINGROUP(59, qdss_cti, m_voc, bimc_dte0, _, _, _, _, _, _),
+ [60] = PINGROUP(60, qdss_cti, _, m_voc, _, _, _, _, _, _),
+ [61] = PINGROUP(61, mgpi_clk, qdss_stm, qdss_gpio, bimc_dte1, _, _, _, _, _),
+ [62] = PINGROUP(62, i2s_mclk, audio_ref, blsp_spi1, blsp_spi2, blsp_spi3, blsp_spi4, ldo_update, qdss_stm, _),
+ [63] = PINGROUP(63, blsp_uart2, _, qdss_stm, qdss_gpio, atest, _, _, _, _),
+ [64] = PINGROUP(64, blsp_uart2, qdss_stm, qdss_gpio, atest, _, _, _, _, _),
+ [65] = PINGROUP(65, blsp_uart2, blsp_i2c2, _, qdss_stm, qdss_gpio, atest, _, _, _),
+ [66] = PINGROUP(66, blsp_uart2, blsp_i2c2, qdss_stm, qdss_gpio, atest, _, _, _, _),
+ [67] = PINGROUP(67, uim1_data, atest, _, _, _, _, _, _, _),
+ [68] = PINGROUP(68, uim1_present, _, _, _, _, _, _, _, _),
+ [69] = PINGROUP(69, uim1_reset, _, _, _, _, _, _, _, _),
+ [70] = PINGROUP(70, uim1_clk, _, _, _, _, _, _, _, _),
+ [71] = PINGROUP(71, mgpi_clk, blsp_spi1, blsp_spi2, blsp_spi3, blsp_spi4, _, _, _, _),
+ [72] = PINGROUP(72, qlink1_en, _, _, _, _, _, _, _, _),
+ [73] = PINGROUP(73, qlink1_req, _, _, _, _, _, _, _, _),
+ [74] = PINGROUP(74, qlink1_wmss, _, _, _, _, _, _, _, _),
+ [75] = PINGROUP(75, coex_uart2, _, _, _, _, _, _, _, _),
+ [76] = PINGROUP(76, coex_uart2, nav_gpio, _, _, _, _, _, _, _),
+ [77] = PINGROUP(77, _, _, _, _, _, _, _, _, _),
+ [78] = PINGROUP(78, spmi_vgi, blsp_i2c4, _, _, _, _, _, _, _),
+ [79] = PINGROUP(79, spmi_vgi, blsp_i2c4, _, _, _, _, _, _, _),
+ [80] = PINGROUP(80, _, blsp_spi1, _, _, _, _, _, _, _),
+ [81] = PINGROUP(81, _, blsp_spi1, _, gcc_plltest, _, _, _, _, _),
+ [82] = PINGROUP(82, _, blsp_spi1, _, blsp_i2c1, gcc_plltest, _, _, _, _),
+ [83] = PINGROUP(83, _, blsp_spi1, _, blsp_i2c1, _, _, _, _, _),
+ [84] = PINGROUP(84, _, _, _, _, _, _, _, _, _),
+ [85] = PINGROUP(85, _, _, _, _, _, _, _, _, _),
+ [86] = PINGROUP(86, _, _, _, _, _, _, _, _, _),
+ [87] = PINGROUP(87, _, _, _, _, _, _, _, _, _),
+ [88] = PINGROUP(88, _, _, _, _, _, _, _, _, _),
+ [89] = PINGROUP(89, _, _, _, _, _, _, _, _, _),
+ [90] = PINGROUP(90, _, _, _, _, _, _, _, _, _),
+ [91] = PINGROUP(91, _, _, _, _, _, _, _, _, _),
+ [92] = PINGROUP(92, _, _, _, _, _, _, _, _, _),
+ [93] = PINGROUP(93, _, _, usb2phy_ac, _, _, _, _, _, _),
+ [94] = PINGROUP(94, qdss_cti, qdss_cti, _, _, _, _, _, _, _),
+ [95] = PINGROUP(95, qdss_cti, qdss_cti, emac_pps1, _, _, _, _, _, _),
+ [96] = PINGROUP(96, _, _, _, _, _, _, _, _, _),
+ [97] = PINGROUP(97, _, _, _, _, _, _, _, _, _),
+ [98] = PINGROUP(98, _, _, _, _, _, _, _, _, _),
+ [99] = PINGROUP(99, _, _, _, _, _, _, _, _, _),
+ [100] = PINGROUP(100, _, _, _, _, _, _, _, _, _),
+ [101] = PINGROUP(101, _, _, _, _, _, _, _, _, _),
+ [102] = PINGROUP(102, _, _, _, _, _, _, _, _, _),
+ [103] = PINGROUP(103, _, _, _, _, _, _, _, _, _),
+ [104] = PINGROUP(104, _, _, _, _, _, _, _, _, _),
+ [105] = PINGROUP(105, _, _, _, _, _, _, _, _, _),
+ [106] = PINGROUP(106, emac_pps0, _, _, _, _, _, _, _, _),
+ [107] = PINGROUP(107, _, _, _, _, _, _, _, _, _),
+ [108] = SDC_PINGROUP(sdc1_rclk, 0x9a000, 15, 0),
+ [109] = SDC_PINGROUP(sdc1_clk, 0x9a000, 13, 6),
+ [110] = SDC_PINGROUP(sdc1_cmd, 0x9a000, 11, 3),
+ [111] = SDC_PINGROUP(sdc1_data, 0x9a000, 9, 0),
+};
+
+static const struct msm_pinctrl_soc_data sdx55_pinctrl = {
+ .pins = sdx55_pins,
+ .npins = ARRAY_SIZE(sdx55_pins),
+ .functions = sdx55_functions,
+ .nfunctions = ARRAY_SIZE(sdx55_functions),
+ .groups = sdx55_groups,
+ .ngroups = ARRAY_SIZE(sdx55_groups),
+ .ngpios = 108,
+};
+
+static int sdx55_pinctrl_probe(struct platform_device *pdev)
+{
+ return msm_pinctrl_probe(pdev, &sdx55_pinctrl);
+}
+
+static const struct of_device_id sdx55_pinctrl_of_match[] = {
+ { .compatible = "qcom,sdx55-pinctrl", },
+ { },
+};
+
+static struct platform_driver sdx55_pinctrl_driver = {
+ .driver = {
+ .name = "sdx55-pinctrl",
+ .of_match_table = sdx55_pinctrl_of_match,
+ },
+ .probe = sdx55_pinctrl_probe,
+ .remove = msm_pinctrl_remove,
+};
+
+static int __init sdx55_pinctrl_init(void)
+{
+ return platform_driver_register(&sdx55_pinctrl_driver);
+}
+arch_initcall(sdx55_pinctrl_init);
+
+static void __exit sdx55_pinctrl_exit(void)
+{
+ platform_driver_unregister(&sdx55_pinctrl_driver);
+}
+module_exit(sdx55_pinctrl_exit);
+
+MODULE_DESCRIPTION("QTI sdx55 pinctrl driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, sdx55_pinctrl_of_match);
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index 17441388ce8f..9801c717e311 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -1129,6 +1129,8 @@ static const struct of_device_id pmic_gpio_of_match[] = {
{ .compatible = "qcom,pm8150l-gpio", .data = (void *) 12 },
{ .compatible = "qcom,pm6150-gpio", .data = (void *) 10 },
{ .compatible = "qcom,pm6150l-gpio", .data = (void *) 12 },
+ /* pmx55 has 11 GPIOs with holes on 3, 7, 10, 11 */
+ { .compatible = "qcom,pmx55-gpio", .data = (void *) 11 },
{ },
};
diff --git a/drivers/pinctrl/renesas/core.c b/drivers/pinctrl/renesas/core.c
index c528c124fb0e..2cc457279345 100644
--- a/drivers/pinctrl/renesas/core.c
+++ b/drivers/pinctrl/renesas/core.c
@@ -315,6 +315,7 @@ int sh_pfc_config_mux(struct sh_pfc *pfc, unsigned mark, int pinmux_type)
range = NULL;
break;
+#ifdef CONFIG_PINCTRL_SH_PFC_GPIO
case PINMUX_TYPE_OUTPUT:
range = &pfc->info->output;
break;
@@ -322,6 +323,7 @@ int sh_pfc_config_mux(struct sh_pfc *pfc, unsigned mark, int pinmux_type)
case PINMUX_TYPE_INPUT:
range = &pfc->info->input;
break;
+#endif /* CONFIG_PINCTRL_SH_PFC_GPIO */
default:
return -EINVAL;
diff --git a/drivers/pinctrl/renesas/core.h b/drivers/pinctrl/renesas/core.h
index b5b1d163e98a..5ca7e0830ae9 100644
--- a/drivers/pinctrl/renesas/core.h
+++ b/drivers/pinctrl/renesas/core.h
@@ -33,4 +33,8 @@ const struct pinmux_bias_reg *
sh_pfc_pin_to_bias_reg(const struct sh_pfc *pfc, unsigned int pin,
unsigned int *bit);
+unsigned int rcar_pinmux_get_bias(struct sh_pfc *pfc, unsigned int pin);
+void rcar_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
+ unsigned int bias);
+
#endif /* __SH_PFC_CORE_H__ */
diff --git a/drivers/pinctrl/renesas/gpio.c b/drivers/pinctrl/renesas/gpio.c
index 9c6e931ae766..ad06f5355d1e 100644
--- a/drivers/pinctrl/renesas/gpio.c
+++ b/drivers/pinctrl/renesas/gpio.c
@@ -328,7 +328,7 @@ int sh_pfc_register_gpiochip(struct sh_pfc *pfc)
if (pfc->info->data_regs == NULL)
return 0;
- /* Find the memory window that contain the GPIO registers. Boards that
+ /* Find the memory window that contains the GPIO registers. Boards that
* register a separate GPIO device will not supply a memory resource
* that covers the data registers. In that case don't try to handle
* GPIOs.
diff --git a/drivers/pinctrl/renesas/pfc-r8a7778.c b/drivers/pinctrl/renesas/pfc-r8a7778.c
index a9875038ed9b..75f52b1798c3 100644
--- a/drivers/pinctrl/renesas/pfc-r8a7778.c
+++ b/drivers/pinctrl/renesas/pfc-r8a7778.c
@@ -2909,7 +2909,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
};
static const struct pinmux_bias_reg pinmux_bias_regs[] = {
- { PINMUX_BIAS_REG("PUPR0", 0x100, "N/A", 0) {
+ { PINMUX_BIAS_REG("PUPR0", 0xfffc0100, "N/A", 0) {
[ 0] = RCAR_GP_PIN(0, 6), /* A0 */
[ 1] = RCAR_GP_PIN(0, 7), /* A1 */
[ 2] = RCAR_GP_PIN(0, 8), /* A2 */
@@ -2943,7 +2943,7 @@ static const struct pinmux_bias_reg pinmux_bias_regs[] = {
[30] = RCAR_GP_PIN(1, 7), /* /EX_CS4 */
[31] = RCAR_GP_PIN(1, 8), /* /EX_CS5 */
} },
- { PINMUX_BIAS_REG("PUPR1", 0x104, "N/A", 0) {
+ { PINMUX_BIAS_REG("PUPR1", 0xfffc0104, "N/A", 0) {
[ 0] = RCAR_GP_PIN(0, 0), /* /PRESETOUT */
[ 1] = RCAR_GP_PIN(0, 5), /* /BS */
[ 2] = RCAR_GP_PIN(1, 0), /* RD//WR */
@@ -2977,7 +2977,7 @@ static const struct pinmux_bias_reg pinmux_bias_regs[] = {
[30] = SH_PFC_PIN_NONE,
[31] = SH_PFC_PIN_NONE,
} },
- { PINMUX_BIAS_REG("PUPR2", 0x108, "N/A", 0) {
+ { PINMUX_BIAS_REG("PUPR2", 0xfffc0108, "N/A", 0) {
[ 0] = RCAR_GP_PIN(1, 22), /* DU0_DR0 */
[ 1] = RCAR_GP_PIN(1, 23), /* DU0_DR1 */
[ 2] = RCAR_GP_PIN(1, 24), /* DU0_DR2 */
@@ -3011,7 +3011,7 @@ static const struct pinmux_bias_reg pinmux_bias_regs[] = {
[30] = RCAR_GP_PIN(2, 21), /* DU0_CDE */
[31] = RCAR_GP_PIN(2, 16), /* DU0_DOTCLKOUT1 */
} },
- { PINMUX_BIAS_REG("PUPR3", 0x10c, "N/A", 0) {
+ { PINMUX_BIAS_REG("PUPR3", 0xfffc010c, "N/A", 0) {
[ 0] = RCAR_GP_PIN(3, 24), /* VI0_CLK */
[ 1] = RCAR_GP_PIN(3, 25), /* VI0_CLKENB */
[ 2] = RCAR_GP_PIN(3, 26), /* VI0_FIELD */
@@ -3045,7 +3045,7 @@ static const struct pinmux_bias_reg pinmux_bias_regs[] = {
[30] = RCAR_GP_PIN(4, 18), /* ETH_MDIO */
[31] = RCAR_GP_PIN(4, 19), /* ETH_LINK */
} },
- { PINMUX_BIAS_REG("PUPR4", 0x110, "N/A", 0) {
+ { PINMUX_BIAS_REG("PUPR4", 0xfffc0110, "N/A", 0) {
[ 0] = RCAR_GP_PIN(3, 6), /* SSI_SCK012 */
[ 1] = RCAR_GP_PIN(3, 7), /* SSI_WS012 */
[ 2] = RCAR_GP_PIN(3, 10), /* SSI_SDATA0 */
@@ -3079,7 +3079,7 @@ static const struct pinmux_bias_reg pinmux_bias_regs[] = {
[30] = RCAR_GP_PIN(1, 14), /* IRQ2 */
[31] = RCAR_GP_PIN(1, 15), /* IRQ3 */
} },
- { PINMUX_BIAS_REG("PUPR5", 0x114, "N/A", 0) {
+ { PINMUX_BIAS_REG("PUPR5", 0xfffc0114, "N/A", 0) {
[ 0] = RCAR_GP_PIN(0, 1), /* PENC0 */
[ 1] = RCAR_GP_PIN(0, 2), /* PENC1 */
[ 2] = RCAR_GP_PIN(0, 3), /* USB_OVC0 */
@@ -3116,48 +3116,9 @@ static const struct pinmux_bias_reg pinmux_bias_regs[] = {
{ /* sentinel */ },
};
-static unsigned int r8a7778_pinmux_get_bias(struct sh_pfc *pfc,
- unsigned int pin)
-{
- const struct pinmux_bias_reg *reg;
- void __iomem *addr;
- unsigned int bit;
-
- reg = sh_pfc_pin_to_bias_reg(pfc, pin, &bit);
- if (!reg)
- return PIN_CONFIG_BIAS_DISABLE;
-
- addr = pfc->windows->virt + reg->puen;
-
- if (ioread32(addr) & BIT(bit))
- return PIN_CONFIG_BIAS_PULL_UP;
- else
- return PIN_CONFIG_BIAS_DISABLE;
-}
-
-static void r8a7778_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
- unsigned int bias)
-{
- const struct pinmux_bias_reg *reg;
- void __iomem *addr;
- unsigned int bit;
- u32 value;
-
- reg = sh_pfc_pin_to_bias_reg(pfc, pin, &bit);
- if (!reg)
- return;
-
- addr = pfc->windows->virt + reg->puen;
-
- value = ioread32(addr) & ~BIT(bit);
- if (bias == PIN_CONFIG_BIAS_PULL_UP)
- value |= BIT(bit);
- iowrite32(value, addr);
-}
-
static const struct sh_pfc_soc_operations r8a7778_pfc_ops = {
- .get_bias = r8a7778_pinmux_get_bias,
- .set_bias = r8a7778_pinmux_set_bias,
+ .get_bias = rcar_pinmux_get_bias,
+ .set_bias = rcar_pinmux_set_bias,
};
const struct sh_pfc_soc_info r8a7778_pinmux_info = {
diff --git a/drivers/pinctrl/renesas/pfc-r8a7790.c b/drivers/pinctrl/renesas/pfc-r8a7790.c
index 60f973c5dffe..e9a64e0e2734 100644
--- a/drivers/pinctrl/renesas/pfc-r8a7790.c
+++ b/drivers/pinctrl/renesas/pfc-r8a7790.c
@@ -2393,6 +2393,8 @@ static const unsigned int intc_irq3_pins[] = {
static const unsigned int intc_irq3_mux[] = {
IRQ3_MARK,
};
+
+#ifdef CONFIG_PINCTRL_PFC_R8A7790
/* - MLB+ ------------------------------------------------------------------- */
static const unsigned int mlb_3pin_pins[] = {
RCAR_GP_PIN(4, 0), RCAR_GP_PIN(4, 1), RCAR_GP_PIN(4, 2),
@@ -2400,6 +2402,8 @@ static const unsigned int mlb_3pin_pins[] = {
static const unsigned int mlb_3pin_mux[] = {
MLB_CLK_MARK, MLB_SIG_MARK, MLB_DAT_MARK,
};
+#endif /* CONFIG_PINCTRL_PFC_R8A7790 */
+
/* - MMCIF0 ----------------------------------------------------------------- */
static const unsigned int mmc0_data1_pins[] = {
/* D[0] */
@@ -3866,6 +3870,72 @@ static const unsigned int vin1_data18_mux[] = {
VI1_R4_MARK, VI1_R5_MARK,
VI1_R6_MARK, VI1_R7_MARK,
};
+static const union vin_data vin1_data_b_pins = {
+ .data24 = {
+ /* B */
+ RCAR_GP_PIN(3, 0), RCAR_GP_PIN(3, 1),
+ RCAR_GP_PIN(3, 2), RCAR_GP_PIN(3, 3),
+ RCAR_GP_PIN(3, 4), RCAR_GP_PIN(3, 5),
+ RCAR_GP_PIN(3, 6), RCAR_GP_PIN(3, 7),
+ /* G */
+ RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 15),
+ RCAR_GP_PIN(1, 17), RCAR_GP_PIN(1, 20),
+ RCAR_GP_PIN(1, 22), RCAR_GP_PIN(1, 12),
+ RCAR_GP_PIN(1, 9), RCAR_GP_PIN(1, 7),
+ /* R */
+ RCAR_GP_PIN(0, 27), RCAR_GP_PIN(0, 28),
+ RCAR_GP_PIN(0, 29), RCAR_GP_PIN(1, 4),
+ RCAR_GP_PIN(1, 5), RCAR_GP_PIN(1, 6),
+ RCAR_GP_PIN(1, 10), RCAR_GP_PIN(1, 8),
+ },
+};
+static const union vin_data vin1_data_b_mux = {
+ .data24 = {
+ /* B */
+ VI1_DATA0_VI1_B0_B_MARK, VI1_DATA1_VI1_B1_B_MARK,
+ VI1_DATA2_VI1_B2_B_MARK, VI1_DATA3_VI1_B3_B_MARK,
+ VI1_DATA4_VI1_B4_B_MARK, VI1_DATA5_VI1_B5_B_MARK,
+ VI1_DATA6_VI1_B6_B_MARK, VI1_DATA7_VI1_B7_B_MARK,
+ /* G */
+ VI1_G0_B_MARK, VI1_G1_B_MARK,
+ VI1_G2_B_MARK, VI1_G3_B_MARK,
+ VI1_G4_B_MARK, VI1_G5_B_MARK,
+ VI1_G6_B_MARK, VI1_G7_B_MARK,
+ /* R */
+ VI1_R0_B_MARK, VI1_R1_B_MARK,
+ VI1_R2_B_MARK, VI1_R3_B_MARK,
+ VI1_R4_B_MARK, VI1_R5_B_MARK,
+ VI1_R6_B_MARK, VI1_R7_B_MARK,
+ },
+};
+static const unsigned int vin1_data18_b_pins[] = {
+ /* B */
+ RCAR_GP_PIN(3, 2), RCAR_GP_PIN(3, 3),
+ RCAR_GP_PIN(3, 4), RCAR_GP_PIN(3, 5),
+ RCAR_GP_PIN(3, 6), RCAR_GP_PIN(3, 7),
+ /* G */
+ RCAR_GP_PIN(1, 17), RCAR_GP_PIN(1, 20),
+ RCAR_GP_PIN(1, 22), RCAR_GP_PIN(1, 12),
+ RCAR_GP_PIN(1, 9), RCAR_GP_PIN(1, 7),
+ /* R */
+ RCAR_GP_PIN(0, 29), RCAR_GP_PIN(1, 4),
+ RCAR_GP_PIN(1, 5), RCAR_GP_PIN(1, 6),
+ RCAR_GP_PIN(1, 10), RCAR_GP_PIN(1, 8),
+};
+static const unsigned int vin1_data18_b_mux[] = {
+ /* B */
+ VI1_DATA2_VI1_B2_B_MARK, VI1_DATA3_VI1_B3_B_MARK,
+ VI1_DATA4_VI1_B4_B_MARK, VI1_DATA5_VI1_B5_B_MARK,
+ VI1_DATA6_VI1_B6_B_MARK, VI1_DATA7_VI1_B7_B_MARK,
+ /* G */
+ VI1_G2_B_MARK, VI1_G3_B_MARK,
+ VI1_G4_B_MARK, VI1_G5_B_MARK,
+ VI1_G6_B_MARK, VI1_G7_B_MARK,
+ /* R */
+ VI1_R2_B_MARK, VI1_R3_B_MARK,
+ VI1_R4_B_MARK, VI1_R5_B_MARK,
+ VI1_R6_B_MARK, VI1_R7_B_MARK,
+};
static const unsigned int vin1_sync_pins[] = {
RCAR_GP_PIN(1, 24), /* HSYNC */
RCAR_GP_PIN(1, 25), /* VSYNC */
@@ -3874,24 +3944,50 @@ static const unsigned int vin1_sync_mux[] = {
VI1_HSYNC_N_MARK,
VI1_VSYNC_N_MARK,
};
+static const unsigned int vin1_sync_b_pins[] = {
+ RCAR_GP_PIN(1, 24), /* HSYNC */
+ RCAR_GP_PIN(1, 25), /* VSYNC */
+};
+static const unsigned int vin1_sync_b_mux[] = {
+ VI1_HSYNC_N_B_MARK,
+ VI1_VSYNC_N_B_MARK,
+};
static const unsigned int vin1_field_pins[] = {
RCAR_GP_PIN(1, 13),
};
static const unsigned int vin1_field_mux[] = {
VI1_FIELD_MARK,
};
+static const unsigned int vin1_field_b_pins[] = {
+ RCAR_GP_PIN(1, 13),
+};
+static const unsigned int vin1_field_b_mux[] = {
+ VI1_FIELD_B_MARK,
+};
static const unsigned int vin1_clkenb_pins[] = {
RCAR_GP_PIN(1, 26),
};
static const unsigned int vin1_clkenb_mux[] = {
VI1_CLKENB_MARK,
};
+static const unsigned int vin1_clkenb_b_pins[] = {
+ RCAR_GP_PIN(1, 26),
+};
+static const unsigned int vin1_clkenb_b_mux[] = {
+ VI1_CLKENB_B_MARK,
+};
static const unsigned int vin1_clk_pins[] = {
RCAR_GP_PIN(2, 9),
};
static const unsigned int vin1_clk_mux[] = {
VI1_CLK_MARK,
};
+static const unsigned int vin1_clk_b_pins[] = {
+ RCAR_GP_PIN(3, 15),
+};
+static const unsigned int vin1_clk_b_mux[] = {
+ VI1_CLK_B_MARK,
+};
/* - VIN2 ----------------------------------------------------------------- */
static const union vin_data vin2_data_pins = {
.data24 = {
@@ -3959,6 +4055,18 @@ static const unsigned int vin2_data18_mux[] = {
VI2_R4_MARK, VI2_R5_MARK,
VI2_R6_MARK, VI2_R7_MARK,
};
+static const unsigned int vin2_g8_pins[] = {
+ RCAR_GP_PIN(0, 27), RCAR_GP_PIN(0, 28),
+ RCAR_GP_PIN(0, 29), RCAR_GP_PIN(1, 10),
+ RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 5),
+ RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7),
+};
+static const unsigned int vin2_g8_mux[] = {
+ VI2_G0_MARK, VI2_G1_MARK,
+ VI2_G2_MARK, VI2_G3_MARK,
+ VI2_G4_MARK, VI2_G5_MARK,
+ VI2_G6_MARK, VI2_G7_MARK,
+};
static const unsigned int vin2_sync_pins[] = {
RCAR_GP_PIN(1, 16), /* HSYNC */
RCAR_GP_PIN(1, 21), /* VSYNC */
@@ -4026,8 +4134,10 @@ static const unsigned int vin3_clk_mux[] = {
};
static const struct {
- struct sh_pfc_pin_group common[298];
+ struct sh_pfc_pin_group common[311];
+#ifdef CONFIG_PINCTRL_PFC_R8A7790
struct sh_pfc_pin_group automotive[1];
+#endif
} pinmux_groups = {
.common = {
SH_PFC_PIN_GROUP(audio_clk_a),
@@ -4310,15 +4420,28 @@ static const struct {
VIN_DATA_PIN_GROUP(vin1_data, 10),
VIN_DATA_PIN_GROUP(vin1_data, 8),
VIN_DATA_PIN_GROUP(vin1_data, 4),
+ VIN_DATA_PIN_GROUP(vin1_data, 24, _b),
+ VIN_DATA_PIN_GROUP(vin1_data, 20, _b),
+ SH_PFC_PIN_GROUP(vin1_data18_b),
+ VIN_DATA_PIN_GROUP(vin1_data, 16, _b),
+ VIN_DATA_PIN_GROUP(vin1_data, 12, _b),
+ VIN_DATA_PIN_GROUP(vin1_data, 10, _b),
+ VIN_DATA_PIN_GROUP(vin1_data, 8, _b),
+ VIN_DATA_PIN_GROUP(vin1_data, 4, _b),
SH_PFC_PIN_GROUP(vin1_sync),
+ SH_PFC_PIN_GROUP(vin1_sync_b),
SH_PFC_PIN_GROUP(vin1_field),
+ SH_PFC_PIN_GROUP(vin1_field_b),
SH_PFC_PIN_GROUP(vin1_clkenb),
+ SH_PFC_PIN_GROUP(vin1_clkenb_b),
SH_PFC_PIN_GROUP(vin1_clk),
+ SH_PFC_PIN_GROUP(vin1_clk_b),
VIN_DATA_PIN_GROUP(vin2_data, 24),
SH_PFC_PIN_GROUP(vin2_data18),
VIN_DATA_PIN_GROUP(vin2_data, 16),
VIN_DATA_PIN_GROUP(vin2_data, 8),
VIN_DATA_PIN_GROUP(vin2_data, 4),
+ SH_PFC_PIN_GROUP(vin2_g8),
SH_PFC_PIN_GROUP(vin2_sync),
SH_PFC_PIN_GROUP(vin2_field),
SH_PFC_PIN_GROUP(vin2_clkenb),
@@ -4329,9 +4452,11 @@ static const struct {
SH_PFC_PIN_GROUP(vin3_clkenb),
SH_PFC_PIN_GROUP(vin3_clk),
},
+#ifdef CONFIG_PINCTRL_PFC_R8A7790
.automotive = {
SH_PFC_PIN_GROUP(mlb_3pin),
}
+#endif /* CONFIG_PINCTRL_PFC_R8A7790 */
};
static const char * const audio_clk_groups[] = {
@@ -4475,9 +4600,11 @@ static const char * const intc_groups[] = {
"intc_irq3",
};
+#ifdef CONFIG_PINCTRL_PFC_R8A7790
static const char * const mlb_groups[] = {
"mlb_3pin",
};
+#endif /* CONFIG_PINCTRL_PFC_R8A7790 */
static const char * const mmc0_groups[] = {
"mmc0_data1",
@@ -4784,10 +4911,22 @@ static const char * const vin1_groups[] = {
"vin1_data10",
"vin1_data8",
"vin1_data4",
+ "vin1_data24_b",
+ "vin1_data20_b",
+ "vin1_data18_b",
+ "vin1_data16_b",
+ "vin1_data12_b",
+ "vin1_data10_b",
+ "vin1_data8_b",
+ "vin1_data4_b",
"vin1_sync",
+ "vin1_sync_b",
"vin1_field",
+ "vin1_field_b",
"vin1_clkenb",
+ "vin1_clkenb_b",
"vin1_clk",
+ "vin1_clk_b",
};
static const char * const vin2_groups[] = {
@@ -4796,6 +4935,7 @@ static const char * const vin2_groups[] = {
"vin2_data16",
"vin2_data8",
"vin2_data4",
+ "vin2_g8",
"vin2_sync",
"vin2_field",
"vin2_clkenb",
@@ -4812,7 +4952,9 @@ static const char * const vin3_groups[] = {
static const struct {
struct sh_pfc_function common[58];
+#ifdef CONFIG_PINCTRL_PFC_R8A7790
struct sh_pfc_function automotive[1];
+#endif
} pinmux_functions = {
.common = {
SH_PFC_FUNCTION(audio_clk),
@@ -4874,9 +5016,11 @@ static const struct {
SH_PFC_FUNCTION(vin2),
SH_PFC_FUNCTION(vin3),
},
+#ifdef CONFIG_PINCTRL_PFC_R8A7790
.automotive = {
SH_PFC_FUNCTION(mlb),
}
+#endif /* CONFIG_PINCTRL_PFC_R8A7790 */
};
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
diff --git a/drivers/pinctrl/renesas/pfc-r8a7791.c b/drivers/pinctrl/renesas/pfc-r8a7791.c
index bc9caf812fc1..6fce9fe2e98f 100644
--- a/drivers/pinctrl/renesas/pfc-r8a7791.c
+++ b/drivers/pinctrl/renesas/pfc-r8a7791.c
@@ -1700,6 +1700,7 @@ static const struct sh_pfc_pin pinmux_pins[] = {
PINMUX_GPIO_GP_ALL(),
};
+#if defined(CONFIG_PINCTRL_PFC_R8A7791) || defined(CONFIG_PINCTRL_PFC_R8A7793)
/* - ADI -------------------------------------------------------------------- */
static const unsigned int adi_common_pins[] = {
/* ADIDATA, ADICS/SAMP, ADICLK */
@@ -1765,6 +1766,7 @@ static const unsigned int adi_chsel2_b_mux[] = {
/* ADICHS B 2 */
ADICHS2_B_MARK,
};
+#endif /* CONFIG_PINCTRL_PFC_R8A7791 || CONFIG_PINCTRL_PFC_R8A7793 */
/* - Audio Clock ------------------------------------------------------------ */
static const unsigned int audio_clk_a_pins[] = {
@@ -2553,6 +2555,8 @@ static const unsigned int intc_irq3_pins[] = {
static const unsigned int intc_irq3_mux[] = {
IRQ3_MARK,
};
+
+#if defined(CONFIG_PINCTRL_PFC_R8A7791) || defined(CONFIG_PINCTRL_PFC_R8A7793)
/* - MLB+ ------------------------------------------------------------------- */
static const unsigned int mlb_3pin_pins[] = {
RCAR_GP_PIN(7, 7), RCAR_GP_PIN(7, 8), RCAR_GP_PIN(7, 9),
@@ -2560,6 +2564,8 @@ static const unsigned int mlb_3pin_pins[] = {
static const unsigned int mlb_3pin_mux[] = {
MLB_CLK_MARK, MLB_SIG_MARK, MLB_DAT_MARK,
};
+#endif /* CONFIG_PINCTRL_PFC_R8A7791 || CONFIG_PINCTRL_PFC_R8A7793 */
+
/* - MMCIF ------------------------------------------------------------------ */
static const unsigned int mmc_data1_pins[] = {
/* D[0] */
@@ -4452,7 +4458,9 @@ static const unsigned int vin2_clk_mux[] = {
static const struct {
struct sh_pfc_pin_group common[346];
+#if defined(CONFIG_PINCTRL_PFC_R8A7791) || defined(CONFIG_PINCTRL_PFC_R8A7793)
struct sh_pfc_pin_group automotive[9];
+#endif
} pinmux_groups = {
.common = {
SH_PFC_PIN_GROUP(audio_clk_a),
@@ -4802,6 +4810,7 @@ static const struct {
SH_PFC_PIN_GROUP(vin2_clkenb),
SH_PFC_PIN_GROUP(vin2_clk),
},
+#if defined(CONFIG_PINCTRL_PFC_R8A7791) || defined(CONFIG_PINCTRL_PFC_R8A7793)
.automotive = {
SH_PFC_PIN_GROUP(adi_common),
SH_PFC_PIN_GROUP(adi_chsel0),
@@ -4813,8 +4822,10 @@ static const struct {
SH_PFC_PIN_GROUP(adi_chsel2_b),
SH_PFC_PIN_GROUP(mlb_3pin),
}
+#endif /* CONFIG_PINCTRL_PFC_R8A7791 || CONFIG_PINCTRL_PFC_R8A7793 */
};
+#if defined(CONFIG_PINCTRL_PFC_R8A7791) || defined(CONFIG_PINCTRL_PFC_R8A7793)
static const char * const adi_groups[] = {
"adi_common",
"adi_chsel0",
@@ -4825,6 +4836,7 @@ static const char * const adi_groups[] = {
"adi_chsel1_b",
"adi_chsel2_b",
};
+#endif /* CONFIG_PINCTRL_PFC_R8A7791 || CONFIG_PINCTRL_PFC_R8A7793 */
static const char * const audio_clk_groups[] = {
"audio_clk_a",
@@ -5002,9 +5014,11 @@ static const char * const intc_groups[] = {
"intc_irq3",
};
+#if defined(CONFIG_PINCTRL_PFC_R8A7791) || defined(CONFIG_PINCTRL_PFC_R8A7793)
static const char * const mlb_groups[] = {
"mlb_3pin",
};
+#endif /* CONFIG_PINCTRL_PFC_R8A7791 || CONFIG_PINCTRL_PFC_R8A7793 */
static const char * const mmc_groups[] = {
"mmc_data1",
@@ -5359,7 +5373,9 @@ static const char * const vin2_groups[] = {
static const struct {
struct sh_pfc_function common[58];
+#if defined(CONFIG_PINCTRL_PFC_R8A7791) || defined(CONFIG_PINCTRL_PFC_R8A7793)
struct sh_pfc_function automotive[2];
+#endif
} pinmux_functions = {
.common = {
SH_PFC_FUNCTION(audio_clk),
@@ -5421,10 +5437,12 @@ static const struct {
SH_PFC_FUNCTION(vin1),
SH_PFC_FUNCTION(vin2),
},
+#if defined(CONFIG_PINCTRL_PFC_R8A7791) || defined(CONFIG_PINCTRL_PFC_R8A7793)
.automotive = {
SH_PFC_FUNCTION(adi),
SH_PFC_FUNCTION(mlb),
}
+#endif /* CONFIG_PINCTRL_PFC_R8A7791 || CONFIG_PINCTRL_PFC_R8A7793 */
};
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
diff --git a/drivers/pinctrl/renesas/pfc-r8a77950.c b/drivers/pinctrl/renesas/pfc-r8a77950.c
index 04812e62f3a4..32b66b9999b8 100644
--- a/drivers/pinctrl/renesas/pfc-r8a77950.c
+++ b/drivers/pinctrl/renesas/pfc-r8a77950.c
@@ -5820,51 +5820,10 @@ static const struct pinmux_bias_reg pinmux_bias_regs[] = {
{ /* sentinel */ },
};
-static unsigned int r8a77950_pinmux_get_bias(struct sh_pfc *pfc,
- unsigned int pin)
-{
- const struct pinmux_bias_reg *reg;
- unsigned int bit;
-
- reg = sh_pfc_pin_to_bias_reg(pfc, pin, &bit);
- if (!reg)
- return PIN_CONFIG_BIAS_DISABLE;
-
- if (!(sh_pfc_read(pfc, reg->puen) & BIT(bit)))
- return PIN_CONFIG_BIAS_DISABLE;
- else if (sh_pfc_read(pfc, reg->pud) & BIT(bit))
- return PIN_CONFIG_BIAS_PULL_UP;
- else
- return PIN_CONFIG_BIAS_PULL_DOWN;
-}
-
-static void r8a77950_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
- unsigned int bias)
-{
- const struct pinmux_bias_reg *reg;
- u32 enable, updown;
- unsigned int bit;
-
- reg = sh_pfc_pin_to_bias_reg(pfc, pin, &bit);
- if (!reg)
- return;
-
- enable = sh_pfc_read(pfc, reg->puen) & ~BIT(bit);
- if (bias != PIN_CONFIG_BIAS_DISABLE)
- enable |= BIT(bit);
-
- updown = sh_pfc_read(pfc, reg->pud) & ~BIT(bit);
- if (bias == PIN_CONFIG_BIAS_PULL_UP)
- updown |= BIT(bit);
-
- sh_pfc_write(pfc, reg->pud, updown);
- sh_pfc_write(pfc, reg->puen, enable);
-}
-
static const struct sh_pfc_soc_operations r8a77950_pinmux_ops = {
.pin_to_pocctrl = r8a77950_pin_to_pocctrl,
- .get_bias = r8a77950_pinmux_get_bias,
- .set_bias = r8a77950_pinmux_set_bias,
+ .get_bias = rcar_pinmux_get_bias,
+ .set_bias = rcar_pinmux_set_bias,
};
const struct sh_pfc_soc_info r8a77950_pinmux_info = {
diff --git a/drivers/pinctrl/renesas/pfc-r8a77951.c b/drivers/pinctrl/renesas/pfc-r8a77951.c
index a94ebe0bf5d0..cf14420794c7 100644
--- a/drivers/pinctrl/renesas/pfc-r8a77951.c
+++ b/drivers/pinctrl/renesas/pfc-r8a77951.c
@@ -1827,6 +1827,7 @@ static const unsigned int canfd1_data_mux[] = {
CANFD1_TX_MARK, CANFD1_RX_MARK,
};
+#ifdef CONFIG_PINCTRL_PFC_R8A77951
/* - DRIF0 --------------------------------------------------------------- */
static const unsigned int drif0_ctrl_a_pins[] = {
/* CLK, SYNC */
@@ -2041,6 +2042,7 @@ static const unsigned int drif3_data1_b_pins[] = {
static const unsigned int drif3_data1_b_mux[] = {
RIF3_D1_B_MARK,
};
+#endif /* CONFIG_PINCTRL_PFC_R8A77951 */
/* - DU --------------------------------------------------------------------- */
static const unsigned int du_rgb666_pins[] = {
@@ -3250,6 +3252,57 @@ static const unsigned int pwm6_b_mux[] = {
PWM6_B_MARK,
};
+/* - QSPI0 ------------------------------------------------------------------ */
+static const unsigned int qspi0_ctrl_pins[] = {
+ /* QSPI0_SPCLK, QSPI0_SSL */
+ PIN_QSPI0_SPCLK, PIN_QSPI0_SSL,
+};
+static const unsigned int qspi0_ctrl_mux[] = {
+ QSPI0_SPCLK_MARK, QSPI0_SSL_MARK,
+};
+static const unsigned int qspi0_data2_pins[] = {
+ /* QSPI0_MOSI_IO0, QSPI0_MISO_IO1 */
+ PIN_QSPI0_MOSI_IO0, PIN_QSPI0_MISO_IO1,
+};
+static const unsigned int qspi0_data2_mux[] = {
+ QSPI0_MOSI_IO0_MARK, QSPI0_MISO_IO1_MARK,
+};
+static const unsigned int qspi0_data4_pins[] = {
+ /* QSPI0_MOSI_IO0, QSPI0_MISO_IO1 */
+ PIN_QSPI0_MOSI_IO0, PIN_QSPI0_MISO_IO1,
+ /* QSPI0_IO2, QSPI0_IO3 */
+ PIN_QSPI0_IO2, PIN_QSPI0_IO3,
+};
+static const unsigned int qspi0_data4_mux[] = {
+ QSPI0_MOSI_IO0_MARK, QSPI0_MISO_IO1_MARK,
+ QSPI0_IO2_MARK, QSPI0_IO3_MARK,
+};
+/* - QSPI1 ------------------------------------------------------------------ */
+static const unsigned int qspi1_ctrl_pins[] = {
+ /* QSPI1_SPCLK, QSPI1_SSL */
+ PIN_QSPI1_SPCLK, PIN_QSPI1_SSL,
+};
+static const unsigned int qspi1_ctrl_mux[] = {
+ QSPI1_SPCLK_MARK, QSPI1_SSL_MARK,
+};
+static const unsigned int qspi1_data2_pins[] = {
+ /* QSPI1_MOSI_IO0, QSPI1_MISO_IO1 */
+ PIN_QSPI1_MOSI_IO0, PIN_QSPI1_MISO_IO1,
+};
+static const unsigned int qspi1_data2_mux[] = {
+ QSPI1_MOSI_IO0_MARK, QSPI1_MISO_IO1_MARK,
+};
+static const unsigned int qspi1_data4_pins[] = {
+ /* QSPI1_MOSI_IO0, QSPI1_MISO_IO1 */
+ PIN_QSPI1_MOSI_IO0, PIN_QSPI1_MISO_IO1,
+ /* QSPI1_IO2, QSPI1_IO3 */
+ PIN_QSPI1_IO2, PIN_QSPI1_IO3,
+};
+static const unsigned int qspi1_data4_mux[] = {
+ QSPI1_MOSI_IO0_MARK, QSPI1_MISO_IO1_MARK,
+ QSPI1_IO2_MARK, QSPI1_IO3_MARK,
+};
+
/* - SATA --------------------------------------------------------------------*/
static const unsigned int sata0_devslp_a_pins[] = {
/* DEVSLP */
@@ -4158,8 +4211,10 @@ static const unsigned int vin5_clk_mux[] = {
};
static const struct {
- struct sh_pfc_pin_group common[320];
+ struct sh_pfc_pin_group common[326];
+#ifdef CONFIG_PINCTRL_PFC_R8A77951
struct sh_pfc_pin_group automotive[30];
+#endif
} pinmux_groups = {
.common = {
SH_PFC_PIN_GROUP(audio_clk_a_a),
@@ -4361,6 +4416,12 @@ static const struct {
SH_PFC_PIN_GROUP(pwm5_b),
SH_PFC_PIN_GROUP(pwm6_a),
SH_PFC_PIN_GROUP(pwm6_b),
+ SH_PFC_PIN_GROUP(qspi0_ctrl),
+ SH_PFC_PIN_GROUP(qspi0_data2),
+ SH_PFC_PIN_GROUP(qspi0_data4),
+ SH_PFC_PIN_GROUP(qspi1_ctrl),
+ SH_PFC_PIN_GROUP(qspi1_data2),
+ SH_PFC_PIN_GROUP(qspi1_data4),
SH_PFC_PIN_GROUP(sata0_devslp_a),
SH_PFC_PIN_GROUP(sata0_devslp_b),
SH_PFC_PIN_GROUP(scif0_data),
@@ -4483,6 +4544,7 @@ static const struct {
SH_PFC_PIN_GROUP(vin5_clkenb),
SH_PFC_PIN_GROUP(vin5_clk),
},
+#ifdef CONFIG_PINCTRL_PFC_R8A77951
.automotive = {
SH_PFC_PIN_GROUP(drif0_ctrl_a),
SH_PFC_PIN_GROUP(drif0_data0_a),
@@ -4515,7 +4577,7 @@ static const struct {
SH_PFC_PIN_GROUP(drif3_data0_b),
SH_PFC_PIN_GROUP(drif3_data1_b),
}
-
+#endif /* CONFIG_PINCTRL_PFC_R8A77951 */
};
static const char * const audio_clk_groups[] = {
@@ -4574,6 +4636,7 @@ static const char * const canfd1_groups[] = {
"canfd1_data",
};
+#ifdef CONFIG_PINCTRL_PFC_R8A77951
static const char * const drif0_groups[] = {
"drif0_ctrl_a",
"drif0_data0_a",
@@ -4615,6 +4678,7 @@ static const char * const drif3_groups[] = {
"drif3_data0_b",
"drif3_data1_b",
};
+#endif /* CONFIG_PINCTRL_PFC_R8A77951 */
static const char * const du_groups[] = {
"du_rgb666",
@@ -4852,6 +4916,18 @@ static const char * const pwm6_groups[] = {
"pwm6_b",
};
+static const char * const qspi0_groups[] = {
+ "qspi0_ctrl",
+ "qspi0_data2",
+ "qspi0_data4",
+};
+
+static const char * const qspi1_groups[] = {
+ "qspi1_ctrl",
+ "qspi1_data2",
+ "qspi1_data4",
+};
+
static const char * const sata0_groups[] = {
"sata0_devslp_a",
"sata0_devslp_b",
@@ -5040,8 +5116,10 @@ static const char * const vin5_groups[] = {
};
static const struct {
- struct sh_pfc_function common[53];
+ struct sh_pfc_function common[55];
+#ifdef CONFIG_PINCTRL_PFC_R8A77951
struct sh_pfc_function automotive[4];
+#endif
} pinmux_functions = {
.common = {
SH_PFC_FUNCTION(audio_clk),
@@ -5075,6 +5153,8 @@ static const struct {
SH_PFC_FUNCTION(pwm4),
SH_PFC_FUNCTION(pwm5),
SH_PFC_FUNCTION(pwm6),
+ SH_PFC_FUNCTION(qspi0),
+ SH_PFC_FUNCTION(qspi1),
SH_PFC_FUNCTION(sata0),
SH_PFC_FUNCTION(scif0),
SH_PFC_FUNCTION(scif1),
@@ -5098,13 +5178,14 @@ static const struct {
SH_PFC_FUNCTION(vin4),
SH_PFC_FUNCTION(vin5),
},
+#ifdef CONFIG_PINCTRL_PFC_R8A77951
.automotive = {
SH_PFC_FUNCTION(drif0),
SH_PFC_FUNCTION(drif1),
SH_PFC_FUNCTION(drif2),
SH_PFC_FUNCTION(drif3),
}
-
+#endif /* CONFIG_PINCTRL_PFC_R8A77951 */
};
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
@@ -6191,51 +6272,10 @@ static const struct pinmux_bias_reg pinmux_bias_regs[] = {
{ /* sentinel */ },
};
-static unsigned int r8a77951_pinmux_get_bias(struct sh_pfc *pfc,
- unsigned int pin)
-{
- const struct pinmux_bias_reg *reg;
- unsigned int bit;
-
- reg = sh_pfc_pin_to_bias_reg(pfc, pin, &bit);
- if (!reg)
- return PIN_CONFIG_BIAS_DISABLE;
-
- if (!(sh_pfc_read(pfc, reg->puen) & BIT(bit)))
- return PIN_CONFIG_BIAS_DISABLE;
- else if (sh_pfc_read(pfc, reg->pud) & BIT(bit))
- return PIN_CONFIG_BIAS_PULL_UP;
- else
- return PIN_CONFIG_BIAS_PULL_DOWN;
-}
-
-static void r8a77951_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
- unsigned int bias)
-{
- const struct pinmux_bias_reg *reg;
- u32 enable, updown;
- unsigned int bit;
-
- reg = sh_pfc_pin_to_bias_reg(pfc, pin, &bit);
- if (!reg)
- return;
-
- enable = sh_pfc_read(pfc, reg->puen) & ~BIT(bit);
- if (bias != PIN_CONFIG_BIAS_DISABLE)
- enable |= BIT(bit);
-
- updown = sh_pfc_read(pfc, reg->pud) & ~BIT(bit);
- if (bias == PIN_CONFIG_BIAS_PULL_UP)
- updown |= BIT(bit);
-
- sh_pfc_write(pfc, reg->pud, updown);
- sh_pfc_write(pfc, reg->puen, enable);
-}
-
static const struct sh_pfc_soc_operations r8a77951_pinmux_ops = {
.pin_to_pocctrl = r8a77951_pin_to_pocctrl,
- .get_bias = r8a77951_pinmux_get_bias,
- .set_bias = r8a77951_pinmux_set_bias,
+ .get_bias = rcar_pinmux_get_bias,
+ .set_bias = rcar_pinmux_set_bias,
};
#ifdef CONFIG_PINCTRL_PFC_R8A774E1
diff --git a/drivers/pinctrl/renesas/pfc-r8a7796.c b/drivers/pinctrl/renesas/pfc-r8a7796.c
index 55f0344a3d3e..38d963561b5f 100644
--- a/drivers/pinctrl/renesas/pfc-r8a7796.c
+++ b/drivers/pinctrl/renesas/pfc-r8a7796.c
@@ -1831,6 +1831,7 @@ static const unsigned int canfd1_data_mux[] = {
CANFD1_TX_MARK, CANFD1_RX_MARK,
};
+#if defined(CONFIG_PINCTRL_PFC_R8A77960) || defined(CONFIG_PINCTRL_PFC_R8A77961)
/* - DRIF0 --------------------------------------------------------------- */
static const unsigned int drif0_ctrl_a_pins[] = {
/* CLK, SYNC */
@@ -2045,6 +2046,7 @@ static const unsigned int drif3_data1_b_pins[] = {
static const unsigned int drif3_data1_b_mux[] = {
RIF3_D1_B_MARK,
};
+#endif /* CONFIG_PINCTRL_PFC_R8A77960 || CONFIG_PINCTRL_PFC_R8A77961 */
/* - DU --------------------------------------------------------------------- */
static const unsigned int du_rgb666_pins[] = {
@@ -3255,6 +3257,57 @@ static const unsigned int pwm6_b_mux[] = {
PWM6_B_MARK,
};
+/* - QSPI0 ------------------------------------------------------------------ */
+static const unsigned int qspi0_ctrl_pins[] = {
+ /* QSPI0_SPCLK, QSPI0_SSL */
+ PIN_QSPI0_SPCLK, PIN_QSPI0_SSL,
+};
+static const unsigned int qspi0_ctrl_mux[] = {
+ QSPI0_SPCLK_MARK, QSPI0_SSL_MARK,
+};
+static const unsigned int qspi0_data2_pins[] = {
+ /* QSPI0_MOSI_IO0, QSPI0_MISO_IO1 */
+ PIN_QSPI0_MOSI_IO0, PIN_QSPI0_MISO_IO1,
+};
+static const unsigned int qspi0_data2_mux[] = {
+ QSPI0_MOSI_IO0_MARK, QSPI0_MISO_IO1_MARK,
+};
+static const unsigned int qspi0_data4_pins[] = {
+ /* QSPI0_MOSI_IO0, QSPI0_MISO_IO1 */
+ PIN_QSPI0_MOSI_IO0, PIN_QSPI0_MISO_IO1,
+ /* QSPI0_IO2, QSPI0_IO3 */
+ PIN_QSPI0_IO2, PIN_QSPI0_IO3,
+};
+static const unsigned int qspi0_data4_mux[] = {
+ QSPI0_MOSI_IO0_MARK, QSPI0_MISO_IO1_MARK,
+ QSPI0_IO2_MARK, QSPI0_IO3_MARK,
+};
+/* - QSPI1 ------------------------------------------------------------------ */
+static const unsigned int qspi1_ctrl_pins[] = {
+ /* QSPI1_SPCLK, QSPI1_SSL */
+ PIN_QSPI1_SPCLK, PIN_QSPI1_SSL,
+};
+static const unsigned int qspi1_ctrl_mux[] = {
+ QSPI1_SPCLK_MARK, QSPI1_SSL_MARK,
+};
+static const unsigned int qspi1_data2_pins[] = {
+ /* QSPI1_MOSI_IO0, QSPI1_MISO_IO1 */
+ PIN_QSPI1_MOSI_IO0, PIN_QSPI1_MISO_IO1,
+};
+static const unsigned int qspi1_data2_mux[] = {
+ QSPI1_MOSI_IO0_MARK, QSPI1_MISO_IO1_MARK,
+};
+static const unsigned int qspi1_data4_pins[] = {
+ /* QSPI1_MOSI_IO0, QSPI1_MISO_IO1 */
+ PIN_QSPI1_MOSI_IO0, PIN_QSPI1_MISO_IO1,
+ /* QSPI1_IO2, QSPI1_IO3 */
+ PIN_QSPI1_IO2, PIN_QSPI1_IO3,
+};
+static const unsigned int qspi1_data4_mux[] = {
+ QSPI1_MOSI_IO0_MARK, QSPI1_MISO_IO1_MARK,
+ QSPI1_IO2_MARK, QSPI1_IO3_MARK,
+};
+
/* - SCIF0 ------------------------------------------------------------------ */
static const unsigned int scif0_data_pins[] = {
/* RX, TX */
@@ -4132,8 +4185,10 @@ static const unsigned int vin5_clk_mux[] = {
};
static const struct {
- struct sh_pfc_pin_group common[316];
+ struct sh_pfc_pin_group common[322];
+#if defined(CONFIG_PINCTRL_PFC_R8A77960) || defined(CONFIG_PINCTRL_PFC_R8A77961)
struct sh_pfc_pin_group automotive[30];
+#endif
} pinmux_groups = {
.common = {
SH_PFC_PIN_GROUP(audio_clk_a_a),
@@ -4335,6 +4390,12 @@ static const struct {
SH_PFC_PIN_GROUP(pwm5_b),
SH_PFC_PIN_GROUP(pwm6_a),
SH_PFC_PIN_GROUP(pwm6_b),
+ SH_PFC_PIN_GROUP(qspi0_ctrl),
+ SH_PFC_PIN_GROUP(qspi0_data2),
+ SH_PFC_PIN_GROUP(qspi0_data4),
+ SH_PFC_PIN_GROUP(qspi1_ctrl),
+ SH_PFC_PIN_GROUP(qspi1_data2),
+ SH_PFC_PIN_GROUP(qspi1_data4),
SH_PFC_PIN_GROUP(scif0_data),
SH_PFC_PIN_GROUP(scif0_clk),
SH_PFC_PIN_GROUP(scif0_ctrl),
@@ -4453,6 +4514,7 @@ static const struct {
SH_PFC_PIN_GROUP(vin5_clkenb),
SH_PFC_PIN_GROUP(vin5_clk),
},
+#if defined(CONFIG_PINCTRL_PFC_R8A77960) || defined(CONFIG_PINCTRL_PFC_R8A77961)
.automotive = {
SH_PFC_PIN_GROUP(drif0_ctrl_a),
SH_PFC_PIN_GROUP(drif0_data0_a),
@@ -4485,6 +4547,7 @@ static const struct {
SH_PFC_PIN_GROUP(drif3_data0_b),
SH_PFC_PIN_GROUP(drif3_data1_b),
}
+#endif /* CONFIG_PINCTRL_PFC_R8A77960 || CONFIG_PINCTRL_PFC_R8A77961 */
};
static const char * const audio_clk_groups[] = {
@@ -4543,6 +4606,7 @@ static const char * const canfd1_groups[] = {
"canfd1_data",
};
+#if defined(CONFIG_PINCTRL_PFC_R8A77960) || defined(CONFIG_PINCTRL_PFC_R8A77961)
static const char * const drif0_groups[] = {
"drif0_ctrl_a",
"drif0_data0_a",
@@ -4584,6 +4648,7 @@ static const char * const drif3_groups[] = {
"drif3_data0_b",
"drif3_data1_b",
};
+#endif /* CONFIG_PINCTRL_PFC_R8A77960 || CONFIG_PINCTRL_PFC_R8A77961 */
static const char * const du_groups[] = {
"du_rgb666",
@@ -4821,6 +4886,18 @@ static const char * const pwm6_groups[] = {
"pwm6_b",
};
+static const char * const qspi0_groups[] = {
+ "qspi0_ctrl",
+ "qspi0_data2",
+ "qspi0_data4",
+};
+
+static const char * const qspi1_groups[] = {
+ "qspi1_ctrl",
+ "qspi1_data2",
+ "qspi1_data4",
+};
+
static const char * const scif0_groups[] = {
"scif0_data",
"scif0_clk",
@@ -4996,8 +5073,10 @@ static const char * const vin5_groups[] = {
};
static const struct {
- struct sh_pfc_function common[50];
+ struct sh_pfc_function common[52];
+#if defined(CONFIG_PINCTRL_PFC_R8A77960) || defined(CONFIG_PINCTRL_PFC_R8A77961)
struct sh_pfc_function automotive[4];
+#endif
} pinmux_functions = {
.common = {
SH_PFC_FUNCTION(audio_clk),
@@ -5031,6 +5110,8 @@ static const struct {
SH_PFC_FUNCTION(pwm4),
SH_PFC_FUNCTION(pwm5),
SH_PFC_FUNCTION(pwm6),
+ SH_PFC_FUNCTION(qspi0),
+ SH_PFC_FUNCTION(qspi1),
SH_PFC_FUNCTION(scif0),
SH_PFC_FUNCTION(scif1),
SH_PFC_FUNCTION(scif2),
@@ -5051,12 +5132,14 @@ static const struct {
SH_PFC_FUNCTION(vin4),
SH_PFC_FUNCTION(vin5),
},
+#if defined(CONFIG_PINCTRL_PFC_R8A77960) || defined(CONFIG_PINCTRL_PFC_R8A77961)
.automotive = {
SH_PFC_FUNCTION(drif0),
SH_PFC_FUNCTION(drif1),
SH_PFC_FUNCTION(drif2),
SH_PFC_FUNCTION(drif3),
}
+#endif /* CONFIG_PINCTRL_PFC_R8A77960 || CONFIG_PINCTRL_PFC_R8A77961 */
};
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
@@ -6138,51 +6221,10 @@ static const struct pinmux_bias_reg pinmux_bias_regs[] = {
{ /* sentinel */ },
};
-static unsigned int r8a7796_pinmux_get_bias(struct sh_pfc *pfc,
- unsigned int pin)
-{
- const struct pinmux_bias_reg *reg;
- unsigned int bit;
-
- reg = sh_pfc_pin_to_bias_reg(pfc, pin, &bit);
- if (!reg)
- return PIN_CONFIG_BIAS_DISABLE;
-
- if (!(sh_pfc_read(pfc, reg->puen) & BIT(bit)))
- return PIN_CONFIG_BIAS_DISABLE;
- else if (sh_pfc_read(pfc, reg->pud) & BIT(bit))
- return PIN_CONFIG_BIAS_PULL_UP;
- else
- return PIN_CONFIG_BIAS_PULL_DOWN;
-}
-
-static void r8a7796_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
- unsigned int bias)
-{
- const struct pinmux_bias_reg *reg;
- u32 enable, updown;
- unsigned int bit;
-
- reg = sh_pfc_pin_to_bias_reg(pfc, pin, &bit);
- if (!reg)
- return;
-
- enable = sh_pfc_read(pfc, reg->puen) & ~BIT(bit);
- if (bias != PIN_CONFIG_BIAS_DISABLE)
- enable |= BIT(bit);
-
- updown = sh_pfc_read(pfc, reg->pud) & ~BIT(bit);
- if (bias == PIN_CONFIG_BIAS_PULL_UP)
- updown |= BIT(bit);
-
- sh_pfc_write(pfc, reg->pud, updown);
- sh_pfc_write(pfc, reg->puen, enable);
-}
-
static const struct sh_pfc_soc_operations r8a7796_pinmux_ops = {
.pin_to_pocctrl = r8a7796_pin_to_pocctrl,
- .get_bias = r8a7796_pinmux_get_bias,
- .set_bias = r8a7796_pinmux_set_bias,
+ .get_bias = rcar_pinmux_get_bias,
+ .set_bias = rcar_pinmux_set_bias,
};
#ifdef CONFIG_PINCTRL_PFC_R8A774A1
diff --git a/drivers/pinctrl/renesas/pfc-r8a77965.c b/drivers/pinctrl/renesas/pfc-r8a77965.c
index 7a50b9b69a7d..92f231baff7d 100644
--- a/drivers/pinctrl/renesas/pfc-r8a77965.c
+++ b/drivers/pinctrl/renesas/pfc-r8a77965.c
@@ -1847,6 +1847,7 @@ static const unsigned int canfd1_data_mux[] = {
CANFD1_TX_MARK, CANFD1_RX_MARK,
};
+#ifdef CONFIG_PINCTRL_PFC_R8A77965
/* - DRIF0 --------------------------------------------------------------- */
static const unsigned int drif0_ctrl_a_pins[] = {
/* CLK, SYNC */
@@ -2120,6 +2121,7 @@ static const unsigned int drif3_data1_b_pins[] = {
static const unsigned int drif3_data1_b_mux[] = {
RIF3_D1_B_MARK,
};
+#endif /* CONFIG_PINCTRL_PFC_R8A77965 */
/* - DU --------------------------------------------------------------------- */
static const unsigned int du_rgb666_pins[] = {
@@ -3406,6 +3408,57 @@ static const unsigned int pwm6_b_mux[] = {
PWM6_B_MARK,
};
+/* - QSPI0 ------------------------------------------------------------------ */
+static const unsigned int qspi0_ctrl_pins[] = {
+ /* QSPI0_SPCLK, QSPI0_SSL */
+ PIN_QSPI0_SPCLK, PIN_QSPI0_SSL,
+};
+static const unsigned int qspi0_ctrl_mux[] = {
+ QSPI0_SPCLK_MARK, QSPI0_SSL_MARK,
+};
+static const unsigned int qspi0_data2_pins[] = {
+ /* QSPI0_MOSI_IO0, QSPI0_MISO_IO1 */
+ PIN_QSPI0_MOSI_IO0, PIN_QSPI0_MISO_IO1,
+};
+static const unsigned int qspi0_data2_mux[] = {
+ QSPI0_MOSI_IO0_MARK, QSPI0_MISO_IO1_MARK,
+};
+static const unsigned int qspi0_data4_pins[] = {
+ /* QSPI0_MOSI_IO0, QSPI0_MISO_IO1 */
+ PIN_QSPI0_MOSI_IO0, PIN_QSPI0_MISO_IO1,
+ /* QSPI0_IO2, QSPI0_IO3 */
+ PIN_QSPI0_IO2, PIN_QSPI0_IO3,
+};
+static const unsigned int qspi0_data4_mux[] = {
+ QSPI0_MOSI_IO0_MARK, QSPI0_MISO_IO1_MARK,
+ QSPI0_IO2_MARK, QSPI0_IO3_MARK,
+};
+/* - QSPI1 ------------------------------------------------------------------ */
+static const unsigned int qspi1_ctrl_pins[] = {
+ /* QSPI1_SPCLK, QSPI1_SSL */
+ PIN_QSPI1_SPCLK, PIN_QSPI1_SSL,
+};
+static const unsigned int qspi1_ctrl_mux[] = {
+ QSPI1_SPCLK_MARK, QSPI1_SSL_MARK,
+};
+static const unsigned int qspi1_data2_pins[] = {
+ /* QSPI1_MOSI_IO0, QSPI1_MISO_IO1 */
+ PIN_QSPI1_MOSI_IO0, PIN_QSPI1_MISO_IO1,
+};
+static const unsigned int qspi1_data2_mux[] = {
+ QSPI1_MOSI_IO0_MARK, QSPI1_MISO_IO1_MARK,
+};
+static const unsigned int qspi1_data4_pins[] = {
+ /* QSPI1_MOSI_IO0, QSPI1_MISO_IO1 */
+ PIN_QSPI1_MOSI_IO0, PIN_QSPI1_MISO_IO1,
+ /* QSPI1_IO2, QSPI1_IO3 */
+ PIN_QSPI1_IO2, PIN_QSPI1_IO3,
+};
+static const unsigned int qspi1_data4_mux[] = {
+ QSPI1_MOSI_IO0_MARK, QSPI1_MISO_IO1_MARK,
+ QSPI1_IO2_MARK, QSPI1_IO3_MARK,
+};
+
/* - SATA --------------------------------------------------------------------*/
static const unsigned int sata0_devslp_a_pins[] = {
/* DEVSLP */
@@ -4379,8 +4432,10 @@ static const unsigned int vin5_clk_mux[] = {
};
static const struct {
- struct sh_pfc_pin_group common[318];
+ struct sh_pfc_pin_group common[324];
+#ifdef CONFIG_PINCTRL_PFC_R8A77965
struct sh_pfc_pin_group automotive[30];
+#endif
} pinmux_groups = {
.common = {
SH_PFC_PIN_GROUP(audio_clk_a_a),
@@ -4582,6 +4637,12 @@ static const struct {
SH_PFC_PIN_GROUP(pwm5_b),
SH_PFC_PIN_GROUP(pwm6_a),
SH_PFC_PIN_GROUP(pwm6_b),
+ SH_PFC_PIN_GROUP(qspi0_ctrl),
+ SH_PFC_PIN_GROUP(qspi0_data2),
+ SH_PFC_PIN_GROUP(qspi0_data4),
+ SH_PFC_PIN_GROUP(qspi1_ctrl),
+ SH_PFC_PIN_GROUP(qspi1_data2),
+ SH_PFC_PIN_GROUP(qspi1_data4),
SH_PFC_PIN_GROUP(sata0_devslp_a),
SH_PFC_PIN_GROUP(sata0_devslp_b),
SH_PFC_PIN_GROUP(scif0_data),
@@ -4702,6 +4763,7 @@ static const struct {
SH_PFC_PIN_GROUP(vin5_clkenb),
SH_PFC_PIN_GROUP(vin5_clk),
},
+#ifdef CONFIG_PINCTRL_PFC_R8A77965
.automotive = {
SH_PFC_PIN_GROUP(drif0_ctrl_a),
SH_PFC_PIN_GROUP(drif0_data0_a),
@@ -4734,6 +4796,7 @@ static const struct {
SH_PFC_PIN_GROUP(drif3_data0_b),
SH_PFC_PIN_GROUP(drif3_data1_b),
}
+#endif /* CONFIG_PINCTRL_PFC_R8A77965 */
};
static const char * const audio_clk_groups[] = {
@@ -4792,6 +4855,7 @@ static const char * const canfd1_groups[] = {
"canfd1_data",
};
+#ifdef CONFIG_PINCTRL_PFC_R8A77965
static const char * const drif0_groups[] = {
"drif0_ctrl_a",
"drif0_data0_a",
@@ -4833,6 +4897,7 @@ static const char * const drif3_groups[] = {
"drif3_data0_b",
"drif3_data1_b",
};
+#endif /* CONFIG_PINCTRL_PFC_R8A77965 */
static const char * const du_groups[] = {
"du_rgb666",
@@ -5070,6 +5135,18 @@ static const char * const pwm6_groups[] = {
"pwm6_b",
};
+static const char * const qspi0_groups[] = {
+ "qspi0_ctrl",
+ "qspi0_data2",
+ "qspi0_data4",
+};
+
+static const char * const qspi1_groups[] = {
+ "qspi1_ctrl",
+ "qspi1_data2",
+ "qspi1_data4",
+};
+
static const char * const sata0_groups[] = {
"sata0_devslp_a",
"sata0_devslp_b",
@@ -5249,8 +5326,10 @@ static const char * const vin5_groups[] = {
};
static const struct {
- struct sh_pfc_function common[51];
+ struct sh_pfc_function common[53];
+#ifdef CONFIG_PINCTRL_PFC_R8A77965
struct sh_pfc_function automotive[4];
+#endif
} pinmux_functions = {
.common = {
SH_PFC_FUNCTION(audio_clk),
@@ -5284,6 +5363,8 @@ static const struct {
SH_PFC_FUNCTION(pwm4),
SH_PFC_FUNCTION(pwm5),
SH_PFC_FUNCTION(pwm6),
+ SH_PFC_FUNCTION(qspi0),
+ SH_PFC_FUNCTION(qspi1),
SH_PFC_FUNCTION(sata0),
SH_PFC_FUNCTION(scif0),
SH_PFC_FUNCTION(scif1),
@@ -5305,12 +5386,14 @@ static const struct {
SH_PFC_FUNCTION(vin4),
SH_PFC_FUNCTION(vin5),
},
+#ifdef CONFIG_PINCTRL_PFC_R8A77965
.automotive = {
SH_PFC_FUNCTION(drif0),
SH_PFC_FUNCTION(drif1),
SH_PFC_FUNCTION(drif2),
SH_PFC_FUNCTION(drif3),
}
+#endif /* CONFIG_PINCTRL_PFC_R8A77965 */
};
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
@@ -6392,51 +6475,10 @@ static const struct pinmux_bias_reg pinmux_bias_regs[] = {
{ /* sentinel */ },
};
-static unsigned int r8a77965_pinmux_get_bias(struct sh_pfc *pfc,
- unsigned int pin)
-{
- const struct pinmux_bias_reg *reg;
- unsigned int bit;
-
- reg = sh_pfc_pin_to_bias_reg(pfc, pin, &bit);
- if (!reg)
- return PIN_CONFIG_BIAS_DISABLE;
-
- if (!(sh_pfc_read(pfc, reg->puen) & BIT(bit)))
- return PIN_CONFIG_BIAS_DISABLE;
- else if (sh_pfc_read(pfc, reg->pud) & BIT(bit))
- return PIN_CONFIG_BIAS_PULL_UP;
- else
- return PIN_CONFIG_BIAS_PULL_DOWN;
-}
-
-static void r8a77965_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
- unsigned int bias)
-{
- const struct pinmux_bias_reg *reg;
- u32 enable, updown;
- unsigned int bit;
-
- reg = sh_pfc_pin_to_bias_reg(pfc, pin, &bit);
- if (!reg)
- return;
-
- enable = sh_pfc_read(pfc, reg->puen) & ~BIT(bit);
- if (bias != PIN_CONFIG_BIAS_DISABLE)
- enable |= BIT(bit);
-
- updown = sh_pfc_read(pfc, reg->pud) & ~BIT(bit);
- if (bias == PIN_CONFIG_BIAS_PULL_UP)
- updown |= BIT(bit);
-
- sh_pfc_write(pfc, reg->pud, updown);
- sh_pfc_write(pfc, reg->puen, enable);
-}
-
static const struct sh_pfc_soc_operations r8a77965_pinmux_ops = {
.pin_to_pocctrl = r8a77965_pin_to_pocctrl,
- .get_bias = r8a77965_pinmux_get_bias,
- .set_bias = r8a77965_pinmux_set_bias,
+ .get_bias = rcar_pinmux_get_bias,
+ .set_bias = rcar_pinmux_set_bias,
};
#ifdef CONFIG_PINCTRL_PFC_R8A774B1
diff --git a/drivers/pinctrl/renesas/pfc-r8a77990.c b/drivers/pinctrl/renesas/pfc-r8a77990.c
index aed04a4c6116..0a32e3c317c1 100644
--- a/drivers/pinctrl/renesas/pfc-r8a77990.c
+++ b/drivers/pinctrl/renesas/pfc-r8a77990.c
@@ -1593,6 +1593,7 @@ static const unsigned int canfd1_data_mux[] = {
CANFD1_TX_MARK, CANFD1_RX_MARK,
};
+#ifdef CONFIG_PINCTRL_PFC_R8A77990
/* - DRIF0 --------------------------------------------------------------- */
static const unsigned int drif0_ctrl_a_pins[] = {
/* CLK, SYNC */
@@ -1785,6 +1786,7 @@ static const unsigned int drif3_data1_b_pins[] = {
static const unsigned int drif3_data1_b_mux[] = {
RIF3_D1_B_MARK,
};
+#endif /* CONFIG_PINCTRL_PFC_R8A77990 */
/* - DU --------------------------------------------------------------------- */
static const unsigned int du_rgb666_pins[] = {
@@ -2808,6 +2810,57 @@ static const unsigned int pwm6_b_mux[] = {
PWM6_B_MARK,
};
+/* - QSPI0 ------------------------------------------------------------------ */
+static const unsigned int qspi0_ctrl_pins[] = {
+ /* QSPI0_SPCLK, QSPI0_SSL */
+ RCAR_GP_PIN(2, 0), RCAR_GP_PIN(2, 5),
+};
+static const unsigned int qspi0_ctrl_mux[] = {
+ QSPI0_SPCLK_MARK, QSPI0_SSL_MARK,
+};
+static const unsigned int qspi0_data2_pins[] = {
+ /* QSPI0_MOSI_IO0, QSPI0_MISO_IO1 */
+ RCAR_GP_PIN(2, 1), RCAR_GP_PIN(2, 2),
+};
+static const unsigned int qspi0_data2_mux[] = {
+ QSPI0_MOSI_IO0_MARK, QSPI0_MISO_IO1_MARK,
+};
+static const unsigned int qspi0_data4_pins[] = {
+ /* QSPI0_MOSI_IO0, QSPI0_MISO_IO1 */
+ RCAR_GP_PIN(2, 1), RCAR_GP_PIN(2, 2),
+ /* QSPI0_IO2, QSPI0_IO3 */
+ RCAR_GP_PIN(2, 3), RCAR_GP_PIN(2, 4),
+};
+static const unsigned int qspi0_data4_mux[] = {
+ QSPI0_MOSI_IO0_MARK, QSPI0_MISO_IO1_MARK,
+ QSPI0_IO2_MARK, QSPI0_IO3_MARK,
+};
+/* - QSPI1 ------------------------------------------------------------------ */
+static const unsigned int qspi1_ctrl_pins[] = {
+ /* QSPI1_SPCLK, QSPI1_SSL */
+ RCAR_GP_PIN(2, 6), RCAR_GP_PIN(2, 11),
+};
+static const unsigned int qspi1_ctrl_mux[] = {
+ QSPI1_SPCLK_MARK, QSPI1_SSL_MARK,
+};
+static const unsigned int qspi1_data2_pins[] = {
+ /* QSPI1_MOSI_IO0, QSPI1_MISO_IO1 */
+ RCAR_GP_PIN(2, 7), RCAR_GP_PIN(2, 8),
+};
+static const unsigned int qspi1_data2_mux[] = {
+ QSPI1_MOSI_IO0_MARK, QSPI1_MISO_IO1_MARK,
+};
+static const unsigned int qspi1_data4_pins[] = {
+ /* QSPI1_MOSI_IO0, QSPI1_MISO_IO1 */
+ RCAR_GP_PIN(2, 7), RCAR_GP_PIN(2, 8),
+ /* QSPI1_IO2, QSPI1_IO3 */
+ RCAR_GP_PIN(2, 9), RCAR_GP_PIN(2, 10),
+};
+static const unsigned int qspi1_data4_mux[] = {
+ QSPI1_MOSI_IO0_MARK, QSPI1_MISO_IO1_MARK,
+ QSPI1_IO2_MARK, QSPI1_IO3_MARK,
+};
+
/* - SCIF0 ------------------------------------------------------------------ */
static const unsigned int scif0_data_a_pins[] = {
/* RX, TX */
@@ -3760,8 +3813,10 @@ static const unsigned int vin5_clk_b_mux[] = {
};
static const struct {
- struct sh_pfc_pin_group common[247];
+ struct sh_pfc_pin_group common[253];
+#ifdef CONFIG_PINCTRL_PFC_R8A77990
struct sh_pfc_pin_group automotive[21];
+#endif
} pinmux_groups = {
.common = {
SH_PFC_PIN_GROUP(audio_clk_a),
@@ -3906,6 +3961,12 @@ static const struct {
SH_PFC_PIN_GROUP(pwm5_b),
SH_PFC_PIN_GROUP(pwm6_a),
SH_PFC_PIN_GROUP(pwm6_b),
+ SH_PFC_PIN_GROUP(qspi0_ctrl),
+ SH_PFC_PIN_GROUP(qspi0_data2),
+ SH_PFC_PIN_GROUP(qspi0_data4),
+ SH_PFC_PIN_GROUP(qspi1_ctrl),
+ SH_PFC_PIN_GROUP(qspi1_data2),
+ SH_PFC_PIN_GROUP(qspi1_data4),
SH_PFC_PIN_GROUP(scif0_data_a),
SH_PFC_PIN_GROUP(scif0_clk_a),
SH_PFC_PIN_GROUP(scif0_ctrl_a),
@@ -4012,6 +4073,7 @@ static const struct {
SH_PFC_PIN_GROUP(vin5_clk_a),
SH_PFC_PIN_GROUP(vin5_clk_b),
},
+#ifdef CONFIG_PINCTRL_PFC_R8A77990
.automotive = {
SH_PFC_PIN_GROUP(drif0_ctrl_a),
SH_PFC_PIN_GROUP(drif0_data0_a),
@@ -4035,6 +4097,7 @@ static const struct {
SH_PFC_PIN_GROUP(drif3_data0_b),
SH_PFC_PIN_GROUP(drif3_data1_b),
}
+#endif /* CONFIG_PINCTRL_PFC_R8A77990 */
};
static const char * const audio_clk_groups[] = {
@@ -4088,6 +4151,7 @@ static const char * const canfd1_groups[] = {
"canfd1_data",
};
+#ifdef CONFIG_PINCTRL_PFC_R8A77990
static const char * const drif0_groups[] = {
"drif0_ctrl_a",
"drif0_data0_a",
@@ -4120,6 +4184,7 @@ static const char * const drif3_groups[] = {
"drif3_data0_b",
"drif3_data1_b",
};
+#endif /* CONFIG_PINCTRL_PFC_R8A77990 */
static const char * const du_groups[] = {
"du_rgb666",
@@ -4305,6 +4370,18 @@ static const char * const pwm6_groups[] = {
"pwm6_b",
};
+static const char * const qspi0_groups[] = {
+ "qspi0_ctrl",
+ "qspi0_data2",
+ "qspi0_data4",
+};
+
+static const char * const qspi1_groups[] = {
+ "qspi1_ctrl",
+ "qspi1_data2",
+ "qspi1_data4",
+};
+
static const char * const scif0_groups[] = {
"scif0_data_a",
"scif0_clk_a",
@@ -4459,8 +4536,10 @@ static const char * const vin5_groups[] = {
};
static const struct {
- struct sh_pfc_function common[47];
+ struct sh_pfc_function common[49];
+#ifdef CONFIG_PINCTRL_PFC_R8A77990
struct sh_pfc_function automotive[4];
+#endif
} pinmux_functions = {
.common = {
SH_PFC_FUNCTION(audio_clk),
@@ -4494,6 +4573,8 @@ static const struct {
SH_PFC_FUNCTION(pwm4),
SH_PFC_FUNCTION(pwm5),
SH_PFC_FUNCTION(pwm6),
+ SH_PFC_FUNCTION(qspi0),
+ SH_PFC_FUNCTION(qspi1),
SH_PFC_FUNCTION(scif0),
SH_PFC_FUNCTION(scif1),
SH_PFC_FUNCTION(scif2),
@@ -4511,12 +4592,14 @@ static const struct {
SH_PFC_FUNCTION(vin4),
SH_PFC_FUNCTION(vin5),
},
+#ifdef CONFIG_PINCTRL_PFC_R8A77990
.automotive = {
SH_PFC_FUNCTION(drif0),
SH_PFC_FUNCTION(drif1),
SH_PFC_FUNCTION(drif2),
SH_PFC_FUNCTION(drif3),
}
+#endif /* CONFIG_PINCTRL_PFC_R8A77990 */
};
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
@@ -5225,51 +5308,10 @@ static const struct pinmux_bias_reg pinmux_bias_regs[] = {
{ /* sentinel */ },
};
-static unsigned int r8a77990_pinmux_get_bias(struct sh_pfc *pfc,
- unsigned int pin)
-{
- const struct pinmux_bias_reg *reg;
- unsigned int bit;
-
- reg = sh_pfc_pin_to_bias_reg(pfc, pin, &bit);
- if (!reg)
- return PIN_CONFIG_BIAS_DISABLE;
-
- if (!(sh_pfc_read(pfc, reg->puen) & BIT(bit)))
- return PIN_CONFIG_BIAS_DISABLE;
- else if (sh_pfc_read(pfc, reg->pud) & BIT(bit))
- return PIN_CONFIG_BIAS_PULL_UP;
- else
- return PIN_CONFIG_BIAS_PULL_DOWN;
-}
-
-static void r8a77990_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
- unsigned int bias)
-{
- const struct pinmux_bias_reg *reg;
- u32 enable, updown;
- unsigned int bit;
-
- reg = sh_pfc_pin_to_bias_reg(pfc, pin, &bit);
- if (!reg)
- return;
-
- enable = sh_pfc_read(pfc, reg->puen) & ~BIT(bit);
- if (bias != PIN_CONFIG_BIAS_DISABLE)
- enable |= BIT(bit);
-
- updown = sh_pfc_read(pfc, reg->pud) & ~BIT(bit);
- if (bias == PIN_CONFIG_BIAS_PULL_UP)
- updown |= BIT(bit);
-
- sh_pfc_write(pfc, reg->pud, updown);
- sh_pfc_write(pfc, reg->puen, enable);
-}
-
static const struct sh_pfc_soc_operations r8a77990_pinmux_ops = {
.pin_to_pocctrl = r8a77990_pin_to_pocctrl,
- .get_bias = r8a77990_pinmux_get_bias,
- .set_bias = r8a77990_pinmux_set_bias,
+ .get_bias = rcar_pinmux_get_bias,
+ .set_bias = rcar_pinmux_set_bias,
};
#ifdef CONFIG_PINCTRL_PFC_R8A774C0
diff --git a/drivers/pinctrl/renesas/pfc-sh73a0.c b/drivers/pinctrl/renesas/pfc-sh73a0.c
index afabd95105d5..96b91e95b1e1 100644
--- a/drivers/pinctrl/renesas/pfc-sh73a0.c
+++ b/drivers/pinctrl/renesas/pfc-sh73a0.c
@@ -4279,7 +4279,7 @@ static int sh73a0_vccq_mc0_get_voltage(struct regulator_dev *reg)
return 3300000;
}
-static struct regulator_ops sh73a0_vccq_mc0_ops = {
+static const struct regulator_ops sh73a0_vccq_mc0_ops = {
.enable = sh73a0_vccq_mc0_enable,
.disable = sh73a0_vccq_mc0_disable,
.is_enabled = sh73a0_vccq_mc0_is_enabled,
diff --git a/drivers/pinctrl/renesas/pinctrl-rza1.c b/drivers/pinctrl/renesas/pinctrl-rza1.c
index 15dd007700c2..10020fe302b8 100644
--- a/drivers/pinctrl/renesas/pinctrl-rza1.c
+++ b/drivers/pinctrl/renesas/pinctrl-rza1.c
@@ -931,6 +931,7 @@ static int rza1_parse_pinmux_node(struct rza1_pinctrl *rza1_pctl,
case PIN_CONFIG_OUTPUT: /* for DT backwards compatibility */
case PIN_CONFIG_OUTPUT_ENABLE:
pinmux_flags |= MUX_FLAGS_SWIO_OUTPUT;
+ break;
default:
break;
diff --git a/drivers/pinctrl/renesas/pinctrl.c b/drivers/pinctrl/renesas/pinctrl.c
index 212a4a9c3a8f..ac542d278a38 100644
--- a/drivers/pinctrl/renesas/pinctrl.c
+++ b/drivers/pinctrl/renesas/pinctrl.c
@@ -26,9 +26,8 @@
#include "../pinconf.h"
struct sh_pfc_pin_config {
- unsigned int mux_mark;
- bool mux_set;
- bool gpio_enabled;
+ u16 gpio_enabled:1;
+ u16 mux_mark:15;
};
struct sh_pfc_pinctrl {
@@ -371,12 +370,11 @@ static int sh_pfc_func_set_mux(struct pinctrl_dev *pctldev, unsigned selector,
goto done;
}
- /* All group pins are configured, mark the pins as mux_set */
+ /* All group pins are configured, mark the pins as muxed */
for (i = 0; i < grp->nr_pins; ++i) {
int idx = sh_pfc_get_pin_index(pfc, grp->pins[i]);
struct sh_pfc_pin_config *cfg = &pmx->configs[idx];
- cfg->mux_set = true;
cfg->mux_mark = grp->mux[i];
}
@@ -399,7 +397,7 @@ static int sh_pfc_gpio_request_enable(struct pinctrl_dev *pctldev,
spin_lock_irqsave(&pfc->lock, flags);
if (!pfc->gpio) {
- /* If GPIOs are handled externally the pin mux type need to be
+ /* If GPIOs are handled externally the pin mux type needs to be
* set to GPIO here.
*/
const struct sh_pfc_pin *pin = &pfc->info->pins[idx];
@@ -432,11 +430,12 @@ static void sh_pfc_gpio_disable_free(struct pinctrl_dev *pctldev,
spin_lock_irqsave(&pfc->lock, flags);
cfg->gpio_enabled = false;
/* If mux is already set, this configures it here */
- if (cfg->mux_set)
+ if (cfg->mux_mark)
sh_pfc_config_mux(pfc, cfg->mux_mark, PINMUX_TYPE_FUNCTION);
spin_unlock_irqrestore(&pfc->lock, flags);
}
+#ifdef CONFIG_PINCTRL_SH_PFC_GPIO
static int sh_pfc_gpio_set_direction(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
unsigned offset, bool input)
@@ -450,8 +449,8 @@ static int sh_pfc_gpio_set_direction(struct pinctrl_dev *pctldev,
unsigned int dir;
int ret;
- /* Check if the requested direction is supported by the pin. Not all SoC
- * provide pin config data, so perform the check conditionally.
+ /* Check if the requested direction is supported by the pin. Not all
+ * SoCs provide pin config data, so perform the check conditionally.
*/
if (pin->configs) {
dir = input ? SH_PFC_PIN_CFG_INPUT : SH_PFC_PIN_CFG_OUTPUT;
@@ -460,15 +459,13 @@ static int sh_pfc_gpio_set_direction(struct pinctrl_dev *pctldev,
}
spin_lock_irqsave(&pfc->lock, flags);
-
ret = sh_pfc_config_mux(pfc, pin->enum_id, new_type);
- if (ret < 0)
- goto done;
-
-done:
spin_unlock_irqrestore(&pfc->lock, flags);
return ret;
}
+#else
+#define sh_pfc_gpio_set_direction NULL
+#endif
static const struct pinmux_ops sh_pfc_pinmux_ops = {
.get_functions_count = sh_pfc_get_functions_count,
@@ -830,3 +827,46 @@ int sh_pfc_register_pinctrl(struct sh_pfc *pfc)
return pinctrl_enable(pmx->pctl);
}
+
+unsigned int rcar_pinmux_get_bias(struct sh_pfc *pfc, unsigned int pin)
+{
+ const struct pinmux_bias_reg *reg;
+ unsigned int bit;
+
+ reg = sh_pfc_pin_to_bias_reg(pfc, pin, &bit);
+ if (!reg)
+ return PIN_CONFIG_BIAS_DISABLE;
+
+ if (!(sh_pfc_read(pfc, reg->puen) & BIT(bit)))
+ return PIN_CONFIG_BIAS_DISABLE;
+ else if (!reg->pud || (sh_pfc_read(pfc, reg->pud) & BIT(bit)))
+ return PIN_CONFIG_BIAS_PULL_UP;
+ else
+ return PIN_CONFIG_BIAS_PULL_DOWN;
+}
+
+void rcar_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
+ unsigned int bias)
+{
+ const struct pinmux_bias_reg *reg;
+ u32 enable, updown;
+ unsigned int bit;
+
+ reg = sh_pfc_pin_to_bias_reg(pfc, pin, &bit);
+ if (!reg)
+ return;
+
+ enable = sh_pfc_read(pfc, reg->puen) & ~BIT(bit);
+ if (bias != PIN_CONFIG_BIAS_DISABLE)
+ enable |= BIT(bit);
+
+ if (reg->pud) {
+ updown = sh_pfc_read(pfc, reg->pud) & ~BIT(bit);
+ if (bias == PIN_CONFIG_BIAS_PULL_UP)
+ updown |= BIT(bit);
+
+ sh_pfc_write(pfc, reg->pud, updown);
+ }
+
+ sh_pfc_write(pfc, reg->puen, enable);
+}
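The shared helpers above fold in one extension over the per-SoC copies they replace: a bias register descriptor may lack a pull-up/down selection register (reg->pud == 0), in which case an enabled bias reads back as a pull-up and set_bias() skips the selector write. A minimal sketch of such a descriptor, with made-up register offsets and pin numbers:

	/* Hypothetical bank: pull-up enable register only, no selector. */
	static const struct pinmux_bias_reg example_bias_regs[] = {
		{
			.puen = 0xe6060400,	/* pull-up enable, one bit per pin */
			.pud  = 0,		/* absent: enabled bias means pull-up */
			.pins = { RCAR_GP_PIN(1, 9) /* , ... one entry per bit */ },
		},
		{ /* sentinel */ },
	};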
diff --git a/drivers/pinctrl/renesas/sh_pfc.h b/drivers/pinctrl/renesas/sh_pfc.h
index eff1bb872325..dc484c13f59c 100644
--- a/drivers/pinctrl/renesas/sh_pfc.h
+++ b/drivers/pinctrl/renesas/sh_pfc.h
@@ -34,10 +34,10 @@ enum {
#define SH_PFC_PIN_CFG_NO_GPIO (1 << 31)
struct sh_pfc_pin {
- u16 pin;
- u16 enum_id;
const char *name;
unsigned int configs;
+ u16 pin;
+ u16 enum_id;
};
#define SH_PFC_PIN_GROUP_ALIAS(alias, n) \
@@ -270,8 +270,13 @@ struct sh_pfc_soc_info {
const char *name;
const struct sh_pfc_soc_operations *ops;
+#ifdef CONFIG_PINCTRL_SH_PFC_GPIO
struct pinmux_range input;
struct pinmux_range output;
+ const struct pinmux_irq *gpio_irq;
+ unsigned int gpio_irq_size;
+#endif
+
struct pinmux_range function;
const struct sh_pfc_pin *pins;
@@ -295,9 +300,6 @@ struct sh_pfc_soc_info {
const u16 *pinmux_data;
unsigned int pinmux_data_size;
- const struct pinmux_irq *gpio_irq;
- unsigned int gpio_irq_size;
-
u32 unlock_reg;
};
diff --git a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
index 5e24838a582f..2223ead5bd72 100644
--- a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
+++ b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
@@ -108,19 +108,14 @@ static int s3c24xx_eint_get_trigger(unsigned int type)
switch (type) {
case IRQ_TYPE_EDGE_RISING:
return EINT_EDGE_RISING;
- break;
case IRQ_TYPE_EDGE_FALLING:
return EINT_EDGE_FALLING;
- break;
case IRQ_TYPE_EDGE_BOTH:
return EINT_EDGE_BOTH;
- break;
case IRQ_TYPE_LEVEL_HIGH:
return EINT_LEVEL_HIGH;
- break;
case IRQ_TYPE_LEVEL_LOW:
return EINT_LEVEL_LOW;
- break;
default:
return -EINVAL;
}
diff --git a/drivers/pinctrl/spear/pinctrl-spear300.c b/drivers/pinctrl/spear/pinctrl-spear300.c
index e39913a18139..d53a04597cbe 100644
--- a/drivers/pinctrl/spear/pinctrl-spear300.c
+++ b/drivers/pinctrl/spear/pinctrl-spear300.c
@@ -654,8 +654,6 @@ static const struct of_device_id spear300_pinctrl_of_match[] = {
static int spear300_pinctrl_probe(struct platform_device *pdev)
{
- int ret;
-
spear3xx_machdata.groups = spear300_pingroups;
spear3xx_machdata.ngroups = ARRAY_SIZE(spear300_pingroups);
spear3xx_machdata.functions = spear300_functions;
@@ -669,11 +667,7 @@ static int spear300_pinctrl_probe(struct platform_device *pdev)
pmx_init_addr(&spear3xx_machdata, PMX_CONFIG_REG);
- ret = spear_pinctrl_probe(pdev, &spear3xx_machdata);
- if (ret)
- return ret;
-
- return 0;
+ return spear_pinctrl_probe(pdev, &spear3xx_machdata);
}
static struct platform_driver spear300_pinctrl_driver = {
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-a100.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-a100.c
index 19cfd1e76ee2..e69f6da40dc0 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-a100.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-a100.c
@@ -677,7 +677,7 @@ static const struct sunxi_desc_pin a100_pins[] = {
SUNXI_FUNCTION_IRQ_BANK(0x6, 6, 19)),
};
-static const unsigned int a100_irq_bank_map[] = { 0, 1, 2, 3, 4, 5, 6};
+static const unsigned int a100_irq_bank_map[] = { 1, 2, 3, 4, 5, 6, 7};
static const struct sunxi_pinctrl_desc a100_pinctrl_data = {
.pins = a100_pins,
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index 8e792f8e2dc9..dc8d39ae045b 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -1139,8 +1139,9 @@ static void sunxi_pinctrl_irq_handler(struct irq_desc *desc)
if (irq == pctl->irq[bank])
break;
- if (bank == pctl->desc->irq_banks)
- return;
+ WARN_ON(bank == pctl->desc->irq_banks);
+
+ chained_irq_enter(chip, desc);
reg = sunxi_irq_status_reg_from_bank(pctl->desc, bank);
val = readl(pctl->membase + reg);
@@ -1148,14 +1149,14 @@ static void sunxi_pinctrl_irq_handler(struct irq_desc *desc)
if (val) {
int irqoffset;
- chained_irq_enter(chip, desc);
for_each_set_bit(irqoffset, &val, IRQ_PER_BANK) {
int pin_irq = irq_find_mapping(pctl->domain,
bank * IRQ_PER_BANK + irqoffset);
generic_handle_irq(pin_irq);
}
- chained_irq_exit(chip, desc);
}
+
+ chained_irq_exit(chip, desc);
}
static int sunxi_pinctrl_add_function(struct sunxi_pinctrl *pctl,
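The change above widens the chained_irq_enter()/chained_irq_exit() bracket to span the whole handler, so the parent interrupt is acked and completed even when the status register reads zero; previously a spurious or already-cleared interrupt left the parent unacked. A generic sketch of the resulting handler shape, not the sunxi code verbatim:

	static void example_chained_handler(struct irq_desc *desc)
	{
		struct irq_chip *chip = irq_desc_get_chip(desc);

		chained_irq_enter(chip, desc);	/* ack/mask the parent first */

		/*
		 * Demultiplex here: read the status register and call
		 * generic_handle_irq() for each set bit. This section may
		 * legitimately find nothing to do.
		 */

		chained_irq_exit(chip, desc);	/* eoi/unmask the parent */
	}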
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index 0ecee8b8773d..7c92a6e22d75 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -742,12 +742,16 @@ int cros_ec_get_next_event(struct cros_ec_device *ec_dev,
* Sensor events need to be parsed by the sensor sub-device.
* Defer them, and don't report the wakeup here.
*/
- if (event_type == EC_MKBP_EVENT_SENSOR_FIFO)
- *wake_event = false;
- /* Masked host-events should not count as wake events. */
- else if (host_event &&
- !(host_event & ec_dev->host_event_wake_mask))
+ if (event_type == EC_MKBP_EVENT_SENSOR_FIFO) {
*wake_event = false;
+ } else if (host_event) {
+ /* rtc_update_irq() already handles wakeup events. */
+ if (host_event & EC_HOST_EVENT_MASK(EC_HOST_EVENT_RTC))
+ *wake_event = false;
+ /* Masked host-events should not count as wake events. */
+ if (!(host_event & ec_dev->host_event_wake_mask))
+ *wake_event = false;
+ }
}
return ret;
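For reference, EC_HOST_EVENT_MASK() in cros_ec_commands.h maps a 1-based host-event code to a 64-bit mask, which is why the RTC check above is a plain bitwise AND against host_event. An equivalent helper, as a sketch:

	/* Host-event codes are 1-based bit positions. */
	static inline u64 example_host_event_mask(unsigned int event_code)
	{
		return BIT_ULL(event_code - 1);
	}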
diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c
index 8111ed1fc574..c43868615790 100644
--- a/drivers/platform/chrome/cros_ec_typec.c
+++ b/drivers/platform/chrome/cros_ec_typec.c
@@ -7,6 +7,7 @@
*/
#include <linux/acpi.h>
+#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_data/cros_ec_commands.h>
@@ -14,6 +15,7 @@
#include <linux/platform_data/cros_usbpd_notify.h>
#include <linux/platform_device.h>
#include <linux/usb/pd.h>
+#include <linux/usb/pd_vdo.h>
#include <linux/usb/typec.h>
#include <linux/usb/typec_altmode.h>
#include <linux/usb/typec_dp.h>
@@ -30,6 +32,12 @@ enum {
CROS_EC_ALTMODE_MAX,
};
+/* Container for altmode pointer nodes. */
+struct cros_typec_altmode_node {
+ struct typec_altmode *amode;
+ struct list_head list;
+};
+
/* Per port data. */
struct cros_typec_port {
struct typec_port *port;
@@ -48,6 +56,11 @@ struct cros_typec_port {
/* Port alt modes. */
struct typec_altmode p_altmode[CROS_EC_ALTMODE_MAX];
+
+ /* Flag indicating that PD discovery data parsing is complete. */
+ bool disc_done;
+ struct ec_response_typec_discovery *sop_disc;
+ struct list_head partner_mode_list;
};
/* Platform-specific data for the Chrome OS EC Type C controller. */
@@ -60,6 +73,7 @@ struct cros_typec_data {
struct cros_typec_port *ports[EC_USB_PD_MAX_PORTS];
struct notifier_block nb;
struct work_struct port_work;
+ bool typec_cmd_supported;
};
static int cros_typec_parse_port_props(struct typec_capability *cap,
@@ -166,11 +180,25 @@ static int cros_typec_add_partner(struct cros_typec_data *typec, int port_num,
return ret;
}
+static void cros_typec_unregister_altmodes(struct cros_typec_data *typec, int port_num)
+{
+ struct cros_typec_port *port = typec->ports[port_num];
+ struct cros_typec_altmode_node *node, *tmp;
+
+ list_for_each_entry_safe(node, tmp, &port->partner_mode_list, list) {
+ list_del(&node->list);
+ typec_unregister_altmode(node->amode);
+ devm_kfree(typec->dev, node);
+ }
+}
+
static void cros_typec_remove_partner(struct cros_typec_data *typec,
int port_num)
{
struct cros_typec_port *port = typec->ports[port_num];
+ cros_typec_unregister_altmodes(typec, port_num);
+
port->state.alt = NULL;
port->state.mode = TYPEC_STATE_USB;
port->state.data = NULL;
@@ -181,6 +209,8 @@ static void cros_typec_remove_partner(struct cros_typec_data *typec,
typec_unregister_partner(port->partner);
port->partner = NULL;
+ memset(&port->p_identity, 0, sizeof(port->p_identity));
+ port->disc_done = false;
}
static void cros_unregister_ports(struct cros_typec_data *typec)
@@ -190,7 +220,10 @@ static void cros_unregister_ports(struct cros_typec_data *typec)
for (i = 0; i < typec->num_ports; i++) {
if (!typec->ports[i])
continue;
- cros_typec_remove_partner(typec, i);
+
+ if (typec->ports[i]->partner)
+ cros_typec_remove_partner(typec, i);
+
usb_role_switch_put(typec->ports[i]->role_sw);
typec_switch_put(typec->ports[i]->ori_sw);
typec_mux_put(typec->ports[i]->mux);
@@ -289,6 +322,14 @@ static int cros_typec_init_ports(struct cros_typec_data *typec)
port_num);
cros_typec_register_port_altmodes(typec, port_num);
+
+ cros_port->sop_disc = devm_kzalloc(dev, EC_PROTO2_MAX_RESPONSE_SIZE, GFP_KERNEL);
+ if (!cros_port->sop_disc) {
+ ret = -ENOMEM;
+ goto unregister_ports;
+ }
+
+ INIT_LIST_HEAD(&cros_port->partner_mode_list);
}
return 0;
@@ -329,74 +370,6 @@ static int cros_typec_ec_command(struct cros_typec_data *typec,
return ret;
}
-static void cros_typec_set_port_params_v0(struct cros_typec_data *typec,
- int port_num, struct ec_response_usb_pd_control *resp)
-{
- struct typec_port *port = typec->ports[port_num]->port;
- enum typec_orientation polarity;
-
- if (!resp->enabled)
- polarity = TYPEC_ORIENTATION_NONE;
- else if (!resp->polarity)
- polarity = TYPEC_ORIENTATION_NORMAL;
- else
- polarity = TYPEC_ORIENTATION_REVERSE;
-
- typec_set_pwr_role(port, resp->role ? TYPEC_SOURCE : TYPEC_SINK);
- typec_set_orientation(port, polarity);
-}
-
-static void cros_typec_set_port_params_v1(struct cros_typec_data *typec,
- int port_num, struct ec_response_usb_pd_control_v1 *resp)
-{
- struct typec_port *port = typec->ports[port_num]->port;
- enum typec_orientation polarity;
- bool pd_en;
- int ret;
-
- if (!(resp->enabled & PD_CTRL_RESP_ENABLED_CONNECTED))
- polarity = TYPEC_ORIENTATION_NONE;
- else if (!resp->polarity)
- polarity = TYPEC_ORIENTATION_NORMAL;
- else
- polarity = TYPEC_ORIENTATION_REVERSE;
- typec_set_orientation(port, polarity);
- typec_set_data_role(port, resp->role & PD_CTRL_RESP_ROLE_DATA ?
- TYPEC_HOST : TYPEC_DEVICE);
- typec_set_pwr_role(port, resp->role & PD_CTRL_RESP_ROLE_POWER ?
- TYPEC_SOURCE : TYPEC_SINK);
- typec_set_vconn_role(port, resp->role & PD_CTRL_RESP_ROLE_VCONN ?
- TYPEC_SOURCE : TYPEC_SINK);
-
- /* Register/remove partners when a connect/disconnect occurs. */
- if (resp->enabled & PD_CTRL_RESP_ENABLED_CONNECTED) {
- if (typec->ports[port_num]->partner)
- return;
-
- pd_en = resp->enabled & PD_CTRL_RESP_ENABLED_PD_CAPABLE;
- ret = cros_typec_add_partner(typec, port_num, pd_en);
- if (ret)
- dev_warn(typec->dev,
- "Failed to register partner on port: %d\n",
- port_num);
- } else {
- if (!typec->ports[port_num]->partner)
- return;
- cros_typec_remove_partner(typec, port_num);
- }
-}
-
-static int cros_typec_get_mux_info(struct cros_typec_data *typec, int port_num,
- struct ec_response_usb_pd_mux_info *resp)
-{
- struct ec_params_usb_pd_mux_info req = {
- .port = port_num,
- };
-
- return cros_typec_ec_command(typec, 0, EC_CMD_USB_PD_MUX_INFO, &req,
- sizeof(req), resp, sizeof(*resp));
-}
-
static int cros_typec_usb_safe_state(struct cros_typec_port *port)
{
port->state.mode = TYPEC_STATE_SAFE;
@@ -563,15 +536,210 @@ static int cros_typec_configure_mux(struct cros_typec_data *typec, int port_num,
port->state.mode = TYPEC_STATE_USB;
ret = typec_mux_set(port->mux, &port->state);
} else {
- dev_info(typec->dev,
- "Unsupported mode requested, mux flags: %x\n",
- mux_flags);
- ret = -ENOTSUPP;
+ dev_dbg(typec->dev,
+ "Unrecognized mode requested, mux flags: %x\n",
+ mux_flags);
+ }
+
+ return ret;
+}
+
+static void cros_typec_set_port_params_v0(struct cros_typec_data *typec,
+ int port_num, struct ec_response_usb_pd_control *resp)
+{
+ struct typec_port *port = typec->ports[port_num]->port;
+ enum typec_orientation polarity;
+
+ if (!resp->enabled)
+ polarity = TYPEC_ORIENTATION_NONE;
+ else if (!resp->polarity)
+ polarity = TYPEC_ORIENTATION_NORMAL;
+ else
+ polarity = TYPEC_ORIENTATION_REVERSE;
+
+ typec_set_pwr_role(port, resp->role ? TYPEC_SOURCE : TYPEC_SINK);
+ typec_set_orientation(port, polarity);
+}
+
+static void cros_typec_set_port_params_v1(struct cros_typec_data *typec,
+ int port_num, struct ec_response_usb_pd_control_v1 *resp)
+{
+ struct typec_port *port = typec->ports[port_num]->port;
+ enum typec_orientation polarity;
+ bool pd_en;
+ int ret;
+
+ if (!(resp->enabled & PD_CTRL_RESP_ENABLED_CONNECTED))
+ polarity = TYPEC_ORIENTATION_NONE;
+ else if (!resp->polarity)
+ polarity = TYPEC_ORIENTATION_NORMAL;
+ else
+ polarity = TYPEC_ORIENTATION_REVERSE;
+ typec_set_orientation(port, polarity);
+ typec_set_data_role(port, resp->role & PD_CTRL_RESP_ROLE_DATA ?
+ TYPEC_HOST : TYPEC_DEVICE);
+ typec_set_pwr_role(port, resp->role & PD_CTRL_RESP_ROLE_POWER ?
+ TYPEC_SOURCE : TYPEC_SINK);
+ typec_set_vconn_role(port, resp->role & PD_CTRL_RESP_ROLE_VCONN ?
+ TYPEC_SOURCE : TYPEC_SINK);
+
+ /* Register/remove partners when a connect/disconnect occurs. */
+ if (resp->enabled & PD_CTRL_RESP_ENABLED_CONNECTED) {
+ if (typec->ports[port_num]->partner)
+ return;
+
+ pd_en = resp->enabled & PD_CTRL_RESP_ENABLED_PD_CAPABLE;
+ ret = cros_typec_add_partner(typec, port_num, pd_en);
+ if (ret)
+ dev_warn(typec->dev,
+ "Failed to register partner on port: %d\n",
+ port_num);
+ } else {
+ if (!typec->ports[port_num]->partner)
+ return;
+ cros_typec_remove_partner(typec, port_num);
}
+}
+
+static int cros_typec_get_mux_info(struct cros_typec_data *typec, int port_num,
+ struct ec_response_usb_pd_mux_info *resp)
+{
+ struct ec_params_usb_pd_mux_info req = {
+ .port = port_num,
+ };
+
+ return cros_typec_ec_command(typec, 0, EC_CMD_USB_PD_MUX_INFO, &req,
+ sizeof(req), resp, sizeof(*resp));
+}
+
+static int cros_typec_register_altmodes(struct cros_typec_data *typec, int port_num)
+{
+ struct cros_typec_port *port = typec->ports[port_num];
+ struct ec_response_typec_discovery *sop_disc = port->sop_disc;
+ struct cros_typec_altmode_node *node;
+ struct typec_altmode_desc desc;
+ struct typec_altmode *amode;
+ int ret = 0;
+ int i, j;
+
+ for (i = 0; i < sop_disc->svid_count; i++) {
+ for (j = 0; j < sop_disc->svids[i].mode_count; j++) {
+ memset(&desc, 0, sizeof(desc));
+ desc.svid = sop_disc->svids[i].svid;
+ desc.mode = j;
+ desc.vdo = sop_disc->svids[i].mode_vdo[j];
+
+ amode = typec_partner_register_altmode(port->partner, &desc);
+ if (IS_ERR(amode)) {
+ ret = PTR_ERR(amode);
+ goto err_cleanup;
+ }
+
+ /* If no memory is available, we should unregister and exit. */
+ node = devm_kzalloc(typec->dev, sizeof(*node), GFP_KERNEL);
+ if (!node) {
+ ret = -ENOMEM;
+ typec_unregister_altmode(amode);
+ goto err_cleanup;
+ }
+
+ node->amode = amode;
+ list_add_tail(&node->list, &port->partner_mode_list);
+ }
+ }
+
+ return 0;
+err_cleanup:
+ cros_typec_unregister_altmodes(typec, port_num);
return ret;
}
+static int cros_typec_handle_sop_disc(struct cros_typec_data *typec, int port_num)
+{
+ struct cros_typec_port *port = typec->ports[port_num];
+ struct ec_response_typec_discovery *sop_disc = port->sop_disc;
+ struct ec_params_typec_discovery req = {
+ .port = port_num,
+ .partner_type = TYPEC_PARTNER_SOP,
+ };
+ int ret = 0;
+ int i;
+
+ if (!port->partner) {
+ dev_err(typec->dev,
+ "SOP Discovery received without partner registered, port: %d\n",
+ port_num);
+ ret = -EINVAL;
+ goto disc_exit;
+ }
+
+ memset(sop_disc, 0, EC_PROTO2_MAX_RESPONSE_SIZE);
+ ret = cros_typec_ec_command(typec, 0, EC_CMD_TYPEC_DISCOVERY, &req, sizeof(req),
+ sop_disc, EC_PROTO2_MAX_RESPONSE_SIZE);
+ if (ret < 0) {
+ dev_err(typec->dev, "Failed to get SOP discovery data for port: %d\n", port_num);
+ goto disc_exit;
+ }
+
+ /* First, update the PD identity VDOs for the partner. */
+ if (sop_disc->identity_count > 0)
+ port->p_identity.id_header = sop_disc->discovery_vdo[0];
+ if (sop_disc->identity_count > 1)
+ port->p_identity.cert_stat = sop_disc->discovery_vdo[1];
+ if (sop_disc->identity_count > 2)
+ port->p_identity.product = sop_disc->discovery_vdo[2];
+
+ /* Copy the remaining identity VDOs, up to a maximum of 6. */
+ for (i = 3; i < sop_disc->identity_count && i < VDO_MAX_OBJECTS; i++)
+ port->p_identity.vdo[i - 3] = sop_disc->discovery_vdo[i];
+
+ ret = typec_partner_set_identity(port->partner);
+ if (ret < 0) {
+ dev_err(typec->dev, "Failed to update partner PD identity, port: %d\n", port_num);
+ goto disc_exit;
+ }
+
+ ret = cros_typec_register_altmodes(typec, port_num);
+ if (ret < 0) {
+ dev_err(typec->dev, "Failed to register partner altmodes, port: %d\n", port_num);
+ goto disc_exit;
+ }
+
+disc_exit:
+ return ret;
+}
+
+static void cros_typec_handle_status(struct cros_typec_data *typec, int port_num)
+{
+ struct ec_response_typec_status resp;
+ struct ec_params_typec_status req = {
+ .port = port_num,
+ };
+ int ret;
+
+ ret = cros_typec_ec_command(typec, 0, EC_CMD_TYPEC_STATUS, &req, sizeof(req),
+ &resp, sizeof(resp));
+ if (ret < 0) {
+ dev_warn(typec->dev, "EC_CMD_TYPEC_STATUS failed for port: %d\n", port_num);
+ return;
+ }
+
+ if (typec->ports[port_num]->disc_done)
+ return;
+
+ /* Handle any events; only SOP discovery completion is acted on here. */
+ if (resp.events & PD_STATUS_EVENT_SOP_DISC_DONE) {
+ ret = cros_typec_handle_sop_disc(typec, port_num);
+ if (ret < 0) {
+ dev_err(typec->dev, "Couldn't parse SOP Disc data, port: %d\n", port_num);
+ return;
+ }
+
+ typec->ports[port_num]->disc_done = true;
+ }
+}
+
static int cros_typec_port_update(struct cros_typec_data *typec, int port_num)
{
struct ec_params_usb_pd_control req;
@@ -608,6 +776,9 @@ static int cros_typec_port_update(struct cros_typec_data *typec, int port_num)
cros_typec_set_port_params_v0(typec, port_num,
(struct ec_response_usb_pd_control *) &resp);
+ if (typec->typec_cmd_supported)
+ cros_typec_handle_status(typec, port_num);
+
/* Update the switches if they exist, according to requested state */
ret = cros_typec_get_mux_info(typec, port_num, &mux_resp);
if (ret < 0) {
@@ -656,6 +827,23 @@ static int cros_typec_get_cmd_version(struct cros_typec_data *typec)
return 0;
}
+/* Check the EC feature flags to see if TYPEC_* commands are supported. */
+static int cros_typec_cmds_supported(struct cros_typec_data *typec)
+{
+ struct ec_response_get_features resp = {};
+ int ret;
+
+ ret = cros_typec_ec_command(typec, 0, EC_CMD_GET_FEATURES, NULL, 0,
+ &resp, sizeof(resp));
+ if (ret < 0) {
+ dev_warn(typec->dev,
+ "Failed to get features, assuming typec commands unsupported.\n");
+ return 0;
+ }
+
+ return resp.flags[EC_FEATURE_TYPEC_CMD / 32] & EC_FEATURE_MASK_1(EC_FEATURE_TYPEC_CMD);
+}
+
static void cros_typec_port_work(struct work_struct *work)
{
struct cros_typec_data *typec = container_of(work, struct cros_typec_data, port_work);
@@ -715,6 +903,8 @@ static int cros_typec_probe(struct platform_device *pdev)
return ret;
}
+ typec->typec_cmd_supported = !!cros_typec_cmds_supported(typec);
+
ret = cros_typec_ec_command(typec, 0, EC_CMD_USB_PD_PORTS, NULL, 0,
&resp, sizeof(resp));
if (ret < 0)
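The feature probe added above indexes the two 32-bit words of ec_response_get_features: feature N lives in flags[N / 32], and EC_FEATURE_MASK_1() supplies the in-word bit for features 32-63. A standalone sketch of the same test, written generically rather than with the exact macros:

	/* Sketch: does the EC advertise the given feature bit? */
	static bool example_ec_has_feature(const struct ec_response_get_features *r,
					   unsigned int feature)
	{
		return r->flags[feature / 32] & BIT(feature % 32);
	}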
diff --git a/drivers/platform/surface/Kconfig b/drivers/platform/surface/Kconfig
index 33040b0b3b79..2c941cdac9ee 100644
--- a/drivers/platform/surface/Kconfig
+++ b/drivers/platform/surface/Kconfig
@@ -5,6 +5,7 @@
menuconfig SURFACE_PLATFORMS
bool "Microsoft Surface Platform-Specific Device Drivers"
+ depends on ACPI
default y
help
Say Y here to get to see options for platform-specific device drivers
@@ -29,20 +30,19 @@ config SURFACE3_WMI
config SURFACE_3_BUTTON
tristate "Power/home/volume buttons driver for Microsoft Surface 3 tablet"
- depends on ACPI && KEYBOARD_GPIO && I2C
+ depends on KEYBOARD_GPIO && I2C
help
This driver handles the power/home/volume buttons on the Microsoft Surface 3 tablet.
config SURFACE_3_POWER_OPREGION
tristate "Surface 3 battery platform operation region support"
- depends on ACPI && I2C
+ depends on I2C
help
This driver provides support for ACPI operation
region of the Surface 3 battery platform driver.
config SURFACE_GPE
tristate "Surface GPE/Lid Support Driver"
- depends on ACPI
depends on DMI
help
This driver marks the GPEs related to the ACPI lid device found on
@@ -52,7 +52,7 @@ config SURFACE_GPE
config SURFACE_PRO3_BUTTON
tristate "Power/home/volume buttons driver for Microsoft Surface Pro 3/4 tablet"
- depends on ACPI && INPUT
+ depends on INPUT
help
This driver handles the power/home/volume buttons on the Microsoft Surface Pro 3/4 tablet.
diff --git a/drivers/platform/surface/surface_gpe.c b/drivers/platform/surface/surface_gpe.c
index e49e5d6d5d4e..86f6991b1215 100644
--- a/drivers/platform/surface/surface_gpe.c
+++ b/drivers/platform/surface/surface_gpe.c
@@ -181,12 +181,12 @@ static int surface_lid_enable_wakeup(struct device *dev, bool enable)
return 0;
}
-static int surface_gpe_suspend(struct device *dev)
+static int __maybe_unused surface_gpe_suspend(struct device *dev)
{
return surface_lid_enable_wakeup(dev, true);
}
-static int surface_gpe_resume(struct device *dev)
+static int __maybe_unused surface_gpe_resume(struct device *dev)
{
return surface_lid_enable_wakeup(dev, false);
}
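Tagging the handlers __maybe_unused is the usual companion to SET_SYSTEM_SLEEP_PM_OPS(): with CONFIG_PM_SLEEP=n the macro expands to nothing, leaving the functions defined but unreferenced, which -Wunused-function would otherwise flag. A sketch of the pattern with hypothetical names:

	static int __maybe_unused example_suspend(struct device *dev)
	{
		return 0;	/* arm wakeup sources here */
	}

	static int __maybe_unused example_resume(struct device *dev)
	{
		return 0;	/* disarm wakeup sources here */
	}

	static const struct dev_pm_ops example_pm_ops = {
		/* Expands to empty when CONFIG_PM_SLEEP is disabled. */
		SET_SYSTEM_SLEEP_PM_OPS(example_suspend, example_resume)
	};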
diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c
index 0102bf1c7916..ef8342572463 100644
--- a/drivers/platform/x86/amd-pmc.c
+++ b/drivers/platform/x86/amd-pmc.c
@@ -85,7 +85,7 @@ static inline void amd_pmc_reg_write(struct amd_pmc_dev *dev, int reg_offset, u3
iowrite32(val, dev->regbase + reg_offset);
}
-#if CONFIG_DEBUG_FS
+#ifdef CONFIG_DEBUG_FS
static int smu_fw_info_show(struct seq_file *s, void *unused)
{
struct amd_pmc_dev *dev = s->private;
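The one-character fix above matters because boolean Kconfig symbols are either defined to 1 or not defined at all: #if CONFIG_DEBUG_FS silently evaluates the undefined macro as 0 when debugfs is disabled and trips -Wundef, while #ifdef tests definedness as intended. A sketch of the guarded form, with a hypothetical helper:

	#ifdef CONFIG_DEBUG_FS
	static void example_setup_debugfs(struct amd_pmc_dev *dev) { /* ... */ }
	#else
	static inline void example_setup_debugfs(struct amd_pmc_dev *dev) {}
	#endif

C-level code can instead use IS_ENABLED(CONFIG_DEBUG_FS), which evaluates to 0 or 1 and keeps both branches compile-checked.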
diff --git a/drivers/platform/x86/dell-wmi-sysman/sysman.c b/drivers/platform/x86/dell-wmi-sysman/sysman.c
index dc6dd531c996..cb81010ba1a2 100644
--- a/drivers/platform/x86/dell-wmi-sysman/sysman.c
+++ b/drivers/platform/x86/dell-wmi-sysman/sysman.c
@@ -419,13 +419,17 @@ static int init_bios_attributes(int attr_type, const char *guid)
return retval;
/* need to use specific instance_id and guid combination to get right data */
obj = get_wmiobj_pointer(instance_id, guid);
- if (!obj)
+ if (!obj || obj->type != ACPI_TYPE_PACKAGE)
return -ENODEV;
elements = obj->package.elements;
mutex_lock(&wmi_priv.mutex);
while (elements) {
/* sanity checking */
+ if (elements[ATTR_NAME].type != ACPI_TYPE_STRING) {
+ pr_debug("incorrect element type\n");
+ goto nextobj;
+ }
if (strlen(elements[ATTR_NAME].string.pointer) == 0) {
pr_debug("empty attribute found\n");
goto nextobj;
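The added type check guards the strlen() call that follows: package elements are union acpi_object entries, and element.string.pointer is only meaningful for ACPI_TYPE_STRING; for an integer or buffer element the union's string view is garbage. A hedged helper sketch combining both sanity checks:

	#include <linux/acpi.h>

	/* Sketch: true only for a non-NULL, non-empty ACPI string element. */
	static bool example_elem_is_usable_string(const union acpi_object *el)
	{
		return el->type == ACPI_TYPE_STRING &&
		       el->string.pointer &&
		       el->string.pointer[0] != '\0';
	}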
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index ecd477964d11..e94e59283ecb 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -32,6 +32,10 @@ MODULE_LICENSE("GPL");
MODULE_ALIAS("wmi:95F24279-4D7B-4334-9387-ACCDC67EF61C");
MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4");
+static int enable_tablet_mode_sw = -1;
+module_param(enable_tablet_mode_sw, int, 0444);
+MODULE_PARM_DESC(enable_tablet_mode_sw, "Enable SW_TABLET_MODE reporting (-1=auto, 0=no, 1=yes)");
+
#define HPWMI_EVENT_GUID "95F24279-4D7B-4334-9387-ACCDC67EF61C"
#define HPWMI_BIOS_GUID "5FB7F034-2C63-45e9-BE91-3D44E2C707E4"
@@ -247,7 +251,8 @@ static int hp_wmi_perform_query(int query, enum hp_wmi_command command,
ret = bios_return->return_code;
if (ret) {
- if (ret != HPWMI_RET_UNKNOWN_CMDTYPE)
+ if (ret != HPWMI_RET_UNKNOWN_COMMAND &&
+ ret != HPWMI_RET_UNKNOWN_CMDTYPE)
pr_warn("query 0x%x returned error 0x%x\n", query, ret);
goto out_free;
}
@@ -653,10 +658,12 @@ static int __init hp_wmi_input_setup(void)
}
/* Tablet mode */
- val = hp_wmi_hw_state(HPWMI_TABLET_MASK);
- if (!(val < 0)) {
- __set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit);
- input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, val);
+ if (enable_tablet_mode_sw > 0) {
+ val = hp_wmi_hw_state(HPWMI_TABLET_MASK);
+ if (val >= 0) {
+ __set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit);
+ input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, val);
+ }
}
err = sparse_keymap_setup(hp_wmi_input_dev, hp_wmi_keymap, NULL);
diff --git a/drivers/platform/x86/i2c-multi-instantiate.c b/drivers/platform/x86/i2c-multi-instantiate.c
index b457b0babde3..2cce82579d09 100644
--- a/drivers/platform/x86/i2c-multi-instantiate.c
+++ b/drivers/platform/x86/i2c-multi-instantiate.c
@@ -164,13 +164,29 @@ static const struct i2c_inst_data bsg2150_data[] = {
{}
};
-static const struct i2c_inst_data int3515_data[] = {
- { "tps6598x", IRQ_RESOURCE_APIC, 0 },
- { "tps6598x", IRQ_RESOURCE_APIC, 1 },
- { "tps6598x", IRQ_RESOURCE_APIC, 2 },
- { "tps6598x", IRQ_RESOURCE_APIC, 3 },
- {}
-};
+/*
+ * Devices with _HID INT3515 (TI PD controllers) have some unresolved interrupt
+ * issues. The most common problem seen is an interrupt flood.
+ *
+ * There are at least two known causes. Firstly, on some boards, the
+ * I2CSerialBus resource index does not match the Interrupt resource, i.e. they
+ * are not one-to-one mapped like in the array below. Secondly, on some boards
+ * the IRQ line from the PD controller is not actually connected at all. But the
+ * interrupt flood is also seen on some boards where neither of those is a
+ * problem, so there are other problems as well.
+ *
+ * Because of the issues with the interrupt, the device is disabled for now. If
+ * you wish to debug the issues, uncomment the below, and add an entry for the
+ * INT3515 device to the i2c_multi_instance_ids table.
+ *
+ * static const struct i2c_inst_data int3515_data[] = {
+ * { "tps6598x", IRQ_RESOURCE_APIC, 0 },
+ * { "tps6598x", IRQ_RESOURCE_APIC, 1 },
+ * { "tps6598x", IRQ_RESOURCE_APIC, 2 },
+ * { "tps6598x", IRQ_RESOURCE_APIC, 3 },
+ * { }
+ * };
+ */
/*
* Note new device-ids must also be added to i2c_multi_instantiate_ids in
@@ -179,7 +195,6 @@ static const struct i2c_inst_data int3515_data[] = {
static const struct acpi_device_id i2c_multi_inst_acpi_ids[] = {
{ "BSG1160", (unsigned long)bsg1160_data },
{ "BSG2150", (unsigned long)bsg2150_data },
- { "INT3515", (unsigned long)int3515_data },
{ }
};
MODULE_DEVICE_TABLE(acpi, i2c_multi_inst_acpi_ids);
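
For anyone chasing the interrupt flood, re-enabling the device means reversing both halves of this hunk. A sketch of the restored state (note the device-ID table in this file is i2c_multi_inst_acpi_ids; the i2c_multi_instantiate_ids list mentioned in the surrounding comment lives in drivers/acpi/scan.c):

	static const struct i2c_inst_data int3515_data[] = {
		{ "tps6598x", IRQ_RESOURCE_APIC, 0 },
		{ "tps6598x", IRQ_RESOURCE_APIC, 1 },
		{ "tps6598x", IRQ_RESOURCE_APIC, 2 },
		{ "tps6598x", IRQ_RESOURCE_APIC, 3 },
		{}
	};

	static const struct acpi_device_id i2c_multi_inst_acpi_ids[] = {
		{ "BSG1160", (unsigned long)bsg1160_data },
		{ "BSG2150", (unsigned long)bsg2150_data },
		{ "INT3515", (unsigned long)int3515_data },	/* re-enabled for debugging */
		{ }
	};
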
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 7598cd46cf60..5b81bafa5c16 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -92,6 +92,7 @@ struct ideapad_private {
struct dentry *debug;
unsigned long cfg;
bool has_hw_rfkill_switch;
+ bool has_touchpad_switch;
const char *fnesc_guid;
};
@@ -535,7 +536,9 @@ static umode_t ideapad_is_visible(struct kobject *kobj,
} else if (attr == &dev_attr_fn_lock.attr) {
supported = acpi_has_method(priv->adev->handle, "HALS") &&
acpi_has_method(priv->adev->handle, "SALS");
- } else
+ } else if (attr == &dev_attr_touchpad.attr)
+ supported = priv->has_touchpad_switch;
+ else
supported = true;
return supported ? attr->mode : 0;
@@ -867,6 +870,9 @@ static void ideapad_sync_touchpad_state(struct ideapad_private *priv)
{
unsigned long value;
+ if (!priv->has_touchpad_switch)
+ return;
+
	/* Without reading from the EC, the touchpad LED doesn't switch state */
if (!read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &value)) {
/* Some IdeaPads don't really turn off touchpad - they only
@@ -989,6 +995,9 @@ static int ideapad_acpi_add(struct platform_device *pdev)
priv->platform_device = pdev;
priv->has_hw_rfkill_switch = dmi_check_system(hw_rfkill_list);
+	/* Most IdeaPads with an ELAN0634 touchpad don't use the EC touchpad switch */
+ priv->has_touchpad_switch = !acpi_dev_present("ELAN0634", NULL, -1);
+
ret = ideapad_sysfs_init(priv);
if (ret)
return ret;
@@ -1006,6 +1015,10 @@ static int ideapad_acpi_add(struct platform_device *pdev)
if (!priv->has_hw_rfkill_switch)
write_ec_cmd(priv->adev->handle, VPCCMD_W_RF, 1);
+	/* Likewise for the touchpad */
+ if (!priv->has_touchpad_switch)
+ write_ec_cmd(priv->adev->handle, VPCCMD_W_TOUCHPAD, 1);
+
for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++)
if (test_bit(ideapad_rfk_data[i].cfgbit, &priv->cfg))
ideapad_register_rfkill(priv, i);
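
The touchpad attribute is hidden on unsupported models through the sysfs is_visible() hook extended above. Its contract is simple: return the attribute's declared mode to expose the file, or 0 to hide it for this device instance. A minimal sketch (check_feature() is a hypothetical predicate):

	static umode_t example_is_visible(struct kobject *kobj,
					  struct attribute *attr, int idx)
	{
		bool supported = check_feature(kobj);	/* per-device capability test */

		return supported ? attr->mode : 0;	/* 0 hides the sysfs file */
	}
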
diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c
index 3b49a1f4061b..30a9062d2b4b 100644
--- a/drivers/platform/x86/intel-vbtn.c
+++ b/drivers/platform/x86/intel-vbtn.c
@@ -207,19 +207,19 @@ static const struct dmi_system_id dmi_switches_allow_list[] = {
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Stream x360 Convertible PC 11"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion 13 x360 PC"),
},
},
{
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion 13 x360 PC"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Switch SA5-271"),
},
},
{
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Switch SA5-271"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7352"),
},
},
{} /* Array terminator */
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index e03df2881dc6..f3e8eca8d86d 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -8783,6 +8783,7 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
TPACPI_Q_LNV3('N', '1', 'T', TPACPI_FAN_2CTL), /* P71 */
TPACPI_Q_LNV3('N', '1', 'U', TPACPI_FAN_2CTL), /* P51 */
TPACPI_Q_LNV3('N', '2', 'C', TPACPI_FAN_2CTL), /* P52 / P72 */
+ TPACPI_Q_LNV3('N', '2', 'N', TPACPI_FAN_2CTL), /* P53 / P73 */
TPACPI_Q_LNV3('N', '2', 'E', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (1st gen) */
TPACPI_Q_LNV3('N', '2', 'O', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (2nd gen) */
	TPACPI_Q_LNV3('N', '2', 'V', TPACPI_FAN_2CTL),	/* P1 / X1 Extreme (3rd gen) */
@@ -9951,9 +9952,9 @@ static int tpacpi_proxsensor_init(struct ibm_init_struct *iibm)
if ((palm_err == -ENODEV) && (lap_err == -ENODEV))
return 0;
	/* Otherwise, if there was an error, return it */
- if (palm_err && (palm_err != ENODEV))
+ if (palm_err && (palm_err != -ENODEV))
return palm_err;
- if (lap_err && (lap_err != ENODEV))
+ if (lap_err && (lap_err != -ENODEV))
return lap_err;
if (has_palmsensor) {
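
The proxsensor fix is a sign error: kernel APIs return negative error codes, so comparing against the positive constant ENODEV (19) was always true for any failure, and a machine with only one of the two sensors would abort initialization instead of tolerating the missing one. In short:

	int err = -ENODEV;		/* what the lookup actually returns (-19) */

	if (err && err != ENODEV)	/* -19 != 19 is always true: the bug */
		return err;
	if (err && err != -ENODEV)	/* correct: tolerate a missing sensor */
		return err;
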
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index 5783139d0a11..c4de932302d6 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -263,6 +263,16 @@ static const struct ts_dmi_data digma_citi_e200_data = {
.properties = digma_citi_e200_props,
};
+static const struct property_entry estar_beauty_hd_props[] = {
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ { }
+};
+
+static const struct ts_dmi_data estar_beauty_hd_data = {
+ .acpi_name = "GDIX1001:00",
+ .properties = estar_beauty_hd_props,
+};
+
static const struct property_entry gp_electronic_t701_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 960),
PROPERTY_ENTRY_U32("touchscreen-size-y", 640),
@@ -943,6 +953,14 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Estar Beauty HD (MID 7316R) */
+ .driver_data = (void *)&estar_beauty_hd_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Estar"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "eSTAR BEAUTY HD Intel Quad core"),
+ },
+ },
+ {
/* GP-electronic T701 */
.driver_data = (void *)&gp_electronic_t701_data,
.matches = {
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index d55b3727e00e..b22c4fdb2561 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -177,6 +177,13 @@ config POWER_RESET_QNAP
Say Y if you have a QNAP NAS.
+config POWER_RESET_REGULATOR
+ bool "Regulator subsystem power-off driver"
+ depends on OF && REGULATOR
+ help
+ This driver supports turning off your board by disabling a
+ power regulator defined in the devicetree.
+
config POWER_RESET_RESTART
bool "Restart power-off driver"
help
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
index c51eceba9ea3..9dc49d3a57ff 100644
--- a/drivers/power/reset/Makefile
+++ b/drivers/power/reset/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_POWER_RESET_OCELOT_RESET) += ocelot-reset.o
obj-$(CONFIG_POWER_RESET_PIIX4_POWEROFF) += piix4-poweroff.o
obj-$(CONFIG_POWER_RESET_LTC2952) += ltc2952-poweroff.o
obj-$(CONFIG_POWER_RESET_QNAP) += qnap-poweroff.o
+obj-$(CONFIG_POWER_RESET_REGULATOR) += regulator-poweroff.o
obj-$(CONFIG_POWER_RESET_RESTART) += restart-poweroff.o
obj-$(CONFIG_POWER_RESET_ST) += st-poweroff.o
obj-$(CONFIG_POWER_RESET_VERSATILE) += arm-versatile-reboot.o
diff --git a/drivers/power/reset/ocelot-reset.c b/drivers/power/reset/ocelot-reset.c
index f74e1dbb4ba3..8caa90cb58fc 100644
--- a/drivers/power/reset/ocelot-reset.c
+++ b/drivers/power/reset/ocelot-reset.c
@@ -29,6 +29,8 @@ struct ocelot_reset_context {
struct notifier_block restart_handler;
};
+#define BIT_OFF_INVALID 32
+
#define SOFT_CHIP_RST BIT(0)
#define ICPU_CFG_CPU_SYSTEM_CTRL_GENERAL_CTRL 0x24
@@ -50,9 +52,11 @@ static int ocelot_restart_handle(struct notifier_block *this,
ctx->props->vcore_protect, 0);
/* Make the SI back to boot mode */
- regmap_update_bits(ctx->cpu_ctrl, ICPU_CFG_CPU_SYSTEM_CTRL_GENERAL_CTRL,
- IF_SI_OWNER_MASK << if_si_owner_bit,
- IF_SI_OWNER_SIBM << if_si_owner_bit);
+ if (if_si_owner_bit != BIT_OFF_INVALID)
+ regmap_update_bits(ctx->cpu_ctrl,
+ ICPU_CFG_CPU_SYSTEM_CTRL_GENERAL_CTRL,
+ IF_SI_OWNER_MASK << if_si_owner_bit,
+ IF_SI_OWNER_SIBM << if_si_owner_bit);
pr_emerg("Resetting SoC\n");
@@ -96,6 +100,20 @@ static int ocelot_reset_probe(struct platform_device *pdev)
return err;
}
+static const struct reset_props reset_props_jaguar2 = {
+ .syscon = "mscc,ocelot-cpu-syscon",
+ .protect_reg = 0x20,
+ .vcore_protect = BIT(2),
+ .if_si_owner_bit = 6,
+};
+
+static const struct reset_props reset_props_luton = {
+ .syscon = "mscc,ocelot-cpu-syscon",
+ .protect_reg = 0x20,
+ .vcore_protect = BIT(2),
+ .if_si_owner_bit = BIT_OFF_INVALID, /* n/a */
+};
+
static const struct reset_props reset_props_ocelot = {
.syscon = "mscc,ocelot-cpu-syscon",
.protect_reg = 0x20,
@@ -112,6 +130,12 @@ static const struct reset_props reset_props_sparx5 = {
static const struct of_device_id ocelot_reset_of_match[] = {
{
+ .compatible = "mscc,jaguar2-chip-reset",
+ .data = &reset_props_jaguar2
+ }, {
+ .compatible = "mscc,luton-chip-reset",
+ .data = &reset_props_luton
+ }, {
.compatible = "mscc,ocelot-chip-reset",
.data = &reset_props_ocelot
}, {
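
With the per-SoC settings carried in of_device_id.data, probe() would typically fetch them with of_device_get_match_data(), and the restart handler can then use the BIT_OFF_INVALID sentinel to skip the SI-owner fixup on Luton, which has no such field. A sketch under those assumptions (cpu_ctrl stands in for the driver's regmap):

	const struct reset_props *props = of_device_get_match_data(&pdev->dev);

	if (props->if_si_owner_bit != BIT_OFF_INVALID)	/* n/a on Luton */
		regmap_update_bits(cpu_ctrl,
				   ICPU_CFG_CPU_SYSTEM_CTRL_GENERAL_CTRL,
				   IF_SI_OWNER_MASK << props->if_si_owner_bit,
				   IF_SI_OWNER_SIBM << props->if_si_owner_bit);
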
diff --git a/drivers/power/reset/qnap-poweroff.c b/drivers/power/reset/qnap-poweroff.c
index 52b7dc61d870..0ddf7f25f7b8 100644
--- a/drivers/power/reset/qnap-poweroff.c
+++ b/drivers/power/reset/qnap-poweroff.c
@@ -14,7 +14,6 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/serial_reg.h>
-#include <linux/kallsyms.h>
#include <linux/of.h>
#include <linux/io.h>
#include <linux/clk.h>
@@ -75,7 +74,6 @@ static int qnap_power_off_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct resource *res;
struct clk *clk;
- char symname[KSYM_NAME_LEN];
const struct of_device_id *match =
of_match_node(qnap_power_off_of_match_table, np);
@@ -104,10 +102,8 @@ static int qnap_power_off_probe(struct platform_device *pdev)
/* Check that nothing else has already setup a handler */
if (pm_power_off) {
- lookup_symbol_name((ulong)pm_power_off, symname);
- dev_err(&pdev->dev,
- "pm_power_off already claimed %p %s",
- pm_power_off, symname);
+ dev_err(&pdev->dev, "pm_power_off already claimed for %ps",
+ pm_power_off);
return -EBUSY;
}
pm_power_off = qnap_power_off;
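
The %ps printk extension resolves a function pointer to its symbol name in one step, which is what lets the <linux/kallsyms.h> include, the KSYM_NAME_LEN buffer and the manual lookup_symbol_name() call all go away. For reference:

	pr_info("handler: %ps\n", pm_power_off);	/* e.g. "handler: qnap_power_off" */
	pr_info("caller:  %pS\n", addr);		/* symbol+offset form, for return addresses */
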
diff --git a/drivers/power/reset/regulator-poweroff.c b/drivers/power/reset/regulator-poweroff.c
new file mode 100644
index 000000000000..f697088e0ad1
--- /dev/null
+++ b/drivers/power/reset/regulator-poweroff.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Force-disables a regulator to power down a device
+ *
+ * Michael Klein <michael@fossekall.de>
+ *
+ * Copyright (C) 2020 Michael Klein
+ *
+ * Based on the gpio-poweroff driver.
+ */
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/regulator/consumer.h>
+
+#define TIMEOUT_MS 3000
+
+/*
+ * Hold the configuration here; there cannot be more than one instance of
+ * the driver since pm_power_off itself is global.
+ */
+static struct regulator *cpu_regulator;
+
+static void regulator_poweroff_do_poweroff(void)
+{
+ if (cpu_regulator && regulator_is_enabled(cpu_regulator))
+ regulator_force_disable(cpu_regulator);
+
+ /* give it some time */
+ mdelay(TIMEOUT_MS);
+
+ WARN_ON(1);
+}
+
+static int regulator_poweroff_probe(struct platform_device *pdev)
+{
+ /* If a pm_power_off function has already been added, leave it alone */
+ if (pm_power_off != NULL) {
+ dev_err(&pdev->dev,
+ "%s: pm_power_off function already registered\n",
+ __func__);
+ return -EBUSY;
+ }
+
+ cpu_regulator = devm_regulator_get(&pdev->dev, "cpu");
+ if (IS_ERR(cpu_regulator))
+ return PTR_ERR(cpu_regulator);
+
+ pm_power_off = &regulator_poweroff_do_poweroff;
+ return 0;
+}
+
+static int regulator_poweroff_remove(__maybe_unused struct platform_device *pdev)
+{
+ if (pm_power_off == &regulator_poweroff_do_poweroff)
+ pm_power_off = NULL;
+
+ return 0;
+}
+
+static const struct of_device_id of_regulator_poweroff_match[] = {
+ { .compatible = "regulator-poweroff", },
+ {},
+};
+
+static struct platform_driver regulator_poweroff_driver = {
+ .probe = regulator_poweroff_probe,
+ .remove = regulator_poweroff_remove,
+ .driver = {
+ .name = "poweroff-regulator",
+ .of_match_table = of_regulator_poweroff_match,
+ },
+};
+
+module_platform_driver(regulator_poweroff_driver);
+
+MODULE_AUTHOR("Michael Klein <michael@fossekall.de>");
+MODULE_DESCRIPTION("Regulator poweroff driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:poweroff-regulator");
diff --git a/drivers/power/reset/syscon-poweroff.c b/drivers/power/reset/syscon-poweroff.c
index 4d6923b102b6..ed58bdf41e27 100644
--- a/drivers/power/reset/syscon-poweroff.c
+++ b/drivers/power/reset/syscon-poweroff.c
@@ -6,7 +6,6 @@
* Author: Moritz Fischer <moritz.fischer@ettus.com>
*/
-#include <linux/kallsyms.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/notifier.h>
@@ -34,7 +33,6 @@ static void syscon_poweroff(void)
static int syscon_poweroff_probe(struct platform_device *pdev)
{
- char symname[KSYM_NAME_LEN];
int mask_err, value_err;
map = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "regmap");
@@ -65,10 +63,8 @@ static int syscon_poweroff_probe(struct platform_device *pdev)
}
if (pm_power_off) {
- lookup_symbol_name((ulong)pm_power_off, symname);
- dev_err(&pdev->dev,
- "pm_power_off already claimed %p %s",
- pm_power_off, symname);
+ dev_err(&pdev->dev, "pm_power_off already claimed for %ps",
+ pm_power_off);
return -EBUSY;
}
diff --git a/drivers/power/supply/ab8500_btemp.c b/drivers/power/supply/ab8500_btemp.c
index 909f0242bacb..d20345386b1e 100644
--- a/drivers/power/supply/ab8500_btemp.c
+++ b/drivers/power/supply/ab8500_btemp.c
@@ -936,29 +936,23 @@ static struct ab8500_btemp_interrupts ab8500_btemp_irq[] = {
{"BTEMP_MEDIUM_HIGH", ab8500_btemp_medhigh_handler},
};
-#if defined(CONFIG_PM)
-static int ab8500_btemp_resume(struct platform_device *pdev)
+static int __maybe_unused ab8500_btemp_resume(struct device *dev)
{
- struct ab8500_btemp *di = platform_get_drvdata(pdev);
+ struct ab8500_btemp *di = dev_get_drvdata(dev);
ab8500_btemp_periodic(di, true);
return 0;
}
-static int ab8500_btemp_suspend(struct platform_device *pdev,
- pm_message_t state)
+static int __maybe_unused ab8500_btemp_suspend(struct device *dev)
{
- struct ab8500_btemp *di = platform_get_drvdata(pdev);
+ struct ab8500_btemp *di = dev_get_drvdata(dev);
ab8500_btemp_periodic(di, false);
return 0;
}
-#else
-#define ab8500_btemp_suspend NULL
-#define ab8500_btemp_resume NULL
-#endif
static int ab8500_btemp_remove(struct platform_device *pdev)
{
@@ -999,48 +993,45 @@ static int ab8500_btemp_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct abx500_bm_data *plat = pdev->dev.platform_data;
struct power_supply_config psy_cfg = {};
+ struct device *dev = &pdev->dev;
struct ab8500_btemp *di;
int irq, i, ret = 0;
u8 val;
- di = devm_kzalloc(&pdev->dev, sizeof(*di), GFP_KERNEL);
- if (!di) {
- dev_err(&pdev->dev, "%s no mem for ab8500_btemp\n", __func__);
+ di = devm_kzalloc(dev, sizeof(*di), GFP_KERNEL);
+ if (!di)
return -ENOMEM;
- }
if (!plat) {
- dev_err(&pdev->dev, "no battery management data supplied\n");
+ dev_err(dev, "no battery management data supplied\n");
return -EINVAL;
}
di->bm = plat;
if (np) {
- ret = ab8500_bm_of_probe(&pdev->dev, np, di->bm);
+ ret = ab8500_bm_of_probe(dev, np, di->bm);
if (ret) {
- dev_err(&pdev->dev, "failed to get battery information\n");
+ dev_err(dev, "failed to get battery information\n");
return ret;
}
}
/* get parent data */
- di->dev = &pdev->dev;
+ di->dev = dev;
di->parent = dev_get_drvdata(pdev->dev.parent);
/* Get ADC channels */
- di->btemp_ball = devm_iio_channel_get(&pdev->dev, "btemp_ball");
+ di->btemp_ball = devm_iio_channel_get(dev, "btemp_ball");
if (IS_ERR(di->btemp_ball)) {
- if (PTR_ERR(di->btemp_ball) == -ENODEV)
- return -EPROBE_DEFER;
- dev_err(&pdev->dev, "failed to get BTEMP BALL ADC channel\n");
- return PTR_ERR(di->btemp_ball);
+ ret = dev_err_probe(dev, PTR_ERR(di->btemp_ball),
+ "failed to get BTEMP BALL ADC channel\n");
+ return ret;
}
- di->bat_ctrl = devm_iio_channel_get(&pdev->dev, "bat_ctrl");
+ di->bat_ctrl = devm_iio_channel_get(dev, "bat_ctrl");
if (IS_ERR(di->bat_ctrl)) {
- if (PTR_ERR(di->bat_ctrl) == -ENODEV)
- return -EPROBE_DEFER;
- dev_err(&pdev->dev, "failed to get BAT CTRL ADC channel\n");
- return PTR_ERR(di->bat_ctrl);
+ ret = dev_err_probe(dev, PTR_ERR(di->bat_ctrl),
+ "failed to get BAT CTRL ADC channel\n");
+ return ret;
}
di->initialized = false;
@@ -1053,7 +1044,7 @@ static int ab8500_btemp_probe(struct platform_device *pdev)
di->btemp_wq =
alloc_workqueue("ab8500_btemp_wq", WQ_MEM_RECLAIM, 0);
if (di->btemp_wq == NULL) {
- dev_err(di->dev, "failed to create work queue\n");
+ dev_err(dev, "failed to create work queue\n");
return -ENOMEM;
}
@@ -1065,10 +1056,10 @@ static int ab8500_btemp_probe(struct platform_device *pdev)
di->btemp_ranges.btemp_low_limit = BTEMP_THERMAL_LOW_LIMIT;
di->btemp_ranges.btemp_med_limit = BTEMP_THERMAL_MED_LIMIT;
- ret = abx500_get_register_interruptible(di->dev, AB8500_CHARGER,
+ ret = abx500_get_register_interruptible(dev, AB8500_CHARGER,
AB8500_BTEMP_HIGH_TH, &val);
if (ret < 0) {
- dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+ dev_err(dev, "%s ab8500 read failed\n", __func__);
goto free_btemp_wq;
}
switch (val) {
@@ -1088,10 +1079,10 @@ static int ab8500_btemp_probe(struct platform_device *pdev)
}
/* Register BTEMP power supply class */
- di->btemp_psy = power_supply_register(di->dev, &ab8500_btemp_desc,
+ di->btemp_psy = power_supply_register(dev, &ab8500_btemp_desc,
&psy_cfg);
if (IS_ERR(di->btemp_psy)) {
- dev_err(di->dev, "failed to register BTEMP psy\n");
+ dev_err(dev, "failed to register BTEMP psy\n");
ret = PTR_ERR(di->btemp_psy);
goto free_btemp_wq;
}
@@ -1105,15 +1096,15 @@ static int ab8500_btemp_probe(struct platform_device *pdev)
}
ret = request_threaded_irq(irq, NULL, ab8500_btemp_irq[i].isr,
- IRQF_SHARED | IRQF_NO_SUSPEND,
+ IRQF_SHARED | IRQF_NO_SUSPEND | IRQF_ONESHOT,
ab8500_btemp_irq[i].name, di);
if (ret) {
- dev_err(di->dev, "failed to request %s IRQ %d: %d\n"
+ dev_err(dev, "failed to request %s IRQ %d: %d\n"
, ab8500_btemp_irq[i].name, irq, ret);
goto free_irq;
}
- dev_dbg(di->dev, "Requested %s IRQ %d: %d\n",
+ dev_dbg(dev, "Requested %s IRQ %d: %d\n",
ab8500_btemp_irq[i].name, irq, ret);
}
@@ -1138,6 +1129,8 @@ free_btemp_wq:
return ret;
}
+static SIMPLE_DEV_PM_OPS(ab8500_btemp_pm_ops, ab8500_btemp_suspend, ab8500_btemp_resume);
+
static const struct of_device_id ab8500_btemp_match[] = {
{ .compatible = "stericsson,ab8500-btemp", },
{ },
@@ -1146,11 +1139,10 @@ static const struct of_device_id ab8500_btemp_match[] = {
static struct platform_driver ab8500_btemp_driver = {
.probe = ab8500_btemp_probe,
.remove = ab8500_btemp_remove,
- .suspend = ab8500_btemp_suspend,
- .resume = ab8500_btemp_resume,
.driver = {
.name = "ab8500-btemp",
.of_match_table = ab8500_btemp_match,
+ .pm = &ab8500_btemp_pm_ops,
},
};
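
The last hunks convert the driver from the legacy platform_driver suspend/resume callbacks to dev_pm_ops. SIMPLE_DEV_PM_OPS() roughly expands to the structure below; since SET_SYSTEM_SLEEP_PM_OPS() compiles to nothing without CONFIG_PM_SLEEP, marking the callbacks __maybe_unused replaces the old #ifdef CONFIG_PM fencing:

	static const struct dev_pm_ops ab8500_btemp_pm_ops = {
		SET_SYSTEM_SLEEP_PM_OPS(ab8500_btemp_suspend, ab8500_btemp_resume)
	};
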
diff --git a/drivers/power/supply/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c
index db65be026920..ac77c8882d17 100644
--- a/drivers/power/supply/ab8500_charger.c
+++ b/drivers/power/supply/ab8500_charger.c
@@ -3209,11 +3209,10 @@ static int ab8500_charger_usb_notifier_call(struct notifier_block *nb,
return NOTIFY_OK;
}
-#if defined(CONFIG_PM)
-static int ab8500_charger_resume(struct platform_device *pdev)
+static int __maybe_unused ab8500_charger_resume(struct device *dev)
{
int ret;
- struct ab8500_charger *di = platform_get_drvdata(pdev);
+ struct ab8500_charger *di = dev_get_drvdata(dev);
/*
* For ABB revision 1.0 and 1.1 there is a bug in the watchdog
@@ -3247,10 +3246,9 @@ static int ab8500_charger_resume(struct platform_device *pdev)
return 0;
}
-static int ab8500_charger_suspend(struct platform_device *pdev,
- pm_message_t state)
+static int __maybe_unused ab8500_charger_suspend(struct device *dev)
{
- struct ab8500_charger *di = platform_get_drvdata(pdev);
+ struct ab8500_charger *di = dev_get_drvdata(dev);
/* Cancel any pending jobs */
cancel_delayed_work(&di->check_hw_failure_work);
@@ -3272,10 +3270,6 @@ static int ab8500_charger_suspend(struct platform_device *pdev,
return 0;
}
-#else
-#define ab8500_charger_suspend NULL
-#define ab8500_charger_resume NULL
-#endif
static struct notifier_block charger_nb = {
.notifier_call = ab8500_external_charger_prepare,
@@ -3354,23 +3348,22 @@ static int ab8500_charger_probe(struct platform_device *pdev)
struct power_supply_config ac_psy_cfg = {}, usb_psy_cfg = {};
struct ab8500_charger *di;
int irq, i, charger_status, ret = 0, ch_stat;
+ struct device *dev = &pdev->dev;
- di = devm_kzalloc(&pdev->dev, sizeof(*di), GFP_KERNEL);
- if (!di) {
- dev_err(&pdev->dev, "%s no mem for ab8500_charger\n", __func__);
+ di = devm_kzalloc(dev, sizeof(*di), GFP_KERNEL);
+ if (!di)
return -ENOMEM;
- }
if (!plat) {
- dev_err(&pdev->dev, "no battery management data supplied\n");
+ dev_err(dev, "no battery management data supplied\n");
return -EINVAL;
}
di->bm = plat;
if (np) {
- ret = ab8500_bm_of_probe(&pdev->dev, np, di->bm);
+ ret = ab8500_bm_of_probe(dev, np, di->bm);
if (ret) {
- dev_err(&pdev->dev, "failed to get battery information\n");
+ dev_err(dev, "failed to get battery information\n");
return ret;
}
di->autopower_cfg = of_property_read_bool(np, "autopower_cfg");
@@ -3378,40 +3371,33 @@ static int ab8500_charger_probe(struct platform_device *pdev)
di->autopower_cfg = false;
/* get parent data */
- di->dev = &pdev->dev;
+ di->dev = dev;
di->parent = dev_get_drvdata(pdev->dev.parent);
/* Get ADC channels */
- di->adc_main_charger_v = devm_iio_channel_get(&pdev->dev,
- "main_charger_v");
+ di->adc_main_charger_v = devm_iio_channel_get(dev, "main_charger_v");
if (IS_ERR(di->adc_main_charger_v)) {
- if (PTR_ERR(di->adc_main_charger_v) == -ENODEV)
- return -EPROBE_DEFER;
- dev_err(&pdev->dev, "failed to get ADC main charger voltage\n");
- return PTR_ERR(di->adc_main_charger_v);
+ ret = dev_err_probe(dev, PTR_ERR(di->adc_main_charger_v),
+ "failed to get ADC main charger voltage\n");
+ return ret;
}
- di->adc_main_charger_c = devm_iio_channel_get(&pdev->dev,
- "main_charger_c");
+ di->adc_main_charger_c = devm_iio_channel_get(dev, "main_charger_c");
if (IS_ERR(di->adc_main_charger_c)) {
- if (PTR_ERR(di->adc_main_charger_c) == -ENODEV)
- return -EPROBE_DEFER;
- dev_err(&pdev->dev, "failed to get ADC main charger current\n");
- return PTR_ERR(di->adc_main_charger_c);
+ ret = dev_err_probe(dev, PTR_ERR(di->adc_main_charger_c),
+ "failed to get ADC main charger current\n");
+ return ret;
}
- di->adc_vbus_v = devm_iio_channel_get(&pdev->dev, "vbus_v");
+ di->adc_vbus_v = devm_iio_channel_get(dev, "vbus_v");
if (IS_ERR(di->adc_vbus_v)) {
- if (PTR_ERR(di->adc_vbus_v) == -ENODEV)
- return -EPROBE_DEFER;
- dev_err(&pdev->dev, "failed to get ADC USB charger voltage\n");
- return PTR_ERR(di->adc_vbus_v);
+ ret = dev_err_probe(dev, PTR_ERR(di->adc_vbus_v),
+ "failed to get ADC USB charger voltage\n");
+ return ret;
}
- di->adc_usb_charger_c = devm_iio_channel_get(&pdev->dev,
- "usb_charger_c");
+ di->adc_usb_charger_c = devm_iio_channel_get(dev, "usb_charger_c");
if (IS_ERR(di->adc_usb_charger_c)) {
- if (PTR_ERR(di->adc_usb_charger_c) == -ENODEV)
- return -EPROBE_DEFER;
- dev_err(&pdev->dev, "failed to get ADC USB charger current\n");
- return PTR_ERR(di->adc_usb_charger_c);
+ ret = dev_err_probe(dev, PTR_ERR(di->adc_usb_charger_c),
+ "failed to get ADC USB charger current\n");
+ return ret;
}
/* initialize lock */
@@ -3467,7 +3453,7 @@ static int ab8500_charger_probe(struct platform_device *pdev)
di->charger_wq = alloc_ordered_workqueue("ab8500_charger_wq",
WQ_MEM_RECLAIM);
if (di->charger_wq == NULL) {
- dev_err(di->dev, "failed to create work queue\n");
+ dev_err(dev, "failed to create work queue\n");
return -ENOMEM;
}
@@ -3526,10 +3512,10 @@ static int ab8500_charger_probe(struct platform_device *pdev)
* is a charger connected to avoid erroneous BTEMP_HIGH/LOW
* interrupts during charging
*/
- di->regu = devm_regulator_get(di->dev, "vddadc");
+ di->regu = devm_regulator_get(dev, "vddadc");
if (IS_ERR(di->regu)) {
ret = PTR_ERR(di->regu);
- dev_err(di->dev, "failed to get vddadc regulator\n");
+ dev_err(dev, "failed to get vddadc regulator\n");
goto free_charger_wq;
}
@@ -3537,17 +3523,17 @@ static int ab8500_charger_probe(struct platform_device *pdev)
/* Initialize OVV, and other registers */
ret = ab8500_charger_init_hw_registers(di);
if (ret) {
- dev_err(di->dev, "failed to initialize ABB registers\n");
+ dev_err(dev, "failed to initialize ABB registers\n");
goto free_charger_wq;
}
/* Register AC charger class */
if (di->ac_chg.enabled) {
- di->ac_chg.psy = power_supply_register(di->dev,
+ di->ac_chg.psy = power_supply_register(dev,
&ab8500_ac_chg_desc,
&ac_psy_cfg);
if (IS_ERR(di->ac_chg.psy)) {
- dev_err(di->dev, "failed to register AC charger\n");
+ dev_err(dev, "failed to register AC charger\n");
ret = PTR_ERR(di->ac_chg.psy);
goto free_charger_wq;
}
@@ -3555,11 +3541,11 @@ static int ab8500_charger_probe(struct platform_device *pdev)
/* Register USB charger class */
if (di->usb_chg.enabled) {
- di->usb_chg.psy = power_supply_register(di->dev,
+ di->usb_chg.psy = power_supply_register(dev,
&ab8500_usb_chg_desc,
&usb_psy_cfg);
if (IS_ERR(di->usb_chg.psy)) {
- dev_err(di->dev, "failed to register USB charger\n");
+ dev_err(dev, "failed to register USB charger\n");
ret = PTR_ERR(di->usb_chg.psy);
goto free_ac;
}
@@ -3567,14 +3553,14 @@ static int ab8500_charger_probe(struct platform_device *pdev)
di->usb_phy = usb_get_phy(USB_PHY_TYPE_USB2);
if (IS_ERR_OR_NULL(di->usb_phy)) {
- dev_err(di->dev, "failed to get usb transceiver\n");
+ dev_err(dev, "failed to get usb transceiver\n");
ret = -EINVAL;
goto free_usb;
}
di->nb.notifier_call = ab8500_charger_usb_notifier_call;
ret = usb_register_notifier(di->usb_phy, &di->nb);
if (ret) {
- dev_err(di->dev, "failed to register usb notifier\n");
+ dev_err(dev, "failed to register usb notifier\n");
goto put_usb_phy;
}
@@ -3603,15 +3589,15 @@ static int ab8500_charger_probe(struct platform_device *pdev)
}
ret = request_threaded_irq(irq, NULL, ab8500_charger_irq[i].isr,
- IRQF_SHARED | IRQF_NO_SUSPEND,
+ IRQF_SHARED | IRQF_NO_SUSPEND | IRQF_ONESHOT,
ab8500_charger_irq[i].name, di);
if (ret != 0) {
- dev_err(di->dev, "failed to request %s IRQ %d: %d\n"
+ dev_err(dev, "failed to request %s IRQ %d: %d\n"
, ab8500_charger_irq[i].name, irq, ret);
goto free_irq;
}
- dev_dbg(di->dev, "Requested %s IRQ %d: %d\n",
+ dev_dbg(dev, "Requested %s IRQ %d: %d\n",
ab8500_charger_irq[i].name, irq, ret);
}
@@ -3659,6 +3645,8 @@ free_charger_wq:
return ret;
}
+static SIMPLE_DEV_PM_OPS(ab8500_charger_pm_ops, ab8500_charger_suspend, ab8500_charger_resume);
+
static const struct of_device_id ab8500_charger_match[] = {
{ .compatible = "stericsson,ab8500-charger", },
{ },
@@ -3667,11 +3655,10 @@ static const struct of_device_id ab8500_charger_match[] = {
static struct platform_driver ab8500_charger_driver = {
.probe = ab8500_charger_probe,
.remove = ab8500_charger_remove,
- .suspend = ab8500_charger_suspend,
- .resume = ab8500_charger_resume,
.driver = {
.name = "ab8500-charger",
.of_match_table = ab8500_charger_match,
+ .pm = &ab8500_charger_pm_ops,
},
};
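
dev_err_probe() logs and returns the error in one statement; for -EPROBE_DEFER it logs at debug level only and records the reason for /sys/kernel/debug/devices_deferred. Note that the old explicit -ENODEV to -EPROBE_DEFER mapping is dropped, so deferral now relies on the channel provider returning -EPROBE_DEFER itself. The idiom:

	chan = devm_iio_channel_get(dev, "main_charger_v");
	if (IS_ERR(chan))
		return dev_err_probe(dev, PTR_ERR(chan),
				     "failed to get ADC main charger voltage\n");
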
diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c
index 592a73d4dde6..3873e4857e3d 100644
--- a/drivers/power/supply/ab8500_fg.c
+++ b/drivers/power/supply/ab8500_fg.c
@@ -2942,10 +2942,9 @@ static void ab8500_fg_sysfs_psy_remove_attrs(struct ab8500_fg *di)
/* Exposure to the sysfs interface <<END>> */
-#if defined(CONFIG_PM)
-static int ab8500_fg_resume(struct platform_device *pdev)
+static int __maybe_unused ab8500_fg_resume(struct device *dev)
{
- struct ab8500_fg *di = platform_get_drvdata(pdev);
+ struct ab8500_fg *di = dev_get_drvdata(dev);
/*
* Change state if we're not charging. If we're charging we will wake
@@ -2959,10 +2958,9 @@ static int ab8500_fg_resume(struct platform_device *pdev)
return 0;
}
-static int ab8500_fg_suspend(struct platform_device *pdev,
- pm_message_t state)
+static int __maybe_unused ab8500_fg_suspend(struct device *dev)
{
- struct ab8500_fg *di = platform_get_drvdata(pdev);
+ struct ab8500_fg *di = dev_get_drvdata(dev);
flush_delayed_work(&di->fg_periodic_work);
flush_work(&di->fg_work);
@@ -2980,10 +2978,6 @@ static int ab8500_fg_suspend(struct platform_device *pdev,
return 0;
}
-#else
-#define ab8500_fg_suspend NULL
-#define ab8500_fg_resume NULL
-#endif
static int ab8500_fg_remove(struct platform_device *pdev)
{
@@ -3007,14 +3001,11 @@ static int ab8500_fg_remove(struct platform_device *pdev)
}
/* ab8500 fg driver interrupts and their respective isr */
-static struct ab8500_fg_interrupts ab8500_fg_irq_th[] = {
+static struct ab8500_fg_interrupts ab8500_fg_irq[] = {
{"NCONV_ACCU", ab8500_fg_cc_convend_handler},
{"BATT_OVV", ab8500_fg_batt_ovv_handler},
{"LOW_BAT_F", ab8500_fg_lowbatf_handler},
{"CC_INT_CALIB", ab8500_fg_cc_int_calib_handler},
-};
-
-static struct ab8500_fg_interrupts ab8500_fg_irq_bh[] = {
{"CCEOC", ab8500_fg_cc_data_end_handler},
};
@@ -3037,26 +3028,25 @@ static int ab8500_fg_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct abx500_bm_data *plat = pdev->dev.platform_data;
struct power_supply_config psy_cfg = {};
+ struct device *dev = &pdev->dev;
struct ab8500_fg *di;
int i, irq;
int ret = 0;
- di = devm_kzalloc(&pdev->dev, sizeof(*di), GFP_KERNEL);
- if (!di) {
- dev_err(&pdev->dev, "%s no mem for ab8500_fg\n", __func__);
+ di = devm_kzalloc(dev, sizeof(*di), GFP_KERNEL);
+ if (!di)
return -ENOMEM;
- }
if (!plat) {
- dev_err(&pdev->dev, "no battery management data supplied\n");
+ dev_err(dev, "no battery management data supplied\n");
return -EINVAL;
}
di->bm = plat;
if (np) {
- ret = ab8500_bm_of_probe(&pdev->dev, np, di->bm);
+ ret = ab8500_bm_of_probe(dev, np, di->bm);
if (ret) {
- dev_err(&pdev->dev, "failed to get battery information\n");
+ dev_err(dev, "failed to get battery information\n");
return ret;
}
}
@@ -3064,15 +3054,14 @@ static int ab8500_fg_probe(struct platform_device *pdev)
mutex_init(&di->cc_lock);
/* get parent data */
- di->dev = &pdev->dev;
+ di->dev = dev;
di->parent = dev_get_drvdata(pdev->dev.parent);
- di->main_bat_v = devm_iio_channel_get(&pdev->dev, "main_bat_v");
+ di->main_bat_v = devm_iio_channel_get(dev, "main_bat_v");
if (IS_ERR(di->main_bat_v)) {
- if (PTR_ERR(di->main_bat_v) == -ENODEV)
- return -EPROBE_DEFER;
- dev_err(&pdev->dev, "failed to get main battery ADC channel\n");
- return PTR_ERR(di->main_bat_v);
+ ret = dev_err_probe(dev, PTR_ERR(di->main_bat_v),
+ "failed to get main battery ADC channel\n");
+ return ret;
}
psy_cfg.supplied_to = supply_interface;
@@ -3094,7 +3083,7 @@ static int ab8500_fg_probe(struct platform_device *pdev)
/* Create a work queue for running the FG algorithm */
di->fg_wq = alloc_ordered_workqueue("ab8500_fg_wq", WQ_MEM_RECLAIM);
if (di->fg_wq == NULL) {
- dev_err(di->dev, "failed to create work queue\n");
+ dev_err(dev, "failed to create work queue\n");
return -ENOMEM;
}
@@ -3129,7 +3118,7 @@ static int ab8500_fg_probe(struct platform_device *pdev)
/* Initialize OVV, and other registers */
ret = ab8500_fg_init_hw_registers(di);
if (ret) {
- dev_err(di->dev, "failed to initialize registers\n");
+ dev_err(dev, "failed to initialize registers\n");
goto free_inst_curr_wq;
}
@@ -3138,9 +3127,9 @@ static int ab8500_fg_probe(struct platform_device *pdev)
di->flags.batt_id_received = false;
/* Register FG power supply class */
- di->fg_psy = power_supply_register(di->dev, &ab8500_fg_desc, &psy_cfg);
+ di->fg_psy = power_supply_register(dev, &ab8500_fg_desc, &psy_cfg);
if (IS_ERR(di->fg_psy)) {
- dev_err(di->dev, "failed to register FG psy\n");
+ dev_err(dev, "failed to register FG psy\n");
ret = PTR_ERR(di->fg_psy);
goto free_inst_curr_wq;
}
@@ -3156,45 +3145,26 @@ static int ab8500_fg_probe(struct platform_device *pdev)
init_completion(&di->ab8500_fg_complete);
/* Register primary interrupt handlers */
- for (i = 0; i < ARRAY_SIZE(ab8500_fg_irq_th); i++) {
- irq = platform_get_irq_byname(pdev, ab8500_fg_irq_th[i].name);
+ for (i = 0; i < ARRAY_SIZE(ab8500_fg_irq); i++) {
+ irq = platform_get_irq_byname(pdev, ab8500_fg_irq[i].name);
if (irq < 0) {
ret = irq;
- goto free_irq_th;
+ goto free_irq;
}
- ret = request_irq(irq, ab8500_fg_irq_th[i].isr,
- IRQF_SHARED | IRQF_NO_SUSPEND,
- ab8500_fg_irq_th[i].name, di);
+ ret = request_threaded_irq(irq, NULL, ab8500_fg_irq[i].isr,
+ IRQF_SHARED | IRQF_NO_SUSPEND | IRQF_ONESHOT,
+ ab8500_fg_irq[i].name, di);
if (ret != 0) {
- dev_err(di->dev, "failed to request %s IRQ %d: %d\n",
- ab8500_fg_irq_th[i].name, irq, ret);
- goto free_irq_th;
+ dev_err(dev, "failed to request %s IRQ %d: %d\n",
+ ab8500_fg_irq[i].name, irq, ret);
+ goto free_irq;
}
- dev_dbg(di->dev, "Requested %s IRQ %d: %d\n",
- ab8500_fg_irq_th[i].name, irq, ret);
+ dev_dbg(dev, "Requested %s IRQ %d: %d\n",
+ ab8500_fg_irq[i].name, irq, ret);
}
- /* Register threaded interrupt handler */
- irq = platform_get_irq_byname(pdev, ab8500_fg_irq_bh[0].name);
- if (irq < 0) {
- ret = irq;
- goto free_irq_th;
- }
-
- ret = request_threaded_irq(irq, NULL, ab8500_fg_irq_bh[0].isr,
- IRQF_SHARED | IRQF_NO_SUSPEND | IRQF_ONESHOT,
- ab8500_fg_irq_bh[0].name, di);
-
- if (ret != 0) {
- dev_err(di->dev, "failed to request %s IRQ %d: %d\n",
- ab8500_fg_irq_bh[0].name, irq, ret);
- goto free_irq_th;
- }
- dev_dbg(di->dev, "Requested %s IRQ %d: %d\n",
- ab8500_fg_irq_bh[0].name, irq, ret);
-
di->irq = platform_get_irq_byname(pdev, "CCEOC");
disable_irq(di->irq);
di->nbr_cceoc_irq_cnt = 0;
@@ -3203,13 +3173,13 @@ static int ab8500_fg_probe(struct platform_device *pdev)
ret = ab8500_fg_sysfs_init(di);
if (ret) {
- dev_err(di->dev, "failed to create sysfs entry\n");
+ dev_err(dev, "failed to create sysfs entry\n");
goto free_irq;
}
ret = ab8500_fg_sysfs_psy_create_attrs(di);
if (ret) {
- dev_err(di->dev, "failed to create FG psy\n");
+ dev_err(dev, "failed to create FG psy\n");
ab8500_fg_sysfs_exit(di);
goto free_irq;
}
@@ -3230,12 +3200,9 @@ static int ab8500_fg_probe(struct platform_device *pdev)
free_irq:
/* We also have to free all registered irqs */
- irq = platform_get_irq_byname(pdev, ab8500_fg_irq_bh[0].name);
- free_irq(irq, di);
-free_irq_th:
while (--i >= 0) {
/* Last assignment of i from primary interrupt handlers */
- irq = platform_get_irq_byname(pdev, ab8500_fg_irq_th[i].name);
+ irq = platform_get_irq_byname(pdev, ab8500_fg_irq[i].name);
free_irq(irq, di);
}
@@ -3245,6 +3212,8 @@ free_inst_curr_wq:
return ret;
}
+static SIMPLE_DEV_PM_OPS(ab8500_fg_pm_ops, ab8500_fg_suspend, ab8500_fg_resume);
+
static const struct of_device_id ab8500_fg_match[] = {
{ .compatible = "stericsson,ab8500-fg", },
{ },
@@ -3253,11 +3222,10 @@ static const struct of_device_id ab8500_fg_match[] = {
static struct platform_driver ab8500_fg_driver = {
.probe = ab8500_fg_probe,
.remove = ab8500_fg_remove,
- .suspend = ab8500_fg_suspend,
- .resume = ab8500_fg_resume,
.driver = {
.name = "ab8500-fg",
.of_match_table = ab8500_fg_match,
+ .pm = &ab8500_fg_pm_ops,
},
};
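
Folding the former "th"/"bh" tables into one works because request_threaded_irq() with a NULL primary handler runs all of the work in the IRQ thread; IRQF_ONESHOT is mandatory in that mode (the core rejects the request with -EINVAL otherwise) and keeps the line masked until the thread returns, so a level-triggered source cannot storm. The resulting call shape:

	ret = request_threaded_irq(irq, NULL /* no primary handler */, isr,
				   IRQF_SHARED | IRQF_NO_SUSPEND | IRQF_ONESHOT,
				   name, data);
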
diff --git a/drivers/power/supply/abx500_chargalg.c b/drivers/power/supply/abx500_chargalg.c
index 175c4f3d7955..a9d84d845f24 100644
--- a/drivers/power/supply/abx500_chargalg.c
+++ b/drivers/power/supply/abx500_chargalg.c
@@ -1913,10 +1913,9 @@ static int abx500_chargalg_sysfs_init(struct abx500_chargalg *di)
}
/* Exposure to the sysfs interface <<END>> */
-#if defined(CONFIG_PM)
-static int abx500_chargalg_resume(struct platform_device *pdev)
+static int __maybe_unused abx500_chargalg_resume(struct device *dev)
{
- struct abx500_chargalg *di = platform_get_drvdata(pdev);
+ struct abx500_chargalg *di = dev_get_drvdata(dev);
/* Kick charger watchdog if charging (any charger online) */
if (di->chg_info.online_chg)
@@ -1931,10 +1930,9 @@ static int abx500_chargalg_resume(struct platform_device *pdev)
return 0;
}
-static int abx500_chargalg_suspend(struct platform_device *pdev,
- pm_message_t state)
+static int __maybe_unused abx500_chargalg_suspend(struct device *dev)
{
- struct abx500_chargalg *di = platform_get_drvdata(pdev);
+ struct abx500_chargalg *di = dev_get_drvdata(dev);
if (di->chg_info.online_chg)
cancel_delayed_work_sync(&di->chargalg_wd_work);
@@ -1943,10 +1941,6 @@ static int abx500_chargalg_suspend(struct platform_device *pdev,
return 0;
}
-#else
-#define abx500_chargalg_suspend NULL
-#define abx500_chargalg_resume NULL
-#endif
static int abx500_chargalg_remove(struct platform_device *pdev)
{
@@ -2080,6 +2074,8 @@ free_chargalg_wq:
return ret;
}
+static SIMPLE_DEV_PM_OPS(abx500_chargalg_pm_ops, abx500_chargalg_suspend, abx500_chargalg_resume);
+
static const struct of_device_id ab8500_chargalg_match[] = {
{ .compatible = "stericsson,ab8500-chargalg", },
{ },
@@ -2088,11 +2084,10 @@ static const struct of_device_id ab8500_chargalg_match[] = {
static struct platform_driver abx500_chargalg_driver = {
.probe = abx500_chargalg_probe,
.remove = abx500_chargalg_remove,
- .suspend = abx500_chargalg_suspend,
- .resume = abx500_chargalg_resume,
.driver = {
.name = "ab8500-chargalg",
.of_match_table = ab8500_chargalg_match,
+ .pm = &abx500_chargalg_pm_ops,
},
};
diff --git a/drivers/power/supply/axp20x_usb_power.c b/drivers/power/supply/axp20x_usb_power.c
index 0eaa86c52874..70b28b699a80 100644
--- a/drivers/power/supply/axp20x_usb_power.c
+++ b/drivers/power/supply/axp20x_usb_power.c
@@ -92,7 +92,7 @@ static irqreturn_t axp20x_usb_power_irq(int irq, void *devid)
power_supply_changed(power->supply);
- mod_delayed_work(system_wq, &power->vbus_detect, DEBOUNCE_TIME);
+ mod_delayed_work(system_power_efficient_wq, &power->vbus_detect, DEBOUNCE_TIME);
return IRQ_HANDLED;
}
@@ -117,7 +117,7 @@ static void axp20x_usb_power_poll_vbus(struct work_struct *work)
out:
if (axp20x_usb_vbus_needs_polling(power))
- mod_delayed_work(system_wq, &power->vbus_detect, DEBOUNCE_TIME);
+ mod_delayed_work(system_power_efficient_wq, &power->vbus_detect, DEBOUNCE_TIME);
}
static int axp20x_get_current_max(struct axp20x_usb_power *power, int *val)
@@ -397,7 +397,7 @@ static int axp20x_usb_power_prop_writeable(struct power_supply *psy,
struct axp20x_usb_power *power = power_supply_get_drvdata(psy);
/*
- * The VBUS path select flag works differently on on AXP288 and newer:
+ * The VBUS path select flag works differently on AXP288 and newer:
* - On AXP20x and AXP22x, the flag enables VBUS (ignoring N_VBUSEN).
* - On AXP288 and AXP8xx, the flag disables VBUS (ignoring N_VBUSEN).
* We only expose the control on variants where it can be used to force
@@ -525,7 +525,7 @@ static int axp20x_usb_power_resume(struct device *dev)
while (i < power->num_irqs)
enable_irq(power->irqs[i++]);
- mod_delayed_work(system_wq, &power->vbus_detect, DEBOUNCE_TIME);
+ mod_delayed_work(system_power_efficient_wq, &power->vbus_detect, DEBOUNCE_TIME);
return 0;
}
@@ -647,7 +647,7 @@ static int axp20x_usb_power_probe(struct platform_device *pdev)
INIT_DELAYED_WORK(&power->vbus_detect, axp20x_usb_power_poll_vbus);
if (axp20x_usb_vbus_needs_polling(power))
- queue_delayed_work(system_wq, &power->vbus_detect, 0);
+ queue_delayed_work(system_power_efficient_wq, &power->vbus_detect, 0);
return 0;
}
diff --git a/drivers/power/supply/axp288_charger.c b/drivers/power/supply/axp288_charger.c
index 9d981b76c1e7..a4df1ea92386 100644
--- a/drivers/power/supply/axp288_charger.c
+++ b/drivers/power/supply/axp288_charger.c
@@ -548,14 +548,15 @@ out:
/*
* The HP Pavilion x2 10 series comes in a number of variants:
- * Bay Trail SoC + AXP288 PMIC, DMI_BOARD_NAME: "815D"
- * Cherry Trail SoC + AXP288 PMIC, DMI_BOARD_NAME: "813E"
- * Cherry Trail SoC + TI PMIC, DMI_BOARD_NAME: "827C" or "82F4"
+ * Bay Trail SoC + AXP288 PMIC, Micro-USB, DMI_BOARD_NAME: "8021"
+ * Bay Trail SoC + AXP288 PMIC, Type-C, DMI_BOARD_NAME: "815D"
+ * Cherry Trail SoC + AXP288 PMIC, Type-C, DMI_BOARD_NAME: "813E"
+ * Cherry Trail SoC + TI PMIC, Type-C, DMI_BOARD_NAME: "827C" or "82F4"
*
- * The variants with the AXP288 PMIC are all kinds of special:
+ * The variants with the AXP288 + Type-C connector are all kinds of special:
*
- * 1. All variants use a Type-C connector which the AXP288 does not support, so
- * when using a Type-C charger it is not recognized. Unlike most AXP288 devices,
+ * 1. They use a Type-C connector which the AXP288 does not support, so when
+ * using a Type-C charger it is not recognized. Unlike most AXP288 devices,
* this model actually has mostly working ACPI AC / Battery code, the ACPI code
* "solves" this by simply setting the input_current_limit to 3A.
* There are still some issues with the ACPI code, so we use this native driver,
@@ -578,12 +579,17 @@ out:
*/
static const struct dmi_system_id axp288_hp_x2_dmi_ids[] = {
{
- /*
- * Bay Trail model has "Hewlett-Packard" as sys_vendor, Cherry
- * Trail model has "HP", so we only match on product_name.
- */
.matches = {
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"),
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "815D"),
+ },
+ },
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "813E"),
},
},
{} /* Terminating entry */
diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c
index d14186525e1e..4841e14a5bfb 100644
--- a/drivers/power/supply/bq24190_charger.c
+++ b/drivers/power/supply/bq24190_charger.c
@@ -16,7 +16,6 @@
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/workqueue.h>
-#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/extcon-provider.h>
@@ -448,8 +447,10 @@ static ssize_t bq24190_sysfs_show(struct device *dev,
return -EINVAL;
ret = pm_runtime_get_sync(bdi->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(bdi->dev);
return ret;
+ }
ret = bq24190_read_mask(bdi, info->reg, info->mask, info->shift, &v);
if (ret)
@@ -1077,8 +1078,10 @@ static int bq24190_charger_get_property(struct power_supply *psy,
dev_dbg(bdi->dev, "prop: %d\n", psp);
ret = pm_runtime_get_sync(bdi->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(bdi->dev);
return ret;
+ }
switch (psp) {
case POWER_SUPPLY_PROP_CHARGE_TYPE:
@@ -1149,8 +1152,10 @@ static int bq24190_charger_set_property(struct power_supply *psy,
dev_dbg(bdi->dev, "prop: %d\n", psp);
ret = pm_runtime_get_sync(bdi->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(bdi->dev);
return ret;
+ }
switch (psp) {
case POWER_SUPPLY_PROP_ONLINE:
@@ -1410,8 +1415,10 @@ static int bq24190_battery_get_property(struct power_supply *psy,
dev_dbg(bdi->dev, "prop: %d\n", psp);
ret = pm_runtime_get_sync(bdi->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(bdi->dev);
return ret;
+ }
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
@@ -1456,8 +1463,10 @@ static int bq24190_battery_set_property(struct power_supply *psy,
dev_dbg(bdi->dev, "prop: %d\n", psp);
ret = pm_runtime_get_sync(bdi->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(bdi->dev);
return ret;
+ }
switch (psp) {
case POWER_SUPPLY_PROP_ONLINE:
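
pm_runtime_get_sync() increments the device's usage count even when it fails, so every error path has to drop that reference or the device can never runtime-suspend again; that is exactly what the added pm_runtime_put_noidle() calls do. Later kernels wrap this pattern as pm_runtime_resume_and_get(). The shape of the fix:

	ret = pm_runtime_get_sync(bdi->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(bdi->dev);	/* balance the usage count */
		return ret;
	}
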
diff --git a/drivers/power/supply/bq24735-charger.c b/drivers/power/supply/bq24735-charger.c
index 6931e1d826f5..ab2f4bf8f603 100644
--- a/drivers/power/supply/bq24735-charger.c
+++ b/drivers/power/supply/bq24735-charger.c
@@ -18,7 +18,6 @@
*/
#include <linux/err.h>
-#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/interrupt.h>
diff --git a/drivers/power/supply/bq25890_charger.c b/drivers/power/supply/bq25890_charger.c
index 34c21c51bac1..945c3257ca93 100644
--- a/drivers/power/supply/bq25890_charger.c
+++ b/drivers/power/supply/bq25890_charger.c
@@ -299,7 +299,7 @@ static const union {
/* TODO: BQ25896 has max ICHG 3008 mA */
[TBL_ICHG] = { .rt = {0, 5056000, 64000} }, /* uA */
[TBL_ITERM] = { .rt = {64000, 1024000, 64000} }, /* uA */
- [TBL_IILIM] = { .rt = {50000, 3200000, 50000} }, /* uA */
+ [TBL_IILIM] = { .rt = {100000, 3250000, 50000} }, /* uA */
[TBL_VREG] = { .rt = {3840000, 4608000, 16000} }, /* uV */
[TBL_BOOSTV] = { .rt = {4550000, 5510000, 64000} }, /* uV */
[TBL_SYSVMIN] = { .rt = {3000000, 3700000, 100000} }, /* uV */
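
The corrected TBL_IILIM range matches the BQ25890 datasheet's IINLIM field: a 100 mA offset with 50 mA steps over a 6-bit value, i.e. 100 mA to 3250 mA. The old table decoded every setting one step low and permitted impossible values below 100 mA. Decode sketch:

	/* IINLIM: 6-bit field, 100 mA offset, 50 mA step */
	unsigned int idx = 0x3f;			/* maximum register value */
	unsigned int ua  = 100000 + idx * 50000;	/* = 3250000 uA (3.25 A) */
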
diff --git a/drivers/power/supply/collie_battery.c b/drivers/power/supply/collie_battery.c
index cbd588e9e233..7fb9b549f2de 100644
--- a/drivers/power/supply/collie_battery.c
+++ b/drivers/power/supply/collie_battery.c
@@ -12,7 +12,9 @@
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/machine.h>
+#include <linux/gpio/consumer.h>
#include <linux/mfd/ucb1x00.h>
#include <asm/mach/sharpsl_param.h>
@@ -31,18 +33,18 @@ struct collie_bat {
struct mutex work_lock; /* protects data */
bool (*is_present)(struct collie_bat *bat);
- int gpio_full;
- int gpio_charge_on;
+ struct gpio_desc *gpio_full;
+ struct gpio_desc *gpio_charge_on;
int technology;
- int gpio_bat;
+ struct gpio_desc *gpio_bat;
int adc_bat;
int adc_bat_divider;
int bat_max;
int bat_min;
- int gpio_temp;
+ struct gpio_desc *gpio_temp;
int adc_temp;
int adc_temp_divider;
};
@@ -53,15 +55,15 @@ static unsigned long collie_read_bat(struct collie_bat *bat)
{
unsigned long value = 0;
- if (bat->gpio_bat < 0 || bat->adc_bat < 0)
+ if (!bat->gpio_bat || bat->adc_bat < 0)
return 0;
mutex_lock(&bat_lock);
- gpio_set_value(bat->gpio_bat, 1);
+ gpiod_set_value(bat->gpio_bat, 1);
msleep(5);
ucb1x00_adc_enable(ucb);
value = ucb1x00_adc_read(ucb, bat->adc_bat, UCB_SYNC);
ucb1x00_adc_disable(ucb);
- gpio_set_value(bat->gpio_bat, 0);
+ gpiod_set_value(bat->gpio_bat, 0);
mutex_unlock(&bat_lock);
value = value * 1000000 / bat->adc_bat_divider;
@@ -71,16 +73,16 @@ static unsigned long collie_read_bat(struct collie_bat *bat)
static unsigned long collie_read_temp(struct collie_bat *bat)
{
unsigned long value = 0;
- if (bat->gpio_temp < 0 || bat->adc_temp < 0)
+ if (!bat->gpio_temp || bat->adc_temp < 0)
return 0;
mutex_lock(&bat_lock);
- gpio_set_value(bat->gpio_temp, 1);
+ gpiod_set_value(bat->gpio_temp, 1);
msleep(5);
ucb1x00_adc_enable(ucb);
value = ucb1x00_adc_read(ucb, bat->adc_temp, UCB_SYNC);
ucb1x00_adc_disable(ucb);
- gpio_set_value(bat->gpio_temp, 0);
+ gpiod_set_value(bat->gpio_temp, 0);
mutex_unlock(&bat_lock);
value = value * 10000 / bat->adc_temp_divider;
@@ -162,23 +164,23 @@ static void collie_bat_update(struct collie_bat *bat)
bat->full_chrg = -1;
} else if (power_supply_am_i_supplied(psy)) {
if (bat->status == POWER_SUPPLY_STATUS_DISCHARGING) {
- gpio_set_value(bat->gpio_charge_on, 1);
+ gpiod_set_value(bat->gpio_charge_on, 1);
mdelay(15);
}
- if (gpio_get_value(bat->gpio_full)) {
+ if (gpiod_get_value(bat->gpio_full)) {
if (old == POWER_SUPPLY_STATUS_CHARGING ||
bat->full_chrg == -1)
bat->full_chrg = collie_read_bat(bat);
- gpio_set_value(bat->gpio_charge_on, 0);
+ gpiod_set_value(bat->gpio_charge_on, 0);
bat->status = POWER_SUPPLY_STATUS_FULL;
} else {
- gpio_set_value(bat->gpio_charge_on, 1);
+ gpiod_set_value(bat->gpio_charge_on, 1);
bat->status = POWER_SUPPLY_STATUS_CHARGING;
}
} else {
- gpio_set_value(bat->gpio_charge_on, 0);
+ gpiod_set_value(bat->gpio_charge_on, 0);
bat->status = POWER_SUPPLY_STATUS_DISCHARGING;
}
@@ -230,18 +232,18 @@ static struct collie_bat collie_bat_main = {
.full_chrg = -1,
.psy = NULL,
- .gpio_full = COLLIE_GPIO_CO,
- .gpio_charge_on = COLLIE_GPIO_CHARGE_ON,
+ .gpio_full = NULL,
+ .gpio_charge_on = NULL,
.technology = POWER_SUPPLY_TECHNOLOGY_LIPO,
- .gpio_bat = COLLIE_GPIO_MBAT_ON,
+ .gpio_bat = NULL,
.adc_bat = UCB_ADC_INP_AD1,
.adc_bat_divider = 155,
.bat_max = 4310000,
.bat_min = 1551 * 1000000 / 414,
- .gpio_temp = COLLIE_GPIO_TMP_ON,
+ .gpio_temp = NULL,
.adc_temp = UCB_ADC_INP_AD0,
.adc_temp_divider = 10000,
};
@@ -260,30 +262,24 @@ static struct collie_bat collie_bat_bu = {
.full_chrg = -1,
.psy = NULL,
- .gpio_full = -1,
- .gpio_charge_on = -1,
+ .gpio_full = NULL,
+ .gpio_charge_on = NULL,
.technology = POWER_SUPPLY_TECHNOLOGY_LiMn,
- .gpio_bat = COLLIE_GPIO_BBAT_ON,
+ .gpio_bat = NULL,
.adc_bat = UCB_ADC_INP_AD1,
.adc_bat_divider = 155,
.bat_max = 3000000,
.bat_min = 1900000,
- .gpio_temp = -1,
+ .gpio_temp = NULL,
.adc_temp = -1,
.adc_temp_divider = -1,
};
-static struct gpio collie_batt_gpios[] = {
- { COLLIE_GPIO_CO, GPIOF_IN, "main battery full" },
- { COLLIE_GPIO_MAIN_BAT_LOW, GPIOF_IN, "main battery low" },
- { COLLIE_GPIO_CHARGE_ON, GPIOF_OUT_INIT_LOW, "main charge on" },
- { COLLIE_GPIO_MBAT_ON, GPIOF_OUT_INIT_LOW, "main battery" },
- { COLLIE_GPIO_TMP_ON, GPIOF_OUT_INIT_LOW, "main battery temp" },
- { COLLIE_GPIO_BBAT_ON, GPIOF_OUT_INIT_LOW, "backup battery" },
-};
+/* Obtained but unused GPIO */
+static struct gpio_desc *collie_mbat_low;
#ifdef CONFIG_PM
static int wakeup_enabled;
@@ -295,7 +291,7 @@ static int collie_bat_suspend(struct ucb1x00_dev *dev)
if (device_may_wakeup(&dev->ucb->dev) &&
collie_bat_main.status == POWER_SUPPLY_STATUS_CHARGING)
- wakeup_enabled = !enable_irq_wake(gpio_to_irq(COLLIE_GPIO_CO));
+ wakeup_enabled = !enable_irq_wake(gpiod_to_irq(collie_bat_main.gpio_full));
else
wakeup_enabled = 0;
@@ -305,7 +301,7 @@ static int collie_bat_suspend(struct ucb1x00_dev *dev)
static int collie_bat_resume(struct ucb1x00_dev *dev)
{
if (wakeup_enabled)
- disable_irq_wake(gpio_to_irq(COLLIE_GPIO_CO));
+ disable_irq_wake(gpiod_to_irq(collie_bat_main.gpio_full));
/* things may have changed while we were away */
schedule_work(&bat_work);
@@ -320,16 +316,71 @@ static int collie_bat_probe(struct ucb1x00_dev *dev)
{
int ret;
struct power_supply_config psy_main_cfg = {}, psy_bu_cfg = {};
+ struct gpio_chip *gc = &dev->ucb->gpio;
if (!machine_is_collie())
return -ENODEV;
ucb = dev->ucb;
- ret = gpio_request_array(collie_batt_gpios,
- ARRAY_SIZE(collie_batt_gpios));
- if (ret)
- return ret;
+ /* Obtain all the main battery GPIOs */
+ collie_bat_main.gpio_full = gpiod_get(&dev->ucb->dev,
+ "main battery full",
+ GPIOD_IN);
+ if (IS_ERR(collie_bat_main.gpio_full))
+ return PTR_ERR(collie_bat_main.gpio_full);
+
+ collie_mbat_low = gpiod_get(&dev->ucb->dev,
+ "main battery low",
+ GPIOD_IN);
+ if (IS_ERR(collie_mbat_low)) {
+ ret = PTR_ERR(collie_mbat_low);
+ goto err_put_gpio_full;
+ }
+
+ collie_bat_main.gpio_charge_on = gpiod_get(&dev->ucb->dev,
+ "main charge on",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(collie_bat_main.gpio_charge_on)) {
+ ret = PTR_ERR(collie_bat_main.gpio_charge_on);
+ goto err_put_mbat_low;
+ }
+
+ /* COLLIE_GPIO_MBAT_ON = GPIO 7 on the UCB (TC35143) */
+ collie_bat_main.gpio_bat = gpiochip_request_own_desc(gc,
+ 7,
+ "main battery",
+ GPIO_ACTIVE_HIGH,
+ GPIOD_OUT_LOW);
+ if (IS_ERR(collie_bat_main.gpio_bat)) {
+ ret = PTR_ERR(collie_bat_main.gpio_bat);
+ goto err_put_gpio_charge_on;
+ }
+
+ /* COLLIE_GPIO_TMP_ON = GPIO 9 on the UCB (TC35143) */
+ collie_bat_main.gpio_temp = gpiochip_request_own_desc(gc,
+ 9,
+ "main battery temp",
+ GPIO_ACTIVE_HIGH,
+ GPIOD_OUT_LOW);
+ if (IS_ERR(collie_bat_main.gpio_temp)) {
+ ret = PTR_ERR(collie_bat_main.gpio_temp);
+ goto err_free_gpio_bat;
+ }
+
+ /*
+ * Obtain the backup battery COLLIE_GPIO_BBAT_ON which is
+ * GPIO 8 on the UCB (TC35143)
+ */
+ collie_bat_bu.gpio_bat = gpiochip_request_own_desc(gc,
+ 8,
+ "backup battery",
+ GPIO_ACTIVE_HIGH,
+ GPIOD_OUT_LOW);
+ if (IS_ERR(collie_bat_bu.gpio_bat)) {
+ ret = PTR_ERR(collie_bat_bu.gpio_bat);
+ goto err_free_gpio_temp;
+ }
mutex_init(&collie_bat_main.work_lock);
@@ -370,27 +421,43 @@ err_irq:
err_psy_reg_bu:
power_supply_unregister(collie_bat_main.psy);
err_psy_reg_main:
-
/* see comment in collie_bat_remove */
cancel_work_sync(&bat_work);
- gpio_free_array(collie_batt_gpios, ARRAY_SIZE(collie_batt_gpios));
+ gpiochip_free_own_desc(collie_bat_bu.gpio_bat);
+err_free_gpio_temp:
+ gpiochip_free_own_desc(collie_bat_main.gpio_temp);
+err_free_gpio_bat:
+ gpiochip_free_own_desc(collie_bat_main.gpio_bat);
+err_put_gpio_charge_on:
+ gpiod_put(collie_bat_main.gpio_charge_on);
+err_put_mbat_low:
+ gpiod_put(collie_mbat_low);
+err_put_gpio_full:
+ gpiod_put(collie_bat_main.gpio_full);
+
return ret;
}
static void collie_bat_remove(struct ucb1x00_dev *dev)
{
free_irq(gpio_to_irq(COLLIE_GPIO_CO), &collie_bat_main);
-
power_supply_unregister(collie_bat_bu.psy);
power_supply_unregister(collie_bat_main.psy);
+ /* These are obtained from the machine */
+ gpiod_put(collie_bat_main.gpio_full);
+ gpiod_put(collie_mbat_low);
+ gpiod_put(collie_bat_main.gpio_charge_on);
+ /* These are directly from the UCB so let's free them */
+ gpiochip_free_own_desc(collie_bat_main.gpio_bat);
+ gpiochip_free_own_desc(collie_bat_main.gpio_temp);
+ gpiochip_free_own_desc(collie_bat_bu.gpio_bat);
/*
* Now cancel the bat_work. We won't get any more schedules,
* since all sources (isr and external_power_changed) are
* unregistered now.
*/
cancel_work_sync(&bat_work);
- gpio_free_array(collie_batt_gpios, ARRAY_SIZE(collie_batt_gpios));
}
static struct ucb1x00_driver collie_bat_driver = {
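
After this conversion the three machine-level lines come from a board-file gpiod lookup table keyed by the con_id strings passed to gpiod_get(), while the UCB-internal lines are claimed straight from the gpio_chip with gpiochip_request_own_desc(). A hypothetical lookup table (the chip label, pin numbers and dev_id are illustrative; the real table belongs in the Collie machine code):

	static struct gpiod_lookup_table collie_battery_gpiod_table = {
		.dev_id = "ucb1x00",	/* must match the consumer device name */
		.table = {
			GPIO_LOOKUP("sa1100", 20, "main battery full", GPIO_ACTIVE_HIGH),
			GPIO_LOOKUP("sa1100", 19, "main battery low",  GPIO_ACTIVE_HIGH),
			GPIO_LOOKUP("sa1100", 22, "main charge on",    GPIO_ACTIVE_HIGH),
			{ },
		},
	};
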
diff --git a/drivers/power/supply/generic-adc-battery.c b/drivers/power/supply/generic-adc-battery.c
index caa829738ef7..0032069fbc2b 100644
--- a/drivers/power/supply/generic-adc-battery.c
+++ b/drivers/power/supply/generic-adc-battery.c
@@ -12,7 +12,7 @@
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/err.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
@@ -52,6 +52,7 @@ struct gab {
int level;
int status;
bool cable_plugged;
+ struct gpio_desc *charge_finished;
};
static struct gab *to_generic_bat(struct power_supply *psy)
@@ -91,13 +92,9 @@ static const enum power_supply_property gab_dyn_props[] = {
static bool gab_charge_finished(struct gab *adc_bat)
{
- struct gab_platform_data *pdata = adc_bat->pdata;
- bool ret = gpio_get_value(pdata->gpio_charge_finished);
- bool inv = pdata->gpio_inverted;
-
- if (!gpio_is_valid(pdata->gpio_charge_finished))
+ if (!adc_bat->charge_finished)
return false;
- return ret ^ inv;
+ return gpiod_get_value(adc_bat->charge_finished);
}
static int gab_get_status(struct gab *adc_bat)
@@ -327,18 +324,17 @@ static int gab_probe(struct platform_device *pdev)
INIT_DELAYED_WORK(&adc_bat->bat_work, gab_work);
- if (gpio_is_valid(pdata->gpio_charge_finished)) {
+ adc_bat->charge_finished = devm_gpiod_get_optional(&pdev->dev,
+ "charged", GPIOD_IN);
+ if (adc_bat->charge_finished) {
int irq;
- ret = gpio_request(pdata->gpio_charge_finished, "charged");
- if (ret)
- goto gpio_req_fail;
- irq = gpio_to_irq(pdata->gpio_charge_finished);
+ irq = gpiod_to_irq(adc_bat->charge_finished);
ret = request_any_context_irq(irq, gab_charged,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
"battery charged", adc_bat);
if (ret < 0)
- goto err_gpio;
+ goto gpio_req_fail;
}
platform_set_drvdata(pdev, adc_bat);
@@ -348,8 +344,6 @@ static int gab_probe(struct platform_device *pdev)
msecs_to_jiffies(0));
return 0;
-err_gpio:
- gpio_free(pdata->gpio_charge_finished);
gpio_req_fail:
power_supply_unregister(adc_bat->psy);
err_reg_fail:
@@ -367,14 +361,11 @@ static int gab_remove(struct platform_device *pdev)
{
int chan;
struct gab *adc_bat = platform_get_drvdata(pdev);
- struct gab_platform_data *pdata = adc_bat->pdata;
power_supply_unregister(adc_bat->psy);
- if (gpio_is_valid(pdata->gpio_charge_finished)) {
- free_irq(gpio_to_irq(pdata->gpio_charge_finished), adc_bat);
- gpio_free(pdata->gpio_charge_finished);
- }
+ if (adc_bat->charge_finished)
+ free_irq(gpiod_to_irq(adc_bat->charge_finished), adc_bat);
for (chan = 0; chan < ARRAY_SIZE(gab_chan_name); chan++) {
if (adc_bat->channel[chan])
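
devm_gpiod_get_optional() returns NULL, not an error, when no "charged" GPIO is described, which is why a plain NULL test replaces the old gpio_is_valid() checks; it also retires the pdata gpio_inverted flag, since gpiod_get_value() already applies the line's active-low polarity. The usual shape of the idiom (the probe above omits the IS_ERR() leg):

	desc = devm_gpiod_get_optional(dev, "charged", GPIOD_IN);
	if (IS_ERR(desc))
		return PTR_ERR(desc);		/* real lookup error */
	if (desc)				/* present: hook up the IRQ */
		irq = gpiod_to_irq(desc);
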
diff --git a/drivers/power/supply/max17042_battery.c b/drivers/power/supply/max17042_battery.c
index f284547913d6..79d4b5988360 100644
--- a/drivers/power/supply/max17042_battery.c
+++ b/drivers/power/supply/max17042_battery.c
@@ -78,6 +78,7 @@ static enum power_supply_property max17042_battery_props[] = {
POWER_SUPPLY_PROP_CHARGE_FULL,
POWER_SUPPLY_PROP_CHARGE_NOW,
POWER_SUPPLY_PROP_CHARGE_COUNTER,
+ POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT,
POWER_SUPPLY_PROP_TEMP,
POWER_SUPPLY_PROP_TEMP_ALERT_MIN,
POWER_SUPPLY_PROP_TEMP_ALERT_MAX,
@@ -85,9 +86,10 @@ static enum power_supply_property max17042_battery_props[] = {
POWER_SUPPLY_PROP_TEMP_MAX,
POWER_SUPPLY_PROP_HEALTH,
POWER_SUPPLY_PROP_SCOPE,
+ POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
+	// these two have to be at the end of the list
POWER_SUPPLY_PROP_CURRENT_NOW,
POWER_SUPPLY_PROP_CURRENT_AVG,
- POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
};
static int max17042_get_temperature(struct max17042_chip *chip, int *temp)
@@ -353,7 +355,8 @@ static int max17042_get_property(struct power_supply *psy,
if (ret < 0)
return ret;
- val->intval = data * 1000 / 2;
+ data64 = sign_extend64(data, 15) * 5000000ll;
+ val->intval = div_s64(data64, chip->pdata->r_sns);
break;
case POWER_SUPPLY_PROP_TEMP:
ret = max17042_get_temperature(chip, &val->intval);
@@ -394,8 +397,8 @@ static int max17042_get_property(struct power_supply *psy,
if (ret < 0)
return ret;
- val->intval = sign_extend32(data, 15);
- val->intval *= 1562500 / chip->pdata->r_sns;
+ data64 = sign_extend64(data, 15) * 1562500ll;
+ val->intval = div_s64(data64, chip->pdata->r_sns);
} else {
return -EINVAL;
}
@@ -406,12 +409,20 @@ static int max17042_get_property(struct power_supply *psy,
if (ret < 0)
return ret;
- val->intval = sign_extend32(data, 15);
- val->intval *= 1562500 / chip->pdata->r_sns;
+ data64 = sign_extend64(data, 15) * 1562500ll;
+ val->intval = div_s64(data64, chip->pdata->r_sns);
} else {
return -EINVAL;
}
break;
+ case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
+ ret = regmap_read(map, MAX17042_ICHGTerm, &data);
+ if (ret < 0)
+ return ret;
+
+ data64 = data * 1562500ll;
+ val->intval = div_s64(data64, chip->pdata->r_sns);
+ break;
case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW:
ret = regmap_read(map, MAX17042_TTE, &data);
if (ret < 0)
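The 1562500 factor above falls out of the register format: the MAX17042 current, average-current and charge-termination registers use an LSB of 1.5625 uV across the sense resistor, and r_sns is stored in microohms, so microamps = raw * 1562500 / r_sns (the charge counter's 5 uVh LSB likewise yields the 5000000 factor). A worked check of the arithmetic, as a hypothetical standalone helper assuming the driver's default 10 mOhm (r_sns = 10000) sense resistor:

	#include <stdint.h>

	static int32_t max17042_raw_to_ua(uint16_t raw, int64_t r_sns_uohm)
	{
		int64_t data = (int16_t)raw;	/* mirrors sign_extend64(raw, 15) */

		return (int32_t)(data * 1562500LL / r_sns_uohm);
	}

	/*
	 * max17042_raw_to_ua(0x0140, 10000) == 50000: 320 LSBs are 500 uV
	 * across 10 mOhm, i.e. 50 mA.
	 */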
diff --git a/drivers/power/supply/max8997_charger.c b/drivers/power/supply/max8997_charger.c
index f5e84cd47924..1947af25879a 100644
--- a/drivers/power/supply/max8997_charger.c
+++ b/drivers/power/supply/max8997_charger.c
@@ -13,6 +13,20 @@
#include <linux/mfd/max8997.h>
#include <linux/mfd/max8997-private.h>
+/* MAX8997_REG_STATUS4 */
+#define DCINOK_SHIFT 1
+#define DCINOK_MASK (1 << DCINOK_SHIFT)
+#define DETBAT_SHIFT 2
+#define DETBAT_MASK (1 << DETBAT_SHIFT)
+
+/* MAX8997_REG_MBCCTRL1 */
+#define TFCH_SHIFT 4
+#define TFCH_MASK (7 << TFCH_SHIFT)
+
+/* MAX8997_REG_MBCCTRL5 */
+#define ITOPOFF_SHIFT 0
+#define ITOPOFF_MASK (0xF << ITOPOFF_SHIFT)
+
struct charger_data {
struct device *dev;
struct max8997_dev *iodev;
@@ -20,7 +34,7 @@ struct charger_data {
};
static enum power_supply_property max8997_battery_props[] = {
- POWER_SUPPLY_PROP_STATUS, /* "FULL" or "NOT FULL" only. */
+ POWER_SUPPLY_PROP_STATUS, /* "FULL", "CHARGING" or "DISCHARGING". */
POWER_SUPPLY_PROP_PRESENT, /* the presence of battery */
POWER_SUPPLY_PROP_ONLINE, /* charger is active or not */
};
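With the new masks, the STATUS property becomes a three-way decode of MAX8997_REG_STATUS4: bit 0 reports full, otherwise DCINOK distinguishes charging from discharging, as the hunk below shows. The same logic as a hypothetical standalone helper:

	/* mirrors the decode order in max8997_battery_get_property() */
	static int max8997_status_from_reg(unsigned int reg)
	{
		if (reg & 0x01)			/* charger done bit */
			return POWER_SUPPLY_STATUS_FULL;
		if (reg & DCINOK_MASK)		/* DC input present */
			return POWER_SUPPLY_STATUS_CHARGING;
		return POWER_SUPPLY_STATUS_DISCHARGING;
	}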
@@ -43,6 +57,10 @@ static int max8997_battery_get_property(struct power_supply *psy,
return ret;
if ((reg & (1 << 0)) == 0x1)
val->intval = POWER_SUPPLY_STATUS_FULL;
+ else if ((reg & DCINOK_MASK))
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ else
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
break;
case POWER_SUPPLY_PROP_PRESENT:
@@ -50,7 +68,7 @@ static int max8997_battery_get_property(struct power_supply *psy,
ret = max8997_read_reg(i2c, MAX8997_REG_STATUS4, &reg);
if (ret)
return ret;
- if ((reg & (1 << 2)) == 0x0)
+ if ((reg & DETBAT_MASK) == 0x0)
val->intval = 1;
break;
@@ -59,8 +77,7 @@ static int max8997_battery_get_property(struct power_supply *psy,
ret = max8997_read_reg(i2c, MAX8997_REG_STATUS4, &reg);
if (ret)
return ret;
- /* DCINOK */
- if (reg & (1 << 1))
+ if (reg & DCINOK_MASK)
val->intval = 1;
break;
@@ -84,11 +101,14 @@ static int max8997_battery_probe(struct platform_device *pdev)
int ret = 0;
struct charger_data *charger;
struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
- struct max8997_platform_data *pdata = dev_get_platdata(iodev->dev);
+ struct i2c_client *i2c = iodev->i2c;
+ struct max8997_platform_data *pdata = iodev->pdata;
struct power_supply_config psy_cfg = {};
- if (!pdata)
+ if (!pdata) {
+ dev_err(&pdev->dev, "No platform data supplied.\n");
return -EINVAL;
+ }
if (pdata->eoc_mA) {
int val = (pdata->eoc_mA - 50) / 10;
@@ -97,30 +117,29 @@ static int max8997_battery_probe(struct platform_device *pdev)
if (val > 0xf)
val = 0xf;
- ret = max8997_update_reg(iodev->i2c,
- MAX8997_REG_MBCCTRL5, val, 0xf);
+ ret = max8997_update_reg(i2c, MAX8997_REG_MBCCTRL5,
+ val << ITOPOFF_SHIFT, ITOPOFF_MASK);
if (ret < 0) {
dev_err(&pdev->dev, "Cannot use i2c bus.\n");
return ret;
}
}
-
switch (pdata->timeout) {
case 5:
- ret = max8997_update_reg(iodev->i2c, MAX8997_REG_MBCCTRL1,
- 0x2 << 4, 0x7 << 4);
+ ret = max8997_update_reg(i2c, MAX8997_REG_MBCCTRL1,
+ 0x2 << TFCH_SHIFT, TFCH_MASK);
break;
case 6:
- ret = max8997_update_reg(iodev->i2c, MAX8997_REG_MBCCTRL1,
- 0x3 << 4, 0x7 << 4);
+ ret = max8997_update_reg(i2c, MAX8997_REG_MBCCTRL1,
+ 0x3 << TFCH_SHIFT, TFCH_MASK);
break;
case 7:
- ret = max8997_update_reg(iodev->i2c, MAX8997_REG_MBCCTRL1,
- 0x4 << 4, 0x7 << 4);
+ ret = max8997_update_reg(i2c, MAX8997_REG_MBCCTRL1,
+ 0x4 << TFCH_SHIFT, TFCH_MASK);
break;
case 0:
- ret = max8997_update_reg(iodev->i2c, MAX8997_REG_MBCCTRL1,
- 0x7 << 4, 0x7 << 4);
+ ret = max8997_update_reg(i2c, MAX8997_REG_MBCCTRL1,
+ 0x7 << TFCH_SHIFT, TFCH_MASK);
break;
default:
dev_err(&pdev->dev, "incorrect timeout value (%d)\n",
@@ -138,7 +157,6 @@ static int max8997_battery_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, charger);
-
charger->dev = &pdev->dev;
charger->iodev = iodev;
@@ -168,18 +186,7 @@ static struct platform_driver max8997_battery_driver = {
.probe = max8997_battery_probe,
.id_table = max8997_battery_id,
};
-
-static int __init max8997_battery_init(void)
-{
- return platform_driver_register(&max8997_battery_driver);
-}
-subsys_initcall(max8997_battery_init);
-
-static void __exit max8997_battery_cleanup(void)
-{
- platform_driver_unregister(&max8997_battery_driver);
-}
-module_exit(max8997_battery_cleanup);
+module_platform_driver(max8997_battery_driver);
MODULE_DESCRIPTION("MAXIM 8997/8966 battery control driver");
MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
diff --git a/drivers/power/supply/pm2301_charger.c b/drivers/power/supply/pm2301_charger.c
index 2df6a2459d1f..ac06ecf7fc9c 100644
--- a/drivers/power/supply/pm2301_charger.c
+++ b/drivers/power/supply/pm2301_charger.c
@@ -455,7 +455,6 @@ static int pm2_int_reg4(void *pm2_data, int val)
static int pm2_int_reg5(void *pm2_data, int val)
{
struct pm2xxx_charger *pm2 = pm2_data;
- int ret = 0;
if (val & (PM2XXX_INT6_ITVPWR2DROP | PM2XXX_INT6_ITVPWR1DROP)) {
dev_dbg(pm2->dev, "VMPWR drop to VBAT level\n");
@@ -468,7 +467,7 @@ static int pm2_int_reg5(void *pm2_data, int val)
dev_dbg(pm2->dev, "Falling/Rising edge on WPWR1/2\n");
}
- return ret;
+ return 0;
}
static irqreturn_t pm2xxx_irq_int(int irq, void *data)
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index a616b9d8f43c..92dd63171193 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -402,7 +402,7 @@ void power_supply_init_attrs(struct device_type *dev_type)
struct device_attribute *attr;
if (!power_supply_attrs[i].prop_name) {
- pr_warn("%s: Property %d skipped because is is missing from power_supply_attrs\n",
+ pr_warn("%s: Property %d skipped because it is missing from power_supply_attrs\n",
__func__, i);
sprintf(power_supply_attrs[i].attr_name, "_err_%d", i);
} else {
diff --git a/drivers/power/supply/s3c_adc_battery.c b/drivers/power/supply/s3c_adc_battery.c
index 60b7f41ab063..a2addc24ee8b 100644
--- a/drivers/power/supply/s3c_adc_battery.c
+++ b/drivers/power/supply/s3c_adc_battery.c
@@ -13,7 +13,7 @@
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <linux/leds.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/err.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
@@ -31,6 +31,7 @@ struct s3c_adc_bat {
struct power_supply *psy;
struct s3c_adc_client *client;
struct s3c_adc_bat_pdata *pdata;
+ struct gpio_desc *charge_finished;
int volt_value;
int cur_value;
unsigned int timestamp;
@@ -132,9 +133,7 @@ static int calc_full_volt(int volt_val, int cur_val, int impedance)
static int charge_finished(struct s3c_adc_bat *bat)
{
- return bat->pdata->gpio_inverted ?
- !gpio_get_value(bat->pdata->gpio_charge_finished) :
- gpio_get_value(bat->pdata->gpio_charge_finished);
+ return gpiod_get_value(bat->charge_finished);
}
static int s3c_adc_bat_get_property(struct power_supply *psy,
@@ -169,7 +168,7 @@ static int s3c_adc_bat_get_property(struct power_supply *psy,
}
if (bat->cable_plugged &&
- ((bat->pdata->gpio_charge_finished < 0) ||
+ (!bat->charge_finished ||
!charge_finished(bat))) {
lut = bat->pdata->lut_acin;
lut_size = bat->pdata->lut_acin_cnt;
@@ -206,7 +205,7 @@ static int s3c_adc_bat_get_property(struct power_supply *psy,
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
- if (bat->pdata->gpio_charge_finished < 0)
+ if (!bat->charge_finished)
val->intval = bat->level == 100000 ?
POWER_SUPPLY_STATUS_FULL : bat->status;
else
@@ -265,7 +264,7 @@ static void s3c_adc_bat_work(struct work_struct *work)
bat->status = POWER_SUPPLY_STATUS_DISCHARGING;
}
} else {
- if ((bat->pdata->gpio_charge_finished >= 0) && is_plugged) {
+ if (bat->charge_finished && is_plugged) {
is_charged = charge_finished(&main_bat);
if (is_charged) {
if (bat->pdata->disable_charger)
@@ -294,6 +293,7 @@ static int s3c_adc_bat_probe(struct platform_device *pdev)
struct s3c_adc_client *client;
struct s3c_adc_bat_pdata *pdata = pdev->dev.platform_data;
struct power_supply_config psy_cfg = {};
+ struct gpio_desc *gpiod;
int ret;
client = s3c_adc_register(pdev, NULL, NULL, 0);
@@ -304,8 +304,17 @@ static int s3c_adc_bat_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, client);
+ gpiod = devm_gpiod_get_optional(&pdev->dev, "charge-status", GPIOD_IN);
+ if (IS_ERR(gpiod)) {
+ /* Could be probe deferral, etc. */
+ ret = PTR_ERR(gpiod);
+ dev_err(&pdev->dev, "failed to get charge-status GPIO: %d\n", ret);
+ return ret;
+ }
+
main_bat.client = client;
main_bat.pdata = pdata;
+ main_bat.charge_finished = gpiod;
main_bat.volt_value = -1;
main_bat.cur_value = -1;
main_bat.cable_plugged = 0;
@@ -323,6 +332,7 @@ static int s3c_adc_bat_probe(struct platform_device *pdev)
backup_bat.client = client;
backup_bat.pdata = pdev->dev.platform_data;
+ backup_bat.charge_finished = gpiod;
backup_bat.volt_value = -1;
backup_bat.psy = power_supply_register(&pdev->dev,
&backup_bat_desc,
@@ -335,12 +345,8 @@ static int s3c_adc_bat_probe(struct platform_device *pdev)
INIT_DELAYED_WORK(&bat_work, s3c_adc_bat_work);
- if (pdata->gpio_charge_finished >= 0) {
- ret = gpio_request(pdata->gpio_charge_finished, "charged");
- if (ret)
- goto err_gpio;
-
- ret = request_irq(gpio_to_irq(pdata->gpio_charge_finished),
+ if (gpiod) {
+ ret = request_irq(gpiod_to_irq(gpiod),
s3c_adc_bat_charged,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
"battery charged", NULL);
@@ -364,12 +370,9 @@ static int s3c_adc_bat_probe(struct platform_device *pdev)
return 0;
err_platform:
- if (pdata->gpio_charge_finished >= 0)
- free_irq(gpio_to_irq(pdata->gpio_charge_finished), NULL);
+ if (gpiod)
+ free_irq(gpiod_to_irq(gpiod), NULL);
err_irq:
- if (pdata->gpio_charge_finished >= 0)
- gpio_free(pdata->gpio_charge_finished);
-err_gpio:
if (pdata->backup_volt_mult)
power_supply_unregister(backup_bat.psy);
err_reg_backup:
@@ -389,10 +392,8 @@ static int s3c_adc_bat_remove(struct platform_device *pdev)
s3c_adc_release(client);
- if (pdata->gpio_charge_finished >= 0) {
- free_irq(gpio_to_irq(pdata->gpio_charge_finished), NULL);
- gpio_free(pdata->gpio_charge_finished);
- }
+ if (main_bat.charge_finished)
+ free_irq(gpiod_to_irq(main_bat.charge_finished), NULL);
cancel_delayed_work(&bat_work);
@@ -408,12 +409,12 @@ static int s3c_adc_bat_suspend(struct platform_device *pdev,
{
struct s3c_adc_bat_pdata *pdata = pdev->dev.platform_data;
- if (pdata->gpio_charge_finished >= 0) {
+ if (main_bat.charge_finished) {
if (device_may_wakeup(&pdev->dev))
enable_irq_wake(
- gpio_to_irq(pdata->gpio_charge_finished));
+ gpiod_to_irq(main_bat.charge_finished));
else {
- disable_irq(gpio_to_irq(pdata->gpio_charge_finished));
+ disable_irq(gpiod_to_irq(main_bat.charge_finished));
main_bat.pdata->disable_charger();
}
}
@@ -425,12 +426,12 @@ static int s3c_adc_bat_resume(struct platform_device *pdev)
{
struct s3c_adc_bat_pdata *pdata = pdev->dev.platform_data;
- if (pdata->gpio_charge_finished >= 0) {
+ if (main_bat.charge_finished) {
if (device_may_wakeup(&pdev->dev))
disable_irq_wake(
- gpio_to_irq(pdata->gpio_charge_finished));
+ gpiod_to_irq(main_bat.charge_finished));
else
- enable_irq(gpio_to_irq(pdata->gpio_charge_finished));
+ enable_irq(gpiod_to_irq(main_bat.charge_finished));
}
/* Schedule timer to check current status */
diff --git a/drivers/power/supply/wm831x_power.c b/drivers/power/supply/wm831x_power.c
index 18b33f14dfee..4cd2dd870039 100644
--- a/drivers/power/supply/wm831x_power.c
+++ b/drivers/power/supply/wm831x_power.c
@@ -668,7 +668,6 @@ static int wm831x_power_probe(struct platform_device *pdev)
fallthrough;
case -EPROBE_DEFER:
goto err_bat_irq;
- break;
}
return ret;
diff --git a/drivers/ps3/ps3-lpm.c b/drivers/ps3/ps3-lpm.c
index e54aa2d82f50..65512b6cc6fd 100644
--- a/drivers/ps3/ps3-lpm.c
+++ b/drivers/ps3/ps3-lpm.c
@@ -1196,7 +1196,7 @@ static int ps3_lpm_probe(struct ps3_system_bus_device *dev)
return 0;
}
-static int ps3_lpm_remove(struct ps3_system_bus_device *dev)
+static void ps3_lpm_remove(struct ps3_system_bus_device *dev)
{
dev_dbg(&dev->core, " -> %s:%u:\n", __func__, __LINE__);
@@ -1206,7 +1206,6 @@ static int ps3_lpm_remove(struct ps3_system_bus_device *dev)
lpm_priv = NULL;
dev_info(&dev->core, " <- %s:%u:\n", __func__, __LINE__);
- return 0;
}
static struct ps3_system_bus_driver ps3_lpm_driver = {
diff --git a/drivers/ps3/ps3-vuart.c b/drivers/ps3/ps3-vuart.c
index 4ed131eaff51..e34ae6a442c7 100644
--- a/drivers/ps3/ps3-vuart.c
+++ b/drivers/ps3/ps3-vuart.c
@@ -1102,7 +1102,7 @@ static int ps3_vuart_cleanup(struct ps3_system_bus_device *dev)
* device can no longer be used.
*/
-static int ps3_vuart_remove(struct ps3_system_bus_device *dev)
+static void ps3_vuart_remove(struct ps3_system_bus_device *dev)
{
struct ps3_vuart_port_priv *priv = to_port_priv(dev);
struct ps3_vuart_port_driver *drv;
@@ -1118,7 +1118,7 @@ static int ps3_vuart_remove(struct ps3_system_bus_device *dev)
dev_dbg(&dev->core, "%s:%d: no driver bound\n", __func__,
__LINE__);
mutex_unlock(&vuart_bus_priv.probe_mutex);
- return 0;
+ return;
}
drv = ps3_system_bus_dev_to_vuart_drv(dev);
@@ -1141,7 +1141,6 @@ static int ps3_vuart_remove(struct ps3_system_bus_device *dev)
dev_dbg(&dev->core, " <- %s:%d\n", __func__, __LINE__);
mutex_unlock(&vuart_bus_priv.probe_mutex);
- return 0;
}
/**
@@ -1154,7 +1153,7 @@ static int ps3_vuart_remove(struct ps3_system_bus_device *dev)
* sequence.
*/
-static int ps3_vuart_shutdown(struct ps3_system_bus_device *dev)
+static void ps3_vuart_shutdown(struct ps3_system_bus_device *dev)
{
struct ps3_vuart_port_driver *drv;
@@ -1169,7 +1168,7 @@ static int ps3_vuart_shutdown(struct ps3_system_bus_device *dev)
dev_dbg(&dev->core, "%s:%d: no driver bound\n", __func__,
__LINE__);
mutex_unlock(&vuart_bus_priv.probe_mutex);
- return 0;
+ return;
}
drv = ps3_system_bus_dev_to_vuart_drv(dev);
@@ -1193,7 +1192,6 @@ static int ps3_vuart_shutdown(struct ps3_system_bus_device *dev)
dev_dbg(&dev->core, " <- %s:%d\n", __func__, __LINE__);
mutex_unlock(&vuart_bus_priv.probe_mutex);
- return 0;
}
static int __init ps3_vuart_bus_init(void)
diff --git a/drivers/ps3/ps3stor_lib.c b/drivers/ps3/ps3stor_lib.c
index 333ba83006e4..a12a1ad9b5fe 100644
--- a/drivers/ps3/ps3stor_lib.c
+++ b/drivers/ps3/ps3stor_lib.c
@@ -189,7 +189,7 @@ int ps3stor_setup(struct ps3_storage_device *dev, irq_handler_t handler)
dev->bounce_lpar = ps3_mm_phys_to_lpar(__pa(dev->bounce_buf));
dev->bounce_dma = dma_map_single(&dev->sbd.core, dev->bounce_buf,
dev->bounce_size, DMA_BIDIRECTIONAL);
- if (!dev->bounce_dma) {
+ if (dma_mapping_error(&dev->sbd.core, dev->bounce_dma)) {
dev_err(&dev->sbd.core, "%s:%u: map DMA region failed\n",
__func__, __LINE__);
error = -ENODEV;
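dma_map_single() reports failure through an opaque cookie that only dma_mapping_error() can decode; on some platforms 0 is a perfectly valid bus address, so the old !dev->bounce_dma test could both reject good mappings and miss real failures. The canonical pattern, as a sketch:

	dma_addr_t dma;

	dma = dma_map_single(dev, buf, size, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, dma)) {
		/* the cookie is invalid and must not reach the hardware */
		return -ENOMEM;
	}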
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index 476d7c7fe70a..f2edef0df40f 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -64,6 +64,7 @@ config DP83640_PHY
depends on NETWORK_PHY_TIMESTAMPING
depends on PHYLIB
depends on PTP_1588_CLOCK
+ select CRC32
help
Supports the DP83640 PHYTER with IEEE 1588 features.
@@ -78,6 +79,7 @@ config DP83640_PHY
config PTP_1588_CLOCK_INES
tristate "ZHAW InES PTP time stamping IP core"
depends on NETWORK_PHY_TIMESTAMPING
+ depends on HAS_IOMEM
depends on PHYLIB
depends on PTP_1588_CLOCK
help
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index 63be5362fd3a..0937e1c047ac 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -53,8 +53,8 @@ config PWM_AB8500
config PWM_ATMEL
tristate "Atmel PWM support"
- depends on OF
depends on ARCH_AT91 || COMPILE_TEST
+ depends on HAS_IOMEM && OF
help
Generic PWM framework driver for Atmel SoC.
@@ -75,7 +75,8 @@ config PWM_ATMEL_HLCDC_PWM
config PWM_ATMEL_TCB
tristate "Atmel TC Block PWM support"
- depends on ATMEL_TCLIB && OF
+ depends on OF
+ select REGMAP_MMIO
help
Generic PWM framework driver for Atmel Timer Counter Block.
@@ -88,7 +89,7 @@ config PWM_ATMEL_TCB
config PWM_BCM_IPROC
tristate "iProc PWM support"
depends on ARCH_BCM_IPROC || COMPILE_TEST
- depends on COMMON_CLK
+ depends on COMMON_CLK && HAS_IOMEM
default ARCH_BCM_IPROC
help
Generic PWM framework driver for Broadcom iProc PWM block. This
@@ -111,6 +112,7 @@ config PWM_BCM_KONA
config PWM_BCM2835
tristate "BCM2835 PWM support"
depends on ARCH_BCM2835 || ARCH_BRCMSTB || COMPILE_TEST
+ depends on HAS_IOMEM
help
PWM framework driver for BCM2835 controller (Raspberry Pi)
@@ -120,6 +122,7 @@ config PWM_BCM2835
config PWM_BERLIN
tristate "Marvell Berlin PWM support"
depends on ARCH_BERLIN || COMPILE_TEST
+ depends on HAS_IOMEM
help
PWM framework driver for Marvell Berlin SoCs.
@@ -129,6 +132,7 @@ config PWM_BERLIN
config PWM_BRCMSTB
tristate "Broadcom STB PWM support"
depends on ARCH_BRCMSTB || BMIPS_GENERIC || COMPILE_TEST
+ depends on HAS_IOMEM
help
Generic PWM framework driver for the Broadcom Set-top-Box
SoCs (BCM7xxx).
@@ -160,9 +164,19 @@ config PWM_CROS_EC
PWM driver for exposing a PWM attached to the ChromeOS Embedded
Controller.
+config PWM_DWC
+ tristate "DesignWare PWM Controller"
+ depends on PCI
+ help
+ PWM driver for Synopsys DWC PWM Controller attached to a PCI bus.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-dwc.
+
config PWM_EP93XX
tristate "Cirrus Logic EP93xx PWM support"
depends on ARCH_EP93XX || COMPILE_TEST
+ depends on HAS_IOMEM
help
Generic PWM framework driver for Cirrus Logic EP93xx.
@@ -184,6 +198,7 @@ config PWM_FSL_FTM
config PWM_HIBVT
tristate "HiSilicon BVT PWM support"
depends on ARCH_HISI || COMPILE_TEST
+ depends on HAS_IOMEM
help
Generic PWM framework driver for HiSilicon BVT SoCs.
@@ -206,6 +221,7 @@ config PWM_IMG
config PWM_IMX1
tristate "i.MX1 PWM support"
depends on ARCH_MXC || COMPILE_TEST
+ depends on HAS_IOMEM
help
Generic PWM framework driver for i.MX1 and i.MX21
@@ -215,6 +231,7 @@ config PWM_IMX1
config PWM_IMX27
tristate "i.MX27 PWM support"
depends on ARCH_MXC || COMPILE_TEST
+ depends on HAS_IOMEM
help
Generic PWM framework driver for i.MX27 and later i.MX SoCs.
@@ -232,6 +249,17 @@ config PWM_IMX_TPM
To compile this driver as a module, choose M here: the module
will be called pwm-imx-tpm.
+config PWM_INTEL_LGM
+ tristate "Intel LGM PWM support"
+ depends on HAS_IOMEM
+ depends on (OF && X86) || COMPILE_TEST
+ select REGMAP_MMIO
+ help
+ Generic PWM fan controller driver for LGM SoC.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-intel-lgm.
+
config PWM_IQS620A
tristate "Azoteq IQS620A PWM support"
depends on MFD_IQS62X || COMPILE_TEST
@@ -254,6 +282,15 @@ config PWM_JZ4740
To compile this driver as a module, choose M here: the module
will be called pwm-jz4740.
+config PWM_KEEMBAY
+ tristate "Intel Keem Bay PWM driver"
+ depends on ARCH_KEEMBAY || (ARM64 && COMPILE_TEST)
+ help
+ The platform driver for Intel Keem Bay PWM controller.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-keembay.
+
config PWM_LP3943
tristate "TI/National Semiconductor LP3943 PWM support"
depends on MFD_LP3943
@@ -267,6 +304,7 @@ config PWM_LP3943
config PWM_LPC18XX_SCT
tristate "LPC18xx/43xx PWM/SCT support"
depends on ARCH_LPC18XX || COMPILE_TEST
+ depends on HAS_IOMEM
help
Generic PWM framework driver for NXP LPC18xx PWM/SCT which
supports 16 channels.
@@ -279,6 +317,7 @@ config PWM_LPC18XX_SCT
config PWM_LPC32XX
tristate "LPC32XX PWM support"
depends on ARCH_LPC32XX || COMPILE_TEST
+ depends on HAS_IOMEM
help
Generic PWM framework driver for LPC32XX. The LPC32XX SOC has two
PWM controllers.
@@ -287,11 +326,13 @@ config PWM_LPC32XX
will be called pwm-lpc32xx.
config PWM_LPSS
+ depends on HAS_IOMEM
tristate
config PWM_LPSS_PCI
tristate "Intel LPSS PWM PCI driver"
- depends on X86 && PCI
+ depends on X86 || COMPILE_TEST
+ depends on HAS_IOMEM && PCI
select PWM_LPSS
help
The PCI driver for Intel Low Power Subsystem PWM controller.
@@ -301,7 +342,8 @@ config PWM_LPSS_PCI
config PWM_LPSS_PLATFORM
tristate "Intel LPSS PWM platform driver"
- depends on X86 && ACPI
+ depends on (X86 && ACPI) || COMPILE_TEST
+ depends on HAS_IOMEM
select PWM_LPSS
help
The platform driver for Intel Low Power Subsystem PWM controller.
@@ -312,7 +354,7 @@ config PWM_LPSS_PLATFORM
config PWM_MESON
tristate "Amlogic Meson PWM driver"
depends on ARCH_MESON || COMPILE_TEST
- depends on COMMON_CLK
+ depends on COMMON_CLK && HAS_IOMEM
help
The platform driver for Amlogic Meson PWM controller.
@@ -333,6 +375,7 @@ config PWM_MTK_DISP
config PWM_MEDIATEK
tristate "MediaTek PWM support"
depends on ARCH_MEDIATEK || RALINK || COMPILE_TEST
+ depends on HAS_IOMEM
help
Generic PWM framework driver for Mediatek ARM SoC.
@@ -341,8 +384,8 @@ config PWM_MEDIATEK
config PWM_MXS
tristate "Freescale MXS PWM support"
- depends on OF
depends on ARCH_MXS || COMPILE_TEST
+ depends on HAS_IOMEM && OF
select STMP_DEVICE
help
Generic PWM framework driver for Freescale MXS.
@@ -373,6 +416,7 @@ config PWM_PCA9685
config PWM_PXA
tristate "PXA PWM support"
depends on ARCH_PXA || COMPILE_TEST
+ depends on HAS_IOMEM
help
Generic PWM framework driver for PXA.
@@ -404,6 +448,7 @@ config PWM_RENESAS_TPU
config PWM_ROCKCHIP
tristate "Rockchip PWM support"
depends on ARCH_ROCKCHIP || COMPILE_TEST
+ depends on HAS_IOMEM
help
Generic PWM framework driver for the PWM controller found on
Rockchip SoCs.
@@ -411,6 +456,7 @@ config PWM_ROCKCHIP
config PWM_SAMSUNG
tristate "Samsung PWM support"
depends on PLAT_SAMSUNG || ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
+ depends on HAS_IOMEM
help
Generic PWM framework driver for Samsung.
@@ -420,7 +466,7 @@ config PWM_SAMSUNG
config PWM_SIFIVE
tristate "SiFive PWM support"
depends on OF
- depends on COMMON_CLK
+ depends on COMMON_CLK && HAS_IOMEM
depends on RISCV || COMPILE_TEST
help
Generic PWM framework driver for SiFive SoCs.
@@ -441,7 +487,7 @@ config PWM_SL28CPLD
config PWM_SPEAR
tristate "STMicroelectronics SPEAr PWM support"
depends on PLAT_SPEAR || COMPILE_TEST
- depends on OF
+ depends on HAS_IOMEM && OF
help
Generic PWM framework driver for the PWM controller on ST
SPEAr SoCs.
@@ -463,7 +509,7 @@ config PWM_SPRD
config PWM_STI
tristate "STiH4xx PWM support"
depends on ARCH_STI || COMPILE_TEST
- depends on OF
+ depends on HAS_IOMEM && OF
help
Generic PWM framework driver for STiH4xx SoCs.
@@ -509,6 +555,7 @@ config PWM_SUN4I
config PWM_TEGRA
tristate "NVIDIA Tegra PWM support"
depends on ARCH_TEGRA || COMPILE_TEST
+ depends on HAS_IOMEM
help
Generic PWM framework driver for the PWFM controller found on NVIDIA
Tegra SoCs.
@@ -519,6 +566,7 @@ config PWM_TEGRA
config PWM_TIECAP
tristate "ECAP PWM support"
depends on ARCH_OMAP2PLUS || ARCH_DAVINCI_DA8XX || ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST
+ depends on HAS_IOMEM
help
PWM driver support for the ECAP APWM controller found on TI SOCs
@@ -528,6 +576,7 @@ config PWM_TIECAP
config PWM_TIEHRPWM
tristate "EHRPWM PWM support"
depends on ARCH_OMAP2PLUS || ARCH_DAVINCI_DA8XX || ARCH_K3 || COMPILE_TEST
+ depends on HAS_IOMEM
help
PWM driver support for the EHRPWM controller found on TI SOCs
@@ -555,6 +604,7 @@ config PWM_TWL_LED
config PWM_VT8500
tristate "vt8500 PWM support"
depends on ARCH_VT8500 || COMPILE_TEST
+ depends on HAS_IOMEM
help
Generic PWM framework driver for vt8500.
@@ -564,6 +614,7 @@ config PWM_VT8500
config PWM_ZX
tristate "ZTE ZX PWM support"
depends on ARCH_ZX || COMPILE_TEST
+ depends on HAS_IOMEM
help
Generic PWM framework driver for ZTE ZX family SoCs.
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index cbdcd55d69ee..18b89d7fd092 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_PWM_BRCMSTB) += pwm-brcmstb.o
obj-$(CONFIG_PWM_CLPS711X) += pwm-clps711x.o
obj-$(CONFIG_PWM_CRC) += pwm-crc.o
obj-$(CONFIG_PWM_CROS_EC) += pwm-cros-ec.o
+obj-$(CONFIG_PWM_DWC) += pwm-dwc.o
obj-$(CONFIG_PWM_EP93XX) += pwm-ep93xx.o
obj-$(CONFIG_PWM_FSL_FTM) += pwm-fsl-ftm.o
obj-$(CONFIG_PWM_HIBVT) += pwm-hibvt.o
@@ -20,8 +21,10 @@ obj-$(CONFIG_PWM_IMG) += pwm-img.o
obj-$(CONFIG_PWM_IMX1) += pwm-imx1.o
obj-$(CONFIG_PWM_IMX27) += pwm-imx27.o
obj-$(CONFIG_PWM_IMX_TPM) += pwm-imx-tpm.o
+obj-$(CONFIG_PWM_INTEL_LGM) += pwm-intel-lgm.o
obj-$(CONFIG_PWM_IQS620A) += pwm-iqs620a.o
obj-$(CONFIG_PWM_JZ4740) += pwm-jz4740.o
+obj-$(CONFIG_PWM_KEEMBAY) += pwm-keembay.o
obj-$(CONFIG_PWM_LP3943) += pwm-lp3943.o
obj-$(CONFIG_PWM_LPC18XX_SCT) += pwm-lpc18xx-sct.o
obj-$(CONFIG_PWM_LPC32XX) += pwm-lpc32xx.o
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index 1f16f5365d3c..a8eff4b3ee36 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -1338,7 +1338,7 @@ DEFINE_SEQ_ATTRIBUTE(pwm_debugfs);
static int __init pwm_debugfs_init(void)
{
- debugfs_create_file("pwm", S_IFREG | S_IRUGO, NULL, NULL,
+ debugfs_create_file("pwm", S_IFREG | 0444, NULL, NULL,
&pwm_debugfs_fops);
return 0;
diff --git a/drivers/pwm/pwm-ab8500.c b/drivers/pwm/pwm-ab8500.c
index fdf3964db4a6..58c6c0f5b0ec 100644
--- a/drivers/pwm/pwm-ab8500.c
+++ b/drivers/pwm/pwm-ab8500.c
@@ -101,12 +101,12 @@ static int ab8500_pwm_probe(struct platform_device *pdev)
ab8500->chip.dev = &pdev->dev;
ab8500->chip.ops = &ab8500_pwm_ops;
- ab8500->chip.base = pdev->id;
+ ab8500->chip.base = -1;
ab8500->chip.npwm = 1;
err = pwmchip_add(&ab8500->chip);
if (err < 0)
- return err;
+ return dev_err_probe(&pdev->dev, err, "Failed to add pwm chip\n");
dev_dbg(&pdev->dev, "pwm probe successful\n");
platform_set_drvdata(pdev, ab8500);
diff --git a/drivers/pwm/pwm-atmel-tcb.c b/drivers/pwm/pwm-atmel-tcb.c
index 85c53701958c..5ccc3e7420e9 100644
--- a/drivers/pwm/pwm-atmel-tcb.c
+++ b/drivers/pwm/pwm-atmel-tcb.c
@@ -16,13 +16,16 @@
#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/io.h>
+#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/regmap.h>
#include <linux/slab.h>
#include <soc/at91/atmel_tcb.h>
-#define NPWM 6
+#define NPWM 2
#define ATMEL_TC_ACMR_MASK (ATMEL_TC_ACPA | ATMEL_TC_ACPC | \
ATMEL_TC_AEEVT | ATMEL_TC_ASWTRG)
@@ -48,11 +51,18 @@ struct atmel_tcb_channel {
struct atmel_tcb_pwm_chip {
struct pwm_chip chip;
spinlock_t lock;
- struct atmel_tc *tc;
+ u8 channel;
+ u8 width;
+ struct regmap *regmap;
+ struct clk *clk;
+ struct clk *gclk;
+ struct clk *slow_clk;
struct atmel_tcb_pwm_device *pwms[NPWM];
- struct atmel_tcb_channel bkup[NPWM / 2];
+ struct atmel_tcb_channel bkup;
};
+static const u8 atmel_tcb_divisors[] = { 2, 8, 32, 128, 0, };
+
static inline struct atmel_tcb_pwm_chip *to_tcb_chip(struct pwm_chip *chip)
{
return container_of(chip, struct atmel_tcb_pwm_chip, chip);
@@ -74,10 +84,6 @@ static int atmel_tcb_pwm_request(struct pwm_chip *chip,
{
struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
struct atmel_tcb_pwm_device *tcbpwm;
- struct atmel_tc *tc = tcbpwmc->tc;
- void __iomem *regs = tc->regs;
- unsigned group = pwm->hwpwm / 2;
- unsigned index = pwm->hwpwm % 2;
unsigned cmr;
int ret;
@@ -85,7 +91,7 @@ static int atmel_tcb_pwm_request(struct pwm_chip *chip,
if (!tcbpwm)
return -ENOMEM;
- ret = clk_prepare_enable(tc->clk[group]);
+ ret = clk_prepare_enable(tcbpwmc->clk);
if (ret) {
devm_kfree(chip->dev, tcbpwm);
return ret;
@@ -98,28 +104,31 @@ static int atmel_tcb_pwm_request(struct pwm_chip *chip,
tcbpwm->div = 0;
spin_lock(&tcbpwmc->lock);
- cmr = __raw_readl(regs + ATMEL_TC_REG(group, CMR));
+ regmap_read(tcbpwmc->regmap, ATMEL_TC_REG(tcbpwmc->channel, CMR), &cmr);
/*
* Get init config from Timer Counter registers if
* Timer Counter is already configured as a PWM generator.
*/
if (cmr & ATMEL_TC_WAVE) {
- if (index == 0)
- tcbpwm->duty =
- __raw_readl(regs + ATMEL_TC_REG(group, RA));
+ if (pwm->hwpwm == 0)
+ regmap_read(tcbpwmc->regmap,
+ ATMEL_TC_REG(tcbpwmc->channel, RA),
+ &tcbpwm->duty);
else
- tcbpwm->duty =
- __raw_readl(regs + ATMEL_TC_REG(group, RB));
+ regmap_read(tcbpwmc->regmap,
+ ATMEL_TC_REG(tcbpwmc->channel, RB),
+ &tcbpwm->duty);
tcbpwm->div = cmr & ATMEL_TC_TCCLKS;
- tcbpwm->period = __raw_readl(regs + ATMEL_TC_REG(group, RC));
+ regmap_read(tcbpwmc->regmap, ATMEL_TC_REG(tcbpwmc->channel, RC),
+ &tcbpwm->period);
cmr &= (ATMEL_TC_TCCLKS | ATMEL_TC_ACMR_MASK |
ATMEL_TC_BCMR_MASK);
} else
cmr = 0;
cmr |= ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO | ATMEL_TC_EEVT_XC0;
- __raw_writel(cmr, regs + ATMEL_TC_REG(group, CMR));
+ regmap_write(tcbpwmc->regmap, ATMEL_TC_REG(tcbpwmc->channel, CMR), cmr);
spin_unlock(&tcbpwmc->lock);
tcbpwmc->pwms[pwm->hwpwm] = tcbpwm;
@@ -131,9 +140,8 @@ static void atmel_tcb_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
struct atmel_tcb_pwm_device *tcbpwm = pwm_get_chip_data(pwm);
- struct atmel_tc *tc = tcbpwmc->tc;
- clk_disable_unprepare(tc->clk[pwm->hwpwm / 2]);
+ clk_disable_unprepare(tcbpwmc->clk);
tcbpwmc->pwms[pwm->hwpwm] = NULL;
devm_kfree(chip->dev, tcbpwm);
}
@@ -142,10 +150,6 @@ static void atmel_tcb_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
struct atmel_tcb_pwm_device *tcbpwm = pwm_get_chip_data(pwm);
- struct atmel_tc *tc = tcbpwmc->tc;
- void __iomem *regs = tc->regs;
- unsigned group = pwm->hwpwm / 2;
- unsigned index = pwm->hwpwm % 2;
unsigned cmr;
enum pwm_polarity polarity = tcbpwm->polarity;
@@ -161,10 +165,10 @@ static void atmel_tcb_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
polarity = !polarity;
spin_lock(&tcbpwmc->lock);
- cmr = __raw_readl(regs + ATMEL_TC_REG(group, CMR));
+ regmap_read(tcbpwmc->regmap, ATMEL_TC_REG(tcbpwmc->channel, CMR), &cmr);
/* flush old setting and set the new one */
- if (index == 0) {
+ if (pwm->hwpwm == 0) {
cmr &= ~ATMEL_TC_ACMR_MASK;
if (polarity == PWM_POLARITY_INVERSED)
cmr |= ATMEL_TC_ASWTRG_CLEAR;
@@ -178,20 +182,22 @@ static void atmel_tcb_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
cmr |= ATMEL_TC_BSWTRG_SET;
}
- __raw_writel(cmr, regs + ATMEL_TC_REG(group, CMR));
+ regmap_write(tcbpwmc->regmap, ATMEL_TC_REG(tcbpwmc->channel, CMR), cmr);
/*
* Use software trigger to apply the new setting.
* If both PWM devices in this group are disabled we stop the clock.
*/
if (!(cmr & (ATMEL_TC_ACPC | ATMEL_TC_BCPC))) {
- __raw_writel(ATMEL_TC_SWTRG | ATMEL_TC_CLKDIS,
- regs + ATMEL_TC_REG(group, CCR));
- tcbpwmc->bkup[group].enabled = 1;
+ regmap_write(tcbpwmc->regmap,
+ ATMEL_TC_REG(tcbpwmc->channel, CCR),
+ ATMEL_TC_SWTRG | ATMEL_TC_CLKDIS);
+ tcbpwmc->bkup.enabled = 1;
} else {
- __raw_writel(ATMEL_TC_SWTRG, regs +
- ATMEL_TC_REG(group, CCR));
- tcbpwmc->bkup[group].enabled = 0;
+ regmap_write(tcbpwmc->regmap,
+ ATMEL_TC_REG(tcbpwmc->channel, CCR),
+ ATMEL_TC_SWTRG);
+ tcbpwmc->bkup.enabled = 0;
}
spin_unlock(&tcbpwmc->lock);
@@ -201,10 +207,6 @@ static int atmel_tcb_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
struct atmel_tcb_pwm_device *tcbpwm = pwm_get_chip_data(pwm);
- struct atmel_tc *tc = tcbpwmc->tc;
- void __iomem *regs = tc->regs;
- unsigned group = pwm->hwpwm / 2;
- unsigned index = pwm->hwpwm % 2;
u32 cmr;
enum pwm_polarity polarity = tcbpwm->polarity;
@@ -220,12 +222,12 @@ static int atmel_tcb_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
polarity = !polarity;
spin_lock(&tcbpwmc->lock);
- cmr = __raw_readl(regs + ATMEL_TC_REG(group, CMR));
+ regmap_read(tcbpwmc->regmap, ATMEL_TC_REG(tcbpwmc->channel, CMR), &cmr);
/* flush old setting and set the new one */
cmr &= ~ATMEL_TC_TCCLKS;
- if (index == 0) {
+ if (pwm->hwpwm == 0) {
cmr &= ~ATMEL_TC_ACMR_MASK;
/* Set CMR flags according to given polarity */
@@ -248,7 +250,7 @@ static int atmel_tcb_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
* this config till next config call.
*/
if (tcbpwm->duty != tcbpwm->period && tcbpwm->duty > 0) {
- if (index == 0) {
+ if (pwm->hwpwm == 0) {
if (polarity == PWM_POLARITY_INVERSED)
cmr |= ATMEL_TC_ACPA_SET | ATMEL_TC_ACPC_CLEAR;
else
@@ -263,19 +265,24 @@ static int atmel_tcb_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
cmr |= (tcbpwm->div & ATMEL_TC_TCCLKS);
- __raw_writel(cmr, regs + ATMEL_TC_REG(group, CMR));
+ regmap_write(tcbpwmc->regmap, ATMEL_TC_REG(tcbpwmc->channel, CMR), cmr);
- if (index == 0)
- __raw_writel(tcbpwm->duty, regs + ATMEL_TC_REG(group, RA));
+ if (pwm->hwpwm == 0)
+ regmap_write(tcbpwmc->regmap,
+ ATMEL_TC_REG(tcbpwmc->channel, RA),
+ tcbpwm->duty);
else
- __raw_writel(tcbpwm->duty, regs + ATMEL_TC_REG(group, RB));
+ regmap_write(tcbpwmc->regmap,
+ ATMEL_TC_REG(tcbpwmc->channel, RB),
+ tcbpwm->duty);
- __raw_writel(tcbpwm->period, regs + ATMEL_TC_REG(group, RC));
+ regmap_write(tcbpwmc->regmap, ATMEL_TC_REG(tcbpwmc->channel, RC),
+ tcbpwm->period);
/* Use software trigger to apply the new setting */
- __raw_writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG,
- regs + ATMEL_TC_REG(group, CCR));
- tcbpwmc->bkup[group].enabled = 1;
+ regmap_write(tcbpwmc->regmap, ATMEL_TC_REG(tcbpwmc->channel, CCR),
+ ATMEL_TC_SWTRG | ATMEL_TC_CLKEN);
+ tcbpwmc->bkup.enabled = 1;
spin_unlock(&tcbpwmc->lock);
return 0;
}
@@ -285,29 +292,29 @@ static int atmel_tcb_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
{
struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
struct atmel_tcb_pwm_device *tcbpwm = pwm_get_chip_data(pwm);
- unsigned group = pwm->hwpwm / 2;
- unsigned index = pwm->hwpwm % 2;
struct atmel_tcb_pwm_device *atcbpwm = NULL;
- struct atmel_tc *tc = tcbpwmc->tc;
- int i;
+ int i = 0;
int slowclk = 0;
unsigned period;
unsigned duty;
- unsigned rate = clk_get_rate(tc->clk[group]);
+ unsigned rate = clk_get_rate(tcbpwmc->clk);
unsigned long long min;
unsigned long long max;
/*
* Find best clk divisor:
* the smallest divisor which can fulfill the period_ns requirements.
+ * If there is a gclk, the first divisor is actually the gclk selector
*/
- for (i = 0; i < 5; ++i) {
- if (atmel_tc_divisors[i] == 0) {
+ if (tcbpwmc->gclk)
+ i = 1;
+ for (; i < ARRAY_SIZE(atmel_tcb_divisors); ++i) {
+ if (atmel_tcb_divisors[i] == 0) {
slowclk = i;
continue;
}
- min = div_u64((u64)NSEC_PER_SEC * atmel_tc_divisors[i], rate);
- max = min << tc->tcb_config->counter_width;
+ min = div_u64((u64)NSEC_PER_SEC * atmel_tcb_divisors[i], rate);
+ max = min << tcbpwmc->width;
if (max >= period_ns)
break;
}
@@ -316,11 +323,11 @@ static int atmel_tcb_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
* If none of the divisor are small enough to represent period_ns
* take slow clock (32KHz).
*/
- if (i == 5) {
+ if (i == ARRAY_SIZE(atmel_tcb_divisors)) {
i = slowclk;
- rate = clk_get_rate(tc->slow_clk);
+ rate = clk_get_rate(tcbpwmc->slow_clk);
min = div_u64(NSEC_PER_SEC, rate);
- max = min << tc->tcb_config->counter_width;
+ max = min << tcbpwmc->width;
/* If period is too big return ERANGE error */
if (max < period_ns)
@@ -330,17 +337,13 @@ static int atmel_tcb_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
duty = div_u64(duty_ns, min);
period = div_u64(period_ns, min);
- if (index == 0)
- atcbpwm = tcbpwmc->pwms[pwm->hwpwm + 1];
+ if (pwm->hwpwm == 0)
+ atcbpwm = tcbpwmc->pwms[1];
else
- atcbpwm = tcbpwmc->pwms[pwm->hwpwm - 1];
+ atcbpwm = tcbpwmc->pwms[0];
/*
- * PWM devices provided by TCB driver are grouped by 2:
- * - group 0: PWM 0 & 1
- * - group 1: PWM 2 & 3
- * - group 2: PWM 4 & 5
- *
+ * PWM devices provided by the TCB driver are grouped by 2.
* PWM devices in a given group must be configured with the
* same period_ns.
*
@@ -376,32 +379,75 @@ static const struct pwm_ops atmel_tcb_pwm_ops = {
.owner = THIS_MODULE,
};
+static struct atmel_tcb_config tcb_rm9200_config = {
+ .counter_width = 16,
+};
+
+static struct atmel_tcb_config tcb_sam9x5_config = {
+ .counter_width = 32,
+};
+
+static struct atmel_tcb_config tcb_sama5d2_config = {
+ .counter_width = 32,
+ .has_gclk = 1,
+};
+
+static const struct of_device_id atmel_tcb_of_match[] = {
+ { .compatible = "atmel,at91rm9200-tcb", .data = &tcb_rm9200_config, },
+ { .compatible = "atmel,at91sam9x5-tcb", .data = &tcb_sam9x5_config, },
+ { .compatible = "atmel,sama5d2-tcb", .data = &tcb_sama5d2_config, },
+ { /* sentinel */ }
+};
+
static int atmel_tcb_pwm_probe(struct platform_device *pdev)
{
+ const struct of_device_id *match;
struct atmel_tcb_pwm_chip *tcbpwm;
+ const struct atmel_tcb_config *config;
struct device_node *np = pdev->dev.of_node;
- struct atmel_tc *tc;
+ struct regmap *regmap;
+ struct clk *clk, *gclk = NULL;
+ struct clk *slow_clk;
+ char clk_name[] = "t0_clk";
int err;
- int tcblock;
+ int channel;
- err = of_property_read_u32(np, "tc-block", &tcblock);
+ err = of_property_read_u32(np, "reg", &channel);
if (err < 0) {
dev_err(&pdev->dev,
- "failed to get Timer Counter Block number from device tree (error: %d)\n",
+ "failed to get Timer Counter Block channel from device tree (error: %d)\n",
err);
return err;
}
- tc = atmel_tc_alloc(tcblock);
- if (tc == NULL) {
- dev_err(&pdev->dev, "failed to allocate Timer Counter Block\n");
- return -ENOMEM;
+ regmap = syscon_node_to_regmap(np->parent);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ slow_clk = of_clk_get_by_name(np->parent, "slow_clk");
+ if (IS_ERR(slow_clk))
+ return PTR_ERR(slow_clk);
+
+ clk_name[1] += channel;
+ clk = of_clk_get_by_name(np->parent, clk_name);
+ if (IS_ERR(clk))
+ clk = of_clk_get_by_name(np->parent, "t0_clk");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ match = of_match_node(atmel_tcb_of_match, np->parent);
+ config = match->data;
+
+ if (config->has_gclk) {
+ gclk = of_clk_get_by_name(np->parent, "gclk");
+ if (IS_ERR(gclk))
+ return PTR_ERR(gclk);
}
tcbpwm = devm_kzalloc(&pdev->dev, sizeof(*tcbpwm), GFP_KERNEL);
if (tcbpwm == NULL) {
err = -ENOMEM;
- goto err_free_tc;
+ goto err_slow_clk;
}
tcbpwm->chip.dev = &pdev->dev;
@@ -410,11 +456,16 @@ static int atmel_tcb_pwm_probe(struct platform_device *pdev)
tcbpwm->chip.of_pwm_n_cells = 3;
tcbpwm->chip.base = -1;
tcbpwm->chip.npwm = NPWM;
- tcbpwm->tc = tc;
-
- err = clk_prepare_enable(tc->slow_clk);
+ tcbpwm->channel = channel;
+ tcbpwm->regmap = regmap;
+ tcbpwm->clk = clk;
+ tcbpwm->gclk = gclk;
+ tcbpwm->slow_clk = slow_clk;
+ tcbpwm->width = config->counter_width;
+
+ err = clk_prepare_enable(slow_clk);
if (err)
- goto err_free_tc;
+ goto err_slow_clk;
spin_lock_init(&tcbpwm->lock);
@@ -427,10 +478,10 @@ static int atmel_tcb_pwm_probe(struct platform_device *pdev)
return 0;
err_disable_clk:
- clk_disable_unprepare(tcbpwm->tc->slow_clk);
+ clk_disable_unprepare(tcbpwm->slow_clk);
-err_free_tc:
- atmel_tc_free(tc);
+err_slow_clk:
+ clk_put(slow_clk);
return err;
}
@@ -440,14 +491,14 @@ static int atmel_tcb_pwm_remove(struct platform_device *pdev)
struct atmel_tcb_pwm_chip *tcbpwm = platform_get_drvdata(pdev);
int err;
- clk_disable_unprepare(tcbpwm->tc->slow_clk);
+ clk_disable_unprepare(tcbpwm->slow_clk);
+ clk_put(tcbpwm->slow_clk);
+ clk_put(tcbpwm->clk);
err = pwmchip_remove(&tcbpwm->chip);
if (err < 0)
return err;
- atmel_tc_free(tcbpwm->tc);
-
return 0;
}
@@ -461,38 +512,33 @@ MODULE_DEVICE_TABLE(of, atmel_tcb_pwm_dt_ids);
static int atmel_tcb_pwm_suspend(struct device *dev)
{
struct atmel_tcb_pwm_chip *tcbpwm = dev_get_drvdata(dev);
- void __iomem *base = tcbpwm->tc->regs;
- int i;
+ struct atmel_tcb_channel *chan = &tcbpwm->bkup;
+ unsigned int channel = tcbpwm->channel;
- for (i = 0; i < (NPWM / 2); i++) {
- struct atmel_tcb_channel *chan = &tcbpwm->bkup[i];
+ regmap_read(tcbpwm->regmap, ATMEL_TC_REG(channel, CMR), &chan->cmr);
+ regmap_read(tcbpwm->regmap, ATMEL_TC_REG(channel, RA), &chan->ra);
+ regmap_read(tcbpwm->regmap, ATMEL_TC_REG(channel, RB), &chan->rb);
+ regmap_read(tcbpwm->regmap, ATMEL_TC_REG(channel, RC), &chan->rc);
- chan->cmr = readl(base + ATMEL_TC_REG(i, CMR));
- chan->ra = readl(base + ATMEL_TC_REG(i, RA));
- chan->rb = readl(base + ATMEL_TC_REG(i, RB));
- chan->rc = readl(base + ATMEL_TC_REG(i, RC));
- }
return 0;
}
static int atmel_tcb_pwm_resume(struct device *dev)
{
struct atmel_tcb_pwm_chip *tcbpwm = dev_get_drvdata(dev);
- void __iomem *base = tcbpwm->tc->regs;
- int i;
-
- for (i = 0; i < (NPWM / 2); i++) {
- struct atmel_tcb_channel *chan = &tcbpwm->bkup[i];
-
- writel(chan->cmr, base + ATMEL_TC_REG(i, CMR));
- writel(chan->ra, base + ATMEL_TC_REG(i, RA));
- writel(chan->rb, base + ATMEL_TC_REG(i, RB));
- writel(chan->rc, base + ATMEL_TC_REG(i, RC));
- if (chan->enabled) {
- writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG,
- base + ATMEL_TC_REG(i, CCR));
- }
- }
+ struct atmel_tcb_channel *chan = &tcbpwm->bkup;
+ unsigned int channel = tcbpwm->channel;
+
+ regmap_write(tcbpwm->regmap, ATMEL_TC_REG(channel, CMR), chan->cmr);
+ regmap_write(tcbpwm->regmap, ATMEL_TC_REG(channel, RA), chan->ra);
+ regmap_write(tcbpwm->regmap, ATMEL_TC_REG(channel, RB), chan->rb);
+ regmap_write(tcbpwm->regmap, ATMEL_TC_REG(channel, RC), chan->rc);
+
+ if (chan->enabled)
+ regmap_write(tcbpwm->regmap,
+ ATMEL_TC_REG(channel, CCR),
+ ATMEL_TC_CLKEN | ATMEL_TC_SWTRG);
+
return 0;
}
#endif
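The per-channel clock lookup in the probe above leans on a small string trick: clk_name[] is a writable stack array initialised to "t0_clk", and adding the channel number to its second character rewrites the name in place, with a fall-back to "t0_clk" for controllers that expose only one clock:

	char clk_name[] = "t0_clk";

	clk_name[1] += channel;		/* channel 2 turns it into "t2_clk" */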
diff --git a/drivers/pwm/pwm-atmel.c b/drivers/pwm/pwm-atmel.c
index 6161e7e3e9ac..5813339b597b 100644
--- a/drivers/pwm/pwm-atmel.c
+++ b/drivers/pwm/pwm-atmel.c
@@ -401,7 +401,6 @@ MODULE_DEVICE_TABLE(of, atmel_pwm_dt_ids);
static int atmel_pwm_probe(struct platform_device *pdev)
{
struct atmel_pwm_chip *atmel_pwm;
- struct resource *res;
int ret;
atmel_pwm = devm_kzalloc(&pdev->dev, sizeof(*atmel_pwm), GFP_KERNEL);
@@ -412,8 +411,7 @@ static int atmel_pwm_probe(struct platform_device *pdev)
atmel_pwm->data = of_device_get_match_data(&pdev->dev);
atmel_pwm->updated_pwms = 0;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- atmel_pwm->base = devm_ioremap_resource(&pdev->dev, res);
+ atmel_pwm->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(atmel_pwm->base))
return PTR_ERR(atmel_pwm->base);
diff --git a/drivers/pwm/pwm-bcm-iproc.c b/drivers/pwm/pwm-bcm-iproc.c
index 79b1e58e946d..f4853c4a2d75 100644
--- a/drivers/pwm/pwm-bcm-iproc.c
+++ b/drivers/pwm/pwm-bcm-iproc.c
@@ -197,7 +197,6 @@ static const struct pwm_ops iproc_pwm_ops = {
static int iproc_pwmc_probe(struct platform_device *pdev)
{
struct iproc_pwmc *ip;
- struct resource *res;
unsigned int i;
u32 value;
int ret;
@@ -215,8 +214,7 @@ static int iproc_pwmc_probe(struct platform_device *pdev)
ip->chip.of_xlate = of_pwm_xlate_with_flags;
ip->chip.of_pwm_n_cells = 3;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ip->base = devm_ioremap_resource(&pdev->dev, res);
+ ip->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ip->base))
return PTR_ERR(ip->base);
diff --git a/drivers/pwm/pwm-bcm-kona.c b/drivers/pwm/pwm-bcm-kona.c
index 16c5898b934a..578b3621c97e 100644
--- a/drivers/pwm/pwm-bcm-kona.c
+++ b/drivers/pwm/pwm-bcm-kona.c
@@ -259,7 +259,6 @@ static const struct pwm_ops kona_pwm_ops = {
static int kona_pwmc_probe(struct platform_device *pdev)
{
struct kona_pwmc *kp;
- struct resource *res;
unsigned int chan;
unsigned int value = 0;
int ret = 0;
@@ -277,8 +276,7 @@ static int kona_pwmc_probe(struct platform_device *pdev)
kp->chip.of_xlate = of_pwm_xlate_with_flags;
kp->chip.of_pwm_n_cells = 3;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- kp->base = devm_ioremap_resource(&pdev->dev, res);
+ kp->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(kp->base))
return PTR_ERR(kp->base);
diff --git a/drivers/pwm/pwm-bcm2835.c b/drivers/pwm/pwm-bcm2835.c
index 6841dcfe27fc..6ff5f04b3e07 100644
--- a/drivers/pwm/pwm-bcm2835.c
+++ b/drivers/pwm/pwm-bcm2835.c
@@ -58,13 +58,15 @@ static void bcm2835_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
writel(value, pc->base + PWM_CONTROL);
}
-static int bcm2835_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
- int duty_ns, int period_ns)
+static int bcm2835_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
{
struct bcm2835_pwm *pc = to_bcm2835_pwm(chip);
unsigned long rate = clk_get_rate(pc->clk);
+ unsigned long long period;
unsigned long scaler;
- u32 period;
+ u32 val;
if (!rate) {
dev_err(pc->dev, "failed to get clock rate\n");
@@ -72,54 +74,34 @@ static int bcm2835_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
}
scaler = DIV_ROUND_CLOSEST(NSEC_PER_SEC, rate);
- period = DIV_ROUND_CLOSEST(period_ns, scaler);
+ /* set period */
+ period = DIV_ROUND_CLOSEST_ULL(state->period, scaler);
- if (period < PERIOD_MIN)
+ /* don't accept a period that is too small or has been truncated */
+ if ((period < PERIOD_MIN) || (period > U32_MAX))
return -EINVAL;
- writel(DIV_ROUND_CLOSEST(duty_ns, scaler),
- pc->base + DUTY(pwm->hwpwm));
writel(period, pc->base + PERIOD(pwm->hwpwm));
- return 0;
-}
-
-static int bcm2835_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
-{
- struct bcm2835_pwm *pc = to_bcm2835_pwm(chip);
- u32 value;
-
- value = readl(pc->base + PWM_CONTROL);
- value |= PWM_ENABLE << PWM_CONTROL_SHIFT(pwm->hwpwm);
- writel(value, pc->base + PWM_CONTROL);
+ /* set duty cycle */
+ val = DIV_ROUND_CLOSEST_ULL(state->duty_cycle, scaler);
+ writel(val, pc->base + DUTY(pwm->hwpwm));
- return 0;
-}
+ /* set polarity */
+ val = readl(pc->base + PWM_CONTROL);
-static void bcm2835_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
-{
- struct bcm2835_pwm *pc = to_bcm2835_pwm(chip);
- u32 value;
-
- value = readl(pc->base + PWM_CONTROL);
- value &= ~(PWM_ENABLE << PWM_CONTROL_SHIFT(pwm->hwpwm));
- writel(value, pc->base + PWM_CONTROL);
-}
-
-static int bcm2835_set_polarity(struct pwm_chip *chip, struct pwm_device *pwm,
- enum pwm_polarity polarity)
-{
- struct bcm2835_pwm *pc = to_bcm2835_pwm(chip);
- u32 value;
-
- value = readl(pc->base + PWM_CONTROL);
+ if (state->polarity == PWM_POLARITY_NORMAL)
+ val &= ~(PWM_POLARITY << PWM_CONTROL_SHIFT(pwm->hwpwm));
+ else
+ val |= PWM_POLARITY << PWM_CONTROL_SHIFT(pwm->hwpwm);
- if (polarity == PWM_POLARITY_NORMAL)
- value &= ~(PWM_POLARITY << PWM_CONTROL_SHIFT(pwm->hwpwm));
+ /* enable/disable */
+ if (state->enabled)
+ val |= PWM_ENABLE << PWM_CONTROL_SHIFT(pwm->hwpwm);
else
- value |= PWM_POLARITY << PWM_CONTROL_SHIFT(pwm->hwpwm);
+ val &= ~(PWM_ENABLE << PWM_CONTROL_SHIFT(pwm->hwpwm));
- writel(value, pc->base + PWM_CONTROL);
+ writel(val, pc->base + PWM_CONTROL);
return 0;
}
@@ -127,17 +109,13 @@ static int bcm2835_set_polarity(struct pwm_chip *chip, struct pwm_device *pwm,
static const struct pwm_ops bcm2835_pwm_ops = {
.request = bcm2835_pwm_request,
.free = bcm2835_pwm_free,
- .config = bcm2835_pwm_config,
- .enable = bcm2835_pwm_enable,
- .disable = bcm2835_pwm_disable,
- .set_polarity = bcm2835_set_polarity,
+ .apply = bcm2835_pwm_apply,
.owner = THIS_MODULE,
};
static int bcm2835_pwm_probe(struct platform_device *pdev)
{
struct bcm2835_pwm *pc;
- struct resource *res;
int ret;
pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL);
@@ -146,8 +124,7 @@ static int bcm2835_pwm_probe(struct platform_device *pdev)
pc->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pc->base = devm_ioremap_resource(&pdev->dev, res);
+ pc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pc->base))
return PTR_ERR(pc->base);
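Folding .config/.enable/.disable/.set_polarity into one .apply callback means the complete requested state arrives in a single call and is programmed in one register pass. From the consumer side the atomic model looks like this sketch (assuming a pwm_device already obtained with devm_pwm_get()):

	struct pwm_state state;

	pwm_init_state(pwm, &state);	/* start from DT/board defaults */
	state.period = 5000000;		/* 5 ms */
	state.duty_cycle = 1250000;	/* 25 % of the period */
	state.enabled = true;

	return pwm_apply_state(pwm, &state);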
diff --git a/drivers/pwm/pwm-berlin.c b/drivers/pwm/pwm-berlin.c
index b91c477cc84b..fe405289e582 100644
--- a/drivers/pwm/pwm-berlin.c
+++ b/drivers/pwm/pwm-berlin.c
@@ -186,15 +186,13 @@ MODULE_DEVICE_TABLE(of, berlin_pwm_match);
static int berlin_pwm_probe(struct platform_device *pdev)
{
struct berlin_pwm_chip *pwm;
- struct resource *res;
int ret;
pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL);
if (!pwm)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pwm->base = devm_ioremap_resource(&pdev->dev, res);
+ pwm->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pwm->base))
return PTR_ERR(pwm->base);
diff --git a/drivers/pwm/pwm-brcmstb.c b/drivers/pwm/pwm-brcmstb.c
index fea612c45f20..8b66f9d2f589 100644
--- a/drivers/pwm/pwm-brcmstb.c
+++ b/drivers/pwm/pwm-brcmstb.c
@@ -234,7 +234,6 @@ MODULE_DEVICE_TABLE(of, brcmstb_pwm_of_match);
static int brcmstb_pwm_probe(struct platform_device *pdev)
{
struct brcmstb_pwm *p;
- struct resource *res;
int ret;
p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
@@ -262,8 +261,7 @@ static int brcmstb_pwm_probe(struct platform_device *pdev)
p->chip.base = -1;
p->chip.npwm = 2;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- p->base = devm_ioremap_resource(&pdev->dev, res);
+ p->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(p->base)) {
ret = PTR_ERR(p->base);
goto out_clk;
diff --git a/drivers/pwm/pwm-clps711x.c b/drivers/pwm/pwm-clps711x.c
index ba9500aca078..cb1af86873ee 100644
--- a/drivers/pwm/pwm-clps711x.c
+++ b/drivers/pwm/pwm-clps711x.c
@@ -113,14 +113,12 @@ static struct pwm_device *clps711x_pwm_xlate(struct pwm_chip *chip,
static int clps711x_pwm_probe(struct platform_device *pdev)
{
struct clps711x_chip *priv;
- struct resource *res;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->pmpcon = devm_ioremap_resource(&pdev->dev, res);
+ priv->pmpcon = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->pmpcon))
return PTR_ERR(priv->pmpcon);
diff --git a/drivers/pwm/pwm-crc.c b/drivers/pwm/pwm-crc.c
index ecfdfac0c2d9..1e2276808b7a 100644
--- a/drivers/pwm/pwm-crc.c
+++ b/drivers/pwm/pwm-crc.c
@@ -64,7 +64,7 @@ static int crc_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
}
if (state->polarity != PWM_POLARITY_NORMAL)
- return -EOPNOTSUPP;
+ return -EINVAL;
if (pwm_is_enabled(pwm) && !state->enabled) {
err = regmap_write(crc_pwm->regmap, BACKLIGHT_EN, 0);
diff --git a/drivers/pwm/pwm-dwc.c b/drivers/pwm/pwm-dwc.c
new file mode 100644
index 000000000000..f6c98e0d57c2
--- /dev/null
+++ b/drivers/pwm/pwm-dwc.c
@@ -0,0 +1,319 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DesignWare PWM Controller driver
+ *
+ * Copyright (C) 2018-2020 Intel Corporation
+ *
+ * Author: Felipe Balbi (Intel)
+ * Author: Jarkko Nikula <jarkko.nikula@linux.intel.com>
+ * Author: Raymond Tan <raymond.tan@intel.com>
+ *
+ * Limitations:
+ * - The hardware cannot generate a 0 % or 100 % duty cycle. Both high and low
+ * periods are one or more input clock periods long.
+ */
+
+#include <linux/bitops.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/pwm.h>
+
+#define DWC_TIM_LD_CNT(n) ((n) * 0x14)
+#define DWC_TIM_LD_CNT2(n) (((n) * 4) + 0xb0)
+#define DWC_TIM_CUR_VAL(n) (((n) * 0x14) + 0x04)
+#define DWC_TIM_CTRL(n) (((n) * 0x14) + 0x08)
+#define DWC_TIM_EOI(n) (((n) * 0x14) + 0x0c)
+#define DWC_TIM_INT_STS(n) (((n) * 0x14) + 0x10)
+
+#define DWC_TIMERS_INT_STS 0xa0
+#define DWC_TIMERS_EOI 0xa4
+#define DWC_TIMERS_RAW_INT_STS 0xa8
+#define DWC_TIMERS_COMP_VERSION 0xac
+
+#define DWC_TIMERS_TOTAL 8
+#define DWC_CLK_PERIOD_NS 10
+
+/* Timer Control Register */
+#define DWC_TIM_CTRL_EN BIT(0)
+#define DWC_TIM_CTRL_MODE BIT(1)
+#define DWC_TIM_CTRL_MODE_FREE (0 << 1)
+#define DWC_TIM_CTRL_MODE_USER (1 << 1)
+#define DWC_TIM_CTRL_INT_MASK BIT(2)
+#define DWC_TIM_CTRL_PWM BIT(3)
+
+struct dwc_pwm_ctx {
+ u32 cnt;
+ u32 cnt2;
+ u32 ctrl;
+};
+
+struct dwc_pwm {
+ struct pwm_chip chip;
+ void __iomem *base;
+ struct dwc_pwm_ctx ctx[DWC_TIMERS_TOTAL];
+};
+#define to_dwc_pwm(p) (container_of((p), struct dwc_pwm, chip))
+
+static inline u32 dwc_pwm_readl(struct dwc_pwm *dwc, u32 offset)
+{
+ return readl(dwc->base + offset);
+}
+
+static inline void dwc_pwm_writel(struct dwc_pwm *dwc, u32 value, u32 offset)
+{
+ writel(value, dwc->base + offset);
+}
+
+static void __dwc_pwm_set_enable(struct dwc_pwm *dwc, int pwm, int enabled)
+{
+ u32 reg;
+
+ reg = dwc_pwm_readl(dwc, DWC_TIM_CTRL(pwm));
+
+ if (enabled)
+ reg |= DWC_TIM_CTRL_EN;
+ else
+ reg &= ~DWC_TIM_CTRL_EN;
+
+ dwc_pwm_writel(dwc, reg, DWC_TIM_CTRL(pwm));
+}
+
+static int __dwc_pwm_configure_timer(struct dwc_pwm *dwc,
+ struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ u64 tmp;
+ u32 ctrl;
+ u32 high;
+ u32 low;
+
+ /*
+ * Calculate the width of the low and high periods in terms of input
+ * clock periods and check that the results are within the HW limits
+ * of 1 to 2^32 periods.
+ */
+ tmp = DIV_ROUND_CLOSEST_ULL(state->duty_cycle, DWC_CLK_PERIOD_NS);
+ if (tmp < 1 || tmp > (1ULL << 32))
+ return -ERANGE;
+ low = tmp - 1;
+
+ tmp = DIV_ROUND_CLOSEST_ULL(state->period - state->duty_cycle,
+ DWC_CLK_PERIOD_NS);
+ if (tmp < 1 || tmp > (1ULL << 32))
+ return -ERANGE;
+ high = tmp - 1;
+
+ /*
+ * The specification says the timer usage flow is: disable the timer,
+ * then program it, then enable it. It also says the Load Count is
+ * loaded into the timer after it is enabled - either after a disable
+ * or a reset. Based on measurements this also happens without a
+ * disable whenever the Load Count is updated, but follow the
+ * specification anyway.
+ */
+ __dwc_pwm_set_enable(dwc, pwm->hwpwm, false);
+
+ /*
+ * Write the Load Count and Load Count 2 registers. The former defines
+ * the width of the low period and the latter the width of the high
+ * period, each as a multiple of input clock periods:
+ * Width = ((Count + 1) * input clock period).
+ */
+ dwc_pwm_writel(dwc, low, DWC_TIM_LD_CNT(pwm->hwpwm));
+ dwc_pwm_writel(dwc, high, DWC_TIM_LD_CNT2(pwm->hwpwm));
+
+ /*
+ * Set user-defined mode: the timer reloads from the Load Count
+ * registers when it counts down to 0.
+ * Set PWM mode: it makes the output toggle, with the widths of the
+ * low and high periods set by the Load Count registers.
+ */
+ ctrl = DWC_TIM_CTRL_MODE_USER | DWC_TIM_CTRL_PWM;
+ dwc_pwm_writel(dwc, ctrl, DWC_TIM_CTRL(pwm->hwpwm));
+
+ /*
+ * Enable the timer. The output starts with the low period.
+ */
+ __dwc_pwm_set_enable(dwc, pwm->hwpwm, state->enabled);
+
+ return 0;
+}
+
+static int dwc_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ struct dwc_pwm *dwc = to_dwc_pwm(chip);
+
+ if (state->polarity != PWM_POLARITY_INVERSED)
+ return -EINVAL;
+
+ if (state->enabled) {
+ if (!pwm->state.enabled)
+ pm_runtime_get_sync(chip->dev);
+ return __dwc_pwm_configure_timer(dwc, pwm, state);
+ } else {
+ if (pwm->state.enabled) {
+ __dwc_pwm_set_enable(dwc, pwm->hwpwm, false);
+ pm_runtime_put_sync(chip->dev);
+ }
+ }
+
+ return 0;
+}
+
+static void dwc_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct dwc_pwm *dwc = to_dwc_pwm(chip);
+ u64 duty, period;
+
+ pm_runtime_get_sync(chip->dev);
+
+ state->enabled = !!(dwc_pwm_readl(dwc,
+ DWC_TIM_CTRL(pwm->hwpwm)) & DWC_TIM_CTRL_EN);
+
+ duty = dwc_pwm_readl(dwc, DWC_TIM_LD_CNT(pwm->hwpwm));
+ duty += 1;
+ duty *= DWC_CLK_PERIOD_NS;
+ state->duty_cycle = duty;
+
+ period = dwc_pwm_readl(dwc, DWC_TIM_LD_CNT2(pwm->hwpwm));
+ period += 1;
+ period *= DWC_CLK_PERIOD_NS;
+ period += duty;
+ state->period = period;
+
+ state->polarity = PWM_POLARITY_INVERSED;
+
+ pm_runtime_put_sync(chip->dev);
+}
+
+static const struct pwm_ops dwc_pwm_ops = {
+ .apply = dwc_pwm_apply,
+ .get_state = dwc_pwm_get_state,
+ .owner = THIS_MODULE,
+};
+
+static int dwc_pwm_probe(struct pci_dev *pci, const struct pci_device_id *id)
+{
+ struct device *dev = &pci->dev;
+ struct dwc_pwm *dwc;
+ int ret;
+
+ dwc = devm_kzalloc(&pci->dev, sizeof(*dwc), GFP_KERNEL);
+ if (!dwc)
+ return -ENOMEM;
+
+ ret = pcim_enable_device(pci);
+ if (ret) {
+ dev_err(&pci->dev,
+ "Failed to enable device (%pe)\n", ERR_PTR(ret));
+ return ret;
+ }
+
+ pci_set_master(pci);
+
+ ret = pcim_iomap_regions(pci, BIT(0), pci_name(pci));
+ if (ret) {
+ dev_err(&pci->dev,
+ "Failed to iomap PCI BAR (%pe)\n", ERR_PTR(ret));
+ return ret;
+ }
+
+ dwc->base = pcim_iomap_table(pci)[0];
+ if (!dwc->base) {
+ dev_err(&pci->dev, "Base address missing\n");
+ return -ENOMEM;
+ }
+
+ pci_set_drvdata(pci, dwc);
+
+ dwc->chip.dev = dev;
+ dwc->chip.ops = &dwc_pwm_ops;
+ dwc->chip.npwm = DWC_TIMERS_TOTAL;
+ dwc->chip.base = -1;
+
+ ret = pwmchip_add(&dwc->chip);
+ if (ret)
+ return ret;
+
+ pm_runtime_put(dev);
+ pm_runtime_allow(dev);
+
+ return 0;
+}
+
+static void dwc_pwm_remove(struct pci_dev *pci)
+{
+ struct dwc_pwm *dwc = pci_get_drvdata(pci);
+
+ pm_runtime_forbid(&pci->dev);
+ pm_runtime_get_noresume(&pci->dev);
+
+ pwmchip_remove(&dwc->chip);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int dwc_pwm_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+ struct dwc_pwm *dwc = pci_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < DWC_TIMERS_TOTAL; i++) {
+ if (dwc->chip.pwms[i].state.enabled) {
+ dev_err(dev, "PWM %u in use by consumer (%s)\n",
+ i, dwc->chip.pwms[i].label);
+ return -EBUSY;
+ }
+ dwc->ctx[i].cnt = dwc_pwm_readl(dwc, DWC_TIM_LD_CNT(i));
+ dwc->ctx[i].cnt2 = dwc_pwm_readl(dwc, DWC_TIM_LD_CNT2(i));
+ dwc->ctx[i].ctrl = dwc_pwm_readl(dwc, DWC_TIM_CTRL(i));
+ }
+
+ return 0;
+}
+
+static int dwc_pwm_resume(struct device *dev)
+{
+ struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+ struct dwc_pwm *dwc = pci_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < DWC_TIMERS_TOTAL; i++) {
+ dwc_pwm_writel(dwc, dwc->ctx[i].cnt, DWC_TIM_LD_CNT(i));
+ dwc_pwm_writel(dwc, dwc->ctx[i].cnt2, DWC_TIM_LD_CNT2(i));
+ dwc_pwm_writel(dwc, dwc->ctx[i].ctrl, DWC_TIM_CTRL(i));
+ }
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(dwc_pwm_pm_ops, dwc_pwm_suspend, dwc_pwm_resume);
+
+static const struct pci_device_id dwc_pwm_id_table[] = {
+ { PCI_VDEVICE(INTEL, 0x4bb7) }, /* Elkhart Lake */
+ { } /* Terminating Entry */
+};
+MODULE_DEVICE_TABLE(pci, dwc_pwm_id_table);
+
+static struct pci_driver dwc_pwm_driver = {
+ .name = "pwm-dwc",
+ .probe = dwc_pwm_probe,
+ .remove = dwc_pwm_remove,
+ .id_table = dwc_pwm_id_table,
+ .driver = {
+ .pm = &dwc_pwm_pm_ops,
+ },
+};
+
+module_pci_driver(dwc_pwm_driver);
+
+MODULE_AUTHOR("Felipe Balbi (Intel)");
+MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@linux.intel.com>");
+MODULE_AUTHOR("Raymond Tan <raymond.tan@intel.com>");
+MODULE_DESCRIPTION("DesignWare PWM Controller");
+MODULE_LICENSE("GPL");
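
A side note on the count/period math in the new pwm-dwc driver: dwc_pwm_get_state() computes duration = (count + 1) * DWC_CLK_PERIOD_NS, so the configure path has to perform the inverse conversion. A minimal sketch of that inverse, assuming round-to-nearest; the helper name is hypothetical and not part of the driver:

	/* Hypothetical: invert duration = (count + 1) * DWC_CLK_PERIOD_NS */
	static u32 dwc_pwm_ns_to_count(u64 duration_ns)
	{
		return DIV_ROUND_CLOSEST_ULL(duration_ns, DWC_CLK_PERIOD_NS) - 1;
	}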
diff --git a/drivers/pwm/pwm-ep93xx.c b/drivers/pwm/pwm-ep93xx.c
index 4bab73073ad7..c9fc6f223640 100644
--- a/drivers/pwm/pwm-ep93xx.c
+++ b/drivers/pwm/pwm-ep93xx.c
@@ -169,15 +169,13 @@ static const struct pwm_ops ep93xx_pwm_ops = {
static int ep93xx_pwm_probe(struct platform_device *pdev)
{
struct ep93xx_pwm *ep93xx_pwm;
- struct resource *res;
int ret;
ep93xx_pwm = devm_kzalloc(&pdev->dev, sizeof(*ep93xx_pwm), GFP_KERNEL);
if (!ep93xx_pwm)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ep93xx_pwm->base = devm_ioremap_resource(&pdev->dev, res);
+ ep93xx_pwm->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ep93xx_pwm->base))
return PTR_ERR(ep93xx_pwm->base);
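
The devm_platform_ioremap_resource() conversions repeated throughout this series are a pure wrapper substitution; the helper is equivalent to the removed two-step pattern:

	/* Sketch of what devm_platform_ioremap_resource(pdev, 0) expands to */
	static void __iomem *example_ioremap(struct platform_device *pdev)
	{
		struct resource *res;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		return devm_ioremap_resource(&pdev->dev, res);
	}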
diff --git a/drivers/pwm/pwm-fsl-ftm.c b/drivers/pwm/pwm-fsl-ftm.c
index 59272a920479..2a6801226aba 100644
--- a/drivers/pwm/pwm-fsl-ftm.c
+++ b/drivers/pwm/pwm-fsl-ftm.c
@@ -399,7 +399,6 @@ static const struct regmap_config fsl_pwm_regmap_config = {
static int fsl_pwm_probe(struct platform_device *pdev)
{
struct fsl_pwm_chip *fpc;
- struct resource *res;
void __iomem *base;
int ret;
@@ -412,8 +411,7 @@ static int fsl_pwm_probe(struct platform_device *pdev)
fpc->soc = of_device_get_match_data(&pdev->dev);
fpc->chip.dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/pwm/pwm-hibvt.c b/drivers/pwm/pwm-hibvt.c
index ad205fdad372..a1900d0a872e 100644
--- a/drivers/pwm/pwm-hibvt.c
+++ b/drivers/pwm/pwm-hibvt.c
@@ -190,9 +190,7 @@ static int hibvt_pwm_probe(struct platform_device *pdev)
const struct hibvt_pwm_soc *soc =
of_device_get_match_data(&pdev->dev);
struct hibvt_pwm_chip *pwm_chip;
- struct resource *res;
- int ret;
- int i;
+ int ret, i;
pwm_chip = devm_kzalloc(&pdev->dev, sizeof(*pwm_chip), GFP_KERNEL);
if (pwm_chip == NULL)
@@ -213,8 +211,7 @@ static int hibvt_pwm_probe(struct platform_device *pdev)
pwm_chip->chip.of_pwm_n_cells = 3;
pwm_chip->soc = soc;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pwm_chip->base = devm_ioremap_resource(&pdev->dev, res);
+ pwm_chip->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pwm_chip->base))
return PTR_ERR(pwm_chip->base);
diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c
index a34d95ed70b2..6faf5b5a5584 100644
--- a/drivers/pwm/pwm-img.c
+++ b/drivers/pwm/pwm-img.c
@@ -240,7 +240,6 @@ static int img_pwm_probe(struct platform_device *pdev)
int ret;
u64 val;
unsigned long clk_rate;
- struct resource *res;
struct img_pwm_chip *pwm;
const struct of_device_id *of_dev_id;
@@ -250,8 +249,7 @@ static int img_pwm_probe(struct platform_device *pdev)
pwm->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pwm->base = devm_ioremap_resource(&pdev->dev, res);
+ pwm->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pwm->base))
return PTR_ERR(pwm->base);
diff --git a/drivers/pwm/pwm-imx-tpm.c b/drivers/pwm/pwm-imx-tpm.c
index fcdf6befb838..aaf629bd8c35 100644
--- a/drivers/pwm/pwm-imx-tpm.c
+++ b/drivers/pwm/pwm-imx-tpm.c
@@ -350,13 +350,9 @@ static int pwm_imx_tpm_probe(struct platform_device *pdev)
return PTR_ERR(tpm->base);
tpm->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(tpm->clk)) {
- ret = PTR_ERR(tpm->clk);
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev,
- "failed to get PWM clock: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(tpm->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(tpm->clk),
+ "failed to get PWM clock\n");
ret = clk_prepare_enable(tpm->clk);
if (ret) {
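
The dev_err_probe() conversions here and below fold the hand-rolled -EPROBE_DEFER special case into one call. Roughly, and simplified (the real helper also records the deferral reason for the devices_deferred debugfs file):

	static int example_dev_err_probe(struct device *dev, int err, const char *msg)
	{
		if (err != -EPROBE_DEFER)
			dev_err(dev, "error %d: %s\n", err, msg);
		else
			dev_dbg(dev, "deferred probe: %s\n", msg);

		return err;
	}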
diff --git a/drivers/pwm/pwm-imx1.c b/drivers/pwm/pwm-imx1.c
index f8b2c2e001a7..727e0d3e249e 100644
--- a/drivers/pwm/pwm-imx1.c
+++ b/drivers/pwm/pwm-imx1.c
@@ -136,7 +136,6 @@ MODULE_DEVICE_TABLE(of, pwm_imx1_dt_ids);
static int pwm_imx1_probe(struct platform_device *pdev)
{
struct pwm_imx1_chip *imx;
- struct resource *r;
imx = devm_kzalloc(&pdev->dev, sizeof(*imx), GFP_KERNEL);
if (!imx)
@@ -145,31 +144,21 @@ static int pwm_imx1_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, imx);
imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
- if (IS_ERR(imx->clk_ipg)) {
- dev_err(&pdev->dev, "getting ipg clock failed with %ld\n",
- PTR_ERR(imx->clk_ipg));
- return PTR_ERR(imx->clk_ipg);
- }
+ if (IS_ERR(imx->clk_ipg))
+ return dev_err_probe(&pdev->dev, PTR_ERR(imx->clk_ipg),
+ "getting ipg clock failed\n");
imx->clk_per = devm_clk_get(&pdev->dev, "per");
- if (IS_ERR(imx->clk_per)) {
- int ret = PTR_ERR(imx->clk_per);
-
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev,
- "failed to get peripheral clock: %d\n",
- ret);
-
- return ret;
- }
+ if (IS_ERR(imx->clk_per))
+ return dev_err_probe(&pdev->dev, PTR_ERR(imx->clk_per),
+ "failed to get peripheral clock\n");
imx->chip.ops = &pwm_imx1_ops;
imx->chip.dev = &pdev->dev;
imx->chip.base = -1;
imx->chip.npwm = 1;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- imx->mmio_base = devm_ioremap_resource(&pdev->dev, r);
+ imx->mmio_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(imx->mmio_base))
return PTR_ERR(imx->mmio_base);
diff --git a/drivers/pwm/pwm-imx27.c b/drivers/pwm/pwm-imx27.c
index c50d453552bd..18055326a2f3 100644
--- a/drivers/pwm/pwm-imx27.c
+++ b/drivers/pwm/pwm-imx27.c
@@ -235,8 +235,9 @@ static int pwm_imx27_apply(struct pwm_chip *chip, struct pwm_device *pwm,
period_cycles /= prescale;
c = clkrate * state->duty_cycle;
- do_div(c, NSEC_PER_SEC * prescale);
+ do_div(c, NSEC_PER_SEC);
duty_cycles = c;
+ duty_cycles /= prescale;
/*
* according to imx pwm RM, the real period value should be PERIOD
@@ -315,27 +316,14 @@ static int pwm_imx27_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, imx);
imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
- if (IS_ERR(imx->clk_ipg)) {
- int ret = PTR_ERR(imx->clk_ipg);
-
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev,
- "getting ipg clock failed with %d\n",
- ret);
- return ret;
- }
+ if (IS_ERR(imx->clk_ipg))
+ return dev_err_probe(&pdev->dev, PTR_ERR(imx->clk_ipg),
+ "getting ipg clock failed\n");
imx->clk_per = devm_clk_get(&pdev->dev, "per");
- if (IS_ERR(imx->clk_per)) {
- int ret = PTR_ERR(imx->clk_per);
-
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev,
- "failed to get peripheral clock: %d\n",
- ret);
-
- return ret;
- }
+ if (IS_ERR(imx->clk_per))
+ return dev_err_probe(&pdev->dev, PTR_ERR(imx->clk_per),
+ "failed to get peripheral clock\n");
imx->chip.ops = &pwm_imx27_ops;
imx->chip.dev = &pdev->dev;
diff --git a/drivers/pwm/pwm-intel-lgm.c b/drivers/pwm/pwm-intel-lgm.c
new file mode 100644
index 000000000000..e9e54dda07aa
--- /dev/null
+++ b/drivers/pwm/pwm-intel-lgm.c
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Intel Corporation.
+ *
+ * Limitations:
+ * - The hardware supports a fixed period and only 2-wire mode.
+ * - Only normal polarity is supported; the polarity cannot be changed.
+ * - When the PWM is disabled, its output immediately becomes 0 (inactive);
+ *   the currently running period is not completed.
+ * - When the duty cycle is changed, the output may mix the previous and the
+ *   new setting during the first period; from the second period on, the
+ *   output follows the new setting.
+ * - This is a dedicated PWM fan controller; there are no other consumers of
+ *   this PWM controller.
+ */
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/pwm.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#define LGM_PWM_FAN_CON0 0x0
+#define LGM_PWM_FAN_EN_EN BIT(0)
+#define LGM_PWM_FAN_EN_DIS 0x0
+#define LGM_PWM_FAN_EN_MSK BIT(0)
+#define LGM_PWM_FAN_MODE_2WIRE 0x0
+#define LGM_PWM_FAN_MODE_MSK BIT(1)
+#define LGM_PWM_FAN_DC_MSK GENMASK(23, 16)
+
+#define LGM_PWM_FAN_CON1 0x4
+#define LGM_PWM_FAN_MAX_RPM_MSK GENMASK(15, 0)
+
+#define LGM_PWM_MAX_RPM (BIT(16) - 1)
+#define LGM_PWM_DEFAULT_RPM 4000
+#define LGM_PWM_MAX_DUTY_CYCLE (BIT(8) - 1)
+
+#define LGM_PWM_DC_BITS 8
+
+#define LGM_PWM_PERIOD_2WIRE_NS (40 * NSEC_PER_MSEC)
+
+struct lgm_pwm_chip {
+ struct pwm_chip chip;
+ struct regmap *regmap;
+ u32 period;
+};
+
+static inline struct lgm_pwm_chip *to_lgm_pwm_chip(struct pwm_chip *chip)
+{
+ return container_of(chip, struct lgm_pwm_chip, chip);
+}
+
+static int lgm_pwm_enable(struct pwm_chip *chip, bool enable)
+{
+ struct lgm_pwm_chip *pc = to_lgm_pwm_chip(chip);
+ struct regmap *regmap = pc->regmap;
+
+ return regmap_update_bits(regmap, LGM_PWM_FAN_CON0, LGM_PWM_FAN_EN_MSK,
+ enable ? LGM_PWM_FAN_EN_EN : LGM_PWM_FAN_EN_DIS);
+}
+
+static int lgm_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ struct lgm_pwm_chip *pc = to_lgm_pwm_chip(chip);
+ u32 duty_cycle, val;
+ int ret;
+
+ /* The hardware only supports normal polarity and fixed period. */
+ if (state->polarity != PWM_POLARITY_NORMAL || state->period < pc->period)
+ return -EINVAL;
+
+ if (!state->enabled)
+ return lgm_pwm_enable(chip, 0);
+
+ duty_cycle = min_t(u64, state->duty_cycle, pc->period);
+ val = duty_cycle * LGM_PWM_MAX_DUTY_CYCLE / pc->period;
+
+ ret = regmap_update_bits(pc->regmap, LGM_PWM_FAN_CON0, LGM_PWM_FAN_DC_MSK,
+ FIELD_PREP(LGM_PWM_FAN_DC_MSK, val));
+ if (ret)
+ return ret;
+
+ return lgm_pwm_enable(chip, 1);
+}
+
+static void lgm_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct lgm_pwm_chip *pc = to_lgm_pwm_chip(chip);
+ u32 duty, val;
+
+ state->enabled = regmap_test_bits(pc->regmap, LGM_PWM_FAN_CON0,
+ LGM_PWM_FAN_EN_EN);
+ state->polarity = PWM_POLARITY_NORMAL;
+ state->period = pc->period; /* fixed period */
+
+ regmap_read(pc->regmap, LGM_PWM_FAN_CON0, &val);
+ duty = FIELD_GET(LGM_PWM_FAN_DC_MSK, val);
+ state->duty_cycle = DIV_ROUND_UP(duty * pc->period, LGM_PWM_MAX_DUTY_CYCLE);
+}
+
+static const struct pwm_ops lgm_pwm_ops = {
+ .get_state = lgm_pwm_get_state,
+ .apply = lgm_pwm_apply,
+ .owner = THIS_MODULE,
+};
+
+static void lgm_pwm_init(struct lgm_pwm_chip *pc)
+{
+ struct regmap *regmap = pc->regmap;
+ u32 con0_val;
+
+ con0_val = FIELD_PREP(LGM_PWM_FAN_MODE_MSK, LGM_PWM_FAN_MODE_2WIRE);
+ pc->period = LGM_PWM_PERIOD_2WIRE_NS;
+ regmap_update_bits(regmap, LGM_PWM_FAN_CON1, LGM_PWM_FAN_MAX_RPM_MSK,
+ LGM_PWM_DEFAULT_RPM);
+ regmap_update_bits(regmap, LGM_PWM_FAN_CON0, LGM_PWM_FAN_MODE_MSK,
+ con0_val);
+}
+
+static const struct regmap_config lgm_pwm_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+};
+
+static void lgm_clk_release(void *data)
+{
+ struct clk *clk = data;
+
+ clk_disable_unprepare(clk);
+}
+
+static int lgm_clk_enable(struct device *dev, struct clk *clk)
+{
+ int ret;
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, lgm_clk_release, clk);
+}
+
+static void lgm_reset_control_release(void *data)
+{
+ struct reset_control *rst = data;
+
+ reset_control_assert(rst);
+}
+
+static int lgm_reset_control_deassert(struct device *dev, struct reset_control *rst)
+{
+ int ret;
+
+ ret = reset_control_deassert(rst);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, lgm_reset_control_release, rst);
+}
+
+static int lgm_pwm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct reset_control *rst;
+ struct lgm_pwm_chip *pc;
+ void __iomem *io_base;
+ struct clk *clk;
+ int ret;
+
+ pc = devm_kzalloc(dev, sizeof(*pc), GFP_KERNEL);
+ if (!pc)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, pc);
+
+ io_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(io_base))
+ return PTR_ERR(io_base);
+
+ pc->regmap = devm_regmap_init_mmio(dev, io_base, &lgm_pwm_regmap_config);
+ if (IS_ERR(pc->regmap))
+ return dev_err_probe(dev, PTR_ERR(pc->regmap),
+ "failed to init register map\n");
+
+ clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk), "failed to get clock\n");
+
+ ret = lgm_clk_enable(dev, clk);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to enable clock\n");
+
+ rst = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(rst))
+ return dev_err_probe(dev, PTR_ERR(rst),
+ "failed to get reset control\n");
+
+ ret = lgm_reset_control_deassert(dev, rst);
+ if (ret)
+ return dev_err_probe(dev, ret, "cannot deassert reset control\n");
+
+ pc->chip.dev = dev;
+ pc->chip.ops = &lgm_pwm_ops;
+ pc->chip.npwm = 1;
+ pc->chip.base = -1;
+
+ lgm_pwm_init(pc);
+
+ ret = pwmchip_add(&pc->chip);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to add PWM chip\n");
+
+ return 0;
+}
+
+static int lgm_pwm_remove(struct platform_device *pdev)
+{
+ struct lgm_pwm_chip *pc = platform_get_drvdata(pdev);
+
+ return pwmchip_remove(&pc->chip);
+}
+
+static const struct of_device_id lgm_pwm_of_match[] = {
+ { .compatible = "intel,lgm-pwm" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, lgm_pwm_of_match);
+
+static struct platform_driver lgm_pwm_driver = {
+ .driver = {
+ .name = "intel-pwm",
+ .of_match_table = lgm_pwm_of_match,
+ },
+ .probe = lgm_pwm_probe,
+ .remove = lgm_pwm_remove,
+};
+module_platform_driver(lgm_pwm_driver);
+
+MODULE_LICENSE("GPL v2");
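
lgm_clk_enable() and lgm_reset_control_deassert() above tie their undo steps to the device's lifetime via devm_add_action_or_reset(). The shape of the pattern, as a sketch with hypothetical resource names:

	static void example_release(void *data)
	{
		example_disable(data);	/* undo step, runs automatically on unbind */
	}

	static int example_acquire(struct device *dev, struct example_res *res)
	{
		int ret = example_enable(res);

		if (ret)
			return ret;

		/* If registration itself fails, the release action runs immediately. */
		return devm_add_action_or_reset(dev, example_release, res);
	}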
diff --git a/drivers/pwm/pwm-iqs620a.c b/drivers/pwm/pwm-iqs620a.c
index 7d33e3646436..5ede8255926e 100644
--- a/drivers/pwm/pwm-iqs620a.c
+++ b/drivers/pwm/pwm-iqs620a.c
@@ -50,7 +50,7 @@ static int iqs620_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
int ret;
if (state->polarity != PWM_POLARITY_NORMAL)
- return -ENOTSUPP;
+ return -EINVAL;
if (state->period < IQS620_PWM_PERIOD_NS)
return -EINVAL;
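
This -ENOTSUPP to -EINVAL change (also applied to pwm-rcar below) follows the convention that ->apply() rejects states the hardware cannot produce with -EINVAL, since -ENOTSUPP is a kernel-internal value rather than a uapi error code. A minimal hypothetical callback illustrating the shape:

	static int example_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
				     const struct pwm_state *state)
	{
		if (state->polarity != PWM_POLARITY_NORMAL)
			return -EINVAL;	/* not -ENOTSUPP, which is not uapi */

		/* ... program period/duty_cycle here ... */
		return 0;
	}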
diff --git a/drivers/pwm/pwm-keembay.c b/drivers/pwm/pwm-keembay.c
new file mode 100644
index 000000000000..cdfdef66ff8e
--- /dev/null
+++ b/drivers/pwm/pwm-keembay.c
@@ -0,0 +1,245 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Keem Bay PWM driver
+ *
+ * Copyright (C) 2020 Intel Corporation
+ * Authors: Lai Poey Seng <poey.seng.lai@intel.com>
+ * Vineetha G. Jaya Kumaran <vineetha.g.jaya.kumaran@intel.com>
+ *
+ * Limitations:
+ * - Upon disabling a channel, the currently running
+ * period will not be completed. However, upon
+ * reconfiguration of the duty cycle/period, the
+ * currently running period will be completed first.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/regmap.h>
+
+#define KMB_TOTAL_PWM_CHANNELS 6
+#define KMB_PWM_COUNT_MAX U16_MAX
+#define KMB_PWM_EN_BIT BIT(31)
+
+/* Mask */
+#define KMB_PWM_HIGH_MASK GENMASK(31, 16)
+#define KMB_PWM_LOW_MASK GENMASK(15, 0)
+#define KMB_PWM_LEADIN_MASK GENMASK(30, 0)
+
+/* PWM Register offset */
+#define KMB_PWM_LEADIN_OFFSET(ch) (0x00 + 4 * (ch))
+#define KMB_PWM_HIGHLOW_OFFSET(ch) (0x20 + 4 * (ch))
+
+struct keembay_pwm {
+ struct pwm_chip chip;
+ struct device *dev;
+ struct clk *clk;
+ void __iomem *base;
+};
+
+static inline struct keembay_pwm *to_keembay_pwm_dev(struct pwm_chip *chip)
+{
+ return container_of(chip, struct keembay_pwm, chip);
+}
+
+static void keembay_clk_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
+static int keembay_clk_enable(struct device *dev, struct clk *clk)
+{
+ int ret;
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, keembay_clk_unprepare, clk);
+}
+
+/*
+ * With gcc 10 and CONFIG_CC_OPTIMIZE_FOR_SIZE, using plain "inline" instead
+ * of "__always_inline" makes this fail to compile, because the compiler
+ * fails to prove for all valid masks (e.g. KMB_PWM_LEADIN_MASK) that they
+ * are OK.
+ */
+static __always_inline void keembay_pwm_update_bits(struct keembay_pwm *priv, u32 mask,
+ u32 val, u32 offset)
+{
+ u32 buff = readl(priv->base + offset);
+
+ buff = u32_replace_bits(buff, val, mask);
+ writel(buff, priv->base + offset);
+}
+
+static void keembay_pwm_enable(struct keembay_pwm *priv, int ch)
+{
+ keembay_pwm_update_bits(priv, KMB_PWM_EN_BIT, 1,
+ KMB_PWM_LEADIN_OFFSET(ch));
+}
+
+static void keembay_pwm_disable(struct keembay_pwm *priv, int ch)
+{
+ keembay_pwm_update_bits(priv, KMB_PWM_EN_BIT, 0,
+ KMB_PWM_LEADIN_OFFSET(ch));
+}
+
+static void keembay_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct keembay_pwm *priv = to_keembay_pwm_dev(chip);
+ unsigned long long high, low;
+ unsigned long clk_rate;
+ u32 highlow;
+
+ clk_rate = clk_get_rate(priv->clk);
+
+ /* Read channel enabled status */
+ highlow = readl(priv->base + KMB_PWM_LEADIN_OFFSET(pwm->hwpwm));
+ if (highlow & KMB_PWM_EN_BIT)
+ state->enabled = true;
+ else
+ state->enabled = false;
+
+ /* Read period and duty cycle */
+ highlow = readl(priv->base + KMB_PWM_HIGHLOW_OFFSET(pwm->hwpwm));
+ low = FIELD_GET(KMB_PWM_LOW_MASK, highlow) * NSEC_PER_SEC;
+ high = FIELD_GET(KMB_PWM_HIGH_MASK, highlow) * NSEC_PER_SEC;
+ state->duty_cycle = DIV_ROUND_UP_ULL(high, clk_rate);
+ state->period = DIV_ROUND_UP_ULL(high + low, clk_rate);
+ state->polarity = PWM_POLARITY_NORMAL;
+}
+
+static int keembay_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ struct keembay_pwm *priv = to_keembay_pwm_dev(chip);
+ struct pwm_state current_state;
+ unsigned long long div;
+ unsigned long clk_rate;
+ u32 pwm_count = 0;
+ u16 high, low;
+
+ if (state->polarity != PWM_POLARITY_NORMAL)
+ return -EINVAL;
+
+ /*
+ * Configure the PWM repeat count (bits 15:0) as infinite and the lead-in
+ * low time (bits 30:16) as 0, both expressed in clock cycles.
+ */
+ keembay_pwm_update_bits(priv, KMB_PWM_LEADIN_MASK, 0,
+ KMB_PWM_LEADIN_OFFSET(pwm->hwpwm));
+
+ keembay_pwm_get_state(chip, pwm, &current_state);
+
+ if (!state->enabled) {
+ if (current_state.enabled)
+ keembay_pwm_disable(priv, pwm->hwpwm);
+ return 0;
+ }
+
+ /*
+ * The upper and lower 16 bits of the KMB_PWM_HIGHLOW_OFFSET register
+ * hold the high time and low time of the waveform, respectively.
+ * All values are expressed in clock cycles.
+ */
+
+ clk_rate = clk_get_rate(priv->clk);
+ div = clk_rate * state->duty_cycle;
+ div = DIV_ROUND_DOWN_ULL(div, NSEC_PER_SEC);
+ if (div > KMB_PWM_COUNT_MAX)
+ return -ERANGE;
+
+ high = div;
+ div = clk_rate * state->period;
+ div = DIV_ROUND_DOWN_ULL(div, NSEC_PER_SEC);
+ div = div - high;
+ if (div > KMB_PWM_COUNT_MAX)
+ return -ERANGE;
+
+ low = div;
+
+ pwm_count = FIELD_PREP(KMB_PWM_HIGH_MASK, high) |
+ FIELD_PREP(KMB_PWM_LOW_MASK, low);
+
+ writel(pwm_count, priv->base + KMB_PWM_HIGHLOW_OFFSET(pwm->hwpwm));
+
+ if (state->enabled && !current_state.enabled)
+ keembay_pwm_enable(priv, pwm->hwpwm);
+
+ return 0;
+}
+
+static const struct pwm_ops keembay_pwm_ops = {
+ .owner = THIS_MODULE,
+ .apply = keembay_pwm_apply,
+ .get_state = keembay_pwm_get_state,
+};
+
+static int keembay_pwm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct keembay_pwm *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk))
+ return dev_err_probe(dev, PTR_ERR(priv->clk), "Failed to get clock\n");
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ ret = keembay_clk_enable(dev, priv->clk);
+ if (ret)
+ return ret;
+
+ priv->chip.base = -1;
+ priv->chip.dev = dev;
+ priv->chip.ops = &keembay_pwm_ops;
+ priv->chip.npwm = KMB_TOTAL_PWM_CHANNELS;
+
+ ret = pwmchip_add(&priv->chip);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add PWM chip\n");
+
+ platform_set_drvdata(pdev, priv);
+
+ return 0;
+}
+
+static int keembay_pwm_remove(struct platform_device *pdev)
+{
+ struct keembay_pwm *priv = platform_get_drvdata(pdev);
+
+ return pwmchip_remove(&priv->chip);
+}
+
+static const struct of_device_id keembay_pwm_of_match[] = {
+ { .compatible = "intel,keembay-pwm" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, keembay_pwm_of_match);
+
+static struct platform_driver keembay_pwm_driver = {
+ .probe = keembay_pwm_probe,
+ .remove = keembay_pwm_remove,
+ .driver = {
+ .name = "pwm-keembay",
+ .of_match_table = keembay_pwm_of_match,
+ },
+};
+module_platform_driver(keembay_pwm_driver);
+
+MODULE_ALIAS("platform:pwm-keembay");
+MODULE_DESCRIPTION("Intel Keem Bay PWM driver");
+MODULE_LICENSE("GPL v2");
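
A worked example of the ns-to-cycles math in keembay_pwm_apply(), with an assumed (board-specific) input clock of 24 MHz:

	/*
	 * duty_cycle = 250000 ns  -> high = 24e6 * 250000 / 1e9 = 6000 cycles
	 * period     = 1000000 ns -> total = 24e6 * 1e6 / 1e9  = 24000 cycles
	 *                            low   = 24000 - 6000      = 18000 cycles
	 * Both high and low must fit in 16 bits (KMB_PWM_COUNT_MAX), capping
	 * the period at 2 * 65535 cycles, i.e. about 5.46 ms at 24 MHz.
	 */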
diff --git a/drivers/pwm/pwm-lp3943.c b/drivers/pwm/pwm-lp3943.c
index 7551253ada32..bf3f14fb5f24 100644
--- a/drivers/pwm/pwm-lp3943.c
+++ b/drivers/pwm/pwm-lp3943.c
@@ -275,6 +275,7 @@ static int lp3943_pwm_probe(struct platform_device *pdev)
lp3943_pwm->chip.dev = &pdev->dev;
lp3943_pwm->chip.ops = &lp3943_pwm_ops;
lp3943_pwm->chip.npwm = LP3943_NUM_PWMS;
+ lp3943_pwm->chip.base = -1;
platform_set_drvdata(pdev, lp3943_pwm);
diff --git a/drivers/pwm/pwm-lpc18xx-sct.c b/drivers/pwm/pwm-lpc18xx-sct.c
index 5ff11145c1a3..dc5133bec3e7 100644
--- a/drivers/pwm/pwm-lpc18xx-sct.c
+++ b/drivers/pwm/pwm-lpc18xx-sct.c
@@ -325,7 +325,6 @@ static int lpc18xx_pwm_probe(struct platform_device *pdev)
{
struct lpc18xx_pwm_chip *lpc18xx_pwm;
struct pwm_device *pwm;
- struct resource *res;
int ret, i;
u64 val;
@@ -336,8 +335,7 @@ static int lpc18xx_pwm_probe(struct platform_device *pdev)
lpc18xx_pwm->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- lpc18xx_pwm->base = devm_ioremap_resource(&pdev->dev, res);
+ lpc18xx_pwm->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(lpc18xx_pwm->base))
return PTR_ERR(lpc18xx_pwm->base);
diff --git a/drivers/pwm/pwm-lpc32xx.c b/drivers/pwm/pwm-lpc32xx.c
index 710d9a207d2b..6b4090436c06 100644
--- a/drivers/pwm/pwm-lpc32xx.c
+++ b/drivers/pwm/pwm-lpc32xx.c
@@ -98,7 +98,6 @@ static const struct pwm_ops lpc32xx_pwm_ops = {
static int lpc32xx_pwm_probe(struct platform_device *pdev)
{
struct lpc32xx_pwm_chip *lpc32xx;
- struct resource *res;
int ret;
u32 val;
@@ -106,8 +105,7 @@ static int lpc32xx_pwm_probe(struct platform_device *pdev)
if (!lpc32xx)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- lpc32xx->base = devm_ioremap_resource(&pdev->dev, res);
+ lpc32xx->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(lpc32xx->base))
return PTR_ERR(lpc32xx->base);
diff --git a/drivers/pwm/pwm-lpss-platform.c b/drivers/pwm/pwm-lpss-platform.c
index c6502cf7a7af..986786be1e49 100644
--- a/drivers/pwm/pwm-lpss-platform.c
+++ b/drivers/pwm/pwm-lpss-platform.c
@@ -58,7 +58,25 @@ static int pwm_lpss_probe_platform(struct platform_device *pdev)
platform_set_drvdata(pdev, lpwm);
- dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_SMART_PREPARE);
+ /*
+ * On Cherry Trail devices the GFX0._PS0 AML checks whether the controller
+ * is on; if it is not, it turns the controller on and restores what it
+ * believes is the correct state to the PWM controller.
+ * Because of this we must disallow direct-complete, which would keep the
+ * controller (runtime-)suspended across resume, to avoid two issues:
+ * 1. The controller getting turned on without the linux-pm code knowing
+ * about it. On devices where the controller is unused, this makes it
+ * stay on during the next suspend, causing high battery drain
+ * (because S0i3 is not reached).
+ * 2. The state-restoring code unexpectedly messing with the controller.
+ *
+ * Leaving the controller runtime-suspended (skipping runtime-resume +
+ * normal-suspend) during suspend is fine.
+ */
+ if (info->other_devices_aml_touches_pwm_regs)
+ dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE|
+ DPM_FLAG_SMART_SUSPEND);
+
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
@@ -73,24 +91,6 @@ static int pwm_lpss_remove_platform(struct platform_device *pdev)
return pwm_lpss_remove(lpwm);
}
-static int pwm_lpss_prepare(struct device *dev)
-{
- struct pwm_lpss_chip *lpwm = dev_get_drvdata(dev);
-
- /*
- * If other device's AML code touches the PWM regs on suspend/resume
- * force runtime-resume the PWM controller to allow this.
- */
- if (lpwm->info->other_devices_aml_touches_pwm_regs)
- return 0; /* Force runtime-resume */
-
- return 1; /* If runtime-suspended leave as is */
-}
-
-static const struct dev_pm_ops pwm_lpss_platform_pm_ops = {
- .prepare = pwm_lpss_prepare,
-};
-
static const struct acpi_device_id pwm_lpss_acpi_match[] = {
{ "80860F09", (unsigned long)&pwm_lpss_byt_info },
{ "80862288", (unsigned long)&pwm_lpss_bsw_info },
@@ -104,7 +104,6 @@ static struct platform_driver pwm_lpss_driver_platform = {
.driver = {
.name = "pwm-lpss",
.acpi_match_table = pwm_lpss_acpi_match,
- .pm = &pwm_lpss_platform_pm_ops,
},
.probe = pwm_lpss_probe_platform,
.remove = pwm_lpss_remove_platform,
diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c
index 3444c56b4bed..939de93c157b 100644
--- a/drivers/pwm/pwm-lpss.c
+++ b/drivers/pwm/pwm-lpss.c
@@ -76,7 +76,12 @@ static int pwm_lpss_wait_for_update(struct pwm_device *pwm)
static inline int pwm_lpss_is_updating(struct pwm_device *pwm)
{
- return (pwm_lpss_read(pwm) & PWM_SW_UPDATE) ? -EBUSY : 0;
+ if (pwm_lpss_read(pwm) & PWM_SW_UPDATE) {
+ dev_err(pwm->chip->dev, "PWM_SW_UPDATE is still set, skipping update\n");
+ return -EBUSY;
+ }
+
+ return 0;
}
static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm,
diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c
index ab001ce55178..fcfc3b147e5f 100644
--- a/drivers/pwm/pwm-mediatek.c
+++ b/drivers/pwm/pwm-mediatek.c
@@ -30,12 +30,14 @@
#define PWM45DWIDTH_FIXUP 0x30
#define PWMTHRES 0x30
#define PWM45THRES_FIXUP 0x34
+#define PWM_CK_26M_SEL 0x210
#define PWM_CLK_DIV_MAX 7
struct pwm_mediatek_of_data {
unsigned int num_pwms;
bool pwm45_fixup;
+ bool has_ck_26m_sel;
};
/**
@@ -132,6 +134,10 @@ static int pwm_mediatek_config(struct pwm_chip *chip, struct pwm_device *pwm,
if (ret < 0)
return ret;
+ /* Make sure we use the bus clock and not the 26 MHz clock */
+ if (pc->soc->has_ck_26m_sel)
+ writel(0, pc->regs + PWM_CK_26M_SEL);
+
+ /* Using a resolution in picoseconds yields higher accuracy */
resolution = (u64)NSEC_PER_SEC * 1000;
do_div(resolution, clk_get_rate(pc->clk_pwms[pwm->hwpwm]));
@@ -208,7 +214,6 @@ static const struct pwm_ops pwm_mediatek_ops = {
static int pwm_mediatek_probe(struct platform_device *pdev)
{
struct pwm_mediatek_chip *pc;
- struct resource *res;
unsigned int i;
int ret;
@@ -218,8 +223,7 @@ static int pwm_mediatek_probe(struct platform_device *pdev)
pc->soc = of_device_get_match_data(&pdev->dev);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pc->regs = devm_ioremap_resource(&pdev->dev, res);
+ pc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pc->regs))
return PTR_ERR(pc->regs);
@@ -281,31 +285,43 @@ static int pwm_mediatek_remove(struct platform_device *pdev)
static const struct pwm_mediatek_of_data mt2712_pwm_data = {
.num_pwms = 8,
.pwm45_fixup = false,
+ .has_ck_26m_sel = false,
};
static const struct pwm_mediatek_of_data mt7622_pwm_data = {
.num_pwms = 6,
.pwm45_fixup = false,
+ .has_ck_26m_sel = false,
};
static const struct pwm_mediatek_of_data mt7623_pwm_data = {
.num_pwms = 5,
.pwm45_fixup = true,
+ .has_ck_26m_sel = false,
};
static const struct pwm_mediatek_of_data mt7628_pwm_data = {
.num_pwms = 4,
.pwm45_fixup = true,
+ .has_ck_26m_sel = false,
};
static const struct pwm_mediatek_of_data mt7629_pwm_data = {
.num_pwms = 1,
.pwm45_fixup = false,
+ .has_ck_26m_sel = false,
+};
+
+static const struct pwm_mediatek_of_data mt8183_pwm_data = {
+ .num_pwms = 4,
+ .pwm45_fixup = false,
+ .has_ck_26m_sel = true,
};
static const struct pwm_mediatek_of_data mt8516_pwm_data = {
.num_pwms = 5,
.pwm45_fixup = false,
+ .has_ck_26m_sel = true,
};
static const struct of_device_id pwm_mediatek_of_match[] = {
@@ -314,6 +330,7 @@ static const struct of_device_id pwm_mediatek_of_match[] = {
{ .compatible = "mediatek,mt7623-pwm", .data = &mt7623_pwm_data },
{ .compatible = "mediatek,mt7628-pwm", .data = &mt7628_pwm_data },
{ .compatible = "mediatek,mt7629-pwm", .data = &mt7629_pwm_data },
+ { .compatible = "mediatek,mt8183-pwm", .data = &mt8183_pwm_data },
{ .compatible = "mediatek,mt8516-pwm", .data = &mt8516_pwm_data },
{ },
};
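
A worked example of the picosecond-resolution computation referenced above, assuming a 26 MHz source clock (the rate the new PWM_CK_26M_SEL handling steers away from):

	/*
	 * resolution = 10^9 ns * 1000 / 26000000 Hz = 38461 ps per tick
	 * a 1000000 ns period -> 10^9 ps / 38461 ps = 26000 ticks
	 * Computing 10^9 / clk_rate in whole nanoseconds instead would
	 * truncate to 38 ns per tick and lose about 1.2% of precision.
	 */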
diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c
index bd0d7336b898..a3ce9789412a 100644
--- a/drivers/pwm/pwm-meson.c
+++ b/drivers/pwm/pwm-meson.c
@@ -537,15 +537,13 @@ static int meson_pwm_init_channels(struct meson_pwm *meson)
static int meson_pwm_probe(struct platform_device *pdev)
{
struct meson_pwm *meson;
- struct resource *regs;
int err;
meson = devm_kzalloc(&pdev->dev, sizeof(*meson), GFP_KERNEL);
if (!meson)
return -ENOMEM;
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- meson->base = devm_ioremap_resource(&pdev->dev, regs);
+ meson->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(meson->base))
return PTR_ERR(meson->base);
diff --git a/drivers/pwm/pwm-mtk-disp.c b/drivers/pwm/pwm-mtk-disp.c
index 83b8be0209b7..87c6b4bc5d43 100644
--- a/drivers/pwm/pwm-mtk-disp.c
+++ b/drivers/pwm/pwm-mtk-disp.c
@@ -172,7 +172,6 @@ static const struct pwm_ops mtk_disp_pwm_ops = {
static int mtk_disp_pwm_probe(struct platform_device *pdev)
{
struct mtk_disp_pwm *mdp;
- struct resource *r;
int ret;
mdp = devm_kzalloc(&pdev->dev, sizeof(*mdp), GFP_KERNEL);
@@ -181,8 +180,7 @@ static int mtk_disp_pwm_probe(struct platform_device *pdev)
mdp->data = of_device_get_match_data(&pdev->dev);
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mdp->base = devm_ioremap_resource(&pdev->dev, r);
+ mdp->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mdp->base))
return PTR_ERR(mdp->base);
diff --git a/drivers/pwm/pwm-pxa.c b/drivers/pwm/pwm-pxa.c
index a2a0912c2dcd..d06cf60e6575 100644
--- a/drivers/pwm/pwm-pxa.c
+++ b/drivers/pwm/pwm-pxa.c
@@ -166,7 +166,6 @@ static int pwm_probe(struct platform_device *pdev)
{
const struct platform_device_id *id = platform_get_device_id(pdev);
struct pxa_pwm_chip *pwm;
- struct resource *r;
int ret = 0;
if (IS_ENABLED(CONFIG_OF) && id == NULL)
@@ -193,8 +192,7 @@ static int pwm_probe(struct platform_device *pdev)
pwm->chip.of_pwm_n_cells = 1;
}
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pwm->mmio_base = devm_ioremap_resource(&pdev->dev, r);
+ pwm->mmio_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pwm->mmio_base))
return PTR_ERR(pwm->mmio_base);
diff --git a/drivers/pwm/pwm-rcar.c b/drivers/pwm/pwm-rcar.c
index 7ab9eb6616d9..002ab79a7ec2 100644
--- a/drivers/pwm/pwm-rcar.c
+++ b/drivers/pwm/pwm-rcar.c
@@ -168,7 +168,7 @@ static int rcar_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
/* This HW/driver only supports normal polarity */
if (state->polarity != PWM_POLARITY_NORMAL)
- return -ENOTSUPP;
+ return -EINVAL;
if (!state->enabled) {
rcar_pwm_disable(rp);
@@ -204,15 +204,13 @@ static const struct pwm_ops rcar_pwm_ops = {
static int rcar_pwm_probe(struct platform_device *pdev)
{
struct rcar_pwm_chip *rcar_pwm;
- struct resource *res;
int ret;
rcar_pwm = devm_kzalloc(&pdev->dev, sizeof(*rcar_pwm), GFP_KERNEL);
if (rcar_pwm == NULL)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rcar_pwm->base = devm_ioremap_resource(&pdev->dev, res);
+ rcar_pwm->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rcar_pwm->base))
return PTR_ERR(rcar_pwm->base);
diff --git a/drivers/pwm/pwm-renesas-tpu.c b/drivers/pwm/pwm-renesas-tpu.c
index 81ad5a551455..d02b24b77cdf 100644
--- a/drivers/pwm/pwm-renesas-tpu.c
+++ b/drivers/pwm/pwm-renesas-tpu.c
@@ -383,7 +383,6 @@ static const struct pwm_ops tpu_pwm_ops = {
static int tpu_probe(struct platform_device *pdev)
{
struct tpu_device *tpu;
- struct resource *res;
int ret;
tpu = devm_kzalloc(&pdev->dev, sizeof(*tpu), GFP_KERNEL);
@@ -394,8 +393,7 @@ static int tpu_probe(struct platform_device *pdev)
tpu->pdev = pdev;
/* Map memory, get clock and pin control. */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- tpu->base = devm_ioremap_resource(&pdev->dev, res);
+ tpu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(tpu->base))
return PTR_ERR(tpu->base);
diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c
index 77c23a2c6d71..389a5e140412 100644
--- a/drivers/pwm/pwm-rockchip.c
+++ b/drivers/pwm/pwm-rockchip.c
@@ -287,7 +287,6 @@ static int rockchip_pwm_probe(struct platform_device *pdev)
{
const struct of_device_id *id;
struct rockchip_pwm_chip *pc;
- struct resource *r;
u32 enable_conf, ctrl;
int ret, count;
@@ -299,8 +298,7 @@ static int rockchip_pwm_probe(struct platform_device *pdev)
if (!pc)
return -ENOMEM;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pc->base = devm_ioremap_resource(&pdev->dev, r);
+ pc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pc->base))
return PTR_ERR(pc->base);
diff --git a/drivers/pwm/pwm-samsung.c b/drivers/pwm/pwm-samsung.c
index 87a886f7dc2f..645d0066ff0a 100644
--- a/drivers/pwm/pwm-samsung.c
+++ b/drivers/pwm/pwm-samsung.c
@@ -510,7 +510,6 @@ static int pwm_samsung_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct samsung_pwm_chip *chip;
- struct resource *res;
unsigned int chan;
int ret;
@@ -541,8 +540,7 @@ static int pwm_samsung_probe(struct platform_device *pdev)
sizeof(chip->variant));
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- chip->base = devm_ioremap_resource(&pdev->dev, res);
+ chip->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(chip->base))
return PTR_ERR(chip->base);
diff --git a/drivers/pwm/pwm-sifive.c b/drivers/pwm/pwm-sifive.c
index 2485fbaaead2..2a7cd2deaeea 100644
--- a/drivers/pwm/pwm-sifive.c
+++ b/drivers/pwm/pwm-sifive.c
@@ -232,7 +232,6 @@ static int pwm_sifive_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct pwm_sifive_ddata *ddata;
struct pwm_chip *chip;
- struct resource *res;
int ret;
ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
@@ -248,8 +247,7 @@ static int pwm_sifive_probe(struct platform_device *pdev)
chip->base = -1;
chip->npwm = 4;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ddata->regs = devm_ioremap_resource(dev, res);
+ ddata->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ddata->regs))
return PTR_ERR(ddata->regs);
diff --git a/drivers/pwm/pwm-sl28cpld.c b/drivers/pwm/pwm-sl28cpld.c
index b4c651fc749c..0b01ec25e2f0 100644
--- a/drivers/pwm/pwm-sl28cpld.c
+++ b/drivers/pwm/pwm-sl28cpld.c
@@ -232,6 +232,8 @@ static int sl28cpld_pwm_probe(struct platform_device *pdev)
chip->base = -1;
chip->npwm = 1;
+ platform_set_drvdata(pdev, priv);
+
ret = pwmchip_add(&priv->pwm_chip);
if (ret) {
dev_err(&pdev->dev, "failed to add PWM chip (%pe)",
@@ -239,8 +241,6 @@ static int sl28cpld_pwm_probe(struct platform_device *pdev)
return ret;
}
- platform_set_drvdata(pdev, priv);
-
return 0;
}
diff --git a/drivers/pwm/pwm-spear.c b/drivers/pwm/pwm-spear.c
index 6c6b44fd3f43..f63b54aae1b4 100644
--- a/drivers/pwm/pwm-spear.c
+++ b/drivers/pwm/pwm-spear.c
@@ -174,7 +174,6 @@ static int spear_pwm_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct spear_pwm_chip *pc;
- struct resource *r;
int ret;
u32 val;
@@ -182,8 +181,7 @@ static int spear_pwm_probe(struct platform_device *pdev)
if (!pc)
return -ENOMEM;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pc->mmio_base = devm_ioremap_resource(&pdev->dev, r);
+ pc->mmio_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pc->mmio_base))
return PTR_ERR(pc->mmio_base);
diff --git a/drivers/pwm/pwm-sti.c b/drivers/pwm/pwm-sti.c
index 1508616d794c..99c70e07858d 100644
--- a/drivers/pwm/pwm-sti.c
+++ b/drivers/pwm/pwm-sti.c
@@ -505,7 +505,6 @@ static int sti_pwm_probe_dt(struct sti_pwm_chip *pc)
if (IS_ERR(pc->prescale_high))
return PTR_ERR(pc->prescale_high);
-
pc->pwm_out_en = devm_regmap_field_alloc(dev, pc->regmap,
reg_fields[PWM_OUT_EN]);
if (IS_ERR(pc->pwm_out_en))
@@ -540,7 +539,6 @@ static int sti_pwm_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct sti_pwm_compat_data *cdata;
struct sti_pwm_chip *pc;
- struct resource *res;
unsigned int i;
int irq, ret;
@@ -552,9 +550,7 @@ static int sti_pwm_probe(struct platform_device *pdev)
if (!cdata)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- pc->mmio = devm_ioremap_resource(dev, res);
+ pc->mmio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pc->mmio))
return PTR_ERR(pc->mmio);
@@ -593,38 +589,34 @@ static int sti_pwm_probe(struct platform_device *pdev)
if (ret)
return ret;
- if (!cdata->pwm_num_devs)
- goto skip_pwm;
-
- pc->pwm_clk = of_clk_get_by_name(dev->of_node, "pwm");
- if (IS_ERR(pc->pwm_clk)) {
- dev_err(dev, "failed to get PWM clock\n");
- return PTR_ERR(pc->pwm_clk);
- }
+ if (cdata->pwm_num_devs) {
+ pc->pwm_clk = of_clk_get_by_name(dev->of_node, "pwm");
+ if (IS_ERR(pc->pwm_clk)) {
+ dev_err(dev, "failed to get PWM clock\n");
+ return PTR_ERR(pc->pwm_clk);
+ }
- ret = clk_prepare(pc->pwm_clk);
- if (ret) {
- dev_err(dev, "failed to prepare clock\n");
- return ret;
+ ret = clk_prepare(pc->pwm_clk);
+ if (ret) {
+ dev_err(dev, "failed to prepare clock\n");
+ return ret;
+ }
}
-skip_pwm:
- if (!cdata->cpt_num_devs)
- goto skip_cpt;
-
- pc->cpt_clk = of_clk_get_by_name(dev->of_node, "capture");
- if (IS_ERR(pc->cpt_clk)) {
- dev_err(dev, "failed to get PWM capture clock\n");
- return PTR_ERR(pc->cpt_clk);
- }
+ if (cdata->cpt_num_devs) {
+ pc->cpt_clk = of_clk_get_by_name(dev->of_node, "capture");
+ if (IS_ERR(pc->cpt_clk)) {
+ dev_err(dev, "failed to get PWM capture clock\n");
+ return PTR_ERR(pc->cpt_clk);
+ }
- ret = clk_prepare(pc->cpt_clk);
- if (ret) {
- dev_err(dev, "failed to prepare clock\n");
- return ret;
+ ret = clk_prepare(pc->cpt_clk);
+ if (ret) {
+ dev_err(dev, "failed to prepare clock\n");
+ return ret;
+ }
}
-skip_cpt:
pc->chip.dev = dev;
pc->chip.ops = &sti_pwm_ops;
pc->chip.base = -1;
diff --git a/drivers/pwm/pwm-sun4i.c b/drivers/pwm/pwm-sun4i.c
index 38a4c5c1317b..ce5c4fc8da6f 100644
--- a/drivers/pwm/pwm-sun4i.c
+++ b/drivers/pwm/pwm-sun4i.c
@@ -294,12 +294,8 @@ static int sun4i_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
ctrl |= BIT_CH(PWM_CLK_GATING, pwm->hwpwm);
- if (state->enabled) {
+ if (state->enabled)
ctrl |= BIT_CH(PWM_EN, pwm->hwpwm);
- } else {
- ctrl &= ~BIT_CH(PWM_EN, pwm->hwpwm);
- ctrl &= ~BIT_CH(PWM_CLK_GATING, pwm->hwpwm);
- }
sun4i_pwm_writel(sun4i_pwm, ctrl, PWM_CTRL_REG);
@@ -395,7 +391,6 @@ MODULE_DEVICE_TABLE(of, sun4i_pwm_dt_ids);
static int sun4i_pwm_probe(struct platform_device *pdev)
{
struct sun4i_pwm_chip *pwm;
- struct resource *res;
int ret;
pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL);
@@ -406,8 +401,7 @@ static int sun4i_pwm_probe(struct platform_device *pdev)
if (!pwm->data)
return -ENODEV;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pwm->base = devm_ioremap_resource(&pdev->dev, res);
+ pwm->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pwm->base))
return PTR_ERR(pwm->base);
diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c
index 1daf591025c0..55bc63d5a0ae 100644
--- a/drivers/pwm/pwm-tegra.c
+++ b/drivers/pwm/pwm-tegra.c
@@ -237,7 +237,6 @@ static const struct pwm_ops tegra_pwm_ops = {
static int tegra_pwm_probe(struct platform_device *pdev)
{
struct tegra_pwm_chip *pwm;
- struct resource *r;
int ret;
pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL);
@@ -247,8 +246,7 @@ static int tegra_pwm_probe(struct platform_device *pdev)
pwm->soc = of_device_get_match_data(&pdev->dev);
pwm->dev = &pdev->dev;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pwm->regs = devm_ioremap_resource(&pdev->dev, r);
+ pwm->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pwm->regs))
return PTR_ERR(pwm->regs);
diff --git a/drivers/pwm/pwm-tiecap.c b/drivers/pwm/pwm-tiecap.c
index 683804c7d26c..2a8949014bb1 100644
--- a/drivers/pwm/pwm-tiecap.c
+++ b/drivers/pwm/pwm-tiecap.c
@@ -196,7 +196,6 @@ static int ecap_pwm_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct ecap_pwm_chip *pc;
- struct resource *r;
struct clk *clk;
int ret;
@@ -230,8 +229,7 @@ static int ecap_pwm_probe(struct platform_device *pdev)
pc->chip.base = -1;
pc->chip.npwm = 1;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pc->mmio_base = devm_ioremap_resource(&pdev->dev, r);
+ pc->mmio_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pc->mmio_base))
return PTR_ERR(pc->mmio_base);
diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
index 0846917ff2d2..a7fb224d6535 100644
--- a/drivers/pwm/pwm-tiehrpwm.c
+++ b/drivers/pwm/pwm-tiehrpwm.c
@@ -421,7 +421,6 @@ static int ehrpwm_pwm_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct ehrpwm_pwm_chip *pc;
- struct resource *r;
struct clk *clk;
int ret;
@@ -437,10 +436,8 @@ static int ehrpwm_pwm_probe(struct platform_device *pdev)
}
}
- if (IS_ERR(clk)) {
- dev_err(&pdev->dev, "failed to get clock\n");
- return PTR_ERR(clk);
- }
+ if (IS_ERR(clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(clk), "Failed to get fck\n");
pc->clk_rate = clk_get_rate(clk);
if (!pc->clk_rate) {
@@ -455,17 +452,14 @@ static int ehrpwm_pwm_probe(struct platform_device *pdev)
pc->chip.base = -1;
pc->chip.npwm = NUM_PWM_CHANNEL;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pc->mmio_base = devm_ioremap_resource(&pdev->dev, r);
+ pc->mmio_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pc->mmio_base))
return PTR_ERR(pc->mmio_base);
/* Acquire tbclk for Time Base EHRPWM submodule */
pc->tbclk = devm_clk_get(&pdev->dev, "tbclk");
- if (IS_ERR(pc->tbclk)) {
- dev_err(&pdev->dev, "Failed to get tbclk\n");
- return PTR_ERR(pc->tbclk);
- }
+ if (IS_ERR(pc->tbclk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(pc->tbclk), "Failed to get tbclk\n");
ret = clk_prepare(pc->tbclk);
if (ret < 0) {
diff --git a/drivers/pwm/pwm-vt8500.c b/drivers/pwm/pwm-vt8500.c
index 11d45e56a923..6e36851a22bb 100644
--- a/drivers/pwm/pwm-vt8500.c
+++ b/drivers/pwm/pwm-vt8500.c
@@ -193,7 +193,6 @@ MODULE_DEVICE_TABLE(of, vt8500_pwm_dt_ids);
static int vt8500_pwm_probe(struct platform_device *pdev)
{
struct vt8500_chip *chip;
- struct resource *r;
struct device_node *np = pdev->dev.of_node;
int ret;
@@ -219,8 +218,7 @@ static int vt8500_pwm_probe(struct platform_device *pdev)
return PTR_ERR(chip->clk);
}
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- chip->base = devm_ioremap_resource(&pdev->dev, r);
+ chip->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(chip->base))
return PTR_ERR(chip->base);
diff --git a/drivers/pwm/pwm-zx.c b/drivers/pwm/pwm-zx.c
index e2c21cc34a96..34e91195ce98 100644
--- a/drivers/pwm/pwm-zx.c
+++ b/drivers/pwm/pwm-zx.c
@@ -196,7 +196,6 @@ static const struct pwm_ops zx_pwm_ops = {
static int zx_pwm_probe(struct platform_device *pdev)
{
struct zx_pwm_chip *zpc;
- struct resource *res;
unsigned int i;
int ret;
@@ -204,8 +203,7 @@ static int zx_pwm_probe(struct platform_device *pdev)
if (!zpc)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- zpc->base = devm_ioremap_resource(&pdev->dev, res);
+ zpc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(zpc->base))
return PTR_ERR(zpc->base);
@@ -238,6 +236,7 @@ static int zx_pwm_probe(struct platform_device *pdev)
ret = pwmchip_add(&zpc->chip);
if (ret < 0) {
dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret);
+ clk_disable_unprepare(zpc->pclk);
return ret;
}
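
The clk_disable_unprepare() added to the pwm-zx error path balances the clock enabled earlier in probe. A sketch of the same fix in the kernel's usual goto-unwind style (function and label names hypothetical):

	static int example_probe_tail(struct zx_pwm_chip *zpc,
				      struct platform_device *pdev)
	{
		int ret = pwmchip_add(&zpc->chip);

		if (ret < 0) {
			dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret);
			goto err_unprepare;
		}

		return 0;

	err_unprepare:
		clk_disable_unprepare(zpc->pclk);
		return ret;
	}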
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
index 606986c5ba2c..c2b79736a92b 100644
--- a/drivers/rapidio/rio.c
+++ b/drivers/rapidio/rio.c
@@ -1413,71 +1413,6 @@ rio_mport_get_feature(struct rio_mport * port, int local, u16 destid,
EXPORT_SYMBOL_GPL(rio_mport_get_feature);
/**
- * rio_get_asm - Begin or continue searching for a RIO device by vid/did/asm_vid/asm_did
- * @vid: RIO vid to match or %RIO_ANY_ID to match all vids
- * @did: RIO did to match or %RIO_ANY_ID to match all dids
- * @asm_vid: RIO asm_vid to match or %RIO_ANY_ID to match all asm_vids
- * @asm_did: RIO asm_did to match or %RIO_ANY_ID to match all asm_dids
- * @from: Previous RIO device found in search, or %NULL for new search
- *
- * Iterates through the list of known RIO devices. If a RIO device is
- * found with a matching @vid, @did, @asm_vid, @asm_did, the reference
- * count to the device is incrememted and a pointer to its device
- * structure is returned. Otherwise, %NULL is returned. A new search
- * is initiated by passing %NULL to the @from argument. Otherwise, if
- * @from is not %NULL, searches continue from next device on the global
- * list. The reference count for @from is always decremented if it is
- * not %NULL.
- */
-struct rio_dev *rio_get_asm(u16 vid, u16 did,
- u16 asm_vid, u16 asm_did, struct rio_dev *from)
-{
- struct list_head *n;
- struct rio_dev *rdev;
-
- WARN_ON(in_interrupt());
- spin_lock(&rio_global_list_lock);
- n = from ? from->global_list.next : rio_devices.next;
-
- while (n && (n != &rio_devices)) {
- rdev = rio_dev_g(n);
- if ((vid == RIO_ANY_ID || rdev->vid == vid) &&
- (did == RIO_ANY_ID || rdev->did == did) &&
- (asm_vid == RIO_ANY_ID || rdev->asm_vid == asm_vid) &&
- (asm_did == RIO_ANY_ID || rdev->asm_did == asm_did))
- goto exit;
- n = n->next;
- }
- rdev = NULL;
- exit:
- rio_dev_put(from);
- rdev = rio_dev_get(rdev);
- spin_unlock(&rio_global_list_lock);
- return rdev;
-}
-EXPORT_SYMBOL_GPL(rio_get_asm);
-
-/**
- * rio_get_device - Begin or continue searching for a RIO device by vid/did
- * @vid: RIO vid to match or %RIO_ANY_ID to match all vids
- * @did: RIO did to match or %RIO_ANY_ID to match all dids
- * @from: Previous RIO device found in search, or %NULL for new search
- *
- * Iterates through the list of known RIO devices. If a RIO device is
- * found with a matching @vid and @did, the reference count to the
- * device is incrememted and a pointer to its device structure is returned.
- * Otherwise, %NULL is returned. A new search is initiated by passing %NULL
- * to the @from argument. Otherwise, if @from is not %NULL, searches
- * continue from next device on the global list. The reference count for
- * @from is always decremented if it is not %NULL.
- */
-struct rio_dev *rio_get_device(u16 vid, u16 did, struct rio_dev *from)
-{
- return rio_get_asm(vid, did, RIO_ANY_ID, RIO_ANY_ID, from);
-}
-EXPORT_SYMBOL_GPL(rio_get_device);
-
-/**
* rio_std_route_add_entry - Add switch route table entry using standard
* registers defined in RIO specification rev.1.3
* @mport: Master port to issue transaction
@@ -2106,20 +2041,6 @@ found:
return rc;
}
-static void rio_fixup_device(struct rio_dev *dev)
-{
-}
-
-static int rio_init(void)
-{
- struct rio_dev *dev = NULL;
-
- while ((dev = rio_get_device(RIO_ANY_ID, RIO_ANY_ID, dev)) != NULL) {
- rio_fixup_device(dev);
- }
- return 0;
-}
-
static struct workqueue_struct *rio_wq;
struct rio_disc_work {
@@ -2206,8 +2127,6 @@ int rio_init_mports(void)
kfree(work);
no_disc:
- rio_init();
-
return 0;
}
EXPORT_SYMBOL_GPL(rio_init_mports);
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 53fa84f4d1e1..5abdd29fb9f3 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -881,6 +881,7 @@ config REGULATOR_QCOM_RPM
config REGULATOR_QCOM_RPMH
tristate "Qualcomm Technologies, Inc. RPMh regulator driver"
depends on QCOM_RPMH || (QCOM_RPMH=n && COMPILE_TEST)
+ depends on QCOM_COMMAND_DB || (QCOM_COMMAND_DB=n && COMPILE_TEST)
help
This driver supports control of PMIC regulators via the RPMh hardware
block found on Qualcomm Technologies Inc. SoCs. RPMh regulator
diff --git a/drivers/regulator/bd718x7-regulator.c b/drivers/regulator/bd718x7-regulator.c
index e6d5d98c3cea..9309765d0450 100644
--- a/drivers/regulator/bd718x7-regulator.c
+++ b/drivers/regulator/bd718x7-regulator.c
@@ -15,6 +15,36 @@
#include <linux/regulator/of_regulator.h>
#include <linux/slab.h>
+/* Typical regulator startup times, in microseconds, as given in the datasheet */
+#define BD71847_BUCK1_STARTUP_TIME 144
+#define BD71847_BUCK2_STARTUP_TIME 162
+#define BD71847_BUCK3_STARTUP_TIME 162
+#define BD71847_BUCK4_STARTUP_TIME 240
+#define BD71847_BUCK5_STARTUP_TIME 270
+#define BD71847_BUCK6_STARTUP_TIME 200
+#define BD71847_LDO1_STARTUP_TIME 440
+#define BD71847_LDO2_STARTUP_TIME 370
+#define BD71847_LDO3_STARTUP_TIME 310
+#define BD71847_LDO4_STARTUP_TIME 400
+#define BD71847_LDO5_STARTUP_TIME 530
+#define BD71847_LDO6_STARTUP_TIME 400
+
+#define BD71837_BUCK1_STARTUP_TIME 160
+#define BD71837_BUCK2_STARTUP_TIME 180
+#define BD71837_BUCK3_STARTUP_TIME 180
+#define BD71837_BUCK4_STARTUP_TIME 180
+#define BD71837_BUCK5_STARTUP_TIME 160
+#define BD71837_BUCK6_STARTUP_TIME 240
+#define BD71837_BUCK7_STARTUP_TIME 220
+#define BD71837_BUCK8_STARTUP_TIME 200
+#define BD71837_LDO1_STARTUP_TIME 440
+#define BD71837_LDO2_STARTUP_TIME 370
+#define BD71837_LDO3_STARTUP_TIME 310
+#define BD71837_LDO4_STARTUP_TIME 400
+#define BD71837_LDO5_STARTUP_TIME 310
+#define BD71837_LDO6_STARTUP_TIME 400
+#define BD71837_LDO7_STARTUP_TIME 530
+
/*
* BD718(37/47/50) have two "enable control modes". ON/OFF can either be
* controlled by software - or by PMIC internal HW state machine. Whether
@@ -613,6 +643,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = DVS_BUCK_RUN_MASK,
.enable_reg = BD718XX_REG_BUCK1_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71847_BUCK1_STARTUP_TIME,
.owner = THIS_MODULE,
.of_parse_cb = buck_set_hw_dvs_levels,
},
@@ -646,6 +677,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = DVS_BUCK_RUN_MASK,
.enable_reg = BD718XX_REG_BUCK2_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71847_BUCK2_STARTUP_TIME,
.owner = THIS_MODULE,
.of_parse_cb = buck_set_hw_dvs_levels,
},
@@ -680,6 +712,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.linear_range_selectors = bd71847_buck3_volt_range_sel,
.enable_reg = BD718XX_REG_1ST_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71847_BUCK3_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -706,6 +739,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_range_mask = BD71847_BUCK4_RANGE_MASK,
.linear_range_selectors = bd71847_buck4_volt_range_sel,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71847_BUCK4_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -727,6 +761,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = BD718XX_3RD_NODVS_BUCK_MASK,
.enable_reg = BD718XX_REG_3RD_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71847_BUCK5_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -750,6 +785,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = BD718XX_4TH_NODVS_BUCK_MASK,
.enable_reg = BD718XX_REG_4TH_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71847_BUCK6_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -775,6 +811,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.linear_range_selectors = bd718xx_ldo1_volt_range_sel,
.enable_reg = BD718XX_REG_LDO1_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71847_LDO1_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -796,6 +833,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.n_voltages = ARRAY_SIZE(ldo_2_volts),
.enable_reg = BD718XX_REG_LDO2_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71847_LDO2_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -818,6 +856,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = BD718XX_LDO3_MASK,
.enable_reg = BD718XX_REG_LDO3_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71847_LDO3_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -840,6 +879,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = BD718XX_LDO4_MASK,
.enable_reg = BD718XX_REG_LDO4_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71847_LDO4_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -865,6 +905,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.linear_range_selectors = bd71847_ldo5_volt_range_sel,
.enable_reg = BD718XX_REG_LDO5_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71847_LDO5_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -889,6 +930,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = BD718XX_LDO6_MASK,
.enable_reg = BD718XX_REG_LDO6_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71847_LDO6_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -942,6 +984,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = DVS_BUCK_RUN_MASK,
.enable_reg = BD718XX_REG_BUCK1_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71837_BUCK1_STARTUP_TIME,
.owner = THIS_MODULE,
.of_parse_cb = buck_set_hw_dvs_levels,
},
@@ -975,6 +1018,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = DVS_BUCK_RUN_MASK,
.enable_reg = BD718XX_REG_BUCK2_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71837_BUCK2_STARTUP_TIME,
.owner = THIS_MODULE,
.of_parse_cb = buck_set_hw_dvs_levels,
},
@@ -1005,6 +1049,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = DVS_BUCK_RUN_MASK,
.enable_reg = BD71837_REG_BUCK3_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71837_BUCK3_STARTUP_TIME,
.owner = THIS_MODULE,
.of_parse_cb = buck_set_hw_dvs_levels,
},
@@ -1033,6 +1078,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = DVS_BUCK_RUN_MASK,
.enable_reg = BD71837_REG_BUCK4_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71837_BUCK4_STARTUP_TIME,
.owner = THIS_MODULE,
.of_parse_cb = buck_set_hw_dvs_levels,
},
@@ -1065,6 +1111,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.linear_range_selectors = bd71837_buck5_volt_range_sel,
.enable_reg = BD718XX_REG_1ST_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71837_BUCK5_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1088,6 +1135,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD71837_BUCK6_MASK,
.enable_reg = BD718XX_REG_2ND_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71837_BUCK6_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1109,6 +1157,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD718XX_3RD_NODVS_BUCK_MASK,
.enable_reg = BD718XX_REG_3RD_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71837_BUCK7_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1132,6 +1181,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD718XX_4TH_NODVS_BUCK_MASK,
.enable_reg = BD718XX_REG_4TH_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71837_BUCK8_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1157,6 +1207,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.linear_range_selectors = bd718xx_ldo1_volt_range_sel,
.enable_reg = BD718XX_REG_LDO1_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71837_LDO1_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1178,6 +1229,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.n_voltages = ARRAY_SIZE(ldo_2_volts),
.enable_reg = BD718XX_REG_LDO2_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71837_LDO2_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1200,6 +1252,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD718XX_LDO3_MASK,
.enable_reg = BD718XX_REG_LDO3_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71837_LDO3_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1222,6 +1275,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD718XX_LDO4_MASK,
.enable_reg = BD718XX_REG_LDO4_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71837_LDO4_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1246,6 +1300,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD71837_LDO5_MASK,
.enable_reg = BD718XX_REG_LDO5_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71837_LDO5_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1272,6 +1327,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD718XX_LDO6_MASK,
.enable_reg = BD718XX_REG_LDO6_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71837_LDO6_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1296,6 +1352,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD71837_LDO7_MASK,
.enable_reg = BD71837_REG_LDO7_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71837_LDO7_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index ca03d8e70bd1..67a768fe5b2a 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1813,13 +1813,13 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
{
struct regulator_dev *r;
struct device *dev = rdev->dev.parent;
- int ret;
+ int ret = 0;
/* No supply to resolve? */
if (!rdev->supply_name)
return 0;
- /* Supply already resolved? */
+ /* Supply already resolved? (fast-path without locking contention) */
if (rdev->supply)
return 0;
@@ -1829,7 +1829,7 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
/* Did the lookup explicitly defer for us? */
if (ret == -EPROBE_DEFER)
- return ret;
+ goto out;
if (have_full_constraints()) {
r = dummy_regulator_rdev;
@@ -1837,15 +1837,18 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
} else {
dev_err(dev, "Failed to resolve %s-supply for %s\n",
rdev->supply_name, rdev->desc->name);
- return -EPROBE_DEFER;
+ ret = -EPROBE_DEFER;
+ goto out;
}
}
if (r == rdev) {
dev_err(dev, "Supply for %s (%s) resolved to itself\n",
rdev->desc->name, rdev->supply_name);
- if (!have_full_constraints())
- return -EINVAL;
+ if (!have_full_constraints()) {
+ ret = -EINVAL;
+ goto out;
+ }
r = dummy_regulator_rdev;
get_device(&r->dev);
}
@@ -1859,7 +1862,8 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
if (r->dev.parent && r->dev.parent != rdev->dev.parent) {
if (!device_is_bound(r->dev.parent)) {
put_device(&r->dev);
- return -EPROBE_DEFER;
+ ret = -EPROBE_DEFER;
+ goto out;
}
}
@@ -1867,15 +1871,32 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
ret = regulator_resolve_supply(r);
if (ret < 0) {
put_device(&r->dev);
- return ret;
+ goto out;
+ }
+
+ /*
+ * Recheck rdev->supply with rdev->mutex lock held to avoid a race
+ * between rdev->supply null check and setting rdev->supply in
+ * set_supply() from concurrent tasks.
+ */
+ regulator_lock(rdev);
+
+ /* Supply just resolved by a concurrent task? */
+ if (rdev->supply) {
+ regulator_unlock(rdev);
+ put_device(&r->dev);
+ goto out;
}
ret = set_supply(rdev, r);
if (ret < 0) {
+ regulator_unlock(rdev);
put_device(&r->dev);
- return ret;
+ goto out;
}
+ regulator_unlock(rdev);
+
/*
* In set_machine_constraints() we may have turned this regulator on
* but we couldn't propagate to the supply if it hadn't been resolved
@@ -1886,11 +1907,12 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
if (ret < 0) {
_regulator_put(rdev->supply);
rdev->supply = NULL;
- return ret;
+ goto out;
}
}
- return 0;
+out:
+ return ret;
}
/* Internal regulator request function */
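The locking change above is a double-checked publication pattern: the unlocked rdev->supply test stays as a fast path, and the decision to call set_supply() is re-made under rdev's mutex so two concurrent resolvers cannot both publish a supply. A minimal userspace sketch of the same idea (hypothetical names, pthreads in place of regulator_lock(), and ignoring the memory-ordering subtleties that the kernel's locking primitives handle):

#include <pthread.h>
#include <stdlib.h>

struct resource { int value; };

static struct resource *shared;		/* the rdev->supply analogue */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Resolve the shared resource exactly once, even with concurrent callers. */
static struct resource *resolve(void)
{
	struct resource *r;

	/* Fast path: already published; the locked recheck below decides. */
	if (shared)
		return shared;

	r = malloc(sizeof(*r));		/* expensive setup outside the lock */
	if (!r)
		return NULL;
	r->value = 42;

	pthread_mutex_lock(&lock);
	if (shared) {			/* lost the race: drop our copy */
		free(r);
		r = shared;
	} else {
		shared = r;		/* publish under the lock */
	}
	pthread_mutex_unlock(&lock);

	return r;
}

int main(void)
{
	return resolve() ? 0 : 1;	/* build with -lpthread */
}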
diff --git a/drivers/regulator/pf8x00-regulator.c b/drivers/regulator/pf8x00-regulator.c
index 308c27fa6ea8..af9918cd27aa 100644
--- a/drivers/regulator/pf8x00-regulator.c
+++ b/drivers/regulator/pf8x00-regulator.c
@@ -469,13 +469,17 @@ static int pf8x00_i2c_probe(struct i2c_client *client)
}
static const struct of_device_id pf8x00_dt_ids[] = {
- { .compatible = "nxp,pf8x00",},
+ { .compatible = "nxp,pf8100",},
+ { .compatible = "nxp,pf8121a",},
+ { .compatible = "nxp,pf8200",},
{ }
};
MODULE_DEVICE_TABLE(of, pf8x00_dt_ids);
static const struct i2c_device_id pf8x00_i2c_id[] = {
- { "pf8x00", 0 },
+ { "pf8100", 0 },
+ { "pf8121a", 0 },
+ { "pf8200", 0 },
{},
};
MODULE_DEVICE_TABLE(i2c, pf8x00_i2c_id);
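The table change above replaces a family wildcard with one entry per concrete chip, so a device tree naming "nxp,pf8x00" no longer binds. A small userspace sketch of how such a match table is consulted, with a hypothetical num_ldos field standing in for the per-variant data a driver might hang off each entry:

#include <stdio.h>
#include <string.h>

/* Userspace analogue of an of_device_id table: one entry per concrete
 * chip, with hypothetical per-variant data hanging off each entry. */
struct match_entry {
	const char *compatible;
	int num_ldos;			/* hypothetical variant data */
};

static const struct match_entry match_table[] = {
	{ "nxp,pf8100",  4 },
	{ "nxp,pf8121a", 4 },
	{ "nxp,pf8200",  4 },
	{ NULL, 0 }
};

static const struct match_entry *match(const char *compatible)
{
	const struct match_entry *m;

	for (m = match_table; m->compatible; m++)
		if (!strcmp(m->compatible, compatible))
			return m;
	return NULL;
}

int main(void)
{
	printf("nxp,pf8x00 binds: %s\n", match("nxp,pf8x00") ? "yes" : "no");
	printf("nxp,pf8100 binds: %s\n", match("nxp,pf8100") ? "yes" : "no");
	return 0;
}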
diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
index fe030ec4b7db..c395a8dda6f7 100644
--- a/drivers/regulator/qcom-rpmh-regulator.c
+++ b/drivers/regulator/qcom-rpmh-regulator.c
@@ -726,7 +726,7 @@ static const struct rpmh_vreg_hw_data pmic5_ftsmps510 = {
static const struct rpmh_vreg_hw_data pmic5_hfsmps515 = {
.regulator_type = VRM,
.ops = &rpmh_regulator_vrm_ops,
- .voltage_range = REGULATOR_LINEAR_RANGE(2800000, 0, 4, 1600),
+ .voltage_range = REGULATOR_LINEAR_RANGE(2800000, 0, 4, 16000),
.n_voltages = 5,
.pmic_mode_map = pmic_mode_map_pmic5_smps,
.of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
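The one-character step fix above is easiest to check numerically: REGULATOR_LINEAR_RANGE(min_uV, min_sel, max_sel, step_uV) maps a selector to min_uV + (sel - min_sel) * step_uV, so a 1600 uV step tops out at 2806400 uV while the intended 16 mV step reaches 2864000 uV. A quick standalone check:

#include <stdio.h>

/* Selector-to-voltage math behind REGULATOR_LINEAR_RANGE(min_uV, min_sel,
 * max_sel, step_uV): voltage = min_uV + (sel - min_sel) * step_uV */
static unsigned int sel_to_uV(unsigned int min_uV, unsigned int min_sel,
			      unsigned int step_uV, unsigned int sel)
{
	return min_uV + (sel - min_sel) * step_uV;
}

int main(void)
{
	unsigned int sel;

	for (sel = 0; sel <= 4; sel++)
		printf("sel %u: old %7u uV, fixed %7u uV\n", sel,
		       sel_to_uV(2800000, 0, 1600, sel),
		       sel_to_uV(2800000, 0, 16000, sel));
	return 0;
}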
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index d99548fb5dde..9e7efe542f69 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -125,6 +125,18 @@ config KEYSTONE_REMOTEPROC
It's safe to say N here if you're not interested in the Keystone
DSPs or just want to use a bare minimum kernel.
+config PRU_REMOTEPROC
+ tristate "TI PRU remoteproc support"
+ depends on TI_PRUSS
+ default TI_PRUSS
+ help
+ Support for TI PRU remote processors present within a PRU-ICSS
+ subsystem via the remote processor framework.
+
+ Say Y or M here to support the Programmable Realtime Unit (PRU)
+ processors on various TI SoCs. It's safe to say N here if you're
+ not interested in the PRU or if you are unsure.
+
config QCOM_PIL_INFO
tristate
@@ -183,7 +195,7 @@ config QCOM_Q6V5_PAS
select QCOM_RPROC_COMMON
select QCOM_SCM
help
- Say y here to support the TrustZone based Peripherial Image Loader
+ Say y here to support the TrustZone based Peripheral Image Loader
for the Qualcomm Hexagon v5 based remote processors. This is commonly
used to control subsystems such as ADSP, Compute and Sensor.
diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile
index da2ace4ec86c..bb26c9e4ef9c 100644
--- a/drivers/remoteproc/Makefile
+++ b/drivers/remoteproc/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_OMAP_REMOTEPROC) += omap_remoteproc.o
obj-$(CONFIG_WKUP_M3_RPROC) += wkup_m3_rproc.o
obj-$(CONFIG_DA8XX_REMOTEPROC) += da8xx_remoteproc.o
obj-$(CONFIG_KEYSTONE_REMOTEPROC) += keystone_remoteproc.o
+obj-$(CONFIG_PRU_REMOTEPROC) += pru_rproc.o
obj-$(CONFIG_QCOM_PIL_INFO) += qcom_pil_info.o
obj-$(CONFIG_QCOM_RPROC_COMMON) += qcom_common.o
obj-$(CONFIG_QCOM_Q6V5_COMMON) += qcom_q6v5.o
diff --git a/drivers/remoteproc/ingenic_rproc.c b/drivers/remoteproc/ingenic_rproc.c
index 1c2b21a5d178..26e19e6143b7 100644
--- a/drivers/remoteproc/ingenic_rproc.c
+++ b/drivers/remoteproc/ingenic_rproc.c
@@ -135,7 +135,7 @@ static void *ingenic_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
return (__force void *)va;
}
-static struct rproc_ops ingenic_rproc_ops = {
+static const struct rproc_ops ingenic_rproc_ops = {
.prepare = ingenic_rproc_prepare,
.unprepare = ingenic_rproc_unprepare,
.start = ingenic_rproc_start,
diff --git a/drivers/remoteproc/mtk_common.h b/drivers/remoteproc/mtk_common.h
index 47b4561443a9..988edb4977c3 100644
--- a/drivers/remoteproc/mtk_common.h
+++ b/drivers/remoteproc/mtk_common.h
@@ -32,22 +32,22 @@
#define MT8183_SCP_CACHESIZE_8KB BIT(8)
#define MT8183_SCP_CACHE_CON_WAYEN BIT(10)
-#define MT8192_L2TCM_SRAM_PD_0 0x210C0
-#define MT8192_L2TCM_SRAM_PD_1 0x210C4
-#define MT8192_L2TCM_SRAM_PD_2 0x210C8
-#define MT8192_L1TCM_SRAM_PDN 0x2102C
-#define MT8192_CPU0_SRAM_PD 0x21080
-
-#define MT8192_SCP2APMCU_IPC_SET 0x24080
-#define MT8192_SCP2APMCU_IPC_CLR 0x24084
+#define MT8192_L2TCM_SRAM_PD_0 0x10C0
+#define MT8192_L2TCM_SRAM_PD_1 0x10C4
+#define MT8192_L2TCM_SRAM_PD_2 0x10C8
+#define MT8192_L1TCM_SRAM_PDN 0x102C
+#define MT8192_CPU0_SRAM_PD 0x1080
+
+#define MT8192_SCP2APMCU_IPC_SET 0x4080
+#define MT8192_SCP2APMCU_IPC_CLR 0x4084
#define MT8192_SCP_IPC_INT_BIT BIT(0)
-#define MT8192_SCP2SPM_IPC_CLR 0x24094
-#define MT8192_GIPC_IN_SET 0x24098
+#define MT8192_SCP2SPM_IPC_CLR 0x4094
+#define MT8192_GIPC_IN_SET 0x4098
#define MT8192_HOST_IPC_INT_BIT BIT(0)
-#define MT8192_CORE0_SW_RSTN_CLR 0x30000
-#define MT8192_CORE0_SW_RSTN_SET 0x30004
-#define MT8192_CORE0_WDT_CFG 0x30034
+#define MT8192_CORE0_SW_RSTN_CLR 0x10000
+#define MT8192_CORE0_SW_RSTN_SET 0x10004
+#define MT8192_CORE0_WDT_CFG 0x10034
#define SCP_FW_VER_LEN 32
#define SCP_SHARE_BUFFER_SIZE 288
@@ -78,6 +78,8 @@ struct mtk_scp_of_data {
u32 host_to_scp_reg;
u32 host_to_scp_int_bit;
+
+ size_t ipi_buf_offset;
};
struct mtk_scp {
@@ -99,7 +101,7 @@ struct mtk_scp {
bool ipi_id_ack[SCP_IPI_MAX];
wait_queue_head_t ack_wq;
- void __iomem *cpu_addr;
+ void *cpu_addr;
dma_addr_t dma_addr;
size_t dram_size;
diff --git a/drivers/remoteproc/mtk_scp.c b/drivers/remoteproc/mtk_scp.c
index 577cbd5d421e..e0c235690361 100644
--- a/drivers/remoteproc/mtk_scp.c
+++ b/drivers/remoteproc/mtk_scp.c
@@ -21,7 +21,7 @@
#include "remoteproc_internal.h"
#define MAX_CODE_SIZE 0x500000
-#define SCP_FW_END 0x7C000
+#define SECTION_NAME_IPI_BUFFER ".ipi_buffer"
/**
* scp_get() - get a reference to SCP.
@@ -119,16 +119,29 @@ static void scp_ipi_handler(struct mtk_scp *scp)
wake_up(&scp->ack_wq);
}
-static int scp_ipi_init(struct mtk_scp *scp)
+static int scp_elf_read_ipi_buf_addr(struct mtk_scp *scp,
+ const struct firmware *fw,
+ size_t *offset);
+
+static int scp_ipi_init(struct mtk_scp *scp, const struct firmware *fw)
{
- size_t send_offset = SCP_FW_END - sizeof(struct mtk_share_obj);
- size_t recv_offset = send_offset - sizeof(struct mtk_share_obj);
+ int ret;
+ size_t offset;
- /* shared buffer initialization */
- scp->recv_buf =
- (struct mtk_share_obj __iomem *)(scp->sram_base + recv_offset);
- scp->send_buf =
- (struct mtk_share_obj __iomem *)(scp->sram_base + send_offset);
+ /* read the ipi buf addr from FW itself first */
+ ret = scp_elf_read_ipi_buf_addr(scp, fw, &offset);
+ if (ret) {
+ /* use default ipi buf addr if the FW doesn't have it */
+ offset = scp->data->ipi_buf_offset;
+ if (!offset)
+ return ret;
+ }
+ dev_info(scp->dev, "IPI buf addr %#010zx\n", offset);
+
+ scp->recv_buf = (struct mtk_share_obj __iomem *)
+ (scp->sram_base + offset);
+ scp->send_buf = (struct mtk_share_obj __iomem *)
+ (scp->sram_base + offset + sizeof(*scp->recv_buf));
memset_io(scp->recv_buf, 0, sizeof(*scp->recv_buf));
memset_io(scp->send_buf, 0, sizeof(*scp->send_buf));
@@ -234,12 +247,14 @@ static int scp_elf_load_segments(struct rproc *rproc, const struct firmware *fw)
u32 offset = phdr->p_offset;
void __iomem *ptr;
- if (phdr->p_type != PT_LOAD)
- continue;
-
dev_dbg(dev, "phdr: type %d da 0x%x memsz 0x%x filesz 0x%x\n",
phdr->p_type, da, memsz, filesz);
+ if (phdr->p_type != PT_LOAD)
+ continue;
+ if (!filesz)
+ continue;
+
if (filesz > memsz) {
dev_err(dev, "bad phdr filesz 0x%x memsz 0x%x\n",
filesz, memsz);
@@ -263,14 +278,38 @@ static int scp_elf_load_segments(struct rproc *rproc, const struct firmware *fw)
}
/* put the segment where the remote processor expects it */
- if (phdr->p_filesz)
- scp_memcpy_aligned(ptr, elf_data + phdr->p_offset,
- filesz);
+ scp_memcpy_aligned(ptr, elf_data + phdr->p_offset, filesz);
}
return ret;
}
+static int scp_elf_read_ipi_buf_addr(struct mtk_scp *scp,
+ const struct firmware *fw,
+ size_t *offset)
+{
+ struct elf32_hdr *ehdr;
+ struct elf32_shdr *shdr, *shdr_strtab;
+ int i;
+ const u8 *elf_data = fw->data;
+ const char *strtab;
+
+ ehdr = (struct elf32_hdr *)elf_data;
+ shdr = (struct elf32_shdr *)(elf_data + ehdr->e_shoff);
+ shdr_strtab = shdr + ehdr->e_shstrndx;
+ strtab = (const char *)(elf_data + shdr_strtab->sh_offset);
+
+ for (i = 0; i < ehdr->e_shnum; i++, shdr++) {
+ if (strcmp(strtab + shdr->sh_name,
+ SECTION_NAME_IPI_BUFFER) == 0) {
+ *offset = shdr->sh_addr;
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+
static int mt8183_scp_before_load(struct mtk_scp *scp)
{
/* Clear SCP to host interrupt */
@@ -298,7 +337,7 @@ static int mt8183_scp_before_load(struct mtk_scp *scp)
return 0;
}
-static void mt8192_power_on_sram(void *addr)
+static void mt8192_power_on_sram(void __iomem *addr)
{
int i;
@@ -307,7 +346,7 @@ static void mt8192_power_on_sram(void *addr)
writel(0, addr);
}
-static void mt8192_power_off_sram(void *addr)
+static void mt8192_power_off_sram(void __iomem *addr)
{
int i;
@@ -350,14 +389,32 @@ static int scp_load(struct rproc *rproc, const struct firmware *fw)
ret = scp->data->scp_before_load(scp);
if (ret < 0)
- return ret;
+ goto leave;
ret = scp_elf_load_segments(rproc, fw);
+leave:
clk_disable_unprepare(scp->clk);
return ret;
}
+static int scp_parse_fw(struct rproc *rproc, const struct firmware *fw)
+{
+ struct mtk_scp *scp = rproc->priv;
+ struct device *dev = scp->dev;
+ int ret;
+
+ ret = clk_prepare_enable(scp->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable clocks\n");
+ return ret;
+ }
+
+ ret = scp_ipi_init(scp, fw);
+ clk_disable_unprepare(scp->clk);
+ return ret;
+}
+
static int scp_start(struct rproc *rproc)
{
struct mtk_scp *scp = (struct mtk_scp *)rproc->priv;
@@ -408,12 +465,12 @@ static void *scp_da_to_va(struct rproc *rproc, u64 da, size_t len)
if (da < scp->sram_size) {
offset = da;
- if (offset >= 0 && (offset + len) < scp->sram_size)
+ if (offset >= 0 && (offset + len) <= scp->sram_size)
return (void __force *)scp->sram_base + offset;
} else if (scp->dram_size) {
offset = da - scp->dma_addr;
- if (offset >= 0 && (offset + len) < scp->dram_size)
- return (void __force *)scp->cpu_addr + offset;
+ if (offset >= 0 && (offset + len) <= scp->dram_size)
+ return scp->cpu_addr + offset;
}
return NULL;
@@ -461,6 +518,7 @@ static const struct rproc_ops scp_ops = {
.stop = scp_stop,
.load = scp_load,
.da_to_va = scp_da_to_va,
+ .parse_fw = scp_parse_fw,
};
/**
@@ -680,19 +738,6 @@ static int scp_probe(struct platform_device *pdev)
goto release_dev_mem;
}
- ret = clk_prepare_enable(scp->clk);
- if (ret) {
- dev_err(dev, "failed to enable clocks\n");
- goto release_dev_mem;
- }
-
- ret = scp_ipi_init(scp);
- clk_disable_unprepare(scp->clk);
- if (ret) {
- dev_err(dev, "Failed to init ipi\n");
- goto release_dev_mem;
- }
-
/* register SCP initialization IPI */
ret = scp_ipi_register(scp, SCP_IPI_INIT, scp_init_ipi_handler, scp);
if (ret) {
@@ -760,6 +805,7 @@ static const struct mtk_scp_of_data mt8183_of_data = {
.scp_stop = mt8183_scp_stop,
.host_to_scp_reg = MT8183_HOST_TO_SCP,
.host_to_scp_int_bit = MT8183_HOST_IPC_INT_BIT,
+ .ipi_buf_offset = 0x7bdb0,
};
static const struct mtk_scp_of_data mt8192_of_data = {
@@ -784,7 +830,7 @@ static struct platform_driver mtk_scp_driver = {
.remove = scp_remove,
.driver = {
.name = "mtk-scp",
- .of_match_table = of_match_ptr(mtk_scp_of_match),
+ .of_match_table = mtk_scp_of_match,
},
};
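scp_elf_read_ipi_buf_addr() above is a plain ELF section-name scan: walk the section headers, resolve each sh_name through the section-header string table, and take the sh_addr of the matching section. A self-contained userspace sketch of the same walk, run against a minimal fake image (the real driver operates on fw->data supplied by request_firmware()):

#include <elf.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Walk the section headers, resolve each name through the section-header
 * string table, and return the sh_addr of the named section. Assumes a
 * well-formed native-endian ELF32 image already loaded in memory. */
static int find_section_addr(const uint8_t *elf, const char *wanted,
			     uint32_t *addr)
{
	const Elf32_Ehdr *ehdr = (const Elf32_Ehdr *)elf;
	const Elf32_Shdr *shdr = (const Elf32_Shdr *)(elf + ehdr->e_shoff);
	const char *strtab =
		(const char *)(elf + shdr[ehdr->e_shstrndx].sh_offset);
	int i;

	for (i = 0; i < ehdr->e_shnum; i++) {
		if (!strcmp(strtab + shdr[i].sh_name, wanted)) {
			*addr = shdr[i].sh_addr;
			return 0;
		}
	}
	return -1;	/* not found: caller falls back to a default */
}

int main(void)
{
	/* Minimal fake image: header, two section headers, a string table */
	static uint8_t img[2048];
	Elf32_Ehdr *eh = (Elf32_Ehdr *)img;
	Elf32_Shdr *sh = (Elf32_Shdr *)(img + sizeof(*eh));
	uint32_t addr = 0;

	eh->e_shoff = sizeof(*eh);
	eh->e_shnum = 2;
	eh->e_shstrndx = 1;		/* section 1 is the string table */
	sh[1].sh_offset = 1024;
	strcpy((char *)img + 1024 + 1, ".ipi_buffer");
	sh[0].sh_name = 1;
	sh[0].sh_addr = 0x7bdb0;

	if (!find_section_addr(img, ".ipi_buffer", &addr))
		printf("IPI buffer at %#x\n", addr);
	return 0;
}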
diff --git a/drivers/remoteproc/pru_rproc.c b/drivers/remoteproc/pru_rproc.c
new file mode 100644
index 000000000000..2667919d76b3
--- /dev/null
+++ b/drivers/remoteproc/pru_rproc.c
@@ -0,0 +1,875 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PRU-ICSS remoteproc driver for various TI SoCs
+ *
+ * Copyright (C) 2014-2020 Texas Instruments Incorporated - https://www.ti.com/
+ *
+ * Author(s):
+ * Suman Anna <s-anna@ti.com>
+ * Andrew F. Davis <afd@ti.com>
+ * Grzegorz Jaszczyk <grzegorz.jaszczyk@linaro.org> for Texas Instruments
+ */
+
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/pruss_driver.h>
+#include <linux/remoteproc.h>
+
+#include "remoteproc_internal.h"
+#include "remoteproc_elf_helpers.h"
+#include "pru_rproc.h"
+
+/* PRU_ICSS_PRU_CTRL registers */
+#define PRU_CTRL_CTRL 0x0000
+#define PRU_CTRL_STS 0x0004
+#define PRU_CTRL_WAKEUP_EN 0x0008
+#define PRU_CTRL_CYCLE 0x000C
+#define PRU_CTRL_STALL 0x0010
+#define PRU_CTRL_CTBIR0 0x0020
+#define PRU_CTRL_CTBIR1 0x0024
+#define PRU_CTRL_CTPPR0 0x0028
+#define PRU_CTRL_CTPPR1 0x002C
+
+/* CTRL register bit-fields */
+#define CTRL_CTRL_SOFT_RST_N BIT(0)
+#define CTRL_CTRL_EN BIT(1)
+#define CTRL_CTRL_SLEEPING BIT(2)
+#define CTRL_CTRL_CTR_EN BIT(3)
+#define CTRL_CTRL_SINGLE_STEP BIT(8)
+#define CTRL_CTRL_RUNSTATE BIT(15)
+
+/* PRU_ICSS_PRU_DEBUG registers */
+#define PRU_DEBUG_GPREG(x) (0x0000 + (x) * 4)
+#define PRU_DEBUG_CT_REG(x) (0x0080 + (x) * 4)
+
+/* PRU/RTU/Tx_PRU Core IRAM address masks */
+#define PRU_IRAM_ADDR_MASK 0x3ffff
+#define PRU0_IRAM_ADDR_MASK 0x34000
+#define PRU1_IRAM_ADDR_MASK 0x38000
+#define RTU0_IRAM_ADDR_MASK 0x4000
+#define RTU1_IRAM_ADDR_MASK 0x6000
+#define TX_PRU0_IRAM_ADDR_MASK 0xa000
+#define TX_PRU1_IRAM_ADDR_MASK 0xc000
+
+/* PRU device addresses for various types of PRU RAMs */
+#define PRU_IRAM_DA 0 /* Instruction RAM */
+#define PRU_PDRAM_DA 0 /* Primary Data RAM */
+#define PRU_SDRAM_DA 0x2000 /* Secondary Data RAM */
+#define PRU_SHRDRAM_DA 0x10000 /* Shared Data RAM */
+
+#define MAX_PRU_SYS_EVENTS 160
+
+/**
+ * enum pru_iomem - PRU core memory/register range identifiers
+ *
+ * @PRU_IOMEM_IRAM: PRU Instruction RAM range
+ * @PRU_IOMEM_CTRL: PRU Control register range
+ * @PRU_IOMEM_DEBUG: PRU Debug register range
+ * @PRU_IOMEM_MAX: just keep this one at the end
+ */
+enum pru_iomem {
+ PRU_IOMEM_IRAM = 0,
+ PRU_IOMEM_CTRL,
+ PRU_IOMEM_DEBUG,
+ PRU_IOMEM_MAX,
+};
+
+/**
+ * enum pru_type - PRU core type identifier
+ *
+ * @PRU_TYPE_PRU: Programmable Real-time Unit
+ * @PRU_TYPE_RTU: Auxiliary Programmable Real-Time Unit
+ * @PRU_TYPE_TX_PRU: Transmit Programmable Real-Time Unit
+ * @PRU_TYPE_MAX: just keep this one at the end
+ */
+enum pru_type {
+ PRU_TYPE_PRU = 0,
+ PRU_TYPE_RTU,
+ PRU_TYPE_TX_PRU,
+ PRU_TYPE_MAX,
+};
+
+/**
+ * struct pru_private_data - device data for a PRU core
+ * @type: type of the PRU core (PRU, RTU, Tx_PRU)
+ * @is_k3: flag used to identify the need for special load handling
+ */
+struct pru_private_data {
+ enum pru_type type;
+ unsigned int is_k3 : 1;
+};
+
+/**
+ * struct pru_rproc - PRU remoteproc structure
+ * @id: id of the PRU core within the PRUSS
+ * @dev: PRU core device pointer
+ * @pruss: back-reference to parent PRUSS structure
+ * @rproc: remoteproc pointer for this PRU core
+ * @data: PRU core specific data
+ * @mem_regions: data for each of the PRU memory regions
+ * @fw_name: name of firmware image used during loading
+ * @mapped_irq: virtual interrupt numbers of created fw-specific mappings
+ * @pru_interrupt_map: pointer to interrupt mapping description (firmware)
+ * @pru_interrupt_map_sz: pru_interrupt_map size
+ * @dbg_single_step: debug state variable to set PRU into single step mode
+ * @dbg_continuous: debug state variable to restore PRU execution mode
+ * @evt_count: number of mapped events
+ */
+struct pru_rproc {
+ int id;
+ struct device *dev;
+ struct pruss *pruss;
+ struct rproc *rproc;
+ const struct pru_private_data *data;
+ struct pruss_mem_region mem_regions[PRU_IOMEM_MAX];
+ const char *fw_name;
+ unsigned int *mapped_irq;
+ struct pru_irq_rsc *pru_interrupt_map;
+ size_t pru_interrupt_map_sz;
+ u32 dbg_single_step;
+ u32 dbg_continuous;
+ u8 evt_count;
+};
+
+static inline u32 pru_control_read_reg(struct pru_rproc *pru, unsigned int reg)
+{
+ return readl_relaxed(pru->mem_regions[PRU_IOMEM_CTRL].va + reg);
+}
+
+static inline
+void pru_control_write_reg(struct pru_rproc *pru, unsigned int reg, u32 val)
+{
+ writel_relaxed(val, pru->mem_regions[PRU_IOMEM_CTRL].va + reg);
+}
+
+static inline u32 pru_debug_read_reg(struct pru_rproc *pru, unsigned int reg)
+{
+ return readl_relaxed(pru->mem_regions[PRU_IOMEM_DEBUG].va + reg);
+}
+
+static int regs_show(struct seq_file *s, void *data)
+{
+ struct rproc *rproc = s->private;
+ struct pru_rproc *pru = rproc->priv;
+ int i, nregs = 32;
+ u32 pru_sts;
+ int pru_is_running;
+
+ seq_puts(s, "============== Control Registers ==============\n");
+ seq_printf(s, "CTRL := 0x%08x\n",
+ pru_control_read_reg(pru, PRU_CTRL_CTRL));
+ pru_sts = pru_control_read_reg(pru, PRU_CTRL_STS);
+ seq_printf(s, "STS (PC) := 0x%08x (0x%08x)\n", pru_sts, pru_sts << 2);
+ seq_printf(s, "WAKEUP_EN := 0x%08x\n",
+ pru_control_read_reg(pru, PRU_CTRL_WAKEUP_EN));
+ seq_printf(s, "CYCLE := 0x%08x\n",
+ pru_control_read_reg(pru, PRU_CTRL_CYCLE));
+ seq_printf(s, "STALL := 0x%08x\n",
+ pru_control_read_reg(pru, PRU_CTRL_STALL));
+ seq_printf(s, "CTBIR0 := 0x%08x\n",
+ pru_control_read_reg(pru, PRU_CTRL_CTBIR0));
+ seq_printf(s, "CTBIR1 := 0x%08x\n",
+ pru_control_read_reg(pru, PRU_CTRL_CTBIR1));
+ seq_printf(s, "CTPPR0 := 0x%08x\n",
+ pru_control_read_reg(pru, PRU_CTRL_CTPPR0));
+ seq_printf(s, "CTPPR1 := 0x%08x\n",
+ pru_control_read_reg(pru, PRU_CTRL_CTPPR1));
+
+ seq_puts(s, "=============== Debug Registers ===============\n");
+ pru_is_running = pru_control_read_reg(pru, PRU_CTRL_CTRL) &
+ CTRL_CTRL_RUNSTATE;
+ if (pru_is_running) {
+ seq_puts(s, "PRU is executing, cannot print/access debug registers.\n");
+ return 0;
+ }
+
+ for (i = 0; i < nregs; i++) {
+ seq_printf(s, "GPREG%-2d := 0x%08x\tCT_REG%-2d := 0x%08x\n",
+ i, pru_debug_read_reg(pru, PRU_DEBUG_GPREG(i)),
+ i, pru_debug_read_reg(pru, PRU_DEBUG_CT_REG(i)));
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(regs);
+
+/*
+ * Control PRU single-step mode
+ *
+ * This is a debug helper function used for controlling the single-step
+ * mode of the PRU. The PRU Debug registers are not accessible when the
+ * PRU is in RUNNING state.
+ *
+ * Writing a non-zero value sets the PRU into single-step mode irrespective
+ * of its previous state. The PRU mode is saved only when first entering
+ * single-step mode. Writing a zero value restores the PRU to its
+ * original mode.
+ */
+static int pru_rproc_debug_ss_set(void *data, u64 val)
+{
+ struct rproc *rproc = data;
+ struct pru_rproc *pru = rproc->priv;
+ u32 reg_val;
+
+ val = val ? 1 : 0;
+ if (!val && !pru->dbg_single_step)
+ return 0;
+
+ reg_val = pru_control_read_reg(pru, PRU_CTRL_CTRL);
+
+ if (val && !pru->dbg_single_step)
+ pru->dbg_continuous = reg_val;
+
+ if (val)
+ reg_val |= CTRL_CTRL_SINGLE_STEP | CTRL_CTRL_EN;
+ else
+ reg_val = pru->dbg_continuous;
+
+ pru->dbg_single_step = val;
+ pru_control_write_reg(pru, PRU_CTRL_CTRL, reg_val);
+
+ return 0;
+}
+
+static int pru_rproc_debug_ss_get(void *data, u64 *val)
+{
+ struct rproc *rproc = data;
+ struct pru_rproc *pru = rproc->priv;
+
+ *val = pru->dbg_single_step;
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(pru_rproc_debug_ss_fops, pru_rproc_debug_ss_get,
+ pru_rproc_debug_ss_set, "%llu\n");
+
+/*
+ * Create PRU-specific debugfs entries
+ *
+ * The entries are created only if the parent remoteproc debugfs directory
+ * exists, and will be cleaned up by the remoteproc core.
+ */
+static void pru_rproc_create_debug_entries(struct rproc *rproc)
+{
+ if (!rproc->dbg_dir)
+ return;
+
+ debugfs_create_file("regs", 0400, rproc->dbg_dir,
+ rproc, &regs_fops);
+ debugfs_create_file("single_step", 0600, rproc->dbg_dir,
+ rproc, &pru_rproc_debug_ss_fops);
+}
+
+static void pru_dispose_irq_mapping(struct pru_rproc *pru)
+{
+ while (pru->evt_count--) {
+ if (pru->mapped_irq[pru->evt_count] > 0)
+ irq_dispose_mapping(pru->mapped_irq[pru->evt_count]);
+ }
+
+ kfree(pru->mapped_irq);
+}
+
+/*
+ * Parse the custom PRU interrupt map resource and configure the INTC
+ * appropriately.
+ */
+static int pru_handle_intrmap(struct rproc *rproc)
+{
+ struct device *dev = rproc->dev.parent;
+ struct pru_rproc *pru = rproc->priv;
+ struct pru_irq_rsc *rsc = pru->pru_interrupt_map;
+ struct irq_fwspec fwspec;
+ struct device_node *irq_parent;
+ int i, ret = 0;
+
+ /* not having pru_interrupt_map is not an error */
+ if (!rsc)
+ return 0;
+
+ /* currently supporting only type 0 */
+ if (rsc->type != 0) {
+ dev_err(dev, "unsupported rsc type: %d\n", rsc->type);
+ return -EINVAL;
+ }
+
+ if (rsc->num_evts > MAX_PRU_SYS_EVENTS)
+ return -EINVAL;
+
+ if (sizeof(*rsc) + rsc->num_evts * sizeof(struct pruss_int_map) !=
+ pru->pru_interrupt_map_sz)
+ return -EINVAL;
+
+ pru->evt_count = rsc->num_evts;
+ pru->mapped_irq = kcalloc(pru->evt_count, sizeof(unsigned int),
+ GFP_KERNEL);
+ if (!pru->mapped_irq)
+ return -ENOMEM;
+
+ /*
+ * parse and fill in the system-event-to-channel and
+ * channel-to-host mappings
+ */
+ irq_parent = of_irq_find_parent(pru->dev->of_node);
+ if (!irq_parent) {
+ kfree(pru->mapped_irq);
+ return -ENODEV;
+ }
+
+ fwspec.fwnode = of_node_to_fwnode(irq_parent);
+ fwspec.param_count = 3;
+ for (i = 0; i < pru->evt_count; i++) {
+ fwspec.param[0] = rsc->pru_intc_map[i].event;
+ fwspec.param[1] = rsc->pru_intc_map[i].chnl;
+ fwspec.param[2] = rsc->pru_intc_map[i].host;
+
+ dev_dbg(dev, "mapping%d: event %d, chnl %d, host %d\n",
+ i, fwspec.param[0], fwspec.param[1], fwspec.param[2]);
+
+ pru->mapped_irq[i] = irq_create_fwspec_mapping(&fwspec);
+ if (!pru->mapped_irq[i]) {
+ dev_err(dev, "failed to get virq\n");
+ ret = -EINVAL;
+ goto map_fail;
+ }
+ }
+
+ return ret;
+
+map_fail:
+ pru_dispose_irq_mapping(pru);
+
+ return ret;
+}
+
+static int pru_rproc_start(struct rproc *rproc)
+{
+ struct device *dev = &rproc->dev;
+ struct pru_rproc *pru = rproc->priv;
+ const char *names[PRU_TYPE_MAX] = { "PRU", "RTU", "Tx_PRU" };
+ u32 val;
+ int ret;
+
+ dev_dbg(dev, "starting %s%d: entry-point = 0x%llx\n",
+ names[pru->data->type], pru->id, (rproc->bootaddr >> 2));
+
+ ret = pru_handle_intrmap(rproc);
+ /*
+ * reset references to pru interrupt map - they will stop being valid
+ * after rproc_start returns
+ */
+ pru->pru_interrupt_map = NULL;
+ pru->pru_interrupt_map_sz = 0;
+ if (ret)
+ return ret;
+
+ val = CTRL_CTRL_EN | ((rproc->bootaddr >> 2) << 16);
+ pru_control_write_reg(pru, PRU_CTRL_CTRL, val);
+
+ return 0;
+}
+
+static int pru_rproc_stop(struct rproc *rproc)
+{
+ struct device *dev = &rproc->dev;
+ struct pru_rproc *pru = rproc->priv;
+ const char *names[PRU_TYPE_MAX] = { "PRU", "RTU", "Tx_PRU" };
+ u32 val;
+
+ dev_dbg(dev, "stopping %s%d\n", names[pru->data->type], pru->id);
+
+ val = pru_control_read_reg(pru, PRU_CTRL_CTRL);
+ val &= ~CTRL_CTRL_EN;
+ pru_control_write_reg(pru, PRU_CTRL_CTRL, val);
+
+ /* dispose irq mapping - new firmware can provide new mapping */
+ if (pru->mapped_irq)
+ pru_dispose_irq_mapping(pru);
+
+ return 0;
+}
+
+/*
+ * Convert PRU device address (data spaces only) to kernel virtual address.
+ *
+ * Each PRU has access to all data memories within the PRUSS, accessible at
+ * different ranges. So, look through both its primary and secondary Data
+ * RAMs as well as any shared Data RAM to convert a PRU device address to
+ * kernel virtual address. Data RAM0 is primary Data RAM for PRU0 and Data
+ * RAM1 is primary Data RAM for PRU1.
+ */
+static void *pru_d_da_to_va(struct pru_rproc *pru, u32 da, size_t len)
+{
+ struct pruss_mem_region dram0, dram1, shrd_ram;
+ struct pruss *pruss = pru->pruss;
+ u32 offset;
+ void *va = NULL;
+
+ if (len == 0)
+ return NULL;
+
+ dram0 = pruss->mem_regions[PRUSS_MEM_DRAM0];
+ dram1 = pruss->mem_regions[PRUSS_MEM_DRAM1];
+ /* PRU1 has its local RAM addresses reversed */
+ if (pru->id == 1)
+ swap(dram0, dram1);
+ shrd_ram = pruss->mem_regions[PRUSS_MEM_SHRD_RAM2];
+
+ if (da >= PRU_PDRAM_DA && da + len <= PRU_PDRAM_DA + dram0.size) {
+ offset = da - PRU_PDRAM_DA;
+ va = (__force void *)(dram0.va + offset);
+ } else if (da >= PRU_SDRAM_DA &&
+ da + len <= PRU_SDRAM_DA + dram1.size) {
+ offset = da - PRU_SDRAM_DA;
+ va = (__force void *)(dram1.va + offset);
+ } else if (da >= PRU_SHRDRAM_DA &&
+ da + len <= PRU_SHRDRAM_DA + shrd_ram.size) {
+ offset = da - PRU_SHRDRAM_DA;
+ va = (__force void *)(shrd_ram.va + offset);
+ }
+
+ return va;
+}
+
+/*
+ * Convert PRU device address (instruction space) to kernel virtual address.
+ *
+ * A PRU does not have a unified address space. Each PRU has its own
+ * private Instruction RAM, whose device address is identical to that of
+ * its primary Data RAM.
+ */
+static void *pru_i_da_to_va(struct pru_rproc *pru, u32 da, size_t len)
+{
+ u32 offset;
+ void *va = NULL;
+
+ if (len == 0)
+ return NULL;
+
+ if (da >= PRU_IRAM_DA &&
+ da + len <= PRU_IRAM_DA + pru->mem_regions[PRU_IOMEM_IRAM].size) {
+ offset = da - PRU_IRAM_DA;
+ va = (__force void *)(pru->mem_regions[PRU_IOMEM_IRAM].va +
+ offset);
+ }
+
+ return va;
+}
+
+/*
+ * Provide address translations only for the PRU Data RAMs through the
+ * remoteproc core for any PRU client drivers. Access to the PRU Instruction
+ * RAM is restricted to the PRU loader code.
+ */
+static void *pru_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
+{
+ struct pru_rproc *pru = rproc->priv;
+
+ return pru_d_da_to_va(pru, da, len);
+}
+
+/* PRU-specific address translator used by PRU loader. */
+static void *pru_da_to_va(struct rproc *rproc, u64 da, size_t len, bool is_iram)
+{
+ struct pru_rproc *pru = rproc->priv;
+ void *va;
+
+ if (is_iram)
+ va = pru_i_da_to_va(pru, da, len);
+ else
+ va = pru_d_da_to_va(pru, da, len);
+
+ return va;
+}
+
+static struct rproc_ops pru_rproc_ops = {
+ .start = pru_rproc_start,
+ .stop = pru_rproc_stop,
+ .da_to_va = pru_rproc_da_to_va,
+};
+
+/*
+ * Custom memory copy implementation for ICSSG PRU/RTU/Tx_PRU Cores
+ *
+ * The ICSSG PRU/RTU/Tx_PRU cores have a memory copying issue with IRAM
+ * memories that is not seen on previous-generation SoCs. The data is reflected
+ * properly in the IRAM memories only for integer (4-byte) copies. Any unaligned
+ * copies result in all the other pre-existing bytes zeroed out within that
+ * 4-byte boundary, thereby resulting in wrong text/code in the IRAMs. Also, the
+ * IRAM memory port interface does not allow any 8-byte copies (as commonly used
+ * by ARM64 memcpy implementation) and throws an exception. The DRAM memory
+ * ports do not show this behavior.
+ */
+static int pru_rproc_memcpy(void *dest, const void *src, size_t count)
+{
+ const u32 *s = src;
+ u32 *d = dest;
+ size_t size = count / 4;
+ u32 *tmp_src = NULL;
+
+ /*
+ * TODO: relax limitation of 4-byte aligned dest addresses and copy
+ * sizes
+ */
+ if ((long)dest % 4 || count % 4)
+ return -EINVAL;
+
+ /* src offsets in ELF firmware image can be non-aligned */
+ if ((long)src % 4) {
+ tmp_src = kmemdup(src, count, GFP_KERNEL);
+ if (!tmp_src)
+ return -ENOMEM;
+ s = tmp_src;
+ }
+
+ while (size--)
+ *d++ = *s++;
+
+ kfree(tmp_src);
+
+ return 0;
+}
+
+static int
+pru_rproc_load_elf_segments(struct rproc *rproc, const struct firmware *fw)
+{
+ struct pru_rproc *pru = rproc->priv;
+ struct device *dev = &rproc->dev;
+ struct elf32_hdr *ehdr;
+ struct elf32_phdr *phdr;
+ int i, ret = 0;
+ const u8 *elf_data = fw->data;
+
+ ehdr = (struct elf32_hdr *)elf_data;
+ phdr = (struct elf32_phdr *)(elf_data + ehdr->e_phoff);
+
+ /* go through the available ELF segments */
+ for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
+ u32 da = phdr->p_paddr;
+ u32 memsz = phdr->p_memsz;
+ u32 filesz = phdr->p_filesz;
+ u32 offset = phdr->p_offset;
+ bool is_iram;
+ void *ptr;
+
+ if (phdr->p_type != PT_LOAD || !filesz)
+ continue;
+
+ dev_dbg(dev, "phdr: type %d da 0x%x memsz 0x%x filesz 0x%x\n",
+ phdr->p_type, da, memsz, filesz);
+
+ if (filesz > memsz) {
+ dev_err(dev, "bad phdr filesz 0x%x memsz 0x%x\n",
+ filesz, memsz);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (offset + filesz > fw->size) {
+ dev_err(dev, "truncated fw: need 0x%x avail 0x%zx\n",
+ offset + filesz, fw->size);
+ ret = -EINVAL;
+ break;
+ }
+
+ /* grab the kernel address for this device address */
+ is_iram = phdr->p_flags & PF_X;
+ ptr = pru_da_to_va(rproc, da, memsz, is_iram);
+ if (!ptr) {
+ dev_err(dev, "bad phdr da 0x%x mem 0x%x\n", da, memsz);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (pru->data->is_k3 && is_iram) {
+ ret = pru_rproc_memcpy(ptr, elf_data + phdr->p_offset,
+ filesz);
+ if (ret) {
+ dev_err(dev, "PRU memory copy failed for da 0x%x memsz 0x%x\n",
+ da, memsz);
+ break;
+ }
+ } else {
+ memcpy(ptr, elf_data + phdr->p_offset, filesz);
+ }
+
+ /* skip the memzero logic performed by remoteproc ELF loader */
+ }
+
+ return ret;
+}
+
+static const void *
+pru_rproc_find_interrupt_map(struct device *dev, const struct firmware *fw)
+{
+ struct elf32_shdr *shdr, *name_table_shdr;
+ const char *name_table;
+ const u8 *elf_data = fw->data;
+ struct elf32_hdr *ehdr = (struct elf32_hdr *)elf_data;
+ u16 shnum = ehdr->e_shnum;
+ u16 shstrndx = ehdr->e_shstrndx;
+ int i;
+
+ /* first, get the section header */
+ shdr = (struct elf32_shdr *)(elf_data + ehdr->e_shoff);
+ /* compute name table section header entry in shdr array */
+ name_table_shdr = shdr + shstrndx;
+ /* finally, compute the name table section address in elf */
+ name_table = elf_data + name_table_shdr->sh_offset;
+
+ for (i = 0; i < shnum; i++, shdr++) {
+ u32 size = shdr->sh_size;
+ u32 offset = shdr->sh_offset;
+ u32 name = shdr->sh_name;
+
+ if (strcmp(name_table + name, ".pru_irq_map"))
+ continue;
+
+ /* make sure we have the entire irq map */
+ if (offset + size > fw->size || offset + size < size) {
+ dev_err(dev, ".pru_irq_map section truncated\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* make sure irq map has at least the header */
+ if (sizeof(struct pru_irq_rsc) > size) {
+ dev_err(dev, "header-less .pru_irq_map section\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ return shdr;
+ }
+
+ dev_dbg(dev, "no .pru_irq_map section found for this fw\n");
+
+ return NULL;
+}
+
+/*
+ * Use a custom parse_fw callback function for dealing with PRU firmware
+ * specific sections.
+ *
+ * The firmware blob can contain two optional ELF sections: a .resource_table
+ * section and a .pru_irq_map section. The latter contains the PRUSS interrupt
+ * mapping description, which needs to be set up before powering on the PRU
+ * core. To avoid wasting RAM, this ELF section is not mapped to any ELF
+ * segment (by the firmware linker) and is therefore not loaded into PRU
+ * memory.
+ */
+static int pru_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
+{
+ struct device *dev = &rproc->dev;
+ struct pru_rproc *pru = rproc->priv;
+ const u8 *elf_data = fw->data;
+ const void *shdr;
+ u8 class = fw_elf_get_class(fw);
+ u64 sh_offset;
+ int ret;
+
+ /* load optional rsc table */
+ ret = rproc_elf_load_rsc_table(rproc, fw);
+ if (ret == -EINVAL)
+ dev_dbg(&rproc->dev, "no resource table found for this fw\n");
+ else if (ret)
+ return ret;
+
+ /* find the .pru_irq_map section; not having it is not an error */
+ shdr = pru_rproc_find_interrupt_map(dev, fw);
+ if (IS_ERR(shdr))
+ return PTR_ERR(shdr);
+
+ if (!shdr)
+ return 0;
+
+ /* preserve the pointer to the PRU interrupt map together with its size */
+ sh_offset = elf_shdr_get_sh_offset(class, shdr);
+ pru->pru_interrupt_map = (struct pru_irq_rsc *)(elf_data + sh_offset);
+ pru->pru_interrupt_map_sz = elf_shdr_get_sh_size(class, shdr);
+
+ return 0;
+}
+
+/*
+ * Compute PRU id based on the IRAM addresses. The PRU IRAMs are
+ * always at a particular offset within the PRUSS address space.
+ */
+static int pru_rproc_set_id(struct pru_rproc *pru)
+{
+ int ret = 0;
+
+ switch (pru->mem_regions[PRU_IOMEM_IRAM].pa & PRU_IRAM_ADDR_MASK) {
+ case TX_PRU0_IRAM_ADDR_MASK:
+ fallthrough;
+ case RTU0_IRAM_ADDR_MASK:
+ fallthrough;
+ case PRU0_IRAM_ADDR_MASK:
+ pru->id = 0;
+ break;
+ case TX_PRU1_IRAM_ADDR_MASK:
+ fallthrough;
+ case RTU1_IRAM_ADDR_MASK:
+ fallthrough;
+ case PRU1_IRAM_ADDR_MASK:
+ pru->id = 1;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int pru_rproc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct platform_device *ppdev = to_platform_device(dev->parent);
+ struct pru_rproc *pru;
+ const char *fw_name;
+ struct rproc *rproc = NULL;
+ struct resource *res;
+ int i, ret;
+ const struct pru_private_data *data;
+ const char *mem_names[PRU_IOMEM_MAX] = { "iram", "control", "debug" };
+
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data)
+ return -ENODEV;
+
+ ret = of_property_read_string(np, "firmware-name", &fw_name);
+ if (ret) {
+ dev_err(dev, "unable to retrieve firmware-name %d\n", ret);
+ return ret;
+ }
+
+ rproc = devm_rproc_alloc(dev, pdev->name, &pru_rproc_ops, fw_name,
+ sizeof(*pru));
+ if (!rproc) {
+ dev_err(dev, "rproc_alloc failed\n");
+ return -ENOMEM;
+ }
+ /* use a custom load function to deal with PRU-specific quirks */
+ rproc->ops->load = pru_rproc_load_elf_segments;
+
+ /* use a custom parse function to deal with PRU-specific resources */
+ rproc->ops->parse_fw = pru_rproc_parse_fw;
+
+ /* error recovery is not supported for PRUs */
+ rproc->recovery_disabled = true;
+
+ /*
+ * rproc_add will auto-boot the processor normally, but this is not
+ * desired with PRU client driven boot-flow methodology. A PRU
+ * application/client driver will boot the corresponding PRU
+ * remote-processor as part of its state machine either through the
+ * remoteproc sysfs interface or through the equivalent kernel API.
+ */
+ rproc->auto_boot = false;
+
+ pru = rproc->priv;
+ pru->dev = dev;
+ pru->data = data;
+ pru->pruss = platform_get_drvdata(ppdev);
+ pru->rproc = rproc;
+ pru->fw_name = fw_name;
+
+ for (i = 0; i < ARRAY_SIZE(mem_names); i++) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ mem_names[i]);
+ pru->mem_regions[i].va = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pru->mem_regions[i].va)) {
+ dev_err(dev, "failed to parse and map memory resource %d %s\n",
+ i, mem_names[i]);
+ ret = PTR_ERR(pru->mem_regions[i].va);
+ return ret;
+ }
+ pru->mem_regions[i].pa = res->start;
+ pru->mem_regions[i].size = resource_size(res);
+
+ dev_dbg(dev, "memory %8s: pa %pa size 0x%zx va %pK\n",
+ mem_names[i], &pru->mem_regions[i].pa,
+ pru->mem_regions[i].size, pru->mem_regions[i].va);
+ }
+
+ ret = pru_rproc_set_id(pru);
+ if (ret < 0)
+ return ret;
+
+ platform_set_drvdata(pdev, rproc);
+
+ ret = devm_rproc_add(dev, pru->rproc);
+ if (ret) {
+ dev_err(dev, "rproc_add failed: %d\n", ret);
+ return ret;
+ }
+
+ pru_rproc_create_debug_entries(rproc);
+
+ dev_dbg(dev, "PRU rproc node %pOF probed successfully\n", np);
+
+ return 0;
+}
+
+static int pru_rproc_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rproc *rproc = platform_get_drvdata(pdev);
+
+ dev_dbg(dev, "%s: removing rproc %s\n", __func__, rproc->name);
+
+ return 0;
+}
+
+static const struct pru_private_data pru_data = {
+ .type = PRU_TYPE_PRU,
+};
+
+static const struct pru_private_data k3_pru_data = {
+ .type = PRU_TYPE_PRU,
+ .is_k3 = 1,
+};
+
+static const struct pru_private_data k3_rtu_data = {
+ .type = PRU_TYPE_RTU,
+ .is_k3 = 1,
+};
+
+static const struct pru_private_data k3_tx_pru_data = {
+ .type = PRU_TYPE_TX_PRU,
+ .is_k3 = 1,
+};
+
+static const struct of_device_id pru_rproc_match[] = {
+ { .compatible = "ti,am3356-pru", .data = &pru_data },
+ { .compatible = "ti,am4376-pru", .data = &pru_data },
+ { .compatible = "ti,am5728-pru", .data = &pru_data },
+ { .compatible = "ti,k2g-pru", .data = &pru_data },
+ { .compatible = "ti,am654-pru", .data = &k3_pru_data },
+ { .compatible = "ti,am654-rtu", .data = &k3_rtu_data },
+ { .compatible = "ti,am654-tx-pru", .data = &k3_tx_pru_data },
+ { .compatible = "ti,j721e-pru", .data = &k3_pru_data },
+ { .compatible = "ti,j721e-rtu", .data = &k3_rtu_data },
+ { .compatible = "ti,j721e-tx-pru", .data = &k3_tx_pru_data },
+ {},
+};
+MODULE_DEVICE_TABLE(of, pru_rproc_match);
+
+static struct platform_driver pru_rproc_driver = {
+ .driver = {
+ .name = "pru-rproc",
+ .of_match_table = pru_rproc_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = pru_rproc_probe,
+ .remove = pru_rproc_remove,
+};
+module_platform_driver(pru_rproc_driver);
+
+MODULE_AUTHOR("Suman Anna <s-anna@ti.com>");
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_AUTHOR("Grzegorz Jaszczyk <grzegorz.jaszczyk@linaro.org>");
+MODULE_DESCRIPTION("PRU-ICSS Remote Processor Driver");
+MODULE_LICENSE("GPL v2");
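The word-by-word copy in pru_rproc_memcpy() is the load-bearing part of the ICSSG IRAM workaround: only whole 32-bit writes reach the IRAM port intact, and an unaligned source (a section offset inside the ELF image) is first duplicated into an aligned buffer. A userspace sketch of the same discipline, using malloc() in place of kmemdup():

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Copy in whole 32-bit words only; duplicate an unaligned source into an
 * aligned scratch buffer first, as pru_rproc_memcpy() does with kmemdup(). */
static int copy_words(void *dest, const void *src, size_t count)
{
	const uint32_t *s = src;
	uint32_t *d = dest;
	size_t words = count / 4;
	uint32_t *tmp = NULL;

	if ((uintptr_t)dest % 4 || count % 4)
		return -EINVAL;		/* dest and size must be word-aligned */

	if ((uintptr_t)src % 4) {	/* ELF offsets can be unaligned */
		tmp = malloc(count);
		if (!tmp)
			return -ENOMEM;
		memcpy(tmp, src, count);
		s = tmp;
	}

	while (words--)
		*d++ = *s++;		/* one full 32-bit write per word */

	free(tmp);
	return 0;
}

int main(void)
{
	uint32_t dst[2] = { 0, 0 };
	static const uint8_t src[9] = { 0, 1, 2, 3, 4, 5, 6, 7, 8 };

	/* src + 1 is deliberately unaligned; dest and size are word-sized */
	return copy_words(dst, src + 1, sizeof(dst));
}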
diff --git a/drivers/remoteproc/pru_rproc.h b/drivers/remoteproc/pru_rproc.h
new file mode 100644
index 000000000000..8ee9c3171610
--- /dev/null
+++ b/drivers/remoteproc/pru_rproc.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * PRUSS Remote Processor specific types
+ *
+ * Copyright (C) 2014-2020 Texas Instruments Incorporated - https://www.ti.com/
+ * Suman Anna <s-anna@ti.com>
+ */
+
+#ifndef _PRU_RPROC_H_
+#define _PRU_RPROC_H_
+
+/**
+ * struct pruss_int_map - PRU system events _to_ channel and host mapping
+ * @event: number of the system event
+ * @chnl: channel number assigned to a given @event
+ * @host: host number assigned to a given @chnl
+ *
+ * PRU system events are mapped to channels, and these channels are mapped
+ * to host interrupts. Events can be mapped to channels in a one-to-one or
+ * many-to-one ratio (multiple events per channel), and channels can be
+ * mapped to host interrupts in a one-to-one or many-to-one ratio (multiple
+ * channels per interrupt).
+ */
+struct pruss_int_map {
+ u8 event;
+ u8 chnl;
+ u8 host;
+};
+
+/**
+ * struct pru_irq_rsc - PRU firmware section header for IRQ data
+ * @type: resource type
+ * @num_evts: number of described events
+ * @pru_intc_map: PRU interrupt routing description
+ *
+ * The PRU firmware blob can contain an optional .pru_irq_map ELF section,
+ * which provides the PRUSS interrupt mapping description. The pru_irq_rsc
+ * struct describes the resource entry format.
+ */
+struct pru_irq_rsc {
+ u8 type;
+ u8 num_evts;
+ struct pruss_int_map pru_intc_map[];
+} __packed;
+
+#endif /* _PRU_RPROC_H_ */
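pru_handle_intrmap() validates this firmware-provided structure before trusting it: num_evts is bounded by MAX_PRU_SYS_EVENTS and the section size must equal the header plus exactly num_evts map entries, so a malformed .pru_irq_map section cannot cause an out-of-bounds walk. A compact userspace sketch of that check:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct int_map { uint8_t event, chnl, host; };

struct irq_rsc {
	uint8_t type;
	uint8_t num_evts;
	struct int_map map[];	/* flexible array, sized by num_evts */
} __attribute__((packed));

/* The section length must equal the header plus exactly num_evts map
 * entries, and num_evts must be bounded, before the table is walked. */
static int rsc_valid(const struct irq_rsc *rsc, size_t section_sz,
		     unsigned int max_evts)
{
	if (rsc->num_evts > max_evts)
		return 0;
	return sizeof(*rsc) +
	       (size_t)rsc->num_evts * sizeof(struct int_map) == section_sz;
}

int main(void)
{
	static const struct irq_rsc hdr = { .type = 0, .num_evts = 2 };

	/* a 2-event resource must be exactly 2 + 2 * 3 = 8 bytes long */
	printf("8 bytes valid: %d\n", rsc_valid(&hdr, 8, 160));
	printf("9 bytes valid: %d\n", rsc_valid(&hdr, 9, 160));
	return 0;
}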
diff --git a/drivers/remoteproc/qcom_common.c b/drivers/remoteproc/qcom_common.c
index 085fd73fa23a..4b91e3c9eafa 100644
--- a/drivers/remoteproc/qcom_common.c
+++ b/drivers/remoteproc/qcom_common.c
@@ -17,6 +17,7 @@
#include <linux/rpmsg/qcom_smd.h>
#include <linux/slab.h>
#include <linux/soc/qcom/mdt_loader.h>
+#include <linux/soc/qcom/smem.h>
#include "remoteproc_internal.h"
#include "qcom_common.h"
@@ -25,6 +26,61 @@
#define to_smd_subdev(d) container_of(d, struct qcom_rproc_subdev, subdev)
#define to_ssr_subdev(d) container_of(d, struct qcom_rproc_ssr, subdev)
+#define MAX_NUM_OF_SS 10
+#define MAX_REGION_NAME_LENGTH 16
+#define SBL_MINIDUMP_SMEM_ID 602
+#define MD_REGION_VALID ('V' << 24 | 'A' << 16 | 'L' << 8 | 'I' << 0)
+#define MD_SS_ENCR_DONE ('D' << 24 | 'O' << 16 | 'N' << 8 | 'E' << 0)
+#define MD_SS_ENABLED ('E' << 24 | 'N' << 16 | 'B' << 8 | 'L' << 0)
+
+/**
+ * struct minidump_region - Minidump region
+ * @name : Name of the region to be dumped
+ * @seq_num : Used to differentiate regions with the same name
+ * @valid : This entry is dumped if set to 1
+ * @address : Physical address of region to be dumped
+ * @size : Size of the region
+ */
+struct minidump_region {
+ char name[MAX_REGION_NAME_LENGTH];
+ __le32 seq_num;
+ __le32 valid;
+ __le64 address;
+ __le64 size;
+};
+
+/**
+ * struct minidump_subsystem - Subsystem's SMEM Table of Contents
+ * @status : Subsystem ToC init status
+ * @enabled : If set to 1, this subsystem's regions are copied during coredump
+ * @encryption_status: Encryption status for this subsystem
+ * @encryption_required : Whether the subsystem regions are to be encrypted
+ * @region_count : Number of regions added in this subsystem ToC
+ * @regions_baseptr : Base pointer of the subsystem's regions
+ */
+struct minidump_subsystem {
+ __le32 status;
+ __le32 enabled;
+ __le32 encryption_status;
+ __le32 encryption_required;
+ __le32 region_count;
+ __le64 regions_baseptr;
+};
+
+/**
+ * struct minidump_global_toc - Global Table of Contents
+ * @status : Global minidump init status
+ * @md_revision : Minidump revision
+ * @enabled : Minidump enable status
+ * @subsystems : Array of subsystem ToCs
+ */
+struct minidump_global_toc {
+ __le32 status;
+ __le32 md_revision;
+ __le32 enabled;
+ struct minidump_subsystem subsystems[MAX_NUM_OF_SS];
+};
+
struct qcom_ssr_subsystem {
const char *name;
struct srcu_notifier_head notifier_list;
@@ -34,6 +90,96 @@ struct qcom_ssr_subsystem {
static LIST_HEAD(qcom_ssr_subsystem_list);
static DEFINE_MUTEX(qcom_ssr_subsys_lock);
+static void qcom_minidump_cleanup(struct rproc *rproc)
+{
+ struct rproc_dump_segment *entry, *tmp;
+
+ list_for_each_entry_safe(entry, tmp, &rproc->dump_segments, node) {
+ list_del(&entry->node);
+ kfree(entry->priv);
+ kfree(entry);
+ }
+}
+
+static int qcom_add_minidump_segments(struct rproc *rproc, struct minidump_subsystem *subsystem)
+{
+ struct minidump_region __iomem *ptr;
+ struct minidump_region region;
+ int seg_cnt, i;
+ dma_addr_t da;
+ size_t size;
+ char *name;
+
+ if (WARN_ON(!list_empty(&rproc->dump_segments))) {
+ dev_err(&rproc->dev, "dump segment list already populated\n");
+ return -EUCLEAN;
+ }
+
+ seg_cnt = le32_to_cpu(subsystem->region_count);
+ ptr = ioremap((unsigned long)le64_to_cpu(subsystem->regions_baseptr),
+ seg_cnt * sizeof(struct minidump_region));
+ if (!ptr)
+ return -EFAULT;
+
+ for (i = 0; i < seg_cnt; i++) {
+ memcpy_fromio(&region, ptr + i, sizeof(region));
+ if (region.valid == MD_REGION_VALID) {
+ name = kstrdup(region.name, GFP_KERNEL);
+ if (!name) {
+ iounmap(ptr);
+ return -ENOMEM;
+ }
+ da = le64_to_cpu(region.address);
+ size = le32_to_cpu(region.size);
+ rproc_coredump_add_custom_segment(rproc, da, size, NULL, name);
+ }
+ }
+
+ iounmap(ptr);
+ return 0;
+}
+
+void qcom_minidump(struct rproc *rproc, unsigned int minidump_id)
+{
+ int ret;
+ struct minidump_subsystem *subsystem;
+ struct minidump_global_toc *toc;
+
+ /* Get the global minidump ToC */
+ toc = qcom_smem_get(QCOM_SMEM_HOST_ANY, SBL_MINIDUMP_SMEM_ID, NULL);
+
+ /* check if global table pointer exists and init is set */
+ if (IS_ERR(toc) || !toc->status) {
+ dev_err(&rproc->dev, "Minidump TOC not found in SMEM\n");
+ return;
+ }
+
+ /* Get subsystem table of contents using the minidump id */
+ subsystem = &toc->subsystems[minidump_id];
+
+ /*
+ * Collect the minidump only if the subsystem ToC is valid, the segment
+ * table is initialized in memory, and the encryption status is set.
+ */
+ if (subsystem->regions_baseptr == 0 ||
+ le32_to_cpu(subsystem->status) != 1 ||
+ le32_to_cpu(subsystem->enabled) != MD_SS_ENABLED ||
+ le32_to_cpu(subsystem->encryption_status) != MD_SS_ENCR_DONE) {
+ dev_err(&rproc->dev, "Minidump not ready, skipping\n");
+ return;
+ }
+
+ ret = qcom_add_minidump_segments(rproc, subsystem);
+ if (ret) {
+ dev_err(&rproc->dev, "Failed with error: %d while adding minidump entries\n", ret);
+ goto clean_minidump;
+ }
+ rproc_coredump_using_sections(rproc);
+clean_minidump:
+ qcom_minidump_cleanup(rproc);
+}
+EXPORT_SYMBOL_GPL(qcom_minidump);
+
static int glink_subdev_start(struct rproc_subdev *subdev)
{
struct qcom_rproc_glink *glink = to_glink_subdev(subdev);
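All the minidump tables above live in SMEM with a fixed little-endian layout (__le32/__le64 fields), which is why every field goes through le32_to_cpu()/le64_to_cpu() before use and the region descriptors are copied out with memcpy_fromio() rather than dereferenced in place. A portable userspace analogue of the 32-bit conversion, using the MD_SS_ENABLED magic as the test value:

#include <stdint.h>
#include <stdio.h>

/* Portable userspace analogue of le32_to_cpu(): assemble the value from
 * explicitly ordered bytes instead of trusting the host byte order. */
static uint32_t le32_to_host(const uint8_t *p)
{
	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

int main(void)
{
	/* MD_SS_ENABLED ('E' 'N' 'B' 'L') as it sits in little-endian SMEM */
	static const uint8_t raw[4] = { 'L', 'B', 'N', 'E' };

	printf("%#010x\n", le32_to_host(raw));	/* prints 0x454e424c */
	return 0;
}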
diff --git a/drivers/remoteproc/qcom_common.h b/drivers/remoteproc/qcom_common.h
index dfc641c3a98b..c35adf730be0 100644
--- a/drivers/remoteproc/qcom_common.h
+++ b/drivers/remoteproc/qcom_common.h
@@ -33,6 +33,8 @@ struct qcom_rproc_ssr {
struct qcom_ssr_subsystem *info;
};
+void qcom_minidump(struct rproc *rproc, unsigned int minidump_id);
+
void qcom_add_glink_subdev(struct rproc *rproc, struct qcom_rproc_glink *glink,
const char *ssr_name);
void qcom_remove_glink_subdev(struct rproc *rproc, struct qcom_rproc_glink *glink);
@@ -51,6 +53,7 @@ struct qcom_sysmon *qcom_add_sysmon_subdev(struct rproc *rproc,
const char *name,
int ssctl_instance);
void qcom_remove_sysmon_subdev(struct qcom_sysmon *sysmon);
+bool qcom_sysmon_shutdown_acked(struct qcom_sysmon *sysmon);
#else
static inline struct qcom_sysmon *qcom_add_sysmon_subdev(struct rproc *rproc,
const char *name,
@@ -62,6 +65,11 @@ static inline struct qcom_sysmon *qcom_add_sysmon_subdev(struct rproc *rproc,
static inline void qcom_remove_sysmon_subdev(struct qcom_sysmon *sysmon)
{
}
+
+static inline bool qcom_sysmon_shutdown_acked(struct qcom_sysmon *sysmon)
+{
+ return false;
+}
#endif
#endif
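qcom_sysmon_shutdown_acked() follows the usual pattern for optional kernel subsystems: a real declaration when sysmon is built in, and a static inline stub returning a safe default otherwise, so callers such as qcom_q6v5_request_stop() need no #ifdef. A minimal sketch of the pattern with a hypothetical HAVE_SYSMON switch:

#include <stdbool.h>
#include <stdio.h>

struct sysmon;				/* opaque to callers either way */

#ifdef HAVE_SYSMON
bool sysmon_shutdown_acked(struct sysmon *s);	/* real implementation */
#else
/* Stub: report "not acked" so callers fall back to the explicit stop path */
static inline bool sysmon_shutdown_acked(struct sysmon *s)
{
	(void)s;
	return false;
}
#endif

int main(void)
{
	printf("acked: %d\n", sysmon_shutdown_acked(NULL));
	return 0;
}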
diff --git a/drivers/remoteproc/qcom_q6v5.c b/drivers/remoteproc/qcom_q6v5.c
index fd6fd36268d9..9627a950928e 100644
--- a/drivers/remoteproc/qcom_q6v5.c
+++ b/drivers/remoteproc/qcom_q6v5.c
@@ -13,6 +13,7 @@
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>
#include <linux/remoteproc.h>
+#include "qcom_common.h"
#include "qcom_q6v5.h"
#define Q6V5_PANIC_DELAY_MS 200
@@ -146,15 +147,20 @@ static irqreturn_t q6v5_stop_interrupt(int irq, void *data)
/**
* qcom_q6v5_request_stop() - request the remote processor to stop
* @q6v5: reference to qcom_q6v5 context
+ * @sysmon: reference to the remote's sysmon instance, or NULL
*
* Return: 0 on success, negative errno on failure
*/
-int qcom_q6v5_request_stop(struct qcom_q6v5 *q6v5)
+int qcom_q6v5_request_stop(struct qcom_q6v5 *q6v5, struct qcom_sysmon *sysmon)
{
int ret;
q6v5->running = false;
+ /* Don't perform SMP2P dance if sysmon already shut down the remote */
+ if (qcom_sysmon_shutdown_acked(sysmon))
+ return 0;
+
qcom_smem_state_update_bits(q6v5->state,
BIT(q6v5->stop_bit), BIT(q6v5->stop_bit));
diff --git a/drivers/remoteproc/qcom_q6v5.h b/drivers/remoteproc/qcom_q6v5.h
index c4ed887c1499..1c212f670cbc 100644
--- a/drivers/remoteproc/qcom_q6v5.h
+++ b/drivers/remoteproc/qcom_q6v5.h
@@ -8,6 +8,7 @@
struct rproc;
struct qcom_smem_state;
+struct qcom_sysmon;
struct qcom_q6v5 {
struct device *dev;
@@ -40,7 +41,7 @@ int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev,
int qcom_q6v5_prepare(struct qcom_q6v5 *q6v5);
int qcom_q6v5_unprepare(struct qcom_q6v5 *q6v5);
-int qcom_q6v5_request_stop(struct qcom_q6v5 *q6v5);
+int qcom_q6v5_request_stop(struct qcom_q6v5 *q6v5, struct qcom_sysmon *sysmon);
int qcom_q6v5_wait_for_start(struct qcom_q6v5 *q6v5, int timeout);
unsigned long qcom_q6v5_panic(struct qcom_q6v5 *q6v5);
diff --git a/drivers/remoteproc/qcom_q6v5_adsp.c b/drivers/remoteproc/qcom_q6v5_adsp.c
index efb2c1aa80a3..e02450225e4a 100644
--- a/drivers/remoteproc/qcom_q6v5_adsp.c
+++ b/drivers/remoteproc/qcom_q6v5_adsp.c
@@ -193,8 +193,10 @@ static int adsp_start(struct rproc *rproc)
dev_pm_genpd_set_performance_state(adsp->dev, INT_MAX);
ret = pm_runtime_get_sync(adsp->dev);
- if (ret)
+ if (ret) {
+ pm_runtime_put_noidle(adsp->dev);
goto disable_xo_clk;
+ }
ret = clk_bulk_prepare_enable(adsp->num_clks, adsp->clks);
if (ret) {
@@ -264,7 +266,7 @@ static int adsp_stop(struct rproc *rproc)
int handover;
int ret;
- ret = qcom_q6v5_request_stop(&adsp->q6v5);
+ ret = qcom_q6v5_request_stop(&adsp->q6v5, adsp->sysmon);
if (ret == -ETIMEDOUT)
dev_err(adsp->dev, "timed out on wait\n");
@@ -362,15 +364,12 @@ static int adsp_init_mmio(struct qcom_adsp *adsp,
struct platform_device *pdev)
{
struct device_node *syscon;
- struct resource *res;
int ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- adsp->qdsp6ss_base = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!adsp->qdsp6ss_base) {
+ adsp->qdsp6ss_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(adsp->qdsp6ss_base)) {
dev_err(adsp->dev, "failed to map QDSP6SS registers\n");
- return -ENOMEM;
+ return PTR_ERR(adsp->qdsp6ss_base);
}
syscon = of_parse_phandle(pdev->dev.of_node, "qcom,halt-regs", 0);
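The adsp_start() fix above addresses a well-known runtime-PM pitfall: pm_runtime_get_sync() increments the device's usage count even when it fails, so the error path must drop that reference with pm_runtime_put_noidle() or the count leaks. A userspace sketch of the same reference discipline, with a plain counter standing in for the runtime-PM core:

#include <errno.h>
#include <stdio.h>

static int usage_count;
static int should_fail = 1;

/* Mimics pm_runtime_get_sync(): the count goes up even on failure. */
static int get_sync(void)
{
	usage_count++;
	return should_fail ? -EACCES : 0;
}

/* Mimics pm_runtime_put_noidle(): drop the reference, nothing else. */
static void put_noidle(void)
{
	usage_count--;
}

int main(void)
{
	if (get_sync())
		put_noidle();	/* without this, the count leaks on error */

	printf("usage count after failed get: %d\n", usage_count);
	return 0;
}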
diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
index eb3457a6c3b7..66106ba25ba3 100644
--- a/drivers/remoteproc/qcom_q6v5_mss.c
+++ b/drivers/remoteproc/qcom_q6v5_mss.c
@@ -132,6 +132,7 @@ struct qcom_mss_reg_res {
struct rproc_hexagon_res {
const char *hexagon_mba_image;
struct qcom_mss_reg_res *proxy_supply;
+ struct qcom_mss_reg_res *fallback_proxy_supply;
struct qcom_mss_reg_res *active_supply;
char **proxy_clk_names;
char **reset_clk_names;
@@ -177,16 +178,17 @@ struct q6v5 {
int proxy_pd_count;
struct reg_info active_regs[1];
- struct reg_info proxy_regs[3];
+ struct reg_info proxy_regs[1];
+ struct reg_info fallback_proxy_regs[2];
int active_reg_count;
int proxy_reg_count;
+ int fallback_proxy_reg_count;
bool dump_mba_loaded;
size_t current_dump_size;
size_t total_dump_size;
phys_addr_t mba_phys;
- void *mba_region;
size_t mba_size;
size_t dp_size;
@@ -349,8 +351,11 @@ static int q6v5_pds_enable(struct q6v5 *qproc, struct device **pds,
for (i = 0; i < pd_count; i++) {
dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
ret = pm_runtime_get_sync(pds[i]);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(pds[i]);
+ dev_pm_genpd_set_performance_state(pds[i], 0);
goto unroll_pd_votes;
+ }
}
return 0;
@@ -405,7 +410,7 @@ static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm,
current_perm, next, perms);
}
-static void q6v5_debug_policy_load(struct q6v5 *qproc)
+static void q6v5_debug_policy_load(struct q6v5 *qproc, void *mba_region)
{
const struct firmware *dp_fw;
@@ -413,7 +418,7 @@ static void q6v5_debug_policy_load(struct q6v5 *qproc)
return;
if (SZ_1M + dp_fw->size <= qproc->mba_size) {
- memcpy(qproc->mba_region + SZ_1M, dp_fw->data, dp_fw->size);
+ memcpy(mba_region + SZ_1M, dp_fw->data, dp_fw->size);
qproc->dp_size = dp_fw->size;
}
@@ -423,6 +428,7 @@ static void q6v5_debug_policy_load(struct q6v5 *qproc)
static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
{
struct q6v5 *qproc = rproc->priv;
+ void *mba_region;
/* MBA is restricted to a maximum size of 1M */
if (fw->size > qproc->mba_size || fw->size > SZ_1M) {
@@ -430,8 +436,16 @@ static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
return -EINVAL;
}
- memcpy(qproc->mba_region, fw->data, fw->size);
- q6v5_debug_policy_load(qproc);
+ mba_region = memremap(qproc->mba_phys, qproc->mba_size, MEMREMAP_WC);
+ if (!mba_region) {
+ dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
+ &qproc->mba_phys, qproc->mba_size);
+ return -EBUSY;
+ }
+
+ memcpy(mba_region, fw->data, fw->size);
+ q6v5_debug_policy_load(qproc, mba_region);
+ memunmap(mba_region);
return 0;
}
@@ -538,6 +552,7 @@ static void q6v5_dump_mba_logs(struct q6v5 *qproc)
{
struct rproc *rproc = qproc->rproc;
void *data;
+ void *mba_region;
if (!qproc->has_mba_logs)
return;
@@ -546,12 +561,16 @@ static void q6v5_dump_mba_logs(struct q6v5 *qproc)
qproc->mba_size))
return;
- data = vmalloc(MBA_LOG_SIZE);
- if (!data)
+ mba_region = memremap(qproc->mba_phys, qproc->mba_size, MEMREMAP_WC);
+ if (!mba_region)
return;
- memcpy(data, qproc->mba_region, MBA_LOG_SIZE);
- dev_coredumpv(&rproc->dev, data, MBA_LOG_SIZE, GFP_KERNEL);
+ data = vmalloc(MBA_LOG_SIZE);
+ if (data) {
+ memcpy(data, mba_region, MBA_LOG_SIZE);
+ dev_coredumpv(&rproc->dev, data, MBA_LOG_SIZE, GFP_KERNEL);
+ }
+ memunmap(mba_region);
}
static int q6v5proc_reset(struct q6v5 *qproc)
@@ -890,11 +909,18 @@ static int q6v5_mba_load(struct q6v5 *qproc)
goto disable_active_pds;
}
+ ret = q6v5_regulator_enable(qproc, qproc->fallback_proxy_regs,
+ qproc->fallback_proxy_reg_count);
+ if (ret) {
+ dev_err(qproc->dev, "failed to enable fallback proxy supplies\n");
+ goto disable_proxy_pds;
+ }
+
ret = q6v5_regulator_enable(qproc, qproc->proxy_regs,
qproc->proxy_reg_count);
if (ret) {
dev_err(qproc->dev, "failed to enable proxy supplies\n");
- goto disable_proxy_pds;
+ goto disable_fallback_proxy_reg;
}
ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks,
@@ -1008,6 +1034,9 @@ disable_proxy_clk:
disable_proxy_reg:
q6v5_regulator_disable(qproc, qproc->proxy_regs,
qproc->proxy_reg_count);
+disable_fallback_proxy_reg:
+ q6v5_regulator_disable(qproc, qproc->fallback_proxy_regs,
+ qproc->fallback_proxy_reg_count);
disable_proxy_pds:
q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
disable_active_pds:
@@ -1063,6 +1092,8 @@ static void q6v5_mba_reclaim(struct q6v5 *qproc)
qproc->proxy_pd_count);
q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
qproc->proxy_clk_count);
+ q6v5_regulator_disable(qproc, qproc->fallback_proxy_regs,
+ qproc->fallback_proxy_reg_count);
q6v5_regulator_disable(qproc, qproc->proxy_regs,
qproc->proxy_reg_count);
}
@@ -1179,7 +1210,7 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
goto release_firmware;
}
- ptr = ioremap_wc(qproc->mpss_phys + offset, phdr->p_memsz);
+ ptr = memremap(qproc->mpss_phys + offset, phdr->p_memsz, MEMREMAP_WC);
if (!ptr) {
dev_err(qproc->dev,
"unable to map memory region: %pa+%zx-%x\n",
@@ -1194,7 +1225,7 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
"failed to load segment %d from truncated file %s\n",
i, fw_name);
ret = -EINVAL;
- iounmap(ptr);
+ memunmap(ptr);
goto release_firmware;
}
@@ -1206,7 +1237,7 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
ptr, phdr->p_filesz);
if (ret) {
dev_err(qproc->dev, "failed to load %s\n", fw_name);
- iounmap(ptr);
+ memunmap(ptr);
goto release_firmware;
}
@@ -1217,7 +1248,7 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
memset(ptr + phdr->p_filesz, 0,
phdr->p_memsz - phdr->p_filesz);
}
- iounmap(ptr);
+ memunmap(ptr);
size += phdr->p_memsz;
code_length = readl(qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
@@ -1284,11 +1315,11 @@ static void qcom_q6v5_dump_segment(struct rproc *rproc,
}
if (!ret)
- ptr = ioremap_wc(qproc->mpss_phys + offset + cp_offset, size);
+ ptr = memremap(qproc->mpss_phys + offset + cp_offset, size, MEMREMAP_WC);
if (ptr) {
memcpy(dest, ptr, size);
- iounmap(ptr);
+ memunmap(ptr);
} else {
memset(dest, 0xff, size);
}
@@ -1355,7 +1386,7 @@ static int q6v5_stop(struct rproc *rproc)
struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
int ret;
- ret = qcom_q6v5_request_stop(&qproc->q6v5);
+ ret = qcom_q6v5_request_stop(&qproc->q6v5, qproc->sysmon);
if (ret == -ETIMEDOUT)
dev_err(qproc->dev, "timed out on wait\n");
@@ -1423,6 +1454,8 @@ static void qcom_msa_handover(struct qcom_q6v5 *q6v5)
qproc->proxy_clk_count);
q6v5_regulator_disable(qproc, qproc->proxy_regs,
qproc->proxy_reg_count);
+ q6v5_regulator_disable(qproc, qproc->fallback_proxy_regs,
+ qproc->fallback_proxy_reg_count);
q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
}
@@ -1588,12 +1621,6 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc)
qproc->mba_phys = r.start;
qproc->mba_size = resource_size(&r);
- qproc->mba_region = devm_ioremap_wc(qproc->dev, qproc->mba_phys, qproc->mba_size);
- if (!qproc->mba_region) {
- dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
- &r.start, qproc->mba_size);
- return -EBUSY;
- }
if (!child) {
node = of_parse_phandle(qproc->dev->of_node,
@@ -1717,11 +1744,22 @@ static int q6v5_probe(struct platform_device *pdev)
ret = q6v5_pds_attach(&pdev->dev, qproc->proxy_pds,
desc->proxy_pd_names);
- if (ret < 0) {
+	/* Fall back to regulators for old device trees */
+ if (ret == -ENODATA && desc->fallback_proxy_supply) {
+ ret = q6v5_regulator_init(&pdev->dev,
+ qproc->fallback_proxy_regs,
+ desc->fallback_proxy_supply);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to get fallback proxy regulators.\n");
+ goto detach_active_pds;
+ }
+ qproc->fallback_proxy_reg_count = ret;
+ } else if (ret < 0) {
dev_err(&pdev->dev, "Failed to init power domains\n");
goto detach_active_pds;
+ } else {
+ qproc->proxy_pd_count = ret;
}
- qproc->proxy_pd_count = ret;
qproc->has_alt_reset = desc->has_alt_reset;
ret = q6v5_init_reset(qproc);
@@ -1923,6 +1961,13 @@ static const struct rproc_hexagon_res msm8916_mss = {
.hexagon_mba_image = "mba.mbn",
.proxy_supply = (struct qcom_mss_reg_res[]) {
{
+ .supply = "pll",
+ .uA = 100000,
+ },
+ {}
+ },
+ .fallback_proxy_supply = (struct qcom_mss_reg_res[]) {
+ {
.supply = "mx",
.uV = 1050000,
},
@@ -1930,10 +1975,6 @@ static const struct rproc_hexagon_res msm8916_mss = {
.supply = "cx",
.uA = 100000,
},
- {
- .supply = "pll",
- .uA = 100000,
- },
{}
},
.proxy_clk_names = (char*[]){
@@ -1946,6 +1987,11 @@ static const struct rproc_hexagon_res msm8916_mss = {
"mem",
NULL
},
+ .proxy_pd_names = (char*[]){
+ "mx",
+ "cx",
+ NULL
+ },
.need_mem_protection = false,
.has_alt_reset = false,
.has_mba_logs = false,
@@ -1957,6 +2003,13 @@ static const struct rproc_hexagon_res msm8974_mss = {
.hexagon_mba_image = "mba.b00",
.proxy_supply = (struct qcom_mss_reg_res[]) {
{
+ .supply = "pll",
+ .uA = 100000,
+ },
+ {}
+ },
+ .fallback_proxy_supply = (struct qcom_mss_reg_res[]) {
+ {
.supply = "mx",
.uV = 1050000,
},
@@ -1964,10 +2017,6 @@ static const struct rproc_hexagon_res msm8974_mss = {
.supply = "cx",
.uA = 100000,
},
- {
- .supply = "pll",
- .uA = 100000,
- },
{}
},
.active_supply = (struct qcom_mss_reg_res[]) {
@@ -1988,6 +2037,11 @@ static const struct rproc_hexagon_res msm8974_mss = {
"mem",
NULL
},
+ .proxy_pd_names = (char*[]){
+ "mx",
+ "cx",
+ NULL
+ },
.need_mem_protection = false,
.has_alt_reset = false,
.has_mba_logs = false,
diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
index 3837f23995e0..ee586226e438 100644
--- a/drivers/remoteproc/qcom_q6v5_pas.c
+++ b/drivers/remoteproc/qcom_q6v5_pas.c
@@ -33,6 +33,7 @@ struct adsp_data {
int crash_reason_smem;
const char *firmware_name;
int pas_id;
+ unsigned int minidump_id;
bool has_aggre2_clk;
bool auto_boot;
@@ -63,6 +64,7 @@ struct qcom_adsp {
int proxy_pd_count;
int pas_id;
+ unsigned int minidump_id;
int crash_reason_smem;
bool has_aggre2_clk;
const char *info_name;
@@ -81,6 +83,13 @@ struct qcom_adsp {
struct qcom_sysmon *sysmon;
};
+static void adsp_minidump(struct rproc *rproc)
+{
+ struct qcom_adsp *adsp = rproc->priv;
+
+ qcom_minidump(rproc, adsp->minidump_id);
+}
+
static int adsp_pds_enable(struct qcom_adsp *adsp, struct device **pds,
size_t pd_count)
{
@@ -90,8 +99,11 @@ static int adsp_pds_enable(struct qcom_adsp *adsp, struct device **pds,
for (i = 0; i < pd_count; i++) {
dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
ret = pm_runtime_get_sync(pds[i]);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(pds[i]);
+ dev_pm_genpd_set_performance_state(pds[i], 0);
goto unroll_pd_votes;
+ }
}
return 0;
@@ -214,7 +226,7 @@ static int adsp_stop(struct rproc *rproc)
int handover;
int ret;
- ret = qcom_q6v5_request_stop(&adsp->q6v5);
+ ret = qcom_q6v5_request_stop(&adsp->q6v5, adsp->sysmon);
if (ret == -ETIMEDOUT)
dev_err(adsp->dev, "timed out on wait\n");
@@ -258,6 +270,15 @@ static const struct rproc_ops adsp_ops = {
.panic = adsp_panic,
};
+static const struct rproc_ops adsp_minidump_ops = {
+ .start = adsp_start,
+ .stop = adsp_stop,
+ .da_to_va = adsp_da_to_va,
+ .load = adsp_load,
+ .panic = adsp_panic,
+ .coredump = adsp_minidump,
+};
+
static int adsp_init_clock(struct qcom_adsp *adsp)
{
int ret;
@@ -383,6 +404,7 @@ static int adsp_probe(struct platform_device *pdev)
struct qcom_adsp *adsp;
struct rproc *rproc;
const char *fw_name;
+ const struct rproc_ops *ops = &adsp_ops;
int ret;
desc = of_device_get_match_data(&pdev->dev);
@@ -398,8 +420,11 @@ static int adsp_probe(struct platform_device *pdev)
if (ret < 0 && ret != -EINVAL)
return ret;
- rproc = rproc_alloc(&pdev->dev, pdev->name, &adsp_ops,
- fw_name, sizeof(*adsp));
+ if (desc->minidump_id)
+ ops = &adsp_minidump_ops;
+
+ rproc = rproc_alloc(&pdev->dev, pdev->name, ops, fw_name, sizeof(*adsp));
+
if (!rproc) {
dev_err(&pdev->dev, "unable to allocate remoteproc\n");
return -ENOMEM;
@@ -411,6 +436,7 @@ static int adsp_probe(struct platform_device *pdev)
adsp = (struct qcom_adsp *)rproc->priv;
adsp->dev = &pdev->dev;
adsp->rproc = rproc;
+ adsp->minidump_id = desc->minidump_id;
adsp->pas_id = desc->pas_id;
adsp->has_aggre2_clk = desc->has_aggre2_clk;
adsp->info_name = desc->sysmon_name;
@@ -607,6 +633,7 @@ static const struct adsp_data mpss_resource_init = {
.crash_reason_smem = 421,
.firmware_name = "modem.mdt",
.pas_id = 4,
+ .minidump_id = 3,
.has_aggre2_clk = false,
.auto_boot = false,
.active_pd_names = (char*[]){
diff --git a/drivers/remoteproc/qcom_q6v5_wcss.c b/drivers/remoteproc/qcom_q6v5_wcss.c
index 8846ef0b0f1a..78ebe1168b33 100644
--- a/drivers/remoteproc/qcom_q6v5_wcss.c
+++ b/drivers/remoteproc/qcom_q6v5_wcss.c
@@ -390,7 +390,7 @@ static int q6v5_wcss_stop(struct rproc *rproc)
int ret;
/* WCSS powerdown */
- ret = qcom_q6v5_request_stop(&wcss->q6v5);
+ ret = qcom_q6v5_request_stop(&wcss->q6v5, NULL);
if (ret == -ETIMEDOUT) {
dev_err(wcss->dev, "timed out on wait\n");
return ret;
diff --git a/drivers/remoteproc/qcom_sysmon.c b/drivers/remoteproc/qcom_sysmon.c
index 9eb2f6bccea6..9fca81492863 100644
--- a/drivers/remoteproc/qcom_sysmon.c
+++ b/drivers/remoteproc/qcom_sysmon.c
@@ -22,6 +22,9 @@ struct qcom_sysmon {
struct rproc_subdev subdev;
struct rproc *rproc;
+ int state;
+ struct mutex state_lock;
+
struct list_head node;
const char *name;
@@ -41,6 +44,7 @@ struct qcom_sysmon {
struct mutex lock;
bool ssr_ack;
+ bool shutdown_acked;
struct qmi_handle qmi;
struct sockaddr_qrtr ssctl;
@@ -112,10 +116,13 @@ out_unlock:
/**
* sysmon_request_shutdown() - request graceful shutdown of remote
* @sysmon: sysmon context
+ *
+ * Return: boolean indicator of the remote processor acking the request
*/
-static void sysmon_request_shutdown(struct qcom_sysmon *sysmon)
+static bool sysmon_request_shutdown(struct qcom_sysmon *sysmon)
{
char *req = "ssr:shutdown";
+ bool acked = false;
int ret;
mutex_lock(&sysmon->lock);
@@ -138,9 +145,13 @@ static void sysmon_request_shutdown(struct qcom_sysmon *sysmon)
if (!sysmon->ssr_ack)
dev_err(sysmon->dev,
"unexpected response to sysmon shutdown request\n");
+ else
+ acked = true;
out_unlock:
mutex_unlock(&sysmon->lock);
+
+ return acked;
}
static int sysmon_callback(struct rpmsg_device *rpdev, void *data, int count,
@@ -283,7 +294,7 @@ static void sysmon_ind_cb(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
complete(&sysmon->ind_comp);
}
-static struct qmi_msg_handler qmi_indication_handler[] = {
+static const struct qmi_msg_handler qmi_indication_handler[] = {
{
.type = QMI_INDICATION,
.msg_id = SSCTL_SHUTDOWN_READY_IND,
@@ -294,14 +305,33 @@ static struct qmi_msg_handler qmi_indication_handler[] = {
{}
};
+static bool ssctl_request_shutdown_wait(struct qcom_sysmon *sysmon)
+{
+ int ret;
+
+ ret = wait_for_completion_timeout(&sysmon->shutdown_comp, 10 * HZ);
+ if (ret)
+ return true;
+
+ ret = try_wait_for_completion(&sysmon->ind_comp);
+ if (ret)
+ return true;
+
+ dev_err(sysmon->dev, "timeout waiting for shutdown ack\n");
+ return false;
+}
+
/**
* ssctl_request_shutdown() - request shutdown via SSCTL QMI service
* @sysmon: sysmon context
+ *
+ * Return: boolean indicator of the remote processor acking the request
*/
-static void ssctl_request_shutdown(struct qcom_sysmon *sysmon)
+static bool ssctl_request_shutdown(struct qcom_sysmon *sysmon)
{
struct ssctl_shutdown_resp resp;
struct qmi_txn txn;
+ bool acked = false;
int ret;
reinit_completion(&sysmon->ind_comp);
@@ -309,7 +339,7 @@ static void ssctl_request_shutdown(struct qcom_sysmon *sysmon)
ret = qmi_txn_init(&sysmon->qmi, &txn, ssctl_shutdown_resp_ei, &resp);
if (ret < 0) {
dev_err(sysmon->dev, "failed to allocate QMI txn\n");
- return;
+ return false;
}
ret = qmi_send_request(&sysmon->qmi, &sysmon->ssctl, &txn,
@@ -317,27 +347,23 @@ static void ssctl_request_shutdown(struct qcom_sysmon *sysmon)
if (ret < 0) {
dev_err(sysmon->dev, "failed to send shutdown request\n");
qmi_txn_cancel(&txn);
- return;
+ return false;
}
ret = qmi_txn_wait(&txn, 5 * HZ);
- if (ret < 0)
- dev_err(sysmon->dev, "failed receiving QMI response\n");
- else if (resp.resp.result)
- dev_err(sysmon->dev, "shutdown request failed\n");
- else
+ if (ret < 0) {
+ dev_err(sysmon->dev, "timeout waiting for shutdown response\n");
+ } else if (resp.resp.result) {
+ dev_err(sysmon->dev, "shutdown request rejected\n");
+ } else {
dev_dbg(sysmon->dev, "shutdown request completed\n");
-
- if (sysmon->shutdown_irq > 0) {
- ret = wait_for_completion_timeout(&sysmon->shutdown_comp,
- 10 * HZ);
- if (!ret) {
- ret = try_wait_for_completion(&sysmon->ind_comp);
- if (!ret)
- dev_err(sysmon->dev,
- "timeout waiting for shutdown ack\n");
- }
+ acked = true;
}
+
+ if (sysmon->shutdown_irq > 0)
+ return ssctl_request_shutdown_wait(sysmon);
+
+ return acked;
}
/**
@@ -371,18 +397,18 @@ static void ssctl_send_event(struct qcom_sysmon *sysmon,
SSCTL_SUBSYS_EVENT_REQ, 40,
ssctl_subsys_event_req_ei, &req);
if (ret < 0) {
- dev_err(sysmon->dev, "failed to send shutdown request\n");
+ dev_err(sysmon->dev, "failed to send subsystem event\n");
qmi_txn_cancel(&txn);
return;
}
ret = qmi_txn_wait(&txn, 5 * HZ);
if (ret < 0)
- dev_err(sysmon->dev, "failed receiving QMI response\n");
+ dev_err(sysmon->dev, "timeout waiting for subsystem event response\n");
else if (resp.resp.result)
- dev_err(sysmon->dev, "ssr event send failed\n");
+ dev_err(sysmon->dev, "subsystem event rejected\n");
else
- dev_dbg(sysmon->dev, "ssr event send completed\n");
+ dev_dbg(sysmon->dev, "subsystem event accepted\n");
}
/**
@@ -448,7 +474,10 @@ static int sysmon_prepare(struct rproc_subdev *subdev)
.ssr_event = SSCTL_SSR_EVENT_BEFORE_POWERUP
};
+ mutex_lock(&sysmon->state_lock);
+ sysmon->state = SSCTL_SSR_EVENT_BEFORE_POWERUP;
blocking_notifier_call_chain(&sysmon_notifiers, 0, (void *)&event);
+ mutex_unlock(&sysmon->state_lock);
return 0;
}
@@ -472,20 +501,25 @@ static int sysmon_start(struct rproc_subdev *subdev)
.ssr_event = SSCTL_SSR_EVENT_AFTER_POWERUP
};
+ mutex_lock(&sysmon->state_lock);
+ sysmon->state = SSCTL_SSR_EVENT_AFTER_POWERUP;
blocking_notifier_call_chain(&sysmon_notifiers, 0, (void *)&event);
+ mutex_unlock(&sysmon->state_lock);
mutex_lock(&sysmon_lock);
list_for_each_entry(target, &sysmon_list, node) {
- if (target == sysmon ||
- target->rproc->state != RPROC_RUNNING)
+ if (target == sysmon)
continue;
+ mutex_lock(&target->state_lock);
event.subsys_name = target->name;
+ event.ssr_event = target->state;
if (sysmon->ssctl_version == 2)
ssctl_send_event(sysmon, &event);
else if (sysmon->ept)
sysmon_send_event(sysmon, &event);
+ mutex_unlock(&target->state_lock);
}
mutex_unlock(&sysmon_lock);
@@ -500,16 +534,21 @@ static void sysmon_stop(struct rproc_subdev *subdev, bool crashed)
.ssr_event = SSCTL_SSR_EVENT_BEFORE_SHUTDOWN
};
+ sysmon->shutdown_acked = false;
+
+ mutex_lock(&sysmon->state_lock);
+ sysmon->state = SSCTL_SSR_EVENT_BEFORE_SHUTDOWN;
blocking_notifier_call_chain(&sysmon_notifiers, 0, (void *)&event);
+ mutex_unlock(&sysmon->state_lock);
/* Don't request graceful shutdown if we've crashed */
if (crashed)
return;
if (sysmon->ssctl_version)
- ssctl_request_shutdown(sysmon);
+ sysmon->shutdown_acked = ssctl_request_shutdown(sysmon);
else if (sysmon->ept)
- sysmon_request_shutdown(sysmon);
+ sysmon->shutdown_acked = sysmon_request_shutdown(sysmon);
}
static void sysmon_unprepare(struct rproc_subdev *subdev)
@@ -521,7 +560,10 @@ static void sysmon_unprepare(struct rproc_subdev *subdev)
.ssr_event = SSCTL_SSR_EVENT_AFTER_SHUTDOWN
};
+ mutex_lock(&sysmon->state_lock);
+ sysmon->state = SSCTL_SSR_EVENT_AFTER_SHUTDOWN;
blocking_notifier_call_chain(&sysmon_notifiers, 0, (void *)&event);
+ mutex_unlock(&sysmon->state_lock);
}
/**
@@ -534,11 +576,10 @@ static int sysmon_notify(struct notifier_block *nb, unsigned long event,
void *data)
{
struct qcom_sysmon *sysmon = container_of(nb, struct qcom_sysmon, nb);
- struct rproc *rproc = sysmon->rproc;
struct sysmon_event *sysmon_event = data;
/* Skip non-running rprocs and the originating instance */
- if (rproc->state != RPROC_RUNNING ||
+ if (sysmon->state != SSCTL_SSR_EVENT_AFTER_POWERUP ||
!strcmp(sysmon_event->subsys_name, sysmon->name)) {
dev_dbg(sysmon->dev, "not notifying %s\n", sysmon->name);
return NOTIFY_DONE;
@@ -591,6 +632,7 @@ struct qcom_sysmon *qcom_add_sysmon_subdev(struct rproc *rproc,
init_completion(&sysmon->ind_comp);
init_completion(&sysmon->shutdown_comp);
mutex_init(&sysmon->lock);
+ mutex_init(&sysmon->state_lock);
sysmon->shutdown_irq = of_irq_get_byname(sysmon->dev->of_node,
"shutdown-ack");
@@ -665,6 +707,22 @@ void qcom_remove_sysmon_subdev(struct qcom_sysmon *sysmon)
EXPORT_SYMBOL_GPL(qcom_remove_sysmon_subdev);
/**
+ * qcom_sysmon_shutdown_acked() - query the success of the last shutdown
+ * @sysmon: sysmon context
+ *
+ * When sysmon is used to request a graceful shutdown of the remote processor,
+ * the remoteproc driver can use this to query whether the shutdown succeeded,
+ * in order to know if it should fall back to other means of requesting a
+ * shutdown.
+ *
+ * Return: boolean indicator of the success of the last shutdown request
+ */
+bool qcom_sysmon_shutdown_acked(struct qcom_sysmon *sysmon)
+{
+ return sysmon && sysmon->shutdown_acked;
+}
+EXPORT_SYMBOL_GPL(qcom_sysmon_shutdown_acked);
+
+/**
* sysmon_probe() - probe sys_mon channel
* @rpdev: rpmsg device handle
*
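
A minimal sketch of how a remoteproc driver's stop path might consume the ack state recorded above; my_rproc, my_force_stop() and the stop handler itself are hypothetical placeholders, only qcom_sysmon_shutdown_acked() comes from this patch.

#include <linux/remoteproc.h>

#include "qcom_common.h"	/* qcom_sysmon_shutdown_acked() */

struct my_rproc {
	struct qcom_sysmon *sysmon;	/* from qcom_add_sysmon_subdev() */
};

static void my_force_stop(struct my_rproc *drv);	/* hypothetical fallback */

static int my_stop(struct rproc *rproc)
{
	struct my_rproc *drv = rproc->priv;

	/*
	 * The sysmon subdev's stop callback already ran and recorded
	 * whether the remote acked the graceful shutdown request.
	 */
	if (!qcom_sysmon_shutdown_acked(drv->sysmon))
		my_force_stop(drv);

	return 0;
}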
diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c
index e2573f79a137..f95854255c70 100644
--- a/drivers/remoteproc/qcom_wcnss.c
+++ b/drivers/remoteproc/qcom_wcnss.c
@@ -17,6 +17,8 @@
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
#include <linux/qcom_scm.h>
#include <linux/regulator/consumer.h>
#include <linux/remoteproc.h>
@@ -51,12 +53,15 @@
#define WCNSS_PMU_XO_MODE_19p2 0
#define WCNSS_PMU_XO_MODE_48 3
+#define WCNSS_MAX_PDS 2
+
struct wcnss_data {
size_t pmu_offset;
size_t spare_offset;
+ const char *pd_names[WCNSS_MAX_PDS];
const struct wcnss_vreg_info *vregs;
- size_t num_vregs;
+ size_t num_vregs, num_pd_vregs;
};
struct qcom_wcnss {
@@ -80,6 +85,8 @@ struct qcom_wcnss {
struct mutex iris_lock;
struct qcom_iris *iris;
+ struct device *pds[WCNSS_MAX_PDS];
+ size_t num_pds;
struct regulator_bulk_data *vregs;
size_t num_vregs;
@@ -111,24 +118,28 @@ static const struct wcnss_data pronto_v1_data = {
.pmu_offset = 0x1004,
.spare_offset = 0x1088,
+ .pd_names = { "mx", "cx" },
.vregs = (struct wcnss_vreg_info[]) {
{ "vddmx", 950000, 1150000, 0 },
{ "vddcx", .super_turbo = true},
{ "vddpx", 1800000, 1800000, 0 },
},
- .num_vregs = 3,
+ .num_pd_vregs = 2,
+ .num_vregs = 1,
};
static const struct wcnss_data pronto_v2_data = {
.pmu_offset = 0x1004,
.spare_offset = 0x1088,
+ .pd_names = { "mx", "cx" },
.vregs = (struct wcnss_vreg_info[]) {
{ "vddmx", 1287500, 1287500, 0 },
{ "vddcx", .super_turbo = true },
{ "vddpx", 1800000, 1800000, 0 },
},
- .num_vregs = 3,
+ .num_pd_vregs = 2,
+ .num_vregs = 1,
};
void qcom_wcnss_assign_iris(struct qcom_wcnss *wcnss,
@@ -219,7 +230,7 @@ static void wcnss_configure_iris(struct qcom_wcnss *wcnss)
static int wcnss_start(struct rproc *rproc)
{
struct qcom_wcnss *wcnss = (struct qcom_wcnss *)rproc->priv;
- int ret;
+ int ret, i;
mutex_lock(&wcnss->iris_lock);
if (!wcnss->iris) {
@@ -228,9 +239,18 @@ static int wcnss_start(struct rproc *rproc)
goto release_iris_lock;
}
+ for (i = 0; i < wcnss->num_pds; i++) {
+ dev_pm_genpd_set_performance_state(wcnss->pds[i], INT_MAX);
+ ret = pm_runtime_get_sync(wcnss->pds[i]);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wcnss->pds[i]);
+ goto disable_pds;
+ }
+ }
+
ret = regulator_bulk_enable(wcnss->num_vregs, wcnss->vregs);
if (ret)
- goto release_iris_lock;
+ goto disable_pds;
ret = qcom_iris_enable(wcnss->iris);
if (ret)
@@ -262,6 +282,11 @@ disable_iris:
qcom_iris_disable(wcnss->iris);
disable_regulators:
regulator_bulk_disable(wcnss->num_vregs, wcnss->vregs);
+disable_pds:
+ for (i--; i >= 0; i--) {
+ pm_runtime_put(wcnss->pds[i]);
+ dev_pm_genpd_set_performance_state(wcnss->pds[i], 0);
+ }
release_iris_lock:
mutex_unlock(&wcnss->iris_lock);
@@ -371,14 +396,54 @@ static irqreturn_t wcnss_stop_ack_interrupt(int irq, void *dev)
return IRQ_HANDLED;
}
+static int wcnss_init_pds(struct qcom_wcnss *wcnss,
+ const char * const pd_names[WCNSS_MAX_PDS])
+{
+ int i, ret;
+
+ for (i = 0; i < WCNSS_MAX_PDS; i++) {
+ if (!pd_names[i])
+ break;
+
+ wcnss->pds[i] = dev_pm_domain_attach_by_name(wcnss->dev, pd_names[i]);
+ if (IS_ERR_OR_NULL(wcnss->pds[i])) {
+ ret = PTR_ERR(wcnss->pds[i]) ? : -ENODATA;
+ for (i--; i >= 0; i--)
+ dev_pm_domain_detach(wcnss->pds[i], false);
+ return ret;
+ }
+ }
+ wcnss->num_pds = i;
+
+ return 0;
+}
+
+static void wcnss_release_pds(struct qcom_wcnss *wcnss)
+{
+ int i;
+
+ for (i = 0; i < wcnss->num_pds; i++)
+ dev_pm_domain_detach(wcnss->pds[i], false);
+}
+
static int wcnss_init_regulators(struct qcom_wcnss *wcnss,
const struct wcnss_vreg_info *info,
- int num_vregs)
+ int num_vregs, int num_pd_vregs)
{
struct regulator_bulk_data *bulk;
int ret;
int i;
+ /*
+	 * If attaching the power domains succeeded, we can skip requesting
+ * the regulators for the power domains. For old device trees we need to
+ * reserve extra space to manage them through the regulator interface.
+ */
+ if (wcnss->num_pds)
+ info += num_pd_vregs;
+ else
+ num_vregs += num_pd_vregs;
+
bulk = devm_kcalloc(wcnss->dev,
num_vregs, sizeof(struct regulator_bulk_data),
GFP_KERNEL);
@@ -514,33 +579,42 @@ static int wcnss_probe(struct platform_device *pdev)
wcnss->pmu_cfg = mmio + data->pmu_offset;
wcnss->spare_out = mmio + data->spare_offset;
- ret = wcnss_init_regulators(wcnss, data->vregs, data->num_vregs);
- if (ret)
+ /*
+	 * We might need to fall back to regulators instead of power domains
+ * for old device trees. Don't report an error in that case.
+ */
+ ret = wcnss_init_pds(wcnss, data->pd_names);
+ if (ret && (ret != -ENODATA || !data->num_pd_vregs))
goto free_rproc;
+ ret = wcnss_init_regulators(wcnss, data->vregs, data->num_vregs,
+ data->num_pd_vregs);
+ if (ret)
+ goto detach_pds;
+
ret = wcnss_request_irq(wcnss, pdev, "wdog", false, wcnss_wdog_interrupt);
if (ret < 0)
- goto free_rproc;
+ goto detach_pds;
wcnss->wdog_irq = ret;
ret = wcnss_request_irq(wcnss, pdev, "fatal", false, wcnss_fatal_interrupt);
if (ret < 0)
- goto free_rproc;
+ goto detach_pds;
wcnss->fatal_irq = ret;
ret = wcnss_request_irq(wcnss, pdev, "ready", true, wcnss_ready_interrupt);
if (ret < 0)
- goto free_rproc;
+ goto detach_pds;
wcnss->ready_irq = ret;
ret = wcnss_request_irq(wcnss, pdev, "handover", true, wcnss_handover_interrupt);
if (ret < 0)
- goto free_rproc;
+ goto detach_pds;
wcnss->handover_irq = ret;
ret = wcnss_request_irq(wcnss, pdev, "stop-ack", true, wcnss_stop_ack_interrupt);
if (ret < 0)
- goto free_rproc;
+ goto detach_pds;
wcnss->stop_ack_irq = ret;
if (wcnss->stop_ack_irq) {
@@ -548,7 +622,7 @@ static int wcnss_probe(struct platform_device *pdev)
&wcnss->stop_bit);
if (IS_ERR(wcnss->state)) {
ret = PTR_ERR(wcnss->state);
- goto free_rproc;
+ goto detach_pds;
}
}
@@ -556,15 +630,17 @@ static int wcnss_probe(struct platform_device *pdev)
wcnss->sysmon = qcom_add_sysmon_subdev(rproc, "wcnss", WCNSS_SSCTL_ID);
if (IS_ERR(wcnss->sysmon)) {
ret = PTR_ERR(wcnss->sysmon);
- goto free_rproc;
+ goto detach_pds;
}
ret = rproc_add(rproc);
if (ret)
- goto free_rproc;
+ goto detach_pds;
return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+detach_pds:
+ wcnss_release_pds(wcnss);
free_rproc:
rproc_free(rproc);
@@ -582,6 +658,7 @@ static int wcnss_remove(struct platform_device *pdev)
qcom_remove_sysmon_subdev(wcnss->sysmon);
qcom_remove_smd_subdev(wcnss->rproc, &wcnss->smd_subdev);
+ wcnss_release_pds(wcnss);
rproc_free(wcnss->rproc);
return 0;
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index dab2c0f5caf0..2394eef383e3 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -1704,7 +1704,7 @@ int rproc_trigger_recovery(struct rproc *rproc)
goto unlock_mutex;
/* generate coredump */
- rproc_coredump(rproc);
+ rproc->ops->coredump(rproc);
/* load firmware */
ret = request_firmware(&firmware_p, rproc->firmware, dev);
@@ -1934,6 +1934,69 @@ struct rproc *rproc_get_by_phandle(phandle phandle)
#endif
EXPORT_SYMBOL(rproc_get_by_phandle);
+/**
+ * rproc_set_firmware() - assign a new firmware
+ * @rproc: rproc handle to which the new firmware is being assigned
+ * @fw_name: new firmware name to be assigned
+ *
+ * This function allows remoteproc drivers or clients to configure a custom
+ * firmware name that is different from the default name used during remoteproc
+ * registration. The function does not trigger a remote processor boot,
+ * only sets the firmware name used for a subsequent boot. This function
+ * should also be called only when the remote processor is offline.
+ *
+ * This allows either userspace to configure a different firmware name through
+ * sysfs, or a kernel-level remoteproc or remoteproc client driver to set a
+ * specific firmware when it is controlling the boot and shutdown of the
+ * remote processor.
+ *
+ * Return: 0 on success or a negative value upon failure
+ */
+int rproc_set_firmware(struct rproc *rproc, const char *fw_name)
+{
+ struct device *dev;
+ int ret, len;
+ char *p;
+
+ if (!rproc || !fw_name)
+ return -EINVAL;
+
+ dev = rproc->dev.parent;
+
+ ret = mutex_lock_interruptible(&rproc->lock);
+ if (ret) {
+ dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
+ return -EINVAL;
+ }
+
+ if (rproc->state != RPROC_OFFLINE) {
+ dev_err(dev, "can't change firmware while running\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ len = strcspn(fw_name, "\n");
+ if (!len) {
+ dev_err(dev, "can't provide empty string for firmware name\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ p = kstrndup(fw_name, len, GFP_KERNEL);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ kfree(rproc->firmware);
+ rproc->firmware = p;
+
+out:
+ mutex_unlock(&rproc->lock);
+ return ret;
+}
+EXPORT_SYMBOL(rproc_set_firmware);
+
static int rproc_validate(struct rproc *rproc)
{
switch (rproc->state) {
@@ -2126,6 +2189,10 @@ static int rproc_alloc_ops(struct rproc *rproc, const struct rproc_ops *ops)
if (!rproc->ops)
return -ENOMEM;
+ /* Default to rproc_coredump if no coredump function is specified */
+ if (!rproc->ops->coredump)
+ rproc->ops->coredump = rproc_coredump;
+
if (rproc->ops->load)
return 0;
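
As a usage illustration, a hedged sketch of a client driver swapping firmware between boots; my_reboot_with() and the caller policy are hypothetical, while rproc_boot() and rproc_shutdown() are the standard remoteproc entry points, and rproc_set_firmware() itself rejects a running remote or an empty name as shown above.

#include <linux/remoteproc.h>

static int my_reboot_with(struct rproc *rproc, const char *fw)
{
	int ret;

	rproc_shutdown(rproc);	/* firmware may only change while offline */

	ret = rproc_set_firmware(rproc, fw);
	if (ret)
		return ret;	/* -EBUSY, -EINVAL or -ENOMEM per above */

	return rproc_boot(rproc);
}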
diff --git a/drivers/remoteproc/remoteproc_coredump.c b/drivers/remoteproc/remoteproc_coredump.c
index 34530dc20cb4..81ec154a6a5e 100644
--- a/drivers/remoteproc/remoteproc_coredump.c
+++ b/drivers/remoteproc/remoteproc_coredump.c
@@ -323,3 +323,143 @@ void rproc_coredump(struct rproc *rproc)
*/
wait_for_completion(&dump_state.dump_done);
}
+
+/**
+ * rproc_coredump_using_sections() - perform coredump using section headers
+ * @rproc: rproc handle
+ *
+ * This function will generate an ELF header describing the registered dump
+ * segments as ELF sections and create a devcoredump device associated with
+ * rproc. Based on the coredump configuration, this function will either copy
+ * the segments directly from device memory to userspace, or copy them from
+ * device memory into a separate buffer which userspace can then read.
+ * The first approach avoids using extra vmalloc memory, but it stalls the
+ * recovery flow until the dump is read by userspace.
+ */
+void rproc_coredump_using_sections(struct rproc *rproc)
+{
+ struct rproc_dump_segment *segment;
+ void *shdr;
+ void *ehdr;
+ size_t data_size;
+ size_t strtbl_size = 0;
+ size_t strtbl_index = 1;
+ size_t offset;
+ void *data;
+ u8 class = rproc->elf_class;
+ int shnum;
+ struct rproc_coredump_state dump_state;
+ unsigned int dump_conf = rproc->dump_conf;
+ char *str_tbl = "STR_TBL";
+
+ if (list_empty(&rproc->dump_segments) ||
+ dump_conf == RPROC_COREDUMP_DISABLED)
+ return;
+
+ if (class == ELFCLASSNONE) {
+ dev_err(&rproc->dev, "Elf class is not set\n");
+ return;
+ }
+
+ /*
+	 * We allocate two extra section headers: the first is the null
+	 * section and the second describes the string table. Space is also
+	 * allocated for the string table itself.
+ */
+ data_size = elf_size_of_hdr(class) + 2 * elf_size_of_shdr(class);
+ shnum = 2;
+
+ /* the extra byte is for the null character at index 0 */
+ strtbl_size += strlen(str_tbl) + 2;
+
+ list_for_each_entry(segment, &rproc->dump_segments, node) {
+ data_size += elf_size_of_shdr(class);
+ strtbl_size += strlen(segment->priv) + 1;
+ if (dump_conf == RPROC_COREDUMP_ENABLED)
+ data_size += segment->size;
+ shnum++;
+ }
+
+ data_size += strtbl_size;
+
+ data = vmalloc(data_size);
+ if (!data)
+ return;
+
+ ehdr = data;
+ memset(ehdr, 0, elf_size_of_hdr(class));
+ /* e_ident field is common for both elf32 and elf64 */
+ elf_hdr_init_ident(ehdr, class);
+
+ elf_hdr_set_e_type(class, ehdr, ET_CORE);
+ elf_hdr_set_e_machine(class, ehdr, rproc->elf_machine);
+ elf_hdr_set_e_version(class, ehdr, EV_CURRENT);
+ elf_hdr_set_e_entry(class, ehdr, rproc->bootaddr);
+ elf_hdr_set_e_shoff(class, ehdr, elf_size_of_hdr(class));
+ elf_hdr_set_e_ehsize(class, ehdr, elf_size_of_hdr(class));
+ elf_hdr_set_e_shentsize(class, ehdr, elf_size_of_shdr(class));
+ elf_hdr_set_e_shnum(class, ehdr, shnum);
+ elf_hdr_set_e_shstrndx(class, ehdr, 1);
+
+ /*
+	 * The zeroth section header is reserved and rarely used. Set it to
+	 * null (SHN_UNDEF) and move on to the next one.
+ */
+ shdr = data + elf_hdr_get_e_shoff(class, ehdr);
+ memset(shdr, 0, elf_size_of_shdr(class));
+ shdr += elf_size_of_shdr(class);
+
+ /* Initialize the string table. */
+ offset = elf_hdr_get_e_shoff(class, ehdr) +
+ elf_size_of_shdr(class) * elf_hdr_get_e_shnum(class, ehdr);
+ memset(data + offset, 0, strtbl_size);
+
+ /* Fill in the string table section header. */
+ memset(shdr, 0, elf_size_of_shdr(class));
+ elf_shdr_set_sh_type(class, shdr, SHT_STRTAB);
+ elf_shdr_set_sh_offset(class, shdr, offset);
+ elf_shdr_set_sh_size(class, shdr, strtbl_size);
+ elf_shdr_set_sh_entsize(class, shdr, 0);
+ elf_shdr_set_sh_flags(class, shdr, 0);
+ elf_shdr_set_sh_name(class, shdr, elf_strtbl_add(str_tbl, ehdr, class, &strtbl_index));
+ offset += elf_shdr_get_sh_size(class, shdr);
+ shdr += elf_size_of_shdr(class);
+
+ list_for_each_entry(segment, &rproc->dump_segments, node) {
+ memset(shdr, 0, elf_size_of_shdr(class));
+ elf_shdr_set_sh_type(class, shdr, SHT_PROGBITS);
+ elf_shdr_set_sh_offset(class, shdr, offset);
+ elf_shdr_set_sh_addr(class, shdr, segment->da);
+ elf_shdr_set_sh_size(class, shdr, segment->size);
+ elf_shdr_set_sh_entsize(class, shdr, 0);
+ elf_shdr_set_sh_flags(class, shdr, SHF_WRITE);
+ elf_shdr_set_sh_name(class, shdr,
+ elf_strtbl_add(segment->priv, ehdr, class, &strtbl_index));
+
+ /* No need to copy segments for inline dumps */
+ if (dump_conf == RPROC_COREDUMP_ENABLED)
+ rproc_copy_segment(rproc, data + offset, segment, 0,
+ segment->size);
+ offset += elf_shdr_get_sh_size(class, shdr);
+ shdr += elf_size_of_shdr(class);
+ }
+
+ if (dump_conf == RPROC_COREDUMP_ENABLED) {
+ dev_coredumpv(&rproc->dev, data, data_size, GFP_KERNEL);
+ return;
+ }
+
+ /* Initialize the dump state struct to be used by rproc_coredump_read */
+ dump_state.rproc = rproc;
+ dump_state.header = data;
+ init_completion(&dump_state.dump_done);
+
+ dev_coredumpm(&rproc->dev, NULL, &dump_state, data_size, GFP_KERNEL,
+ rproc_coredump_read, rproc_coredump_free);
+
+ /* Wait until the dump is read and free is called. Data is freed
+	 * by the devcoredump framework automatically after 5 minutes.
+ */
+ wait_for_completion(&dump_state.dump_done);
+}
+EXPORT_SYMBOL(rproc_coredump_using_sections);
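
A driver opts into this format by pointing its coredump op at the new helper; a hedged sketch follows, in which the start/stop handlers are placeholders for a real driver's callbacks.

#include <linux/remoteproc.h>

static int my_start(struct rproc *rproc);	/* placeholder */
static int my_stop(struct rproc *rproc);	/* placeholder */

static const struct rproc_ops my_rproc_ops = {
	.start    = my_start,
	.stop     = my_stop,
	/* emit section-header based dumps instead of the default format */
	.coredump = rproc_coredump_using_sections,
};

Note that the helper reads each section name from segment->priv, so every dump segment registered for such a device is expected to carry its name string there.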
diff --git a/drivers/remoteproc/remoteproc_elf_helpers.h b/drivers/remoteproc/remoteproc_elf_helpers.h
index 4b6be7b6bf4d..26404e68e17a 100644
--- a/drivers/remoteproc/remoteproc_elf_helpers.h
+++ b/drivers/remoteproc/remoteproc_elf_helpers.h
@@ -65,6 +65,7 @@ ELF_GEN_FIELD_GET_SET(hdr, e_type, u16)
ELF_GEN_FIELD_GET_SET(hdr, e_version, u32)
ELF_GEN_FIELD_GET_SET(hdr, e_ehsize, u32)
ELF_GEN_FIELD_GET_SET(hdr, e_phentsize, u16)
+ELF_GEN_FIELD_GET_SET(hdr, e_shentsize, u16)
ELF_GEN_FIELD_GET_SET(phdr, p_paddr, u64)
ELF_GEN_FIELD_GET_SET(phdr, p_vaddr, u64)
@@ -75,6 +76,9 @@ ELF_GEN_FIELD_GET_SET(phdr, p_offset, u64)
ELF_GEN_FIELD_GET_SET(phdr, p_flags, u32)
ELF_GEN_FIELD_GET_SET(phdr, p_align, u64)
+ELF_GEN_FIELD_GET_SET(shdr, sh_type, u32)
+ELF_GEN_FIELD_GET_SET(shdr, sh_flags, u32)
+ELF_GEN_FIELD_GET_SET(shdr, sh_entsize, u16)
ELF_GEN_FIELD_GET_SET(shdr, sh_size, u64)
ELF_GEN_FIELD_GET_SET(shdr, sh_offset, u64)
ELF_GEN_FIELD_GET_SET(shdr, sh_name, u32)
@@ -93,4 +97,26 @@ ELF_STRUCT_SIZE(shdr)
ELF_STRUCT_SIZE(phdr)
ELF_STRUCT_SIZE(hdr)
+static inline unsigned int elf_strtbl_add(const char *name, void *ehdr, u8 class, size_t *index)
+{
+ u16 shstrndx = elf_hdr_get_e_shstrndx(class, ehdr);
+ void *shdr;
+ char *strtab;
+ size_t idx, ret;
+
+ shdr = ehdr + elf_size_of_hdr(class) + shstrndx * elf_size_of_shdr(class);
+ strtab = ehdr + elf_shdr_get_sh_offset(class, shdr);
+ idx = index ? *index : 0;
+ if (!strtab || !name)
+ return 0;
+
+ ret = idx;
+ strcpy((strtab + idx), name);
+ idx += strlen(name) + 1;
+ if (index)
+ *index = idx;
+
+ return ret;
+}
+
#endif /* REMOTEPROC_ELF_LOADER_H */
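
A small worked illustration of the index bookkeeping in elf_strtbl_add(), assuming a string table that is large enough; "mba" is an arbitrary example name. Starting from index 1, because index 0 holds the reserved NUL, each call returns the offset at which it stored its string.

/*
 *	size_t idx = 1;                                   table: "\0"
 *	elf_strtbl_add("STR_TBL", ehdr, class, &idx);     returns 1, idx = 9
 *	elf_strtbl_add("mba", ehdr, class, &idx);         returns 9, idx = 13
 *
 * leaving the table bytes as "\0STR_TBL\0mba\0", with each return value
 * usable directly as an sh_name offset.
 */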
diff --git a/drivers/remoteproc/remoteproc_sysfs.c b/drivers/remoteproc/remoteproc_sysfs.c
index d1cf7bf277c4..1dbef895e65e 100644
--- a/drivers/remoteproc/remoteproc_sysfs.c
+++ b/drivers/remoteproc/remoteproc_sysfs.c
@@ -154,38 +154,9 @@ static ssize_t firmware_store(struct device *dev,
const char *buf, size_t count)
{
struct rproc *rproc = to_rproc(dev);
- char *p;
- int err, len = count;
+ int err;
- err = mutex_lock_interruptible(&rproc->lock);
- if (err) {
- dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, err);
- return -EINVAL;
- }
-
- if (rproc->state != RPROC_OFFLINE) {
- dev_err(dev, "can't change firmware while running\n");
- err = -EBUSY;
- goto out;
- }
-
- len = strcspn(buf, "\n");
- if (!len) {
- dev_err(dev, "can't provide a NULL firmware\n");
- err = -EINVAL;
- goto out;
- }
-
- p = kstrndup(buf, len, GFP_KERNEL);
- if (!p) {
- err = -ENOMEM;
- goto out;
- }
-
- kfree(rproc->firmware);
- rproc->firmware = p;
-out:
- mutex_unlock(&rproc->lock);
+ err = rproc_set_firmware(rproc, buf);
return err ? err : count;
}
diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c
index d2414cc1d90d..a180aeae9675 100644
--- a/drivers/remoteproc/stm32_rproc.c
+++ b/drivers/remoteproc/stm32_rproc.c
@@ -541,7 +541,7 @@ static void stm32_rproc_kick(struct rproc *rproc, int vqid)
}
}
-static struct rproc_ops st_rproc_ops = {
+static const struct rproc_ops st_rproc_ops = {
.start = stm32_rproc_start,
.stop = stm32_rproc_stop,
.attach = stm32_rproc_attach,
diff --git a/drivers/remoteproc/ti_k3_dsp_remoteproc.c b/drivers/remoteproc/ti_k3_dsp_remoteproc.c
index 9011e477290c..863c0214e0a8 100644
--- a/drivers/remoteproc/ti_k3_dsp_remoteproc.c
+++ b/drivers/remoteproc/ti_k3_dsp_remoteproc.c
@@ -445,10 +445,10 @@ static int k3_dsp_rproc_of_get_memories(struct platform_device *pdev,
kproc->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start,
resource_size(res));
- if (IS_ERR(kproc->mem[i].cpu_addr)) {
+ if (!kproc->mem[i].cpu_addr) {
dev_err(dev, "failed to map %s memory\n",
data->mems[i].name);
- return PTR_ERR(kproc->mem[i].cpu_addr);
+ return -ENOMEM;
}
kproc->mem[i].bus_addr = res->start;
kproc->mem[i].dev_addr = data->mems[i].dev_addr;
diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c
index d9307935441d..62b5a4c29456 100644
--- a/drivers/remoteproc/ti_k3_r5_remoteproc.c
+++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c
@@ -38,6 +38,8 @@
#define PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE 0x00000800
#define PROC_BOOT_CFG_FLAG_R5_BTCM_EN 0x00001000
#define PROC_BOOT_CFG_FLAG_R5_ATCM_EN 0x00002000
+/* Available from J7200 SoCs onwards */
+#define PROC_BOOT_CFG_FLAG_R5_MEM_INIT_DIS 0x00004000
/* R5 TI-SCI Processor Control Flags */
#define PROC_BOOT_CTRL_FLAG_R5_CORE_HALT 0x00000001
@@ -68,15 +70,27 @@ enum cluster_mode {
};
/**
+ * struct k3_r5_soc_data - match data to handle SoC variations
+ * @tcm_is_double: flag to denote the larger unified TCMs in certain modes
+ * @tcm_ecc_autoinit: flag to denote the auto-initialization of TCMs for ECC
+ */
+struct k3_r5_soc_data {
+ bool tcm_is_double;
+ bool tcm_ecc_autoinit;
+};
+
+/**
* struct k3_r5_cluster - K3 R5F Cluster structure
* @dev: cached device pointer
* @mode: Mode to configure the Cluster - Split or LockStep
* @cores: list of R5 cores within the cluster
+ * @soc_data: SoC-specific feature data for a R5FSS
*/
struct k3_r5_cluster {
struct device *dev;
enum cluster_mode mode;
struct list_head cores;
+ const struct k3_r5_soc_data *soc_data;
};
/**
@@ -362,8 +376,16 @@ static int k3_r5_rproc_prepare(struct rproc *rproc)
struct k3_r5_cluster *cluster = kproc->cluster;
struct k3_r5_core *core = kproc->core;
struct device *dev = kproc->dev;
+ u32 ctrl = 0, cfg = 0, stat = 0;
+ u64 boot_vec = 0;
+ bool mem_init_dis;
int ret;
+ ret = ti_sci_proc_get_status(core->tsp, &boot_vec, &cfg, &ctrl, &stat);
+ if (ret < 0)
+ return ret;
+ mem_init_dis = !!(cfg & PROC_BOOT_CFG_FLAG_R5_MEM_INIT_DIS);
+
ret = (cluster->mode == CLUSTER_MODE_LOCKSTEP) ?
k3_r5_lockstep_release(cluster) : k3_r5_split_release(core);
if (ret) {
@@ -373,6 +395,17 @@ static int k3_r5_rproc_prepare(struct rproc *rproc)
}
/*
+	 * Newer IP revisions like on J7200 SoCs support h/w auto-initialization
+	 * of TCMs, so there is no need to perform the s/w memzero. This bit is
+	 * configurable through System Firmware; the default value does perform
+	 * auto-init, but account for the case where it is disabled.
+ */
+ if (cluster->soc_data->tcm_ecc_autoinit && !mem_init_dis) {
+ dev_dbg(dev, "leveraging h/w init for TCM memories\n");
+ return 0;
+ }
+
+ /*
* Zero out both TCMs unconditionally (access from v8 Arm core is not
* affected by ATCM & BTCM enable configuration values) so that ECC
* can be effective on all TCM addresses.
@@ -855,6 +888,43 @@ static void k3_r5_reserved_mem_exit(struct k3_r5_rproc *kproc)
of_reserved_mem_device_release(kproc->dev);
}
+/*
+ * Each R5F core within a typical R5FSS instance has a total of 64 KB of TCMs,
+ * split equally into two 32 KB banks between ATCM and BTCM. The TCMs from both
+ * cores are usable in Split-mode, but only the Core0 TCMs can be used in
+ * LockStep-mode. The newer revisions of the R5FSS IP maximizes these TCMs by
+ * leveraging the Core1 TCMs as well in certain modes where they would have
+ * otherwise been unusable (Eg: LockStep-mode on J7200 SoCs). This is done by
+ * making a Core1 TCM visible immediately after the corresponding Core0 TCM.
+ * The SoC memory map uses the larger 64 KB sizes for the Core0 TCMs, and the
+ * dts representation reflects this increased size on supported SoCs. The Core0
+ * TCM sizes therefore have to be adjusted to only half the original size in
+ * Split mode.
+ */
+static void k3_r5_adjust_tcm_sizes(struct k3_r5_rproc *kproc)
+{
+ struct k3_r5_cluster *cluster = kproc->cluster;
+ struct k3_r5_core *core = kproc->core;
+ struct device *cdev = core->dev;
+ struct k3_r5_core *core0;
+
+ if (cluster->mode == CLUSTER_MODE_LOCKSTEP ||
+ !cluster->soc_data->tcm_is_double)
+ return;
+
+ core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
+ if (core == core0) {
+ WARN_ON(core->mem[0].size != SZ_64K);
+ WARN_ON(core->mem[1].size != SZ_64K);
+
+ core->mem[0].size /= 2;
+ core->mem[1].size /= 2;
+
+ dev_dbg(cdev, "adjusted TCM sizes, ATCM = 0x%zx BTCM = 0x%zx\n",
+ core->mem[0].size, core->mem[1].size);
+ }
+}
+
static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
{
struct k3_r5_cluster *cluster = platform_get_drvdata(pdev);
@@ -902,6 +972,8 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
goto err_config;
}
+ k3_r5_adjust_tcm_sizes(kproc);
+
ret = k3_r5_reserved_mem_init(kproc);
if (ret) {
dev_err(dev, "reserved memory init failed, ret = %d\n",
@@ -940,9 +1012,9 @@ out:
return ret;
}
-static int k3_r5_cluster_rproc_exit(struct platform_device *pdev)
+static void k3_r5_cluster_rproc_exit(void *data)
{
- struct k3_r5_cluster *cluster = platform_get_drvdata(pdev);
+ struct k3_r5_cluster *cluster = platform_get_drvdata(data);
struct k3_r5_rproc *kproc;
struct k3_r5_core *core;
struct rproc *rproc;
@@ -967,8 +1039,6 @@ static int k3_r5_cluster_rproc_exit(struct platform_device *pdev)
rproc_free(rproc);
core->rproc = NULL;
}
-
- return 0;
}
static int k3_r5_core_of_get_internal_memories(struct platform_device *pdev,
@@ -1255,9 +1325,9 @@ static void k3_r5_core_of_exit(struct platform_device *pdev)
devres_release_group(dev, k3_r5_core_of_init);
}
-static void k3_r5_cluster_of_exit(struct platform_device *pdev)
+static void k3_r5_cluster_of_exit(void *data)
{
- struct k3_r5_cluster *cluster = platform_get_drvdata(pdev);
+ struct k3_r5_cluster *cluster = platform_get_drvdata(data);
struct platform_device *cpdev;
struct k3_r5_core *core, *temp;
@@ -1311,15 +1381,23 @@ static int k3_r5_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = dev_of_node(dev);
struct k3_r5_cluster *cluster;
+ const struct k3_r5_soc_data *data;
int ret;
int num_cores;
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data) {
+ dev_err(dev, "SoC-specific data is not defined\n");
+ return -ENODEV;
+ }
+
cluster = devm_kzalloc(dev, sizeof(*cluster), GFP_KERNEL);
if (!cluster)
return -ENOMEM;
cluster->dev = dev;
cluster->mode = CLUSTER_MODE_LOCKSTEP;
+ cluster->soc_data = data;
INIT_LIST_HEAD(&cluster->cores);
ret = of_property_read_u32(np, "ti,cluster-mode", &cluster->mode);
@@ -1351,9 +1429,7 @@ static int k3_r5_probe(struct platform_device *pdev)
return ret;
}
- ret = devm_add_action_or_reset(dev,
- (void(*)(void *))k3_r5_cluster_of_exit,
- pdev);
+ ret = devm_add_action_or_reset(dev, k3_r5_cluster_of_exit, pdev);
if (ret)
return ret;
@@ -1364,18 +1440,27 @@ static int k3_r5_probe(struct platform_device *pdev)
return ret;
}
- ret = devm_add_action_or_reset(dev,
- (void(*)(void *))k3_r5_cluster_rproc_exit,
- pdev);
+ ret = devm_add_action_or_reset(dev, k3_r5_cluster_rproc_exit, pdev);
if (ret)
return ret;
return 0;
}
+static const struct k3_r5_soc_data am65_j721e_soc_data = {
+ .tcm_is_double = false,
+ .tcm_ecc_autoinit = false,
+};
+
+static const struct k3_r5_soc_data j7200_soc_data = {
+ .tcm_is_double = true,
+ .tcm_ecc_autoinit = true,
+};
+
static const struct of_device_id k3_r5_of_match[] = {
- { .compatible = "ti,am654-r5fss", },
- { .compatible = "ti,j721e-r5fss", },
+ { .compatible = "ti,am654-r5fss", .data = &am65_j721e_soc_data, },
+ { .compatible = "ti,j721e-r5fss", .data = &am65_j721e_soc_data, },
+ { .compatible = "ti,j7200-r5fss", .data = &j7200_soc_data, },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, k3_r5_of_match);
diff --git a/drivers/remoteproc/wkup_m3_rproc.c b/drivers/remoteproc/wkup_m3_rproc.c
index b9349d684258..92d387dfc03b 100644
--- a/drivers/remoteproc/wkup_m3_rproc.c
+++ b/drivers/remoteproc/wkup_m3_rproc.c
@@ -17,6 +17,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/remoteproc.h>
+#include <linux/reset.h>
#include <linux/platform_data/wkup_m3.h>
@@ -43,11 +44,13 @@ struct wkup_m3_mem {
* @rproc: rproc handle
* @pdev: pointer to platform device
* @mem: WkupM3 memory information
+ * @rsts: reset control
*/
struct wkup_m3_rproc {
struct rproc *rproc;
struct platform_device *pdev;
struct wkup_m3_mem mem[WKUPM3_MEM_MAX];
+ struct reset_control *rsts;
};
static int wkup_m3_rproc_start(struct rproc *rproc)
@@ -56,13 +59,16 @@ static int wkup_m3_rproc_start(struct rproc *rproc)
struct platform_device *pdev = wkupm3->pdev;
struct device *dev = &pdev->dev;
struct wkup_m3_platform_data *pdata = dev_get_platdata(dev);
+ int error = 0;
- if (pdata->deassert_reset(pdev, pdata->reset_name)) {
+ error = reset_control_deassert(wkupm3->rsts);
+
+ if (!wkupm3->rsts && pdata->deassert_reset(pdev, pdata->reset_name)) {
dev_err(dev, "Unable to reset wkup_m3!\n");
- return -ENODEV;
+ error = -ENODEV;
}
- return 0;
+ return error;
}
static int wkup_m3_rproc_stop(struct rproc *rproc)
@@ -71,13 +77,16 @@ static int wkup_m3_rproc_stop(struct rproc *rproc)
struct platform_device *pdev = wkupm3->pdev;
struct device *dev = &pdev->dev;
struct wkup_m3_platform_data *pdata = dev_get_platdata(dev);
+ int error = 0;
- if (pdata->assert_reset(pdev, pdata->reset_name)) {
+ error = reset_control_assert(wkupm3->rsts);
+
+ if (!wkupm3->rsts && pdata->assert_reset(pdev, pdata->reset_name)) {
dev_err(dev, "Unable to assert reset of wkup_m3!\n");
- return -ENODEV;
+ error = -ENODEV;
}
- return 0;
+ return error;
}
static void *wkup_m3_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
@@ -132,12 +141,6 @@ static int wkup_m3_rproc_probe(struct platform_device *pdev)
int ret;
int i;
- if (!(pdata && pdata->deassert_reset && pdata->assert_reset &&
- pdata->reset_name)) {
- dev_err(dev, "Platform data missing!\n");
- return -ENODEV;
- }
-
ret = of_property_read_string(dev->of_node, "ti,pm-firmware",
&fw_name);
if (ret) {
@@ -165,6 +168,18 @@ static int wkup_m3_rproc_probe(struct platform_device *pdev)
wkupm3->rproc = rproc;
wkupm3->pdev = pdev;
+ wkupm3->rsts = devm_reset_control_get_optional_shared(dev, "rstctrl");
+ if (IS_ERR(wkupm3->rsts))
+ return PTR_ERR(wkupm3->rsts);
+ if (!wkupm3->rsts) {
+ if (!(pdata && pdata->deassert_reset && pdata->assert_reset &&
+ pdata->reset_name)) {
+ dev_err(dev, "Platform data missing!\n");
+ ret = -ENODEV;
+ goto err_put_rproc;
+ }
+ }
+
for (i = 0; i < ARRAY_SIZE(mem_names); i++) {
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
mem_names[i]);
@@ -173,7 +188,7 @@ static int wkup_m3_rproc_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "devm_ioremap_resource failed for resource %d\n",
i);
ret = PTR_ERR(wkupm3->mem[i].cpu_addr);
- goto err;
+ goto err_put_rproc;
}
wkupm3->mem[i].bus_addr = res->start;
wkupm3->mem[i].size = resource_size(res);
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index 07d162b179fc..71ab75a46491 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -35,6 +35,13 @@ config RESET_AXS10X
help
This enables the reset controller driver for AXS10x.
+config RESET_BCM6345
+ bool "BCM6345 Reset Controller"
+ depends on BMIPS_GENERIC || COMPILE_TEST
+ default BMIPS_GENERIC
+ help
+ This enables the reset controller driver for BCM6345 SoCs.
+
config RESET_BERLIN
bool "Berlin Reset Driver" if COMPILE_TEST
default ARCH_BERLIN
@@ -95,7 +102,8 @@ config RESET_LPC18XX
This enables the reset controller driver for NXP LPC18xx/43xx SoCs.
config RESET_MESON
- bool "Meson Reset Driver" if COMPILE_TEST
+ tristate "Meson Reset Driver"
+ depends on ARCH_MESON || COMPILE_TEST
default ARCH_MESON
help
This enables the reset driver for Amlogic Meson SoCs.
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index 16947610cc3b..1054123fd187 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_ARCH_TEGRA) += tegra/
obj-$(CONFIG_RESET_A10SR) += reset-a10sr.o
obj-$(CONFIG_RESET_ATH79) += reset-ath79.o
obj-$(CONFIG_RESET_AXS10X) += reset-axs10x.o
+obj-$(CONFIG_RESET_BCM6345) += reset-bcm6345.o
obj-$(CONFIG_RESET_BERLIN) += reset-berlin.o
obj-$(CONFIG_RESET_BRCMSTB) += reset-brcmstb.o
obj-$(CONFIG_RESET_BRCMSTB_RESCAL) += reset-brcmstb-rescal.o
diff --git a/drivers/reset/core.c b/drivers/reset/core.c
index a2df88e90011..34e89aa0fb5e 100644
--- a/drivers/reset/core.c
+++ b/drivers/reset/core.c
@@ -208,6 +208,39 @@ static int reset_control_array_reset(struct reset_control_array *resets)
return 0;
}
+static int reset_control_array_rearm(struct reset_control_array *resets)
+{
+ struct reset_control *rstc;
+ int i;
+
+ for (i = 0; i < resets->num_rstcs; i++) {
+ rstc = resets->rstc[i];
+
+ if (!rstc)
+ continue;
+
+ if (WARN_ON(IS_ERR(rstc)))
+ return -EINVAL;
+
+ if (rstc->shared) {
+ if (WARN_ON(atomic_read(&rstc->deassert_count) != 0))
+ return -EINVAL;
+ } else {
+ if (!rstc->acquired)
+ return -EPERM;
+ }
+ }
+
+ for (i = 0; i < resets->num_rstcs; i++) {
+ rstc = resets->rstc[i];
+
+ if (rstc && rstc->shared)
+ WARN_ON(atomic_dec_return(&rstc->triggered_count) < 0);
+ }
+
+ return 0;
+}
+
static int reset_control_array_assert(struct reset_control_array *resets)
{
int ret, i;
@@ -326,6 +359,46 @@ int reset_control_reset(struct reset_control *rstc)
EXPORT_SYMBOL_GPL(reset_control_reset);
/**
+ * reset_control_rearm - allow shared reset line to be re-triggered
+ * @rstc: reset controller
+ *
+ * On a shared reset line the actual reset pulse is only triggered once for the
+ * lifetime of the reset_control instance, except if this call is used.
+ *
+ * Calls to this function must be balanced with calls to reset_control_reset;
+ * a warning is generated if triggered_count ever dips below 0.
+ *
+ * Consumers must not use reset_control_(de)assert on shared reset lines when
+ * reset_control_reset or reset_control_rearm have been used.
+ *
+ * If rstc is NULL the function will just return 0.
+ */
+int reset_control_rearm(struct reset_control *rstc)
+{
+ if (!rstc)
+ return 0;
+
+ if (WARN_ON(IS_ERR(rstc)))
+ return -EINVAL;
+
+ if (reset_control_is_array(rstc))
+ return reset_control_array_rearm(rstc_to_array(rstc));
+
+ if (rstc->shared) {
+ if (WARN_ON(atomic_read(&rstc->deassert_count) != 0))
+ return -EINVAL;
+
+ WARN_ON(atomic_dec_return(&rstc->triggered_count) < 0);
+ } else {
+ if (!rstc->acquired)
+ return -EPERM;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(reset_control_rearm);
+
+/**
* reset_control_assert - asserts the reset line
* @rstc: reset controller
*
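
To make the balancing rule concrete, a hedged consumer sketch; the device being exercised and the work done between the two calls are hypothetical.

#include <linux/reset.h>

static int my_selftest(struct reset_control *rstc)
{
	int ret;

	/* on a shared line this pulses the reset only for the first caller */
	ret = reset_control_reset(rstc);
	if (ret)
		return ret;

	/* ... exercise the freshly reset peripheral ... */

	/*
	 * Balance the reset so that a later reset_control_reset() is able
	 * to trigger the line again.
	 */
	return reset_control_rearm(rstc);
}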
diff --git a/drivers/reset/reset-bcm6345.c b/drivers/reset/reset-bcm6345.c
new file mode 100644
index 000000000000..737e4e81f6b7
--- /dev/null
+++ b/drivers/reset/reset-bcm6345.c
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * BCM6345 Reset Controller Driver
+ *
+ * Copyright (C) 2020 Álvaro Fernández Rojas <noltari@gmail.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+
+#define BCM6345_RESET_NUM 32
+#define BCM6345_RESET_SLEEP_MIN_US 10000
+#define BCM6345_RESET_SLEEP_MAX_US 20000
+
+struct bcm6345_reset {
+ struct reset_controller_dev rcdev;
+ void __iomem *base;
+ spinlock_t lock;
+};
+
+static inline struct bcm6345_reset *
+to_bcm6345_reset(struct reset_controller_dev *rcdev)
+{
+ return container_of(rcdev, struct bcm6345_reset, rcdev);
+}
+
+static int bcm6345_reset_update(struct reset_controller_dev *rcdev,
+ unsigned long id, bool assert)
+{
+ struct bcm6345_reset *bcm6345_reset = to_bcm6345_reset(rcdev);
+ unsigned long flags;
+ uint32_t val;
+
+ spin_lock_irqsave(&bcm6345_reset->lock, flags);
+ val = __raw_readl(bcm6345_reset->base);
+ if (assert)
+ val &= ~BIT(id);
+ else
+ val |= BIT(id);
+ __raw_writel(val, bcm6345_reset->base);
+ spin_unlock_irqrestore(&bcm6345_reset->lock, flags);
+
+ return 0;
+}
+
+static int bcm6345_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return bcm6345_reset_update(rcdev, id, true);
+}
+
+static int bcm6345_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return bcm6345_reset_update(rcdev, id, false);
+}
+
+static int bcm6345_reset_reset(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ bcm6345_reset_update(rcdev, id, true);
+ usleep_range(BCM6345_RESET_SLEEP_MIN_US,
+ BCM6345_RESET_SLEEP_MAX_US);
+
+ bcm6345_reset_update(rcdev, id, false);
+ /*
+	 * Ensure the component is taken out of the reset state by also sleeping
+	 * after deasserting the reset. Otherwise, the component may not be ready
+ * for operation.
+ */
+ usleep_range(BCM6345_RESET_SLEEP_MIN_US,
+ BCM6345_RESET_SLEEP_MAX_US);
+
+ return 0;
+}
+
+static int bcm6345_reset_status(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct bcm6345_reset *bcm6345_reset = to_bcm6345_reset(rcdev);
+
+ return !(__raw_readl(bcm6345_reset->base) & BIT(id));
+}
+
+static struct reset_control_ops bcm6345_reset_ops = {
+ .assert = bcm6345_reset_assert,
+ .deassert = bcm6345_reset_deassert,
+ .reset = bcm6345_reset_reset,
+ .status = bcm6345_reset_status,
+};
+
+static int bcm6345_reset_probe(struct platform_device *pdev)
+{
+ struct bcm6345_reset *bcm6345_reset;
+
+ bcm6345_reset = devm_kzalloc(&pdev->dev,
+ sizeof(*bcm6345_reset), GFP_KERNEL);
+ if (!bcm6345_reset)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, bcm6345_reset);
+
+ bcm6345_reset->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(bcm6345_reset->base))
+ return PTR_ERR(bcm6345_reset->base);
+
+ spin_lock_init(&bcm6345_reset->lock);
+ bcm6345_reset->rcdev.ops = &bcm6345_reset_ops;
+ bcm6345_reset->rcdev.owner = THIS_MODULE;
+ bcm6345_reset->rcdev.of_node = pdev->dev.of_node;
+ bcm6345_reset->rcdev.of_reset_n_cells = 1;
+ bcm6345_reset->rcdev.nr_resets = BCM6345_RESET_NUM;
+
+ return devm_reset_controller_register(&pdev->dev,
+ &bcm6345_reset->rcdev);
+}
+
+static const struct of_device_id bcm6345_reset_of_match[] = {
+ { .compatible = "brcm,bcm6345-reset" },
+ { /* sentinel */ },
+};
+
+static struct platform_driver bcm6345_reset_driver = {
+ .probe = bcm6345_reset_probe,
+ .driver = {
+ .name = "bcm6345-reset",
+ .of_match_table = bcm6345_reset_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver(bcm6345_reset_driver);
diff --git a/drivers/reset/reset-meson.c b/drivers/reset/reset-meson.c
index 94d7ba88d7d2..c9bc325ad65a 100644
--- a/drivers/reset/reset-meson.c
+++ b/drivers/reset/reset-meson.c
@@ -9,6 +9,7 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/of.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>
@@ -104,6 +105,7 @@ static const struct of_device_id meson_reset_dt_ids[] = {
{ .compatible = "amlogic,meson-a1-reset", .data = &meson_a1_param},
{ /* sentinel */ },
};
+MODULE_DEVICE_TABLE(of, meson_reset_dt_ids);
static int meson_reset_probe(struct platform_device *pdev)
{
@@ -142,4 +144,8 @@ static struct platform_driver meson_reset_driver = {
.of_match_table = meson_reset_dt_ids,
},
};
-builtin_platform_driver(meson_reset_driver);
+module_platform_driver(meson_reset_driver);
+
+MODULE_DESCRIPTION("Amlogic Meson Reset Controller driver");
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/reset/reset-socfpga.c b/drivers/reset/reset-socfpga.c
index bdd984296196..2a72f861f798 100644
--- a/drivers/reset/reset-socfpga.c
+++ b/drivers/reset/reset-socfpga.c
@@ -44,7 +44,7 @@ static int a10_reset_init(struct device_node *np)
data->membase = ioremap(res.start, size);
if (!data->membase) {
ret = -ENOMEM;
- goto err_alloc;
+ goto release_region;
}
if (of_property_read_u32(np, "altr,modrst-offset", &reg_offset))
@@ -59,7 +59,14 @@ static int a10_reset_init(struct device_node *np)
data->rcdev.of_node = np;
data->status_active_low = true;
- return reset_controller_register(&data->rcdev);
+ ret = reset_controller_register(&data->rcdev);
+ if (ret)
+ pr_err("unable to register device\n");
+
+ return ret;
+
+release_region:
+ release_mem_region(res.start, size);
err_alloc:
kfree(data);
diff --git a/drivers/reset/reset-ti-syscon.c b/drivers/reset/reset-ti-syscon.c
index ef97c4dbbb4e..218370faf37b 100644
--- a/drivers/reset/reset-ti-syscon.c
+++ b/drivers/reset/reset-ti-syscon.c
@@ -89,7 +89,7 @@ static int ti_syscon_reset_assert(struct reset_controller_dev *rcdev,
mask = BIT(control->assert_bit);
value = (control->flags & ASSERT_SET) ? mask : 0x0;
- return regmap_update_bits(data->regmap, control->assert_offset, mask, value);
+ return regmap_write_bits(data->regmap, control->assert_offset, mask, value);
}
/**
@@ -120,7 +120,7 @@ static int ti_syscon_reset_deassert(struct reset_controller_dev *rcdev,
mask = BIT(control->deassert_bit);
value = (control->flags & DEASSERT_SET) ? mask : 0x0;
- return regmap_update_bits(data->regmap, control->deassert_offset, mask, value);
+ return regmap_write_bits(data->regmap, control->deassert_offset, mask, value);
}
/**
diff --git a/drivers/rpmsg/Kconfig b/drivers/rpmsg/Kconfig
index f96716893c2a..0b4407abdf13 100644
--- a/drivers/rpmsg/Kconfig
+++ b/drivers/rpmsg/Kconfig
@@ -15,6 +15,14 @@ config RPMSG_CHAR
in /dev. They make it possible for user-space programs to send and
receive rpmsg packets.
+config RPMSG_NS
+ tristate "RPMSG name service announcement"
+ depends on RPMSG
+ help
+	  Say Y here to enable support for the name service announcement
+	  channel, which probes the associated RPMsg device when the remote
+	  endpoint announces a service.
+
config RPMSG_MTK_SCP
tristate "MediaTek SCP"
depends on MTK_SCP
@@ -62,6 +70,7 @@ config RPMSG_VIRTIO
tristate "Virtio RPMSG bus driver"
depends on HAS_DMA
select RPMSG
+ select RPMSG_NS
select VIRTIO
endmenu
diff --git a/drivers/rpmsg/Makefile b/drivers/rpmsg/Makefile
index ffe932ef6050..8d452656f0ee 100644
--- a/drivers/rpmsg/Makefile
+++ b/drivers/rpmsg/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_RPMSG) += rpmsg_core.o
obj-$(CONFIG_RPMSG_CHAR) += rpmsg_char.o
+obj-$(CONFIG_RPMSG_NS) += rpmsg_ns.o
obj-$(CONFIG_RPMSG_MTK_SCP) += mtk_rpmsg.o
qcom_glink-objs := qcom_glink_native.o qcom_glink_ssr.o
obj-$(CONFIG_RPMSG_QCOM_GLINK) += qcom_glink.o
diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
index 91de940896e3..e5daee4f9373 100644
--- a/drivers/rpmsg/rpmsg_core.c
+++ b/drivers/rpmsg/rpmsg_core.c
@@ -21,6 +21,50 @@
#include "rpmsg_internal.h"
/**
+ * rpmsg_create_channel() - create a new rpmsg channel
+ * using its name and address info.
+ * @rpdev: rpmsg device
+ * @chinfo: channel_info to bind
+ *
+ * Returns a pointer to the new rpmsg device on success, or NULL on error.
+ */
+struct rpmsg_device *rpmsg_create_channel(struct rpmsg_device *rpdev,
+ struct rpmsg_channel_info *chinfo)
+{
+ if (WARN_ON(!rpdev))
+ return NULL;
+ if (!rpdev->ops || !rpdev->ops->create_channel) {
+ dev_err(&rpdev->dev, "no create_channel ops found\n");
+ return NULL;
+ }
+
+ return rpdev->ops->create_channel(rpdev, chinfo);
+}
+EXPORT_SYMBOL(rpmsg_create_channel);
+
+/**
+ * rpmsg_release_channel() - release an rpmsg channel
+ * using its name and address info.
+ * @rpdev: rpmsg device
+ * @chinfo: channel_info to bind
+ *
+ * Returns 0 on success or an appropriate error value.
+ */
+int rpmsg_release_channel(struct rpmsg_device *rpdev,
+ struct rpmsg_channel_info *chinfo)
+{
+ if (WARN_ON(!rpdev))
+ return -EINVAL;
+ if (!rpdev->ops || !rpdev->ops->release_channel) {
+ dev_err(&rpdev->dev, "no release_channel ops found\n");
+ return -ENXIO;
+ }
+
+ return rpdev->ops->release_channel(rpdev, chinfo);
+}
+EXPORT_SYMBOL(rpmsg_release_channel);
+
+/**
* rpmsg_create_ept() - create a new rpmsg_endpoint
* @rpdev: rpmsg channel device
* @cb: rx callback handler
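For orientation, a minimal sketch of how a backend-agnostic caller might pair the two new core wrappers (example_handle_announcement is a hypothetical helper; the chinfo values are illustrative):

/* Sketch only: pairs rpmsg_create_channel()/rpmsg_release_channel(). */
static int example_handle_announcement(struct rpmsg_device *rpdev,
				       const char *name, u32 addr,
				       bool destroy)
{
	struct rpmsg_channel_info chinfo = {
		.src = RPMSG_ADDR_ANY,
		.dst = addr,
	};

	strscpy(chinfo.name, name, sizeof(chinfo.name));

	if (destroy)
		return rpmsg_release_channel(rpdev, &chinfo);

	return rpmsg_create_channel(rpdev, &chinfo) ? 0 : -ENXIO;
}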
diff --git a/drivers/rpmsg/rpmsg_internal.h b/drivers/rpmsg/rpmsg_internal.h
index 3fc83cd50e98..a76c344253bf 100644
--- a/drivers/rpmsg/rpmsg_internal.h
+++ b/drivers/rpmsg/rpmsg_internal.h
@@ -20,6 +20,8 @@
/**
* struct rpmsg_device_ops - indirection table for the rpmsg_device operations
+ * @create_channel: create backend-specific channel, optional
+ * @release_channel: release backend-specific channel, optional
* @create_ept: create backend-specific endpoint, required
* @announce_create: announce presence of new channel, optional
* @announce_destroy: announce destruction of channel, optional
@@ -29,6 +31,10 @@
* advertise new channels implicitly by creating the endpoints.
*/
struct rpmsg_device_ops {
+ struct rpmsg_device *(*create_channel)(struct rpmsg_device *rpdev,
+ struct rpmsg_channel_info *chinfo);
+ int (*release_channel)(struct rpmsg_device *rpdev,
+ struct rpmsg_channel_info *chinfo);
struct rpmsg_endpoint *(*create_ept)(struct rpmsg_device *rpdev,
rpmsg_rx_cb_t cb, void *priv,
struct rpmsg_channel_info chinfo);
@@ -68,13 +74,13 @@ struct rpmsg_endpoint_ops {
poll_table *wait);
};
-int rpmsg_register_device(struct rpmsg_device *rpdev);
-int rpmsg_unregister_device(struct device *parent,
- struct rpmsg_channel_info *chinfo);
-
struct device *rpmsg_find_device(struct device *parent,
struct rpmsg_channel_info *chinfo);
+struct rpmsg_device *rpmsg_create_channel(struct rpmsg_device *rpdev,
+ struct rpmsg_channel_info *chinfo);
+int rpmsg_release_channel(struct rpmsg_device *rpdev,
+ struct rpmsg_channel_info *chinfo);
/**
* rpmsg_chrdev_register_device() - register chrdev device based on rpdev
* @rpdev: prepared rpdev to be used for creating endpoints
diff --git a/drivers/rpmsg/rpmsg_ns.c b/drivers/rpmsg/rpmsg_ns.c
new file mode 100644
index 000000000000..762ff1ae279f
--- /dev/null
+++ b/drivers/rpmsg/rpmsg_ns.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics 2020 - All Rights Reserved
+ */
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/rpmsg.h>
+#include <linux/rpmsg/ns.h>
+#include <linux/slab.h>
+
+#include "rpmsg_internal.h"
+
+/**
+ * rpmsg_ns_register_device() - register name service device based on rpdev
+ * @rpdev: prepared rpdev to be used for creating endpoints
+ *
+ * This function wraps rpmsg_register_device(), preparing the rpdev for use
+ * as the basis for the rpmsg name service device.
+ */
+int rpmsg_ns_register_device(struct rpmsg_device *rpdev)
+{
+ strcpy(rpdev->id.name, "rpmsg_ns");
+ rpdev->driver_override = "rpmsg_ns";
+ rpdev->src = RPMSG_NS_ADDR;
+ rpdev->dst = RPMSG_NS_ADDR;
+
+ return rpmsg_register_device(rpdev);
+}
+EXPORT_SYMBOL(rpmsg_ns_register_device);
+
+/* invoked when a name service announcement arrives */
+static int rpmsg_ns_cb(struct rpmsg_device *rpdev, void *data, int len,
+ void *priv, u32 src)
+{
+ struct rpmsg_ns_msg *msg = data;
+ struct rpmsg_device *newch;
+ struct rpmsg_channel_info chinfo;
+ struct device *dev = rpdev->dev.parent;
+ int ret;
+
+#if defined(CONFIG_DYNAMIC_DEBUG)
+ dynamic_hex_dump("NS announcement: ", DUMP_PREFIX_NONE, 16, 1,
+ data, len, true);
+#endif
+
+ if (len != sizeof(*msg)) {
+ dev_err(dev, "malformed ns msg (%d)\n", len);
+ return -EINVAL;
+ }
+
+ /* don't trust the remote processor for null terminating the name */
+ msg->name[RPMSG_NAME_SIZE - 1] = '\0';
+
+ strncpy(chinfo.name, msg->name, sizeof(chinfo.name));
+ chinfo.src = RPMSG_ADDR_ANY;
+ chinfo.dst = rpmsg32_to_cpu(rpdev, msg->addr);
+
+ dev_info(dev, "%sing channel %s addr 0x%x\n",
+ rpmsg32_to_cpu(rpdev, msg->flags) & RPMSG_NS_DESTROY ?
+ "destroy" : "creat", msg->name, chinfo.dst);
+
+ if (rpmsg32_to_cpu(rpdev, msg->flags) & RPMSG_NS_DESTROY) {
+ ret = rpmsg_release_channel(rpdev, &chinfo);
+ if (ret)
+			dev_err(dev, "rpmsg_release_channel failed: %d\n", ret);
+ } else {
+ newch = rpmsg_create_channel(rpdev, &chinfo);
+ if (!newch)
+ dev_err(dev, "rpmsg_create_channel failed\n");
+ }
+
+ return 0;
+}
+
+static int rpmsg_ns_probe(struct rpmsg_device *rpdev)
+{
+ struct rpmsg_endpoint *ns_ept;
+ struct rpmsg_channel_info ns_chinfo = {
+ .src = RPMSG_NS_ADDR,
+ .dst = RPMSG_NS_ADDR,
+ .name = "name_service",
+ };
+
+ /*
+	 * Create the NS announcement service endpoint associated with the
+	 * RPMsg device. The endpoint is destroyed automatically when the
+	 * RPMsg device is deleted.
+ */
+ ns_ept = rpmsg_create_ept(rpdev, rpmsg_ns_cb, NULL, ns_chinfo);
+ if (!ns_ept) {
+ dev_err(&rpdev->dev, "failed to create the ns ept\n");
+ return -ENOMEM;
+ }
+ rpdev->ept = ns_ept;
+
+ return 0;
+}
+
+static struct rpmsg_driver rpmsg_ns_driver = {
+ .drv.name = KBUILD_MODNAME,
+ .probe = rpmsg_ns_probe,
+};
+
+static int rpmsg_ns_init(void)
+{
+ int ret;
+
+ ret = register_rpmsg_driver(&rpmsg_ns_driver);
+ if (ret < 0)
+ pr_err("%s: Failed to register rpmsg driver\n", __func__);
+
+ return ret;
+}
+postcore_initcall(rpmsg_ns_init);
+
+static void rpmsg_ns_exit(void)
+{
+ unregister_rpmsg_driver(&rpmsg_ns_driver);
+}
+module_exit(rpmsg_ns_exit);
+
+MODULE_DESCRIPTION("Name service announcement rpmsg driver");
+MODULE_AUTHOR("Arnaud Pouliquen <arnaud.pouliquen@st.com>");
+MODULE_ALIAS("rpmsg:" KBUILD_MODNAME);
+MODULE_LICENSE("GPL v2");
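To make the wire format concrete: the announcement consumed by rpmsg_ns_cb() is a fixed-size struct rpmsg_ns_msg (name, addr, flags, per <linux/rpmsg/ns.h>). A hedged sketch of how a sender would fill one, using the byteorder helpers this series introduces (example_fill_ns_msg is hypothetical):

/* Sketch: compose the NS announcement a remote side would transmit. */
static void example_fill_ns_msg(struct rpmsg_device *rpdev,
				struct rpmsg_ns_msg *msg,
				const char *service, u32 addr, bool destroy)
{
	memset(msg, 0, sizeof(*msg));
	strscpy(msg->name, service, sizeof(msg->name));
	msg->addr = cpu_to_rpmsg32(rpdev, addr);
	msg->flags = cpu_to_rpmsg32(rpdev, destroy ? RPMSG_NS_DESTROY
						   : RPMSG_NS_CREATE);
}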
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
index 7d7ed4e5cce7..e87d4cf926eb 100644
--- a/drivers/rpmsg/virtio_rpmsg_bus.c
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -19,11 +19,12 @@
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/rpmsg.h>
+#include <linux/rpmsg/byteorder.h>
+#include <linux/rpmsg/ns.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/virtio.h>
-#include <linux/virtio_byteorder.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
#include <linux/wait.h>
@@ -48,7 +49,6 @@
* @endpoints_lock: lock of the endpoints set
* @sendq: wait queue of sending contexts waiting for a tx buffers
* @sleepers: number of senders that are waiting for a tx buffer
- * @ns_ept: the bus's name service endpoint
*
* This structure stores the rpmsg state of a given virtio remote processor
* device (there might be several virtio proc devices for each physical
@@ -67,7 +67,6 @@ struct virtproc_info {
struct mutex endpoints_lock;
wait_queue_head_t sendq;
atomic_t sleepers;
- struct rpmsg_endpoint *ns_ept;
};
/* The feature bitmap for virtio rpmsg */
@@ -85,42 +84,14 @@ struct virtproc_info {
* Every message sent(/received) on the rpmsg bus begins with this header.
*/
struct rpmsg_hdr {
- __virtio32 src;
- __virtio32 dst;
- __virtio32 reserved;
- __virtio16 len;
- __virtio16 flags;
+ __rpmsg32 src;
+ __rpmsg32 dst;
+ __rpmsg32 reserved;
+ __rpmsg16 len;
+ __rpmsg16 flags;
u8 data[];
} __packed;
-/**
- * struct rpmsg_ns_msg - dynamic name service announcement message
- * @name: name of remote service that is published
- * @addr: address of remote service that is published
- * @flags: indicates whether service is created or destroyed
- *
- * This message is sent across to publish a new service, or announce
- * about its removal. When we receive these messages, an appropriate
- * rpmsg channel (i.e device) is created/destroyed. In turn, the ->probe()
- * or ->remove() handler of the appropriate rpmsg driver will be invoked
- * (if/as-soon-as one is registered).
- */
-struct rpmsg_ns_msg {
- char name[RPMSG_NAME_SIZE];
- __virtio32 addr;
- __virtio32 flags;
-} __packed;
-
-/**
- * enum rpmsg_ns_flags - dynamic name service announcement flags
- *
- * @RPMSG_NS_CREATE: a new remote service was just created
- * @RPMSG_NS_DESTROY: a known remote service was just destroyed
- */
-enum rpmsg_ns_flags {
- RPMSG_NS_CREATE = 0,
- RPMSG_NS_DESTROY = 1,
-};
/**
* struct virtio_rpmsg_channel - rpmsg channel descriptor
@@ -167,9 +138,6 @@ struct virtio_rpmsg_channel {
*/
#define RPMSG_RESERVED_ADDRESSES (1024)
-/* Address 53 is reserved for advertising remote services */
-#define RPMSG_NS_ADDR (53)
-
static void virtio_rpmsg_destroy_ept(struct rpmsg_endpoint *ept);
static int virtio_rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len);
static int virtio_rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len,
@@ -181,6 +149,8 @@ static int virtio_rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data,
int len, u32 dst);
static int virtio_rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src,
u32 dst, void *data, int len);
+static struct rpmsg_device *__rpmsg_create_channel(struct virtproc_info *vrp,
+ struct rpmsg_channel_info *chinfo);
static const struct rpmsg_endpoint_ops virtio_endpoint_ops = {
.destroy_ept = virtio_rpmsg_destroy_ept,
@@ -285,6 +255,24 @@ free_ept:
return NULL;
}
+static struct rpmsg_device *virtio_rpmsg_create_channel(struct rpmsg_device *rpdev,
+ struct rpmsg_channel_info *chinfo)
+{
+ struct virtio_rpmsg_channel *vch = to_virtio_rpmsg_channel(rpdev);
+ struct virtproc_info *vrp = vch->vrp;
+
+ return __rpmsg_create_channel(vrp, chinfo);
+}
+
+static int virtio_rpmsg_release_channel(struct rpmsg_device *rpdev,
+ struct rpmsg_channel_info *chinfo)
+{
+ struct virtio_rpmsg_channel *vch = to_virtio_rpmsg_channel(rpdev);
+ struct virtproc_info *vrp = vch->vrp;
+
+ return rpmsg_unregister_device(&vrp->vdev->dev, chinfo);
+}
+
static struct rpmsg_endpoint *virtio_rpmsg_create_ept(struct rpmsg_device *rpdev,
rpmsg_rx_cb_t cb,
void *priv,
@@ -341,8 +329,8 @@ static int virtio_rpmsg_announce_create(struct rpmsg_device *rpdev)
struct rpmsg_ns_msg nsm;
strncpy(nsm.name, rpdev->id.name, RPMSG_NAME_SIZE);
- nsm.addr = cpu_to_virtio32(vrp->vdev, rpdev->ept->addr);
- nsm.flags = cpu_to_virtio32(vrp->vdev, RPMSG_NS_CREATE);
+ nsm.addr = cpu_to_rpmsg32(rpdev, rpdev->ept->addr);
+ nsm.flags = cpu_to_rpmsg32(rpdev, RPMSG_NS_CREATE);
err = rpmsg_sendto(rpdev->ept, &nsm, sizeof(nsm), RPMSG_NS_ADDR);
if (err)
@@ -365,8 +353,8 @@ static int virtio_rpmsg_announce_destroy(struct rpmsg_device *rpdev)
struct rpmsg_ns_msg nsm;
strncpy(nsm.name, rpdev->id.name, RPMSG_NAME_SIZE);
- nsm.addr = cpu_to_virtio32(vrp->vdev, rpdev->ept->addr);
- nsm.flags = cpu_to_virtio32(vrp->vdev, RPMSG_NS_DESTROY);
+ nsm.addr = cpu_to_rpmsg32(rpdev, rpdev->ept->addr);
+ nsm.flags = cpu_to_rpmsg32(rpdev, RPMSG_NS_DESTROY);
err = rpmsg_sendto(rpdev->ept, &nsm, sizeof(nsm), RPMSG_NS_ADDR);
if (err)
@@ -377,6 +365,8 @@ static int virtio_rpmsg_announce_destroy(struct rpmsg_device *rpdev)
}
static const struct rpmsg_device_ops virtio_rpmsg_ops = {
+ .create_channel = virtio_rpmsg_create_channel,
+ .release_channel = virtio_rpmsg_release_channel,
.create_ept = virtio_rpmsg_create_ept,
.announce_create = virtio_rpmsg_announce_create,
.announce_destroy = virtio_rpmsg_announce_destroy,
@@ -395,8 +385,8 @@ static void virtio_rpmsg_release_device(struct device *dev)
* this function will be used to create both static and dynamic
* channels.
*/
-static struct rpmsg_device *rpmsg_create_channel(struct virtproc_info *vrp,
- struct rpmsg_channel_info *chinfo)
+static struct rpmsg_device *__rpmsg_create_channel(struct virtproc_info *vrp,
+ struct rpmsg_channel_info *chinfo)
{
struct virtio_rpmsg_channel *vch;
struct rpmsg_device *rpdev;
@@ -425,6 +415,7 @@ static struct rpmsg_device *rpmsg_create_channel(struct virtproc_info *vrp,
rpdev->src = chinfo->src;
rpdev->dst = chinfo->dst;
rpdev->ops = &virtio_rpmsg_ops;
+ rpdev->little_endian = virtio_is_little_endian(vrp->vdev);
/*
* rpmsg server channels has predefined local address (for now),
@@ -618,10 +609,10 @@ static int rpmsg_send_offchannel_raw(struct rpmsg_device *rpdev,
}
}
- msg->len = cpu_to_virtio16(vrp->vdev, len);
+ msg->len = cpu_to_rpmsg16(rpdev, len);
msg->flags = 0;
- msg->src = cpu_to_virtio32(vrp->vdev, src);
- msg->dst = cpu_to_virtio32(vrp->vdev, dst);
+ msg->src = cpu_to_rpmsg32(rpdev, src);
+ msg->dst = cpu_to_rpmsg32(rpdev, dst);
msg->reserved = 0;
memcpy(msg->data, data, len);
@@ -710,14 +701,15 @@ static int rpmsg_recv_single(struct virtproc_info *vrp, struct device *dev,
{
struct rpmsg_endpoint *ept;
struct scatterlist sg;
- unsigned int msg_len = virtio16_to_cpu(vrp->vdev, msg->len);
+ bool little_endian = virtio_is_little_endian(vrp->vdev);
+ unsigned int msg_len = __rpmsg16_to_cpu(little_endian, msg->len);
int err;
dev_dbg(dev, "From: 0x%x, To: 0x%x, Len: %d, Flags: %d, Reserved: %d\n",
- virtio32_to_cpu(vrp->vdev, msg->src),
- virtio32_to_cpu(vrp->vdev, msg->dst), msg_len,
- virtio16_to_cpu(vrp->vdev, msg->flags),
- virtio32_to_cpu(vrp->vdev, msg->reserved));
+ __rpmsg32_to_cpu(little_endian, msg->src),
+ __rpmsg32_to_cpu(little_endian, msg->dst), msg_len,
+ __rpmsg16_to_cpu(little_endian, msg->flags),
+ __rpmsg32_to_cpu(little_endian, msg->reserved));
#if defined(CONFIG_DYNAMIC_DEBUG)
dynamic_hex_dump("rpmsg_virtio RX: ", DUMP_PREFIX_NONE, 16, 1,
msg, sizeof(*msg) + msg_len, true);
@@ -736,7 +728,7 @@ static int rpmsg_recv_single(struct virtproc_info *vrp, struct device *dev,
/* use the dst addr to fetch the callback of the appropriate user */
mutex_lock(&vrp->endpoints_lock);
- ept = idr_find(&vrp->endpoints, virtio32_to_cpu(vrp->vdev, msg->dst));
+ ept = idr_find(&vrp->endpoints, __rpmsg32_to_cpu(little_endian, msg->dst));
/* let's make sure no one deallocates ept while we use it */
if (ept)
@@ -750,7 +742,7 @@ static int rpmsg_recv_single(struct virtproc_info *vrp, struct device *dev,
if (ept->cb)
ept->cb(ept->rpdev, msg->data, msg_len, ept->priv,
- virtio32_to_cpu(vrp->vdev, msg->src));
+ __rpmsg32_to_cpu(little_endian, msg->src));
mutex_unlock(&ept->cb_lock);
@@ -821,68 +813,14 @@ static void rpmsg_xmit_done(struct virtqueue *svq)
wake_up_interruptible(&vrp->sendq);
}
-/* invoked when a name service announcement arrives */
-static int rpmsg_ns_cb(struct rpmsg_device *rpdev, void *data, int len,
- void *priv, u32 src)
-{
- struct rpmsg_ns_msg *msg = data;
- struct rpmsg_device *newch;
- struct rpmsg_channel_info chinfo;
- struct virtproc_info *vrp = priv;
- struct device *dev = &vrp->vdev->dev;
- int ret;
-
-#if defined(CONFIG_DYNAMIC_DEBUG)
- dynamic_hex_dump("NS announcement: ", DUMP_PREFIX_NONE, 16, 1,
- data, len, true);
-#endif
-
- if (len != sizeof(*msg)) {
- dev_err(dev, "malformed ns msg (%d)\n", len);
- return -EINVAL;
- }
-
- /*
- * the name service ept does _not_ belong to a real rpmsg channel,
- * and is handled by the rpmsg bus itself.
- * for sanity reasons, make sure a valid rpdev has _not_ sneaked
- * in somehow.
- */
- if (rpdev) {
- dev_err(dev, "anomaly: ns ept has an rpdev handle\n");
- return -EINVAL;
- }
-
- /* don't trust the remote processor for null terminating the name */
- msg->name[RPMSG_NAME_SIZE - 1] = '\0';
-
- strncpy(chinfo.name, msg->name, sizeof(chinfo.name));
- chinfo.src = RPMSG_ADDR_ANY;
- chinfo.dst = virtio32_to_cpu(vrp->vdev, msg->addr);
-
- dev_info(dev, "%sing channel %s addr 0x%x\n",
- virtio32_to_cpu(vrp->vdev, msg->flags) & RPMSG_NS_DESTROY ?
- "destroy" : "creat", msg->name, chinfo.dst);
-
- if (virtio32_to_cpu(vrp->vdev, msg->flags) & RPMSG_NS_DESTROY) {
- ret = rpmsg_unregister_device(&vrp->vdev->dev, &chinfo);
- if (ret)
- dev_err(dev, "rpmsg_destroy_channel failed: %d\n", ret);
- } else {
- newch = rpmsg_create_channel(vrp, &chinfo);
- if (!newch)
- dev_err(dev, "rpmsg_create_channel failed\n");
- }
-
- return 0;
-}
-
static int rpmsg_probe(struct virtio_device *vdev)
{
vq_callback_t *vq_cbs[] = { rpmsg_recv_done, rpmsg_xmit_done };
static const char * const names[] = { "input", "output" };
struct virtqueue *vqs[2];
struct virtproc_info *vrp;
+ struct virtio_rpmsg_channel *vch;
+ struct rpmsg_device *rpdev_ns;
void *bufs_va;
int err = 0, i;
size_t total_buf_space;
@@ -958,14 +896,26 @@ static int rpmsg_probe(struct virtio_device *vdev)
/* if supported by the remote processor, enable the name service */
if (virtio_has_feature(vdev, VIRTIO_RPMSG_F_NS)) {
- /* a dedicated endpoint handles the name service msgs */
- vrp->ns_ept = __rpmsg_create_ept(vrp, NULL, rpmsg_ns_cb,
- vrp, RPMSG_NS_ADDR);
- if (!vrp->ns_ept) {
- dev_err(&vdev->dev, "failed to create the ns ept\n");
+ vch = kzalloc(sizeof(*vch), GFP_KERNEL);
+ if (!vch) {
err = -ENOMEM;
goto free_coherent;
}
+
+ /* Link the channel to our vrp */
+ vch->vrp = vrp;
+
+ /* Assign public information to the rpmsg_device */
+ rpdev_ns = &vch->rpdev;
+ rpdev_ns->ops = &virtio_rpmsg_ops;
+ rpdev_ns->little_endian = virtio_is_little_endian(vrp->vdev);
+
+ rpdev_ns->dev.parent = &vrp->vdev->dev;
+ rpdev_ns->dev.release = virtio_rpmsg_release_device;
+
+ err = rpmsg_ns_register_device(rpdev_ns);
+ if (err)
+ goto free_coherent;
}
/*
@@ -990,6 +940,7 @@ static int rpmsg_probe(struct virtio_device *vdev)
return 0;
free_coherent:
+ kfree(vch);
dma_free_coherent(vdev->dev.parent, total_buf_space,
bufs_va, vrp->bufs_dma);
vqs_del:
@@ -1018,9 +969,6 @@ static void rpmsg_remove(struct virtio_device *vdev)
if (ret)
dev_warn(&vdev->dev, "can't remove rpmsg device: %d\n", ret);
- if (vrp->ns_ept)
- __rpmsg_destroy_ept(vrp, vrp->ns_ept);
-
idr_destroy(&vrp->endpoints);
vdev->config->del_vqs(vrp->vdev);
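The byteorder rework above amounts to keying conversions on the rpmsg device's endianness instead of on the virtio device. Roughly, the new helpers behave like this sketch (semantics only, not the header's exact text):

/* Sketch of what __rpmsg32_to_cpu(little_endian, x) performs. */
static inline u32 example_rpmsg32_to_cpu(bool little_endian, u32 raw)
{
	return little_endian ? le32_to_cpu((__force __le32)raw)
			     : be32_to_cpu((__force __be32)raw);
}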
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 65ad9d0b47ab..6123f9f4fbc9 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -13,7 +13,7 @@ config RTC_MC146818_LIB
menuconfig RTC_CLASS
bool "Real Time Clock"
default n
- depends on !S390 && !UML
+ depends on !S390
select RTC_LIB
help
Generic RTC class support. If you say yes here, you will
@@ -817,15 +817,6 @@ config RTC_DRV_RX4581
This driver can also be built as a module. If so the module
will be called rtc-rx4581.
-config RTC_DRV_RX6110
- tristate "Epson RX-6110"
- select REGMAP_SPI
- help
- If you say yes here you will get support for the Epson RX-6610.
-
- This driver can also be built as a module. If so the module
- will be called rtc-rx6110.
-
config RTC_DRV_RS5C348
tristate "Ricoh RS5C348A/B"
help
@@ -936,6 +927,17 @@ config RTC_DRV_RV3029_HWMON
Say Y here if you want to expose temperature sensor data on
rtc-rv3029.
+config RTC_DRV_RX6110
+ tristate "Epson RX-6110"
+ depends on RTC_I2C_AND_SPI
+ select REGMAP_SPI if SPI_MASTER
+ select REGMAP_I2C if I2C
+ help
+ If you say yes here you will get support for the Epson RX-6110.
+
+ This driver can also be built as a module. If so the module
+ will be called rtc-rx6110.
+
comment "Platform RTC drivers"
# this 'CMOS' RTC driver is arch dependent because it requires
@@ -1017,6 +1019,7 @@ config RTC_DRV_DS1553
config RTC_DRV_DS1685_FAMILY
tristate "Dallas/Maxim DS1685 Family"
+ depends on HAS_IOMEM
help
If you say yes here you get support for the Dallas/Maxim DS1685
family of real time chips. This family includes the DS1685/DS1687,
@@ -1150,6 +1153,7 @@ config RTC_DRV_STK17TA8
config RTC_DRV_M48T86
tristate "ST M48T86/Dallas DS12887"
+ depends on HAS_IOMEM
help
If you say Y here you will get support for the
ST M48T86 and Dallas DS12887 RTC chips.
@@ -1752,7 +1756,9 @@ config RTC_DRV_LOONGSON1
config RTC_DRV_MXC
tristate "Freescale MXC Real Time Clock"
- depends on ARCH_MXC
+ depends on ARCH_MXC || COMPILE_TEST
+ depends on HAS_IOMEM
+ depends on OF
help
If you say yes here you get support for the Freescale MXC
RTC module.
@@ -1762,7 +1768,9 @@ config RTC_DRV_MXC
config RTC_DRV_MXC_V2
tristate "Freescale MXC Real Time Clock for i.MX53"
- depends on ARCH_MXC
+ depends on ARCH_MXC || COMPILE_TEST
+ depends on HAS_IOMEM
+ depends on OF
help
If you say yes here you get support for the Freescale MXC
SRTC module in i.MX53 processor.
@@ -1935,7 +1943,6 @@ config RTC_DRV_HID_SENSOR_TIME
config RTC_DRV_GOLDFISH
tristate "Goldfish Real Time Clock"
depends on OF && HAS_IOMEM
- depends on GOLDFISH || COMPILE_TEST
help
Say yes to enable RTC driver for the Goldfish based virtual platform.
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index 5855aa2eef62..7e470fbd5e4d 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -28,6 +28,7 @@ static void rtc_device_release(struct device *dev)
struct rtc_device *rtc = to_rtc_device(dev);
ida_simple_remove(&rtc_ida, rtc->id);
+ mutex_destroy(&rtc->ops_lock);
kfree(rtc);
}
@@ -326,8 +327,10 @@ static void rtc_device_get_offset(struct rtc_device *rtc)
*
* @rtc: the RTC class device to destroy
*/
-static void rtc_device_unregister(struct rtc_device *rtc)
+static void devm_rtc_unregister_device(void *data)
{
+ struct rtc_device *rtc = data;
+
mutex_lock(&rtc->ops_lock);
/*
* Remove innards of this RTC, then disable it, before
@@ -337,60 +340,43 @@ static void rtc_device_unregister(struct rtc_device *rtc)
cdev_device_del(&rtc->char_dev, &rtc->dev);
rtc->ops = NULL;
mutex_unlock(&rtc->ops_lock);
- put_device(&rtc->dev);
}
-static void devm_rtc_release_device(struct device *dev, void *res)
+static void devm_rtc_release_device(void *res)
{
- struct rtc_device *rtc = *(struct rtc_device **)res;
+ struct rtc_device *rtc = res;
- rtc_nvmem_unregister(rtc);
-
- if (rtc->registered)
- rtc_device_unregister(rtc);
- else
- put_device(&rtc->dev);
+ put_device(&rtc->dev);
}
struct rtc_device *devm_rtc_allocate_device(struct device *dev)
{
- struct rtc_device **ptr, *rtc;
+ struct rtc_device *rtc;
int id, err;
id = rtc_device_get_id(dev);
if (id < 0)
return ERR_PTR(id);
- ptr = devres_alloc(devm_rtc_release_device, sizeof(*ptr), GFP_KERNEL);
- if (!ptr) {
- err = -ENOMEM;
- goto exit_ida;
- }
-
rtc = rtc_allocate_device();
if (!rtc) {
- err = -ENOMEM;
- goto exit_devres;
+ ida_simple_remove(&rtc_ida, id);
+ return ERR_PTR(-ENOMEM);
}
- *ptr = rtc;
- devres_add(dev, ptr);
-
rtc->id = id;
rtc->dev.parent = dev;
dev_set_name(&rtc->dev, "rtc%d", id);
- return rtc;
+ err = devm_add_action_or_reset(dev, devm_rtc_release_device, rtc);
+ if (err)
+ return ERR_PTR(err);
-exit_devres:
- devres_free(ptr);
-exit_ida:
- ida_simple_remove(&rtc_ida, id);
- return ERR_PTR(err);
+ return rtc;
}
EXPORT_SYMBOL_GPL(devm_rtc_allocate_device);
-int __rtc_register_device(struct module *owner, struct rtc_device *rtc)
+int __devm_rtc_register_device(struct module *owner, struct rtc_device *rtc)
{
struct rtc_wkalrm alrm;
int err;
@@ -420,7 +406,6 @@ int __rtc_register_device(struct module *owner, struct rtc_device *rtc)
rtc_proc_add_device(rtc);
- rtc->registered = true;
dev_info(rtc->dev.parent, "registered as %s\n",
dev_name(&rtc->dev));
@@ -429,9 +414,10 @@ int __rtc_register_device(struct module *owner, struct rtc_device *rtc)
rtc_hctosys(rtc);
#endif
- return 0;
+ return devm_add_action_or_reset(rtc->dev.parent,
+ devm_rtc_unregister_device, rtc);
}
-EXPORT_SYMBOL_GPL(__rtc_register_device);
+EXPORT_SYMBOL_GPL(__devm_rtc_register_device);
/**
* devm_rtc_device_register - resource managed rtc_device_register()
@@ -461,7 +447,7 @@ struct rtc_device *devm_rtc_device_register(struct device *dev,
rtc->ops = ops;
- err = __rtc_register_device(owner, rtc);
+ err = __devm_rtc_register_device(owner, rtc);
if (err)
return ERR_PTR(err);
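After this conversion, an RTC driver's probe path needs no explicit teardown; a minimal hedged sketch (hypothetical foo driver, foo_rtc_ops assumed defined elsewhere):

#include <linux/platform_device.h>
#include <linux/rtc.h>

static int foo_rtc_probe(struct platform_device *pdev)
{
	struct rtc_device *rtc;

	rtc = devm_rtc_allocate_device(&pdev->dev);
	if (IS_ERR(rtc))
		return PTR_ERR(rtc);

	rtc->ops = &foo_rtc_ops;
	rtc->range_max = U32_MAX;

	/* devres now handles unregister and the final put on unbind */
	return devm_rtc_register_device(rtc);
}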
diff --git a/drivers/rtc/nvmem.c b/drivers/rtc/nvmem.c
index 4312096c7738..07ede21cee34 100644
--- a/drivers/rtc/nvmem.c
+++ b/drivers/rtc/nvmem.c
@@ -9,99 +9,22 @@
#include <linux/types.h>
#include <linux/nvmem-consumer.h>
#include <linux/rtc.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-/*
- * Deprecated ABI compatibility, this should be removed at some point
- */
-
-static const char nvram_warning[] = "Deprecated ABI, please use nvmem";
-
-static ssize_t
-rtc_nvram_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
-{
- dev_warn_once(kobj_to_dev(kobj), nvram_warning);
-
- return nvmem_device_read(attr->private, off, count, buf);
-}
-
-static ssize_t
-rtc_nvram_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
-{
- dev_warn_once(kobj_to_dev(kobj), nvram_warning);
-
- return nvmem_device_write(attr->private, off, count, buf);
-}
-
-static int rtc_nvram_register(struct rtc_device *rtc,
- struct nvmem_device *nvmem, size_t size)
-{
- int err;
-
- rtc->nvram = kzalloc(sizeof(*rtc->nvram), GFP_KERNEL);
- if (!rtc->nvram)
- return -ENOMEM;
-
- rtc->nvram->attr.name = "nvram";
- rtc->nvram->attr.mode = 0644;
- rtc->nvram->private = nvmem;
-
- sysfs_bin_attr_init(rtc->nvram);
-
- rtc->nvram->read = rtc_nvram_read;
- rtc->nvram->write = rtc_nvram_write;
- rtc->nvram->size = size;
-
- err = sysfs_create_bin_file(&rtc->dev.parent->kobj,
- rtc->nvram);
- if (err) {
- kfree(rtc->nvram);
- rtc->nvram = NULL;
- }
-
- return err;
-}
-
-static void rtc_nvram_unregister(struct rtc_device *rtc)
-{
- sysfs_remove_bin_file(&rtc->dev.parent->kobj, rtc->nvram);
- kfree(rtc->nvram);
- rtc->nvram = NULL;
-}
-
-/*
- * New ABI, uses nvmem
- */
-int rtc_nvmem_register(struct rtc_device *rtc,
+int devm_rtc_nvmem_register(struct rtc_device *rtc,
struct nvmem_config *nvmem_config)
{
+ struct device *dev = rtc->dev.parent;
struct nvmem_device *nvmem;
if (!nvmem_config)
return -ENODEV;
- nvmem_config->dev = rtc->dev.parent;
+ nvmem_config->dev = dev;
nvmem_config->owner = rtc->owner;
- nvmem = devm_nvmem_register(rtc->dev.parent, nvmem_config);
+ nvmem = devm_nvmem_register(dev, nvmem_config);
if (IS_ERR(nvmem))
- return PTR_ERR(nvmem);
-
- /* Register the old ABI */
- if (rtc->nvram_old_abi)
- rtc_nvram_register(rtc, nvmem, nvmem_config->size);
+ dev_err(dev, "failed to register nvmem device for RTC\n");
- return 0;
-}
-EXPORT_SYMBOL_GPL(rtc_nvmem_register);
-
-void rtc_nvmem_unregister(struct rtc_device *rtc)
-{
- /* unregister the old ABI */
- if (rtc->nvram)
- rtc_nvram_unregister(rtc);
+ return PTR_ERR_OR_ZERO(nvmem);
}
+EXPORT_SYMBOL_GPL(devm_rtc_nvmem_register);
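Callers can now treat nvmem registration as best-effort and ignore the return value, as the mass conversion below does; a hedged sketch (field values illustrative, reg_read/reg_write assumed wired up to the device's accessors):

static struct nvmem_config example_nvmem_cfg = {
	.name = "foo_nvram",
	.word_size = 1,
	.stride = 1,
	.size = 64,
};

	/* inside probe, after devm_rtc_allocate_device() */
	devm_rtc_nvmem_register(rtc, &example_nvmem_cfg);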
diff --git a/drivers/rtc/rtc-88pm80x.c b/drivers/rtc/rtc-88pm80x.c
index 75779e8501a3..6a3f44cf6ebe 100644
--- a/drivers/rtc/rtc-88pm80x.c
+++ b/drivers/rtc/rtc-88pm80x.c
@@ -294,7 +294,7 @@ static int pm80x_rtc_probe(struct platform_device *pdev)
info->rtc_dev->ops = &pm80x_rtc_ops;
info->rtc_dev->range_max = U32_MAX;
- ret = rtc_register_device(info->rtc_dev);
+ ret = devm_rtc_register_device(info->rtc_dev);
if (ret)
goto out_rtc;
diff --git a/drivers/rtc/rtc-88pm860x.c b/drivers/rtc/rtc-88pm860x.c
index c90457d001e9..2c809a1a445e 100644
--- a/drivers/rtc/rtc-88pm860x.c
+++ b/drivers/rtc/rtc-88pm860x.c
@@ -307,7 +307,7 @@ static int pm860x_rtc_probe(struct platform_device *pdev)
info->rtc_dev->ops = &pm860x_rtc_ops;
info->rtc_dev->range_max = U32_MAX;
- ret = rtc_register_device(info->rtc_dev);
+ ret = devm_rtc_register_device(info->rtc_dev);
if (ret)
return ret;
diff --git a/drivers/rtc/rtc-ab-b5ze-s3.c b/drivers/rtc/rtc-ab-b5ze-s3.c
index 2370ac0cdb5f..6e3e320dc727 100644
--- a/drivers/rtc/rtc-ab-b5ze-s3.c
+++ b/drivers/rtc/rtc-ab-b5ze-s3.c
@@ -892,7 +892,7 @@ static int abb5zes3_probe(struct i2c_client *client,
}
}
- ret = rtc_register_device(data->rtc);
+ ret = devm_rtc_register_device(data->rtc);
err:
if (ret && data->irq)
diff --git a/drivers/rtc/rtc-ab-eoz9.c b/drivers/rtc/rtc-ab-eoz9.c
index d690985caa4c..b20d8f26dcdb 100644
--- a/drivers/rtc/rtc-ab-eoz9.c
+++ b/drivers/rtc/rtc-ab-eoz9.c
@@ -420,7 +420,7 @@ static int abeoz9_probe(struct i2c_client *client,
data->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
data->rtc->range_max = RTC_TIMESTAMP_END_2099;
- ret = rtc_register_device(data->rtc);
+ ret = devm_rtc_register_device(data->rtc);
if (ret)
return ret;
diff --git a/drivers/rtc/rtc-ab3100.c b/drivers/rtc/rtc-ab3100.c
index 2ed6def90975..e4fd961e8bf6 100644
--- a/drivers/rtc/rtc-ab3100.c
+++ b/drivers/rtc/rtc-ab3100.c
@@ -238,7 +238,7 @@ static int __init ab3100_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rtc);
- return rtc_register_device(rtc);
+ return devm_rtc_register_device(rtc);
}
static struct platform_driver ab3100_rtc_driver = {
diff --git a/drivers/rtc/rtc-ab8500.c b/drivers/rtc/rtc-ab8500.c
index 3d60f3283f11..b40048871295 100644
--- a/drivers/rtc/rtc-ab8500.c
+++ b/drivers/rtc/rtc-ab8500.c
@@ -404,7 +404,7 @@ static int ab8500_rtc_probe(struct platform_device *pdev)
if (err)
return err;
- return rtc_register_device(rtc);
+ return devm_rtc_register_device(rtc);
}
static int ab8500_rtc_remove(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-abx80x.c b/drivers/rtc/rtc-abx80x.c
index 803725b3a02c..6733bb0df674 100644
--- a/drivers/rtc/rtc-abx80x.c
+++ b/drivers/rtc/rtc-abx80x.c
@@ -851,7 +851,7 @@ static int abx80x_probe(struct i2c_client *client,
return err;
}
- return rtc_register_device(priv->rtc);
+ return devm_rtc_register_device(priv->rtc);
}
static const struct i2c_device_id abx80x_id[] = {
diff --git a/drivers/rtc/rtc-ac100.c b/drivers/rtc/rtc-ac100.c
index 29223931aba7..1ddbef99e38f 100644
--- a/drivers/rtc/rtc-ac100.c
+++ b/drivers/rtc/rtc-ac100.c
@@ -610,7 +610,7 @@ static int ac100_rtc_probe(struct platform_device *pdev)
if (ret)
return ret;
- return rtc_register_device(chip->rtc);
+ return devm_rtc_register_device(chip->rtc);
}
static int ac100_rtc_remove(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-armada38x.c b/drivers/rtc/rtc-armada38x.c
index 94d7c22fc4f3..807a79c07f08 100644
--- a/drivers/rtc/rtc-armada38x.c
+++ b/drivers/rtc/rtc-armada38x.c
@@ -556,7 +556,7 @@ static __init int armada38x_rtc_probe(struct platform_device *pdev)
rtc->rtc_dev->range_max = U32_MAX;
- return rtc_register_device(rtc->rtc_dev);
+ return devm_rtc_register_device(rtc->rtc_dev);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/rtc/rtc-aspeed.c b/drivers/rtc/rtc-aspeed.c
index eacdd0637cce..a93352ed3aec 100644
--- a/drivers/rtc/rtc-aspeed.c
+++ b/drivers/rtc/rtc-aspeed.c
@@ -104,7 +104,7 @@ static int aspeed_rtc_probe(struct platform_device *pdev)
rtc->rtc_dev->range_min = RTC_TIMESTAMP_BEGIN_1900;
rtc->rtc_dev->range_max = 38814989399LL; /* 3199-12-31 23:59:59 */
- return rtc_register_device(rtc->rtc_dev);
+ return devm_rtc_register_device(rtc->rtc_dev);
}
static const struct of_device_id aspeed_rtc_match[] = {
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index 5e811e04cb21..fe396d27ebb7 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -36,6 +36,10 @@
#define AT91_RTC_UPDCAL BIT(1) /* Update Request Calendar Register */
#define AT91_RTC_MR 0x04 /* Mode Register */
+#define AT91_RTC_HRMOD BIT(0) /* 12/24 hour mode */
+#define AT91_RTC_NEGPPM BIT(4) /* Negative PPM correction */
+#define AT91_RTC_CORRECTION GENMASK(14, 8) /* Slow clock correction */
+#define AT91_RTC_HIGHPPM BIT(15) /* High PPM correction */
#define AT91_RTC_TIMR 0x08 /* Time Register */
#define AT91_RTC_SEC GENMASK(6, 0) /* Current Second */
@@ -77,6 +81,9 @@
#define AT91_RTC_NVTIMALR BIT(2) /* Non valid Time Alarm */
#define AT91_RTC_NVCALALR BIT(3) /* Non valid Calendar Alarm */
+#define AT91_RTC_CORR_DIVIDEND 3906000
+#define AT91_RTC_CORR_LOW_RATIO 20
+
#define at91_rtc_read(field) \
readl_relaxed(at91_rtc_regs + field)
#define at91_rtc_write(field, val) \
@@ -84,6 +91,7 @@
struct at91_rtc_config {
bool use_shadow_imr;
+ bool has_correction;
};
static const struct at91_rtc_config *at91_rtc_config;
@@ -293,6 +301,75 @@ static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
return 0;
}
+static int at91_rtc_readoffset(struct device *dev, long *offset)
+{
+ u32 mr = at91_rtc_read(AT91_RTC_MR);
+ long val = FIELD_GET(AT91_RTC_CORRECTION, mr);
+
+ if (!val) {
+ *offset = 0;
+ return 0;
+ }
+
+ val++;
+
+ if (!(mr & AT91_RTC_NEGPPM))
+ val = -val;
+
+ if (!(mr & AT91_RTC_HIGHPPM))
+ val *= AT91_RTC_CORR_LOW_RATIO;
+
+ *offset = DIV_ROUND_CLOSEST(AT91_RTC_CORR_DIVIDEND, val);
+
+ return 0;
+}
+
+static int at91_rtc_setoffset(struct device *dev, long offset)
+{
+ long corr;
+ u32 mr;
+
+ if (offset > AT91_RTC_CORR_DIVIDEND / 2)
+ return -ERANGE;
+ if (offset < -AT91_RTC_CORR_DIVIDEND / 2)
+ return -ERANGE;
+
+ mr = at91_rtc_read(AT91_RTC_MR);
+ mr &= ~(AT91_RTC_NEGPPM | AT91_RTC_CORRECTION | AT91_RTC_HIGHPPM);
+
+ if (offset > 0)
+ mr |= AT91_RTC_NEGPPM;
+ else
+ offset = -offset;
+
+	/* offset less than 764 ppb, disable correction */
+ if (offset < 764) {
+ at91_rtc_write(AT91_RTC_MR, mr & ~AT91_RTC_NEGPPM);
+
+ return 0;
+ }
+
+ /*
+	 * 29208 ppb is the cutoff between the low and high ranges: beyond
+	 * it, low-range values are never better than high-range values.
+ */
+ if (offset < 29208) {
+ corr = DIV_ROUND_CLOSEST(AT91_RTC_CORR_DIVIDEND, offset * AT91_RTC_CORR_LOW_RATIO);
+ } else {
+ corr = DIV_ROUND_CLOSEST(AT91_RTC_CORR_DIVIDEND, offset);
+ mr |= AT91_RTC_HIGHPPM;
+ }
+
+ if (corr > 128)
+ corr = 128;
+
+ mr |= FIELD_PREP(AT91_RTC_CORRECTION, corr - 1);
+
+ at91_rtc_write(AT91_RTC_MR, mr);
+
+ return 0;
+}
+
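To make the encoding arithmetic concrete, a hedged worked example using the constants above (a sketch, not driver code):

/* Requesting +50000 ppb: offset >= 29208, so the high range is used. */
static long example_at91_roundtrip(void)
{
	long corr = DIV_ROUND_CLOSEST(AT91_RTC_CORR_DIVIDEND, 50000); /* 78 */

	/* MR stores corr - 1 = 77 with HIGHPPM and NEGPPM set; reading  */
	/* back then yields round(3906000 / 78) = 50077 ppb.             */
	return DIV_ROUND_CLOSEST(AT91_RTC_CORR_DIVIDEND, corr);
}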
/*
* IRQ handler for the RTC
*/
@@ -343,6 +420,10 @@ static const struct at91_rtc_config at91sam9x5_config = {
.use_shadow_imr = true,
};
+static const struct at91_rtc_config sama5d4_config = {
+ .has_correction = true,
+};
+
static const struct of_device_id at91_rtc_dt_ids[] = {
{
.compatible = "atmel,at91rm9200-rtc",
@@ -352,10 +433,13 @@ static const struct of_device_id at91_rtc_dt_ids[] = {
.data = &at91sam9x5_config,
}, {
.compatible = "atmel,sama5d4-rtc",
- .data = &at91rm9200_config,
+ .data = &sama5d4_config,
}, {
.compatible = "atmel,sama5d2-rtc",
- .data = &at91rm9200_config,
+ .data = &sama5d4_config,
+ }, {
+ .compatible = "microchip,sam9x60-rtc",
+ .data = &sama5d4_config,
}, {
/* sentinel */
}
@@ -370,6 +454,16 @@ static const struct rtc_class_ops at91_rtc_ops = {
.alarm_irq_enable = at91_rtc_alarm_irq_enable,
};
+static const struct rtc_class_ops sama5d4_rtc_ops = {
+ .read_time = at91_rtc_readtime,
+ .set_time = at91_rtc_settime,
+ .read_alarm = at91_rtc_readalarm,
+ .set_alarm = at91_rtc_setalarm,
+ .alarm_irq_enable = at91_rtc_alarm_irq_enable,
+ .set_offset = at91_rtc_setoffset,
+ .read_offset = at91_rtc_readoffset,
+};
+
/*
* Initialize and install RTC driver
*/
@@ -416,7 +510,7 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
}
at91_rtc_write(AT91_RTC_CR, 0);
- at91_rtc_write(AT91_RTC_MR, 0); /* 24 hour mode */
+ at91_rtc_write(AT91_RTC_MR, at91_rtc_read(AT91_RTC_MR) & ~AT91_RTC_HRMOD);
/* Disable all interrupts */
at91_rtc_write_idr(AT91_RTC_ACKUPD | AT91_RTC_ALARM |
@@ -437,10 +531,14 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
if (!device_can_wakeup(&pdev->dev))
device_init_wakeup(&pdev->dev, 1);
- rtc->ops = &at91_rtc_ops;
+ if (at91_rtc_config->has_correction)
+ rtc->ops = &sama5d4_rtc_ops;
+ else
+ rtc->ops = &at91_rtc_ops;
+
rtc->range_min = RTC_TIMESTAMP_BEGIN_1900;
rtc->range_max = RTC_TIMESTAMP_END_2099;
- ret = rtc_register_device(rtc);
+ ret = devm_rtc_register_device(rtc);
if (ret)
goto err_clk;
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
index e39e89867d29..2216be429ab7 100644
--- a/drivers/rtc/rtc-at91sam9.c
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -431,7 +431,7 @@ static int at91_rtc_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "%s: SET TIME!\n",
dev_name(&rtc->rtcdev->dev));
- return rtc_register_device(rtc->rtcdev);
+ return devm_rtc_register_device(rtc->rtcdev);
err_clk:
clk_disable_unprepare(rtc->sclk);
diff --git a/drivers/rtc/rtc-au1xxx.c b/drivers/rtc/rtc-au1xxx.c
index 791bebcb6f47..e6428b27b5d4 100644
--- a/drivers/rtc/rtc-au1xxx.c
+++ b/drivers/rtc/rtc-au1xxx.c
@@ -104,7 +104,7 @@ static int au1xtoy_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rtcdev);
- return rtc_register_device(rtcdev);
+ return devm_rtc_register_device(rtcdev);
}
static struct platform_driver au1xrtc_driver = {
diff --git a/drivers/rtc/rtc-bd70528.c b/drivers/rtc/rtc-bd70528.c
index 4492b770422c..17cb67f5bf6e 100644
--- a/drivers/rtc/rtc-bd70528.c
+++ b/drivers/rtc/rtc-bd70528.c
@@ -604,7 +604,7 @@ static int bd70528_probe(struct platform_device *pdev)
}
}
- return rtc_register_device(rtc);
+ return devm_rtc_register_device(rtc);
}
static const struct platform_device_id bd718x7_rtc_id[] = {
diff --git a/drivers/rtc/rtc-brcmstb-waketimer.c b/drivers/rtc/rtc-brcmstb-waketimer.c
index 4fee57c51280..0366e2ff04ae 100644
--- a/drivers/rtc/rtc-brcmstb-waketimer.c
+++ b/drivers/rtc/rtc-brcmstb-waketimer.c
@@ -252,7 +252,7 @@ static int brcmstb_waketmr_probe(struct platform_device *pdev)
timer->rtc->ops = &brcmstb_waketmr_ops;
timer->rtc->range_max = U32_MAX;
- ret = rtc_register_device(timer->rtc);
+ ret = devm_rtc_register_device(timer->rtc);
if (ret)
goto err_notifier;
@@ -264,8 +264,7 @@ err_notifier:
unregister_reboot_notifier(&timer->reboot_notifier);
err_clk:
- if (timer->clk)
- clk_disable_unprepare(timer->clk);
+ clk_disable_unprepare(timer->clk);
return ret;
}
diff --git a/drivers/rtc/rtc-cadence.c b/drivers/rtc/rtc-cadence.c
index 595d5d252850..1edf7f16d73a 100644
--- a/drivers/rtc/rtc-cadence.c
+++ b/drivers/rtc/rtc-cadence.c
@@ -336,7 +336,7 @@ static int cdns_rtc_probe(struct platform_device *pdev)
writel(0, crtc->regs + CDNS_RTC_HMR);
writel(CDNS_RTC_KRTCR_KRTC, crtc->regs + CDNS_RTC_KRTCR);
- ret = rtc_register_device(crtc->rtc_dev);
+ ret = devm_rtc_register_device(crtc->rtc_dev);
if (ret)
goto err_disable_wakeup;
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index c5bcd2adc9fe..a701dae653c4 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -805,6 +805,14 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
spin_lock_irq(&rtc_lock);
+ /* Ensure that the RTC is accessible. Bit 6 must be 0! */
+ if ((CMOS_READ(RTC_VALID) & 0x40) != 0) {
+ spin_unlock_irq(&rtc_lock);
+ dev_warn(dev, "not accessible\n");
+ retval = -ENXIO;
+ goto cleanup1;
+ }
+
if (!(flags & CMOS_RTC_FLAGS_NOFREQ)) {
/* force periodic irq to CMOS reset default of 1024Hz;
*
@@ -863,8 +871,7 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
cmos_rtc.rtc->ops = &cmos_rtc_ops_no_alarm;
}
- cmos_rtc.rtc->nvram_old_abi = true;
- retval = rtc_register_device(cmos_rtc.rtc);
+ retval = devm_rtc_register_device(cmos_rtc.rtc);
if (retval)
goto cleanup2;
@@ -873,8 +880,7 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
/* export at least the first block of NVRAM */
nvmem_cfg.size = address_space - NVRAM_OFFSET;
- if (rtc_nvmem_register(cmos_rtc.rtc, &nvmem_cfg))
- dev_err(dev, "nvmem registration failed\n");
+ devm_rtc_nvmem_register(cmos_rtc.rtc, &nvmem_cfg);
dev_info(dev, "%s%s, %d bytes nvram%s\n",
!is_valid_irq(rtc_irq) ? "no alarms" :
diff --git a/drivers/rtc/rtc-coh901331.c b/drivers/rtc/rtc-coh901331.c
index da59917c9ee8..168ced87d93a 100644
--- a/drivers/rtc/rtc-coh901331.c
+++ b/drivers/rtc/rtc-coh901331.c
@@ -203,7 +203,7 @@ static int __init coh901331_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rtap);
- ret = rtc_register_device(rtap->rtc);
+ ret = devm_rtc_register_device(rtap->rtc);
if (ret)
goto out_no_rtc;
diff --git a/drivers/rtc/rtc-cpcap.c b/drivers/rtc/rtc-cpcap.c
index 800667d73a6f..afc8fcba8f88 100644
--- a/drivers/rtc/rtc-cpcap.c
+++ b/drivers/rtc/rtc-cpcap.c
@@ -269,7 +269,8 @@ static int cpcap_rtc_probe(struct platform_device *pdev)
rtc->alarm_irq = platform_get_irq(pdev, 0);
err = devm_request_threaded_irq(dev, rtc->alarm_irq, NULL,
- cpcap_rtc_alarm_irq, IRQF_TRIGGER_NONE,
+ cpcap_rtc_alarm_irq,
+ IRQF_TRIGGER_NONE | IRQF_ONESHOT,
"rtc_alarm", rtc);
if (err) {
dev_err(dev, "Could not request alarm irq: %d\n", err);
@@ -285,7 +286,8 @@ static int cpcap_rtc_probe(struct platform_device *pdev)
*/
rtc->update_irq = platform_get_irq(pdev, 1);
err = devm_request_threaded_irq(dev, rtc->update_irq, NULL,
- cpcap_rtc_update_irq, IRQF_TRIGGER_NONE,
+ cpcap_rtc_update_irq,
+ IRQF_TRIGGER_NONE | IRQF_ONESHOT,
"rtc_1hz", rtc);
if (err) {
dev_err(dev, "Could not request update irq: %d\n", err);
@@ -299,7 +301,7 @@ static int cpcap_rtc_probe(struct platform_device *pdev)
/* ignore error and continue without wakeup support */
}
- return rtc_register_device(rtc->rtc_dev);
+ return devm_rtc_register_device(rtc->rtc_dev);
}
static const struct of_device_id cpcap_rtc_of_match[] = {
diff --git a/drivers/rtc/rtc-cros-ec.c b/drivers/rtc/rtc-cros-ec.c
index f7343c289cab..70626793ca69 100644
--- a/drivers/rtc/rtc-cros-ec.c
+++ b/drivers/rtc/rtc-cros-ec.c
@@ -350,7 +350,7 @@ static int cros_ec_rtc_probe(struct platform_device *pdev)
cros_ec_rtc->rtc->ops = &cros_ec_rtc_ops;
cros_ec_rtc->rtc->range_max = U32_MAX;
- ret = rtc_register_device(cros_ec_rtc->rtc);
+ ret = devm_rtc_register_device(cros_ec_rtc->rtc);
if (ret)
return ret;
diff --git a/drivers/rtc/rtc-da9052.c b/drivers/rtc/rtc-da9052.c
index 58de10da37b1..9ca99bd35702 100644
--- a/drivers/rtc/rtc-da9052.c
+++ b/drivers/rtc/rtc-da9052.c
@@ -304,7 +304,7 @@ static int da9052_rtc_probe(struct platform_device *pdev)
rtc->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
rtc->rtc->range_max = RTC_TIMESTAMP_END_2063;
- ret = rtc_register_device(rtc->rtc);
+ ret = devm_rtc_register_device(rtc->rtc);
if (ret)
return ret;
diff --git a/drivers/rtc/rtc-da9063.c b/drivers/rtc/rtc-da9063.c
index 046b1d4c3dae..d4b72a9fa2ba 100644
--- a/drivers/rtc/rtc-da9063.c
+++ b/drivers/rtc/rtc-da9063.c
@@ -243,7 +243,7 @@ static int da9063_rtc_read_time(struct device *dev, struct rtc_time *tm)
al_secs = rtc_tm_to_time64(&rtc->alarm_time);
/* handle the rtc synchronisation delay */
- if (rtc->rtc_sync == true && al_secs - tm_secs == 1)
+ if (rtc->rtc_sync && al_secs - tm_secs == 1)
memcpy(tm, &rtc->alarm_time, sizeof(struct rtc_time));
else
rtc->rtc_sync = false;
@@ -494,7 +494,7 @@ static int da9063_rtc_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Failed to request ALARM IRQ %d: %d\n",
irq_alarm, ret);
- return rtc_register_device(rtc->rtc_dev);
+ return devm_rtc_register_device(rtc->rtc_dev);
}
static struct platform_driver da9063_rtc_driver = {
diff --git a/drivers/rtc/rtc-davinci.c b/drivers/rtc/rtc-davinci.c
index 73f87a17cdf3..6bef0f2353da 100644
--- a/drivers/rtc/rtc-davinci.c
+++ b/drivers/rtc/rtc-davinci.c
@@ -484,7 +484,7 @@ static int __init davinci_rtc_probe(struct platform_device *pdev)
device_init_wakeup(&pdev->dev, 0);
- return rtc_register_device(davinci_rtc->rtc);
+ return devm_rtc_register_device(davinci_rtc->rtc);
}
static int __exit davinci_rtc_remove(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-digicolor.c b/drivers/rtc/rtc-digicolor.c
index 200d85b01e8b..4fdfa5b6feb2 100644
--- a/drivers/rtc/rtc-digicolor.c
+++ b/drivers/rtc/rtc-digicolor.c
@@ -202,7 +202,7 @@ static int __init dc_rtc_probe(struct platform_device *pdev)
rtc->rtc_dev->ops = &dc_rtc_ops;
rtc->rtc_dev->range_max = U32_MAX;
- return rtc_register_device(rtc->rtc_dev);
+ return devm_rtc_register_device(rtc->rtc_dev);
}
static const struct of_device_id dc_dt_ids[] = {
diff --git a/drivers/rtc/rtc-dm355evm.c b/drivers/rtc/rtc-dm355evm.c
index cd947a20843b..94fb16ac3e0f 100644
--- a/drivers/rtc/rtc-dm355evm.c
+++ b/drivers/rtc/rtc-dm355evm.c
@@ -132,7 +132,7 @@ static int dm355evm_rtc_probe(struct platform_device *pdev)
rtc->ops = &dm355evm_rtc_ops;
rtc->range_max = U32_MAX;
- return rtc_register_device(rtc);
+ return devm_rtc_register_device(rtc);
}
/*
diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c
index a3d790889eea..8c2ab29c3d91 100644
--- a/drivers/rtc/rtc-ds1305.c
+++ b/drivers/rtc/rtc-ds1305.c
@@ -694,12 +694,11 @@ static int ds1305_probe(struct spi_device *spi)
ds1305->rtc->range_max = RTC_TIMESTAMP_END_2099;
ds1305_nvmem_cfg.priv = ds1305;
- ds1305->rtc->nvram_old_abi = true;
- status = rtc_register_device(ds1305->rtc);
+ status = devm_rtc_register_device(ds1305->rtc);
if (status)
return status;
- rtc_nvmem_register(ds1305->rtc, &ds1305_nvmem_cfg);
+ devm_rtc_nvmem_register(ds1305->rtc, &ds1305_nvmem_cfg);
/* Maybe set up alarm IRQ; be ready to handle it triggering right
* away. NOTE that we don't share this. The signal is active low,
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index 9f5f54ca039d..183cf7c01364 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -8,12 +8,12 @@
* Copyright (C) 2012 Bertrand Achard (nvram access fixes)
*/
-#include <linux/acpi.h>
#include <linux/bcd.h>
#include <linux/i2c.h>
#include <linux/init.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/property.h>
#include <linux/rtc/ds1307.h>
#include <linux/rtc.h>
#include <linux/slab.h>
@@ -31,6 +31,7 @@
* That's a natural job for a factory or repair bench.
*/
enum ds_type {
+ unknown_ds_type, /* always first and 0 */
ds_1307,
ds_1308,
ds_1337,
@@ -1090,7 +1091,6 @@ static const struct i2c_device_id ds1307_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ds1307_id);
-#ifdef CONFIG_OF
static const struct of_device_id ds1307_of_match[] = {
{
.compatible = "dallas,ds1307",
@@ -1167,32 +1167,6 @@ static const struct of_device_id ds1307_of_match[] = {
{ }
};
MODULE_DEVICE_TABLE(of, ds1307_of_match);
-#endif
-
-#ifdef CONFIG_ACPI
-static const struct acpi_device_id ds1307_acpi_ids[] = {
- { .id = "DS1307", .driver_data = ds_1307 },
- { .id = "DS1308", .driver_data = ds_1308 },
- { .id = "DS1337", .driver_data = ds_1337 },
- { .id = "DS1338", .driver_data = ds_1338 },
- { .id = "DS1339", .driver_data = ds_1339 },
- { .id = "DS1388", .driver_data = ds_1388 },
- { .id = "DS1340", .driver_data = ds_1340 },
- { .id = "DS1341", .driver_data = ds_1341 },
- { .id = "DS3231", .driver_data = ds_3231 },
- { .id = "M41T0", .driver_data = m41t0 },
- { .id = "M41T00", .driver_data = m41t00 },
- { .id = "M41T11", .driver_data = m41t11 },
- { .id = "MCP7940X", .driver_data = mcp794xx },
- { .id = "MCP7941X", .driver_data = mcp794xx },
- { .id = "PT7C4338", .driver_data = ds_1307 },
- { .id = "RX8025", .driver_data = rx_8025 },
- { .id = "ISL12057", .driver_data = ds_1337 },
- { .id = "RX8130", .driver_data = rx_8130 },
- { }
-};
-MODULE_DEVICE_TABLE(acpi, ds1307_acpi_ids);
-#endif
/*
* The ds1337 and ds1339 both have two alarms, but we only use the first
@@ -1626,13 +1600,16 @@ static const struct clk_ops ds3231_clk_32khz_ops = {
.recalc_rate = ds3231_clk_32khz_recalc_rate,
};
+static const char *ds3231_clks_names[] = {
+ [DS3231_CLK_SQW] = "ds3231_clk_sqw",
+ [DS3231_CLK_32KHZ] = "ds3231_clk_32khz",
+};
+
static struct clk_init_data ds3231_clks_init[] = {
[DS3231_CLK_SQW] = {
- .name = "ds3231_clk_sqw",
.ops = &ds3231_clk_sqw_ops,
},
[DS3231_CLK_32KHZ] = {
- .name = "ds3231_clk_32khz",
.ops = &ds3231_clk_32khz_ops,
},
};
@@ -1653,6 +1630,11 @@ static int ds3231_clks_register(struct ds1307 *ds1307)
if (!onecell->clks)
return -ENOMEM;
+	/* optional override of the clock names */
+ device_property_read_string_array(ds1307->dev, "clock-output-names",
+ ds3231_clks_names,
+ ARRAY_SIZE(ds3231_clks_names));
+
for (i = 0; i < ARRAY_SIZE(ds3231_clks_init); i++) {
struct clk_init_data init = ds3231_clks_init[i];
@@ -1663,9 +1645,7 @@ static int ds3231_clks_register(struct ds1307 *ds1307)
if (i == DS3231_CLK_SQW && test_bit(HAS_ALARM, &ds1307->flags))
continue;
- /* optional override of the clockname */
- of_property_read_string_index(node, "clock-output-names", i,
- &init.name);
+ init.name = ds3231_clks_names[i];
ds1307->clks[i].init = &init;
onecell->clks[i] = devm_clk_register(ds1307->dev,
@@ -1674,10 +1654,8 @@ static int ds3231_clks_register(struct ds1307 *ds1307)
return PTR_ERR(onecell->clks[i]);
}
- if (!node)
- return 0;
-
- of_clk_add_provider(node, of_clk_src_onecell_get, onecell);
+ if (node)
+ of_clk_add_provider(node, of_clk_src_onecell_get, onecell);
return 0;
}
@@ -1761,6 +1739,7 @@ static int ds1307_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct ds1307 *ds1307;
+ const void *match;
int err = -ENODEV;
int tmp;
const struct chip_desc *chip;
@@ -1786,22 +1765,15 @@ static int ds1307_probe(struct i2c_client *client,
i2c_set_clientdata(client, ds1307);
- if (client->dev.of_node) {
- ds1307->type = (enum ds_type)
- of_device_get_match_data(&client->dev);
+ match = device_get_match_data(&client->dev);
+ if (match) {
+ ds1307->type = (enum ds_type)match;
chip = &chips[ds1307->type];
} else if (id) {
chip = &chips[id->driver_data];
ds1307->type = id->driver_data;
} else {
- const struct acpi_device_id *acpi_id;
-
- acpi_id = acpi_match_device(ACPI_PTR(ds1307_acpi_ids),
- ds1307->dev);
- if (!acpi_id)
- return -ENODEV;
- chip = &chips[acpi_id->driver_data];
- ds1307->type = acpi_id->driver_data;
+ return -ENODEV;
}
want_irq = client->irq > 0 && chip->alarm;
@@ -1819,7 +1791,6 @@ static int ds1307_probe(struct i2c_client *client,
trickle_charger_setup);
}
-#ifdef CONFIG_OF
/*
* For devices with no IRQ directly connected to the SoC, the RTC chip
* can be forced as a wakeup source by stating that explicitly in
@@ -1828,10 +1799,8 @@ static int ds1307_probe(struct i2c_client *client,
* This will guarantee the 'wakealarm' sysfs entry is available on the device,
* if supported by the RTC.
*/
- if (chip->alarm && of_property_read_bool(client->dev.of_node,
- "wakeup-source"))
+ if (chip->alarm && device_property_read_bool(&client->dev, "wakeup-source"))
ds1307_can_wakeup_device = true;
-#endif
switch (ds1307->type) {
case ds_1337:
@@ -2032,7 +2001,7 @@ static int ds1307_probe(struct i2c_client *client,
if (err)
return err;
- err = rtc_register_device(ds1307->rtc);
+ err = devm_rtc_register_device(ds1307->rtc);
if (err)
return err;
@@ -2047,8 +2016,7 @@ static int ds1307_probe(struct i2c_client *client,
.priv = ds1307,
};
- ds1307->rtc->nvram_old_abi = true;
- rtc_nvmem_register(ds1307->rtc, &nvmem_cfg);
+ devm_rtc_nvmem_register(ds1307->rtc, &nvmem_cfg);
}
ds1307_hwmon_register(ds1307);
@@ -2064,8 +2032,7 @@ exit:
static struct i2c_driver ds1307_driver = {
.driver = {
.name = "rtc-ds1307",
- .of_match_table = of_match_ptr(ds1307_of_match),
- .acpi_match_table = ACPI_PTR(ds1307_acpi_ids),
+ .of_match_table = ds1307_of_match,
},
.probe = ds1307_probe,
.id_table = ds1307_id,
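The matching logic above becomes firmware-agnostic: device_get_match_data() resolves .data from the OF table (and would also cover an ACPI table, were one kept), with the I2C id table as fallback. A hedged sketch of the pattern (example_probe is hypothetical):

static int example_probe(struct i2c_client *client,
			 const struct i2c_device_id *id)
{
	const void *match = device_get_match_data(&client->dev);
	enum ds_type type;

	if (match)
		type = (uintptr_t)match;	/* from ds1307_of_match .data */
	else if (id)
		type = id->driver_data;		/* legacy I2C id match */
	else
		return -ENODEV;

	/* unknown_ds_type is 0, so valid match data is never mistaken */
	return type == unknown_ds_type ? -ENODEV : 0;
}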
diff --git a/drivers/rtc/rtc-ds1343.c b/drivers/rtc/rtc-ds1343.c
index ba143423875b..f14ed6c96437 100644
--- a/drivers/rtc/rtc-ds1343.c
+++ b/drivers/rtc/rtc-ds1343.c
@@ -399,7 +399,6 @@ static int ds1343_probe(struct spi_device *spi)
if (IS_ERR(priv->rtc))
return PTR_ERR(priv->rtc);
- priv->rtc->nvram_old_abi = true;
priv->rtc->ops = &ds1343_rtc_ops;
priv->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
priv->rtc->range_max = RTC_TIMESTAMP_END_2099;
@@ -409,12 +408,12 @@ static int ds1343_probe(struct spi_device *spi)
dev_err(&spi->dev,
"unable to create sysfs entries for rtc ds1343\n");
- res = rtc_register_device(priv->rtc);
+ res = devm_rtc_register_device(priv->rtc);
if (res)
return res;
nvmem_cfg.priv = priv;
- rtc_nvmem_register(priv->rtc, &nvmem_cfg);
+ devm_rtc_nvmem_register(priv->rtc, &nvmem_cfg);
priv->irq = spi->irq;
diff --git a/drivers/rtc/rtc-ds1347.c b/drivers/rtc/rtc-ds1347.c
index 7025cf3fb9f8..157bf5209ac4 100644
--- a/drivers/rtc/rtc-ds1347.c
+++ b/drivers/rtc/rtc-ds1347.c
@@ -166,7 +166,7 @@ static int ds1347_probe(struct spi_device *spi)
rtc->range_min = RTC_TIMESTAMP_BEGIN_0000;
rtc->range_max = RTC_TIMESTAMP_END_9999;
- return rtc_register_device(rtc);
+ return devm_rtc_register_device(rtc);
}
static struct spi_driver ds1347_driver = {
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
index 177d870bda0d..fab79921a712 100644
--- a/drivers/rtc/rtc-ds1374.c
+++ b/drivers/rtc/rtc-ds1374.c
@@ -508,7 +508,7 @@ static int ds1374_probe(struct i2c_client *client,
ds1374->rtc->ops = &ds1374_rtc_ops;
ds1374->rtc->range_max = U32_MAX;
- ret = rtc_register_device(ds1374->rtc);
+ ret = devm_rtc_register_device(ds1374->rtc);
if (ret)
return ret;
diff --git a/drivers/rtc/rtc-ds1511.c b/drivers/rtc/rtc-ds1511.c
index a63872c4c76d..bda884333082 100644
--- a/drivers/rtc/rtc-ds1511.c
+++ b/drivers/rtc/rtc-ds1511.c
@@ -466,13 +466,11 @@ static int ds1511_rtc_probe(struct platform_device *pdev)
pdata->rtc->ops = &ds1511_rtc_ops;
- pdata->rtc->nvram_old_abi = true;
-
- ret = rtc_register_device(pdata->rtc);
+ ret = devm_rtc_register_device(pdata->rtc);
if (ret)
return ret;
- rtc_nvmem_register(pdata->rtc, &ds1511_nvmem_cfg);
+ devm_rtc_nvmem_register(pdata->rtc, &ds1511_nvmem_cfg);
/*
* if the platform has an interrupt in mind for this device,
diff --git a/drivers/rtc/rtc-ds1553.c b/drivers/rtc/rtc-ds1553.c
index cdf5e05b9489..dbff5b621ef5 100644
--- a/drivers/rtc/rtc-ds1553.c
+++ b/drivers/rtc/rtc-ds1553.c
@@ -294,9 +294,8 @@ static int ds1553_rtc_probe(struct platform_device *pdev)
return PTR_ERR(pdata->rtc);
pdata->rtc->ops = &ds1553_rtc_ops;
- pdata->rtc->nvram_old_abi = true;
- ret = rtc_register_device(pdata->rtc);
+ ret = devm_rtc_register_device(pdata->rtc);
if (ret)
return ret;
@@ -310,8 +309,7 @@ static int ds1553_rtc_probe(struct platform_device *pdev)
}
}
- if (rtc_nvmem_register(pdata->rtc, &nvmem_cfg))
- dev_err(&pdev->dev, "unable to register nvmem\n");
+ devm_rtc_nvmem_register(pdata->rtc, &nvmem_cfg);
return 0;
}
diff --git a/drivers/rtc/rtc-ds1672.c b/drivers/rtc/rtc-ds1672.c
index 9da84df9f152..630493759d15 100644
--- a/drivers/rtc/rtc-ds1672.c
+++ b/drivers/rtc/rtc-ds1672.c
@@ -124,7 +124,7 @@ static int ds1672_probe(struct i2c_client *client,
rtc->ops = &ds1672_rtc_ops;
rtc->range_max = U32_MAX;
- err = rtc_register_device(rtc);
+ err = devm_rtc_register_device(rtc);
if (err)
return err;
diff --git a/drivers/rtc/rtc-ds1685.c b/drivers/rtc/rtc-ds1685.c
index dfbd7b88b2b9..d69c807af29b 100644
--- a/drivers/rtc/rtc-ds1685.c
+++ b/drivers/rtc/rtc-ds1685.c
@@ -1316,13 +1316,12 @@ ds1685_rtc_probe(struct platform_device *pdev)
if (ret)
return ret;
- rtc_dev->nvram_old_abi = true;
nvmem_cfg.priv = rtc;
- ret = rtc_nvmem_register(rtc_dev, &nvmem_cfg);
+ ret = devm_rtc_nvmem_register(rtc_dev, &nvmem_cfg);
if (ret)
return ret;
- return rtc_register_device(rtc_dev);
+ return devm_rtc_register_device(rtc_dev);
}
/**
diff --git a/drivers/rtc/rtc-ds1742.c b/drivers/rtc/rtc-ds1742.c
index 2b949f0dbaa9..13d45c697da6 100644
--- a/drivers/rtc/rtc-ds1742.c
+++ b/drivers/rtc/rtc-ds1742.c
@@ -190,14 +190,12 @@ static int ds1742_rtc_probe(struct platform_device *pdev)
return PTR_ERR(rtc);
rtc->ops = &ds1742_rtc_ops;
- rtc->nvram_old_abi = true;
- ret = rtc_register_device(rtc);
+ ret = devm_rtc_register_device(rtc);
if (ret)
return ret;
- if (rtc_nvmem_register(rtc, &nvmem_cfg))
- dev_err(&pdev->dev, "Unable to register nvmem\n");
+ devm_rtc_nvmem_register(rtc, &nvmem_cfg);
return 0;
}
diff --git a/drivers/rtc/rtc-ds2404.c b/drivers/rtc/rtc-ds2404.c
index 9df0c44512b8..0480f592307e 100644
--- a/drivers/rtc/rtc-ds2404.c
+++ b/drivers/rtc/rtc-ds2404.c
@@ -234,7 +234,7 @@ static int rtc_probe(struct platform_device *pdev)
chip->rtc->ops = &ds2404_rtc_ops;
chip->rtc->range_max = U32_MAX;
- retval = rtc_register_device(chip->rtc);
+ retval = devm_rtc_register_device(chip->rtc);
if (retval)
return retval;
diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c
index 69c37ab64352..16b89035d135 100644
--- a/drivers/rtc/rtc-ds3232.c
+++ b/drivers/rtc/rtc-ds3232.c
@@ -518,7 +518,7 @@ static int ds3232_probe(struct device *dev, struct regmap *regmap, int irq,
if (IS_ERR(ds3232->rtc))
return PTR_ERR(ds3232->rtc);
- ret = rtc_nvmem_register(ds3232->rtc, &nvmem_cfg);
+ ret = devm_rtc_nvmem_register(ds3232->rtc, &nvmem_cfg);
if(ret)
return ret;
diff --git a/drivers/rtc/rtc-ep93xx.c b/drivers/rtc/rtc-ep93xx.c
index 8ec9ea1ca72e..acae7f16808f 100644
--- a/drivers/rtc/rtc-ep93xx.c
+++ b/drivers/rtc/rtc-ep93xx.c
@@ -33,7 +33,7 @@ struct ep93xx_rtc {
static int ep93xx_rtc_get_swcomp(struct device *dev, unsigned short *preload,
unsigned short *delete)
{
- struct ep93xx_rtc *ep93xx_rtc = dev_get_platdata(dev);
+ struct ep93xx_rtc *ep93xx_rtc = dev_get_drvdata(dev);
unsigned long comp;
comp = readl(ep93xx_rtc->mmio_base + EP93XX_RTC_SWCOMP);
@@ -51,7 +51,7 @@ static int ep93xx_rtc_get_swcomp(struct device *dev, unsigned short *preload,
static int ep93xx_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
- struct ep93xx_rtc *ep93xx_rtc = dev_get_platdata(dev);
+ struct ep93xx_rtc *ep93xx_rtc = dev_get_drvdata(dev);
unsigned long time;
time = readl(ep93xx_rtc->mmio_base + EP93XX_RTC_DATA);
@@ -62,7 +62,7 @@ static int ep93xx_rtc_read_time(struct device *dev, struct rtc_time *tm)
static int ep93xx_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
- struct ep93xx_rtc *ep93xx_rtc = dev_get_platdata(dev);
+ struct ep93xx_rtc *ep93xx_rtc = dev_get_drvdata(dev);
unsigned long secs = rtc_tm_to_time64(tm);
writel(secs + 1, ep93xx_rtc->mmio_base + EP93XX_RTC_LOAD);
@@ -145,7 +145,7 @@ static int ep93xx_rtc_probe(struct platform_device *pdev)
if (err)
return err;
- return rtc_register_device(ep93xx_rtc->rtc);
+ return devm_rtc_register_device(ep93xx_rtc->rtc);
}
static struct platform_driver ep93xx_rtc_driver = {
diff --git a/drivers/rtc/rtc-fsl-ftm-alarm.c b/drivers/rtc/rtc-fsl-ftm-alarm.c
index 48d3b38ea348..57cc09d0a806 100644
--- a/drivers/rtc/rtc-fsl-ftm-alarm.c
+++ b/drivers/rtc/rtc-fsl-ftm-alarm.c
@@ -290,7 +290,7 @@ static int ftm_rtc_probe(struct platform_device *pdev)
if (ret)
dev_err(&pdev->dev, "failed to enable irq wake\n");
- ret = rtc_register_device(rtc->rtc_dev);
+ ret = devm_rtc_register_device(rtc->rtc_dev);
if (ret) {
dev_err(&pdev->dev, "can't register rtc device\n");
return ret;
diff --git a/drivers/rtc/rtc-ftrtc010.c b/drivers/rtc/rtc-ftrtc010.c
index 0919f7dc94a3..ad3add5db4c8 100644
--- a/drivers/rtc/rtc-ftrtc010.c
+++ b/drivers/rtc/rtc-ftrtc010.c
@@ -176,7 +176,7 @@ static int ftrtc010_rtc_probe(struct platform_device *pdev)
if (unlikely(ret))
return ret;
- return rtc_register_device(rtc->rtc_dev);
+ return devm_rtc_register_device(rtc->rtc_dev);
}
static int ftrtc010_rtc_remove(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-goldfish.c b/drivers/rtc/rtc-goldfish.c
index 6349d2cd3680..7ab95d052644 100644
--- a/drivers/rtc/rtc-goldfish.c
+++ b/drivers/rtc/rtc-goldfish.c
@@ -194,7 +194,7 @@ static int goldfish_rtc_probe(struct platform_device *pdev)
if (err)
return err;
- return rtc_register_device(rtcdrv->rtc);
+ return devm_rtc_register_device(rtcdrv->rtc);
}
static const struct of_device_id goldfish_rtc_of_match[] = {
diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c
index 0fb79c4afb46..24e0095be058 100644
--- a/drivers/rtc/rtc-hym8563.c
+++ b/drivers/rtc/rtc-hym8563.c
@@ -527,8 +527,6 @@ static int hym8563_probe(struct i2c_client *client,
hym8563->client = client;
i2c_set_clientdata(client, hym8563);
- device_set_wakeup_capable(&client->dev, true);
-
ret = hym8563_init_device(client);
if (ret) {
dev_err(&client->dev, "could not init device, %d\n", ret);
@@ -547,6 +545,11 @@ static int hym8563_probe(struct i2c_client *client,
}
}
+ if (client->irq > 0 ||
+ device_property_read_bool(&client->dev, "wakeup-source")) {
+ device_init_wakeup(&client->dev, true);
+ }
+
/* check state of calendar information */
ret = i2c_smbus_read_byte_data(client, HYM8563_SEC);
if (ret < 0)
diff --git a/drivers/rtc/rtc-imx-sc.c b/drivers/rtc/rtc-imx-sc.c
index a5f59e6f862e..cc9fbab49999 100644
--- a/drivers/rtc/rtc-imx-sc.c
+++ b/drivers/rtc/rtc-imx-sc.c
@@ -166,7 +166,7 @@ static int imx_sc_rtc_probe(struct platform_device *pdev)
imx_sc_rtc->range_min = 0;
imx_sc_rtc->range_max = U32_MAX;
- ret = rtc_register_device(imx_sc_rtc);
+ ret = devm_rtc_register_device(imx_sc_rtc);
if (ret)
return ret;
diff --git a/drivers/rtc/rtc-imxdi.c b/drivers/rtc/rtc-imxdi.c
index 8d141d8a5490..c2692da74e09 100644
--- a/drivers/rtc/rtc-imxdi.c
+++ b/drivers/rtc/rtc-imxdi.c
@@ -814,7 +814,7 @@ static int __init dryice_rtc_probe(struct platform_device *pdev)
imxdi->rtc->ops = &dryice_rtc_ops;
imxdi->rtc->range_max = U32_MAX;
- rc = rtc_register_device(imxdi->rtc);
+ rc = devm_rtc_register_device(imxdi->rtc);
if (rc)
goto err;
diff --git a/drivers/rtc/rtc-isl12026.c b/drivers/rtc/rtc-isl12026.c
index 5b6b17fb6d62..1fc6627d854d 100644
--- a/drivers/rtc/rtc-isl12026.c
+++ b/drivers/rtc/rtc-isl12026.c
@@ -465,11 +465,11 @@ static int isl12026_probe_new(struct i2c_client *client)
priv->rtc->ops = &isl12026_rtc_ops;
nvm_cfg.priv = priv;
- ret = rtc_nvmem_register(priv->rtc, &nvm_cfg);
+ ret = devm_rtc_nvmem_register(priv->rtc, &nvm_cfg);
if (ret)
return ret;
- return rtc_register_device(priv->rtc);
+ return devm_rtc_register_device(priv->rtc);
}
static int isl12026_remove(struct i2c_client *client)
diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c
index ebb691fa48a6..563a6d9c9fcf 100644
--- a/drivers/rtc/rtc-isl1208.c
+++ b/drivers/rtc/rtc-isl1208.c
@@ -890,11 +890,11 @@ isl1208_probe(struct i2c_client *client, const struct i2c_device_id *id)
if (rc)
return rc;
- rc = rtc_nvmem_register(isl1208->rtc, &isl1208->nvmem_config);
+ rc = devm_rtc_nvmem_register(isl1208->rtc, &isl1208->nvmem_config);
if (rc)
return rc;
- return rtc_register_device(isl1208->rtc);
+ return devm_rtc_register_device(isl1208->rtc);
}
static struct i2c_driver isl1208_driver = {
diff --git a/drivers/rtc/rtc-jz4740.c b/drivers/rtc/rtc-jz4740.c
index 9607e6b6e0b3..6e51df72fd65 100644
--- a/drivers/rtc/rtc-jz4740.c
+++ b/drivers/rtc/rtc-jz4740.c
@@ -375,7 +375,7 @@ static int jz4740_rtc_probe(struct platform_device *pdev)
/* Each 1 Hz pulse should happen after (rate) ticks */
jz4740_rtc_reg_write(rtc, JZ_REG_RTC_REGULATOR, rate - 1);
- ret = rtc_register_device(rtc->rtc);
+ ret = devm_rtc_register_device(rtc->rtc);
if (ret)
return ret;
diff --git a/drivers/rtc/rtc-lpc32xx.c b/drivers/rtc/rtc-lpc32xx.c
index 15d8abda81fe..76ad7031a13d 100644
--- a/drivers/rtc/rtc-lpc32xx.c
+++ b/drivers/rtc/rtc-lpc32xx.c
@@ -239,7 +239,7 @@ static int lpc32xx_rtc_probe(struct platform_device *pdev)
rtc->rtc->ops = &lpc32xx_rtc_ops;
rtc->rtc->range_max = U32_MAX;
- err = rtc_register_device(rtc->rtc);
+ err = devm_rtc_register_device(rtc->rtc);
if (err)
return err;
diff --git a/drivers/rtc/rtc-ls1x.c b/drivers/rtc/rtc-ls1x.c
index 8bd34056fea0..5af26dc5c2a3 100644
--- a/drivers/rtc/rtc-ls1x.c
+++ b/drivers/rtc/rtc-ls1x.c
@@ -176,7 +176,7 @@ static int ls1x_rtc_probe(struct platform_device *pdev)
rtcdev->range_min = RTC_TIMESTAMP_BEGIN_1900;
rtcdev->range_max = RTC_TIMESTAMP_END_2099;
- return rtc_register_device(rtcdev);
+ return devm_rtc_register_device(rtcdev);
}
static struct platform_driver ls1x_rtc_driver = {
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index 8a89bc52b0d4..160dcf68e64e 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -977,7 +977,7 @@ static int m41t80_probe(struct i2c_client *client,
m41t80_sqw_register_clk(m41t80_data);
#endif
- rc = rtc_register_device(m41t80_data->rtc);
+ rc = devm_rtc_register_device(m41t80_data->rtc);
if (rc)
return rc;
diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
index 67e218758a8b..5f5898d3b055 100644
--- a/drivers/rtc/rtc-m48t59.c
+++ b/drivers/rtc/rtc-m48t59.c
@@ -463,15 +463,14 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
if (IS_ERR(m48t59->rtc))
return PTR_ERR(m48t59->rtc);
- m48t59->rtc->nvram_old_abi = true;
m48t59->rtc->ops = ops;
nvmem_cfg.size = pdata->offset;
- ret = rtc_nvmem_register(m48t59->rtc, &nvmem_cfg);
+ ret = devm_rtc_nvmem_register(m48t59->rtc, &nvmem_cfg);
if (ret)
return ret;
- ret = rtc_register_device(m48t59->rtc);
+ ret = devm_rtc_register_device(m48t59->rtc);
if (ret)
return ret;
diff --git a/drivers/rtc/rtc-m48t86.c b/drivers/rtc/rtc-m48t86.c
index 75a0e73071d8..481c9525b1dd 100644
--- a/drivers/rtc/rtc-m48t86.c
+++ b/drivers/rtc/rtc-m48t86.c
@@ -254,13 +254,12 @@ static int m48t86_rtc_probe(struct platform_device *pdev)
return PTR_ERR(info->rtc);
info->rtc->ops = &m48t86_rtc_ops;
- info->rtc->nvram_old_abi = true;
- err = rtc_register_device(info->rtc);
+ err = devm_rtc_register_device(info->rtc);
if (err)
return err;
- rtc_nvmem_register(info->rtc, &m48t86_nvmem_cfg);
+ devm_rtc_nvmem_register(info->rtc, &m48t86_nvmem_cfg);
/* read battery status */
reg = m48t86_readb(&pdev->dev, M48T86_D);
diff --git a/drivers/rtc/rtc-mc13xxx.c b/drivers/rtc/rtc-mc13xxx.c
index d6802e6191cb..d4234e78497e 100644
--- a/drivers/rtc/rtc-mc13xxx.c
+++ b/drivers/rtc/rtc-mc13xxx.c
@@ -307,7 +307,7 @@ static int __init mc13xxx_rtc_probe(struct platform_device *pdev)
mc13xxx_unlock(mc13xxx);
- ret = rtc_register_device(priv->rtc);
+ ret = devm_rtc_register_device(priv->rtc);
if (ret) {
mc13xxx_lock(mc13xxx);
goto err_irq_request;
diff --git a/drivers/rtc/rtc-mc146818-lib.c b/drivers/rtc/rtc-mc146818-lib.c
index 972a5b9a629d..dcfaf09946ee 100644
--- a/drivers/rtc/rtc-mc146818-lib.c
+++ b/drivers/rtc/rtc-mc146818-lib.c
@@ -21,6 +21,13 @@ unsigned int mc146818_get_time(struct rtc_time *time)
again:
spin_lock_irqsave(&rtc_lock, flags);
+ /* Ensure that the RTC is accessible. Bit 6 must be 0! */
+ if (WARN_ON_ONCE((CMOS_READ(RTC_VALID) & 0x40) != 0)) {
+ spin_unlock_irqrestore(&rtc_lock, flags);
+ memset(time, 0xff, sizeof(*time));
+ return 0;
+ }
+
/*
* Check whether there is an update in progress during which the
* readout is unspecified. The maximum update time is ~2ms. Poll
diff --git a/drivers/rtc/rtc-meson-vrtc.c b/drivers/rtc/rtc-meson-vrtc.c
index e6bd0808a092..1463c8621561 100644
--- a/drivers/rtc/rtc-meson-vrtc.c
+++ b/drivers/rtc/rtc-meson-vrtc.c
@@ -83,7 +83,7 @@ static int meson_vrtc_probe(struct platform_device *pdev)
return PTR_ERR(vrtc->rtc);
vrtc->rtc->ops = &meson_vrtc_ops;
- return rtc_register_device(vrtc->rtc);
+ return devm_rtc_register_device(vrtc->rtc);
}
static int __maybe_unused meson_vrtc_suspend(struct device *dev)
diff --git a/drivers/rtc/rtc-meson.c b/drivers/rtc/rtc-meson.c
index 47ebcf834cc2..8642c06565ea 100644
--- a/drivers/rtc/rtc-meson.c
+++ b/drivers/rtc/rtc-meson.c
@@ -365,11 +365,11 @@ static int meson_rtc_probe(struct platform_device *pdev)
}
meson_rtc_nvmem_config.priv = rtc;
- ret = rtc_nvmem_register(rtc->rtc, &meson_rtc_nvmem_config);
+ ret = devm_rtc_nvmem_register(rtc->rtc, &meson_rtc_nvmem_config);
if (ret)
goto out_disable_vdd;
- ret = rtc_register_device(rtc->rtc);
+ ret = devm_rtc_register_device(rtc->rtc);
if (ret)
goto out_disable_vdd;
diff --git a/drivers/rtc/rtc-mpc5121.c b/drivers/rtc/rtc-mpc5121.c
index 5c2ce71aa044..bb2ea9bc56f2 100644
--- a/drivers/rtc/rtc-mpc5121.c
+++ b/drivers/rtc/rtc-mpc5121.c
@@ -371,7 +371,7 @@ static int mpc5121_rtc_probe(struct platform_device *op)
rtc->rtc->range_max = U32_MAX;
}
- err = rtc_register_device(rtc->rtc);
+ err = devm_rtc_register_device(rtc->rtc);
if (err)
goto out_dispose2;
diff --git a/drivers/rtc/rtc-mrst.c b/drivers/rtc/rtc-mrst.c
index 17bf5394e1e5..421b3b6071b6 100644
--- a/drivers/rtc/rtc-mrst.c
+++ b/drivers/rtc/rtc-mrst.c
@@ -361,7 +361,7 @@ static int vrtc_mrst_do_probe(struct device *dev, struct resource *iomem,
}
}
- retval = rtc_register_device(mrst_rtc.rtc);
+ retval = devm_rtc_register_device(mrst_rtc.rtc);
if (retval)
goto cleanup0;
diff --git a/drivers/rtc/rtc-mt2712.c b/drivers/rtc/rtc-mt2712.c
index d5f691c8a035..cd92a9788351 100644
--- a/drivers/rtc/rtc-mt2712.c
+++ b/drivers/rtc/rtc-mt2712.c
@@ -352,7 +352,7 @@ static int mt2712_rtc_probe(struct platform_device *pdev)
mt2712_rtc->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
mt2712_rtc->rtc->range_max = MT2712_RTC_TIMESTAMP_END_2127;
- return rtc_register_device(mt2712_rtc->rtc);
+ return devm_rtc_register_device(mt2712_rtc->rtc);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/rtc/rtc-mt6397.c b/drivers/rtc/rtc-mt6397.c
index 1894aded4c85..6655035e5164 100644
--- a/drivers/rtc/rtc-mt6397.c
+++ b/drivers/rtc/rtc-mt6397.c
@@ -301,7 +301,7 @@ static int mtk_rtc_probe(struct platform_device *pdev)
rtc->rtc_dev->ops = &mtk_rtc_ops;
- return rtc_register_device(rtc->rtc_dev);
+ return devm_rtc_register_device(rtc->rtc_dev);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/rtc/rtc-mv.c b/drivers/rtc/rtc-mv.c
index d5f190e578e4..f8e2ecea1d8d 100644
--- a/drivers/rtc/rtc-mv.c
+++ b/drivers/rtc/rtc-mv.c
@@ -278,7 +278,7 @@ static int __init mv_rtc_probe(struct platform_device *pdev)
pdata->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
pdata->rtc->range_max = RTC_TIMESTAMP_END_2099;
- ret = rtc_register_device(pdata->rtc);
+ ret = devm_rtc_register_device(pdata->rtc);
if (!ret)
return 0;
out:
diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c
index a8cfbde048f4..65b29b0fa548 100644
--- a/drivers/rtc/rtc-mxc.c
+++ b/drivers/rtc/rtc-mxc.c
@@ -70,27 +70,12 @@ struct rtc_plat_data {
enum imx_rtc_type devtype;
};
-static const struct platform_device_id imx_rtc_devtype[] = {
- {
- .name = "imx1-rtc",
- .driver_data = IMX1_RTC,
- }, {
- .name = "imx21-rtc",
- .driver_data = IMX21_RTC,
- }, {
- /* sentinel */
- }
-};
-MODULE_DEVICE_TABLE(platform, imx_rtc_devtype);
-
-#ifdef CONFIG_OF
static const struct of_device_id imx_rtc_dt_ids[] = {
{ .compatible = "fsl,imx1-rtc", .data = (const void *)IMX1_RTC },
{ .compatible = "fsl,imx21-rtc", .data = (const void *)IMX21_RTC },
{}
};
MODULE_DEVICE_TABLE(of, imx_rtc_dt_ids);
-#endif
static inline int is_imx1_rtc(struct rtc_plat_data *data)
{
@@ -322,17 +307,12 @@ static int mxc_rtc_probe(struct platform_device *pdev)
u32 reg;
unsigned long rate;
int ret;
- const struct of_device_id *of_id;
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
- of_id = of_match_device(imx_rtc_dt_ids, &pdev->dev);
- if (of_id)
- pdata->devtype = (enum imx_rtc_type)of_id->data;
- else
- pdata->devtype = pdev->id_entry->driver_data;
+ pdata->devtype = (enum imx_rtc_type)of_device_get_match_data(&pdev->dev);
pdata->ioaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pdata->ioaddr))
@@ -428,7 +408,7 @@ static int mxc_rtc_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "failed to enable irq wake\n");
}
- ret = rtc_register_device(rtc);
+ ret = devm_rtc_register_device(rtc);
return ret;
}
@@ -438,7 +418,6 @@ static struct platform_driver mxc_rtc_driver = {
.name = "mxc_rtc",
.of_match_table = of_match_ptr(imx_rtc_dt_ids),
},
- .id_table = imx_rtc_devtype,
.probe = mxc_rtc_probe,
};
diff --git a/drivers/rtc/rtc-mxc_v2.c b/drivers/rtc/rtc-mxc_v2.c
index 91534560fe2a..0d73f6f0cf9e 100644
--- a/drivers/rtc/rtc-mxc_v2.c
+++ b/drivers/rtc/rtc-mxc_v2.c
@@ -354,7 +354,7 @@ static int mxc_rtc_probe(struct platform_device *pdev)
return ret;
}
- ret = rtc_register_device(pdata->rtc);
+ ret = devm_rtc_register_device(pdata->rtc);
if (ret < 0)
clk_unprepare(pdata->clk);
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index c20fc7937dfa..dc7db2477f88 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -879,18 +879,18 @@ static int omap_rtc_probe(struct platform_device *pdev)
/* Support ext_wakeup pinconf */
rtc_pinctrl_desc.name = dev_name(&pdev->dev);
- rtc->pctldev = pinctrl_register(&rtc_pinctrl_desc, &pdev->dev, rtc);
+ rtc->pctldev = devm_pinctrl_register(&pdev->dev, &rtc_pinctrl_desc, rtc);
if (IS_ERR(rtc->pctldev)) {
dev_err(&pdev->dev, "Couldn't register pinctrl driver\n");
ret = PTR_ERR(rtc->pctldev);
goto err;
}
- ret = rtc_register_device(rtc->rtc);
+ ret = devm_rtc_register_device(rtc->rtc);
if (ret)
- goto err_deregister_pinctrl;
+ goto err;
- rtc_nvmem_register(rtc->rtc, &omap_rtc_nvmem_config);
+ devm_rtc_nvmem_register(rtc->rtc, &omap_rtc_nvmem_config);
if (rtc->is_pmic_controller) {
if (!pm_power_off) {
@@ -901,8 +901,6 @@ static int omap_rtc_probe(struct platform_device *pdev)
return 0;
-err_deregister_pinctrl:
- pinctrl_unregister(rtc->pctldev);
err:
clk_disable_unprepare(rtc->clk);
device_init_wakeup(&pdev->dev, false);
@@ -945,9 +943,6 @@ static int omap_rtc_remove(struct platform_device *pdev)
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- /* Remove ext_wakeup pinconf */
- pinctrl_unregister(rtc->pctldev);
-
return 0;
}
diff --git a/drivers/rtc/rtc-pcap.c b/drivers/rtc/rtc-pcap.c
index 178bfb1dea21..8c7a98a5452c 100644
--- a/drivers/rtc/rtc-pcap.c
+++ b/drivers/rtc/rtc-pcap.c
@@ -163,7 +163,7 @@ static int __init pcap_rtc_probe(struct platform_device *pdev)
if (err)
return err;
- return rtc_register_device(pcap_rtc->rtc);
+ return devm_rtc_register_device(pcap_rtc->rtc);
}
static int __exit pcap_rtc_remove(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-pcf2123.c b/drivers/rtc/rtc-pcf2123.c
index c3691fa4210e..534ffc91eec1 100644
--- a/drivers/rtc/rtc-pcf2123.c
+++ b/drivers/rtc/rtc-pcf2123.c
@@ -434,7 +434,7 @@ static int pcf2123_probe(struct spi_device *spi)
rtc->range_max = RTC_TIMESTAMP_END_2099;
rtc->set_start_time = true;
- ret = rtc_register_device(rtc);
+ ret = devm_rtc_register_device(rtc);
if (ret)
return ret;
diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c
index 07a5630ec841..39a7b5116aa4 100644
--- a/drivers/rtc/rtc-pcf2127.c
+++ b/drivers/rtc/rtc-pcf2127.c
@@ -243,10 +243,8 @@ static int pcf2127_nvmem_read(void *priv, unsigned int offset,
if (ret)
return ret;
- ret = regmap_bulk_read(pcf2127->regmap, PCF2127_REG_RAM_RD_CMD,
- val, bytes);
-
- return ret ?: bytes;
+ return regmap_bulk_read(pcf2127->regmap, PCF2127_REG_RAM_RD_CMD,
+ val, bytes);
}
static int pcf2127_nvmem_write(void *priv, unsigned int offset,
@@ -261,10 +259,8 @@ static int pcf2127_nvmem_write(void *priv, unsigned int offset,
if (ret)
return ret;
- ret = regmap_bulk_write(pcf2127->regmap, PCF2127_REG_RAM_WRT_CMD,
- val, bytes);
-
- return ret ?: bytes;
+ return regmap_bulk_write(pcf2127->regmap, PCF2127_REG_RAM_WRT_CMD,
+ val, bytes);
}
/* watchdog driver */
@@ -335,6 +331,37 @@ static const struct watchdog_ops pcf2127_watchdog_ops = {
.set_timeout = pcf2127_wdt_set_timeout,
};
+static int pcf2127_watchdog_init(struct device *dev, struct pcf2127 *pcf2127)
+{
+ u32 wdd_timeout;
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_WATCHDOG) ||
+ !device_property_read_bool(dev, "reset-source"))
+ return 0;
+
+ pcf2127->wdd.parent = dev;
+ pcf2127->wdd.info = &pcf2127_wdt_info;
+ pcf2127->wdd.ops = &pcf2127_watchdog_ops;
+ pcf2127->wdd.min_timeout = PCF2127_WD_VAL_MIN;
+ pcf2127->wdd.max_timeout = PCF2127_WD_VAL_MAX;
+ pcf2127->wdd.timeout = PCF2127_WD_VAL_DEFAULT;
+ pcf2127->wdd.min_hw_heartbeat_ms = 500;
+ pcf2127->wdd.status = WATCHDOG_NOWAYOUT_INIT_STATUS;
+
+ watchdog_set_drvdata(&pcf2127->wdd, pcf2127);
+
+ /* Test if watchdog timer is started by bootloader */
+ ret = regmap_read(pcf2127->regmap, PCF2127_REG_WD_VAL, &wdd_timeout);
+ if (ret)
+ return ret;
+
+ if (wdd_timeout)
+ set_bit(WDOG_HW_RUNNING, &pcf2127->wdd.status);
+
+ return devm_watchdog_register_device(dev, &pcf2127->wdd);
+}
+
/* Alarm */
static int pcf2127_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
@@ -536,7 +563,6 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
int alarm_irq, const char *name, bool has_nvmem)
{
struct pcf2127 *pcf2127;
- u32 wdd_timeout;
int ret = 0;
dev_dbg(dev, "%s\n", __func__);
@@ -575,17 +601,6 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
pcf2127->rtc->ops = &pcf2127_rtc_alrm_ops;
}
- pcf2127->wdd.parent = dev;
- pcf2127->wdd.info = &pcf2127_wdt_info;
- pcf2127->wdd.ops = &pcf2127_watchdog_ops;
- pcf2127->wdd.min_timeout = PCF2127_WD_VAL_MIN;
- pcf2127->wdd.max_timeout = PCF2127_WD_VAL_MAX;
- pcf2127->wdd.timeout = PCF2127_WD_VAL_DEFAULT;
- pcf2127->wdd.min_hw_heartbeat_ms = 500;
- pcf2127->wdd.status = WATCHDOG_NOWAYOUT_INIT_STATUS;
-
- watchdog_set_drvdata(&pcf2127->wdd, pcf2127);
-
if (has_nvmem) {
struct nvmem_config nvmem_cfg = {
.priv = pcf2127,
@@ -594,7 +609,7 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
.size = 512,
};
- ret = rtc_nvmem_register(pcf2127->rtc, &nvmem_cfg);
+ ret = devm_rtc_nvmem_register(pcf2127->rtc, &nvmem_cfg);
}
/*
@@ -615,19 +630,7 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
return ret;
}
- /* Test if watchdog timer is started by bootloader */
- ret = regmap_read(pcf2127->regmap, PCF2127_REG_WD_VAL, &wdd_timeout);
- if (ret)
- return ret;
-
- if (wdd_timeout)
- set_bit(WDOG_HW_RUNNING, &pcf2127->wdd.status);
-
-#ifdef CONFIG_WATCHDOG
- ret = devm_watchdog_register_device(dev, &pcf2127->wdd);
- if (ret)
- return ret;
-#endif /* CONFIG_WATCHDOG */
+ pcf2127_watchdog_init(dev, pcf2127);
/*
* Disable battery low/switch-over timestamp and interrupts.
@@ -680,7 +683,7 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
return ret;
}
- return rtc_register_device(pcf2127->rtc);
+ return devm_rtc_register_device(pcf2127->rtc);
}
#ifdef CONFIG_OF
diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c
index f8b99cb72959..e19cf2adbc35 100644
--- a/drivers/rtc/rtc-pcf85063.c
+++ b/drivers/rtc/rtc-pcf85063.c
@@ -607,14 +607,14 @@ static int pcf85063_probe(struct i2c_client *client)
}
nvmem_cfg.priv = pcf85063->regmap;
- rtc_nvmem_register(pcf85063->rtc, &nvmem_cfg);
+ devm_rtc_nvmem_register(pcf85063->rtc, &nvmem_cfg);
#ifdef CONFIG_COMMON_CLK
/* register clk in common clk framework */
pcf85063_clkout_register_clk(pcf85063);
#endif
- return rtc_register_device(pcf85063->rtc);
+ return devm_rtc_register_device(pcf85063->rtc);
}
#ifdef CONFIG_OF
diff --git a/drivers/rtc/rtc-pcf8523.c b/drivers/rtc/rtc-pcf8523.c
index 57d351dfe272..5e1e7b2a8c9a 100644
--- a/drivers/rtc/rtc-pcf8523.c
+++ b/drivers/rtc/rtc-pcf8523.c
@@ -12,18 +12,18 @@
#define DRIVER_NAME "rtc-pcf8523"
#define REG_CONTROL1 0x00
-#define REG_CONTROL1_CAP_SEL (1 << 7)
-#define REG_CONTROL1_STOP (1 << 5)
+#define REG_CONTROL1_CAP_SEL BIT(7)
+#define REG_CONTROL1_STOP BIT(5)
#define REG_CONTROL3 0x02
-#define REG_CONTROL3_PM_BLD (1 << 7) /* battery low detection disabled */
-#define REG_CONTROL3_PM_VDD (1 << 6) /* switch-over disabled */
-#define REG_CONTROL3_PM_DSM (1 << 5) /* direct switching mode */
+#define REG_CONTROL3_PM_BLD BIT(7) /* battery low detection disabled */
+#define REG_CONTROL3_PM_VDD BIT(6) /* switch-over disabled */
+#define REG_CONTROL3_PM_DSM BIT(5) /* direct switching mode */
#define REG_CONTROL3_PM_MASK 0xe0
-#define REG_CONTROL3_BLF (1 << 2) /* battery low bit, read-only */
+#define REG_CONTROL3_BLF BIT(2) /* battery low bit, read-only */
#define REG_SECONDS 0x03
-#define REG_SECONDS_OS (1 << 7)
+#define REG_SECONDS_OS BIT(7)
#define REG_MINUTES 0x04
#define REG_HOURS 0x05
@@ -226,17 +226,6 @@ static int pcf8523_rtc_set_time(struct device *dev, struct rtc_time *tm)
u8 regs[8];
int err;
- /*
- * The hardware can only store values between 0 and 99 in its YEAR
- * register (with 99 overflowing to 0 on increment).
- * After 2100-02-28 we could start interpreting the year to be in the
- * interval [2100, 2199], but there is no path to switch in a smooth way
- * because the chip handles YEAR=0x00 (and the out-of-spec
- * YEAR=0xa0) as a leap year, but 2100 isn't.
- */
- if (tm->tm_year < 100 || tm->tm_year >= 200)
- return -EINVAL;
-
err = pcf8523_stop_rtc(client);
if (err < 0)
return err;
@@ -356,12 +345,15 @@ static int pcf8523_probe(struct i2c_client *client,
if (err < 0)
return err;
- rtc = devm_rtc_device_register(&client->dev, DRIVER_NAME,
- &pcf8523_rtc_ops, THIS_MODULE);
+ rtc = devm_rtc_allocate_device(&client->dev);
if (IS_ERR(rtc))
return PTR_ERR(rtc);
- return 0;
+ rtc->ops = &pcf8523_rtc_ops;
+ rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ rtc->range_max = RTC_TIMESTAMP_END_2099;
+
+ return devm_rtc_register_device(rtc);
}
static const struct i2c_device_id pcf8523_id[] = {
diff --git a/drivers/rtc/rtc-pcf85363.c b/drivers/rtc/rtc-pcf85363.c
index 3450d615974d..a574c8d15a5c 100644
--- a/drivers/rtc/rtc-pcf85363.c
+++ b/drivers/rtc/rtc-pcf85363.c
@@ -418,11 +418,11 @@ static int pcf85363_probe(struct i2c_client *client,
pcf85363->rtc->ops = &rtc_ops_alarm;
}
- ret = rtc_register_device(pcf85363->rtc);
+ ret = devm_rtc_register_device(pcf85363->rtc);
for (i = 0; i < config->num_nvram; i++) {
nvmem_cfg[i].priv = pcf85363;
- rtc_nvmem_register(pcf85363->rtc, &nvmem_cfg[i]);
+ devm_rtc_nvmem_register(pcf85363->rtc, &nvmem_cfg[i]);
}
return ret;
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index 2dc30eafa639..de3e6c355f2e 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -582,7 +582,7 @@ static int pcf8563_probe(struct i2c_client *client,
}
}
- err = rtc_register_device(pcf8563->rtc);
+ err = devm_rtc_register_device(pcf8563->rtc);
if (err)
return err;
diff --git a/drivers/rtc/rtc-pic32.c b/drivers/rtc/rtc-pic32.c
index 2b6946744654..7fb9145c43bd 100644
--- a/drivers/rtc/rtc-pic32.c
+++ b/drivers/rtc/rtc-pic32.c
@@ -338,7 +338,7 @@ static int pic32_rtc_probe(struct platform_device *pdev)
pdata->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
pdata->rtc->range_max = RTC_TIMESTAMP_END_2099;
- ret = rtc_register_device(pdata->rtc);
+ ret = devm_rtc_register_device(pdata->rtc);
if (ret)
goto err_nortc;
diff --git a/drivers/rtc/rtc-pl030.c b/drivers/rtc/rtc-pl030.c
index ebe03eba8f5f..5a880516f3e8 100644
--- a/drivers/rtc/rtc-pl030.c
+++ b/drivers/rtc/rtc-pl030.c
@@ -121,7 +121,7 @@ static int pl030_probe(struct amba_device *dev, const struct amba_id *id)
if (ret)
goto err_irq;
- ret = rtc_register_device(rtc->rtc);
+ ret = devm_rtc_register_device(rtc->rtc);
if (ret)
goto err_reg;
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index c6b89273feba..224bbf096262 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -361,14 +361,16 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
device_init_wakeup(&adev->dev, true);
ldata->rtc = devm_rtc_allocate_device(&adev->dev);
- if (IS_ERR(ldata->rtc))
- return PTR_ERR(ldata->rtc);
+ if (IS_ERR(ldata->rtc)) {
+ ret = PTR_ERR(ldata->rtc);
+ goto out;
+ }
ldata->rtc->ops = ops;
ldata->rtc->range_min = vendor->range_min;
ldata->rtc->range_max = vendor->range_max;
- ret = rtc_register_device(ldata->rtc);
+ ret = devm_rtc_register_device(ldata->rtc);
if (ret)
goto out;
diff --git a/drivers/rtc/rtc-pm8xxx.c b/drivers/rtc/rtc-pm8xxx.c
index b45ee2cb2c04..0d9dd6faabba 100644
--- a/drivers/rtc/rtc-pm8xxx.c
+++ b/drivers/rtc/rtc-pm8xxx.c
@@ -508,7 +508,7 @@ static int pm8xxx_rtc_probe(struct platform_device *pdev)
return rc;
}
- return rtc_register_device(rtc_dd->rtc);
+ return devm_rtc_register_device(rtc_dd->rtc);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/rtc/rtc-ps3.c b/drivers/rtc/rtc-ps3.c
index f0336d691e6c..6b098734c715 100644
--- a/drivers/rtc/rtc-ps3.c
+++ b/drivers/rtc/rtc-ps3.c
@@ -56,7 +56,7 @@ static int __init ps3_rtc_probe(struct platform_device *dev)
platform_set_drvdata(dev, rtc);
- return rtc_register_device(rtc);
+ return devm_rtc_register_device(rtc);
}
static struct platform_driver ps3_rtc_driver = {
diff --git a/drivers/rtc/rtc-r9701.c b/drivers/rtc/rtc-r9701.c
index 7ceb968f0e44..60a3c3d7499b 100644
--- a/drivers/rtc/rtc-r9701.c
+++ b/drivers/rtc/rtc-r9701.c
@@ -127,7 +127,7 @@ static int r9701_probe(struct spi_device *spi)
rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
rtc->range_max = RTC_TIMESTAMP_END_2099;
- return rtc_register_device(rtc);
+ return devm_rtc_register_device(rtc);
}
static struct spi_driver r9701_driver = {
diff --git a/drivers/rtc/rtc-rc5t619.c b/drivers/rtc/rtc-rc5t619.c
index dd1a20977478..e73102a39f1b 100644
--- a/drivers/rtc/rtc-rc5t619.c
+++ b/drivers/rtc/rtc-rc5t619.c
@@ -426,7 +426,7 @@ static int rc5t619_rtc_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "rc5t619 interrupt is disabled\n");
}
- return rtc_register_device(rtc->rtc);
+ return devm_rtc_register_device(rtc->rtc);
}
static struct platform_driver rc5t619_rtc_driver = {
diff --git a/drivers/rtc/rtc-rk808.c b/drivers/rtc/rtc-rk808.c
index c0334c602e88..e920da8c08da 100644
--- a/drivers/rtc/rtc-rk808.c
+++ b/drivers/rtc/rtc-rk808.c
@@ -447,7 +447,7 @@ static int rk808_rtc_probe(struct platform_device *pdev)
return ret;
}
- return rtc_register_device(rk808_rtc->rtc);
+ return devm_rtc_register_device(rk808_rtc->rtc);
}
static struct platform_driver rk808_rtc_driver = {
diff --git a/drivers/rtc/rtc-rp5c01.c b/drivers/rtc/rtc-rp5c01.c
index 8776eadbdd3a..44afa6d996e7 100644
--- a/drivers/rtc/rtc-rp5c01.c
+++ b/drivers/rtc/rtc-rp5c01.c
@@ -251,16 +251,15 @@ static int __init rp5c01_rtc_probe(struct platform_device *dev)
return PTR_ERR(rtc);
rtc->ops = &rp5c01_rtc_ops;
- rtc->nvram_old_abi = true;
priv->rtc = rtc;
nvmem_cfg.priv = priv;
- error = rtc_nvmem_register(rtc, &nvmem_cfg);
+ error = devm_rtc_nvmem_register(rtc, &nvmem_cfg);
if (error)
return error;
- return rtc_register_device(rtc);
+ return devm_rtc_register_device(rtc);
}
static struct platform_driver rp5c01_rtc_driver = {
diff --git a/drivers/rtc/rtc-rs5c348.c b/drivers/rtc/rtc-rs5c348.c
index 47c13678449e..fec633f80789 100644
--- a/drivers/rtc/rtc-rs5c348.c
+++ b/drivers/rtc/rtc-rs5c348.c
@@ -197,7 +197,7 @@ static int rs5c348_probe(struct spi_device *spi)
rtc->ops = &rs5c348_rtc_ops;
- return rtc_register_device(rtc);
+ return devm_rtc_register_device(rtc);
}
static struct spi_driver rs5c348_driver = {
diff --git a/drivers/rtc/rtc-rv3028.c b/drivers/rtc/rtc-rv3028.c
index fa226f0fe67d..979407a51c7a 100644
--- a/drivers/rtc/rtc-rv3028.c
+++ b/drivers/rtc/rtc-rv3028.c
@@ -886,14 +886,14 @@ static int rv3028_probe(struct i2c_client *client)
rv3028->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
rv3028->rtc->range_max = RTC_TIMESTAMP_END_2099;
rv3028->rtc->ops = &rv3028_rtc_ops;
- ret = rtc_register_device(rv3028->rtc);
+ ret = devm_rtc_register_device(rv3028->rtc);
if (ret)
return ret;
nvmem_cfg.priv = rv3028->regmap;
- rtc_nvmem_register(rv3028->rtc, &nvmem_cfg);
+ devm_rtc_nvmem_register(rv3028->rtc, &nvmem_cfg);
eeprom_cfg.priv = rv3028;
- rtc_nvmem_register(rv3028->rtc, &eeprom_cfg);
+ devm_rtc_nvmem_register(rv3028->rtc, &eeprom_cfg);
rv3028->rtc->max_user_freq = 1;
diff --git a/drivers/rtc/rtc-rv3029c2.c b/drivers/rtc/rtc-rv3029c2.c
index 62718231731b..dc1bda62095e 100644
--- a/drivers/rtc/rtc-rv3029c2.c
+++ b/drivers/rtc/rtc-rv3029c2.c
@@ -750,12 +750,12 @@ static int rv3029_probe(struct device *dev, struct regmap *regmap, int irq,
rv3029->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
rv3029->rtc->range_max = RTC_TIMESTAMP_END_2079;
- rc = rtc_register_device(rv3029->rtc);
+ rc = devm_rtc_register_device(rv3029->rtc);
if (rc)
return rc;
nvmem_cfg.priv = rv3029->regmap;
- rtc_nvmem_register(rv3029->rtc, &nvmem_cfg);
+ devm_rtc_nvmem_register(rv3029->rtc, &nvmem_cfg);
return 0;
}
diff --git a/drivers/rtc/rtc-rv3032.c b/drivers/rtc/rtc-rv3032.c
index 3e67f71f4261..c9bcea727757 100644
--- a/drivers/rtc/rtc-rv3032.c
+++ b/drivers/rtc/rtc-rv3032.c
@@ -885,14 +885,14 @@ static int rv3032_probe(struct i2c_client *client)
rv3032->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
rv3032->rtc->range_max = RTC_TIMESTAMP_END_2099;
rv3032->rtc->ops = &rv3032_rtc_ops;
- ret = rtc_register_device(rv3032->rtc);
+ ret = devm_rtc_register_device(rv3032->rtc);
if (ret)
return ret;
- nvmem_cfg.priv = rv3032;
- rtc_nvmem_register(rv3032->rtc, &nvmem_cfg);
+ nvmem_cfg.priv = rv3032->regmap;
+ devm_rtc_nvmem_register(rv3032->rtc, &nvmem_cfg);
eeprom_cfg.priv = rv3032;
- rtc_nvmem_register(rv3032->rtc, &eeprom_cfg);
+ devm_rtc_nvmem_register(rv3032->rtc, &eeprom_cfg);
rv3032->rtc->max_user_freq = 1;
diff --git a/drivers/rtc/rtc-rv8803.c b/drivers/rtc/rtc-rv8803.c
index c6d8e3425688..d4ea6db51b26 100644
--- a/drivers/rtc/rtc-rv8803.c
+++ b/drivers/rtc/rtc-rv8803.c
@@ -585,14 +585,13 @@ static int rv8803_probe(struct i2c_client *client,
}
rv8803->rtc->ops = &rv8803_rtc_ops;
- rv8803->rtc->nvram_old_abi = true;
rv8803->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
rv8803->rtc->range_max = RTC_TIMESTAMP_END_2099;
- err = rtc_register_device(rv8803->rtc);
+ err = devm_rtc_register_device(rv8803->rtc);
if (err)
return err;
- rtc_nvmem_register(rv8803->rtc, &nvmem_cfg);
+ devm_rtc_nvmem_register(rv8803->rtc, &nvmem_cfg);
rv8803->rtc->max_user_freq = 1;
diff --git a/drivers/rtc/rtc-rx6110.c b/drivers/rtc/rtc-rx6110.c
index 3a9eb7043f01..a7b671a21022 100644
--- a/drivers/rtc/rtc-rx6110.c
+++ b/drivers/rtc/rtc-rx6110.c
@@ -16,6 +16,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/spi/spi.h>
+#include <linux/i2c.h>
/* RX-6110 Register definitions */
#define RX6110_REG_SEC 0x10
@@ -310,6 +311,27 @@ static const struct rtc_class_ops rx6110_rtc_ops = {
.set_time = rx6110_set_time,
};
+static int rx6110_probe(struct rx6110_data *rx6110, struct device *dev)
+{
+ int err;
+
+ rx6110->rtc = devm_rtc_device_register(dev,
+ RX6110_DRIVER_NAME,
+ &rx6110_rtc_ops, THIS_MODULE);
+
+ if (IS_ERR(rx6110->rtc))
+ return PTR_ERR(rx6110->rtc);
+
+ err = rx6110_init(rx6110);
+ if (err)
+ return err;
+
+ rx6110->rtc->max_user_freq = 1;
+
+ return 0;
+}
+
+#ifdef CONFIG_SPI_MASTER
static struct regmap_config regmap_spi_config = {
.reg_bits = 8,
.val_bits = 8,
@@ -318,13 +340,12 @@ static struct regmap_config regmap_spi_config = {
};
/**
- * rx6110_probe - initialize rtc driver
+ * rx6110_spi_probe - initialize rtc driver
* @spi: pointer to spi device
*/
-static int rx6110_probe(struct spi_device *spi)
+static int rx6110_spi_probe(struct spi_device *spi)
{
struct rx6110_data *rx6110;
- int err;
if ((spi->bits_per_word && spi->bits_per_word != 8) ||
(spi->max_speed_hz > 2000000) ||
@@ -346,27 +367,14 @@ static int rx6110_probe(struct spi_device *spi)
spi_set_drvdata(spi, rx6110);
- rx6110->rtc = devm_rtc_device_register(&spi->dev,
- RX6110_DRIVER_NAME,
- &rx6110_rtc_ops, THIS_MODULE);
-
- if (IS_ERR(rx6110->rtc))
- return PTR_ERR(rx6110->rtc);
-
- err = rx6110_init(rx6110);
- if (err)
- return err;
-
- rx6110->rtc->max_user_freq = 1;
-
- return 0;
+ return rx6110_probe(rx6110, &spi->dev);
}
-static const struct spi_device_id rx6110_id[] = {
+static const struct spi_device_id rx6110_spi_id[] = {
{ "rx6110", 0 },
{ }
};
-MODULE_DEVICE_TABLE(spi, rx6110_id);
+MODULE_DEVICE_TABLE(spi, rx6110_spi_id);
static const struct of_device_id rx6110_spi_of_match[] = {
{ .compatible = "epson,rx6110" },
@@ -374,16 +382,127 @@ static const struct of_device_id rx6110_spi_of_match[] = {
};
MODULE_DEVICE_TABLE(of, rx6110_spi_of_match);
-static struct spi_driver rx6110_driver = {
+static struct spi_driver rx6110_spi_driver = {
.driver = {
.name = RX6110_DRIVER_NAME,
.of_match_table = of_match_ptr(rx6110_spi_of_match),
},
- .probe = rx6110_probe,
- .id_table = rx6110_id,
+ .probe = rx6110_spi_probe,
+ .id_table = rx6110_spi_id,
+};
+
+static int rx6110_spi_register(void)
+{
+ return spi_register_driver(&rx6110_spi_driver);
+}
+
+static void rx6110_spi_unregister(void)
+{
+ spi_unregister_driver(&rx6110_spi_driver);
+}
+#else
+static int rx6110_spi_register(void)
+{
+ return 0;
+}
+
+static void rx6110_spi_unregister(void)
+{
+}
+#endif /* CONFIG_SPI_MASTER */
+
+#ifdef CONFIG_I2C
+static struct regmap_config regmap_i2c_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = RX6110_REG_IRQ,
+ .read_flag_mask = 0x80,
};
-module_spi_driver(rx6110_driver);
+static int rx6110_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ struct rx6110_data *rx6110;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA
+ | I2C_FUNC_SMBUS_I2C_BLOCK)) {
+ dev_err(&adapter->dev,
+ "doesn't support required functionality\n");
+ return -EIO;
+ }
+
+ rx6110 = devm_kzalloc(&client->dev, sizeof(*rx6110), GFP_KERNEL);
+ if (!rx6110)
+ return -ENOMEM;
+
+ rx6110->regmap = devm_regmap_init_i2c(client, &regmap_i2c_config);
+ if (IS_ERR(rx6110->regmap)) {
+ dev_err(&client->dev, "regmap init failed for rtc rx6110\n");
+ return PTR_ERR(rx6110->regmap);
+ }
+
+ i2c_set_clientdata(client, rx6110);
+
+ return rx6110_probe(rx6110, &client->dev);
+}
+
+static const struct i2c_device_id rx6110_i2c_id[] = {
+ { "rx6110", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, rx6110_i2c_id);
+
+static struct i2c_driver rx6110_i2c_driver = {
+ .driver = {
+ .name = RX6110_DRIVER_NAME,
+ },
+ .probe = rx6110_i2c_probe,
+ .id_table = rx6110_i2c_id,
+};
+
+static int rx6110_i2c_register(void)
+{
+ return i2c_add_driver(&rx6110_i2c_driver);
+}
+
+static void rx6110_i2c_unregister(void)
+{
+ i2c_del_driver(&rx6110_i2c_driver);
+}
+#else
+static int rx6110_i2c_register(void)
+{
+ return 0;
+}
+
+static void rx6110_i2c_unregister(void)
+{
+}
+#endif /* CONFIG_I2C */
+
+static int __init rx6110_module_init(void)
+{
+ int ret;
+
+ ret = rx6110_spi_register();
+ if (ret)
+ return ret;
+
+ ret = rx6110_i2c_register();
+ if (ret)
+ rx6110_spi_unregister();
+
+ return ret;
+}
+module_init(rx6110_module_init);
+
+static void __exit rx6110_module_exit(void)
+{
+ rx6110_spi_unregister();
+ rx6110_i2c_unregister();
+}
+module_exit(rx6110_module_exit);
MODULE_AUTHOR("Val Krutov <val.krutov@erd.epson.com>");
MODULE_DESCRIPTION("RX-6110 SA RTC driver");
diff --git a/drivers/rtc/rtc-rx8010.c b/drivers/rtc/rtc-rx8010.c
index dca41a2a39b2..8340ab47a059 100644
--- a/drivers/rtc/rtc-rx8010.c
+++ b/drivers/rtc/rtc-rx8010.c
@@ -419,7 +419,7 @@ static int rx8010_probe(struct i2c_client *client)
rx8010->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
rx8010->rtc->range_max = RTC_TIMESTAMP_END_2099;
- return rtc_register_device(rx8010->rtc);
+ return devm_rtc_register_device(rx8010->rtc);
}
static struct i2c_driver rx8010_driver = {
diff --git a/drivers/rtc/rtc-rx8581.c b/drivers/rtc/rtc-rx8581.c
index 490f70f57636..de109139529b 100644
--- a/drivers/rtc/rtc-rx8581.c
+++ b/drivers/rtc/rtc-rx8581.c
@@ -298,11 +298,11 @@ static int rx8581_probe(struct i2c_client *client,
rx8581->rtc->start_secs = 0;
rx8581->rtc->set_start_time = true;
- ret = rtc_register_device(rx8581->rtc);
+ ret = devm_rtc_register_device(rx8581->rtc);
for (i = 0; i < config->num_nvram; i++) {
nvmem_cfg[i].priv = rx8581;
- rtc_nvmem_register(rx8581->rtc, &nvmem_cfg[i]);
+ devm_rtc_nvmem_register(rx8581->rtc, &nvmem_cfg[i]);
}
return ret;
diff --git a/drivers/rtc/rtc-s35390a.c b/drivers/rtc/rtc-s35390a.c
index 03672a246356..ea15d0392bb9 100644
--- a/drivers/rtc/rtc-s35390a.c
+++ b/drivers/rtc/rtc-s35390a.c
@@ -497,7 +497,7 @@ static int s35390a_probe(struct i2c_client *client,
if (status1 & S35390A_FLAG_INT2)
rtc_update_irq(s35390a->rtc, 1, RTC_AF);
- return rtc_register_device(s35390a->rtc);
+ return devm_rtc_register_device(s35390a->rtc);
}
static struct i2c_driver s35390a_driver = {
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 24a41909f049..fab326ba9cec 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -42,26 +42,15 @@ struct s3c_rtc {
const struct s3c_rtc_data *data;
int irq_alarm;
- int irq_tick;
-
- spinlock_t pie_lock;
spinlock_t alarm_lock;
- int ticnt_save;
- int ticnt_en_save;
bool wake_en;
};
struct s3c_rtc_data {
- int max_user_freq;
bool needs_src_clk;
void (*irq_handler) (struct s3c_rtc *info, int mask);
- void (*set_freq) (struct s3c_rtc *info, int freq);
- void (*enable_tick) (struct s3c_rtc *info, struct seq_file *seq);
- void (*select_tick_clk) (struct s3c_rtc *info);
- void (*save_tick_cnt) (struct s3c_rtc *info);
- void (*restore_tick_cnt) (struct s3c_rtc *info);
void (*enable) (struct s3c_rtc *info);
void (*disable) (struct s3c_rtc *info);
};
@@ -91,17 +80,7 @@ static void s3c_rtc_disable_clk(struct s3c_rtc *info)
clk_disable(info->rtc_clk);
}
-/* IRQ Handlers */
-static irqreturn_t s3c_rtc_tickirq(int irq, void *id)
-{
- struct s3c_rtc *info = (struct s3c_rtc *)id;
-
- if (info->data->irq_handler)
- info->data->irq_handler(info, S3C2410_INTP_TIC);
-
- return IRQ_HANDLED;
-}
-
+/* IRQ Handler */
static irqreturn_t s3c_rtc_alarmirq(int irq, void *id)
{
struct s3c_rtc *info = (struct s3c_rtc *)id;
@@ -148,28 +127,6 @@ static int s3c_rtc_setaie(struct device *dev, unsigned int enabled)
return ret;
}
-/* Set RTC frequency */
-static int s3c_rtc_setfreq(struct s3c_rtc *info, int freq)
-{
- int ret;
-
- if (!is_power_of_2(freq))
- return -EINVAL;
-
- ret = s3c_rtc_enable_clk(info);
- if (ret)
- return ret;
- spin_lock_irq(&info->pie_lock);
-
- if (info->data->set_freq)
- info->data->set_freq(info, freq);
-
- spin_unlock_irq(&info->pie_lock);
- s3c_rtc_disable_clk(info);
-
- return 0;
-}
-
/* Time read/write */
static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
{
@@ -348,29 +305,11 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
return 0;
}
-static int s3c_rtc_proc(struct device *dev, struct seq_file *seq)
-{
- struct s3c_rtc *info = dev_get_drvdata(dev);
- int ret;
-
- ret = s3c_rtc_enable_clk(info);
- if (ret)
- return ret;
-
- if (info->data->enable_tick)
- info->data->enable_tick(info, seq);
-
- s3c_rtc_disable_clk(info);
-
- return 0;
-}
-
static const struct rtc_class_ops s3c_rtcops = {
.read_time = s3c_rtc_gettime,
.set_time = s3c_rtc_settime,
.read_alarm = s3c_rtc_getalarm,
.set_alarm = s3c_rtc_setalarm,
- .proc = s3c_rtc_proc,
.alarm_irq_enable = s3c_rtc_setaie,
};
@@ -450,18 +389,12 @@ static int s3c_rtc_probe(struct platform_device *pdev)
if (!info)
return -ENOMEM;
- /* find the IRQs */
- info->irq_tick = platform_get_irq(pdev, 1);
- if (info->irq_tick < 0)
- return info->irq_tick;
-
info->dev = &pdev->dev;
info->data = of_device_get_match_data(&pdev->dev);
if (!info->data) {
dev_err(&pdev->dev, "failed getting s3c_rtc_data\n");
return -EINVAL;
}
- spin_lock_init(&info->pie_lock);
spin_lock_init(&info->alarm_lock);
platform_set_drvdata(pdev, info);
@@ -470,8 +403,7 @@ static int s3c_rtc_probe(struct platform_device *pdev)
if (info->irq_alarm < 0)
return info->irq_alarm;
- dev_dbg(&pdev->dev, "s3c2410_rtc: tick irq %d, alarm irq %d\n",
- info->irq_tick, info->irq_alarm);
+ dev_dbg(&pdev->dev, "s3c2410_rtc: alarm irq %d\n", info->irq_alarm);
/* get the memory region */
info->base = devm_platform_ioremap_resource(pdev, 0);
@@ -503,6 +435,10 @@ static int s3c_rtc_probe(struct platform_device *pdev)
goto err_src_clk;
}
+ /* disable RTC enable bits potentially set by the bootloader */
+ if (info->data->disable)
+ info->data->disable(info);
+
/* check to see if everything is setup correctly */
if (info->data->enable)
info->data->enable(info);
@@ -542,18 +478,6 @@ static int s3c_rtc_probe(struct platform_device *pdev)
goto err_nortc;
}
- ret = devm_request_irq(&pdev->dev, info->irq_tick, s3c_rtc_tickirq,
- 0, "s3c2410-rtc tick", info);
- if (ret) {
- dev_err(&pdev->dev, "IRQ%d error %d\n", info->irq_tick, ret);
- goto err_nortc;
- }
-
- if (info->data->select_tick_clk)
- info->data->select_tick_clk(info);
-
- s3c_rtc_setfreq(info, 1);
-
s3c_rtc_disable_clk(info);
return 0;
@@ -581,10 +505,6 @@ static int s3c_rtc_suspend(struct device *dev)
if (ret)
return ret;
- /* save TICNT for anyone using periodic interrupts */
- if (info->data->save_tick_cnt)
- info->data->save_tick_cnt(info);
-
if (info->data->disable)
info->data->disable(info);
@@ -605,9 +525,6 @@ static int s3c_rtc_resume(struct device *dev)
if (info->data->enable)
info->data->enable(info);
- if (info->data->restore_tick_cnt)
- info->data->restore_tick_cnt(info);
-
s3c_rtc_disable_clk(info);
if (device_may_wakeup(dev) && info->wake_en) {
@@ -631,162 +548,27 @@ static void s3c6410_rtc_irq(struct s3c_rtc *info, int mask)
writeb(mask, info->base + S3C2410_INTP);
}
-static void s3c2410_rtc_setfreq(struct s3c_rtc *info, int freq)
-{
- unsigned int tmp = 0;
- int val;
-
- tmp = readb(info->base + S3C2410_TICNT);
- tmp &= S3C2410_TICNT_ENABLE;
-
- val = (info->rtc->max_user_freq / freq) - 1;
- tmp |= val;
-
- writel(tmp, info->base + S3C2410_TICNT);
-}
-
-static void s3c2416_rtc_setfreq(struct s3c_rtc *info, int freq)
-{
- unsigned int tmp = 0;
- int val;
-
- tmp = readb(info->base + S3C2410_TICNT);
- tmp &= S3C2410_TICNT_ENABLE;
-
- val = (info->rtc->max_user_freq / freq) - 1;
-
- tmp |= S3C2443_TICNT_PART(val);
- writel(S3C2443_TICNT1_PART(val), info->base + S3C2443_TICNT1);
-
- writel(S3C2416_TICNT2_PART(val), info->base + S3C2416_TICNT2);
-
- writel(tmp, info->base + S3C2410_TICNT);
-}
-
-static void s3c2443_rtc_setfreq(struct s3c_rtc *info, int freq)
-{
- unsigned int tmp = 0;
- int val;
-
- tmp = readb(info->base + S3C2410_TICNT);
- tmp &= S3C2410_TICNT_ENABLE;
-
- val = (info->rtc->max_user_freq / freq) - 1;
-
- tmp |= S3C2443_TICNT_PART(val);
- writel(S3C2443_TICNT1_PART(val), info->base + S3C2443_TICNT1);
-
- writel(tmp, info->base + S3C2410_TICNT);
-}
-
-static void s3c6410_rtc_setfreq(struct s3c_rtc *info, int freq)
-{
- int val;
-
- val = (info->rtc->max_user_freq / freq) - 1;
- writel(val, info->base + S3C2410_TICNT);
-}
-
-static void s3c24xx_rtc_enable_tick(struct s3c_rtc *info, struct seq_file *seq)
-{
- unsigned int ticnt;
-
- ticnt = readb(info->base + S3C2410_TICNT);
- ticnt &= S3C2410_TICNT_ENABLE;
-
- seq_printf(seq, "periodic_IRQ\t: %s\n", ticnt ? "yes" : "no");
-}
-
-static void s3c2416_rtc_select_tick_clk(struct s3c_rtc *info)
-{
- unsigned int con;
-
- con = readw(info->base + S3C2410_RTCCON);
- con |= S3C2443_RTCCON_TICSEL;
- writew(con, info->base + S3C2410_RTCCON);
-}
-
-static void s3c6410_rtc_enable_tick(struct s3c_rtc *info, struct seq_file *seq)
-{
- unsigned int ticnt;
-
- ticnt = readw(info->base + S3C2410_RTCCON);
- ticnt &= S3C64XX_RTCCON_TICEN;
-
- seq_printf(seq, "periodic_IRQ\t: %s\n", ticnt ? "yes" : "no");
-}
-
-static void s3c24xx_rtc_save_tick_cnt(struct s3c_rtc *info)
-{
- info->ticnt_save = readb(info->base + S3C2410_TICNT);
-}
-
-static void s3c24xx_rtc_restore_tick_cnt(struct s3c_rtc *info)
-{
- writeb(info->ticnt_save, info->base + S3C2410_TICNT);
-}
-
-static void s3c6410_rtc_save_tick_cnt(struct s3c_rtc *info)
-{
- info->ticnt_en_save = readw(info->base + S3C2410_RTCCON);
- info->ticnt_en_save &= S3C64XX_RTCCON_TICEN;
- info->ticnt_save = readl(info->base + S3C2410_TICNT);
-}
-
-static void s3c6410_rtc_restore_tick_cnt(struct s3c_rtc *info)
-{
- unsigned int con;
-
- writel(info->ticnt_save, info->base + S3C2410_TICNT);
- if (info->ticnt_en_save) {
- con = readw(info->base + S3C2410_RTCCON);
- writew(con | info->ticnt_en_save, info->base + S3C2410_RTCCON);
- }
-}
-
static struct s3c_rtc_data const s3c2410_rtc_data = {
- .max_user_freq = 128,
.irq_handler = s3c24xx_rtc_irq,
- .set_freq = s3c2410_rtc_setfreq,
- .enable_tick = s3c24xx_rtc_enable_tick,
- .save_tick_cnt = s3c24xx_rtc_save_tick_cnt,
- .restore_tick_cnt = s3c24xx_rtc_restore_tick_cnt,
.enable = s3c24xx_rtc_enable,
.disable = s3c24xx_rtc_disable,
};
static struct s3c_rtc_data const s3c2416_rtc_data = {
- .max_user_freq = 32768,
.irq_handler = s3c24xx_rtc_irq,
- .set_freq = s3c2416_rtc_setfreq,
- .enable_tick = s3c24xx_rtc_enable_tick,
- .select_tick_clk = s3c2416_rtc_select_tick_clk,
- .save_tick_cnt = s3c24xx_rtc_save_tick_cnt,
- .restore_tick_cnt = s3c24xx_rtc_restore_tick_cnt,
.enable = s3c24xx_rtc_enable,
.disable = s3c24xx_rtc_disable,
};
static struct s3c_rtc_data const s3c2443_rtc_data = {
- .max_user_freq = 32768,
.irq_handler = s3c24xx_rtc_irq,
- .set_freq = s3c2443_rtc_setfreq,
- .enable_tick = s3c24xx_rtc_enable_tick,
- .select_tick_clk = s3c2416_rtc_select_tick_clk,
- .save_tick_cnt = s3c24xx_rtc_save_tick_cnt,
- .restore_tick_cnt = s3c24xx_rtc_restore_tick_cnt,
.enable = s3c24xx_rtc_enable,
.disable = s3c24xx_rtc_disable,
};
static struct s3c_rtc_data const s3c6410_rtc_data = {
- .max_user_freq = 32768,
.needs_src_clk = true,
.irq_handler = s3c6410_rtc_irq,
- .set_freq = s3c6410_rtc_setfreq,
- .enable_tick = s3c6410_rtc_enable_tick,
- .save_tick_cnt = s3c6410_rtc_save_tick_cnt,
- .restore_tick_cnt = s3c6410_rtc_restore_tick_cnt,
.enable = s3c24xx_rtc_enable,
.disable = s3c6410_rtc_disable,
};
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c
index 9ccc97cf5e09..1250887e4382 100644
--- a/drivers/rtc/rtc-sa1100.c
+++ b/drivers/rtc/rtc-sa1100.c
@@ -205,7 +205,7 @@ int sa1100_rtc_init(struct platform_device *pdev, struct sa1100_rtc *info)
info->rtc->max_user_freq = RTC_FREQ;
info->rtc->range_max = U32_MAX;
- ret = rtc_register_device(info->rtc);
+ ret = devm_rtc_register_device(info->rtc);
if (ret) {
clk_disable_unprepare(info->clk);
return ret;
diff --git a/drivers/rtc/rtc-sc27xx.c b/drivers/rtc/rtc-sc27xx.c
index 36810dd40cd3..187aa955b79c 100644
--- a/drivers/rtc/rtc-sc27xx.c
+++ b/drivers/rtc/rtc-sc27xx.c
@@ -299,33 +299,6 @@ static int sprd_rtc_set_secs(struct sprd_rtc *rtc, enum sprd_rtc_reg_types type,
sts_mask);
}
-static int sprd_rtc_read_aux_alarm(struct device *dev, struct rtc_wkalrm *alrm)
-{
- struct sprd_rtc *rtc = dev_get_drvdata(dev);
- time64_t secs;
- u32 val;
- int ret;
-
- ret = sprd_rtc_get_secs(rtc, SPRD_RTC_AUX_ALARM, &secs);
- if (ret)
- return ret;
-
- rtc_time64_to_tm(secs, &alrm->time);
-
- ret = regmap_read(rtc->regmap, rtc->base + SPRD_RTC_INT_EN, &val);
- if (ret)
- return ret;
-
- alrm->enabled = !!(val & SPRD_RTC_AUXALM_EN);
-
- ret = regmap_read(rtc->regmap, rtc->base + SPRD_RTC_INT_RAW_STS, &val);
- if (ret)
- return ret;
-
- alrm->pending = !!(val & SPRD_RTC_AUXALM_EN);
- return 0;
-}
-
static int sprd_rtc_set_aux_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct sprd_rtc *rtc = dev_get_drvdata(dev);
@@ -415,16 +388,9 @@ static int sprd_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
u32 val;
/*
- * Before RTC device is registered, it will check to see if there is an
- * alarm already set in RTC hardware, and we always read the normal
- * alarm at this time.
- *
- * Or if aie_timer is enabled, we should get the normal alarm time.
- * Otherwise we should get auxiliary alarm time.
+ * The RTC core checks to see if there is an alarm already set in RTC
+ * hardware, and we always read the normal alarm at this time.
*/
- if (rtc->rtc && rtc->rtc->registered && rtc->rtc->aie_timer.enabled == 0)
- return sprd_rtc_read_aux_alarm(dev, alrm);
-
ret = sprd_rtc_get_secs(rtc, SPRD_RTC_ALARM, &secs);
if (ret)
return ret;
@@ -563,7 +529,7 @@ static int sprd_rtc_check_power_down(struct sprd_rtc *rtc)
* means the RTC has been powered down, so the RTC time values are
* invalid.
*/
- rtc->valid = val == SPRD_RTC_POWER_RESET_VALUE ? false : true;
+ rtc->valid = val != SPRD_RTC_POWER_RESET_VALUE;
return 0;
}
@@ -652,7 +618,7 @@ static int sprd_rtc_probe(struct platform_device *pdev)
rtc->rtc->ops = &sprd_rtc_ops;
rtc->rtc->range_min = 0;
rtc->rtc->range_max = 5662310399LL;
- ret = rtc_register_device(rtc->rtc);
+ ret = devm_rtc_register_device(rtc->rtc);
if (ret) {
device_init_wakeup(&pdev->dev, 0);
return ret;
diff --git a/drivers/rtc/rtc-sd3078.c b/drivers/rtc/rtc-sd3078.c
index a7aa943c1183..f6bee69ba017 100644
--- a/drivers/rtc/rtc-sd3078.c
+++ b/drivers/rtc/rtc-sd3078.c
@@ -192,7 +192,7 @@ static int sd3078_probe(struct i2c_client *client,
sd3078->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
sd3078->rtc->range_max = RTC_TIMESTAMP_END_2099;
- ret = rtc_register_device(sd3078->rtc);
+ ret = devm_rtc_register_device(sd3078->rtc);
if (ret)
return ret;
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
index 9167b48014a1..cd146b574143 100644
--- a/drivers/rtc/rtc-sh.c
+++ b/drivers/rtc/rtc-sh.c
@@ -607,7 +607,7 @@ static int __init sh_rtc_probe(struct platform_device *pdev)
rtc->rtc_dev->range_max = mktime64(2098, 12, 31, 23, 59, 59);
}
- ret = rtc_register_device(rtc->rtc_dev);
+ ret = devm_rtc_register_device(rtc->rtc_dev);
if (ret)
goto err_unmap;
diff --git a/drivers/rtc/rtc-sirfsoc.c b/drivers/rtc/rtc-sirfsoc.c
index abf19435dbad..03a6cca23201 100644
--- a/drivers/rtc/rtc-sirfsoc.c
+++ b/drivers/rtc/rtc-sirfsoc.c
@@ -356,7 +356,7 @@ static int sirfsoc_rtc_probe(struct platform_device *pdev)
return err;
}
- return rtc_register_device(rtcdrv->rtc);
+ return devm_rtc_register_device(rtcdrv->rtc);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c
index 0263d996b8a8..bd929b0e7d7d 100644
--- a/drivers/rtc/rtc-snvs.c
+++ b/drivers/rtc/rtc-snvs.c
@@ -151,17 +151,14 @@ static int snvs_rtc_read_time(struct device *dev, struct rtc_time *tm)
unsigned long time;
int ret;
- if (data->clk) {
- ret = clk_enable(data->clk);
- if (ret)
- return ret;
- }
+ ret = clk_enable(data->clk);
+ if (ret)
+ return ret;
time = rtc_read_lp_counter(data);
rtc_time64_to_tm(time, tm);
- if (data->clk)
- clk_disable(data->clk);
+ clk_disable(data->clk);
return 0;
}
@@ -172,11 +169,9 @@ static int snvs_rtc_set_time(struct device *dev, struct rtc_time *tm)
unsigned long time = rtc_tm_to_time64(tm);
int ret;
- if (data->clk) {
- ret = clk_enable(data->clk);
- if (ret)
- return ret;
- }
+ ret = clk_enable(data->clk);
+ if (ret)
+ return ret;
/* Disable RTC first */
ret = snvs_rtc_enable(data, false);
@@ -190,8 +185,7 @@ static int snvs_rtc_set_time(struct device *dev, struct rtc_time *tm)
/* Enable RTC again */
ret = snvs_rtc_enable(data, true);
- if (data->clk)
- clk_disable(data->clk);
+ clk_disable(data->clk);
return ret;
}
@@ -202,11 +196,9 @@ static int snvs_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
u32 lptar, lpsr;
int ret;
- if (data->clk) {
- ret = clk_enable(data->clk);
- if (ret)
- return ret;
- }
+ ret = clk_enable(data->clk);
+ if (ret)
+ return ret;
regmap_read(data->regmap, data->offset + SNVS_LPTAR, &lptar);
rtc_time64_to_tm(lptar, &alrm->time);
@@ -214,8 +206,7 @@ static int snvs_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
regmap_read(data->regmap, data->offset + SNVS_LPSR, &lpsr);
alrm->pending = (lpsr & SNVS_LPSR_LPTA) ? 1 : 0;
- if (data->clk)
- clk_disable(data->clk);
+ clk_disable(data->clk);
return 0;
}
@@ -225,11 +216,9 @@ static int snvs_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
struct snvs_rtc_data *data = dev_get_drvdata(dev);
int ret;
- if (data->clk) {
- ret = clk_enable(data->clk);
- if (ret)
- return ret;
- }
+ ret = clk_enable(data->clk);
+ if (ret)
+ return ret;
regmap_update_bits(data->regmap, data->offset + SNVS_LPCR,
(SNVS_LPCR_LPTA_EN | SNVS_LPCR_LPWUI_EN),
@@ -237,8 +226,7 @@ static int snvs_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
ret = rtc_write_sync_lp(data);
- if (data->clk)
- clk_disable(data->clk);
+ clk_disable(data->clk);
return ret;
}
@@ -249,11 +237,9 @@ static int snvs_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
unsigned long time = rtc_tm_to_time64(&alrm->time);
int ret;
- if (data->clk) {
- ret = clk_enable(data->clk);
- if (ret)
- return ret;
- }
+ ret = clk_enable(data->clk);
+ if (ret)
+ return ret;
regmap_update_bits(data->regmap, data->offset + SNVS_LPCR, SNVS_LPCR_LPTA_EN, 0);
ret = rtc_write_sync_lp(data);
@@ -264,8 +250,7 @@ static int snvs_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
/* Clear alarm interrupt status bit */
regmap_write(data->regmap, data->offset + SNVS_LPSR, SNVS_LPSR_LPTA);
- if (data->clk)
- clk_disable(data->clk);
+ clk_disable(data->clk);
return snvs_rtc_alarm_irq_enable(dev, alrm->enabled);
}
@@ -285,8 +270,7 @@ static irqreturn_t snvs_rtc_irq_handler(int irq, void *dev_id)
u32 lpsr;
u32 events = 0;
- if (data->clk)
- clk_enable(data->clk);
+ clk_enable(data->clk);
regmap_read(data->regmap, data->offset + SNVS_LPSR, &lpsr);
@@ -302,8 +286,7 @@ static irqreturn_t snvs_rtc_irq_handler(int irq, void *dev_id)
/* clear interrupt status */
regmap_write(data->regmap, data->offset + SNVS_LPSR, lpsr);
- if (data->clk)
- clk_disable(data->clk);
+ clk_disable(data->clk);
return events ? IRQ_HANDLED : IRQ_NONE;
}
@@ -316,8 +299,7 @@ static const struct regmap_config snvs_rtc_config = {
static void snvs_rtc_action(void *data)
{
- if (data)
- clk_disable_unprepare(data);
+ clk_disable_unprepare(data);
}
static int snvs_rtc_probe(struct platform_device *pdev)
@@ -405,15 +387,14 @@ static int snvs_rtc_probe(struct platform_device *pdev)
data->rtc->ops = &snvs_rtc_ops;
data->rtc->range_max = U32_MAX;
- return rtc_register_device(data->rtc);
+ return devm_rtc_register_device(data->rtc);
}
static int __maybe_unused snvs_rtc_suspend_noirq(struct device *dev)
{
struct snvs_rtc_data *data = dev_get_drvdata(dev);
- if (data->clk)
- clk_disable(data->clk);
+ clk_disable(data->clk);
return 0;
}
diff --git a/drivers/rtc/rtc-st-lpc.c b/drivers/rtc/rtc-st-lpc.c
index 0c65448b85ee..bdb20f63254e 100644
--- a/drivers/rtc/rtc-st-lpc.c
+++ b/drivers/rtc/rtc-st-lpc.c
@@ -250,7 +250,7 @@ static int st_rtc_probe(struct platform_device *pdev)
rtc->rtc_dev->range_max = U64_MAX;
do_div(rtc->rtc_dev->range_max, rtc->clkrate);
- ret = rtc_register_device(rtc->rtc_dev);
+ ret = devm_rtc_register_device(rtc->rtc_dev);
if (ret) {
clk_disable_unprepare(rtc->clk);
return ret;
diff --git a/drivers/rtc/rtc-starfire.c b/drivers/rtc/rtc-starfire.c
index 37a26279e107..fbd1ed41cbf1 100644
--- a/drivers/rtc/rtc-starfire.c
+++ b/drivers/rtc/rtc-starfire.c
@@ -48,7 +48,7 @@ static int __init starfire_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rtc);
- return rtc_register_device(rtc);
+ return devm_rtc_register_device(rtc);
}
static struct platform_driver starfire_rtc_driver = {
diff --git a/drivers/rtc/rtc-stk17ta8.c b/drivers/rtc/rtc-stk17ta8.c
index 01a45044f468..7cb6be1b7815 100644
--- a/drivers/rtc/rtc-stk17ta8.c
+++ b/drivers/rtc/rtc-stk17ta8.c
@@ -311,14 +311,13 @@ static int stk17ta8_rtc_probe(struct platform_device *pdev)
return PTR_ERR(pdata->rtc);
pdata->rtc->ops = &stk17ta8_rtc_ops;
- pdata->rtc->nvram_old_abi = true;
nvmem_cfg.priv = pdata;
- ret = rtc_nvmem_register(pdata->rtc, &nvmem_cfg);
+ ret = devm_rtc_nvmem_register(pdata->rtc, &nvmem_cfg);
if (ret)
return ret;
- return rtc_register_device(pdata->rtc);
+ return devm_rtc_register_device(pdata->rtc);
}
/* work with hotplug and coldplug */
diff --git a/drivers/rtc/rtc-stmp3xxx.c b/drivers/rtc/rtc-stmp3xxx.c
index 0a969af80af7..40c0f7ed36e0 100644
--- a/drivers/rtc/rtc-stmp3xxx.c
+++ b/drivers/rtc/rtc-stmp3xxx.c
@@ -366,7 +366,7 @@ static int stmp3xxx_rtc_probe(struct platform_device *pdev)
rtc_data->rtc->ops = &stmp3xxx_rtc_ops;
rtc_data->rtc->range_max = U32_MAX;
- err = rtc_register_device(rtc_data->rtc);
+ err = devm_rtc_register_device(rtc_data->rtc);
if (err)
return err;
diff --git a/drivers/rtc/rtc-sun4v.c b/drivers/rtc/rtc-sun4v.c
index 036463dfa103..a86e27de8c06 100644
--- a/drivers/rtc/rtc-sun4v.c
+++ b/drivers/rtc/rtc-sun4v.c
@@ -86,7 +86,7 @@ static int __init sun4v_rtc_probe(struct platform_device *pdev)
rtc->range_max = U64_MAX;
platform_set_drvdata(pdev, rtc);
- return rtc_register_device(rtc);
+ return devm_rtc_register_device(rtc);
}
static struct platform_driver sun4v_rtc_driver = {
diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c
index e2b8b150bcb4..adec1b14a8de 100644
--- a/drivers/rtc/rtc-sun6i.c
+++ b/drivers/rtc/rtc-sun6i.c
@@ -272,7 +272,7 @@ static void __init sun6i_rtc_clk_init(struct device_node *node,
300000000);
if (IS_ERR(rtc->int_osc)) {
pr_crit("Couldn't register the internal oscillator\n");
- return;
+ goto err;
}
parents[0] = clk_hw_get_name(rtc->int_osc);
@@ -290,7 +290,7 @@ static void __init sun6i_rtc_clk_init(struct device_node *node,
rtc->losc = clk_register(NULL, &rtc->hw);
if (IS_ERR(rtc->losc)) {
pr_crit("Couldn't register the LOSC clock\n");
- return;
+ goto err_register;
}
of_property_read_string_index(node, "clock-output-names", 1,
@@ -301,7 +301,7 @@ static void __init sun6i_rtc_clk_init(struct device_node *node,
&rtc->lock);
if (IS_ERR(rtc->ext_losc)) {
pr_crit("Couldn't register the LOSC external gate\n");
- return;
+ goto err_register;
}
clk_data->num = 2;
@@ -314,6 +314,8 @@ static void __init sun6i_rtc_clk_init(struct device_node *node,
of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
return;
+err_register:
+ clk_hw_unregister_fixed_rate(rtc->int_osc);
err:
kfree(clk_data);
}
@@ -724,7 +726,7 @@ static int sun6i_rtc_probe(struct platform_device *pdev)
chip->rtc->ops = &sun6i_rtc_ops;
chip->rtc->range_max = 2019686399LL; /* 2033-12-31 23:59:59 */
- ret = rtc_register_device(chip->rtc);
+ ret = devm_rtc_register_device(chip->rtc);
if (ret)
return ret;
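Besides the devm conversion, the sun6i hunks replace the bare returns in sun6i_rtc_clk_init() with a real unwind: once the internal oscillator is registered, any later failure must unregister it before freeing clk_data. A self-contained sketch of that goto-unwind idiom, with illustrative clock names:

#include <linux/clk-provider.h>
#include <linux/slab.h>

static void example_clk_init(void)
{
	struct clk_hw *osc, *div;
	void *clk_data;

	clk_data = kzalloc(16, GFP_KERNEL);
	if (!clk_data)
		return;

	osc = clk_hw_register_fixed_rate(NULL, "example-osc", NULL, 0,
					 32768);
	if (IS_ERR(osc))
		goto err;		/* nothing else to unwind yet */

	div = clk_hw_register_fixed_factor(NULL, "example-div",
					   "example-osc", 0, 1, 2);
	if (IS_ERR(div))
		goto err_register;	/* undo the oscillator first */

	return;		/* success: clk_data stays with the provider */

err_register:
	clk_hw_unregister_fixed_rate(osc);
err:
	kfree(clk_data);
}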
diff --git a/drivers/rtc/rtc-sunxi.c b/drivers/rtc/rtc-sunxi.c
index f5d7f44550ce..5d019e3a835a 100644
--- a/drivers/rtc/rtc-sunxi.c
+++ b/drivers/rtc/rtc-sunxi.c
@@ -470,7 +470,7 @@ static int sunxi_rtc_probe(struct platform_device *pdev)
chip->rtc->ops = &sunxi_rtc_ops;
- return rtc_register_device(chip->rtc);
+ return devm_rtc_register_device(chip->rtc);
}
static struct platform_driver sunxi_rtc_driver = {
diff --git a/drivers/rtc/rtc-tegra.c b/drivers/rtc/rtc-tegra.c
index 7fbb1741692f..8925015cc698 100644
--- a/drivers/rtc/rtc-tegra.c
+++ b/drivers/rtc/rtc-tegra.c
@@ -329,7 +329,7 @@ static int tegra_rtc_probe(struct platform_device *pdev)
goto disable_clk;
}
- ret = rtc_register_device(info->rtc);
+ ret = devm_rtc_register_device(info->rtc);
if (ret)
goto disable_clk;
diff --git a/drivers/rtc/rtc-test.c b/drivers/rtc/rtc-test.c
index 74b3a0603b73..7e0d8fb26465 100644
--- a/drivers/rtc/rtc-test.c
+++ b/drivers/rtc/rtc-test.c
@@ -50,7 +50,6 @@ static int test_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
if (expires > U32_MAX)
expires = U32_MAX;
- pr_err("ABE: %s +%d %s\n", __FILE__, __LINE__, __func__);
rtd->alarm.expires = expires;
if (alrm->enabled)
@@ -139,7 +138,7 @@ static int test_probe(struct platform_device *plat_dev)
timer_setup(&rtd->alarm, test_rtc_alarm_handler, 0);
rtd->alarm.expires = 0;
- return rtc_register_device(rtd->rtc);
+ return devm_rtc_register_device(rtd->rtc);
}
static struct platform_driver test_driver = {
diff --git a/drivers/rtc/rtc-tps6586x.c b/drivers/rtc/rtc-tps6586x.c
index e39af2d67051..a980337c3065 100644
--- a/drivers/rtc/rtc-tps6586x.c
+++ b/drivers/rtc/rtc-tps6586x.c
@@ -280,7 +280,7 @@ static int tps6586x_rtc_probe(struct platform_device *pdev)
goto fail_rtc_register;
}
- ret = rtc_register_device(rtc->rtc);
+ ret = devm_rtc_register_device(rtc->rtc);
if (ret)
goto fail_rtc_register;
diff --git a/drivers/rtc/rtc-tps65910.c b/drivers/rtc/rtc-tps65910.c
index e3840386f430..2d87b62826a8 100644
--- a/drivers/rtc/rtc-tps65910.c
+++ b/drivers/rtc/rtc-tps65910.c
@@ -434,7 +434,7 @@ static int tps65910_rtc_probe(struct platform_device *pdev)
tps_rtc->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
tps_rtc->rtc->range_max = RTC_TIMESTAMP_END_2099;
- return rtc_register_device(tps_rtc->rtc);
+ return devm_rtc_register_device(tps_rtc->rtc);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/rtc/rtc-tx4939.c b/drivers/rtc/rtc-tx4939.c
index 715b82981279..c3309db5448d 100644
--- a/drivers/rtc/rtc-tx4939.c
+++ b/drivers/rtc/rtc-tx4939.c
@@ -266,17 +266,16 @@ static int __init tx4939_rtc_probe(struct platform_device *pdev)
return PTR_ERR(rtc);
rtc->ops = &tx4939_rtc_ops;
- rtc->nvram_old_abi = true;
rtc->range_max = U32_MAX;
pdata->rtc = rtc;
nvmem_cfg.priv = pdata;
- ret = rtc_nvmem_register(rtc, &nvmem_cfg);
+ ret = devm_rtc_nvmem_register(rtc, &nvmem_cfg);
if (ret)
return ret;
- return rtc_register_device(rtc);
+ return devm_rtc_register_device(rtc);
}
static int __exit tx4939_rtc_remove(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
index c3671043ace7..5a9f9ad86d32 100644
--- a/drivers/rtc/rtc-vr41xx.c
+++ b/drivers/rtc/rtc-vr41xx.c
@@ -335,7 +335,7 @@ static int rtc_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "Real Time Clock of NEC VR4100 series\n");
- retval = rtc_register_device(rtc);
+ retval = devm_rtc_register_device(rtc);
if (retval)
goto err_iounmap_all;
diff --git a/drivers/rtc/rtc-vt8500.c b/drivers/rtc/rtc-vt8500.c
index e2588625025f..197b649cd629 100644
--- a/drivers/rtc/rtc-vt8500.c
+++ b/drivers/rtc/rtc-vt8500.c
@@ -232,7 +232,7 @@ static int vt8500_rtc_probe(struct platform_device *pdev)
return ret;
}
- return rtc_register_device(vt8500_rtc->rtc);
+ return devm_rtc_register_device(vt8500_rtc->rtc);
}
static int vt8500_rtc_remove(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-wilco-ec.c b/drivers/rtc/rtc-wilco-ec.c
index ff46066a68a4..2a205a646452 100644
--- a/drivers/rtc/rtc-wilco-ec.c
+++ b/drivers/rtc/rtc-wilco-ec.c
@@ -176,7 +176,7 @@ static int wilco_ec_rtc_probe(struct platform_device *pdev)
rtc->range_max = RTC_TIMESTAMP_END_2099;
rtc->owner = THIS_MODULE;
- return rtc_register_device(rtc);
+ return devm_rtc_register_device(rtc);
}
static struct platform_driver wilco_ec_rtc_driver = {
diff --git a/drivers/rtc/rtc-wm831x.c b/drivers/rtc/rtc-wm831x.c
index ccef887d2690..640833e21057 100644
--- a/drivers/rtc/rtc-wm831x.c
+++ b/drivers/rtc/rtc-wm831x.c
@@ -429,7 +429,7 @@ static int wm831x_rtc_probe(struct platform_device *pdev)
wm831x_rtc->rtc->ops = &wm831x_rtc_ops;
wm831x_rtc->rtc->range_max = U32_MAX;
- ret = rtc_register_device(wm831x_rtc->rtc);
+ ret = devm_rtc_register_device(wm831x_rtc->rtc);
if (ret)
return ret;
diff --git a/drivers/rtc/rtc-xgene.c b/drivers/rtc/rtc-xgene.c
index 96db441f92b3..cf68a9b1c9eb 100644
--- a/drivers/rtc/rtc-xgene.c
+++ b/drivers/rtc/rtc-xgene.c
@@ -185,7 +185,7 @@ static int xgene_rtc_probe(struct platform_device *pdev)
pdata->rtc->ops = &xgene_rtc_ops;
pdata->rtc->range_max = U32_MAX;
- ret = rtc_register_device(pdata->rtc);
+ ret = devm_rtc_register_device(pdata->rtc);
if (ret) {
clk_disable_unprepare(pdata->clk);
return ret;
diff --git a/drivers/rtc/rtc-zynqmp.c b/drivers/rtc/rtc-zynqmp.c
index 4b1077e2f826..f440bb52be92 100644
--- a/drivers/rtc/rtc-zynqmp.c
+++ b/drivers/rtc/rtc-zynqmp.c
@@ -264,7 +264,7 @@ static int xlnx_rtc_probe(struct platform_device *pdev)
device_init_wakeup(&pdev->dev, 1);
- return rtc_register_device(xrtcdev->rtc);
+ return devm_rtc_register_device(xrtcdev->rtc);
}
static int xlnx_rtc_remove(struct platform_device *pdev)
diff --git a/drivers/rtc/sysfs.c b/drivers/rtc/sysfs.c
index 950fac0d41ff..8a957d31a1a4 100644
--- a/drivers/rtc/sysfs.c
+++ b/drivers/rtc/sysfs.c
@@ -317,8 +317,6 @@ int rtc_add_groups(struct rtc_device *rtc, const struct attribute_group **grps)
size_t old_cnt = 0, add_cnt = 0, new_cnt;
const struct attribute_group **groups, **old;
- if (rtc->registered)
- return -EINVAL;
if (!grps)
return -EINVAL;
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index fd568248fd26..c7eb9a10c680 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -430,7 +430,7 @@ dasd_state_ready_to_online(struct dasd_device * device)
{
struct gendisk *disk;
struct disk_part_iter piter;
- struct hd_struct *part;
+ struct block_device *part;
device->state = DASD_STATE_ONLINE;
if (device->block) {
@@ -443,7 +443,7 @@ dasd_state_ready_to_online(struct dasd_device * device)
disk = device->block->bdev->bd_disk;
disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
while ((part = disk_part_iter_next(&piter)))
- kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
+ kobject_uevent(bdev_kobj(part), KOBJ_CHANGE);
disk_part_iter_exit(&piter);
}
return 0;
@@ -457,7 +457,7 @@ static int dasd_state_online_to_ready(struct dasd_device *device)
int rc;
struct gendisk *disk;
struct disk_part_iter piter;
- struct hd_struct *part;
+ struct block_device *part;
if (device->discipline->online_to_ready) {
rc = device->discipline->online_to_ready(device);
@@ -470,7 +470,7 @@ static int dasd_state_online_to_ready(struct dasd_device *device)
disk = device->block->bdev->bd_disk;
disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
while ((part = disk_part_iter_next(&piter)))
- kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
+ kobject_uevent(bdev_kobj(part), KOBJ_CHANGE);
disk_part_iter_exit(&piter);
}
return 0;
@@ -2084,19 +2084,24 @@ static void __dasd_device_start_head(struct dasd_device *device)
static void __dasd_device_check_path_events(struct dasd_device *device)
{
+ __u8 tbvpm, fcsecpm;
int rc;
- if (!dasd_path_get_tbvpm(device))
+ tbvpm = dasd_path_get_tbvpm(device);
+ fcsecpm = dasd_path_get_fcsecpm(device);
+
+ if (!tbvpm && !fcsecpm)
return;
if (device->stopped & ~(DASD_STOPPED_DC_WAIT))
return;
- rc = device->discipline->verify_path(device,
- dasd_path_get_tbvpm(device));
- if (rc)
+ rc = device->discipline->pe_handler(device, tbvpm, fcsecpm);
+ if (rc) {
dasd_device_set_timer(device, 50);
- else
+ } else {
dasd_path_clear_all_verify(device);
+ dasd_path_clear_all_fcsec(device);
+ }
};
/*
@@ -3376,6 +3381,7 @@ dasd_device_operations = {
.ioctl = dasd_ioctl,
.compat_ioctl = dasd_ioctl,
.getgeo = dasd_getgeo,
+ .set_read_only = dasd_set_read_only,
};
/*******************************************************************************
@@ -3447,8 +3453,7 @@ static void dasd_generic_auto_online(void *data, async_cookie_t cookie)
 * Initial attempt at a probe function. This can be simplified once
* the other detection code is gone.
*/
-int dasd_generic_probe(struct ccw_device *cdev,
- struct dasd_discipline *discipline)
+int dasd_generic_probe(struct ccw_device *cdev)
{
int ret;
@@ -3846,6 +3851,10 @@ void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
if (device->discipline->kick_validate)
device->discipline->kick_validate(device);
}
+ if (path_event[chp] & PE_PATH_FCES_EVENT) {
+ dasd_path_fcsec_update(device, chp);
+ dasd_schedule_device_bh(device);
+ }
}
hpfpm = dasd_path_get_hpfpm(device);
ifccpm = dasd_path_get_ifccpm(device);
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 99f86612f775..dc78a523a69f 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -256,7 +256,6 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
return;
device->discipline->get_uid(device, &uid);
spin_lock_irqsave(&lcu->lock, flags);
- list_del_init(&device->alias_list);
/* make sure that the workers don't use this device */
if (device == lcu->suc_data.device) {
spin_unlock_irqrestore(&lcu->lock, flags);
@@ -283,6 +282,7 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
spin_lock_irqsave(&aliastree.lock, flags);
spin_lock(&lcu->lock);
+ list_del_init(&device->alias_list);
if (list_empty(&lcu->grouplist) &&
list_empty(&lcu->active_devices) &&
list_empty(&lcu->inactive_devices)) {
@@ -462,11 +462,19 @@ static int read_unit_address_configuration(struct dasd_device *device,
spin_unlock_irqrestore(&lcu->lock, flags);
rc = dasd_sleep_on(cqr);
- if (rc && !suborder_not_supported(cqr)) {
+ if (!rc)
+ goto out;
+
+ if (suborder_not_supported(cqr)) {
+ /* suborder not supported or device unusable for IO */
+ rc = -EOPNOTSUPP;
+ } else {
+ /* IO failed but should be retried */
spin_lock_irqsave(&lcu->lock, flags);
lcu->flags |= NEED_UAC_UPDATE;
spin_unlock_irqrestore(&lcu->lock, flags);
}
+out:
dasd_sfree_request(cqr, cqr->memdev);
return rc;
}
@@ -503,6 +511,14 @@ static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
return rc;
spin_lock_irqsave(&lcu->lock, flags);
+	/*
+	 * If another update is needed, skip the remaining handling:
+	 * the data might already be outdated, and in particular the
+	 * device must not be added to an LCU with a pending update.
+	 */
+ if (lcu->flags & NEED_UAC_UPDATE)
+ goto out;
lcu->pav = NO_PAV;
for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) {
switch (lcu->uac->unit[i].ua_type) {
@@ -521,6 +537,7 @@ static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
alias_list) {
_add_device_to_lcu(lcu, device, refdev);
}
+out:
spin_unlock_irqrestore(&lcu->lock, flags);
return 0;
}
@@ -625,6 +642,7 @@ int dasd_alias_add_device(struct dasd_device *device)
}
if (lcu->flags & UPDATE_PENDING) {
list_move(&device->alias_list, &lcu->active_devices);
+ private->pavgroup = NULL;
_schedule_lcu_update(lcu, device);
}
spin_unlock_irqrestore(&lcu->lock, flags);
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 32fc51341d99..03d27ee9cac6 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -576,6 +576,11 @@ dasd_create_device(struct ccw_device *cdev)
dev_set_drvdata(&cdev->dev, device);
spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+ device->paths_info = kset_create_and_add("paths_info", NULL,
+ &device->cdev->dev.kobj);
+ if (!device->paths_info)
+ dev_warn(&cdev->dev, "Could not create paths_info kset\n");
+
return device;
}
@@ -622,6 +627,9 @@ dasd_delete_device(struct dasd_device *device)
wait_event(dasd_delete_wq, atomic_read(&device->ref_count) == 0);
dasd_generic_free_discipline(device);
+
+ kset_unregister(device->paths_info);
+
/* Disconnect dasd_device structure from ccw_device structure. */
cdev = device->cdev;
device->cdev = NULL;
@@ -1641,6 +1649,39 @@ dasd_path_interval_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(path_interval, 0644, dasd_path_interval_show,
dasd_path_interval_store);
+static ssize_t
+dasd_device_fcs_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct dasd_device *device;
+ int fc_sec;
+ int rc;
+
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(device))
+ return -ENODEV;
+ fc_sec = dasd_path_get_fcs_device(device);
+ if (fc_sec == -EINVAL)
+ rc = snprintf(buf, PAGE_SIZE, "Inconsistent\n");
+ else
+ rc = snprintf(buf, PAGE_SIZE, "%s\n", dasd_path_get_fcs_str(fc_sec));
+ dasd_put_device(device);
+
+ return rc;
+}
+static DEVICE_ATTR(fc_security, 0444, dasd_device_fcs_show, NULL);
+
+static ssize_t
+dasd_path_fcs_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct dasd_path *path = to_dasd_path(kobj);
+ unsigned int fc_sec = path->fc_security;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", dasd_path_get_fcs_str(fc_sec));
+}
+
+static struct kobj_attribute path_fcs_attribute =
+ __ATTR(fc_security, 0444, dasd_path_fcs_show, NULL);
#define DASD_DEFINE_ATTR(_name, _func) \
static ssize_t dasd_##_name##_show(struct device *dev, \
@@ -1697,6 +1738,7 @@ static struct attribute * dasd_attrs[] = {
&dev_attr_path_reset.attr,
&dev_attr_hpf.attr,
&dev_attr_ese.attr,
+ &dev_attr_fc_security.attr,
NULL,
};
@@ -1777,6 +1819,81 @@ dasd_set_feature(struct ccw_device *cdev, int feature, int flag)
}
EXPORT_SYMBOL(dasd_set_feature);
+static struct attribute *paths_info_attrs[] = {
+ &path_fcs_attribute.attr,
+ NULL,
+};
+
+static struct kobj_type path_attr_type = {
+ .release = dasd_path_release,
+ .default_attrs = paths_info_attrs,
+ .sysfs_ops = &kobj_sysfs_ops,
+};
+
+static void dasd_path_init_kobj(struct dasd_device *device, int chp)
+{
+ device->path[chp].kobj.kset = device->paths_info;
+ kobject_init(&device->path[chp].kobj, &path_attr_type);
+}
+
+void dasd_path_create_kobj(struct dasd_device *device, int chp)
+{
+ int rc;
+
+ if (test_bit(DASD_FLAG_OFFLINE, &device->flags))
+ return;
+ if (!device->paths_info) {
+ dev_warn(&device->cdev->dev, "Unable to create paths objects\n");
+ return;
+ }
+ if (device->path[chp].in_sysfs)
+ return;
+ if (!device->path[chp].conf_data)
+ return;
+
+ dasd_path_init_kobj(device, chp);
+
+ rc = kobject_add(&device->path[chp].kobj, NULL, "%x.%02x",
+ device->path[chp].cssid, device->path[chp].chpid);
+	if (rc) {
+		kobject_put(&device->path[chp].kobj);
+		return;
+	}
+	device->path[chp].in_sysfs = true;
+}
+EXPORT_SYMBOL(dasd_path_create_kobj);
+
+void dasd_path_create_kobjects(struct dasd_device *device)
+{
+ u8 lpm, opm;
+
+ opm = dasd_path_get_opm(device);
+ for (lpm = 0x80; lpm; lpm >>= 1) {
+ if (!(lpm & opm))
+ continue;
+ dasd_path_create_kobj(device, pathmask_to_pos(lpm));
+ }
+}
+EXPORT_SYMBOL(dasd_path_create_kobjects);
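/*
 * The mask loop above relies on DASD path masks being MSB-first: bit
 * 0x80 is channel-path position 0 and bit 0x01 is position 7, so
 * (0x80 >> chp) tests path chp and pathmask_to_pos() maps a single-bit
 * mask as 8 - ffs(mask): 0x80 -> 0, ..., 0x01 -> 7.
 */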
+
+static void dasd_path_remove_kobj(struct dasd_device *device, int chp)
+{
+ if (device->path[chp].in_sysfs) {
+ kobject_put(&device->path[chp].kobj);
+ device->path[chp].in_sysfs = false;
+ }
+}
+
+/*
+ * As we keep kobjects for the lifetime of a device, this function must not be
+ * called anywhere but in the context of offlining a device.
+ */
+void dasd_path_remove_kobjects(struct dasd_device *device)
+{
+ int i;
+
+ for (i = 0; i < 8; i++)
+ dasd_path_remove_kobj(device, i);
+}
+EXPORT_SYMBOL(dasd_path_remove_kobjects);
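/*
 * The kobject wiring above follows the standard kset pattern: assign
 * the kset before kobject_add() so it becomes the default parent, name
 * the object with a printf-style format, and drop the initial reference
 * with kobject_put() on failure. A minimal sketch; the demo_* names are
 * illustrative only:
 */
static void demo_release(struct kobject *kobj)
{
	/* object memory is owned elsewhere, as with struct dasd_path */
}

static struct kobj_type demo_ktype = {
	.release   = demo_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

static int demo_add_path(struct kset *paths_info, struct kobject *kobj,
			 u8 cssid, u8 chpid)
{
	int rc;

	kobj->kset = paths_info;	/* default parent for kobject_add() */
	kobject_init(kobj, &demo_ktype);
	rc = kobject_add(kobj, NULL, "%x.%02x", cssid, chpid);
	if (rc)
		kobject_put(kobj);	/* drops the reference from init */
	return rc;
}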
int dasd_add_sysfs_files(struct ccw_device *cdev)
{
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 758ee4153ac1..65eb87cbbb9b 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -103,7 +103,7 @@ struct ext_pool_exhaust_work_data {
};
/* definitions for the path verification worker */
-struct path_verification_work_data {
+struct pe_handler_work_data {
struct work_struct worker;
struct dasd_device *device;
struct dasd_ccw_req cqr;
@@ -111,9 +111,10 @@ struct path_verification_work_data {
__u8 rcd_buffer[DASD_ECKD_RCD_DATA_SIZE];
int isglobal;
__u8 tbvpm;
+ __u8 fcsecpm;
};
-static struct path_verification_work_data *path_verification_worker;
-static DEFINE_MUTEX(dasd_path_verification_mutex);
+static struct pe_handler_work_data *pe_handler_worker;
+static DEFINE_MUTEX(dasd_pe_handler_mutex);
struct check_attention_work_data {
struct work_struct worker;
@@ -143,7 +144,7 @@ dasd_eckd_probe (struct ccw_device *cdev)
"ccw-device options");
return ret;
}
- ret = dasd_generic_probe(cdev, &dasd_eckd_discipline);
+ ret = dasd_generic_probe(cdev);
return ret;
}
@@ -1000,6 +1001,27 @@ static unsigned char dasd_eckd_path_access(void *conf_data, int conf_len)
return 0;
}
+static void dasd_eckd_store_conf_data(struct dasd_device *device,
+ struct dasd_conf_data *conf_data, int chp)
+{
+ struct channel_path_desc_fmt0 *chp_desc;
+ struct subchannel_id sch_id;
+
+ ccw_device_get_schid(device->cdev, &sch_id);
+	/*
+	 * Both the path handling code and read_conf() allocate this
+	 * data; free the old buffer before replacing the pointer.
+	 */
+ kfree(device->path[chp].conf_data);
+ device->path[chp].conf_data = conf_data;
+ device->path[chp].cssid = sch_id.cssid;
+ device->path[chp].ssid = sch_id.ssid;
+ chp_desc = ccw_device_get_chp_desc(device->cdev, chp);
+ if (chp_desc)
+ device->path[chp].chpid = chp_desc->chpid;
+ kfree(chp_desc);
+}
+
static void dasd_eckd_clear_conf_data(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
@@ -1013,9 +1035,32 @@ static void dasd_eckd_clear_conf_data(struct dasd_device *device)
device->path[i].cssid = 0;
device->path[i].ssid = 0;
device->path[i].chpid = 0;
+ dasd_path_notoper(device, i);
}
}
+static void dasd_eckd_read_fc_security(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private = device->private;
+ u8 esm_valid;
+ u8 esm[8];
+ int chp;
+ int rc;
+
+ rc = chsc_scud(private->uid.ssid, (u64 *)esm, &esm_valid);
+ if (rc) {
+ for (chp = 0; chp < 8; chp++)
+ device->path[chp].fc_security = 0;
+ return;
+ }
+
+ for (chp = 0; chp < 8; chp++) {
+ if (esm_valid & (0x80 >> chp))
+ device->path[chp].fc_security = esm[chp];
+ else
+ device->path[chp].fc_security = 0;
+ }
+}
static int dasd_eckd_read_conf(struct dasd_device *device)
{
@@ -1026,12 +1071,9 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
struct dasd_eckd_private *private, path_private;
struct dasd_uid *uid;
char print_path_uid[60], print_device_uid[60];
- struct channel_path_desc_fmt0 *chp_desc;
- struct subchannel_id sch_id;
private = device->private;
opm = ccw_device_get_path_mask(device->cdev);
- ccw_device_get_schid(device->cdev, &sch_id);
conf_data_saved = 0;
path_err = 0;
/* get configuration data per operational path */
@@ -1066,15 +1108,6 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
kfree(conf_data);
continue;
}
- pos = pathmask_to_pos(lpm);
- /* store per path conf_data */
- device->path[pos].conf_data = conf_data;
- device->path[pos].cssid = sch_id.cssid;
- device->path[pos].ssid = sch_id.ssid;
- chp_desc = ccw_device_get_chp_desc(device->cdev, pos);
- if (chp_desc)
- device->path[pos].chpid = chp_desc->chpid;
- kfree(chp_desc);
/*
* build device UID that other path data
* can be compared to it
@@ -1132,18 +1165,13 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
dasd_path_add_cablepm(device, lpm);
continue;
}
- pos = pathmask_to_pos(lpm);
- /* store per path conf_data */
- device->path[pos].conf_data = conf_data;
- device->path[pos].cssid = sch_id.cssid;
- device->path[pos].ssid = sch_id.ssid;
- chp_desc = ccw_device_get_chp_desc(device->cdev, pos);
- if (chp_desc)
- device->path[pos].chpid = chp_desc->chpid;
- kfree(chp_desc);
path_private.conf_data = NULL;
path_private.conf_len = 0;
}
+
+ pos = pathmask_to_pos(lpm);
+ dasd_eckd_store_conf_data(device, conf_data, pos);
+
switch (dasd_eckd_path_access(conf_data, conf_len)) {
case 0x02:
dasd_path_add_nppm(device, lpm);
@@ -1160,6 +1188,8 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
}
}
+ dasd_eckd_read_fc_security(device);
+
return path_err;
}
@@ -1219,7 +1249,7 @@ static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
}
static int rebuild_device_uid(struct dasd_device *device,
- struct path_verification_work_data *data)
+ struct pe_handler_work_data *data)
{
struct dasd_eckd_private *private = device->private;
__u8 lpm, opm = dasd_path_get_opm(device);
@@ -1257,31 +1287,18 @@ static int rebuild_device_uid(struct dasd_device *device,
return rc;
}
-static void do_path_verification_work(struct work_struct *work)
+static void dasd_eckd_path_available_action(struct dasd_device *device,
+ struct pe_handler_work_data *data)
{
- struct path_verification_work_data *data;
- struct dasd_device *device;
struct dasd_eckd_private path_private;
struct dasd_uid *uid;
__u8 path_rcd_buf[DASD_ECKD_RCD_DATA_SIZE];
__u8 lpm, opm, npm, ppm, epm, hpfpm, cablepm;
+ struct dasd_conf_data *conf_data;
unsigned long flags;
char print_uid[60];
- int rc;
-
- data = container_of(work, struct path_verification_work_data, worker);
- device = data->device;
+ int rc, pos;
- /* delay path verification until device was resumed */
- if (test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
- schedule_work(work);
- return;
- }
- /* check if path verification already running and delay if so */
- if (test_and_set_bit(DASD_FLAG_PATH_VERIFY, &device->flags)) {
- schedule_work(work);
- return;
- }
opm = 0;
npm = 0;
ppm = 0;
@@ -1397,6 +1414,14 @@ static void do_path_verification_work(struct work_struct *work)
}
}
+ conf_data = kzalloc(DASD_ECKD_RCD_DATA_SIZE, GFP_KERNEL);
+ if (conf_data) {
+ memcpy(conf_data, data->rcd_buffer,
+ DASD_ECKD_RCD_DATA_SIZE);
+ }
+ pos = pathmask_to_pos(lpm);
+ dasd_eckd_store_conf_data(device, conf_data, pos);
+
/*
* There is a small chance that a path is lost again between
* above path verification and the following modification of
@@ -1417,34 +1442,65 @@ static void do_path_verification_work(struct work_struct *work)
dasd_path_add_cablepm(device, cablepm);
dasd_path_add_nohpfpm(device, hpfpm);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+
+ dasd_path_create_kobj(device, pos);
+ }
+}
+
+static void do_pe_handler_work(struct work_struct *work)
+{
+ struct pe_handler_work_data *data;
+ struct dasd_device *device;
+
+ data = container_of(work, struct pe_handler_work_data, worker);
+ device = data->device;
+
+ /* delay path verification until device was resumed */
+ if (test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
+ schedule_work(work);
+ return;
+ }
+ /* check if path verification already running and delay if so */
+ if (test_and_set_bit(DASD_FLAG_PATH_VERIFY, &device->flags)) {
+ schedule_work(work);
+ return;
}
+
+ if (data->tbvpm)
+ dasd_eckd_path_available_action(device, data);
+ if (data->fcsecpm)
+ dasd_eckd_read_fc_security(device);
+
clear_bit(DASD_FLAG_PATH_VERIFY, &device->flags);
dasd_put_device(device);
if (data->isglobal)
- mutex_unlock(&dasd_path_verification_mutex);
+ mutex_unlock(&dasd_pe_handler_mutex);
else
kfree(data);
}
-static int dasd_eckd_verify_path(struct dasd_device *device, __u8 lpm)
+static int dasd_eckd_pe_handler(struct dasd_device *device,
+ __u8 tbvpm, __u8 fcsecpm)
{
- struct path_verification_work_data *data;
+ struct pe_handler_work_data *data;
data = kmalloc(sizeof(*data), GFP_ATOMIC | GFP_DMA);
if (!data) {
- if (mutex_trylock(&dasd_path_verification_mutex)) {
- data = path_verification_worker;
+ if (mutex_trylock(&dasd_pe_handler_mutex)) {
+ data = pe_handler_worker;
data->isglobal = 1;
- } else
+ } else {
return -ENOMEM;
+ }
} else {
memset(data, 0, sizeof(*data));
data->isglobal = 0;
}
- INIT_WORK(&data->worker, do_path_verification_work);
+ INIT_WORK(&data->worker, do_pe_handler_work);
dasd_get_device(device);
data->device = device;
- data->tbvpm = lpm;
+ data->tbvpm = tbvpm;
+ data->fcsecpm = fcsecpm;
schedule_work(&data->worker);
return 0;
}
@@ -2056,6 +2112,8 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
if (rc)
goto out_err3;
+ dasd_path_create_kobjects(device);
+
/* Read Feature Codes */
dasd_eckd_read_features(device);
@@ -2114,6 +2172,7 @@ out_err2:
device->block = NULL;
out_err1:
dasd_eckd_clear_conf_data(device);
+ dasd_path_remove_kobjects(device);
kfree(device->private);
device->private = NULL;
return rc;
@@ -2132,6 +2191,7 @@ static void dasd_eckd_uncheck_device(struct dasd_device *device)
private->vdsneq = NULL;
private->gneq = NULL;
dasd_eckd_clear_conf_data(device);
+ dasd_path_remove_kobjects(device);
}
static struct dasd_ccw_req *
@@ -6590,7 +6650,7 @@ static struct dasd_discipline dasd_eckd_discipline = {
.check_device = dasd_eckd_check_characteristics,
.uncheck_device = dasd_eckd_uncheck_device,
.do_analysis = dasd_eckd_do_analysis,
- .verify_path = dasd_eckd_verify_path,
+ .pe_handler = dasd_eckd_pe_handler,
.basic_to_ready = dasd_eckd_basic_to_ready,
.online_to_ready = dasd_eckd_online_to_ready,
.basic_to_known = dasd_eckd_basic_to_known,
@@ -6649,16 +6709,16 @@ dasd_eckd_init(void)
GFP_KERNEL | GFP_DMA);
if (!dasd_vol_info_req)
return -ENOMEM;
- path_verification_worker = kmalloc(sizeof(*path_verification_worker),
- GFP_KERNEL | GFP_DMA);
- if (!path_verification_worker) {
+ pe_handler_worker = kmalloc(sizeof(*pe_handler_worker),
+ GFP_KERNEL | GFP_DMA);
+ if (!pe_handler_worker) {
kfree(dasd_reserve_req);
kfree(dasd_vol_info_req);
return -ENOMEM;
}
rawpadpage = (void *)__get_free_page(GFP_KERNEL);
if (!rawpadpage) {
- kfree(path_verification_worker);
+ kfree(pe_handler_worker);
kfree(dasd_reserve_req);
kfree(dasd_vol_info_req);
return -ENOMEM;
@@ -6667,7 +6727,7 @@ dasd_eckd_init(void)
if (!ret)
wait_for_device_probe();
else {
- kfree(path_verification_worker);
+ kfree(pe_handler_worker);
kfree(dasd_reserve_req);
kfree(dasd_vol_info_req);
free_page((unsigned long)rawpadpage);
@@ -6679,7 +6739,7 @@ static void __exit
dasd_eckd_cleanup(void)
{
ccw_driver_unregister(&dasd_eckd_driver);
- kfree(path_verification_worker);
+ kfree(pe_handler_worker);
kfree(dasd_reserve_req);
free_page((unsigned long)rawpadpage);
}
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index c027344ee225..1aeb68794ce8 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -58,7 +58,7 @@ static struct ccw_driver dasd_fba_driver; /* see below */
static int
dasd_fba_probe(struct ccw_device *cdev)
{
- return dasd_generic_probe(cdev, &dasd_fba_discipline);
+ return dasd_generic_probe(cdev);
}
static int
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 7a34161ea5c6..b8a04c42d1d2 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -298,6 +298,7 @@ struct dasd_discipline {
* configuration.
*/
int (*verify_path)(struct dasd_device *, __u8);
+ int (*pe_handler)(struct dasd_device *, __u8, __u8);
/*
* Last things to do when a device is set online, and first things
@@ -418,10 +419,40 @@ extern struct dasd_discipline *dasd_diag_discipline_pointer;
#define DASD_PATH_NOHPF 6
#define DASD_PATH_CUIR 7
#define DASD_PATH_IFCC 8
+#define DASD_PATH_FCSEC 9
#define DASD_THRHLD_MAX 4294967295U
#define DASD_INTERVAL_MAX 4294967295U
+/* FC Endpoint Security Capabilities */
+#define DASD_FC_SECURITY_UNSUP 0
+#define DASD_FC_SECURITY_AUTH 1
+#define DASD_FC_SECURITY_ENC_FCSP2 2
+#define DASD_FC_SECURITY_ENC_ERAS 3
+
+#define DASD_FC_SECURITY_ENC_STR "Encryption"
+static const struct {
+ u8 value;
+ char *name;
+} dasd_path_fcs_mnemonics[] = {
+ { DASD_FC_SECURITY_UNSUP, "Unsupported" },
+ { DASD_FC_SECURITY_AUTH, "Authentication" },
+ { DASD_FC_SECURITY_ENC_FCSP2, DASD_FC_SECURITY_ENC_STR },
+ { DASD_FC_SECURITY_ENC_ERAS, DASD_FC_SECURITY_ENC_STR },
+};
+
+static inline char *dasd_path_get_fcs_str(int val)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dasd_path_fcs_mnemonics); i++) {
+ if (dasd_path_fcs_mnemonics[i].value == val)
+ return dasd_path_fcs_mnemonics[i].name;
+ }
+
+ return dasd_path_fcs_mnemonics[0].name;
+}
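/*
 * Example lookups, per the mnemonic table above:
 *   dasd_path_get_fcs_str(DASD_FC_SECURITY_AUTH)     yields "Authentication"
 *   dasd_path_get_fcs_str(DASD_FC_SECURITY_ENC_ERAS) yields "Encryption"
 *   dasd_path_get_fcs_str(0x7f) misses and falls back to "Unsupported"
 */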
+
struct dasd_path {
unsigned long flags;
u8 cssid;
@@ -430,8 +461,18 @@ struct dasd_path {
struct dasd_conf_data *conf_data;
atomic_t error_count;
unsigned long errorclk;
+ u8 fc_security;
+ struct kobject kobj;
+ bool in_sysfs;
};
+#define to_dasd_path(path) container_of(path, struct dasd_path, kobj)
+
+static inline void dasd_path_release(struct kobject *kobj)
+{
+/* Memory for the dasd_path kobject is freed when dasd_free_device() is called */
+}
+
struct dasd_profile_info {
/* legacy part of profile data, as in dasd_profile_info_t */
@@ -542,6 +583,7 @@ struct dasd_device {
struct dentry *hosts_dentry;
struct dasd_profile profile;
struct dasd_format_entry format_entry;
+ struct kset *paths_info;
};
struct dasd_block {
@@ -766,7 +808,7 @@ void dasd_block_set_timer(struct dasd_block *, int);
void dasd_block_clear_timer(struct dasd_block *);
int dasd_cancel_req(struct dasd_ccw_req *);
int dasd_flush_device_queue(struct dasd_device *);
-int dasd_generic_probe (struct ccw_device *, struct dasd_discipline *);
+int dasd_generic_probe(struct ccw_device *);
void dasd_generic_free_discipline(struct dasd_device *);
void dasd_generic_remove (struct ccw_device *cdev);
int dasd_generic_set_online(struct ccw_device *, struct dasd_discipline *);
@@ -814,6 +856,9 @@ int dasd_set_feature(struct ccw_device *, int, int);
int dasd_add_sysfs_files(struct ccw_device *);
void dasd_remove_sysfs_files(struct ccw_device *);
+void dasd_path_create_kobj(struct dasd_device *, int);
+void dasd_path_create_kobjects(struct dasd_device *);
+void dasd_path_remove_kobjects(struct dasd_device *);
struct dasd_device *dasd_device_from_cdev(struct ccw_device *);
struct dasd_device *dasd_device_from_cdev_locked(struct ccw_device *);
@@ -834,7 +879,8 @@ int dasd_scan_partitions(struct dasd_block *);
void dasd_destroy_partitions(struct dasd_block *);
/* externals in dasd_ioctl.c */
-int dasd_ioctl(struct block_device *, fmode_t, unsigned int, unsigned long);
+int dasd_ioctl(struct block_device *, fmode_t, unsigned int, unsigned long);
+int dasd_set_read_only(struct block_device *bdev, bool ro);
/* externals in dasd_proc.c */
int dasd_proc_init(void);
@@ -911,6 +957,29 @@ static inline void dasd_path_clear_all_verify(struct dasd_device *device)
dasd_path_clear_verify(device, chp);
}
+static inline void dasd_path_fcsec(struct dasd_device *device, int chp)
+{
+ __set_bit(DASD_PATH_FCSEC, &device->path[chp].flags);
+}
+
+static inline void dasd_path_clear_fcsec(struct dasd_device *device, int chp)
+{
+ __clear_bit(DASD_PATH_FCSEC, &device->path[chp].flags);
+}
+
+static inline int dasd_path_need_fcsec(struct dasd_device *device, int chp)
+{
+ return test_bit(DASD_PATH_FCSEC, &device->path[chp].flags);
+}
+
+static inline void dasd_path_clear_all_fcsec(struct dasd_device *device)
+{
+ int chp;
+
+ for (chp = 0; chp < 8; chp++)
+ dasd_path_clear_fcsec(device, chp);
+}
+
static inline void dasd_path_operational(struct dasd_device *device, int chp)
{
__set_bit(DASD_PATH_OPERATIONAL, &device->path[chp].flags);
@@ -1036,6 +1105,17 @@ static inline __u8 dasd_path_get_tbvpm(struct dasd_device *device)
return tbvpm;
}
+static inline int dasd_path_get_fcsecpm(struct dasd_device *device)
+{
+ int chp;
+
+ for (chp = 0; chp < 8; chp++)
+ if (dasd_path_need_fcsec(device, chp))
+ return 1;
+
+ return 0;
+}
+
static inline __u8 dasd_path_get_nppm(struct dasd_device *device)
{
int chp;
@@ -1103,6 +1183,31 @@ static inline __u8 dasd_path_get_hpfpm(struct dasd_device *device)
return hpfpm;
}
+static inline u8 dasd_path_get_fcs_path(struct dasd_device *device, int chp)
+{
+ return device->path[chp].fc_security;
+}
+
+static inline int dasd_path_get_fcs_device(struct dasd_device *device)
+{
+ u8 fc_sec = 0;
+ int chp;
+
+ for (chp = 0; chp < 8; chp++) {
+ if (device->opm & (0x80 >> chp)) {
+ fc_sec = device->path[chp].fc_security;
+ break;
+ }
+ }
+ for (; chp < 8; chp++) {
+ if (device->opm & (0x80 >> chp))
+ if (device->path[chp].fc_security != fc_sec)
+ return -EINVAL;
+ }
+
+ return fc_sec;
+}
+
/*
* add functions for path masks
* the existing path mask will be extended by the given path mask
@@ -1268,6 +1373,11 @@ static inline void dasd_path_notoper(struct dasd_device *device, int chp)
dasd_path_clear_nonpreferred(device, chp);
}
+static inline void dasd_path_fcsec_update(struct dasd_device *device, int chp)
+{
+ dasd_path_fcsec(device, chp);
+}
+
/*
* remove all paths from normal operation
*/
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index cb6427fb9f3d..9f6424408946 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -54,8 +54,6 @@ dasd_ioctl_enable(struct block_device *bdev)
return -ENODEV;
dasd_enable_device(base);
- /* Formatting the dasd device can change the capacity. */
- bd_set_nr_sectors(bdev, get_capacity(base->block->gdp));
dasd_put_device(base);
return 0;
}
@@ -88,7 +86,7 @@ dasd_ioctl_disable(struct block_device *bdev)
* Set i_size to zero, since read, write, etc. check against this
* value.
*/
- bd_set_nr_sectors(bdev, 0);
+ set_capacity(bdev->bd_disk, 0);
dasd_put_device(base);
return 0;
}
@@ -222,9 +220,8 @@ dasd_format(struct dasd_block *block, struct format_data_t *fdata)
* enabling the device later.
*/
if (fdata->start_unit == 0) {
- struct block_device *bdev = bdget_disk(block->gdp, 0);
- bdev->bd_inode->i_blkbits = blksize_bits(fdata->blksize);
- bdput(bdev);
+ block->gdp->part0->bd_inode->i_blkbits =
+ blksize_bits(fdata->blksize);
}
rc = base->discipline->format_device(base, fdata, 1);
@@ -532,28 +529,22 @@ static int dasd_ioctl_information(struct dasd_block *block, void __user *argp,
/*
* Set read only
*/
-static int
-dasd_ioctl_set_ro(struct block_device *bdev, void __user *argp)
+int dasd_set_read_only(struct block_device *bdev, bool ro)
{
struct dasd_device *base;
- int intval, rc;
+ int rc;
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
+ /* do not manipulate hardware state for partitions */
if (bdev_is_partition(bdev))
- // ro setting is not allowed for partitions
- return -EINVAL;
- if (get_user(intval, (int __user *)argp))
- return -EFAULT;
+ return 0;
+
base = dasd_device_from_gendisk(bdev->bd_disk);
if (!base)
return -ENODEV;
- if (!intval && test_bit(DASD_FLAG_DEVICE_RO, &base->flags)) {
- dasd_put_device(base);
- return -EROFS;
- }
- set_disk_ro(bdev->bd_disk, intval);
- rc = dasd_set_feature(base->cdev, DASD_FEATURE_READONLY, intval);
+ if (!ro && test_bit(DASD_FLAG_DEVICE_RO, &base->flags))
+ rc = -EROFS;
+ else
+ rc = dasd_set_feature(base->cdev, DASD_FEATURE_READONLY, ro);
dasd_put_device(base);
return rc;
}
@@ -633,9 +624,6 @@ int dasd_ioctl(struct block_device *bdev, fmode_t mode,
case BIODASDPRRST:
rc = dasd_ioctl_reset_profile(block);
break;
- case BLKROSET:
- rc = dasd_ioctl_set_ro(bdev, argp);
- break;
case DASDAPIVER:
rc = dasd_ioctl_api_version(argp);
break;
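The BLKROSET case can go because the block core now calls the set_read_only hook from block_device_operations (wired up as .set_read_only = dasd_set_read_only earlier in this diff) and takes over the CAP_SYS_ADMIN check and the copy from userspace. A hedged sketch of the hook's contract, with illustrative names:

#include <linux/blkdev.h>
#include <linux/module.h>

static int example_set_read_only(struct block_device *bdev, bool ro)
{
	/* partitions carry no hardware state; whole device only */
	if (bdev_is_partition(bdev))
		return 0;

	/* translate 'ro' to driver/hardware state here */
	return 0;
}

static const struct block_device_operations example_fops = {
	.owner         = THIS_MODULE,
	.set_read_only = example_set_read_only,
};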
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index dfcbe54591fb..8d0de6adcad0 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -384,6 +384,20 @@ static ssize_t chp_chid_external_show(struct device *dev,
}
static DEVICE_ATTR(chid_external, 0444, chp_chid_external_show, NULL);
+static ssize_t chp_esc_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct channel_path *chp = to_channelpath(dev);
+ ssize_t rc;
+
+ mutex_lock(&chp->lock);
+ rc = sprintf(buf, "%x\n", chp->desc_fmt1.esc);
+ mutex_unlock(&chp->lock);
+
+ return rc;
+}
+static DEVICE_ATTR(esc, 0444, chp_esc_show, NULL);
+
static ssize_t util_string_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
@@ -414,6 +428,7 @@ static struct attribute *chp_attrs[] = {
&dev_attr_shared.attr,
&dev_attr_chid.attr,
&dev_attr_chid_external.attr,
+ &dev_attr_esc.attr,
NULL,
};
static struct attribute_group chp_attr_group = {
diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h
index 20259f3fbf45..7ee9eba0abcb 100644
--- a/drivers/s390/cio/chp.h
+++ b/drivers/s390/cio/chp.h
@@ -23,6 +23,7 @@
#define CHP_OFFLINE 1
#define CHP_VARY_ON 2
#define CHP_VARY_OFF 3
+#define CHP_FCES_EVENT 4
struct chp_link {
struct chp_id chpid;
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index fc06a4002168..c22d9ee27ba1 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -37,6 +37,9 @@ static void *sei_page;
static void *chsc_page;
static DEFINE_SPINLOCK(chsc_page_lock);
+#define SEI_VF_FLA 0xc0 /* VF flag for Full Link Address */
+#define SEI_RS_CHPID 0x4 /* 4 in RS field indicates CHPID */
+
/**
* chsc_error_from_response() - convert a chsc response to an error
* @response: chsc response code
@@ -287,6 +290,15 @@ static void s390_process_res_acc(struct chp_link *link)
css_schedule_reprobe();
}
+static int process_fces_event(struct subchannel *sch, void *data)
+{
+ spin_lock_irq(sch->lock);
+ if (sch->driver && sch->driver->chp_event)
+ sch->driver->chp_event(sch, data, CHP_FCES_EVENT);
+ spin_unlock_irq(sch->lock);
+ return 0;
+}
+
struct chsc_sei_nt0_area {
u8 flags;
u8 vf; /* validity flags */
@@ -364,6 +376,16 @@ static char *store_ebcdic(char *dest, const char *src, unsigned long len,
return dest + len;
}
+static void chsc_link_from_sei(struct chp_link *link,
+ struct chsc_sei_nt0_area *sei_area)
+{
+ if ((sei_area->vf & SEI_VF_FLA) != 0) {
+ link->fla = sei_area->fla;
+ link->fla_mask = ((sei_area->vf & SEI_VF_FLA) == SEI_VF_FLA) ?
+ 0xffff : 0xff00;
+ }
+}
+
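/*
 * Worked examples of the VF decoding above (SEI_VF_FLA == 0xc0):
 *   vf = 0xc0: both bits set -> full link address, fla_mask = 0xffff
 *   vf = 0x80: one bit set   -> link address only, fla_mask = 0xff00
 *   vf = 0x00: FLA invalid   -> link->fla is left untouched
 */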
/* Format node ID and parameters for output in LIR log message. */
static void format_node_data(char *params, char *id, struct node_descriptor *nd)
{
@@ -453,15 +475,7 @@ static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
}
memset(&link, 0, sizeof(struct chp_link));
link.chpid = chpid;
- if ((sei_area->vf & 0xc0) != 0) {
- link.fla = sei_area->fla;
- if ((sei_area->vf & 0xc0) == 0xc0)
- /* full link address */
- link.fla_mask = 0xffff;
- else
- /* link address */
- link.fla_mask = 0xff00;
- }
+ chsc_link_from_sei(&link, sei_area);
s390_process_res_acc(&link);
}
@@ -570,6 +584,33 @@ static void chsc_process_sei_ap_cfg_chg(struct chsc_sei_nt0_area *sei_area)
ap_bus_cfg_chg();
}
+static void chsc_process_sei_fces_event(struct chsc_sei_nt0_area *sei_area)
+{
+ struct chp_link link;
+ struct chp_id chpid;
+ struct channel_path *chp;
+
+ CIO_CRW_EVENT(4,
+ "chsc: FCES status notification (rs=%02x, rs_id=%04x, FCES-status=%x)\n",
+ sei_area->rs, sei_area->rsid, sei_area->ccdf[0]);
+
+ if (sei_area->rs != SEI_RS_CHPID)
+ return;
+ chp_id_init(&chpid);
+ chpid.id = sei_area->rsid;
+
+ /* Ignore the event on unknown/invalid chp */
+ chp = chpid_to_chp(chpid);
+ if (!chp)
+ return;
+
+ memset(&link, 0, sizeof(struct chp_link));
+ link.chpid = chpid;
+ chsc_link_from_sei(&link, sei_area);
+
+ for_each_subchannel_staged(process_fces_event, NULL, &link);
+}
+
static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
{
switch (sei_area->cc) {
@@ -611,6 +652,9 @@ static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
case 14: /* scm available notification */
chsc_process_sei_scm_avail(sei_area);
break;
+ case 15: /* FCES event notification */
+ chsc_process_sei_fces_event(sei_area);
+ break;
default: /* other stuff */
CIO_CRW_EVENT(2, "chsc: sei nt0 unhandled cc=%d\n",
sei_area->cc);
@@ -1428,3 +1472,86 @@ int chsc_sgib(u32 origin)
return ret;
}
EXPORT_SYMBOL_GPL(chsc_sgib);
+
+#define SCUD_REQ_LEN 0x10 /* SCUD request block length */
+#define SCUD_REQ_CMD 0x4b /* SCUD Command Code */
+
+struct chse_cudb {
+ u16 flags:8;
+ u16 chp_valid:8;
+ u16 cu;
+ u32 esm_valid:8;
+ u32:24;
+ u8 chpid[8];
+ u32:32;
+ u32:32;
+ u8 esm[8];
+ u32 efla[8];
+} __packed;
+
+struct chsc_scud {
+ struct chsc_header request;
+ u16:4;
+ u16 fmt:4;
+ u16 cssid:8;
+ u16 first_cu;
+ u16:16;
+ u16 last_cu;
+ u32:32;
+ struct chsc_header response;
+ u16:4;
+ u16 fmt_resp:4;
+ u32:24;
+ struct chse_cudb cudb[];
+} __packed;
+
+/**
+ * chsc_scud() - Store control-unit description.
+ * @cu: number of the control-unit
+ * @esm: 8 1-byte endpoint security mode values
+ * @esm_valid: validity mask for @esm
+ *
+ * Interface to retrieve information about the endpoint security
+ * modes for up to 8 paths of a control unit.
+ *
+ * Returns 0 on success.
+ */
+int chsc_scud(u16 cu, u64 *esm, u8 *esm_valid)
+{
+ struct chsc_scud *scud = chsc_page;
+ int ret;
+
+ spin_lock_irq(&chsc_page_lock);
+ memset(chsc_page, 0, PAGE_SIZE);
+ scud->request.length = SCUD_REQ_LEN;
+ scud->request.code = SCUD_REQ_CMD;
+ scud->fmt = 0;
+ scud->cssid = 0;
+ scud->first_cu = cu;
+ scud->last_cu = cu;
+
+ ret = chsc(scud);
+ if (!ret)
+ ret = chsc_error_from_response(scud->response.code);
+
+ if (!ret && (scud->response.length <= 8 || scud->fmt_resp != 0
+ || !(scud->cudb[0].flags & 0x80)
+ || scud->cudb[0].cu != cu)) {
+
+ CIO_MSG_EVENT(2, "chsc: scud failed rc=%04x, L2=%04x "
+ "FMT=%04x, cudb.flags=%02x, cudb.cu=%04x",
+ scud->response.code, scud->response.length,
+ scud->fmt_resp, scud->cudb[0].flags, scud->cudb[0].cu);
+ ret = -EINVAL;
+ }
+
+ if (ret)
+ goto out;
+
+ memcpy(esm, scud->cudb[0].esm, sizeof(*esm));
+ *esm_valid = scud->cudb[0].esm_valid;
+out:
+ spin_unlock_irq(&chsc_page_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(chsc_scud);
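chsc_scud() hands back up to eight per-path endpoint-security-mode bytes plus an MSB-first validity mask; dasd_eckd_read_fc_security() earlier in this diff is its consumer. A hedged sketch of that consumption pattern (example_read_esm is illustrative):

#include <linux/printk.h>

static void example_read_esm(u16 cu)
{
	u8 esm[8] __aligned(8);	/* the cast to u64 * below needs alignment */
	u8 esm_valid;
	int chp, rc;

	rc = chsc_scud(cu, (u64 *)esm, &esm_valid);
	if (rc)
		return;		/* treat every path as unsupported */

	for (chp = 0; chp < 8; chp++)	/* validity mask is MSB-first */
		if (esm_valid & (0x80 >> chp))
			pr_info("path %d esm=%u\n", chp, esm[chp]);
}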
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index c2b83b68bc57..32fa7faa5bf6 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -27,7 +27,8 @@ struct channel_path_desc_fmt1 {
u8 lsn;
u8 desc;
u8 chpid;
- u32:24;
+ u32:16;
+ u8 esc;
u8 chpp;
u32 unused[2];
u16 chid;
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index e0005a4fc978..4b0a7cbb2096 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1156,7 +1156,8 @@ static int io_subchannel_chp_event(struct subchannel *sch,
struct chp_link *link, int event)
{
struct ccw_device *cdev = sch_get_cdev(sch);
- int mask;
+ int mask, chpid, valid_bit;
+ int path_event[8];
mask = chp_ssd_get_mask(&sch->ssd_info, link);
if (!mask)
@@ -1191,6 +1192,18 @@ static int io_subchannel_chp_event(struct subchannel *sch,
cdev->private->path_new_mask |= mask;
io_subchannel_verify(sch);
break;
+ case CHP_FCES_EVENT:
+ /* Forward Endpoint Security event */
+ for (chpid = 0, valid_bit = 0x80; chpid < 8; chpid++,
+ valid_bit >>= 1) {
+ if (mask & valid_bit)
+ path_event[chpid] = PE_PATH_FCES_EVENT;
+ else
+ path_event[chpid] = PE_NONE;
+ }
+ if (cdev)
+ cdev->drv->path_event(cdev, path_event);
+ break;
}
return 0;
}
@@ -1668,7 +1681,7 @@ void ccw_device_wait_idle(struct ccw_device *cdev)
cio_tsch(sch);
if (sch->schib.scsw.cmd.actl == 0)
break;
- udelay_simple(100);
+ udelay(100);
}
}
#endif
diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c
index 8b3ed5b45277..68106be4ba7a 100644
--- a/drivers/s390/cio/vfio_ccw_ops.c
+++ b/drivers/s390/cio/vfio_ccw_ops.c
@@ -394,6 +394,7 @@ static int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info)
switch (info->index) {
case VFIO_CCW_IO_IRQ_INDEX:
case VFIO_CCW_CRW_IRQ_INDEX:
+ case VFIO_CCW_REQ_IRQ_INDEX:
info->count = 1;
info->flags = VFIO_IRQ_INFO_EVENTFD;
break;
@@ -424,6 +425,9 @@ static int vfio_ccw_mdev_set_irqs(struct mdev_device *mdev,
case VFIO_CCW_CRW_IRQ_INDEX:
ctx = &private->crw_trigger;
break;
+ case VFIO_CCW_REQ_IRQ_INDEX:
+ ctx = &private->req_trigger;
+ break;
default:
return -EINVAL;
}
@@ -607,6 +611,27 @@ static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev,
}
}
+/* Request removal of the device */
+static void vfio_ccw_mdev_request(struct mdev_device *mdev, unsigned int count)
+{
+ struct vfio_ccw_private *private = dev_get_drvdata(mdev_parent_dev(mdev));
+
+ if (!private)
+ return;
+
+ if (private->req_trigger) {
+ if (!(count % 10))
+ dev_notice_ratelimited(mdev_dev(private->mdev),
+ "Relaying device request to user (#%u)\n",
+ count);
+
+ eventfd_signal(private->req_trigger, 1);
+ } else if (count == 0) {
+ dev_notice(mdev_dev(private->mdev),
+ "No device request channel registered, blocked until released by user\n");
+ }
+}
+
static const struct mdev_parent_ops vfio_ccw_mdev_ops = {
.owner = THIS_MODULE,
.supported_type_groups = mdev_type_groups,
@@ -617,6 +642,7 @@ static const struct mdev_parent_ops vfio_ccw_mdev_ops = {
.read = vfio_ccw_mdev_read,
.write = vfio_ccw_mdev_write,
.ioctl = vfio_ccw_mdev_ioctl,
+ .request = vfio_ccw_mdev_request,
};
int vfio_ccw_mdev_reg(struct subchannel *sch)
diff --git a/drivers/s390/cio/vfio_ccw_private.h b/drivers/s390/cio/vfio_ccw_private.h
index 8723156b29ea..b2c762eb42b9 100644
--- a/drivers/s390/cio/vfio_ccw_private.h
+++ b/drivers/s390/cio/vfio_ccw_private.h
@@ -84,7 +84,10 @@ struct vfio_ccw_crw {
* @irb: irb info received from interrupt
* @scsw: scsw info
* @io_trigger: eventfd ctx for signaling userspace I/O results
+ * @crw_trigger: eventfd ctx for signaling userspace CRW information
+ * @req_trigger: eventfd ctx for signaling userspace to return the device
* @io_work: work for deferral process of I/O handling
+ * @crw_work: work for deferral process of CRW handling
*/
struct vfio_ccw_private {
struct subchannel *sch;
@@ -108,6 +111,7 @@ struct vfio_ccw_private {
struct eventfd_ctx *io_trigger;
struct eventfd_ctx *crw_trigger;
+ struct eventfd_ctx *req_trigger;
struct work_struct io_work;
struct work_struct crw_work;
} __aligned(8);
diff --git a/drivers/s390/crypto/vfio_ap_drv.c b/drivers/s390/crypto/vfio_ap_drv.c
index be2520cc010b..7dc72cb718b0 100644
--- a/drivers/s390/crypto/vfio_ap_drv.c
+++ b/drivers/s390/crypto/vfio_ap_drv.c
@@ -71,15 +71,11 @@ static int vfio_ap_queue_dev_probe(struct ap_device *apdev)
static void vfio_ap_queue_dev_remove(struct ap_device *apdev)
{
struct vfio_ap_queue *q;
- int apid, apqi;
mutex_lock(&matrix_dev->lock);
q = dev_get_drvdata(&apdev->device);
+ vfio_ap_mdev_reset_queue(q, 1);
dev_set_drvdata(&apdev->device, NULL);
- apid = AP_QID_CARD(q->apqn);
- apqi = AP_QID_QUEUE(q->apqn);
- vfio_ap_mdev_reset_queue(apid, apqi, 1);
- vfio_ap_irq_disable(q);
kfree(q);
mutex_unlock(&matrix_dev->lock);
}
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index e0bde8518745..41fc2e4135fe 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -25,6 +25,7 @@
#define VFIO_AP_MDEV_NAME_HWVIRT "VFIO AP Passthrough Device"
static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev);
+static struct vfio_ap_queue *vfio_ap_find_queue(int apqn);
static int match_apqn(struct device *dev, const void *data)
{
@@ -49,20 +50,15 @@ static struct vfio_ap_queue *vfio_ap_get_queue(
int apqn)
{
struct vfio_ap_queue *q;
- struct device *dev;
if (!test_bit_inv(AP_QID_CARD(apqn), matrix_mdev->matrix.apm))
return NULL;
if (!test_bit_inv(AP_QID_QUEUE(apqn), matrix_mdev->matrix.aqm))
return NULL;
- dev = driver_find_device(&matrix_dev->vfio_ap_drv->driver, NULL,
- &apqn, match_apqn);
- if (!dev)
- return NULL;
- q = dev_get_drvdata(dev);
- q->matrix_mdev = matrix_mdev;
- put_device(dev);
+ q = vfio_ap_find_queue(apqn);
+ if (q)
+ q->matrix_mdev = matrix_mdev;
return q;
}
@@ -119,13 +115,18 @@ static void vfio_ap_wait_for_irqclear(int apqn)
*/
static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q)
{
- if (q->saved_isc != VFIO_AP_ISC_INVALID && q->matrix_mdev)
+ if (!q)
+ return;
+ if (q->saved_isc != VFIO_AP_ISC_INVALID &&
+ !WARN_ON(!(q->matrix_mdev && q->matrix_mdev->kvm))) {
kvm_s390_gisc_unregister(q->matrix_mdev->kvm, q->saved_isc);
- if (q->saved_pfn && q->matrix_mdev)
+ q->saved_isc = VFIO_AP_ISC_INVALID;
+ }
+ if (q->saved_pfn && !WARN_ON(!q->matrix_mdev)) {
vfio_unpin_pages(mdev_dev(q->matrix_mdev->mdev),
&q->saved_pfn, 1);
- q->saved_pfn = 0;
- q->saved_isc = VFIO_AP_ISC_INVALID;
+ q->saved_pfn = 0;
+ }
}
/**
@@ -144,7 +145,7 @@ static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q)
* Returns if ap_aqic function failed with invalid, deconfigured or
* checkstopped AP.
*/
-struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q)
+static struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q)
{
struct ap_qirq_ctrl aqic_gisa = {};
struct ap_queue_status status;
@@ -1037,19 +1038,14 @@ static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
{
struct ap_matrix_mdev *m;
- mutex_lock(&matrix_dev->lock);
-
list_for_each_entry(m, &matrix_dev->mdev_list, node) {
- if ((m != matrix_mdev) && (m->kvm == kvm)) {
- mutex_unlock(&matrix_dev->lock);
+ if ((m != matrix_mdev) && (m->kvm == kvm))
return -EPERM;
- }
}
matrix_mdev->kvm = kvm;
kvm_get_kvm(kvm);
kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook;
- mutex_unlock(&matrix_dev->lock);
return 0;
}
@@ -1083,79 +1079,118 @@ static int vfio_ap_mdev_iommu_notifier(struct notifier_block *nb,
return NOTIFY_DONE;
}
+static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev)
+{
+ kvm_arch_crypto_clear_masks(matrix_mdev->kvm);
+ matrix_mdev->kvm->arch.crypto.pqap_hook = NULL;
+ vfio_ap_mdev_reset_queues(matrix_mdev->mdev);
+ kvm_put_kvm(matrix_mdev->kvm);
+ matrix_mdev->kvm = NULL;
+}
+
static int vfio_ap_mdev_group_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
- int ret;
+ int ret, notify_rc = NOTIFY_OK;
struct ap_matrix_mdev *matrix_mdev;
if (action != VFIO_GROUP_NOTIFY_SET_KVM)
return NOTIFY_OK;
matrix_mdev = container_of(nb, struct ap_matrix_mdev, group_notifier);
+ mutex_lock(&matrix_dev->lock);
if (!data) {
- matrix_mdev->kvm = NULL;
- return NOTIFY_OK;
+ if (matrix_mdev->kvm)
+ vfio_ap_mdev_unset_kvm(matrix_mdev);
+ goto notify_done;
}
ret = vfio_ap_mdev_set_kvm(matrix_mdev, data);
- if (ret)
- return NOTIFY_DONE;
+ if (ret) {
+ notify_rc = NOTIFY_DONE;
+ goto notify_done;
+ }
/* If there is no CRYCB pointer, then we can't copy the masks */
- if (!matrix_mdev->kvm->arch.crypto.crycbd)
- return NOTIFY_DONE;
+ if (!matrix_mdev->kvm->arch.crypto.crycbd) {
+ notify_rc = NOTIFY_DONE;
+ goto notify_done;
+ }
kvm_arch_crypto_set_masks(matrix_mdev->kvm, matrix_mdev->matrix.apm,
matrix_mdev->matrix.aqm,
matrix_mdev->matrix.adm);
- return NOTIFY_OK;
+notify_done:
+ mutex_unlock(&matrix_dev->lock);
+ return notify_rc;
}
-static void vfio_ap_irq_disable_apqn(int apqn)
+static struct vfio_ap_queue *vfio_ap_find_queue(int apqn)
{
struct device *dev;
- struct vfio_ap_queue *q;
+ struct vfio_ap_queue *q = NULL;
dev = driver_find_device(&matrix_dev->vfio_ap_drv->driver, NULL,
&apqn, match_apqn);
if (dev) {
q = dev_get_drvdata(dev);
- vfio_ap_irq_disable(q);
put_device(dev);
}
+
+ return q;
}
-int vfio_ap_mdev_reset_queue(unsigned int apid, unsigned int apqi,
+int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q,
unsigned int retry)
{
struct ap_queue_status status;
+ int ret;
int retry2 = 2;
- int apqn = AP_MKQID(apid, apqi);
- do {
- status = ap_zapq(apqn);
- switch (status.response_code) {
- case AP_RESPONSE_NORMAL:
- while (!status.queue_empty && retry2--) {
- msleep(20);
- status = ap_tapq(apqn, NULL);
- }
- WARN_ON_ONCE(retry2 <= 0);
- return 0;
- case AP_RESPONSE_RESET_IN_PROGRESS:
- case AP_RESPONSE_BUSY:
+ if (!q)
+ return 0;
+
+retry_zapq:
+ status = ap_zapq(q->apqn);
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ ret = 0;
+ break;
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ if (retry--) {
msleep(20);
- break;
- default:
- /* things are really broken, give up */
- return -EIO;
+ goto retry_zapq;
}
- } while (retry--);
+ ret = -EBUSY;
+ break;
+ case AP_RESPONSE_Q_NOT_AVAIL:
+ case AP_RESPONSE_DECONFIGURED:
+ case AP_RESPONSE_CHECKSTOPPED:
+ WARN_ON_ONCE(status.irq_enabled);
+ ret = -EBUSY;
+ goto free_resources;
+ default:
+ /* things are really broken, give up */
+ WARN(true, "PQAP/ZAPQ completed with invalid rc (%x)\n",
+ status.response_code);
+ return -EIO;
+ }
+
+ /* wait for the reset to take effect */
+ while (retry2--) {
+ if (status.queue_empty && !status.irq_enabled)
+ break;
+ msleep(20);
+ status = ap_tapq(q->apqn, NULL);
+ }
+ WARN_ON_ONCE(retry2 <= 0);
- return -EBUSY;
+free_resources:
+ vfio_ap_free_aqic_resources(q);
+
+ return ret;
}
static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev)
@@ -1163,13 +1198,15 @@ static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev)
int ret;
int rc = 0;
unsigned long apid, apqi;
+ struct vfio_ap_queue *q;
struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
for_each_set_bit_inv(apid, matrix_mdev->matrix.apm,
matrix_mdev->matrix.apm_max + 1) {
for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm,
matrix_mdev->matrix.aqm_max + 1) {
- ret = vfio_ap_mdev_reset_queue(apid, apqi, 1);
+ q = vfio_ap_find_queue(AP_MKQID(apid, apqi));
+ ret = vfio_ap_mdev_reset_queue(q, 1);
/*
 * Regardless of whether a queue turns out to be busy, or
* is not operational, we need to continue resetting
@@ -1177,7 +1214,6 @@ static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev)
*/
if (ret)
rc = ret;
- vfio_ap_irq_disable_apqn(AP_MKQID(apid, apqi));
}
}
@@ -1222,13 +1258,8 @@ static void vfio_ap_mdev_release(struct mdev_device *mdev)
struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
mutex_lock(&matrix_dev->lock);
- if (matrix_mdev->kvm) {
- kvm_arch_crypto_clear_masks(matrix_mdev->kvm);
- matrix_mdev->kvm->arch.crypto.pqap_hook = NULL;
- vfio_ap_mdev_reset_queues(mdev);
- kvm_put_kvm(matrix_mdev->kvm);
- matrix_mdev->kvm = NULL;
- }
+ if (matrix_mdev->kvm)
+ vfio_ap_mdev_unset_kvm(matrix_mdev);
mutex_unlock(&matrix_dev->lock);
vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
diff --git a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h
index f46dde56b464..28e9d9989768 100644
--- a/drivers/s390/crypto/vfio_ap_private.h
+++ b/drivers/s390/crypto/vfio_ap_private.h
@@ -88,11 +88,6 @@ struct ap_matrix_mdev {
struct mdev_device *mdev;
};
-extern int vfio_ap_mdev_register(void);
-extern void vfio_ap_mdev_unregister(void);
-int vfio_ap_mdev_reset_queue(unsigned int apid, unsigned int apqi,
- unsigned int retry);
-
struct vfio_ap_queue {
struct ap_matrix_mdev *matrix_mdev;
unsigned long saved_pfn;
@@ -100,5 +95,10 @@ struct vfio_ap_queue {
#define VFIO_AP_ISC_INVALID 0xff
unsigned char saved_isc;
};
-struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q);
+
+int vfio_ap_mdev_register(void);
+void vfio_ap_mdev_unregister(void);
+int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q,
+ unsigned int retry);
+
#endif /* _VFIO_AP_PRIVATE_H_ */
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index 226a5612e855..62ceeb7fc125 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -175,7 +175,7 @@ static int zcrypt_cex2a_queue_probe(struct ap_device *ap_dev)
atomic_set(&zq->load, 0);
ap_queue_init_state(aq);
ap_queue_init_reply(aq, &zq->reply);
- aq->request_timeout = CEX2A_CLEANUP_TIME,
+ aq->request_timeout = CEX2A_CLEANUP_TIME;
aq->private = zq;
rc = zcrypt_queue_register(zq);
if (rc) {
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
index f5195bca1d85..f4a6d3744241 100644
--- a/drivers/s390/crypto/zcrypt_cex4.c
+++ b/drivers/s390/crypto/zcrypt_cex4.c
@@ -631,7 +631,7 @@ static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
atomic_set(&zq->load, 0);
ap_queue_init_state(aq);
ap_queue_init_reply(aq, &zq->reply);
- aq->request_timeout = CEX4_CLEANUP_TIME,
+ aq->request_timeout = CEX4_CLEANUP_TIME;
aq->private = zq;
rc = zcrypt_queue_register(zq);
if (rc) {
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 6f5ddc3eab8c..28f637042d44 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -1079,7 +1079,8 @@ struct qeth_card *qeth_get_card_by_busid(char *bus_id);
void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
int clear_start_mask);
int qeth_threads_running(struct qeth_card *, unsigned long);
-int qeth_set_offline(struct qeth_card *card, bool resetting);
+int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc,
+ bool resetting);
int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
int (*reply_cb)
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index f4b60294a969..cf18d87da41e 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -5507,12 +5507,12 @@ out:
return rc;
}
-static int qeth_set_online(struct qeth_card *card)
+static int qeth_set_online(struct qeth_card *card,
+ const struct qeth_discipline *disc)
{
bool carrier_ok;
int rc;
- mutex_lock(&card->discipline_mutex);
mutex_lock(&card->conf_mutex);
QETH_CARD_TEXT(card, 2, "setonlin");
@@ -5529,7 +5529,7 @@ static int qeth_set_online(struct qeth_card *card)
/* no need for locking / error handling at this early stage: */
qeth_set_real_num_tx_queues(card, qeth_tx_actual_queues(card));
- rc = card->discipline->set_online(card, carrier_ok);
+ rc = disc->set_online(card, carrier_ok);
if (rc)
goto err_online;
@@ -5537,7 +5537,6 @@ static int qeth_set_online(struct qeth_card *card)
kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
mutex_unlock(&card->conf_mutex);
- mutex_unlock(&card->discipline_mutex);
return 0;
err_online:
@@ -5552,15 +5551,14 @@ err_hardsetup:
qdio_free(CARD_DDEV(card));
mutex_unlock(&card->conf_mutex);
- mutex_unlock(&card->discipline_mutex);
return rc;
}
-int qeth_set_offline(struct qeth_card *card, bool resetting)
+int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc,
+ bool resetting)
{
int rc, rc2, rc3;
- mutex_lock(&card->discipline_mutex);
mutex_lock(&card->conf_mutex);
QETH_CARD_TEXT(card, 3, "setoffl");
@@ -5581,7 +5579,7 @@ int qeth_set_offline(struct qeth_card *card, bool resetting)
cancel_work_sync(&card->rx_mode_work);
- card->discipline->set_offline(card);
+ disc->set_offline(card);
qeth_qdio_clear_card(card, 0);
qeth_drain_output_queues(card);
@@ -5602,16 +5600,19 @@ int qeth_set_offline(struct qeth_card *card, bool resetting)
kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
mutex_unlock(&card->conf_mutex);
- mutex_unlock(&card->discipline_mutex);
return 0;
}
EXPORT_SYMBOL_GPL(qeth_set_offline);
static int qeth_do_reset(void *data)
{
+ const struct qeth_discipline *disc;
struct qeth_card *card = data;
int rc;
+ /* Lock-free, other users will block until we are done. */
+ disc = card->discipline;
+
QETH_CARD_TEXT(card, 2, "recover1");
if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
return 0;
@@ -5619,8 +5620,8 @@ static int qeth_do_reset(void *data)
dev_warn(&card->gdev->dev,
"A recovery process has been started for the device\n");
- qeth_set_offline(card, true);
- rc = qeth_set_online(card);
+ qeth_set_offline(card, disc, true);
+ rc = qeth_set_online(card, disc);
if (!rc) {
dev_info(&card->gdev->dev,
"Device successfully recovered!\n");
@@ -6584,6 +6585,7 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
break;
default:
card->info.layer_enforced = true;
+ /* It's so early that we don't need the discipline_mutex yet. */
rc = qeth_core_load_discipline(card, enforced_disc);
if (rc)
goto err_load;
@@ -6616,10 +6618,12 @@ static void qeth_core_remove_device(struct ccwgroup_device *gdev)
QETH_CARD_TEXT(card, 2, "removedv");
+ mutex_lock(&card->discipline_mutex);
if (card->discipline) {
card->discipline->remove(gdev);
qeth_core_free_discipline(card);
}
+ mutex_unlock(&card->discipline_mutex);
qeth_free_qdio_queues(card);
@@ -6634,6 +6638,7 @@ static int qeth_core_set_online(struct ccwgroup_device *gdev)
int rc = 0;
enum qeth_discipline_id def_discipline;
+ mutex_lock(&card->discipline_mutex);
if (!card->discipline) {
def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
QETH_DISCIPLINE_LAYER2;
@@ -6647,16 +6652,23 @@ static int qeth_core_set_online(struct ccwgroup_device *gdev)
}
}
- rc = qeth_set_online(card);
+ rc = qeth_set_online(card, card->discipline);
+
err:
+ mutex_unlock(&card->discipline_mutex);
return rc;
}
static int qeth_core_set_offline(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+ int rc;
- return qeth_set_offline(card, false);
+ mutex_lock(&card->discipline_mutex);
+ rc = qeth_set_offline(card, card->discipline, false);
+ mutex_unlock(&card->discipline_mutex);
+
+ return rc;
}
static void qeth_core_shutdown(struct ccwgroup_device *gdev)
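
The qeth hunks above hoist discipline_mutex out of qeth_set_online()/qeth_set_offline() and into the ccwgroup callbacks, passing the discipline pointer down explicitly; per the in-diff comment, the recovery thread may then read card->discipline once without the lock, because other users block until recovery is done. A minimal sketch of the pattern, with illustrative names:

	#include <linux/mutex.h>

	struct card;

	struct discipline {
		int (*set_online)(struct card *card);
	};

	struct card {
		struct mutex discipline_mutex;	/* guards ->discipline */
		const struct discipline *discipline;
	};

	/* Worker: takes no lock, operates on the caller's snapshot. */
	static int do_set_online(struct card *card,
				 const struct discipline *disc)
	{
		return disc->set_online(card);
	}

	/* Caller: holds the mutex across the whole transition. */
	static int core_set_online(struct card *card)
	{
		int rc;

		mutex_lock(&card->discipline_mutex);
		rc = do_set_online(card, card->discipline);
		mutex_unlock(&card->discipline_mutex);

		return rc;
	}

The worker no longer self-locks, so a caller that already holds discipline_mutex (here: the ccwgroup online/offline callbacks) cannot deadlock against it, and qeth_do_reset() can run on its lock-free snapshot.
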
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 4ed0fb0705a5..4254caf1d9b6 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -2208,7 +2208,7 @@ static void qeth_l2_remove_device(struct ccwgroup_device *gdev)
wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
if (gdev->state == CCWGROUP_ONLINE)
- qeth_set_offline(card, false);
+ qeth_set_offline(card, card->discipline, false);
cancel_work_sync(&card->close_dev_work);
if (card->dev->reg_state == NETREG_REGISTERED)
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index d138ac432d01..4c2cae7ae9a7 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1813,7 +1813,7 @@ static netdev_features_t qeth_l3_osa_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features)
{
- if (qeth_get_ip_version(skb) != 4)
+ if (vlan_get_protocol(skb) != htons(ETH_P_IP))
features &= ~NETIF_F_HW_VLAN_CTAG_TX;
return qeth_features_check(skb, dev, features);
}
@@ -1971,7 +1971,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
if (cgdev->state == CCWGROUP_ONLINE)
- qeth_set_offline(card, false);
+ qeth_set_offline(card, card->discipline, false);
cancel_work_sync(&card->close_dev_work);
if (card->dev->reg_state == NETREG_REGISTERED)
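
In qeth_l3_osa_features_check() above, the IPv4 test switches from a driver-private header probe to vlan_get_protocol(), which also resolves the ethertype when the skb carries a VLAN tag, so the check stays correct for tagged IPv4 frames. A stand-alone restatement of the check, as an illustrative helper:

	#include <linux/if_vlan.h>
	#include <linux/if_ether.h>
	#include <linux/netdevice.h>

	/* Same shape as the hunk above: only IPv4 frames, tagged or
	 * not, may keep hardware CTAG TX offload. */
	static netdev_features_t keep_ctag_tx_for_ipv4(struct sk_buff *skb,
						       netdev_features_t features)
	{
		if (vlan_get_protocol(skb) != htons(ETH_P_IP))
			features &= ~NETIF_F_HW_VLAN_CTAG_TX;

		return features;
	}
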
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 18b713a616de..768873dd55b8 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -292,6 +292,14 @@ static void _zfcp_status_read_scheduler(struct work_struct *work)
stat_work));
}
+static void zfcp_version_change_lost_work(struct work_struct *work)
+{
+ struct zfcp_adapter *adapter = container_of(work, struct zfcp_adapter,
+ version_change_lost_work);
+
+ zfcp_fsf_exchange_config_data_sync(adapter->qdio, NULL);
+}
+
static void zfcp_print_sl(struct seq_file *m, struct service_level *sl)
{
struct zfcp_adapter *adapter =
@@ -353,6 +361,8 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler);
INIT_DELAYED_WORK(&adapter->scan_work, zfcp_fc_scan_ports);
INIT_WORK(&adapter->ns_up_work, zfcp_fc_sym_name_update);
+ INIT_WORK(&adapter->version_change_lost_work,
+ zfcp_version_change_lost_work);
adapter->next_port_scan = jiffies;
@@ -429,6 +439,7 @@ void zfcp_adapter_unregister(struct zfcp_adapter *adapter)
cancel_delayed_work_sync(&adapter->scan_work);
cancel_work_sync(&adapter->stat_work);
cancel_work_sync(&adapter->ns_up_work);
+ cancel_work_sync(&adapter->version_change_lost_work);
zfcp_destroy_adapter_work_queue(adapter);
zfcp_fc_wka_ports_force_offline(adapter->gs);
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 5069b555c6c1..26c89c232ef2 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -200,6 +200,7 @@ struct zfcp_adapter {
struct zfcp_fc_events events;
unsigned long next_port_scan;
struct zfcp_diag_adapter *diagnostics;
+ struct work_struct version_change_lost_work;
};
struct zfcp_port {
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index fdac6350c579..58879213f225 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -20,8 +20,6 @@ extern struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *, u64);
extern struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *);
extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, u64, u32,
u32);
-extern void zfcp_sg_free_table(struct scatterlist *, int);
-extern int zfcp_sg_setup_table(struct scatterlist *, int);
extern void zfcp_adapter_release(struct kref *);
extern void zfcp_adapter_unregister(struct zfcp_adapter *);
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 6cb963a06777..485028324eae 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -242,6 +242,19 @@ static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req)
}
}
+static void
+zfcp_fsf_status_read_version_change(struct zfcp_adapter *adapter,
+ struct fsf_status_read_buffer *sr_buf)
+{
+ if (sr_buf->status_subtype == FSF_STATUS_READ_SUB_LIC_CHANGE) {
+ u32 version = sr_buf->payload.version_change.current_version;
+
+ WRITE_ONCE(adapter->fsf_lic_version, version);
+ snprintf(fc_host_firmware_version(adapter->scsi_host),
+ FC_VERSION_STRING_SIZE, "%#08x", version);
+ }
+}
+
static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
{
struct zfcp_adapter *adapter = req->adapter;
@@ -296,10 +309,16 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
case FSF_STATUS_READ_NOTIFICATION_LOST:
if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS)
zfcp_fc_conditional_port_scan(adapter);
+ if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_VERSION_CHANGE)
+ queue_work(adapter->work_queue,
+ &adapter->version_change_lost_work);
break;
case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
adapter->adapter_features = sr_buf->payload.word[0];
break;
+ case FSF_STATUS_READ_VERSION_CHANGE:
+ zfcp_fsf_status_read_version_change(adapter, sr_buf);
+ break;
}
mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
@@ -2359,8 +2378,7 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
}
}
- blk_add_driver_data(scsi->request->q, scsi->request, &blktrc,
- sizeof(blktrc));
+ blk_add_driver_data(scsi->request, &blktrc, sizeof(blktrc));
}
/**
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index 09d73d0061ef..5e6b601af980 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -134,6 +134,7 @@
#define FSF_STATUS_READ_LINK_UP 0x00000006
#define FSF_STATUS_READ_NOTIFICATION_LOST 0x00000009
#define FSF_STATUS_READ_FEATURE_UPDATE_ALERT 0x0000000C
+#define FSF_STATUS_READ_VERSION_CHANGE 0x0000000D
/* status subtypes for link down */
#define FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK 0x00000000
@@ -142,6 +143,10 @@
/* status subtypes for unsolicited status notification lost */
#define FSF_STATUS_READ_SUB_INCOMING_ELS 0x00000001
+#define FSF_STATUS_READ_SUB_VERSION_CHANGE 0x00000100
+
+/* status subtypes for version change */
+#define FSF_STATUS_READ_SUB_LIC_CHANGE 0x00000001
/* topology that is detected by the adapter */
#define FSF_TOPO_P2P 0x00000001
@@ -226,6 +231,11 @@ struct fsf_link_down_info {
u8 vendor_specific_code;
} __attribute__ ((packed));
+struct fsf_version_change {
+ u32 current_version;
+ u32 previous_version;
+} __packed;
+
struct fsf_status_read_buffer {
u32 status_type;
u32 status_subtype;
@@ -242,6 +252,7 @@ struct fsf_status_read_buffer {
u32 word[FSF_STATUS_READ_PAYLOAD_SIZE/sizeof(u32)];
struct fsf_link_down_info link_down_info;
struct fsf_bit_error_payload bit_error;
+ struct fsf_version_change version_change;
} payload;
} __attribute__ ((packed));
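
The zfcp FSF changes above split unsolicited status work the usual way: recording the new LIC version is cheap and happens inline in the status-read handler, while the lost-notification case needs a synchronous exchange and is bounced to a work item on the adapter's queue. A compressed sketch of that split; the constants come from the hunks, the struct and helper bodies are illustrative:

	#include <linux/workqueue.h>
	#include <linux/types.h>

	#define STATUS_READ_VERSION_CHANGE	0x0000000D
	#define STATUS_READ_SUB_VERSION_CHANGE	0x00000100

	struct adapter {
		struct workqueue_struct *wq;
		struct work_struct version_change_lost_work;
		u32 fsf_lic_version;
	};

	/* Process context: may sleep, so the sync exchange lives here
	 * (zfcp calls zfcp_fsf_exchange_config_data_sync()). */
	static void version_change_lost(struct work_struct *work)
	{
		struct adapter *adapter =
			container_of(work, struct adapter,
				     version_change_lost_work);

		/* elided: re-fetch the full configuration */
		(void)adapter;
	}

	/* Interrupt path: cheap update inline, slow path deferred. */
	static void handle_status(struct adapter *adapter, u32 type,
				  u32 subtype, u32 version)
	{
		if (type == STATUS_READ_VERSION_CHANGE)
			WRITE_ONCE(adapter->fsf_lic_version, version);
		else if (subtype & STATUS_READ_SUB_VERSION_CHANGE)
			queue_work(adapter->wq,
				   &adapter->version_change_lost_work);
	}
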
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index a8a514074084..23ab16d65f2a 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -10,6 +10,7 @@
#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#include <linux/lockdep.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "zfcp_ext.h"
@@ -131,6 +132,33 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdires2");
}
+static void zfcp_qdio_irq_tasklet(struct tasklet_struct *tasklet)
+{
+ struct zfcp_qdio *qdio = from_tasklet(qdio, tasklet, irq_tasklet);
+ struct ccw_device *cdev = qdio->adapter->ccw_device;
+ unsigned int start, error;
+ int completed;
+
+ /* Check the Response Queue: */
+ completed = qdio_get_next_buffers(cdev, 0, &start, &error);
+ if (completed < 0)
+ return;
+ if (completed > 0)
+ zfcp_qdio_int_resp(cdev, error, 0, start, completed,
+ (unsigned long) qdio);
+
+ if (qdio_start_irq(cdev))
+ /* More work pending: */
+ tasklet_schedule(&qdio->irq_tasklet);
+}
+
+static void zfcp_qdio_poll(struct ccw_device *cdev, unsigned long data)
+{
+ struct zfcp_qdio *qdio = (struct zfcp_qdio *) data;
+
+ tasklet_schedule(&qdio->irq_tasklet);
+}
+
static struct qdio_buffer_element *
zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
@@ -256,6 +284,13 @@ int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
int retval;
u8 sbal_number = q_req->sbal_number;
+ /*
+ * This should actually be a spin_lock_bh(stat_lock), to protect against
+ * zfcp_qdio_int_req() in tasklet context.
+ * But we can't do so (and are safe), as we always get called with IRQs
+ * disabled by spin_lock_irq[save](req_q_lock).
+ */
+ lockdep_assert_irqs_disabled();
spin_lock(&qdio->stat_lock);
zfcp_qdio_account(qdio);
spin_unlock(&qdio->stat_lock);
@@ -332,6 +367,8 @@ void zfcp_qdio_close(struct zfcp_qdio *qdio)
wake_up(&qdio->req_q_wq);
+ tasklet_disable(&qdio->irq_tasklet);
+ qdio_stop_irq(adapter->ccw_device);
qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);
/* cleanup used outbound sbals */
@@ -387,6 +424,7 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
init_data.no_output_qs = 1;
init_data.input_handler = zfcp_qdio_int_resp;
init_data.output_handler = zfcp_qdio_int_req;
+ init_data.irq_poll = zfcp_qdio_poll;
init_data.int_parm = (unsigned long) qdio;
init_data.input_sbal_addr_array = input_sbals;
init_data.output_sbal_addr_array = output_sbals;
@@ -433,6 +471,11 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
atomic_or(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);
+ /* Enable processing for QDIO interrupts: */
+ tasklet_enable(&qdio->irq_tasklet);
+ /* This results in a qdio_start_irq(): */
+ tasklet_schedule(&qdio->irq_tasklet);
+
zfcp_qdio_shost_update(adapter, qdio);
return 0;
@@ -450,6 +493,8 @@ void zfcp_qdio_destroy(struct zfcp_qdio *qdio)
if (!qdio)
return;
+ tasklet_kill(&qdio->irq_tasklet);
+
if (qdio->adapter->ccw_device)
qdio_free(qdio->adapter->ccw_device);
@@ -475,6 +520,8 @@ int zfcp_qdio_setup(struct zfcp_adapter *adapter)
spin_lock_init(&qdio->req_q_lock);
spin_lock_init(&qdio->stat_lock);
+ tasklet_setup(&qdio->irq_tasklet, zfcp_qdio_irq_tasklet);
+ tasklet_disable(&qdio->irq_tasklet);
adapter->qdio = qdio;
return 0;
diff --git a/drivers/s390/scsi/zfcp_qdio.h b/drivers/s390/scsi/zfcp_qdio.h
index 6b43d6b254be..9c1f310db155 100644
--- a/drivers/s390/scsi/zfcp_qdio.h
+++ b/drivers/s390/scsi/zfcp_qdio.h
@@ -10,6 +10,7 @@
#ifndef ZFCP_QDIO_H
#define ZFCP_QDIO_H
+#include <linux/interrupt.h>
#include <asm/qdio.h>
#define ZFCP_QDIO_SBALE_LEN PAGE_SIZE
@@ -44,6 +45,7 @@ struct zfcp_qdio {
u64 req_q_util;
atomic_t req_q_full;
wait_queue_head_t req_q_wq;
+ struct tasklet_struct irq_tasklet;
struct zfcp_adapter *adapter;
u16 max_sbale_per_sbal;
u16 max_sbale_per_req;
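
The qdio plumbing above opts zfcp into interrupt avoidance: init_data.irq_poll points at a callback that merely schedules a tasklet, and the tasklet drains the response queue with qdio_get_next_buffers(), then re-arms interrupts with qdio_start_irq(), rescheduling itself when that reports freshly pending work. The lifecycle around the tasklet is the part worth isolating; a sketch with hypothetical names:

	#include <linux/interrupt.h>

	struct foo {
		struct tasklet_struct irq_tasklet;
	};

	static void foo_irq_tasklet(struct tasklet_struct *t)
	{
		struct foo *f = from_tasklet(f, t, irq_tasklet);

		/* elided: drain completions; reschedule if re-arming
		 * the IRQ reports more work already queued */
		(void)f;
	}

	static void foo_setup(struct foo *f)
	{
		tasklet_setup(&f->irq_tasklet, foo_irq_tasklet);
		/* start disabled: a stray schedule before open must
		 * not touch an unconfigured queue */
		tasklet_disable(&f->irq_tasklet);
	}

	static void foo_open(struct foo *f)
	{
		tasklet_enable(&f->irq_tasklet);
		tasklet_schedule(&f->irq_tasklet);	/* arms the IRQ */
	}

	static void foo_close(struct foo *f)
	{
		tasklet_disable(&f->irq_tasklet);	/* waits if running */
	}

	static void foo_destroy(struct foo *f)
	{
		tasklet_kill(&f->irq_tasklet);
	}

tasklet_disable() both blocks new runs and waits for a running instance to finish, which is why zfcp_qdio_close() can call qdio_stop_irq() immediately afterwards.
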
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 3337b1e80412..b4718a1b2bd6 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -2191,10 +2191,10 @@ static void twa_remove(struct pci_dev *pdev)
twa_device_extension_count--;
} /* End twa_remove() */
-#ifdef CONFIG_PM
/* This function is called on PCI suspend */
-static int twa_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused twa_suspend(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct Scsi_Host *host = pci_get_drvdata(pdev);
TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
@@ -2214,32 +2214,19 @@ static int twa_suspend(struct pci_dev *pdev, pm_message_t state)
}
TW_CLEAR_ALL_INTERRUPTS(tw_dev);
- pci_save_state(pdev);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
-
return 0;
} /* End twa_suspend() */
/* This function is called on PCI resume */
-static int twa_resume(struct pci_dev *pdev)
+static int __maybe_unused twa_resume(struct device *dev)
{
int retval = 0;
+ struct pci_dev *pdev = to_pci_dev(dev);
struct Scsi_Host *host = pci_get_drvdata(pdev);
TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
printk(KERN_WARNING "3w-9xxx: Resuming host %d.\n", tw_dev->host->host_no);
- pci_set_power_state(pdev, PCI_D0);
- pci_enable_wake(pdev, PCI_D0, 0);
- pci_restore_state(pdev);
- retval = pci_enable_device(pdev);
- if (retval) {
- TW_PRINTK(tw_dev->host, TW_DRIVER, 0x39, "Enable device failed during resume");
- return retval;
- }
-
- pci_set_master(pdev);
pci_try_set_mwi(pdev);
retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
@@ -2277,11 +2264,9 @@ static int twa_resume(struct pci_dev *pdev)
out_disable_device:
scsi_remove_host(host);
- pci_disable_device(pdev);
return retval;
} /* End twa_resume() */
-#endif
/* PCI Devices supported by this driver */
static struct pci_device_id twa_pci_tbl[] = {
@@ -2297,16 +2282,15 @@ static struct pci_device_id twa_pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, twa_pci_tbl);
+static SIMPLE_DEV_PM_OPS(twa_pm_ops, twa_suspend, twa_resume);
+
/* pci_driver initializer */
static struct pci_driver twa_driver = {
.name = "3w-9xxx",
.id_table = twa_pci_tbl,
.probe = twa_probe,
.remove = twa_remove,
-#ifdef CONFIG_PM
- .suspend = twa_suspend,
- .resume = twa_resume,
-#endif
+ .driver.pm = &twa_pm_ops,
.shutdown = twa_shutdown
};
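
The 3w-9xxx conversion above is the stock move from legacy PCI power management to generic dev_pm_ops, and the same recipe repeats below for 3w-sas, aacraid, the aic79xx/aic7xxx pair and arcmsr: with dev_pm_ops, the PCI core saves and restores config space and handles the D-state transitions itself, so the pci_save_state()/pci_set_power_state()/pci_enable_device()/pci_set_master() boilerplate drops out of the driver callbacks, and __maybe_unused replaces the #ifdef CONFIG_PM guards because SIMPLE_DEV_PM_OPS() only references the callbacks when CONFIG_PM_SLEEP is set. The skeleton, with an illustrative driver name:

	#include <linux/pci.h>
	#include <linux/pm.h>

	static int __maybe_unused foo_suspend(struct device *dev)
	{
		/* quiesce the device only; the core does the PCI
		 * power plumbing */
		dev_dbg(dev, "suspending\n");
		return 0;
	}

	static int __maybe_unused foo_resume(struct device *dev)
	{
		/* reinitialize the device; the core has already
		 * re-enabled it and restored config space */
		dev_dbg(dev, "resuming\n");
		return 0;
	}

	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

	static struct pci_driver foo_driver = {
		.name		= "foo",
		/* .id_table, .probe, .remove as before */
		.driver.pm	= &foo_pm_ops,
	};
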
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index dda6fa857709..b8f1848ecef2 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -1756,11 +1756,10 @@ static void twl_remove(struct pci_dev *pdev)
twl_device_extension_count--;
} /* End twl_remove() */
-#ifdef CONFIG_PM
/* This function is called on PCI suspend */
-static int twl_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused twl_suspend(struct device *dev)
{
- struct Scsi_Host *host = pci_get_drvdata(pdev);
+ struct Scsi_Host *host = dev_get_drvdata(dev);
TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
printk(KERN_WARNING "3w-sas: Suspending host %d.\n", tw_dev->host->host_no);
@@ -1779,32 +1778,18 @@ static int twl_suspend(struct pci_dev *pdev, pm_message_t state)
/* Clear doorbell interrupt */
TWL_CLEAR_DB_INTERRUPT(tw_dev);
- pci_save_state(pdev);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
-
return 0;
} /* End twl_suspend() */
/* This function is called on PCI resume */
-static int twl_resume(struct pci_dev *pdev)
+static int __maybe_unused twl_resume(struct device *dev)
{
int retval = 0;
+ struct pci_dev *pdev = to_pci_dev(dev);
struct Scsi_Host *host = pci_get_drvdata(pdev);
TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
printk(KERN_WARNING "3w-sas: Resuming host %d.\n", tw_dev->host->host_no);
- pci_set_power_state(pdev, PCI_D0);
- pci_enable_wake(pdev, PCI_D0, 0);
- pci_restore_state(pdev);
-
- retval = pci_enable_device(pdev);
- if (retval) {
- TW_PRINTK(tw_dev->host, TW_DRIVER, 0x24, "Enable device failed during resume");
- return retval;
- }
-
- pci_set_master(pdev);
pci_try_set_mwi(pdev);
retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
@@ -1842,11 +1827,9 @@ static int twl_resume(struct pci_dev *pdev)
out_disable_device:
scsi_remove_host(host);
- pci_disable_device(pdev);
return retval;
} /* End twl_resume() */
-#endif
/* PCI Devices supported by this driver */
static struct pci_device_id twl_pci_tbl[] = {
@@ -1855,16 +1838,15 @@ static struct pci_device_id twl_pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, twl_pci_tbl);
+static SIMPLE_DEV_PM_OPS(twl_pm_ops, twl_suspend, twl_resume);
+
/* pci_driver initializer */
static struct pci_driver twl_driver = {
.name = "3w-sas",
.id_table = twl_pci_tbl,
.probe = twl_probe,
.remove = twl_remove,
-#ifdef CONFIG_PM
- .suspend = twl_suspend,
- .resume = twl_resume,
-#endif
+ .driver.pm = &twl_pm_ops,
.shutdown = twl_shutdown
};
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index d654a6cc4162..2ddbcaa667d1 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -132,7 +132,7 @@
static unsigned int disconnect_mask = ~0;
module_param(disconnect_mask, int, 0444);
-static int do_abort(struct Scsi_Host *);
+static int do_abort(struct Scsi_Host *, unsigned int);
static void do_reset(struct Scsi_Host *);
static void bus_reset_cleanup(struct Scsi_Host *);
@@ -197,7 +197,7 @@ static inline void set_resid_from_SCp(struct scsi_cmnd *cmd)
* @reg2: Second 5380 register to poll
* @bit2: Second bitmask to check
* @val2: Second expected value
- * @wait: Time-out in jiffies
+ * @wait: Time-out in jiffies, 0 if sleeping is not allowed
*
* Polls the chip in a reasonably efficient manner waiting for an
* event to occur. After a short quick poll we begin to yield the CPU
@@ -223,7 +223,7 @@ static int NCR5380_poll_politely2(struct NCR5380_hostdata *hostdata,
cpu_relax();
} while (n--);
- if (irqs_disabled() || in_interrupt())
+ if (!wait)
return -ETIMEDOUT;
/* Repeatedly sleep for 1 ms until deadline */
@@ -486,7 +486,7 @@ static int NCR5380_maybe_reset_bus(struct Scsi_Host *instance)
break;
case 2:
shost_printk(KERN_ERR, instance, "bus busy, attempting abort\n");
- do_abort(instance);
+ do_abort(instance, 1);
break;
case 4:
shost_printk(KERN_ERR, instance, "bus busy, attempting reset\n");
@@ -580,11 +580,14 @@ static int NCR5380_queue_command(struct Scsi_Host *instance,
cmd->result = 0;
- if (!NCR5380_acquire_dma_irq(instance))
- return SCSI_MLQUEUE_HOST_BUSY;
-
spin_lock_irqsave(&hostdata->lock, flags);
+ if (!NCR5380_acquire_dma_irq(instance)) {
+ spin_unlock_irqrestore(&hostdata->lock, flags);
+
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
/*
* Insert the cmd into the issue queue. Note that REQUEST SENSE
* commands are added to the head of the queue since any command will
@@ -722,7 +725,6 @@ static void NCR5380_main(struct work_struct *work)
if (!NCR5380_select(instance, cmd)) {
dsprintk(NDEBUG_MAIN, instance, "main: select complete\n");
- maybe_release_dma_irq(instance);
} else {
dsprintk(NDEBUG_MAIN | NDEBUG_QUEUES, instance,
"main: select failed, returning %p to queue\n", cmd);
@@ -734,8 +736,10 @@ static void NCR5380_main(struct work_struct *work)
NCR5380_information_transfer(instance);
done = 0;
}
- if (!hostdata->connected)
+ if (!hostdata->connected) {
NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ maybe_release_dma_irq(instance);
+ }
spin_unlock_irq(&hostdata->lock);
if (!done)
cond_resched();
@@ -818,7 +822,7 @@ static void NCR5380_dma_complete(struct Scsi_Host *instance)
if (toPIO > 0) {
dsprintk(NDEBUG_DMA, instance,
"Doing %d byte PIO to 0x%p\n", cnt, *data);
- NCR5380_transfer_pio(instance, &p, &cnt, data);
+ NCR5380_transfer_pio(instance, &p, &cnt, data, 0);
*count -= toPIO - cnt;
}
}
@@ -1185,7 +1189,7 @@ static bool NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
goto out;
}
if (!hostdata->selecting) {
- do_abort(instance);
+ do_abort(instance, 0);
return false;
}
@@ -1196,7 +1200,7 @@ static bool NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
len = 1;
data = tmp;
phase = PHASE_MSGOUT;
- NCR5380_transfer_pio(instance, &phase, &len, &data);
+ NCR5380_transfer_pio(instance, &phase, &len, &data, 0);
if (len) {
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
cmd->result = DID_ERROR << 16;
@@ -1234,7 +1238,8 @@ out:
*
* Inputs : instance - instance of driver, *phase - pointer to
* what phase is expected, *count - pointer to number of
- * bytes to transfer, **data - pointer to data pointer.
+ * bytes to transfer, **data - pointer to data pointer,
+ * can_sleep - 1 or 0 when sleeping is permitted or not, respectively.
*
* Returns : -1 when different phase is entered without transferring
* maximum number of bytes, 0 if all bytes are transferred or exit
@@ -1253,7 +1258,7 @@ out:
static int NCR5380_transfer_pio(struct Scsi_Host *instance,
unsigned char *phase, int *count,
- unsigned char **data)
+ unsigned char **data, unsigned int can_sleep)
{
struct NCR5380_hostdata *hostdata = shost_priv(instance);
unsigned char p = *phase, tmp;
@@ -1274,7 +1279,8 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance,
* valid
*/
- if (NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, HZ) < 0)
+ if (NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ,
+ HZ * can_sleep) < 0)
break;
dsprintk(NDEBUG_HANDSHAKE, instance, "REQ asserted\n");
@@ -1320,7 +1326,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance,
}
if (NCR5380_poll_politely(hostdata,
- STATUS_REG, SR_REQ, 0, 5 * HZ) < 0)
+ STATUS_REG, SR_REQ, 0, 5 * HZ * can_sleep) < 0)
break;
dsprintk(NDEBUG_HANDSHAKE, instance, "REQ negated, handshake complete\n");
@@ -1395,11 +1401,12 @@ static void do_reset(struct Scsi_Host *instance)
* do_abort - abort the currently established nexus by going to
* MESSAGE OUT phase and sending an ABORT message.
* @instance: relevant scsi host instance
+ * @can_sleep: 1 or 0 when sleeping is permitted or not, respectively
*
* Returns 0 on success, negative error code on failure.
*/
-static int do_abort(struct Scsi_Host *instance)
+static int do_abort(struct Scsi_Host *instance, unsigned int can_sleep)
{
struct NCR5380_hostdata *hostdata = shost_priv(instance);
unsigned char *msgptr, phase, tmp;
@@ -1419,7 +1426,8 @@ static int do_abort(struct Scsi_Host *instance)
* the target sees, so we just handshake.
*/
- rc = NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, 10 * HZ);
+ rc = NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ,
+ 10 * HZ * can_sleep);
if (rc < 0)
goto out;
@@ -1430,7 +1438,8 @@ static int do_abort(struct Scsi_Host *instance)
if (tmp != PHASE_MSGOUT) {
NCR5380_write(INITIATOR_COMMAND_REG,
ICR_BASE | ICR_ASSERT_ATN | ICR_ASSERT_ACK);
- rc = NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, 0, 3 * HZ);
+ rc = NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, 0,
+ 3 * HZ * can_sleep);
if (rc < 0)
goto out;
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
@@ -1440,7 +1449,7 @@ static int do_abort(struct Scsi_Host *instance)
msgptr = &tmp;
len = 1;
phase = PHASE_MSGOUT;
- NCR5380_transfer_pio(instance, &phase, &len, &msgptr);
+ NCR5380_transfer_pio(instance, &phase, &len, &msgptr, can_sleep);
if (len)
rc = -ENXIO;
@@ -1619,12 +1628,12 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
*/
if (NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
- BASR_DRQ, BASR_DRQ, HZ) < 0) {
+ BASR_DRQ, BASR_DRQ, 0) < 0) {
result = -1;
shost_printk(KERN_ERR, instance, "PDMA read: DRQ timeout\n");
}
if (NCR5380_poll_politely(hostdata, STATUS_REG,
- SR_REQ, 0, HZ) < 0) {
+ SR_REQ, 0, 0) < 0) {
result = -1;
shost_printk(KERN_ERR, instance, "PDMA read: !REQ timeout\n");
}
@@ -1636,7 +1645,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
*/
if (NCR5380_poll_politely2(hostdata,
BUS_AND_STATUS_REG, BASR_DRQ, BASR_DRQ,
- BUS_AND_STATUS_REG, BASR_PHASE_MATCH, 0, HZ) < 0) {
+ BUS_AND_STATUS_REG, BASR_PHASE_MATCH, 0, 0) < 0) {
result = -1;
shost_printk(KERN_ERR, instance, "PDMA write: DRQ and phase timeout\n");
}
@@ -1733,7 +1742,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
#if (NDEBUG & NDEBUG_NO_DATAOUT)
shost_printk(KERN_DEBUG, instance, "NDEBUG_NO_DATAOUT set, attempted DATAOUT aborted\n");
sink = 1;
- do_abort(instance);
+ do_abort(instance, 0);
cmd->result = DID_ERROR << 16;
complete_cmd(instance, cmd);
hostdata->connected = NULL;
@@ -1789,7 +1798,8 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
NCR5380_PIO_CHUNK_SIZE);
len = transfersize;
NCR5380_transfer_pio(instance, &phase, &len,
- (unsigned char **)&cmd->SCp.ptr);
+ (unsigned char **)&cmd->SCp.ptr,
+ 0);
cmd->SCp.this_residual -= transfersize - len;
}
#ifdef CONFIG_SUN3
@@ -1800,7 +1810,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
case PHASE_MSGIN:
len = 1;
data = &tmp;
- NCR5380_transfer_pio(instance, &phase, &len, &data);
+ NCR5380_transfer_pio(instance, &phase, &len, &data, 0);
cmd->SCp.Message = tmp;
switch (tmp) {
@@ -1841,7 +1851,6 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
*/
NCR5380_write(TARGET_COMMAND_REG, 0);
- maybe_release_dma_irq(instance);
return;
case MESSAGE_REJECT:
/* Accept message by clearing ACK */
@@ -1907,7 +1916,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
len = 2;
data = extended_msg + 1;
phase = PHASE_MSGIN;
- NCR5380_transfer_pio(instance, &phase, &len, &data);
+ NCR5380_transfer_pio(instance, &phase, &len, &data, 1);
dsprintk(NDEBUG_EXTENDED, instance, "length %d, code 0x%02x\n",
(int)extended_msg[1],
(int)extended_msg[2]);
@@ -1920,7 +1929,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
data = extended_msg + 3;
phase = PHASE_MSGIN;
- NCR5380_transfer_pio(instance, &phase, &len, &data);
+ NCR5380_transfer_pio(instance, &phase, &len, &data, 1);
dsprintk(NDEBUG_EXTENDED, instance, "message received, residual %d\n",
len);
@@ -1967,13 +1976,12 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
len = 1;
data = &msgout;
hostdata->last_message = msgout;
- NCR5380_transfer_pio(instance, &phase, &len, &data);
+ NCR5380_transfer_pio(instance, &phase, &len, &data, 0);
if (msgout == ABORT) {
hostdata->connected = NULL;
hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun);
cmd->result = DID_ERROR << 16;
complete_cmd(instance, cmd);
- maybe_release_dma_irq(instance);
return;
}
msgout = NOP;
@@ -1986,12 +1994,12 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
* PSEUDO-DMA architecture we should probably
* use the dma transfer function.
*/
- NCR5380_transfer_pio(instance, &phase, &len, &data);
+ NCR5380_transfer_pio(instance, &phase, &len, &data, 0);
break;
case PHASE_STATIN:
len = 1;
data = &tmp;
- NCR5380_transfer_pio(instance, &phase, &len, &data);
+ NCR5380_transfer_pio(instance, &phase, &len, &data, 0);
cmd->SCp.Status = tmp;
break;
default:
@@ -2050,7 +2058,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY);
if (NCR5380_poll_politely(hostdata,
- STATUS_REG, SR_SEL, 0, 2 * HZ) < 0) {
+ STATUS_REG, SR_SEL, 0, 0) < 0) {
shost_printk(KERN_ERR, instance, "reselect: !SEL timeout\n");
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
return;
@@ -2062,12 +2070,12 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
*/
if (NCR5380_poll_politely(hostdata,
- STATUS_REG, SR_REQ, SR_REQ, 2 * HZ) < 0) {
+ STATUS_REG, SR_REQ, SR_REQ, 0) < 0) {
if ((NCR5380_read(STATUS_REG) & (SR_BSY | SR_SEL)) == 0)
/* BUS FREE phase */
return;
shost_printk(KERN_ERR, instance, "reselect: REQ timeout\n");
- do_abort(instance);
+ do_abort(instance, 0);
return;
}
@@ -2083,10 +2091,10 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
unsigned char *data = msg;
unsigned char phase = PHASE_MSGIN;
- NCR5380_transfer_pio(instance, &phase, &len, &data);
+ NCR5380_transfer_pio(instance, &phase, &len, &data, 0);
if (len) {
- do_abort(instance);
+ do_abort(instance, 0);
return;
}
}
@@ -2096,7 +2104,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
shost_printk(KERN_ERR, instance, "expecting IDENTIFY message, got ");
spi_print_msg(msg);
printk("\n");
- do_abort(instance);
+ do_abort(instance, 0);
return;
}
lun = msg[0] & 0x07;
@@ -2136,7 +2144,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
* Since we have an established nexus that we can't do anything
* with, we must abort it.
*/
- if (do_abort(instance) == 0)
+ if (do_abort(instance, 0) == 0)
hostdata->busy[target] &= ~(1 << lun);
return;
}
@@ -2283,7 +2291,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p is connected\n", cmd);
hostdata->connected = NULL;
hostdata->dma_len = 0;
- if (do_abort(instance) < 0) {
+ if (do_abort(instance, 0) < 0) {
set_host_byte(cmd, DID_ERROR);
complete_cmd(instance, cmd);
result = FAILED;
@@ -2309,7 +2317,6 @@ out:
}
queue_work(hostdata->work_q, &hostdata->main_task);
- maybe_release_dma_irq(instance);
spin_unlock_irqrestore(&hostdata->lock, flags);
return result;
@@ -2365,7 +2372,6 @@ static void bus_reset_cleanup(struct Scsi_Host *instance)
hostdata->dma_len = 0;
queue_work(hostdata->work_q, &hostdata->main_task);
- maybe_release_dma_irq(instance);
}
/**
diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h
index 5935fd6d1a05..8a3b41932288 100644
--- a/drivers/scsi/NCR5380.h
+++ b/drivers/scsi/NCR5380.h
@@ -277,7 +277,8 @@ static const char *NCR5380_info(struct Scsi_Host *instance);
static void NCR5380_reselect(struct Scsi_Host *instance);
static bool NCR5380_select(struct Scsi_Host *, struct scsi_cmnd *);
static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data);
-static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data);
+static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data,
+ unsigned int can_sleep);
static int NCR5380_poll_politely2(struct NCR5380_hostdata *,
unsigned int, u8, u8,
unsigned int, u8, u8, unsigned long);
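
The NCR5380 rework above replaces the old in_interrupt()/irqs_disabled() test inside the poll helper with an explicit contract: a timeout of 0 jiffies means the caller is atomic and the helper must not sleep, and callers encode permission by multiplying the timeout by can_sleep (e.g. HZ * can_sleep). A self-contained sketch of a helper with that contract; read_status is a hypothetical callback:

	#include <linux/delay.h>
	#include <linux/errno.h>
	#include <linux/jiffies.h>
	#include <linux/processor.h>
	#include <linux/types.h>

	static int poll_bit(u8 (*read_status)(void), u8 bit,
			    unsigned long wait /* jiffies, 0 = no sleep */)
	{
		unsigned long deadline = jiffies + wait;
		int n = 256;

		do {			/* short busy-wait first */
			if (read_status() & bit)
				return 0;
			cpu_relax();
		} while (n--);

		if (!wait)		/* atomic caller: give up now */
			return -ETIMEDOUT;

		while (time_is_after_jiffies(deadline)) {
			msleep(1);	/* yield until the deadline */
			if (read_status() & bit)
				return 0;
		}

		return -ETIMEDOUT;
	}

Pushing the decision to the call sites is what makes the in_interrupt() removal safe: the context is now stated by the caller rather than guessed by the helper, which matters because in_interrupt() cannot tell whether a process-context caller holds a spinlock.
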
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index e3e157a74988..1b1da162f5f6 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -25,6 +25,7 @@
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/blkdev.h>
+#include <linux/compat.h>
#include <linux/delay.h> /* ssleep prototype */
#include <linux/kthread.h>
#include <linux/uaccess.h>
@@ -226,6 +227,12 @@ static int open_getadapter_fib(struct aac_dev * dev, void __user *arg)
return status;
}
+struct compat_fib_ioctl {
+ u32 fibctx;
+ s32 wait;
+ compat_uptr_t fib;
+};
+
/**
* next_getadapter_fib - get the next fib
* @dev: adapter to use
@@ -243,8 +250,19 @@ static int next_getadapter_fib(struct aac_dev * dev, void __user *arg)
struct list_head * entry;
unsigned long flags;
- if(copy_from_user((void *)&f, arg, sizeof(struct fib_ioctl)))
- return -EFAULT;
+ if (in_compat_syscall()) {
+ struct compat_fib_ioctl cf;
+
+ if (copy_from_user(&cf, arg, sizeof(struct compat_fib_ioctl)))
+ return -EFAULT;
+
+ f.fibctx = cf.fibctx;
+ f.wait = cf.wait;
+ f.fib = compat_ptr(cf.fib);
+ } else {
+ if (copy_from_user(&f, arg, sizeof(struct fib_ioctl)))
+ return -EFAULT;
+ }
/*
* Verify that the HANDLE passed in was a valid AdapterFibContext
*
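
The aacraid hunk above retires the compat_alloc_user_space() round-trip: the native ioctl handler now detects 32-bit callers with in_compat_syscall() and translates the 32-bit layout in place, so the same handler can serve both .ioctl and .compat_ioctl. The shape of that copy-in, with a hypothetical struct:

	#include <linux/compat.h>
	#include <linux/errno.h>
	#include <linux/types.h>
	#include <linux/uaccess.h>

	struct foo_ioctl {
		u32 ctx;
		s32 wait;
		void __user *buf;
	};

	struct compat_foo_ioctl {
		u32 ctx;
		s32 wait;
		compat_uptr_t buf;	/* 32-bit user pointer */
	};

	static int foo_copy_arg(struct foo_ioctl *f, void __user *arg)
	{
		if (in_compat_syscall()) {
			struct compat_foo_ioctl cf;

			if (copy_from_user(&cf, arg, sizeof(cf)))
				return -EFAULT;

			f->ctx = cf.ctx;
			f->wait = cf.wait;
			f->buf = compat_ptr(cf.buf);
			return 0;
		}

		return copy_from_user(f, arg, sizeof(*f)) ? -EFAULT : 0;
	}

Because the translation happens on the kernel copy, no scratch user-space buffer is needed, which is exactly what lets the driver drop aac_compat_do_ioctl() and point .compat_ioctl at the native handlers further down.
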
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index b99ca1b0c553..0ae0d1fa2b50 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -1448,6 +1448,7 @@ retry_next:
break;
}
scsi_rescan_device(&device->sdev_gendev);
+ break;
default:
break;
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 8f3772480582..3168915adaa7 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -1182,63 +1182,6 @@ static long aac_cfg_ioctl(struct file *file,
return aac_do_ioctl(aac, cmd, (void __user *)arg);
}
-#ifdef CONFIG_COMPAT
-static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long arg)
-{
- long ret;
- switch (cmd) {
- case FSACTL_MINIPORT_REV_CHECK:
- case FSACTL_SENDFIB:
- case FSACTL_OPEN_GET_ADAPTER_FIB:
- case FSACTL_CLOSE_GET_ADAPTER_FIB:
- case FSACTL_SEND_RAW_SRB:
- case FSACTL_GET_PCI_INFO:
- case FSACTL_QUERY_DISK:
- case FSACTL_DELETE_DISK:
- case FSACTL_FORCE_DELETE_DISK:
- case FSACTL_GET_CONTAINERS:
- case FSACTL_SEND_LARGE_FIB:
- ret = aac_do_ioctl(dev, cmd, (void __user *)arg);
- break;
-
- case FSACTL_GET_NEXT_ADAPTER_FIB: {
- struct fib_ioctl __user *f;
-
- f = compat_alloc_user_space(sizeof(*f));
- ret = 0;
- if (clear_user(f, sizeof(*f)))
- ret = -EFAULT;
- if (copy_in_user(f, (void __user *)arg, sizeof(struct fib_ioctl) - sizeof(u32)))
- ret = -EFAULT;
- if (!ret)
- ret = aac_do_ioctl(dev, cmd, f);
- break;
- }
-
- default:
- ret = -ENOIOCTLCMD;
- break;
- }
- return ret;
-}
-
-static int aac_compat_ioctl(struct scsi_device *sdev, unsigned int cmd,
- void __user *arg)
-{
- struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
- if (!capable(CAP_SYS_RAWIO))
- return -EPERM;
- return aac_compat_do_ioctl(dev, cmd, (unsigned long)arg);
-}
-
-static long aac_compat_cfg_ioctl(struct file *file, unsigned cmd, unsigned long arg)
-{
- if (!capable(CAP_SYS_RAWIO))
- return -EPERM;
- return aac_compat_do_ioctl(file->private_data, cmd, arg);
-}
-#endif
-
static ssize_t aac_show_model(struct device *device,
struct device_attribute *attr, char *buf)
{
@@ -1523,7 +1466,7 @@ static const struct file_operations aac_cfg_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = aac_cfg_ioctl,
#ifdef CONFIG_COMPAT
- .compat_ioctl = aac_compat_cfg_ioctl,
+ .compat_ioctl = aac_cfg_ioctl,
#endif
.open = aac_cfg_open,
.llseek = noop_llseek,
@@ -1536,7 +1479,7 @@ static struct scsi_host_template aac_driver_template = {
.info = aac_info,
.ioctl = aac_ioctl,
#ifdef CONFIG_COMPAT
- .compat_ioctl = aac_compat_ioctl,
+ .compat_ioctl = aac_ioctl,
#endif
.queuecommand = aac_queuecommand,
.bios_param = aac_biosparm,
@@ -1910,11 +1853,9 @@ error_iounmap:
}
-#if (defined(CONFIG_PM))
-static int aac_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused aac_suspend(struct device *dev)
{
-
- struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct Scsi_Host *shost = dev_get_drvdata(dev);
struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
scsi_host_block(shost);
@@ -1923,29 +1864,14 @@ static int aac_suspend(struct pci_dev *pdev, pm_message_t state)
aac_release_resources(aac);
- pci_set_drvdata(pdev, shost);
- pci_save_state(pdev);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
-
return 0;
}
-static int aac_resume(struct pci_dev *pdev)
+static int __maybe_unused aac_resume(struct device *dev)
{
- struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct Scsi_Host *shost = dev_get_drvdata(dev);
struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
- int r;
-
- pci_set_power_state(pdev, PCI_D0);
- pci_enable_wake(pdev, PCI_D0, 0);
- pci_restore_state(pdev);
- r = pci_enable_device(pdev);
-
- if (r)
- goto fail_device;
- pci_set_master(pdev);
if (aac_acquire_resources(aac))
goto fail_device;
/*
@@ -1960,10 +1886,8 @@ static int aac_resume(struct pci_dev *pdev)
fail_device:
printk(KERN_INFO "%s%d: resume failed.\n", aac->name, aac->id);
scsi_host_put(shost);
- pci_disable_device(pdev);
return -ENODEV;
}
-#endif
static void aac_shutdown(struct pci_dev *dev)
{
@@ -2108,15 +2032,14 @@ static struct pci_error_handlers aac_pci_err_handler = {
.resume = aac_pci_resume,
};
+static SIMPLE_DEV_PM_OPS(aac_pm_ops, aac_suspend, aac_resume);
+
static struct pci_driver aac_pci_driver = {
.name = AAC_DRIVERNAME,
.id_table = aac_pci_tbl,
.probe = aac_probe_one,
.remove = aac_remove_one,
-#if (defined(CONFIG_PM))
- .suspend = aac_suspend,
- .resume = aac_resume,
-#endif
+ .driver.pm = &aac_pm_ops,
.shutdown = aac_shutdown,
.err_handler = &aac_pci_err_handler,
};
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index c2c7850ff7b4..79830e77afa9 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -2876,15 +2876,15 @@ static int asc_get_eeprom_string(ushort *serialnum, uchar *cp)
static void asc_prt_asc_board_eeprom(struct seq_file *m, struct Scsi_Host *shost)
{
struct asc_board *boardp = shost_priv(shost);
- ASC_DVC_VAR *asc_dvc_varp;
ASCEEP_CONFIG *ep;
int i;
+ uchar serialstr[13];
#ifdef CONFIG_ISA
+ ASC_DVC_VAR *asc_dvc_varp;
int isa_dma_speed[] = { 10, 8, 7, 6, 5, 4, 3, 2 };
-#endif /* CONFIG_ISA */
- uchar serialstr[13];
asc_dvc_varp = &boardp->dvc_var.asc_dvc_var;
+#endif /* CONFIG_ISA */
ep = &boardp->eep_config.asc_eep;
seq_printf(m,
@@ -3171,7 +3171,6 @@ static void asc_prt_adv_board_eeprom(struct seq_file *m, struct Scsi_Host *shost
static void asc_prt_driver_conf(struct seq_file *m, struct Scsi_Host *shost)
{
struct asc_board *boardp = shost_priv(shost);
- int chip_scsi_id;
seq_printf(m,
"\nLinux Driver Configuration and Information for AdvanSys SCSI Host %d:\n",
@@ -3197,12 +3196,6 @@ static void asc_prt_driver_conf(struct seq_file *m, struct Scsi_Host *shost)
boardp->asc_n_io_port);
seq_printf(m, " io_port 0x%lx\n", shost->io_port);
-
- if (ASC_NARROW_BOARD(boardp)) {
- chip_scsi_id = boardp->dvc_cfg.asc_dvc_cfg.chip_scsi_id;
- } else {
- chip_scsi_id = boardp->dvc_var.adv_dvc_var.chip_scsi_id;
- }
}
/*
@@ -6111,7 +6104,6 @@ static int AdvISR(ADV_DVC_VAR *asc_dvc)
{
AdvPortAddr iop_base;
uchar int_stat;
- ushort target_bit;
ADV_CARR_T *free_carrp;
__le32 irq_next_vpa;
ADV_SCSI_REQ_Q *scsiq;
@@ -6198,8 +6190,6 @@ static int AdvISR(ADV_DVC_VAR *asc_dvc)
asc_dvc->carr_freelist = free_carrp;
asc_dvc->carr_pending_cnt--;
- target_bit = ADV_TID_TO_TIDMASK(scsiq->target_id);
-
/*
* Clear request microcode control flag.
*/
diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c
index 5a227c03895f..0dc831026e9e 100644
--- a/drivers/scsi/aha1740.c
+++ b/drivers/scsi/aha1740.c
@@ -152,6 +152,7 @@ static int aha1740_makecode(unchar *sense, unchar *status)
retval=DID_ERROR; /* It's an Overrun */
/* If not overrun, assume underrun and
* ignore it! */
+ break;
case 0x00: /* No info, assume no error, should
* not occur */
break;
diff --git a/drivers/scsi/aic7xxx/aic79xx.h b/drivers/scsi/aic7xxx/aic79xx.h
index 9a515551641c..dd5dfd4f30a5 100644
--- a/drivers/scsi/aic7xxx/aic79xx.h
+++ b/drivers/scsi/aic7xxx/aic79xx.h
@@ -1330,10 +1330,8 @@ const struct ahd_pci_identity *ahd_find_pci_device(ahd_dev_softc_t);
int ahd_pci_config(struct ahd_softc *,
const struct ahd_pci_identity *);
int ahd_pci_test_register_access(struct ahd_softc *);
-#ifdef CONFIG_PM
-void ahd_pci_suspend(struct ahd_softc *);
-void ahd_pci_resume(struct ahd_softc *);
-#endif
+void __maybe_unused ahd_pci_suspend(struct ahd_softc *);
+void __maybe_unused ahd_pci_resume(struct ahd_softc *);
/************************** SCB and SCB queue management **********************/
void ahd_qinfifo_requeue_tail(struct ahd_softc *ahd,
@@ -1344,10 +1342,8 @@ struct ahd_softc *ahd_alloc(void *platform_arg, char *name);
int ahd_softc_init(struct ahd_softc *);
void ahd_controller_info(struct ahd_softc *ahd, char *buf);
int ahd_init(struct ahd_softc *ahd);
-#ifdef CONFIG_PM
-int ahd_suspend(struct ahd_softc *ahd);
-void ahd_resume(struct ahd_softc *ahd);
-#endif
+int __maybe_unused ahd_suspend(struct ahd_softc *ahd);
+void __maybe_unused ahd_resume(struct ahd_softc *ahd);
int ahd_default_config(struct ahd_softc *ahd);
int ahd_parse_vpddata(struct ahd_softc *ahd,
struct vpd_config *vpd);
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c
index 98b02e7d38bb..c55b5880eb7e 100644
--- a/drivers/scsi/aic7xxx/aic79xx_core.c
+++ b/drivers/scsi/aic7xxx/aic79xx_core.c
@@ -6130,6 +6130,7 @@ ahd_free(struct ahd_softc *ahd)
fallthrough;
case 2:
ahd_dma_tag_destroy(ahd, ahd->shared_data_dmat);
+ break;
case 1:
break;
case 0:
@@ -6542,8 +6543,8 @@ ahd_fini_scbdata(struct ahd_softc *ahd)
kfree(hscb_map);
}
ahd_dma_tag_destroy(ahd, scb_data->hscb_dmat);
- /* FALLTHROUGH */
}
+ fallthrough;
case 4:
case 3:
case 2:
@@ -7866,11 +7867,9 @@ ahd_pause_and_flushwork(struct ahd_softc *ahd)
ahd->flags &= ~AHD_ALL_INTERRUPTS;
}
-#ifdef CONFIG_PM
-int
+int __maybe_unused
ahd_suspend(struct ahd_softc *ahd)
{
-
ahd_pause_and_flushwork(ahd);
if (LIST_FIRST(&ahd->pending_scbs) != NULL) {
@@ -7881,15 +7880,13 @@ ahd_suspend(struct ahd_softc *ahd)
return (0);
}
-void
+void __maybe_unused
ahd_resume(struct ahd_softc *ahd)
{
-
ahd_reset(ahd, /*reinit*/TRUE);
ahd_intr_enable(ahd, TRUE);
ahd_restart(ahd);
}
-#endif
/************************** Busy Target Table *********************************/
/*
@@ -8911,6 +8908,7 @@ ahd_handle_scsi_status(struct ahd_softc *ahd, struct scb *scb)
break;
case SIU_PFC_ILLEGAL_REQUEST:
printk("Illegal request\n");
+ break;
default:
break;
}
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index f32398939f74..d413b1c5fdc5 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -2140,7 +2140,6 @@ ahd_linux_queue_abort_cmd(struct scsi_cmnd *cmd)
u_int saved_scbptr;
u_int active_scbptr;
u_int last_phase;
- u_int saved_scsiid;
u_int cdb_byte;
int retval = SUCCESS;
int was_paused;
@@ -2254,7 +2253,7 @@ ahd_linux_queue_abort_cmd(struct scsi_cmnd *cmd)
* passed in command. That command is currently active on the
* bus or is in the disconnected state.
*/
- saved_scsiid = ahd_inb(ahd, SAVED_SCSIID);
+ ahd_inb(ahd, SAVED_SCSIID);
if (last_phase != P_BUSFREE
&& SCB_GET_TAG(pending_scb) == active_scbptr) {
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
index 8b891a05d9e7..07b670b80f1b 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
@@ -74,11 +74,10 @@ static const struct pci_device_id ahd_linux_pci_id_table[] = {
MODULE_DEVICE_TABLE(pci, ahd_linux_pci_id_table);
-#ifdef CONFIG_PM
-static int
-ahd_linux_pci_dev_suspend(struct pci_dev *pdev, pm_message_t mesg)
+static int __maybe_unused
+ahd_linux_pci_dev_suspend(struct device *dev)
{
- struct ahd_softc *ahd = pci_get_drvdata(pdev);
+ struct ahd_softc *ahd = dev_get_drvdata(dev);
int rc;
if ((rc = ahd_suspend(ahd)))
@@ -86,39 +85,20 @@ ahd_linux_pci_dev_suspend(struct pci_dev *pdev, pm_message_t mesg)
ahd_pci_suspend(ahd);
- pci_save_state(pdev);
- pci_disable_device(pdev);
-
- if (mesg.event & PM_EVENT_SLEEP)
- pci_set_power_state(pdev, PCI_D3hot);
-
return rc;
}
-static int
-ahd_linux_pci_dev_resume(struct pci_dev *pdev)
+static int __maybe_unused
+ahd_linux_pci_dev_resume(struct device *dev)
{
- struct ahd_softc *ahd = pci_get_drvdata(pdev);
- int rc;
-
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
-
- if ((rc = pci_enable_device(pdev))) {
- dev_printk(KERN_ERR, &pdev->dev,
- "failed to enable device after resume (%d)\n", rc);
- return rc;
- }
-
- pci_set_master(pdev);
+ struct ahd_softc *ahd = dev_get_drvdata(dev);
ahd_pci_resume(ahd);
ahd_resume(ahd);
- return rc;
+ return 0;
}
-#endif
static void
ahd_linux_pci_dev_remove(struct pci_dev *pdev)
@@ -224,13 +204,14 @@ ahd_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return (0);
}
+static SIMPLE_DEV_PM_OPS(ahd_linux_pci_dev_pm_ops,
+ ahd_linux_pci_dev_suspend,
+ ahd_linux_pci_dev_resume);
+
static struct pci_driver aic79xx_pci_driver = {
.name = "aic79xx",
.probe = ahd_linux_pci_dev_probe,
-#ifdef CONFIG_PM
- .suspend = ahd_linux_pci_dev_suspend,
- .resume = ahd_linux_pci_dev_resume,
-#endif
+ .driver.pm = &ahd_linux_pci_dev_pm_ops,
.remove = ahd_linux_pci_dev_remove,
.id_table = ahd_linux_pci_id_table
};
diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.c b/drivers/scsi/aic7xxx/aic79xx_pci.c
index 8397ae93f7dd..2f0bdb9225a4 100644
--- a/drivers/scsi/aic7xxx/aic79xx_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_pci.c
@@ -377,8 +377,7 @@ ahd_pci_config(struct ahd_softc *ahd, const struct ahd_pci_identity *entry)
return ahd_pci_map_int(ahd);
}
-#ifdef CONFIG_PM
-void
+void __maybe_unused
ahd_pci_suspend(struct ahd_softc *ahd)
{
/*
@@ -394,7 +393,7 @@ ahd_pci_suspend(struct ahd_softc *ahd)
}
-void
+void __maybe_unused
ahd_pci_resume(struct ahd_softc *ahd)
{
ahd_pci_write_config(ahd->dev_softc, DEVCONFIG,
@@ -404,7 +403,6 @@ ahd_pci_resume(struct ahd_softc *ahd)
ahd_pci_write_config(ahd->dev_softc, CSIZE_LATTIME,
ahd->suspend_state.pci_state.csize_lattime, /*bytes*/1);
}
-#endif
/*
* Perform some simple tests that should catch situations where
diff --git a/drivers/scsi/aic7xxx/aic7xxx.h b/drivers/scsi/aic7xxx/aic7xxx.h
index 88b90f9806c9..11a09798e6b5 100644
--- a/drivers/scsi/aic7xxx/aic7xxx.h
+++ b/drivers/scsi/aic7xxx/aic7xxx.h
@@ -1134,9 +1134,7 @@ const struct ahc_pci_identity *ahc_find_pci_device(ahc_dev_softc_t);
int ahc_pci_config(struct ahc_softc *,
const struct ahc_pci_identity *);
int ahc_pci_test_register_access(struct ahc_softc *);
-#ifdef CONFIG_PM
-void ahc_pci_resume(struct ahc_softc *ahc);
-#endif
+void __maybe_unused ahc_pci_resume(struct ahc_softc *ahc);
/*************************** EISA/VL Front End ********************************/
struct aic7770_identity *aic7770_find_device(uint32_t);
@@ -1160,10 +1158,8 @@ int ahc_chip_init(struct ahc_softc *ahc);
int ahc_init(struct ahc_softc *ahc);
void ahc_intr_enable(struct ahc_softc *ahc, int enable);
void ahc_pause_and_flushwork(struct ahc_softc *ahc);
-#ifdef CONFIG_PM
-int ahc_suspend(struct ahc_softc *ahc);
-int ahc_resume(struct ahc_softc *ahc);
-#endif
+int __maybe_unused ahc_suspend(struct ahc_softc *ahc);
+int __maybe_unused ahc_resume(struct ahc_softc *ahc);
void ahc_set_unit(struct ahc_softc *, int);
void ahc_set_name(struct ahc_softc *, char *);
void ahc_free(struct ahc_softc *ahc);
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index 725bb7f58054..b1b852fe940b 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -4478,6 +4478,7 @@ ahc_free(struct ahc_softc *ahc)
fallthrough;
case 2:
ahc_dma_tag_destroy(ahc, ahc->shared_data_dmat);
+ fallthrough;
case 1:
break;
case 0:
@@ -5590,8 +5591,7 @@ ahc_pause_and_flushwork(struct ahc_softc *ahc)
ahc->flags &= ~AHC_ALL_INTERRUPTS;
}
-#ifdef CONFIG_PM
-int
+int __maybe_unused
ahc_suspend(struct ahc_softc *ahc)
{
@@ -5617,7 +5617,7 @@ ahc_suspend(struct ahc_softc *ahc)
return (0);
}
-int
+int __maybe_unused
ahc_resume(struct ahc_softc *ahc)
{
@@ -5626,7 +5626,6 @@ ahc_resume(struct ahc_softc *ahc)
ahc_restart(ahc);
return (0);
}
-#endif
/************************** Busy Target Table *********************************/
/*
* Return the untagged transaction id for a given target/channel lun.
@@ -5867,9 +5866,8 @@ ahc_search_qinfifo(struct ahc_softc *ahc, int target, char channel,
if ((scb->flags & SCB_ACTIVE) == 0)
printk("Inactive SCB in qinfifo\n");
ahc_done(ahc, scb);
-
- /* FALLTHROUGH */
}
+ fallthrough;
case SEARCH_REMOVE:
break;
case SEARCH_COUNT:
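
Scattered through the SCSI hunks in this series (aha1740_makecode(), ahd_free(), ahc_free(), set_speed_mask(), the aic94xx switches above), an explicit break or the fallthrough pseudo-keyword is added so that -Wimplicit-fallthrough flags only the places where a break is genuinely missing. fallthrough lives in <linux/compiler_attributes.h> and expands to the compiler's fallthrough attribute where supported; a minimal usage sketch:

	#include <linux/compiler_attributes.h>

	/* Illustrative teardown ladder: each completed init level falls
	 * through to undo the levels below it, and says so explicitly. */
	static void foo_teardown(int init_level)
	{
		switch (init_level) {
		case 3:
			/* undo step 3 */
			fallthrough;
		case 2:
			/* undo step 2 */
			fallthrough;
		case 1:
			/* undo step 1 */
			break;
		default:
			break;
		}
	}
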
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
index 9b293b1f0b71..a07e94fac673 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
@@ -121,47 +121,23 @@ static const struct pci_device_id ahc_linux_pci_id_table[] = {
MODULE_DEVICE_TABLE(pci, ahc_linux_pci_id_table);
-#ifdef CONFIG_PM
-static int
-ahc_linux_pci_dev_suspend(struct pci_dev *pdev, pm_message_t mesg)
+static int __maybe_unused
+ahc_linux_pci_dev_suspend(struct device *dev)
{
- struct ahc_softc *ahc = pci_get_drvdata(pdev);
- int rc;
-
- if ((rc = ahc_suspend(ahc)))
- return rc;
+ struct ahc_softc *ahc = dev_get_drvdata(dev);
- pci_save_state(pdev);
- pci_disable_device(pdev);
-
- if (mesg.event & PM_EVENT_SLEEP)
- pci_set_power_state(pdev, PCI_D3hot);
-
- return rc;
+ return ahc_suspend(ahc);
}
-static int
-ahc_linux_pci_dev_resume(struct pci_dev *pdev)
+static int __maybe_unused
+ahc_linux_pci_dev_resume(struct device *dev)
{
- struct ahc_softc *ahc = pci_get_drvdata(pdev);
- int rc;
-
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
-
- if ((rc = pci_enable_device(pdev))) {
- dev_printk(KERN_ERR, &pdev->dev,
- "failed to enable device after resume (%d)\n", rc);
- return rc;
- }
-
- pci_set_master(pdev);
+ struct ahc_softc *ahc = dev_get_drvdata(dev);
ahc_pci_resume(ahc);
return (ahc_resume(ahc));
}
-#endif
static void
ahc_linux_pci_dev_remove(struct pci_dev *pdev)
@@ -319,14 +295,14 @@ ahc_pci_write_config(ahc_dev_softc_t pci, int reg, uint32_t value, int width)
}
}
+static SIMPLE_DEV_PM_OPS(ahc_linux_pci_dev_pm_ops,
+ ahc_linux_pci_dev_suspend,
+ ahc_linux_pci_dev_resume);
static struct pci_driver aic7xxx_pci_driver = {
.name = "aic7xxx",
.probe = ahc_linux_pci_dev_probe,
-#ifdef CONFIG_PM
- .suspend = ahc_linux_pci_dev_suspend,
- .resume = ahc_linux_pci_dev_resume,
-#endif
+ .driver.pm = &ahc_linux_pci_dev_pm_ops,
.remove = ahc_linux_pci_dev_remove,
.id_table = ahc_linux_pci_id_table
};
diff --git a/drivers/scsi/aic7xxx/aic7xxx_pci.c b/drivers/scsi/aic7xxx/aic7xxx_pci.c
index 656f680c7802..dab3a6d12c4d 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_pci.c
@@ -2008,8 +2008,7 @@ ahc_pci_chip_init(struct ahc_softc *ahc)
return (ahc_chip_init(ahc));
}
-#ifdef CONFIG_PM
-void
+void __maybe_unused
ahc_pci_resume(struct ahc_softc *ahc)
{
/*
@@ -2040,7 +2039,6 @@ ahc_pci_resume(struct ahc_softc *ahc)
ahc_release_seeprom(&sd);
}
}
-#endif
static int
ahc_aic785X_setup(struct ahc_softc *ahc)
diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c
index e2d880a5f391..13677973da5c 100644
--- a/drivers/scsi/aic94xx/aic94xx_scb.c
+++ b/drivers/scsi/aic94xx/aic94xx_scb.c
@@ -721,6 +721,7 @@ static void set_speed_mask(u8 *speed_mask, struct asd_phy_desc *pd)
fallthrough;
case SAS_LINK_RATE_3_0_GBPS:
*speed_mask |= SAS_SPEED_15_DIS;
+ fallthrough;
default:
case SAS_LINK_RATE_1_5_GBPS:
/* nothing to do */
@@ -739,6 +740,7 @@ static void set_speed_mask(u8 *speed_mask, struct asd_phy_desc *pd)
switch (pd->min_sata_lrate) {
case SAS_LINK_RATE_3_0_GBPS:
*speed_mask |= SATA_SPEED_15_DIS;
+ fallthrough;
default:
case SAS_LINK_RATE_1_5_GBPS:
/* nothing to do */
diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c
index f923ed019d4a..71d18f607dae 100644
--- a/drivers/scsi/aic94xx/aic94xx_task.c
+++ b/drivers/scsi/aic94xx/aic94xx_task.c
@@ -269,7 +269,6 @@ Again:
case TA_I_T_NEXUS_LOSS:
opcode = dl->status_block[0];
goto Again;
- break;
case TF_INV_CONN_HANDLE:
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_DEVICE_UNKNOWN;
@@ -316,6 +315,7 @@ Again:
break;
case SAS_PROTOCOL_SSP:
asd_unbuild_ssp_ascb(ascb);
+ break;
default:
break;
}
@@ -610,6 +610,7 @@ out_err_unmap:
break;
case SAS_PROTOCOL_SSP:
asd_unbuild_ssp_ascb(a);
+ break;
default:
break;
}
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
index 5d054d5c70a5..0f6abd233614 100644
--- a/drivers/scsi/arcmsr/arcmsr.h
+++ b/drivers/scsi/arcmsr/arcmsr.h
@@ -83,6 +83,7 @@ struct device_attribute;
#define PCI_DEVICE_ID_ARECA_1886 0x188A
#define ARCMSR_HOURS (1000 * 60 * 60 * 4)
#define ARCMSR_MINUTES (1000 * 60 * 60)
+#define ARCMSR_DEFAULT_TIMEOUT 90
/*
**********************************************************************************
**
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index e4fdb473b990..4b79661275c9 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -99,6 +99,10 @@ static int set_date_time = 0;
module_param(set_date_time, int, S_IRUGO);
MODULE_PARM_DESC(set_date_time, " send date, time to iop(0 ~ 1), set_date_time=1(enable), default(=0) is disable");
+static int cmd_timeout = ARCMSR_DEFAULT_TIMEOUT;
+module_param(cmd_timeout, int, S_IRUGO);
+MODULE_PARM_DESC(cmd_timeout, " scsi cmd timeout(0 ~ 120 sec.), default is 90");
+
#define ARCMSR_SLEEPTIME 10
#define ARCMSR_RETRYCOUNT 12
@@ -113,8 +117,8 @@ static int arcmsr_bios_param(struct scsi_device *sdev,
static int arcmsr_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static int arcmsr_probe(struct pci_dev *pdev,
const struct pci_device_id *id);
-static int arcmsr_suspend(struct pci_dev *pdev, pm_message_t state);
-static int arcmsr_resume(struct pci_dev *pdev);
+static int __maybe_unused arcmsr_suspend(struct device *dev);
+static int __maybe_unused arcmsr_resume(struct device *dev);
static void arcmsr_remove(struct pci_dev *pdev);
static void arcmsr_shutdown(struct pci_dev *pdev);
static void arcmsr_iop_init(struct AdapterControlBlock *acb);
@@ -140,6 +144,7 @@ static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
static void arcmsr_free_irq(struct pci_dev *, struct AdapterControlBlock *);
static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb);
static void arcmsr_set_iop_datetime(struct timer_list *);
+static int arcmsr_slave_config(struct scsi_device *sdev);
static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, int queue_depth)
{
if (queue_depth > ARCMSR_MAX_CMD_PERLUN)
@@ -155,6 +160,7 @@ static struct scsi_host_template arcmsr_scsi_host_template = {
.eh_abort_handler = arcmsr_abort,
.eh_bus_reset_handler = arcmsr_bus_reset,
.bios_param = arcmsr_bios_param,
+ .slave_configure = arcmsr_slave_config,
.change_queue_depth = arcmsr_adjust_disk_queue_depth,
.can_queue = ARCMSR_DEFAULT_OUTSTANDING_CMD,
.this_id = ARCMSR_SCSI_INITIATOR_ID,
@@ -216,13 +222,14 @@ static struct pci_device_id arcmsr_device_id_table[] = {
};
MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table);
+static SIMPLE_DEV_PM_OPS(arcmsr_pm_ops, arcmsr_suspend, arcmsr_resume);
+
static struct pci_driver arcmsr_pci_driver = {
.name = "arcmsr",
.id_table = arcmsr_device_id_table,
.probe = arcmsr_probe,
.remove = arcmsr_remove,
- .suspend = arcmsr_suspend,
- .resume = arcmsr_resume,
+ .driver.pm = &arcmsr_pm_ops,
.shutdown = arcmsr_shutdown,
};
/*
@@ -1126,8 +1133,9 @@ static void arcmsr_free_irq(struct pci_dev *pdev,
pci_free_irq_vectors(pdev);
}
-static int arcmsr_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused arcmsr_suspend(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct Scsi_Host *host = pci_get_drvdata(pdev);
struct AdapterControlBlock *acb =
(struct AdapterControlBlock *)host->hostdata;
@@ -1140,29 +1148,18 @@ static int arcmsr_suspend(struct pci_dev *pdev, pm_message_t state)
flush_work(&acb->arcmsr_do_message_isr_bh);
arcmsr_stop_adapter_bgrb(acb);
arcmsr_flush_adapter_cache(acb);
- pci_set_drvdata(pdev, host);
- pci_save_state(pdev);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
return 0;
}
-static int arcmsr_resume(struct pci_dev *pdev)
+static int __maybe_unused arcmsr_resume(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct Scsi_Host *host = pci_get_drvdata(pdev);
struct AdapterControlBlock *acb =
(struct AdapterControlBlock *)host->hostdata;
- pci_set_power_state(pdev, PCI_D0);
- pci_enable_wake(pdev, PCI_D0, 0);
- pci_restore_state(pdev);
- if (pci_enable_device(pdev)) {
- pr_warn("%s: pci_enable_device error\n", __func__);
- return -ENODEV;
- }
if (arcmsr_set_dma_mask(acb))
goto controller_unregister;
- pci_set_master(pdev);
if (arcmsr_request_irq(pdev, acb) == FAILED)
goto controller_stop;
switch (acb->adapter_type) {
@@ -1207,9 +1204,7 @@ controller_unregister:
if (acb->adapter_type == ACB_ADAPTER_TYPE_F)
arcmsr_free_io_queue(acb);
arcmsr_unmap_pciregion(acb);
- pci_release_regions(pdev);
scsi_host_put(host);
- pci_disable_device(pdev);
return -ENODEV;
}
@@ -3156,10 +3151,12 @@ message_out:
static struct CommandControlBlock *arcmsr_get_freeccb(struct AdapterControlBlock *acb)
{
- struct list_head *head = &acb->ccb_free_list;
+ struct list_head *head;
struct CommandControlBlock *ccb = NULL;
unsigned long flags;
+
spin_lock_irqsave(&acb->ccblist_lock, flags);
+ head = &acb->ccb_free_list;
if (!list_empty(head)) {
ccb = list_entry(head->next, struct CommandControlBlock, list);
list_del_init(&ccb->list);
@@ -3193,11 +3190,11 @@ static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
/* ISO, ECMA, & ANSI versions */
inqdata[4] = 31;
/* length of additional data */
- strncpy(&inqdata[8], "Areca ", 8);
+ memcpy(&inqdata[8], "Areca ", 8);
/* Vendor Identification */
- strncpy(&inqdata[16], "RAID controller ", 16);
+ memcpy(&inqdata[16], "RAID controller ", 16);
/* Product Identification */
- strncpy(&inqdata[32], "R001", 4); /* Product Revision */
+ memcpy(&inqdata[32], "R001", 4); /* Product Revision */
sg = scsi_sglist(cmd);
buffer = kmap_atomic(sg_page(sg)) + sg->offset;
@@ -3256,6 +3253,16 @@ static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
static DEF_SCSI_QCMD(arcmsr_queue_command)
+static int arcmsr_slave_config(struct scsi_device *sdev)
+{
+ unsigned int dev_timeout;
+
+ dev_timeout = sdev->request_queue->rq_timeout;
+ if ((cmd_timeout > 0) && ((cmd_timeout * HZ) > dev_timeout))
+ blk_queue_rq_timeout(sdev->request_queue, cmd_timeout * HZ);
+ return 0;
+}
+
static void arcmsr_get_adapter_config(struct AdapterControlBlock *pACB, uint32_t *rwbuffer)
{
int count;
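
The slave_configure hook added above only ever lengthens the block-layer request timeout, so it can never undercut what the block layer already configured. A minimal sketch of the same shape, with an illustrative module-parameter default (names mirror the patch but this is not a drop-in for other drivers):

    #include <linux/blkdev.h>
    #include <scsi/scsi_device.h>

    static int cmd_timeout = 90;        /* seconds; illustrative default */

    static int example_slave_configure(struct scsi_device *sdev)
    {
        unsigned int cur = sdev->request_queue->rq_timeout;   /* jiffies */

        /* Raise, never lower, the per-device request timeout. */
        if (cmd_timeout > 0 && cmd_timeout * HZ > cur)
            blk_queue_rq_timeout(sdev->request_queue, cmd_timeout * HZ);
        return 0;
    }

The strncpy() to memcpy() change in the same file is a separate cleanup: SCSI INQUIRY vendor, product, and revision fields are fixed-width and space-padded rather than NUL-terminated, so copying exactly the field width with memcpy() matches the format and avoids the truncation warning strncpy() triggers when the length equals the bound.
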
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index a82b63a66635..95d7a3586083 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -376,15 +376,11 @@ static int falcon_get_lock(struct Scsi_Host *instance)
if (IS_A_TT())
return 1;
- if (stdma_is_locked_by(scsi_falcon_intr) &&
- instance->hostt->can_queue > 1)
+ if (stdma_is_locked_by(scsi_falcon_intr))
return 1;
- if (in_interrupt())
- return stdma_try_lock(scsi_falcon_intr, instance);
-
- stdma_lock(scsi_falcon_intr, instance);
- return 1;
+ /* stdma_lock() may sleep which means it can't be used here */
+ return stdma_try_lock(scsi_falcon_intr, instance);
}
#ifndef MODULE
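
The falcon_get_lock() rewrite above makes the function safe in atomic context: stdma_lock() can sleep waiting for the ST-DMA chip, so the path now only ever try-locks and reports failure for the caller to retry. A minimal sketch of the shape, with hypothetical helpers standing in for the stdma API:

    struct host_ctx;                          /* hypothetical */
    int bus_locked_by_us(struct host_ctx *h); /* hypothetical helpers */
    int bus_try_lock(struct host_ctx *h);     /* 0 = busy */

    /* Callable from contexts that must not sleep: either we already
     * hold the bus, or we try-lock and let the caller retry later. */
    static int try_get_bus(struct host_ctx *h)
    {
        if (bus_locked_by_us(h))
            return 1;
        return bus_try_lock(h);
    }
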
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 5c3513a4b450..90fcddb76f46 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -164,7 +164,7 @@ DEVICE_ATTR(beiscsi_active_session_count, S_IRUGO,
beiscsi_active_session_disp, NULL);
DEVICE_ATTR(beiscsi_free_session_count, S_IRUGO,
beiscsi_free_session_disp, NULL);
-struct device_attribute *beiscsi_attrs[] = {
+static struct device_attribute *beiscsi_attrs[] = {
&dev_attr_beiscsi_log_enable,
&dev_attr_beiscsi_drvr_ver,
&dev_attr_beiscsi_adapter_family,
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 96d6e384b2b2..0d4928567265 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -1244,18 +1244,14 @@ beiscsi_adap_family_disp(struct device *dev, struct device_attribute *attr,
case OC_DEVICE_ID2:
return snprintf(buf, PAGE_SIZE,
"Obsolete/Unsupported BE2 Adapter Family\n");
- break;
case BE_DEVICE_ID2:
case OC_DEVICE_ID3:
return snprintf(buf, PAGE_SIZE, "BE3-R Adapter Family\n");
- break;
case OC_SKH_ID1:
return snprintf(buf, PAGE_SIZE, "Skyhawk-R Adapter Family\n");
- break;
default:
return snprintf(buf, PAGE_SIZE,
"Unknown Adapter Family: 0x%x\n", dev_id);
- break;
}
}
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index 3486e402bfc1..49a14157f123 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -5671,7 +5671,7 @@ bfa_fcs_lport_scn_process_rscn(struct bfa_fcs_lport_s *port,
bfa_fcs_lport_ms_fabric_rscn(port);
break;
}
- /* !!!!!!!!! Fall Through !!!!!!!!!!!!! */
+ fallthrough;
case FC_RSCN_FORMAT_AREA:
case FC_RSCN_FORMAT_DOMAIN:
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 325ad8a592bb..5740302d83ac 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -387,7 +387,7 @@ bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
case IOC_E_PFFAILED:
case IOC_E_HWERROR:
bfa_ioc_timer_stop(ioc);
- /* !!! fall through !!! */
+ fallthrough;
case IOC_E_TIMEOUT:
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
@@ -437,7 +437,7 @@ bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
case IOC_E_PFFAILED:
case IOC_E_HWERROR:
bfa_hb_timer_stop(ioc);
- /* !!! fall through !!! */
+ fallthrough;
case IOC_E_HBFAIL:
if (ioc->iocpf.auto_recover)
bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
@@ -3299,6 +3299,7 @@ bfa_ablk_isr(void *cbarg, struct bfi_mbmsg_s *msg)
case BFI_ABLK_I2H_PORT_CONFIG:
/* update config port mode */
ablk->ioc->port_mode_cfg = rsp->port_mode;
+ break;
case BFI_ABLK_I2H_PF_DELETE:
case BFI_ABLK_I2H_PF_UPDATE:
@@ -5871,6 +5872,7 @@ bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
break;
case BFA_DCONF_SM_EXIT:
bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
+ break;
case BFA_DCONF_SM_IOCDISABLE:
case BFA_DCONF_SM_WR:
case BFA_DCONF_SM_FLASH_COMP:
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index b6e8ed757252..b4cea8b06ea1 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -51,7 +51,6 @@
#include <scsi/scsi_tcq.h>
#include <scsi/libfc.h>
#include <scsi/libfcoe.h>
-#include <scsi/fc_encode.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fip.h>
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 08fb7d5d08b3..16bb6d2f98de 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -2088,7 +2088,7 @@ static int __bnx2fc_disable(struct fcoe_ctlr *ctlr)
{
struct bnx2fc_interface *interface = fcoe_ctlr_priv(ctlr);
- if (interface->enabled == true) {
+ if (interface->enabled) {
if (!ctlr->lp) {
pr_err(PFX "__bnx2fc_disable: lport not found\n");
return -ENODEV;
@@ -2186,7 +2186,7 @@ static int __bnx2fc_enable(struct fcoe_ctlr *ctlr)
struct cnic_fc_npiv_tbl *npiv_tbl;
struct fc_lport *lport;
- if (interface->enabled == false) {
+ if (!interface->enabled) {
if (!ctlr->lp) {
pr_err(PFX "__bnx2fc_enable: lport not found\n");
return -ENODEV;
@@ -2277,7 +2277,7 @@ static int bnx2fc_ctlr_enabled(struct fcoe_ctlr_device *cdev)
case FCOE_CTLR_UNUSED:
default:
return -ENOTSUPP;
- };
+ }
}
enum bnx2fc_create_link_state {
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 08992095ce7a..b37b0a9ec12d 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -770,7 +770,6 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
} else
printk(KERN_ERR PFX "SRR in progress\n");
goto ret_err_rqe;
- break;
default:
break;
}
diff --git a/drivers/scsi/csiostor/csio_wr.c b/drivers/scsi/csiostor/csio_wr.c
index 9010cb6045dc..fe0355c964bc 100644
--- a/drivers/scsi/csiostor/csio_wr.c
+++ b/drivers/scsi/csiostor/csio_wr.c
@@ -830,6 +830,7 @@ csio_wr_destroy_queues(struct csio_hw *hw, bool cmd)
if (flq_idx != -1)
csio_q_flid(hw, flq_idx) = CSIO_MAX_QID;
}
+ break;
default:
break;
}
diff --git a/drivers/scsi/cxgbi/cxgb4i/Kconfig b/drivers/scsi/cxgbi/cxgb4i/Kconfig
index b206e266b4e7..8b0deece9758 100644
--- a/drivers/scsi/cxgbi/cxgb4i/Kconfig
+++ b/drivers/scsi/cxgbi/cxgb4i/Kconfig
@@ -4,6 +4,7 @@ config SCSI_CXGB4_ISCSI
depends on PCI && INET && (IPV6 || IPV6=n)
depends on THERMAL || !THERMAL
depends on ETHERNET
+ depends on TLS || TLS=n
select NET_VENDOR_CHELSIO
select CHELSIO_T4
select CHELSIO_LIB
diff --git a/drivers/scsi/cxlflash/ocxl_hw.c b/drivers/scsi/cxlflash/ocxl_hw.c
index e4e0d767b98e..244fc27215dc 100644
--- a/drivers/scsi/cxlflash/ocxl_hw.c
+++ b/drivers/scsi/cxlflash/ocxl_hw.c
@@ -329,6 +329,7 @@ static int start_context(struct ocxlflash_context *ctx)
struct ocxl_hw_afu *afu = ctx->hw_afu;
struct ocxl_afu_config *acfg = &afu->acfg;
void *link_token = afu->link_token;
+ struct pci_dev *pdev = afu->pdev;
struct device *dev = afu->dev;
bool master = ctx->master;
struct mm_struct *mm;
@@ -360,8 +361,9 @@ static int start_context(struct ocxlflash_context *ctx)
mm = current->mm;
}
- rc = ocxl_link_add_pe(link_token, ctx->pe, pid, 0, 0, mm,
- ocxlflash_xsl_fault, ctx);
+ rc = ocxl_link_add_pe(link_token, ctx->pe, pid, 0, 0,
+ pci_dev_id(pdev), mm, ocxlflash_xsl_fault,
+ ctx);
if (unlikely(rc)) {
dev_err(dev, "%s: ocxl_link_add_pe failed rc=%d\n",
__func__, rc);
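
The ocxl_link_add_pe() call above gains the function's PCI requester ID. pci_dev_id() is the generic helper that produces it; the layout is the standard bus/devfn packing (a minimal sketch, with an illustrative wrapper name):

    #include <linux/pci.h>

    /* The 16-bit Requester ID: bus number in bits 15:8,
     * devfn (device/function) in bits 7:0. */
    static u16 example_requester_id(struct pci_dev *pdev)
    {
        return pci_dev_id(pdev);  /* PCI_DEVID(pdev->bus->number, pdev->devfn) */
    }
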
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index fa16894d8758..7b522ff345d5 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -1356,7 +1356,7 @@ void selection_timeout_missed(unsigned long ptr)
static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,
struct ScsiReqBlk* srb)
{
- u16 s_stat2, return_code;
+ u16 __maybe_unused s_stat2, return_code;
u8 s_stat, scsicommand, i, identify_message;
u8 *ptr;
dprintkdbg(DBG_0, "start_scsi: (0x%p) <%02i-%i> srb=%p\n",
@@ -2397,7 +2397,6 @@ static void data_io_transfer(struct AdapterCtlBlk *acb,
}
#endif /* DC395x_LASTPIO */
else { /* xfer pad */
- u8 data = 0, data2 = 0;
if (srb->sg_count) {
srb->adapter_status = H_OVER_UNDER_RUN;
srb->status |= OVER_RUN;
@@ -2412,8 +2411,8 @@ static void data_io_transfer(struct AdapterCtlBlk *acb,
DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2,
CFG2_WIDEFIFO);
if (io_dir & DMACMD_DIR) {
- data = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
- data2 = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
+ DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
+ DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
} else {
/* Danger, Robinson: If you find KGs
* scattered over the wide disk, the driver
@@ -2427,7 +2426,7 @@ static void data_io_transfer(struct AdapterCtlBlk *acb,
/* Danger, Robinson: If you find a collection of Ks on your disk
* something broke :-( */
if (io_dir & DMACMD_DIR)
- data = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
+ DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
else
DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 'K');
}
@@ -2989,7 +2988,6 @@ static void reselect(struct AdapterCtlBlk *acb)
struct ScsiReqBlk *srb = NULL;
u16 rsel_tar_lun_id;
u8 id, lun;
- u8 arblostflag = 0;
dprintkdbg(DBG_0, "reselect: acb=%p\n", acb);
clear_fifo(acb, "reselect");
@@ -3011,7 +3009,6 @@ static void reselect(struct AdapterCtlBlk *acb)
srb->cmd, dcb->target_id,
dcb->target_lun, rsel_tar_lun_id,
DC395x_read16(acb, TRM_S1040_SCSI_STATUS));
- arblostflag = 1;
/*srb->state |= SRB_DISCONNECT; */
srb->state = SRB_READY;
@@ -3042,7 +3039,7 @@ static void reselect(struct AdapterCtlBlk *acb)
"disconnection? <%02i-%i>\n",
dcb->target_id, dcb->target_lun);
- if (dcb->sync_mode & EN_TAG_QUEUEING /*&& !arblostflag */) {
+ if (dcb->sync_mode & EN_TAG_QUEUEING) {
srb = acb->tmp_srb;
dcb->active_srb = srb;
} else {
@@ -3390,11 +3387,9 @@ static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag,
struct scsi_cmnd *p;
list_for_each_entry_safe(srb, tmp, &dcb->srb_going_list, list) {
- enum dma_data_direction dir;
int result;
p = srb->cmd;
- dir = p->sc_data_direction;
result = MK_RES(0, did_flag, 0, 0);
printk("G:%p(%02i-%i) ", p,
p->device->id, (u8)p->device->lun);
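
The dc395x hunks above drop locals that were written but never read: the FIFO reads are kept purely for their side effect of draining the chip's FIFO, and s_stat2, which only debug printouts consume, is annotated __maybe_unused rather than deleted. A minimal sketch reusing the driver's own accessors (the helper name is hypothetical):

    /* Drain one byte from the SCSI FIFO; the value is deliberately
     * discarded, so nothing is assigned. */
    static void drain_one_fifo_byte(struct AdapterCtlBlk *acb)
    {
        u16 __maybe_unused s_stat2;

        DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
        /* Only debug builds read this, so annotate instead of delete. */
        s_stat2 = DC395x_read16(acb, TRM_S1040_SCSI_STATUS);
        dprintkdbg(DBG_0, "status after drain: 0x%04x\n", s_stat2);
    }
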
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 308bda2e9c00..ea436a14087f 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -408,12 +408,20 @@ static char print_alua_state(unsigned char state)
static int alua_check_sense(struct scsi_device *sdev,
struct scsi_sense_hdr *sense_hdr)
{
+ struct alua_dh_data *h = sdev->handler_data;
+ struct alua_port_group *pg;
+
switch (sense_hdr->sense_key) {
case NOT_READY:
if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0a) {
/*
* LUN Not Accessible - ALUA state transition
*/
+ rcu_read_lock();
+ pg = rcu_dereference(h->pg);
+ if (pg)
+ pg->state = SCSI_ACCESS_STATE_TRANSITIONING;
+ rcu_read_unlock();
alua_check(sdev, false);
return NEEDS_RETRY;
}
@@ -1092,7 +1100,7 @@ static blk_status_t alua_prep_fn(struct scsi_device *sdev, struct request *req)
case SCSI_ACCESS_STATE_LBA:
return BLK_STS_OK;
case SCSI_ACCESS_STATE_TRANSITIONING:
- return BLK_STS_RESOURCE;
+ return BLK_STS_AGAIN;
default:
req->rq_flags |= RQF_QUIET;
return BLK_STS_IOERR;
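
The alua_check_sense() hunk above marks the port group as transitioning the moment the sense data reports an ALUA state transition; since the port group pointer is published via RCU, the update sits inside a read-side critical section. Restated as a standalone helper using the same calls as the hunk (the helper name is illustrative):

    static void alua_mark_transitioning(struct alua_dh_data *h)
    {
        struct alua_port_group *pg;

        rcu_read_lock();
        pg = rcu_dereference(h->pg);   /* may be NULL while ungrouped */
        if (pg)
            pg->state = SCSI_ACCESS_STATE_TRANSITIONING;
        rcu_read_unlock();
    }

The alua_prep_fn() hunk complements it: a request hitting a transitioning path now fails with BLK_STS_AGAIN, letting the submitter retry or pick another path, rather than BLK_STS_RESOURCE, which would keep the request requeued against the same stalled path for as long as the transition lasts.
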
diff --git a/drivers/scsi/esas2r/esas2r.h b/drivers/scsi/esas2r/esas2r.h
index e30d2f1f5368..ed63f7a9ea54 100644
--- a/drivers/scsi/esas2r/esas2r.h
+++ b/drivers/scsi/esas2r/esas2r.h
@@ -996,8 +996,9 @@ void esas2r_adapter_tasklet(unsigned long context);
irqreturn_t esas2r_interrupt(int irq, void *dev_id);
irqreturn_t esas2r_msi_interrupt(int irq, void *dev_id);
void esas2r_kickoff_timer(struct esas2r_adapter *a);
-int esas2r_suspend(struct pci_dev *pcid, pm_message_t state);
-int esas2r_resume(struct pci_dev *pcid);
+
+extern const struct dev_pm_ops esas2r_pm_ops;
+
void esas2r_fw_event_off(struct esas2r_adapter *a);
void esas2r_fw_event_on(struct esas2r_adapter *a);
bool esas2r_nvram_write(struct esas2r_adapter *a, struct esas2r_request *rq,
diff --git a/drivers/scsi/esas2r/esas2r_disc.c b/drivers/scsi/esas2r/esas2r_disc.c
index 1c079f4300a5..ba42536d1e87 100644
--- a/drivers/scsi/esas2r/esas2r_disc.c
+++ b/drivers/scsi/esas2r/esas2r_disc.c
@@ -1031,8 +1031,9 @@ static u32 esas2r_disc_get_phys_addr(struct esas2r_sg_context *sgc, u64 *addr)
{
struct esas2r_adapter *a = sgc->adapter;
- if (sgc->length > ESAS2R_DISC_BUF_LEN)
+ if (sgc->length > ESAS2R_DISC_BUF_LEN) {
esas2r_bugon();
+ }
*addr = a->uncached_phys
+ (u64)((u8 *)a->disc_buffer - a->uncached);
diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c
index 09c5c24bf391..c1a5ab662dc8 100644
--- a/drivers/scsi/esas2r/esas2r_init.c
+++ b/drivers/scsi/esas2r/esas2r_init.c
@@ -412,10 +412,11 @@ int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid,
esas2r_disable_chip_interrupts(a);
esas2r_check_adapter(a);
- if (!esas2r_init_adapter_hw(a, true))
+ if (!esas2r_init_adapter_hw(a, true)) {
esas2r_log(ESAS2R_LOG_CRIT, "failed to initialize hardware!");
- else
+ } else {
esas2r_debug("esas2r_init_adapter ok");
+ }
esas2r_claim_interrupts(a);
@@ -640,53 +641,27 @@ void esas2r_kill_adapter(int i)
}
}
-int esas2r_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused esas2r_suspend(struct device *dev)
{
- struct Scsi_Host *host = pci_get_drvdata(pdev);
- u32 device_state;
+ struct Scsi_Host *host = dev_get_drvdata(dev);
struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata;
- esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "suspending adapter()");
+ esas2r_log_dev(ESAS2R_LOG_INFO, dev, "suspending adapter()");
if (!a)
return -ENODEV;
esas2r_adapter_power_down(a, 1);
- device_state = pci_choose_state(pdev, state);
- esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
- "pci_save_state() called");
- pci_save_state(pdev);
- esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
- "pci_disable_device() called");
- pci_disable_device(pdev);
- esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
- "pci_set_power_state() called");
- pci_set_power_state(pdev, device_state);
- esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "esas2r_suspend(): 0");
+ esas2r_log_dev(ESAS2R_LOG_INFO, dev, "esas2r_suspend(): 0");
return 0;
}
-int esas2r_resume(struct pci_dev *pdev)
+static int __maybe_unused esas2r_resume(struct device *dev)
{
- struct Scsi_Host *host = pci_get_drvdata(pdev);
+ struct Scsi_Host *host = dev_get_drvdata(dev);
struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata;
- int rez;
-
- esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "resuming adapter()");
- esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
- "pci_set_power_state(PCI_D0) "
- "called");
- pci_set_power_state(pdev, PCI_D0);
- esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
- "pci_enable_wake(PCI_D0, 0) "
- "called");
- pci_enable_wake(pdev, PCI_D0, 0);
- esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
- "pci_restore_state() called");
- pci_restore_state(pdev);
- esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
- "pci_enable_device() called");
- rez = pci_enable_device(pdev);
- pci_set_master(pdev);
+ int rez = 0;
+
+ esas2r_log_dev(ESAS2R_LOG_INFO, dev, "resuming adapter()");
if (!a) {
rez = -ENODEV;
@@ -730,11 +705,13 @@ int esas2r_resume(struct pci_dev *pdev)
}
error_exit:
- esas2r_log_dev(ESAS2R_LOG_CRIT, &(pdev->dev), "esas2r_resume(): %d",
+ esas2r_log_dev(ESAS2R_LOG_CRIT, dev, "esas2r_resume(): %d",
rez);
return rez;
}
+SIMPLE_DEV_PM_OPS(esas2r_pm_ops, esas2r_suspend, esas2r_resume);
+
bool esas2r_set_degraded_mode(struct esas2r_adapter *a, char *error_str)
{
set_bit(AF_DEGRADED_MODE, &a->flags);
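
The esas2r hunks above follow the same conversion as arcmsr earlier in this series: the legacy pci_driver .suspend/.resume hooks, which had to save and restore PCI config space by hand, become dev_pm_ops callbacks where the PCI core does that bookkeeping. A minimal sketch of the shared pattern (names are illustrative):

    #include <linux/pci.h>
    #include <linux/pm.h>

    /* __maybe_unused because SIMPLE_DEV_PM_OPS only references the
     * callbacks when CONFIG_PM_SLEEP is enabled. */
    static int __maybe_unused example_suspend(struct device *dev)
    {
        struct pci_dev *pdev = to_pci_dev(dev);

        dev_dbg(&pdev->dev, "quiescing for suspend\n");
        /* Driver-specific quiesce only: the PCI core now handles
         * pci_save_state()/pci_disable_device() and the power state. */
        return 0;
    }

    static int __maybe_unused example_resume(struct device *dev)
    {
        dev_dbg(dev, "resuming\n");
        /* Config space is restored and the device re-enabled before
         * this runs; only driver state needs rebuilding here. */
        return 0;
    }

    static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);

    static struct pci_driver example_pci_driver = {
        .name      = "example",
        .driver.pm = &example_pm_ops,
        /* legacy .suspend/.resume hooks removed */
    };
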
diff --git a/drivers/scsi/esas2r/esas2r_int.c b/drivers/scsi/esas2r/esas2r_int.c
index f16d6bcf9bb6..5281d9356327 100644
--- a/drivers/scsi/esas2r/esas2r_int.c
+++ b/drivers/scsi/esas2r/esas2r_int.c
@@ -688,8 +688,9 @@ static void esas2r_doorbell_interrupt(struct esas2r_adapter *a, u32 doorbell)
esas2r_local_reset_adapter(a);
}
- if (!(doorbell & DRBL_FORCE_INT))
+ if (!(doorbell & DRBL_FORCE_INT)) {
esas2r_trace_exit();
+ }
}
void esas2r_force_interrupt(struct esas2r_adapter *a)
@@ -862,10 +863,11 @@ void esas2r_send_reset_ae(struct esas2r_adapter *a, bool pwr_mgt)
ae.byflags = 0;
ae.bylength = (u8)sizeof(struct atto_vda_ae_hdr);
- if (pwr_mgt)
+ if (pwr_mgt) {
esas2r_hdebug("*** sending power management AE ***");
- else
+ } else {
esas2r_hdebug("*** sending reset AE ***");
+ }
esas2r_queue_fw_event(a, fw_event_vda_ae, &ae,
sizeof(union atto_vda_ae));
diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c
index 7b49e2e9fcde..a9dd6345f064 100644
--- a/drivers/scsi/esas2r/esas2r_main.c
+++ b/drivers/scsi/esas2r/esas2r_main.c
@@ -346,8 +346,7 @@ static struct pci_driver
.id_table = esas2r_pci_table,
.probe = esas2r_probe,
.remove = esas2r_remove,
- .suspend = esas2r_suspend,
- .resume = esas2r_resume,
+ .driver.pm = &esas2r_pm_ops,
};
static int esas2r_probe(struct pci_dev *pcid,
@@ -894,15 +893,11 @@ static void complete_task_management_request(struct esas2r_adapter *a,
esas2r_free_request(a, rq);
}
-/**
+/*
 * Searches the specified queue for the command
* to abort.
*
- * @param [in] a
- * @param [in] abort_request
- * @param [in] cmd
- * t
- * @return 0 on failure, 1 if command was not found, 2 if command was found
+ * Return 0 on failure, 1 if command was not found, 2 if command was found
*/
static int esas2r_check_active_queue(struct esas2r_adapter *a,
struct esas2r_request **abort_request,
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 0f9274960dc6..03bf49adaafe 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -1894,7 +1894,6 @@ static int fcoe_device_notification(struct notifier_block *notifier,
mutex_unlock(&fcoe_config_mutex);
fcoe_ctlr_device_delete(fcoe_ctlr_to_ctlr_dev(ctlr));
goto out;
- break;
case NETDEV_FEAT_CHANGE:
fcoe_netdev_features_change(lport, netdev);
break;
@@ -2024,7 +2023,7 @@ static int fcoe_ctlr_enabled(struct fcoe_ctlr_device *cdev)
case FCOE_CTLR_UNUSED:
default:
return -ENOTSUPP;
- };
+ }
}
/**
diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
index ffef2c8eddc6..af658aa38fed 100644
--- a/drivers/scsi/fcoe/fcoe_sysfs.c
+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
@@ -312,7 +312,7 @@ static ssize_t store_ctlr_mode(struct device *dev,
default:
LIBFCOE_SYSFS_DBG(ctlr, "Mode change not supported.\n");
return -ENOTSUPP;
- };
+ }
}
static FCOE_DEVICE_ATTR(ctlr, mode, S_IRUGO | S_IWUSR,
@@ -346,7 +346,7 @@ static ssize_t store_ctlr_enabled(struct device *dev,
break;
case FCOE_CTLR_UNUSED:
return -ENOTSUPP;
- };
+ }
rc = ctlr->f->set_fcoe_ctlr_enabled(ctlr);
if (rc)
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index 477513dc23b7..69f373b53132 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -39,7 +39,7 @@
#define DRV_NAME "fnic"
#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
-#define DRV_VERSION "1.6.0.47"
+#define DRV_VERSION "1.6.0.53"
#define PFX DRV_NAME ": "
#define DFX DRV_NAME "%d: "
@@ -245,6 +245,7 @@ struct fnic {
u32 vlan_hw_insert:1; /* let hw insert the tag */
u32 in_remove:1; /* fnic device in removal */
u32 stop_rx_link_events:1; /* stop proc. rx frames, link events */
u32 link_events:1; /* set when we get any link event */
struct completion *remove_wait; /* device remove thread blocks */
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index e3384afb7cbd..e0cee4dcb439 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -56,6 +56,8 @@ void fnic_handle_link(struct work_struct *work)
spin_lock_irqsave(&fnic->fnic_lock, flags);
+ fnic->link_events = 1; /* less work to just set every time */
+
if (fnic->stop_rx_link_events) {
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
return;
@@ -73,7 +75,7 @@ void fnic_handle_link(struct work_struct *work)
atomic64_set(&fnic->fnic_stats.misc_stats.current_port_speed,
new_port_speed);
if (old_port_speed != new_port_speed)
- shost_printk(KERN_INFO, fnic->lport->host,
+ FNIC_MAIN_DBG(KERN_INFO, fnic->lport->host,
"Current vnic speed set to : %llu\n",
new_port_speed);
@@ -1349,7 +1351,7 @@ void fnic_handle_fip_timer(struct fnic *fnic)
}
vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
- shost_printk(KERN_DEBUG, fnic->lport->host,
+ FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
"fip_timer: vlan %d state %d sol_count %d\n",
vlan->vid, vlan->state, vlan->sol_count);
switch (vlan->state) {
@@ -1372,7 +1374,7 @@ void fnic_handle_fip_timer(struct fnic *fnic)
* no response on this vlan, remove from the list.
* Try the next vlan
*/
- shost_printk(KERN_INFO, fnic->lport->host,
+ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
"Dequeue this VLAN ID %d from list\n",
vlan->vid);
list_del(&vlan->list);
@@ -1382,7 +1384,7 @@ void fnic_handle_fip_timer(struct fnic *fnic)
/* we exhausted all vlans, restart vlan disc */
spin_unlock_irqrestore(&fnic->vlans_lock,
flags);
- shost_printk(KERN_INFO, fnic->lport->host,
+ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
"fip_timer: vlan list empty, "
"trigger vlan disc\n");
fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 5f8a7ef8f6a8..186c3ab4456b 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -580,6 +580,8 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
fnic->lport = lp;
fnic->ctlr.lp = lp;
+ fnic->link_events = 0;
+
snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME,
host->host_no);
@@ -740,6 +742,7 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
for (i = 0; i < FNIC_IO_LOCKS; i++)
spin_lock_init(&fnic->io_req_lock[i]);
+ err = -ENOMEM;
fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache);
if (!fnic->io_req_pool)
goto err_out_free_resources;
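
The one-line hunk above preloads err with -ENOMEM before the pool allocations: otherwise a failed mempool_create_slab_pool() would jump to the error label with err still holding zero from the last successful step, turning an allocation failure into a bogus success. A minimal sketch of the convention, with hypothetical allocators:

    void *alloc_a(void);     /* hypothetical */
    void *alloc_b(void);     /* hypothetical */
    void free_a(void *p);    /* hypothetical */

    static int example_setup(void)
    {
        void *a, *b;
        int err;

        a = alloc_a();
        err = -ENOMEM;           /* covers both failure checks below */
        if (!a)
            goto out;
        b = alloc_b();
        if (!b)
            goto out_free_a;
        return 0;

    out_free_a:
        free_a(a);
    out:
        return err;              /* never a stale zero */
    }
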
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index d1f7b84bbfe8..36744968378f 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -921,10 +921,11 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
case FCPIO_SUCCESS:
sc->result = (DID_OK << 16) | icmnd_cmpl->scsi_status;
xfer_len = scsi_bufflen(sc);
- scsi_set_resid(sc, icmnd_cmpl->residual);
- if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER)
+ if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER) {
xfer_len -= icmnd_cmpl->residual;
+ scsi_set_resid(sc, icmnd_cmpl->residual);
+ }
if (icmnd_cmpl->scsi_status == SAM_STAT_CHECK_CONDITION)
atomic64_inc(&fnic_stats->misc_stats.check_condition);
@@ -1734,15 +1735,14 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
continue;
}
- cmd_rport = starget_to_rport(scsi_target(sc->device));
- if (rport != cmd_rport) {
+ io_req = (struct fnic_io_req *)CMD_SP(sc);
+ if (!io_req) {
spin_unlock_irqrestore(io_lock, flags);
continue;
}
- io_req = (struct fnic_io_req *)CMD_SP(sc);
-
- if (!io_req || rport != cmd_rport) {
+ cmd_rport = starget_to_rport(scsi_target(sc->device));
+ if (rport != cmd_rport) {
spin_unlock_irqrestore(io_lock, flags);
continue;
}
@@ -2673,7 +2673,8 @@ void fnic_scsi_abort_io(struct fc_lport *lp)
/* Issue firmware reset for fnic, wait for reset to complete */
retry_fw_reset:
spin_lock_irqsave(&fnic->fnic_lock, flags);
- if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
+ if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) &&
+ fnic->link_events) {
/* fw reset is in progress, poll for its completion */
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
schedule_timeout(msecs_to_jiffies(100));
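
The FCPIO_SUCCESS hunk earlier in this file fixes residual reporting: scsi_set_resid() used to run unconditionally, so a meaningless residual could be reported even when the firmware did not flag an underrun. Restated with the gating made explicit (field and helper names as in the hunk):

    case FCPIO_SUCCESS:
        sc->result = (DID_OK << 16) | icmnd_cmpl->scsi_status;
        xfer_len = scsi_bufflen(sc);
        if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER) {
            xfer_len -= icmnd_cmpl->residual;   /* actual bytes moved */
            scsi_set_resid(sc, icmnd_cmpl->residual);
        }
        break;

The fnic_terminate_rport_io() hunk applies a related ordering fix: the io_req NULL check now happens before the rport comparison, so neither test dereferences state the other was supposed to guard.
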
diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c
index a2beee6e09f0..5988c300cc82 100644
--- a/drivers/scsi/fnic/vnic_dev.c
+++ b/drivers/scsi/fnic/vnic_dev.c
@@ -444,7 +444,8 @@ static int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
fetch_index = ioread32(&vdev->devcmd2->wq.ctrl->fetch_index);
if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */
pr_err("error in devcmd2 init");
- return -ENODEV;
+ err = -ENODEV;
+ goto err_free_wq;
}
/*
@@ -460,7 +461,7 @@ static int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
err = vnic_dev_alloc_desc_ring(vdev, &vdev->devcmd2->results_ring,
DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE);
if (err)
- goto err_free_wq;
+ goto err_disable_wq;
vdev->devcmd2->result =
(struct devcmd2_result *) vdev->devcmd2->results_ring.descs;
@@ -481,8 +482,9 @@ static int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
err_free_desc_ring:
vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
-err_free_wq:
+err_disable_wq:
vnic_wq_disable(&vdev->devcmd2->wq);
+err_free_wq:
vnic_wq_free(&vdev->devcmd2->wq);
err_free_devcmd2:
kfree(vdev->devcmd2);
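
The vnic_dev hunks above fix two unwind bugs: a failed fetch-index read used to bare-return and leak the work queue, and a later failure jumped to a label that disabled a queue that had never been enabled. The rule is that error labels unwind in exact reverse order of the setup steps that succeeded; a minimal sketch with hypothetical helpers:

    struct ctx;                               /* hypothetical */
    int  alloc_wq(struct ctx *c);             /* 0 on success */
    int  enable_wq(struct ctx *c);
    int  alloc_ring(struct ctx *c);
    void disable_wq(struct ctx *c);
    void free_wq(struct ctx *c);

    static int example_init(struct ctx *c)
    {
        int err;

        err = alloc_wq(c);
        if (err)
            return err;

        err = enable_wq(c);
        if (err)
            goto err_free_wq;     /* wq allocated but never enabled */

        err = alloc_ring(c);
        if (err)
            goto err_disable_wq;  /* wq allocated AND enabled */

        return 0;

    err_disable_wq:
        disable_wq(c);
    err_free_wq:
        free_wq(c);
        return err;
    }
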
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
index 29e4cdcade72..2df2f38a9b12 100644
--- a/drivers/scsi/g_NCR5380.c
+++ b/drivers/scsi/g_NCR5380.c
@@ -529,14 +529,14 @@ static inline int generic_NCR5380_precv(struct NCR5380_hostdata *hostdata,
if (start == len - 128) {
/* Ignore End of DMA interrupt for the final buffer */
if (NCR5380_poll_politely(hostdata, hostdata->c400_ctl_status,
- CSR_HOST_BUF_NOT_RDY, 0, HZ / 64) < 0)
+ CSR_HOST_BUF_NOT_RDY, 0, 0) < 0)
break;
} else {
if (NCR5380_poll_politely2(hostdata, hostdata->c400_ctl_status,
CSR_HOST_BUF_NOT_RDY, 0,
hostdata->c400_ctl_status,
CSR_GATED_53C80_IRQ,
- CSR_GATED_53C80_IRQ, HZ / 64) < 0 ||
+ CSR_GATED_53C80_IRQ, 0) < 0 ||
NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY)
break;
}
@@ -565,7 +565,7 @@ static inline int generic_NCR5380_precv(struct NCR5380_hostdata *hostdata,
if (residual == 0 && NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
BASR_END_DMA_TRANSFER,
BASR_END_DMA_TRANSFER,
- HZ / 64) < 0)
+ 0) < 0)
scmd_printk(KERN_ERR, hostdata->connected, "%s: End of DMA timeout\n",
__func__);
@@ -597,7 +597,7 @@ static inline int generic_NCR5380_psend(struct NCR5380_hostdata *hostdata,
CSR_HOST_BUF_NOT_RDY, 0,
hostdata->c400_ctl_status,
CSR_GATED_53C80_IRQ,
- CSR_GATED_53C80_IRQ, HZ / 64) < 0 ||
+ CSR_GATED_53C80_IRQ, 0) < 0 ||
NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY) {
/* Both 128 B buffers are in use */
if (start >= 128)
@@ -644,13 +644,13 @@ static inline int generic_NCR5380_psend(struct NCR5380_hostdata *hostdata,
if (residual == 0) {
if (NCR5380_poll_politely(hostdata, TARGET_COMMAND_REG,
TCR_LAST_BYTE_SENT, TCR_LAST_BYTE_SENT,
- HZ / 64) < 0)
+ 0) < 0)
scmd_printk(KERN_ERR, hostdata->connected,
"%s: Last Byte Sent timeout\n", __func__);
if (NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
BASR_END_DMA_TRANSFER, BASR_END_DMA_TRANSFER,
- HZ / 64) < 0)
+ 0) < 0)
scmd_printk(KERN_ERR, hostdata->connected, "%s: End of DMA timeout\n",
__func__);
}
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index a25cfc11c96d..e821dd32dd28 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -14,6 +14,7 @@
#include <linux/debugfs.h>
#include <linux/dmapool.h>
#include <linux/iopoll.h>
+#include <linux/irq.h>
#include <linux/lcm.h>
#include <linux/libata.h>
#include <linux/mfd/syscon.h>
@@ -243,24 +244,6 @@ struct hisi_sas_slot {
u16 idx;
};
-#define HISI_SAS_DEBUGFS_REG(x) {#x, x}
-
-struct hisi_sas_debugfs_reg_lu {
- char *name;
- int off;
-};
-
-struct hisi_sas_debugfs_reg {
- const struct hisi_sas_debugfs_reg_lu *lu;
- int count;
- int base_off;
- union {
- u32 (*read_global_reg)(struct hisi_hba *hisi_hba, u32 off);
- u32 (*read_port_reg)(struct hisi_hba *hisi_hba, int port,
- u32 off);
- };
-};
-
struct hisi_sas_iost_itct_cache {
u32 data[HISI_SAS_IOST_ITCT_CACHE_DW_SZ];
};
@@ -312,6 +295,7 @@ enum {
struct hisi_sas_hw {
int (*hw_init)(struct hisi_hba *hisi_hba);
+ int (*interrupt_preinit)(struct hisi_hba *hisi_hba);
void (*setup_itct)(struct hisi_hba *hisi_hba,
struct hisi_sas_device *device);
int (*slot_index_alloc)(struct hisi_hba *hisi_hba,
@@ -350,15 +334,8 @@ struct hisi_sas_hw {
int delay_ms, int timeout_ms);
void (*snapshot_prepare)(struct hisi_hba *hisi_hba);
void (*snapshot_restore)(struct hisi_hba *hisi_hba);
- int (*set_bist)(struct hisi_hba *hisi_hba, bool enable);
- void (*read_iost_itct_cache)(struct hisi_hba *hisi_hba,
- enum hisi_sas_debugfs_cache_type type,
- u32 *cache);
int complete_hdr_size;
struct scsi_host_template *sht;
-
- const struct hisi_sas_debugfs_reg *debugfs_reg_array[DEBUGFS_REGS_NUM];
- const struct hisi_sas_debugfs_reg *debugfs_reg_port;
};
#define HISI_SAS_MAX_DEBUGFS_DUMP (50)
@@ -418,6 +395,8 @@ struct hisi_hba {
u32 refclk_frequency_mhz;
u8 sas_addr[SAS_ADDR_SIZE];
+ int *irq_map; /* v2 hw */
+
int n_phy;
spinlock_t lock;
struct semaphore sem;
@@ -673,7 +652,4 @@ extern void hisi_sas_release_tasks(struct hisi_hba *hisi_hba);
extern u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max);
extern void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba);
extern void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba);
-extern void hisi_sas_debugfs_init(struct hisi_hba *hisi_hba);
-extern void hisi_sas_debugfs_exit(struct hisi_hba *hisi_hba);
-extern void hisi_sas_debugfs_work_handler(struct work_struct *work);
#endif
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 274ccf18ce2d..cf0bfac920a8 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -591,13 +591,7 @@ static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
dev = hisi_hba->dev;
if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) {
- /*
- * For IOs from upper layer, it may already disable preempt
- * in the IO path, if disable preempt again in down(),
- * function schedule() will report schedule_bug(), so check
- * preemptible() before goto down().
- */
- if (!preemptible())
+ if (!gfpflags_allow_blocking(gfp_flags))
return -EINVAL;
down(&hisi_hba->sem);
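
The hunk above replaces the preemptible() heuristic with gfpflags_allow_blocking(): when CONFIG_PREEMPT_COUNT is disabled, preemptible() is hardwired to 0 and cannot tell whether sleeping is allowed, whereas the caller's gfp mask states that directly. A minimal sketch of the idiom, assuming a semaphore guarding a reset path:

    #include <linux/gfp.h>
    #include <linux/semaphore.h>

    /* Derive "may I sleep?" from the caller's gfp mask rather than
     * from preempt state, which may be untracked. */
    static int enter_quiesced_section(struct semaphore *sem, gfp_t gfp_flags)
    {
        if (!gfpflags_allow_blocking(gfp_flags))
            return -EINVAL;       /* atomic caller: must not block */
        down(sem);                /* may sleep */
        return 0;
    }
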
@@ -2620,6 +2614,13 @@ err_out:
return NULL;
}
+static int hisi_sas_interrupt_preinit(struct hisi_hba *hisi_hba)
+{
+ if (hisi_hba->hw->interrupt_preinit)
+ return hisi_hba->hw->interrupt_preinit(hisi_hba);
+ return 0;
+}
+
int hisi_sas_probe(struct platform_device *pdev,
const struct hisi_sas_hw *hw)
{
@@ -2677,6 +2678,10 @@ int hisi_sas_probe(struct platform_device *pdev,
sha->sas_port[i] = &hisi_hba->port[i].sas_port;
}
+ rc = hisi_sas_interrupt_preinit(hisi_hba);
+ if (rc)
+ goto err_out_ha;
+
rc = scsi_add_host(shost, &pdev->dev);
if (rc)
goto err_out_ha;
@@ -2696,1355 +2701,12 @@ int hisi_sas_probe(struct platform_device *pdev,
err_out_register_ha:
scsi_remove_host(shost);
err_out_ha:
- hisi_sas_debugfs_exit(hisi_hba);
hisi_sas_free(hisi_hba);
scsi_host_put(shost);
return rc;
}
EXPORT_SYMBOL_GPL(hisi_sas_probe);
-struct dentry *hisi_sas_debugfs_dir;
-
-static void hisi_sas_debugfs_snapshot_cq_reg(struct hisi_hba *hisi_hba)
-{
- int queue_entry_size = hisi_hba->hw->complete_hdr_size;
- int dump_index = hisi_hba->debugfs_dump_index;
- int i;
-
- for (i = 0; i < hisi_hba->queue_count; i++)
- memcpy(hisi_hba->debugfs_cq[dump_index][i].complete_hdr,
- hisi_hba->complete_hdr[i],
- HISI_SAS_QUEUE_SLOTS * queue_entry_size);
-}
-
-static void hisi_sas_debugfs_snapshot_dq_reg(struct hisi_hba *hisi_hba)
-{
- int queue_entry_size = sizeof(struct hisi_sas_cmd_hdr);
- int dump_index = hisi_hba->debugfs_dump_index;
- int i;
-
- for (i = 0; i < hisi_hba->queue_count; i++) {
- struct hisi_sas_cmd_hdr *debugfs_cmd_hdr, *cmd_hdr;
- int j;
-
- debugfs_cmd_hdr = hisi_hba->debugfs_dq[dump_index][i].hdr;
- cmd_hdr = hisi_hba->cmd_hdr[i];
-
- for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++)
- memcpy(&debugfs_cmd_hdr[j], &cmd_hdr[j],
- queue_entry_size);
- }
-}
-
-static void hisi_sas_debugfs_snapshot_port_reg(struct hisi_hba *hisi_hba)
-{
- int dump_index = hisi_hba->debugfs_dump_index;
- const struct hisi_sas_debugfs_reg *port =
- hisi_hba->hw->debugfs_reg_port;
- int i, phy_cnt;
- u32 offset;
- u32 *databuf;
-
- for (phy_cnt = 0; phy_cnt < hisi_hba->n_phy; phy_cnt++) {
- databuf = hisi_hba->debugfs_port_reg[dump_index][phy_cnt].data;
- for (i = 0; i < port->count; i++, databuf++) {
- offset = port->base_off + 4 * i;
- *databuf = port->read_port_reg(hisi_hba, phy_cnt,
- offset);
- }
- }
-}
-
-static void hisi_sas_debugfs_snapshot_global_reg(struct hisi_hba *hisi_hba)
-{
- int dump_index = hisi_hba->debugfs_dump_index;
- u32 *databuf = hisi_hba->debugfs_regs[dump_index][DEBUGFS_GLOBAL].data;
- const struct hisi_sas_hw *hw = hisi_hba->hw;
- const struct hisi_sas_debugfs_reg *global =
- hw->debugfs_reg_array[DEBUGFS_GLOBAL];
- int i;
-
- for (i = 0; i < global->count; i++, databuf++)
- *databuf = global->read_global_reg(hisi_hba, 4 * i);
-}
-
-static void hisi_sas_debugfs_snapshot_axi_reg(struct hisi_hba *hisi_hba)
-{
- int dump_index = hisi_hba->debugfs_dump_index;
- u32 *databuf = hisi_hba->debugfs_regs[dump_index][DEBUGFS_AXI].data;
- const struct hisi_sas_hw *hw = hisi_hba->hw;
- const struct hisi_sas_debugfs_reg *axi =
- hw->debugfs_reg_array[DEBUGFS_AXI];
- int i;
-
- for (i = 0; i < axi->count; i++, databuf++)
- *databuf = axi->read_global_reg(hisi_hba,
- 4 * i + axi->base_off);
-}
-
-static void hisi_sas_debugfs_snapshot_ras_reg(struct hisi_hba *hisi_hba)
-{
- int dump_index = hisi_hba->debugfs_dump_index;
- u32 *databuf = hisi_hba->debugfs_regs[dump_index][DEBUGFS_RAS].data;
- const struct hisi_sas_hw *hw = hisi_hba->hw;
- const struct hisi_sas_debugfs_reg *ras =
- hw->debugfs_reg_array[DEBUGFS_RAS];
- int i;
-
- for (i = 0; i < ras->count; i++, databuf++)
- *databuf = ras->read_global_reg(hisi_hba,
- 4 * i + ras->base_off);
-}
-
-static void hisi_sas_debugfs_snapshot_itct_reg(struct hisi_hba *hisi_hba)
-{
- int dump_index = hisi_hba->debugfs_dump_index;
- void *cachebuf = hisi_hba->debugfs_itct_cache[dump_index].cache;
- void *databuf = hisi_hba->debugfs_itct[dump_index].itct;
- struct hisi_sas_itct *itct;
- int i;
-
- hisi_hba->hw->read_iost_itct_cache(hisi_hba, HISI_SAS_ITCT_CACHE,
- cachebuf);
-
- itct = hisi_hba->itct;
-
- for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, itct++) {
- memcpy(databuf, itct, sizeof(struct hisi_sas_itct));
- databuf += sizeof(struct hisi_sas_itct);
- }
-}
-
-static void hisi_sas_debugfs_snapshot_iost_reg(struct hisi_hba *hisi_hba)
-{
- int dump_index = hisi_hba->debugfs_dump_index;
- int max_command_entries = HISI_SAS_MAX_COMMANDS;
- void *cachebuf = hisi_hba->debugfs_iost_cache[dump_index].cache;
- void *databuf = hisi_hba->debugfs_iost[dump_index].iost;
- struct hisi_sas_iost *iost;
- int i;
-
- hisi_hba->hw->read_iost_itct_cache(hisi_hba, HISI_SAS_IOST_CACHE,
- cachebuf);
-
- iost = hisi_hba->iost;
-
- for (i = 0; i < max_command_entries; i++, iost++) {
- memcpy(databuf, iost, sizeof(struct hisi_sas_iost));
- databuf += sizeof(struct hisi_sas_iost);
- }
-}
-
-static const char *
-hisi_sas_debugfs_to_reg_name(int off, int base_off,
- const struct hisi_sas_debugfs_reg_lu *lu)
-{
- for (; lu->name; lu++) {
- if (off == lu->off - base_off)
- return lu->name;
- }
-
- return NULL;
-}
-
-static void hisi_sas_debugfs_print_reg(u32 *regs_val, const void *ptr,
- struct seq_file *s)
-{
- const struct hisi_sas_debugfs_reg *reg = ptr;
- int i;
-
- for (i = 0; i < reg->count; i++) {
- int off = i * 4;
- const char *name;
-
- name = hisi_sas_debugfs_to_reg_name(off, reg->base_off,
- reg->lu);
-
- if (name)
- seq_printf(s, "0x%08x 0x%08x %s\n", off,
- regs_val[i], name);
- else
- seq_printf(s, "0x%08x 0x%08x\n", off,
- regs_val[i]);
- }
-}
-
-static int hisi_sas_debugfs_global_show(struct seq_file *s, void *p)
-{
- struct hisi_sas_debugfs_regs *global = s->private;
- struct hisi_hba *hisi_hba = global->hisi_hba;
- const struct hisi_sas_hw *hw = hisi_hba->hw;
- const void *reg_global = hw->debugfs_reg_array[DEBUGFS_GLOBAL];
-
- hisi_sas_debugfs_print_reg(global->data,
- reg_global, s);
-
- return 0;
-}
-
-static int hisi_sas_debugfs_global_open(struct inode *inode, struct file *filp)
-{
- return single_open(filp, hisi_sas_debugfs_global_show,
- inode->i_private);
-}
-
-static const struct file_operations hisi_sas_debugfs_global_fops = {
- .open = hisi_sas_debugfs_global_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int hisi_sas_debugfs_axi_show(struct seq_file *s, void *p)
-{
- struct hisi_sas_debugfs_regs *axi = s->private;
- struct hisi_hba *hisi_hba = axi->hisi_hba;
- const struct hisi_sas_hw *hw = hisi_hba->hw;
- const void *reg_axi = hw->debugfs_reg_array[DEBUGFS_AXI];
-
- hisi_sas_debugfs_print_reg(axi->data,
- reg_axi, s);
-
- return 0;
-}
-
-static int hisi_sas_debugfs_axi_open(struct inode *inode, struct file *filp)
-{
- return single_open(filp, hisi_sas_debugfs_axi_show,
- inode->i_private);
-}
-
-static const struct file_operations hisi_sas_debugfs_axi_fops = {
- .open = hisi_sas_debugfs_axi_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int hisi_sas_debugfs_ras_show(struct seq_file *s, void *p)
-{
- struct hisi_sas_debugfs_regs *ras = s->private;
- struct hisi_hba *hisi_hba = ras->hisi_hba;
- const struct hisi_sas_hw *hw = hisi_hba->hw;
- const void *reg_ras = hw->debugfs_reg_array[DEBUGFS_RAS];
-
- hisi_sas_debugfs_print_reg(ras->data,
- reg_ras, s);
-
- return 0;
-}
-
-static int hisi_sas_debugfs_ras_open(struct inode *inode, struct file *filp)
-{
- return single_open(filp, hisi_sas_debugfs_ras_show,
- inode->i_private);
-}
-
-static const struct file_operations hisi_sas_debugfs_ras_fops = {
- .open = hisi_sas_debugfs_ras_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int hisi_sas_debugfs_port_show(struct seq_file *s, void *p)
-{
- struct hisi_sas_debugfs_port *port = s->private;
- struct hisi_sas_phy *phy = port->phy;
- struct hisi_hba *hisi_hba = phy->hisi_hba;
- const struct hisi_sas_hw *hw = hisi_hba->hw;
- const struct hisi_sas_debugfs_reg *reg_port = hw->debugfs_reg_port;
-
- hisi_sas_debugfs_print_reg(port->data, reg_port, s);
-
- return 0;
-}
-
-static int hisi_sas_debugfs_port_open(struct inode *inode, struct file *filp)
-{
- return single_open(filp, hisi_sas_debugfs_port_show, inode->i_private);
-}
-
-static const struct file_operations hisi_sas_debugfs_port_fops = {
- .open = hisi_sas_debugfs_port_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static void hisi_sas_show_row_64(struct seq_file *s, int index,
- int sz, __le64 *ptr)
-{
- int i;
-
- /* completion header size not fixed per HW version */
- seq_printf(s, "index %04d:\n\t", index);
- for (i = 1; i <= sz / 8; i++, ptr++) {
- seq_printf(s, " 0x%016llx", le64_to_cpu(*ptr));
- if (!(i % 2))
- seq_puts(s, "\n\t");
- }
-
- seq_puts(s, "\n");
-}
-
-static void hisi_sas_show_row_32(struct seq_file *s, int index,
- int sz, __le32 *ptr)
-{
- int i;
-
- /* completion header size not fixed per HW version */
- seq_printf(s, "index %04d:\n\t", index);
- for (i = 1; i <= sz / 4; i++, ptr++) {
- seq_printf(s, " 0x%08x", le32_to_cpu(*ptr));
- if (!(i % 4))
- seq_puts(s, "\n\t");
- }
- seq_puts(s, "\n");
-}
-
-static void hisi_sas_cq_show_slot(struct seq_file *s, int slot,
- struct hisi_sas_debugfs_cq *debugfs_cq)
-{
- struct hisi_sas_cq *cq = debugfs_cq->cq;
- struct hisi_hba *hisi_hba = cq->hisi_hba;
- __le32 *complete_hdr = debugfs_cq->complete_hdr +
- (hisi_hba->hw->complete_hdr_size * slot);
-
- hisi_sas_show_row_32(s, slot,
- hisi_hba->hw->complete_hdr_size,
- complete_hdr);
-}
-
-static int hisi_sas_debugfs_cq_show(struct seq_file *s, void *p)
-{
- struct hisi_sas_debugfs_cq *debugfs_cq = s->private;
- int slot;
-
- for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++) {
- hisi_sas_cq_show_slot(s, slot, debugfs_cq);
- }
- return 0;
-}
-
-static int hisi_sas_debugfs_cq_open(struct inode *inode, struct file *filp)
-{
- return single_open(filp, hisi_sas_debugfs_cq_show, inode->i_private);
-}
-
-static const struct file_operations hisi_sas_debugfs_cq_fops = {
- .open = hisi_sas_debugfs_cq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static void hisi_sas_dq_show_slot(struct seq_file *s, int slot, void *dq_ptr)
-{
- struct hisi_sas_debugfs_dq *debugfs_dq = dq_ptr;
- void *cmd_queue = debugfs_dq->hdr;
- __le32 *cmd_hdr = cmd_queue +
- sizeof(struct hisi_sas_cmd_hdr) * slot;
-
- hisi_sas_show_row_32(s, slot, sizeof(struct hisi_sas_cmd_hdr), cmd_hdr);
-}
-
-static int hisi_sas_debugfs_dq_show(struct seq_file *s, void *p)
-{
- int slot;
-
- for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++) {
- hisi_sas_dq_show_slot(s, slot, s->private);
- }
- return 0;
-}
-
-static int hisi_sas_debugfs_dq_open(struct inode *inode, struct file *filp)
-{
- return single_open(filp, hisi_sas_debugfs_dq_show, inode->i_private);
-}
-
-static const struct file_operations hisi_sas_debugfs_dq_fops = {
- .open = hisi_sas_debugfs_dq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int hisi_sas_debugfs_iost_show(struct seq_file *s, void *p)
-{
- struct hisi_sas_debugfs_iost *debugfs_iost = s->private;
- struct hisi_sas_iost *iost = debugfs_iost->iost;
- int i, max_command_entries = HISI_SAS_MAX_COMMANDS;
-
- for (i = 0; i < max_command_entries; i++, iost++) {
- __le64 *data = &iost->qw0;
-
- hisi_sas_show_row_64(s, i, sizeof(*iost), data);
- }
-
- return 0;
-}
-
-static int hisi_sas_debugfs_iost_open(struct inode *inode, struct file *filp)
-{
- return single_open(filp, hisi_sas_debugfs_iost_show, inode->i_private);
-}
-
-static const struct file_operations hisi_sas_debugfs_iost_fops = {
- .open = hisi_sas_debugfs_iost_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int hisi_sas_debugfs_iost_cache_show(struct seq_file *s, void *p)
-{
- struct hisi_sas_debugfs_iost_cache *debugfs_iost_cache = s->private;
- struct hisi_sas_iost_itct_cache *iost_cache = debugfs_iost_cache->cache;
- u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * 4;
- int i, tab_idx;
- __le64 *iost;
-
- for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_NUM; i++, iost_cache++) {
- /*
- * Data struct of IOST cache:
- * Data[1]: BIT0~15: Table index
- * Bit16: Valid mask
- * Data[2]~[9]: IOST table
- */
- tab_idx = (iost_cache->data[1] & 0xffff);
- iost = (__le64 *)iost_cache;
-
- hisi_sas_show_row_64(s, tab_idx, cache_size, iost);
- }
-
- return 0;
-}
-
-static int hisi_sas_debugfs_iost_cache_open(struct inode *inode,
- struct file *filp)
-{
- return single_open(filp, hisi_sas_debugfs_iost_cache_show,
- inode->i_private);
-}
-
-static const struct file_operations hisi_sas_debugfs_iost_cache_fops = {
- .open = hisi_sas_debugfs_iost_cache_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int hisi_sas_debugfs_itct_show(struct seq_file *s, void *p)
-{
- int i;
- struct hisi_sas_debugfs_itct *debugfs_itct = s->private;
- struct hisi_sas_itct *itct = debugfs_itct->itct;
-
- for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, itct++) {
- __le64 *data = &itct->qw0;
-
- hisi_sas_show_row_64(s, i, sizeof(*itct), data);
- }
-
- return 0;
-}
-
-static int hisi_sas_debugfs_itct_open(struct inode *inode, struct file *filp)
-{
- return single_open(filp, hisi_sas_debugfs_itct_show, inode->i_private);
-}
-
-static const struct file_operations hisi_sas_debugfs_itct_fops = {
- .open = hisi_sas_debugfs_itct_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int hisi_sas_debugfs_itct_cache_show(struct seq_file *s, void *p)
-{
- struct hisi_sas_debugfs_itct_cache *debugfs_itct_cache = s->private;
- struct hisi_sas_iost_itct_cache *itct_cache = debugfs_itct_cache->cache;
- u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * 4;
- int i, tab_idx;
- __le64 *itct;
-
- for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_NUM; i++, itct_cache++) {
- /*
- * Data struct of ITCT cache:
- * Data[1]: BIT0~15: Table index
- * Bit16: Valid mask
- * Data[2]~[9]: ITCT table
- */
- tab_idx = itct_cache->data[1] & 0xffff;
- itct = (__le64 *)itct_cache;
-
- hisi_sas_show_row_64(s, tab_idx, cache_size, itct);
- }
-
- return 0;
-}
-
-static int hisi_sas_debugfs_itct_cache_open(struct inode *inode,
- struct file *filp)
-{
- return single_open(filp, hisi_sas_debugfs_itct_cache_show,
- inode->i_private);
-}
-
-static const struct file_operations hisi_sas_debugfs_itct_cache_fops = {
- .open = hisi_sas_debugfs_itct_cache_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static void hisi_sas_debugfs_create_files(struct hisi_hba *hisi_hba)
-{
- u64 *debugfs_timestamp;
- int dump_index = hisi_hba->debugfs_dump_index;
- struct dentry *dump_dentry;
- struct dentry *dentry;
- char name[256];
- int p;
- int c;
- int d;
-
- snprintf(name, 256, "%d", dump_index);
-
- dump_dentry = debugfs_create_dir(name, hisi_hba->debugfs_dump_dentry);
-
- debugfs_timestamp = &hisi_hba->debugfs_timestamp[dump_index];
-
- debugfs_create_u64("timestamp", 0400, dump_dentry,
- debugfs_timestamp);
-
- debugfs_create_file("global", 0400, dump_dentry,
- &hisi_hba->debugfs_regs[dump_index][DEBUGFS_GLOBAL],
- &hisi_sas_debugfs_global_fops);
-
- /* Create port dir and files */
- dentry = debugfs_create_dir("port", dump_dentry);
- for (p = 0; p < hisi_hba->n_phy; p++) {
- snprintf(name, 256, "%d", p);
-
- debugfs_create_file(name, 0400, dentry,
- &hisi_hba->debugfs_port_reg[dump_index][p],
- &hisi_sas_debugfs_port_fops);
- }
-
- /* Create CQ dir and files */
- dentry = debugfs_create_dir("cq", dump_dentry);
- for (c = 0; c < hisi_hba->queue_count; c++) {
- snprintf(name, 256, "%d", c);
-
- debugfs_create_file(name, 0400, dentry,
- &hisi_hba->debugfs_cq[dump_index][c],
- &hisi_sas_debugfs_cq_fops);
- }
-
- /* Create DQ dir and files */
- dentry = debugfs_create_dir("dq", dump_dentry);
- for (d = 0; d < hisi_hba->queue_count; d++) {
- snprintf(name, 256, "%d", d);
-
- debugfs_create_file(name, 0400, dentry,
- &hisi_hba->debugfs_dq[dump_index][d],
- &hisi_sas_debugfs_dq_fops);
- }
-
- debugfs_create_file("iost", 0400, dump_dentry,
- &hisi_hba->debugfs_iost[dump_index],
- &hisi_sas_debugfs_iost_fops);
-
- debugfs_create_file("iost_cache", 0400, dump_dentry,
- &hisi_hba->debugfs_iost_cache[dump_index],
- &hisi_sas_debugfs_iost_cache_fops);
-
- debugfs_create_file("itct", 0400, dump_dentry,
- &hisi_hba->debugfs_itct[dump_index],
- &hisi_sas_debugfs_itct_fops);
-
- debugfs_create_file("itct_cache", 0400, dump_dentry,
- &hisi_hba->debugfs_itct_cache[dump_index],
- &hisi_sas_debugfs_itct_cache_fops);
-
- debugfs_create_file("axi", 0400, dump_dentry,
- &hisi_hba->debugfs_regs[dump_index][DEBUGFS_AXI],
- &hisi_sas_debugfs_axi_fops);
-
- debugfs_create_file("ras", 0400, dump_dentry,
- &hisi_hba->debugfs_regs[dump_index][DEBUGFS_RAS],
- &hisi_sas_debugfs_ras_fops);
-
- return;
-}
-
-static void hisi_sas_debugfs_snapshot_regs(struct hisi_hba *hisi_hba)
-{
- hisi_hba->hw->snapshot_prepare(hisi_hba);
-
- hisi_sas_debugfs_snapshot_global_reg(hisi_hba);
- hisi_sas_debugfs_snapshot_port_reg(hisi_hba);
- hisi_sas_debugfs_snapshot_axi_reg(hisi_hba);
- hisi_sas_debugfs_snapshot_ras_reg(hisi_hba);
- hisi_sas_debugfs_snapshot_cq_reg(hisi_hba);
- hisi_sas_debugfs_snapshot_dq_reg(hisi_hba);
- hisi_sas_debugfs_snapshot_itct_reg(hisi_hba);
- hisi_sas_debugfs_snapshot_iost_reg(hisi_hba);
-
- hisi_sas_debugfs_create_files(hisi_hba);
-
- hisi_hba->hw->snapshot_restore(hisi_hba);
-}
-
-static ssize_t hisi_sas_debugfs_trigger_dump_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct hisi_hba *hisi_hba = file->f_inode->i_private;
- char buf[8];
-
- if (hisi_hba->debugfs_dump_index >= hisi_sas_debugfs_dump_count)
- return -EFAULT;
-
- if (count > 8)
- return -EFAULT;
-
- if (copy_from_user(buf, user_buf, count))
- return -EFAULT;
-
- if (buf[0] != '1')
- return -EFAULT;
-
- queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);
-
- return count;
-}
-
-static const struct file_operations hisi_sas_debugfs_trigger_dump_fops = {
- .write = &hisi_sas_debugfs_trigger_dump_write,
- .owner = THIS_MODULE,
-};
-
-enum {
- HISI_SAS_BIST_LOOPBACK_MODE_DIGITAL = 0,
- HISI_SAS_BIST_LOOPBACK_MODE_SERDES,
- HISI_SAS_BIST_LOOPBACK_MODE_REMOTE,
-};
-
-static const struct {
- int value;
- char *name;
-} hisi_sas_debugfs_loop_linkrate[] = {
- { SAS_LINK_RATE_1_5_GBPS, "1.5 Gbit" },
- { SAS_LINK_RATE_3_0_GBPS, "3.0 Gbit" },
- { SAS_LINK_RATE_6_0_GBPS, "6.0 Gbit" },
- { SAS_LINK_RATE_12_0_GBPS, "12.0 Gbit" },
-};
-
-static int hisi_sas_debugfs_bist_linkrate_show(struct seq_file *s, void *p)
-{
- struct hisi_hba *hisi_hba = s->private;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_linkrate); i++) {
- int match = (hisi_hba->debugfs_bist_linkrate ==
- hisi_sas_debugfs_loop_linkrate[i].value);
-
- seq_printf(s, "%s%s%s ", match ? "[" : "",
- hisi_sas_debugfs_loop_linkrate[i].name,
- match ? "]" : "");
- }
- seq_puts(s, "\n");
-
- return 0;
-}
-
-static ssize_t hisi_sas_debugfs_bist_linkrate_write(struct file *filp,
- const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct seq_file *m = filp->private_data;
- struct hisi_hba *hisi_hba = m->private;
- char kbuf[16] = {}, *pkbuf;
- bool found = false;
- int i;
-
- if (hisi_hba->debugfs_bist_enable)
- return -EPERM;
-
- if (count >= sizeof(kbuf))
- return -EOVERFLOW;
-
- if (copy_from_user(kbuf, buf, count))
- return -EINVAL;
-
- pkbuf = strstrip(kbuf);
-
- for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_linkrate); i++) {
- if (!strncmp(hisi_sas_debugfs_loop_linkrate[i].name,
- pkbuf, 16)) {
- hisi_hba->debugfs_bist_linkrate =
- hisi_sas_debugfs_loop_linkrate[i].value;
- found = true;
- break;
- }
- }
-
- if (!found)
- return -EINVAL;
-
- return count;
-}
-
-static int hisi_sas_debugfs_bist_linkrate_open(struct inode *inode,
- struct file *filp)
-{
- return single_open(filp, hisi_sas_debugfs_bist_linkrate_show,
- inode->i_private);
-}
-
-static const struct file_operations hisi_sas_debugfs_bist_linkrate_ops = {
- .open = hisi_sas_debugfs_bist_linkrate_open,
- .read = seq_read,
- .write = hisi_sas_debugfs_bist_linkrate_write,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static const struct {
- int value;
- char *name;
-} hisi_sas_debugfs_loop_code_mode[] = {
- { HISI_SAS_BIST_CODE_MODE_PRBS7, "PRBS7" },
- { HISI_SAS_BIST_CODE_MODE_PRBS23, "PRBS23" },
- { HISI_SAS_BIST_CODE_MODE_PRBS31, "PRBS31" },
- { HISI_SAS_BIST_CODE_MODE_JTPAT, "JTPAT" },
- { HISI_SAS_BIST_CODE_MODE_CJTPAT, "CJTPAT" },
- { HISI_SAS_BIST_CODE_MODE_SCRAMBED_0, "SCRAMBED_0" },
- { HISI_SAS_BIST_CODE_MODE_TRAIN, "TRAIN" },
- { HISI_SAS_BIST_CODE_MODE_TRAIN_DONE, "TRAIN_DONE" },
- { HISI_SAS_BIST_CODE_MODE_HFTP, "HFTP" },
- { HISI_SAS_BIST_CODE_MODE_MFTP, "MFTP" },
- { HISI_SAS_BIST_CODE_MODE_LFTP, "LFTP" },
- { HISI_SAS_BIST_CODE_MODE_FIXED_DATA, "FIXED_DATA" },
-};
-
-static int hisi_sas_debugfs_bist_code_mode_show(struct seq_file *s, void *p)
-{
- struct hisi_hba *hisi_hba = s->private;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_code_mode); i++) {
- int match = (hisi_hba->debugfs_bist_code_mode ==
- hisi_sas_debugfs_loop_code_mode[i].value);
-
- seq_printf(s, "%s%s%s ", match ? "[" : "",
- hisi_sas_debugfs_loop_code_mode[i].name,
- match ? "]" : "");
- }
- seq_puts(s, "\n");
-
- return 0;
-}
-
-static ssize_t hisi_sas_debugfs_bist_code_mode_write(struct file *filp,
- const char __user *buf,
- size_t count,
- loff_t *ppos)
-{
- struct seq_file *m = filp->private_data;
- struct hisi_hba *hisi_hba = m->private;
- char kbuf[16] = {}, *pkbuf;
- bool found = false;
- int i;
-
- if (hisi_hba->debugfs_bist_enable)
- return -EPERM;
-
- if (count >= sizeof(kbuf))
- return -EINVAL;
-
- if (copy_from_user(kbuf, buf, count))
- return -EOVERFLOW;
-
- pkbuf = strstrip(kbuf);
-
- for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_code_mode); i++) {
- if (!strncmp(hisi_sas_debugfs_loop_code_mode[i].name,
- pkbuf, 16)) {
- hisi_hba->debugfs_bist_code_mode =
- hisi_sas_debugfs_loop_code_mode[i].value;
- found = true;
- break;
- }
- }
-
- if (!found)
- return -EINVAL;
-
- return count;
-}
-
-static int hisi_sas_debugfs_bist_code_mode_open(struct inode *inode,
- struct file *filp)
-{
- return single_open(filp, hisi_sas_debugfs_bist_code_mode_show,
- inode->i_private);
-}
-
-static const struct file_operations hisi_sas_debugfs_bist_code_mode_ops = {
- .open = hisi_sas_debugfs_bist_code_mode_open,
- .read = seq_read,
- .write = hisi_sas_debugfs_bist_code_mode_write,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static ssize_t hisi_sas_debugfs_bist_phy_write(struct file *filp,
- const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct seq_file *m = filp->private_data;
- struct hisi_hba *hisi_hba = m->private;
- unsigned int phy_no;
- int val;
-
- if (hisi_hba->debugfs_bist_enable)
- return -EPERM;
-
- val = kstrtouint_from_user(buf, count, 0, &phy_no);
- if (val)
- return val;
-
- if (phy_no >= hisi_hba->n_phy)
- return -EINVAL;
-
- hisi_hba->debugfs_bist_phy_no = phy_no;
-
- return count;
-}
-
-static int hisi_sas_debugfs_bist_phy_show(struct seq_file *s, void *p)
-{
- struct hisi_hba *hisi_hba = s->private;
-
- seq_printf(s, "%d\n", hisi_hba->debugfs_bist_phy_no);
-
- return 0;
-}
-
-static int hisi_sas_debugfs_bist_phy_open(struct inode *inode,
- struct file *filp)
-{
- return single_open(filp, hisi_sas_debugfs_bist_phy_show,
- inode->i_private);
-}
-
-static const struct file_operations hisi_sas_debugfs_bist_phy_ops = {
- .open = hisi_sas_debugfs_bist_phy_open,
- .read = seq_read,
- .write = hisi_sas_debugfs_bist_phy_write,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static const struct {
- int value;
- char *name;
-} hisi_sas_debugfs_loop_modes[] = {
- { HISI_SAS_BIST_LOOPBACK_MODE_DIGITAL, "digital" },
- { HISI_SAS_BIST_LOOPBACK_MODE_SERDES, "serdes" },
- { HISI_SAS_BIST_LOOPBACK_MODE_REMOTE, "remote" },
-};
-
-static int hisi_sas_debugfs_bist_mode_show(struct seq_file *s, void *p)
-{
- struct hisi_hba *hisi_hba = s->private;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_modes); i++) {
- int match = (hisi_hba->debugfs_bist_mode ==
- hisi_sas_debugfs_loop_modes[i].value);
-
- seq_printf(s, "%s%s%s ", match ? "[" : "",
- hisi_sas_debugfs_loop_modes[i].name,
- match ? "]" : "");
- }
- seq_puts(s, "\n");
-
- return 0;
-}
-
-static ssize_t hisi_sas_debugfs_bist_mode_write(struct file *filp,
- const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct seq_file *m = filp->private_data;
- struct hisi_hba *hisi_hba = m->private;
- char kbuf[16] = {}, *pkbuf;
- bool found = false;
- int i;
-
- if (hisi_hba->debugfs_bist_enable)
- return -EPERM;
-
- if (count >= sizeof(kbuf))
- return -EINVAL;
-
- if (copy_from_user(kbuf, buf, count))
- return -EOVERFLOW;
-
- pkbuf = strstrip(kbuf);
-
- for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_modes); i++) {
- if (!strncmp(hisi_sas_debugfs_loop_modes[i].name, pkbuf, 16)) {
- hisi_hba->debugfs_bist_mode =
- hisi_sas_debugfs_loop_modes[i].value;
- found = true;
- break;
- }
- }
-
- if (!found)
- return -EINVAL;
-
- return count;
-}
-
-static int hisi_sas_debugfs_bist_mode_open(struct inode *inode,
- struct file *filp)
-{
- return single_open(filp, hisi_sas_debugfs_bist_mode_show,
- inode->i_private);
-}
-
-static const struct file_operations hisi_sas_debugfs_bist_mode_ops = {
- .open = hisi_sas_debugfs_bist_mode_open,
- .read = seq_read,
- .write = hisi_sas_debugfs_bist_mode_write,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static ssize_t hisi_sas_debugfs_bist_enable_write(struct file *filp,
- const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct seq_file *m = filp->private_data;
- struct hisi_hba *hisi_hba = m->private;
- unsigned int enable;
- int val;
-
- val = kstrtouint_from_user(buf, count, 0, &enable);
- if (val)
- return val;
-
- if (enable > 1)
- return -EINVAL;
-
- if (enable == hisi_hba->debugfs_bist_enable)
- return count;
-
- if (!hisi_hba->hw->set_bist)
- return -EPERM;
-
- val = hisi_hba->hw->set_bist(hisi_hba, enable);
- if (val < 0)
- return val;
-
- hisi_hba->debugfs_bist_enable = enable;
-
- return count;
-}
-
-static int hisi_sas_debugfs_bist_enable_show(struct seq_file *s, void *p)
-{
- struct hisi_hba *hisi_hba = s->private;
-
- seq_printf(s, "%d\n", hisi_hba->debugfs_bist_enable);
-
- return 0;
-}
-
-static int hisi_sas_debugfs_bist_enable_open(struct inode *inode,
- struct file *filp)
-{
- return single_open(filp, hisi_sas_debugfs_bist_enable_show,
- inode->i_private);
-}
-
-static const struct file_operations hisi_sas_debugfs_bist_enable_ops = {
- .open = hisi_sas_debugfs_bist_enable_open,
- .read = seq_read,
- .write = hisi_sas_debugfs_bist_enable_write,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static const struct {
- char *name;
-} hisi_sas_debugfs_ffe_name[FFE_CFG_MAX] = {
- { "SAS_1_5_GBPS" },
- { "SAS_3_0_GBPS" },
- { "SAS_6_0_GBPS" },
- { "SAS_12_0_GBPS" },
- { "FFE_RESV" },
- { "SATA_1_5_GBPS" },
- { "SATA_3_0_GBPS" },
- { "SATA_6_0_GBPS" },
-};
-
-static ssize_t hisi_sas_debugfs_write(struct file *filp,
- const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct seq_file *m = filp->private_data;
- u32 *val = m->private;
- int res;
-
- res = kstrtouint_from_user(buf, count, 0, val);
- if (res)
- return res;
-
- return count;
-}
-
-static int hisi_sas_debugfs_show(struct seq_file *s, void *p)
-{
- u32 *val = s->private;
-
- seq_printf(s, "0x%x\n", *val);
-
- return 0;
-}
-
-static int hisi_sas_debugfs_open(struct inode *inode, struct file *filp)
-{
- return single_open(filp, hisi_sas_debugfs_show,
- inode->i_private);
-}
-
-static const struct file_operations hisi_sas_debugfs_ops = {
- .open = hisi_sas_debugfs_open,
- .read = seq_read,
- .write = hisi_sas_debugfs_write,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static ssize_t hisi_sas_debugfs_phy_down_cnt_write(struct file *filp,
- const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct seq_file *s = filp->private_data;
- struct hisi_sas_phy *phy = s->private;
- unsigned int set_val;
- int res;
-
- res = kstrtouint_from_user(buf, count, 0, &set_val);
- if (res)
- return res;
-
- if (set_val > 0)
- return -EINVAL;
-
- atomic_set(&phy->down_cnt, 0);
-
- return count;
-}
-
-static int hisi_sas_debugfs_phy_down_cnt_show(struct seq_file *s, void *p)
-{
- struct hisi_sas_phy *phy = s->private;
-
- seq_printf(s, "%d\n", atomic_read(&phy->down_cnt));
-
- return 0;
-}
-
-static int hisi_sas_debugfs_phy_down_cnt_open(struct inode *inode,
- struct file *filp)
-{
- return single_open(filp, hisi_sas_debugfs_phy_down_cnt_show,
- inode->i_private);
-}
-
-static const struct file_operations hisi_sas_debugfs_phy_down_cnt_ops = {
- .open = hisi_sas_debugfs_phy_down_cnt_open,
- .read = seq_read,
- .write = hisi_sas_debugfs_phy_down_cnt_write,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-void hisi_sas_debugfs_work_handler(struct work_struct *work)
-{
- struct hisi_hba *hisi_hba =
- container_of(work, struct hisi_hba, debugfs_work);
- int debugfs_dump_index = hisi_hba->debugfs_dump_index;
- struct device *dev = hisi_hba->dev;
- u64 timestamp = local_clock();
-
- if (debugfs_dump_index >= hisi_sas_debugfs_dump_count) {
- dev_warn(dev, "dump count exceeded!\n");
- return;
- }
-
- do_div(timestamp, NSEC_PER_MSEC);
- hisi_hba->debugfs_timestamp[debugfs_dump_index] = timestamp;
-
- hisi_sas_debugfs_snapshot_regs(hisi_hba);
- hisi_hba->debugfs_dump_index++;
-}
-EXPORT_SYMBOL_GPL(hisi_sas_debugfs_work_handler);
-
-static void hisi_sas_debugfs_release(struct hisi_hba *hisi_hba, int dump_index)
-{
- struct device *dev = hisi_hba->dev;
- int i;
-
- devm_kfree(dev, hisi_hba->debugfs_iost_cache[dump_index].cache);
- devm_kfree(dev, hisi_hba->debugfs_itct_cache[dump_index].cache);
- devm_kfree(dev, hisi_hba->debugfs_iost[dump_index].iost);
- devm_kfree(dev, hisi_hba->debugfs_itct[dump_index].itct);
-
- for (i = 0; i < hisi_hba->queue_count; i++)
- devm_kfree(dev, hisi_hba->debugfs_dq[dump_index][i].hdr);
-
- for (i = 0; i < hisi_hba->queue_count; i++)
- devm_kfree(dev,
- hisi_hba->debugfs_cq[dump_index][i].complete_hdr);
-
- for (i = 0; i < DEBUGFS_REGS_NUM; i++)
- devm_kfree(dev, hisi_hba->debugfs_regs[dump_index][i].data);
-
- for (i = 0; i < hisi_hba->n_phy; i++)
- devm_kfree(dev, hisi_hba->debugfs_port_reg[dump_index][i].data);
-}
-
-static int hisi_sas_debugfs_alloc(struct hisi_hba *hisi_hba, int dump_index)
-{
- const struct hisi_sas_hw *hw = hisi_hba->hw;
- struct device *dev = hisi_hba->dev;
- int p, c, d, r, i;
- size_t sz;
-
- for (r = 0; r < DEBUGFS_REGS_NUM; r++) {
- struct hisi_sas_debugfs_regs *regs =
- &hisi_hba->debugfs_regs[dump_index][r];
-
- sz = hw->debugfs_reg_array[r]->count * 4;
- regs->data = devm_kmalloc(dev, sz, GFP_KERNEL);
- if (!regs->data)
- goto fail;
- regs->hisi_hba = hisi_hba;
- }
-
- sz = hw->debugfs_reg_port->count * 4;
- for (p = 0; p < hisi_hba->n_phy; p++) {
- struct hisi_sas_debugfs_port *port =
- &hisi_hba->debugfs_port_reg[dump_index][p];
-
- port->data = devm_kmalloc(dev, sz, GFP_KERNEL);
- if (!port->data)
- goto fail;
- port->phy = &hisi_hba->phy[p];
- }
-
- sz = hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
- for (c = 0; c < hisi_hba->queue_count; c++) {
- struct hisi_sas_debugfs_cq *cq =
- &hisi_hba->debugfs_cq[dump_index][c];
-
- cq->complete_hdr = devm_kmalloc(dev, sz, GFP_KERNEL);
- if (!cq->complete_hdr)
- goto fail;
- cq->cq = &hisi_hba->cq[c];
- }
-
- sz = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
- for (d = 0; d < hisi_hba->queue_count; d++) {
- struct hisi_sas_debugfs_dq *dq =
- &hisi_hba->debugfs_dq[dump_index][d];
-
- dq->hdr = devm_kmalloc(dev, sz, GFP_KERNEL);
- if (!dq->hdr)
- goto fail;
- dq->dq = &hisi_hba->dq[d];
- }
-
- sz = HISI_SAS_MAX_COMMANDS * sizeof(struct hisi_sas_iost);
-
- hisi_hba->debugfs_iost[dump_index].iost =
- devm_kmalloc(dev, sz, GFP_KERNEL);
- if (!hisi_hba->debugfs_iost[dump_index].iost)
- goto fail;
-
- sz = HISI_SAS_IOST_ITCT_CACHE_NUM *
- sizeof(struct hisi_sas_iost_itct_cache);
-
- hisi_hba->debugfs_iost_cache[dump_index].cache =
- devm_kmalloc(dev, sz, GFP_KERNEL);
- if (!hisi_hba->debugfs_iost_cache[dump_index].cache)
- goto fail;
-
- sz = HISI_SAS_IOST_ITCT_CACHE_NUM *
- sizeof(struct hisi_sas_iost_itct_cache);
-
- hisi_hba->debugfs_itct_cache[dump_index].cache =
- devm_kmalloc(dev, sz, GFP_KERNEL);
- if (!hisi_hba->debugfs_itct_cache[dump_index].cache)
- goto fail;
-
-	/* New memory allocation must be located before the itct allocation */
- sz = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
-
- hisi_hba->debugfs_itct[dump_index].itct =
- devm_kmalloc(dev, sz, GFP_KERNEL);
- if (!hisi_hba->debugfs_itct[dump_index].itct)
- goto fail;
-
- return 0;
-fail:
- for (i = 0; i < hisi_sas_debugfs_dump_count; i++)
- hisi_sas_debugfs_release(hisi_hba, i);
- return -ENOMEM;
-}
-
-static void hisi_sas_debugfs_phy_down_cnt_init(struct hisi_hba *hisi_hba)
-{
- struct dentry *dir = debugfs_create_dir("phy_down_cnt",
- hisi_hba->debugfs_dir);
- char name[16];
- int phy_no;
-
- for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
- snprintf(name, 16, "%d", phy_no);
- debugfs_create_file(name, 0600, dir,
- &hisi_hba->phy[phy_no],
- &hisi_sas_debugfs_phy_down_cnt_ops);
- }
-}
-
-static void hisi_sas_debugfs_bist_init(struct hisi_hba *hisi_hba)
-{
- struct dentry *ports_dentry;
- int phy_no;
-
- hisi_hba->debugfs_bist_dentry =
- debugfs_create_dir("bist", hisi_hba->debugfs_dir);
- debugfs_create_file("link_rate", 0600,
- hisi_hba->debugfs_bist_dentry, hisi_hba,
- &hisi_sas_debugfs_bist_linkrate_ops);
-
- debugfs_create_file("code_mode", 0600,
- hisi_hba->debugfs_bist_dentry, hisi_hba,
- &hisi_sas_debugfs_bist_code_mode_ops);
-
- debugfs_create_file("fixed_code", 0600,
- hisi_hba->debugfs_bist_dentry,
- &hisi_hba->debugfs_bist_fixed_code[0],
- &hisi_sas_debugfs_ops);
-
- debugfs_create_file("fixed_code_1", 0600,
- hisi_hba->debugfs_bist_dentry,
- &hisi_hba->debugfs_bist_fixed_code[1],
- &hisi_sas_debugfs_ops);
-
- debugfs_create_file("phy_id", 0600, hisi_hba->debugfs_bist_dentry,
- hisi_hba, &hisi_sas_debugfs_bist_phy_ops);
-
- debugfs_create_u32("cnt", 0600, hisi_hba->debugfs_bist_dentry,
- &hisi_hba->debugfs_bist_cnt);
-
- debugfs_create_file("loopback_mode", 0600,
- hisi_hba->debugfs_bist_dentry,
- hisi_hba, &hisi_sas_debugfs_bist_mode_ops);
-
- debugfs_create_file("enable", 0600, hisi_hba->debugfs_bist_dentry,
- hisi_hba, &hisi_sas_debugfs_bist_enable_ops);
-
- ports_dentry = debugfs_create_dir("port", hisi_hba->debugfs_bist_dentry);
-
- for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
- struct dentry *port_dentry;
- struct dentry *ffe_dentry;
- char name[256];
- int i;
-
- snprintf(name, 256, "%d", phy_no);
- port_dentry = debugfs_create_dir(name, ports_dentry);
- ffe_dentry = debugfs_create_dir("ffe", port_dentry);
- for (i = 0; i < FFE_CFG_MAX; i++) {
- if (i == FFE_RESV)
- continue;
- debugfs_create_file(hisi_sas_debugfs_ffe_name[i].name,
- 0600, ffe_dentry,
- &hisi_hba->debugfs_bist_ffe[phy_no][i],
- &hisi_sas_debugfs_ops);
- }
- }
-
- hisi_hba->debugfs_bist_linkrate = SAS_LINK_RATE_1_5_GBPS;
-}
-
-void hisi_sas_debugfs_init(struct hisi_hba *hisi_hba)
-{
- struct device *dev = hisi_hba->dev;
- int i;
-
- hisi_hba->debugfs_dir = debugfs_create_dir(dev_name(dev),
- hisi_sas_debugfs_dir);
- debugfs_create_file("trigger_dump", 0200,
- hisi_hba->debugfs_dir,
- hisi_hba,
- &hisi_sas_debugfs_trigger_dump_fops);
-
- /* create bist structures */
- hisi_sas_debugfs_bist_init(hisi_hba);
-
- hisi_hba->debugfs_dump_dentry =
- debugfs_create_dir("dump", hisi_hba->debugfs_dir);
-
- hisi_sas_debugfs_phy_down_cnt_init(hisi_hba);
-
- for (i = 0; i < hisi_sas_debugfs_dump_count; i++) {
- if (hisi_sas_debugfs_alloc(hisi_hba, i)) {
- debugfs_remove_recursive(hisi_hba->debugfs_dir);
- dev_dbg(dev, "failed to init debugfs!\n");
- break;
- }
- }
-}
-EXPORT_SYMBOL_GPL(hisi_sas_debugfs_init);
-
-void hisi_sas_debugfs_exit(struct hisi_hba *hisi_hba)
-{
- debugfs_remove_recursive(hisi_hba->debugfs_dir);
-}
-EXPORT_SYMBOL_GPL(hisi_sas_debugfs_exit);
-
int hisi_sas_remove(struct platform_device *pdev)
{
struct sas_ha_struct *sha = platform_get_drvdata(pdev);
@@ -4073,6 +2735,9 @@ EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dump_count);
module_param_named(debugfs_dump_count, hisi_sas_debugfs_dump_count, uint, 0444);
MODULE_PARM_DESC(hisi_sas_debugfs_dump_count, "Number of debugfs dumps to allow");
+struct dentry *hisi_sas_debugfs_dir;
+EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dir);
+
static __init int hisi_sas_init(void)
{
hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index b57177b52fac..9adfdefef9ca 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -3302,6 +3302,28 @@ static irq_handler_t fatal_interrupts[HISI_SAS_FATAL_INT_NR] = {
fatal_axi_int_v2_hw
};
+#define CQ0_IRQ_INDEX (96)
+
+static int hisi_sas_v2_interrupt_preinit(struct hisi_hba *hisi_hba)
+{
+ struct platform_device *pdev = hisi_hba->platform_dev;
+ struct Scsi_Host *shost = hisi_hba->shost;
+ struct irq_affinity desc = {
+ .pre_vectors = CQ0_IRQ_INDEX,
+ .post_vectors = 16,
+ };
+ int resv = desc.pre_vectors + desc.post_vectors, minvec = resv + 1, nvec;
+
+ nvec = devm_platform_get_irqs_affinity(pdev, &desc, minvec, 128,
+ &hisi_hba->irq_map);
+ if (nvec < 0)
+ return nvec;
+
+ shost->nr_hw_queues = hisi_hba->cq_nvecs = nvec - resv;
+
+ return 0;
+}
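
Aside: a hedged summary of the hip06 mbigen vector layout that the pre/post split above assumes, with the CQ base spelled out as a helper (illustrative only, not driver code; indices taken from the requests later in this hunk):

	/*
	 * irq_map[1..]   phy interrupts (phy up/down is irq 1)
	 * irq_map[72..]  per-phy SATA interrupts
	 * irq_map[81..]  fatal interrupts
	 * irq_map[96..]  completion queues, the only affinity-managed range
	 * last 16        trailing vectors fenced off by .post_vectors
	 */
	static inline int example_cq_irq_index(int queue_no)
	{
		return CQ0_IRQ_INDEX + queue_no;	/* 96 + queue_no */
	}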
+
/*
* There is a limitation in the hip06 chipset that we need
* to map in all mbigen interrupts, even if they are not used.
@@ -3310,14 +3332,11 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
{
struct platform_device *pdev = hisi_hba->platform_dev;
struct device *dev = &pdev->dev;
- int irq, rc = 0, irq_map[128];
+ int irq, rc = 0;
int i, phy_no, fatal_no, queue_no;
- for (i = 0; i < 128; i++)
- irq_map[i] = platform_get_irq(pdev, i);
-
for (i = 0; i < HISI_SAS_PHY_INT_NR; i++) {
- irq = irq_map[i + 1]; /* Phy up/down is irq1 */
+ irq = hisi_hba->irq_map[i + 1]; /* Phy up/down is irq1 */
rc = devm_request_irq(dev, irq, phy_interrupts[i], 0,
DRV_NAME " phy", hisi_hba);
if (rc) {
@@ -3331,7 +3350,7 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
- irq = irq_map[phy_no + 72];
+ irq = hisi_hba->irq_map[phy_no + 72];
rc = devm_request_irq(dev, irq, sata_int_v2_hw, 0,
DRV_NAME " sata", phy);
if (rc) {
@@ -3343,7 +3362,7 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
}
for (fatal_no = 0; fatal_no < HISI_SAS_FATAL_INT_NR; fatal_no++) {
- irq = irq_map[fatal_no + 81];
+ irq = hisi_hba->irq_map[fatal_no + 81];
rc = devm_request_irq(dev, irq, fatal_interrupts[fatal_no], 0,
DRV_NAME " fatal", hisi_hba);
if (rc) {
@@ -3354,24 +3373,22 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
}
}
- for (queue_no = 0; queue_no < hisi_hba->queue_count; queue_no++) {
+ for (queue_no = 0; queue_no < hisi_hba->cq_nvecs; queue_no++) {
struct hisi_sas_cq *cq = &hisi_hba->cq[queue_no];
- cq->irq_no = irq_map[queue_no + 96];
+ cq->irq_no = hisi_hba->irq_map[queue_no + 96];
rc = devm_request_threaded_irq(dev, cq->irq_no,
cq_interrupt_v2_hw,
cq_thread_v2_hw, IRQF_ONESHOT,
DRV_NAME " cq", cq);
if (rc) {
dev_err(dev, "irq init: could not request cq interrupt %d, rc=%d\n",
- irq, rc);
+ cq->irq_no, rc);
rc = -ENOENT;
goto err_out;
}
+ cq->irq_mask = irq_get_affinity_mask(cq->irq_no);
}
-
- hisi_hba->cq_nvecs = hisi_hba->queue_count;
-
err_out:
return rc;
}
@@ -3529,6 +3546,26 @@ static struct device_attribute *host_attrs_v2_hw[] = {
NULL
};
+static int map_queues_v2_hw(struct Scsi_Host *shost)
+{
+ struct hisi_hba *hisi_hba = shost_priv(shost);
+ struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
+ const struct cpumask *mask;
+ unsigned int queue, cpu;
+
+ for (queue = 0; queue < qmap->nr_queues; queue++) {
+ mask = irq_get_affinity_mask(hisi_hba->irq_map[96 + queue]);
+ if (!mask)
+ continue;
+
+ for_each_cpu(cpu, mask)
+ qmap->mq_map[cpu] = qmap->queue_offset + queue;
+ }
+
+	return 0;
+}
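
For contrast, had no affinity masks been available, the usual fallback is the generic blk-mq spread (hedged sketch, not part of this patch; needs <linux/blk-mq.h>):

	static int example_map_queues_fallback(struct Scsi_Host *shost)
	{
		/* round-robin CPUs across queues, ignoring IRQ affinity */
		return blk_mq_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT]);
	}

Copying the per-IRQ affinity masks into mq_map, as map_queues_v2_hw() does, instead keeps completions on the CPUs that submitted the I/O.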
+
static struct scsi_host_template sht_v2_hw = {
.name = DRV_NAME,
.proc_name = DRV_NAME,
@@ -3553,10 +3590,13 @@ static struct scsi_host_template sht_v2_hw = {
#endif
.shost_attrs = host_attrs_v2_hw,
.host_reset = hisi_sas_host_reset,
+ .map_queues = map_queues_v2_hw,
+ .host_tagset = 1,
};
static const struct hisi_sas_hw hisi_sas_v2_hw = {
.hw_init = hisi_sas_v2_init,
+ .interrupt_preinit = hisi_sas_v2_interrupt_preinit,
.setup_itct = setup_itct_v2_hw,
.slot_index_alloc = slot_index_alloc_quirk_v2_hw,
.alloc_dev = alloc_dev_quirk_v2_hw,
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 960de375ce69..7c12804b4e1d 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -522,6 +522,8 @@ module_param(auto_affine_msi_experimental, bool, 0444);
MODULE_PARM_DESC(auto_affine_msi_experimental, "Enable auto-affinity of MSI IRQs as experimental:\n"
"default is off");
+static void debugfs_work_handler_v3_hw(struct work_struct *work);
+
static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off)
{
void __iomem *regs = hisi_hba->regs + off;
@@ -2409,8 +2411,7 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
DRV_NAME " phy", hisi_hba);
if (rc) {
dev_err(dev, "could not request phy interrupt, rc=%d\n", rc);
- rc = -ENOENT;
- goto free_irq_vectors;
+ return -ENOENT;
}
rc = devm_request_irq(dev, pci_irq_vector(pdev, 2),
@@ -2418,8 +2419,7 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
DRV_NAME " channel", hisi_hba);
if (rc) {
dev_err(dev, "could not request chnl interrupt, rc=%d\n", rc);
- rc = -ENOENT;
- goto free_irq_vectors;
+ return -ENOENT;
}
rc = devm_request_irq(dev, pci_irq_vector(pdev, 11),
@@ -2427,8 +2427,7 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
DRV_NAME " fatal", hisi_hba);
if (rc) {
dev_err(dev, "could not request fatal interrupt, rc=%d\n", rc);
- rc = -ENOENT;
- goto free_irq_vectors;
+ return -ENOENT;
}
if (hisi_sas_intr_conv)
@@ -2449,8 +2448,7 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
if (rc) {
dev_err(dev, "could not request cq%d interrupt, rc=%d\n",
i, rc);
- rc = -ENOENT;
- goto free_irq_vectors;
+ return -ENOENT;
}
cq->irq_mask = pci_irq_get_affinity(pdev, i + BASE_VECTORS_V3_HW);
if (!cq->irq_mask) {
@@ -2460,10 +2458,6 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
}
return 0;
-
-free_irq_vectors:
- pci_free_irq_vectors(pdev);
- return rc;
}
static int hisi_sas_v3_init(struct hisi_hba *hisi_hba)
@@ -2766,6 +2760,19 @@ static struct device_attribute *host_attrs_v3_hw[] = {
NULL
};
+#define HISI_SAS_DEBUGFS_REG(x) {#x, x}
+
+struct hisi_sas_debugfs_reg_lu {
+ char *name;
+ int off;
+};
+
+struct hisi_sas_debugfs_reg {
+ const struct hisi_sas_debugfs_reg_lu *lu;
+ int count;
+ int base_off;
+};
+
static const struct hisi_sas_debugfs_reg_lu debugfs_port_reg_lu[] = {
HISI_SAS_DEBUGFS_REG(PHY_CFG),
HISI_SAS_DEBUGFS_REG(HARD_PHY_LINKRATE),
@@ -2821,7 +2828,6 @@ static const struct hisi_sas_debugfs_reg debugfs_port_reg = {
.lu = debugfs_port_reg_lu,
.count = 0x100,
.base_off = PORT_BASE,
- .read_port_reg = hisi_sas_phy_read32,
};
static const struct hisi_sas_debugfs_reg_lu debugfs_global_reg_lu[] = {
@@ -2894,7 +2900,6 @@ static const struct hisi_sas_debugfs_reg_lu debugfs_global_reg_lu[] = {
static const struct hisi_sas_debugfs_reg debugfs_global_reg = {
.lu = debugfs_global_reg_lu,
.count = 0x800,
- .read_global_reg = hisi_sas_read32,
};
static const struct hisi_sas_debugfs_reg_lu debugfs_axi_reg_lu[] = {
@@ -2909,7 +2914,6 @@ static const struct hisi_sas_debugfs_reg debugfs_axi_reg = {
.lu = debugfs_axi_reg_lu,
.count = 0x61,
.base_off = AXI_MASTER_CFG_BASE,
- .read_global_reg = hisi_sas_read32,
};
static const struct hisi_sas_debugfs_reg_lu debugfs_ras_reg_lu[] = {
@@ -2927,7 +2931,6 @@ static const struct hisi_sas_debugfs_reg debugfs_ras_reg = {
.lu = debugfs_ras_reg_lu,
.count = 0x10,
.base_off = RAS_BASE,
- .read_global_reg = hisi_sas_read32,
};
static void debugfs_snapshot_prepare_v3_hw(struct hisi_hba *hisi_hba)
@@ -3147,7 +3150,6 @@ static struct scsi_host_template sht_v3_hw = {
};
static const struct hisi_sas_hw hisi_sas_v3_hw = {
- .hw_init = hisi_sas_v3_init,
.setup_itct = setup_itct_v3_hw,
.get_wideport_bitmap = get_wideport_bitmap_v3_hw,
.complete_hdr_size = sizeof(struct hisi_sas_complete_v3_hdr),
@@ -3170,14 +3172,6 @@ static const struct hisi_sas_hw hisi_sas_v3_hw = {
.get_events = phy_get_events_v3_hw,
.write_gpio = write_gpio_v3_hw,
.wait_cmds_complete_timeout = wait_cmds_complete_timeout_v3_hw,
- .debugfs_reg_array[DEBUGFS_GLOBAL] = &debugfs_global_reg,
- .debugfs_reg_array[DEBUGFS_AXI] = &debugfs_axi_reg,
- .debugfs_reg_array[DEBUGFS_RAS] = &debugfs_ras_reg,
- .debugfs_reg_port = &debugfs_port_reg,
- .snapshot_prepare = debugfs_snapshot_prepare_v3_hw,
- .snapshot_restore = debugfs_snapshot_restore_v3_hw,
- .read_iost_itct_cache = read_iost_itct_cache_v3_hw,
- .set_bist = debugfs_set_bist_v3_hw,
};
static struct Scsi_Host *
@@ -3195,7 +3189,7 @@ hisi_sas_shost_alloc_pci(struct pci_dev *pdev)
hisi_hba = shost_priv(shost);
INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
- INIT_WORK(&hisi_hba->debugfs_work, hisi_sas_debugfs_work_handler);
+ INIT_WORK(&hisi_hba->debugfs_work, debugfs_work_handler_v3_hw);
hisi_hba->hw = &hisi_sas_v3_hw;
hisi_hba->pci_dev = pdev;
hisi_hba->dev = dev;
@@ -3223,6 +3217,1196 @@ err_out:
return NULL;
}
+static void debugfs_snapshot_cq_reg_v3_hw(struct hisi_hba *hisi_hba)
+{
+ int queue_entry_size = hisi_hba->hw->complete_hdr_size;
+ int dump_index = hisi_hba->debugfs_dump_index;
+ int i;
+
+ for (i = 0; i < hisi_hba->queue_count; i++)
+ memcpy(hisi_hba->debugfs_cq[dump_index][i].complete_hdr,
+ hisi_hba->complete_hdr[i],
+ HISI_SAS_QUEUE_SLOTS * queue_entry_size);
+}
+
+static void debugfs_snapshot_dq_reg_v3_hw(struct hisi_hba *hisi_hba)
+{
+ int queue_entry_size = sizeof(struct hisi_sas_cmd_hdr);
+ int dump_index = hisi_hba->debugfs_dump_index;
+ int i;
+
+ for (i = 0; i < hisi_hba->queue_count; i++) {
+ struct hisi_sas_cmd_hdr *debugfs_cmd_hdr, *cmd_hdr;
+ int j;
+
+ debugfs_cmd_hdr = hisi_hba->debugfs_dq[dump_index][i].hdr;
+ cmd_hdr = hisi_hba->cmd_hdr[i];
+
+ for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++)
+ memcpy(&debugfs_cmd_hdr[j], &cmd_hdr[j],
+ queue_entry_size);
+ }
+}
+
+static void debugfs_snapshot_port_reg_v3_hw(struct hisi_hba *hisi_hba)
+{
+ int dump_index = hisi_hba->debugfs_dump_index;
+ const struct hisi_sas_debugfs_reg *port = &debugfs_port_reg;
+ int i, phy_cnt;
+ u32 offset;
+ u32 *databuf;
+
+ for (phy_cnt = 0; phy_cnt < hisi_hba->n_phy; phy_cnt++) {
+ databuf = hisi_hba->debugfs_port_reg[dump_index][phy_cnt].data;
+ for (i = 0; i < port->count; i++, databuf++) {
+ offset = port->base_off + 4 * i;
+ *databuf = hisi_sas_phy_read32(hisi_hba, phy_cnt,
+ offset);
+ }
+ }
+}
+
+static void debugfs_snapshot_global_reg_v3_hw(struct hisi_hba *hisi_hba)
+{
+ int dump_index = hisi_hba->debugfs_dump_index;
+ u32 *databuf = hisi_hba->debugfs_regs[dump_index][DEBUGFS_GLOBAL].data;
+ int i;
+
+	for (i = 0; i < debugfs_global_reg.count; i++, databuf++)
+ *databuf = hisi_sas_read32(hisi_hba, 4 * i);
+}
+
+static void debugfs_snapshot_axi_reg_v3_hw(struct hisi_hba *hisi_hba)
+{
+ int dump_index = hisi_hba->debugfs_dump_index;
+ u32 *databuf = hisi_hba->debugfs_regs[dump_index][DEBUGFS_AXI].data;
+ const struct hisi_sas_debugfs_reg *axi = &debugfs_axi_reg;
+ int i;
+
+ for (i = 0; i < axi->count; i++, databuf++)
+ *databuf = hisi_sas_read32(hisi_hba, 4 * i + axi->base_off);
+}
+
+static void debugfs_snapshot_ras_reg_v3_hw(struct hisi_hba *hisi_hba)
+{
+ int dump_index = hisi_hba->debugfs_dump_index;
+ u32 *databuf = hisi_hba->debugfs_regs[dump_index][DEBUGFS_RAS].data;
+ const struct hisi_sas_debugfs_reg *ras = &debugfs_ras_reg;
+ int i;
+
+ for (i = 0; i < ras->count; i++, databuf++)
+ *databuf = hisi_sas_read32(hisi_hba, 4 * i + ras->base_off);
+}
+
+static void debugfs_snapshot_itct_reg_v3_hw(struct hisi_hba *hisi_hba)
+{
+ int dump_index = hisi_hba->debugfs_dump_index;
+ void *cachebuf = hisi_hba->debugfs_itct_cache[dump_index].cache;
+ void *databuf = hisi_hba->debugfs_itct[dump_index].itct;
+ struct hisi_sas_itct *itct;
+ int i;
+
+ read_iost_itct_cache_v3_hw(hisi_hba, HISI_SAS_ITCT_CACHE, cachebuf);
+
+ itct = hisi_hba->itct;
+
+ for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, itct++) {
+ memcpy(databuf, itct, sizeof(struct hisi_sas_itct));
+ databuf += sizeof(struct hisi_sas_itct);
+ }
+}
+
+static void debugfs_snapshot_iost_reg_v3_hw(struct hisi_hba *hisi_hba)
+{
+ int dump_index = hisi_hba->debugfs_dump_index;
+ int max_command_entries = HISI_SAS_MAX_COMMANDS;
+ void *cachebuf = hisi_hba->debugfs_iost_cache[dump_index].cache;
+ void *databuf = hisi_hba->debugfs_iost[dump_index].iost;
+ struct hisi_sas_iost *iost;
+ int i;
+
+ read_iost_itct_cache_v3_hw(hisi_hba, HISI_SAS_IOST_CACHE, cachebuf);
+
+ iost = hisi_hba->iost;
+
+ for (i = 0; i < max_command_entries; i++, iost++) {
+ memcpy(databuf, iost, sizeof(struct hisi_sas_iost));
+ databuf += sizeof(struct hisi_sas_iost);
+ }
+}
+
+static const char *
+debugfs_to_reg_name_v3_hw(int off, int base_off,
+ const struct hisi_sas_debugfs_reg_lu *lu)
+{
+ for (; lu->name; lu++) {
+ if (off == lu->off - base_off)
+ return lu->name;
+ }
+
+ return NULL;
+}
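
The walk above stops at the first entry whose ->name is NULL, so each *_reg_lu[] table is expected to end with an empty sentinel (illustrative sketch; the single entry shown is borrowed from the port table):

	static const struct hisi_sas_debugfs_reg_lu example_reg_lu[] = {
		HISI_SAS_DEBUGFS_REG(PHY_CFG),
		{ }	/* sentinel: terminates debugfs_to_reg_name_v3_hw() */
	};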
+
+static void debugfs_print_reg_v3_hw(u32 *regs_val, struct seq_file *s,
+ const struct hisi_sas_debugfs_reg *reg)
+{
+ int i;
+
+ for (i = 0; i < reg->count; i++) {
+ int off = i * 4;
+ const char *name;
+
+ name = debugfs_to_reg_name_v3_hw(off, reg->base_off,
+ reg->lu);
+
+ if (name)
+ seq_printf(s, "0x%08x 0x%08x %s\n", off,
+ regs_val[i], name);
+ else
+ seq_printf(s, "0x%08x 0x%08x\n", off,
+ regs_val[i]);
+ }
+}
+
+static int debugfs_global_v3_hw_show(struct seq_file *s, void *p)
+{
+ struct hisi_sas_debugfs_regs *global = s->private;
+
+ debugfs_print_reg_v3_hw(global->data, s,
+ &debugfs_global_reg);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(debugfs_global_v3_hw);
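
DEFINE_SHOW_ATTRIBUTE() generates the single_open() boilerplate that the BIST files later in this hunk still spell out by hand; roughly, for an attribute called name (paraphrased from include/linux/seq_file.h, check the exact definition in your tree):

	static int name_open(struct inode *inode, struct file *file)
	{
		return single_open(file, name_show, inode->i_private);
	}

	static const struct file_operations name_fops = {
		.owner   = THIS_MODULE,
		.open    = name_open,
		.read    = seq_read,
		.llseek  = seq_lseek,
		.release = single_release,
	};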
+
+static int debugfs_axi_v3_hw_show(struct seq_file *s, void *p)
+{
+ struct hisi_sas_debugfs_regs *axi = s->private;
+
+ debugfs_print_reg_v3_hw(axi->data, s,
+ &debugfs_axi_reg);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(debugfs_axi_v3_hw);
+
+static int debugfs_ras_v3_hw_show(struct seq_file *s, void *p)
+{
+ struct hisi_sas_debugfs_regs *ras = s->private;
+
+ debugfs_print_reg_v3_hw(ras->data, s,
+ &debugfs_ras_reg);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(debugfs_ras_v3_hw);
+
+static int debugfs_port_v3_hw_show(struct seq_file *s, void *p)
+{
+ struct hisi_sas_debugfs_port *port = s->private;
+ const struct hisi_sas_debugfs_reg *reg_port = &debugfs_port_reg;
+
+ debugfs_print_reg_v3_hw(port->data, s, reg_port);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(debugfs_port_v3_hw);
+
+static void debugfs_show_row_64_v3_hw(struct seq_file *s, int index,
+ int sz, __le64 *ptr)
+{
+ int i;
+
+	/* completion header size is not fixed across HW versions */
+ seq_printf(s, "index %04d:\n\t", index);
+ for (i = 1; i <= sz / 8; i++, ptr++) {
+ seq_printf(s, " 0x%016llx", le64_to_cpu(*ptr));
+ if (!(i % 2))
+ seq_puts(s, "\n\t");
+ }
+
+ seq_puts(s, "\n");
+}
+
+static void debugfs_show_row_32_v3_hw(struct seq_file *s, int index,
+ int sz, __le32 *ptr)
+{
+ int i;
+
+	/* completion header size is not fixed across HW versions */
+ seq_printf(s, "index %04d:\n\t", index);
+ for (i = 1; i <= sz / 4; i++, ptr++) {
+ seq_printf(s, " 0x%08x", le32_to_cpu(*ptr));
+ if (!(i % 4))
+ seq_puts(s, "\n\t");
+ }
+ seq_puts(s, "\n");
+}
+
+static void debugfs_cq_show_slot_v3_hw(struct seq_file *s, int slot,
+ struct hisi_sas_debugfs_cq *debugfs_cq)
+{
+ struct hisi_sas_cq *cq = debugfs_cq->cq;
+ struct hisi_hba *hisi_hba = cq->hisi_hba;
+ __le32 *complete_hdr = debugfs_cq->complete_hdr +
+ (hisi_hba->hw->complete_hdr_size * slot);
+
+ debugfs_show_row_32_v3_hw(s, slot,
+ hisi_hba->hw->complete_hdr_size,
+ complete_hdr);
+}
+
+static int debugfs_cq_v3_hw_show(struct seq_file *s, void *p)
+{
+ struct hisi_sas_debugfs_cq *debugfs_cq = s->private;
+ int slot;
+
+ for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++)
+ debugfs_cq_show_slot_v3_hw(s, slot, debugfs_cq);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(debugfs_cq_v3_hw);
+
+static void debugfs_dq_show_slot_v3_hw(struct seq_file *s, int slot,
+ void *dq_ptr)
+{
+ struct hisi_sas_debugfs_dq *debugfs_dq = dq_ptr;
+ void *cmd_queue = debugfs_dq->hdr;
+ __le32 *cmd_hdr = cmd_queue +
+ sizeof(struct hisi_sas_cmd_hdr) * slot;
+
+ debugfs_show_row_32_v3_hw(s, slot, sizeof(struct hisi_sas_cmd_hdr),
+ cmd_hdr);
+}
+
+static int debugfs_dq_v3_hw_show(struct seq_file *s, void *p)
+{
+ int slot;
+
+ for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++)
+ debugfs_dq_show_slot_v3_hw(s, slot, s->private);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(debugfs_dq_v3_hw);
+
+static int debugfs_iost_v3_hw_show(struct seq_file *s, void *p)
+{
+ struct hisi_sas_debugfs_iost *debugfs_iost = s->private;
+ struct hisi_sas_iost *iost = debugfs_iost->iost;
+ int i, max_command_entries = HISI_SAS_MAX_COMMANDS;
+
+ for (i = 0; i < max_command_entries; i++, iost++) {
+ __le64 *data = &iost->qw0;
+
+ debugfs_show_row_64_v3_hw(s, i, sizeof(*iost), data);
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(debugfs_iost_v3_hw);
+
+static int debugfs_iost_cache_v3_hw_show(struct seq_file *s, void *p)
+{
+ struct hisi_sas_debugfs_iost_cache *debugfs_iost_cache = s->private;
+ struct hisi_sas_iost_itct_cache *iost_cache =
+ debugfs_iost_cache->cache;
+ u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * 4;
+ int i, tab_idx;
+ __le64 *iost;
+
+ for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_NUM; i++, iost_cache++) {
+ /*
+ * Data struct of IOST cache:
+ * Data[1]: BIT0~15: Table index
+ * Bit16: Valid mask
+ * Data[2]~[9]: IOST table
+ */
+ tab_idx = (iost_cache->data[1] & 0xffff);
+ iost = (__le64 *)iost_cache;
+
+ debugfs_show_row_64_v3_hw(s, tab_idx, cache_size, iost);
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(debugfs_iost_cache_v3_hw);
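
A hedged sketch of the row decode described in the comment above; the helper names are invented for illustration:

	static inline int example_cache_tab_idx(struct hisi_sas_iost_itct_cache *row)
	{
		return row->data[1] & 0xffff;	/* BIT0~15: table index */
	}

	static inline bool example_cache_row_valid(struct hisi_sas_iost_itct_cache *row)
	{
		return row->data[1] & BIT(16);	/* BIT16: valid mask */
	}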
+
+static int debugfs_itct_v3_hw_show(struct seq_file *s, void *p)
+{
+ int i;
+ struct hisi_sas_debugfs_itct *debugfs_itct = s->private;
+ struct hisi_sas_itct *itct = debugfs_itct->itct;
+
+ for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, itct++) {
+ __le64 *data = &itct->qw0;
+
+ debugfs_show_row_64_v3_hw(s, i, sizeof(*itct), data);
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(debugfs_itct_v3_hw);
+
+static int debugfs_itct_cache_v3_hw_show(struct seq_file *s, void *p)
+{
+ struct hisi_sas_debugfs_itct_cache *debugfs_itct_cache = s->private;
+ struct hisi_sas_iost_itct_cache *itct_cache =
+ debugfs_itct_cache->cache;
+ u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * 4;
+ int i, tab_idx;
+ __le64 *itct;
+
+ for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_NUM; i++, itct_cache++) {
+ /*
+ * Data struct of ITCT cache:
+ * Data[1]: BIT0~15: Table index
+ * Bit16: Valid mask
+ * Data[2]~[9]: ITCT table
+ */
+ tab_idx = itct_cache->data[1] & 0xffff;
+ itct = (__le64 *)itct_cache;
+
+ debugfs_show_row_64_v3_hw(s, tab_idx, cache_size, itct);
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(debugfs_itct_cache_v3_hw);
+
+static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba)
+{
+ u64 *debugfs_timestamp;
+ int dump_index = hisi_hba->debugfs_dump_index;
+ struct dentry *dump_dentry;
+ struct dentry *dentry;
+ char name[256];
+ int p;
+ int c;
+ int d;
+
+ snprintf(name, 256, "%d", dump_index);
+
+ dump_dentry = debugfs_create_dir(name, hisi_hba->debugfs_dump_dentry);
+
+ debugfs_timestamp = &hisi_hba->debugfs_timestamp[dump_index];
+
+ debugfs_create_u64("timestamp", 0400, dump_dentry,
+ debugfs_timestamp);
+
+ debugfs_create_file("global", 0400, dump_dentry,
+ &hisi_hba->debugfs_regs[dump_index][DEBUGFS_GLOBAL],
+ &debugfs_global_v3_hw_fops);
+
+ /* Create port dir and files */
+ dentry = debugfs_create_dir("port", dump_dentry);
+ for (p = 0; p < hisi_hba->n_phy; p++) {
+ snprintf(name, 256, "%d", p);
+
+ debugfs_create_file(name, 0400, dentry,
+ &hisi_hba->debugfs_port_reg[dump_index][p],
+ &debugfs_port_v3_hw_fops);
+ }
+
+ /* Create CQ dir and files */
+ dentry = debugfs_create_dir("cq", dump_dentry);
+ for (c = 0; c < hisi_hba->queue_count; c++) {
+ snprintf(name, 256, "%d", c);
+
+ debugfs_create_file(name, 0400, dentry,
+ &hisi_hba->debugfs_cq[dump_index][c],
+ &debugfs_cq_v3_hw_fops);
+ }
+
+ /* Create DQ dir and files */
+ dentry = debugfs_create_dir("dq", dump_dentry);
+ for (d = 0; d < hisi_hba->queue_count; d++) {
+ snprintf(name, 256, "%d", d);
+
+ debugfs_create_file(name, 0400, dentry,
+ &hisi_hba->debugfs_dq[dump_index][d],
+ &debugfs_dq_v3_hw_fops);
+ }
+
+ debugfs_create_file("iost", 0400, dump_dentry,
+ &hisi_hba->debugfs_iost[dump_index],
+ &debugfs_iost_v3_hw_fops);
+
+ debugfs_create_file("iost_cache", 0400, dump_dentry,
+ &hisi_hba->debugfs_iost_cache[dump_index],
+ &debugfs_iost_cache_v3_hw_fops);
+
+ debugfs_create_file("itct", 0400, dump_dentry,
+ &hisi_hba->debugfs_itct[dump_index],
+ &debugfs_itct_v3_hw_fops);
+
+ debugfs_create_file("itct_cache", 0400, dump_dentry,
+ &hisi_hba->debugfs_itct_cache[dump_index],
+ &debugfs_itct_cache_v3_hw_fops);
+
+ debugfs_create_file("axi", 0400, dump_dentry,
+ &hisi_hba->debugfs_regs[dump_index][DEBUGFS_AXI],
+ &debugfs_axi_v3_hw_fops);
+
+ debugfs_create_file("ras", 0400, dump_dentry,
+ &hisi_hba->debugfs_regs[dump_index][DEBUGFS_RAS],
+ &debugfs_ras_v3_hw_fops);
+}
+
+static void debugfs_snapshot_regs_v3_hw(struct hisi_hba *hisi_hba)
+{
+ debugfs_snapshot_prepare_v3_hw(hisi_hba);
+
+ debugfs_snapshot_global_reg_v3_hw(hisi_hba);
+ debugfs_snapshot_port_reg_v3_hw(hisi_hba);
+ debugfs_snapshot_axi_reg_v3_hw(hisi_hba);
+ debugfs_snapshot_ras_reg_v3_hw(hisi_hba);
+ debugfs_snapshot_cq_reg_v3_hw(hisi_hba);
+ debugfs_snapshot_dq_reg_v3_hw(hisi_hba);
+ debugfs_snapshot_itct_reg_v3_hw(hisi_hba);
+ debugfs_snapshot_iost_reg_v3_hw(hisi_hba);
+
+ debugfs_create_files_v3_hw(hisi_hba);
+
+ debugfs_snapshot_restore_v3_hw(hisi_hba);
+}
+
+static ssize_t debugfs_trigger_dump_v3_hw_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct hisi_hba *hisi_hba = file->f_inode->i_private;
+ char buf[8];
+
+ if (hisi_hba->debugfs_dump_index >= hisi_sas_debugfs_dump_count)
+ return -EFAULT;
+
+ if (count > 8)
+ return -EFAULT;
+
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+
+ if (buf[0] != '1')
+ return -EFAULT;
+
+ queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);
+
+ return count;
+}
+
+static const struct file_operations debugfs_trigger_dump_v3_hw_fops = {
+ .write = &debugfs_trigger_dump_v3_hw_write,
+ .owner = THIS_MODULE,
+};
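
Usage sketch from userspace (paths assumed: debugfs mounted at /sys/kernel/debug, top-level directory named after DRV_NAME); anything other than a leading '1' is rejected by the write handler above:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	static int trigger_dump(const char *host)	/* e.g. "0000:74:02.0" */
	{
		char path[256];
		int fd, ok;

		snprintf(path, sizeof(path),
			 "/sys/kernel/debug/hisi_sas/%s/trigger_dump", host);
		fd = open(path, O_WRONLY);
		if (fd < 0)
			return -1;
		ok = write(fd, "1", 1) == 1;
		close(fd);
		return ok ? 0 : -1;
	}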
+
+enum {
+ HISI_SAS_BIST_LOOPBACK_MODE_DIGITAL = 0,
+ HISI_SAS_BIST_LOOPBACK_MODE_SERDES,
+ HISI_SAS_BIST_LOOPBACK_MODE_REMOTE,
+};
+
+static const struct {
+ int value;
+ char *name;
+} debugfs_loop_linkrate_v3_hw[] = {
+ { SAS_LINK_RATE_1_5_GBPS, "1.5 Gbit" },
+ { SAS_LINK_RATE_3_0_GBPS, "3.0 Gbit" },
+ { SAS_LINK_RATE_6_0_GBPS, "6.0 Gbit" },
+ { SAS_LINK_RATE_12_0_GBPS, "12.0 Gbit" },
+};
+
+static int debugfs_bist_linkrate_v3_hw_show(struct seq_file *s, void *p)
+{
+ struct hisi_hba *hisi_hba = s->private;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_loop_linkrate_v3_hw); i++) {
+ int match = (hisi_hba->debugfs_bist_linkrate ==
+ debugfs_loop_linkrate_v3_hw[i].value);
+
+ seq_printf(s, "%s%s%s ", match ? "[" : "",
+ debugfs_loop_linkrate_v3_hw[i].name,
+ match ? "]" : "");
+ }
+ seq_puts(s, "\n");
+
+ return 0;
+}
+
+static ssize_t debugfs_bist_linkrate_v3_hw_write(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *m = filp->private_data;
+ struct hisi_hba *hisi_hba = m->private;
+ char kbuf[16] = {}, *pkbuf;
+ bool found = false;
+ int i;
+
+ if (hisi_hba->debugfs_bist_enable)
+ return -EPERM;
+
+ if (count >= sizeof(kbuf))
+ return -EOVERFLOW;
+
+ if (copy_from_user(kbuf, buf, count))
+ return -EINVAL;
+
+ pkbuf = strstrip(kbuf);
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_loop_linkrate_v3_hw); i++) {
+ if (!strncmp(debugfs_loop_linkrate_v3_hw[i].name,
+ pkbuf, 16)) {
+ hisi_hba->debugfs_bist_linkrate =
+ debugfs_loop_linkrate_v3_hw[i].value;
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ return -EINVAL;
+
+ return count;
+}
+
+static int debugfs_bist_linkrate_v3_hw_open(struct inode *inode,
+ struct file *filp)
+{
+ return single_open(filp, debugfs_bist_linkrate_v3_hw_show,
+ inode->i_private);
+}
+
+static const struct file_operations debugfs_bist_linkrate_v3_hw_fops = {
+ .open = debugfs_bist_linkrate_v3_hw_open,
+ .read = seq_read,
+ .write = debugfs_bist_linkrate_v3_hw_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static const struct {
+ int value;
+ char *name;
+} debugfs_loop_code_mode_v3_hw[] = {
+ { HISI_SAS_BIST_CODE_MODE_PRBS7, "PRBS7" },
+ { HISI_SAS_BIST_CODE_MODE_PRBS23, "PRBS23" },
+ { HISI_SAS_BIST_CODE_MODE_PRBS31, "PRBS31" },
+ { HISI_SAS_BIST_CODE_MODE_JTPAT, "JTPAT" },
+ { HISI_SAS_BIST_CODE_MODE_CJTPAT, "CJTPAT" },
+ { HISI_SAS_BIST_CODE_MODE_SCRAMBED_0, "SCRAMBED_0" },
+ { HISI_SAS_BIST_CODE_MODE_TRAIN, "TRAIN" },
+ { HISI_SAS_BIST_CODE_MODE_TRAIN_DONE, "TRAIN_DONE" },
+ { HISI_SAS_BIST_CODE_MODE_HFTP, "HFTP" },
+ { HISI_SAS_BIST_CODE_MODE_MFTP, "MFTP" },
+ { HISI_SAS_BIST_CODE_MODE_LFTP, "LFTP" },
+ { HISI_SAS_BIST_CODE_MODE_FIXED_DATA, "FIXED_DATA" },
+};
+
+static int debugfs_bist_code_mode_v3_hw_show(struct seq_file *s, void *p)
+{
+ struct hisi_hba *hisi_hba = s->private;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_loop_code_mode_v3_hw); i++) {
+ int match = (hisi_hba->debugfs_bist_code_mode ==
+ debugfs_loop_code_mode_v3_hw[i].value);
+
+ seq_printf(s, "%s%s%s ", match ? "[" : "",
+ debugfs_loop_code_mode_v3_hw[i].name,
+ match ? "]" : "");
+ }
+ seq_puts(s, "\n");
+
+ return 0;
+}
+
+static ssize_t debugfs_bist_code_mode_v3_hw_write(struct file *filp,
+ const char __user *buf,
+ size_t count,
+ loff_t *ppos)
+{
+ struct seq_file *m = filp->private_data;
+ struct hisi_hba *hisi_hba = m->private;
+ char kbuf[16] = {}, *pkbuf;
+ bool found = false;
+ int i;
+
+ if (hisi_hba->debugfs_bist_enable)
+ return -EPERM;
+
+ if (count >= sizeof(kbuf))
+ return -EINVAL;
+
+ if (copy_from_user(kbuf, buf, count))
+ return -EOVERFLOW;
+
+ pkbuf = strstrip(kbuf);
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_loop_code_mode_v3_hw); i++) {
+ if (!strncmp(debugfs_loop_code_mode_v3_hw[i].name,
+ pkbuf, 16)) {
+ hisi_hba->debugfs_bist_code_mode =
+ debugfs_loop_code_mode_v3_hw[i].value;
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ return -EINVAL;
+
+ return count;
+}
+
+static int debugfs_bist_code_mode_v3_hw_open(struct inode *inode,
+ struct file *filp)
+{
+ return single_open(filp, debugfs_bist_code_mode_v3_hw_show,
+ inode->i_private);
+}
+
+static const struct file_operations debugfs_bist_code_mode_v3_hw_fops = {
+ .open = debugfs_bist_code_mode_v3_hw_open,
+ .read = seq_read,
+ .write = debugfs_bist_code_mode_v3_hw_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static ssize_t debugfs_bist_phy_v3_hw_write(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *m = filp->private_data;
+ struct hisi_hba *hisi_hba = m->private;
+ unsigned int phy_no;
+ int val;
+
+ if (hisi_hba->debugfs_bist_enable)
+ return -EPERM;
+
+ val = kstrtouint_from_user(buf, count, 0, &phy_no);
+ if (val)
+ return val;
+
+ if (phy_no >= hisi_hba->n_phy)
+ return -EINVAL;
+
+ hisi_hba->debugfs_bist_phy_no = phy_no;
+
+ return count;
+}
+
+static int debugfs_bist_phy_v3_hw_show(struct seq_file *s, void *p)
+{
+ struct hisi_hba *hisi_hba = s->private;
+
+ seq_printf(s, "%d\n", hisi_hba->debugfs_bist_phy_no);
+
+ return 0;
+}
+
+static int debugfs_bist_phy_v3_hw_open(struct inode *inode,
+ struct file *filp)
+{
+ return single_open(filp, debugfs_bist_phy_v3_hw_show,
+ inode->i_private);
+}
+
+static const struct file_operations debugfs_bist_phy_v3_hw_fops = {
+ .open = debugfs_bist_phy_v3_hw_open,
+ .read = seq_read,
+ .write = debugfs_bist_phy_v3_hw_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
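
The numeric writes above lean on kstrtouint_from_user(); its effect is roughly the bounce-buffer-then-parse pattern that the string-valued writes in this file open-code (sketch only, not the lib/kstrtox.c implementation):

	static int example_parse_uint(const char __user *ubuf, size_t count,
				      unsigned int *out)
	{
		char kbuf[16] = {};

		if (count >= sizeof(kbuf))
			return -EINVAL;
		if (copy_from_user(kbuf, ubuf, count))
			return -EFAULT;
		return kstrtouint(strstrip(kbuf), 0, out);
	}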
+
+static const struct {
+ int value;
+ char *name;
+} debugfs_loop_modes_v3_hw[] = {
+ { HISI_SAS_BIST_LOOPBACK_MODE_DIGITAL, "digital" },
+ { HISI_SAS_BIST_LOOPBACK_MODE_SERDES, "serdes" },
+ { HISI_SAS_BIST_LOOPBACK_MODE_REMOTE, "remote" },
+};
+
+static int debugfs_bist_mode_v3_hw_show(struct seq_file *s, void *p)
+{
+ struct hisi_hba *hisi_hba = s->private;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_loop_modes_v3_hw); i++) {
+ int match = (hisi_hba->debugfs_bist_mode ==
+ debugfs_loop_modes_v3_hw[i].value);
+
+ seq_printf(s, "%s%s%s ", match ? "[" : "",
+ debugfs_loop_modes_v3_hw[i].name,
+ match ? "]" : "");
+ }
+ seq_puts(s, "\n");
+
+ return 0;
+}
+
+static ssize_t debugfs_bist_mode_v3_hw_write(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *m = filp->private_data;
+ struct hisi_hba *hisi_hba = m->private;
+ char kbuf[16] = {}, *pkbuf;
+ bool found = false;
+ int i;
+
+ if (hisi_hba->debugfs_bist_enable)
+ return -EPERM;
+
+ if (count >= sizeof(kbuf))
+ return -EINVAL;
+
+ if (copy_from_user(kbuf, buf, count))
+ return -EOVERFLOW;
+
+ pkbuf = strstrip(kbuf);
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_loop_modes_v3_hw); i++) {
+ if (!strncmp(debugfs_loop_modes_v3_hw[i].name, pkbuf, 16)) {
+ hisi_hba->debugfs_bist_mode =
+ debugfs_loop_modes_v3_hw[i].value;
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ return -EINVAL;
+
+ return count;
+}
+
+static int debugfs_bist_mode_v3_hw_open(struct inode *inode,
+ struct file *filp)
+{
+ return single_open(filp, debugfs_bist_mode_v3_hw_show,
+ inode->i_private);
+}
+
+static const struct file_operations debugfs_bist_mode_v3_hw_fops = {
+ .open = debugfs_bist_mode_v3_hw_open,
+ .read = seq_read,
+ .write = debugfs_bist_mode_v3_hw_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static ssize_t debugfs_bist_enable_v3_hw_write(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *m = filp->private_data;
+ struct hisi_hba *hisi_hba = m->private;
+ unsigned int enable;
+ int val;
+
+ val = kstrtouint_from_user(buf, count, 0, &enable);
+ if (val)
+ return val;
+
+ if (enable > 1)
+ return -EINVAL;
+
+ if (enable == hisi_hba->debugfs_bist_enable)
+ return count;
+
+ val = debugfs_set_bist_v3_hw(hisi_hba, enable);
+ if (val < 0)
+ return val;
+
+ hisi_hba->debugfs_bist_enable = enable;
+
+ return count;
+}
+
+static int debugfs_bist_enable_v3_hw_show(struct seq_file *s, void *p)
+{
+ struct hisi_hba *hisi_hba = s->private;
+
+ seq_printf(s, "%d\n", hisi_hba->debugfs_bist_enable);
+
+ return 0;
+}
+
+static int debugfs_bist_enable_v3_hw_open(struct inode *inode,
+ struct file *filp)
+{
+ return single_open(filp, debugfs_bist_enable_v3_hw_show,
+ inode->i_private);
+}
+
+static const struct file_operations debugfs_bist_enable_v3_hw_fops = {
+ .open = debugfs_bist_enable_v3_hw_open,
+ .read = seq_read,
+ .write = debugfs_bist_enable_v3_hw_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static const struct {
+ char *name;
+} debugfs_ffe_name_v3_hw[FFE_CFG_MAX] = {
+ { "SAS_1_5_GBPS" },
+ { "SAS_3_0_GBPS" },
+ { "SAS_6_0_GBPS" },
+ { "SAS_12_0_GBPS" },
+ { "FFE_RESV" },
+ { "SATA_1_5_GBPS" },
+ { "SATA_3_0_GBPS" },
+ { "SATA_6_0_GBPS" },
+};
+
+static ssize_t debugfs_v3_hw_write(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *m = filp->private_data;
+ u32 *val = m->private;
+ int res;
+
+ res = kstrtouint_from_user(buf, count, 0, val);
+ if (res)
+ return res;
+
+ return count;
+}
+
+static int debugfs_v3_hw_show(struct seq_file *s, void *p)
+{
+ u32 *val = s->private;
+
+ seq_printf(s, "0x%x\n", *val);
+
+ return 0;
+}
+
+static int debugfs_v3_hw_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, debugfs_v3_hw_show,
+ inode->i_private);
+}
+
+static const struct file_operations debugfs_v3_hw_fops = {
+ .open = debugfs_v3_hw_open,
+ .read = seq_read,
+ .write = debugfs_v3_hw_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
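
Design note, hedged: for plain hex u32 attributes like fixed_code, the stock debugfs helper would avoid the hand-rolled fops at the cost of slightly different output formatting:

	static void example_create_hex_u32(struct dentry *parent, u32 *val)
	{
		debugfs_create_x32("fixed_code", 0600, parent, val);
	}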
+
+static ssize_t debugfs_phy_down_cnt_v3_hw_write(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = filp->private_data;
+ struct hisi_sas_phy *phy = s->private;
+ unsigned int set_val;
+ int res;
+
+ res = kstrtouint_from_user(buf, count, 0, &set_val);
+ if (res)
+ return res;
+
+ if (set_val > 0)
+ return -EINVAL;
+
+ atomic_set(&phy->down_cnt, 0);
+
+ return count;
+}
+
+static int debugfs_phy_down_cnt_v3_hw_show(struct seq_file *s, void *p)
+{
+ struct hisi_sas_phy *phy = s->private;
+
+ seq_printf(s, "%d\n", atomic_read(&phy->down_cnt));
+
+ return 0;
+}
+
+static int debugfs_phy_down_cnt_v3_hw_open(struct inode *inode,
+ struct file *filp)
+{
+ return single_open(filp, debugfs_phy_down_cnt_v3_hw_show,
+ inode->i_private);
+}
+
+static const struct file_operations debugfs_phy_down_cnt_v3_hw_fops = {
+ .open = debugfs_phy_down_cnt_v3_hw_open,
+ .read = seq_read,
+ .write = debugfs_phy_down_cnt_v3_hw_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static void debugfs_work_handler_v3_hw(struct work_struct *work)
+{
+ struct hisi_hba *hisi_hba =
+ container_of(work, struct hisi_hba, debugfs_work);
+ int debugfs_dump_index = hisi_hba->debugfs_dump_index;
+ struct device *dev = hisi_hba->dev;
+ u64 timestamp = local_clock();
+
+ if (debugfs_dump_index >= hisi_sas_debugfs_dump_count) {
+ dev_warn(dev, "dump count exceeded!\n");
+ return;
+ }
+
+ do_div(timestamp, NSEC_PER_MSEC);
+ hisi_hba->debugfs_timestamp[debugfs_dump_index] = timestamp;
+
+ debugfs_snapshot_regs_v3_hw(hisi_hba);
+ hisi_hba->debugfs_dump_index++;
+}
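
Aside on do_div(): it divides the u64 in place (the form 32-bit arches require) and returns the remainder, so after the call above timestamp already holds milliseconds. Sketch:

	static u64 example_ns_to_ms(u64 ns)
	{
		do_div(ns, NSEC_PER_MSEC);	/* ns now holds milliseconds */
		return ns;
	}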
+
+static void debugfs_release_v3_hw(struct hisi_hba *hisi_hba, int dump_index)
+{
+ struct device *dev = hisi_hba->dev;
+ int i;
+
+ devm_kfree(dev, hisi_hba->debugfs_iost_cache[dump_index].cache);
+ devm_kfree(dev, hisi_hba->debugfs_itct_cache[dump_index].cache);
+ devm_kfree(dev, hisi_hba->debugfs_iost[dump_index].iost);
+ devm_kfree(dev, hisi_hba->debugfs_itct[dump_index].itct);
+
+ for (i = 0; i < hisi_hba->queue_count; i++)
+ devm_kfree(dev, hisi_hba->debugfs_dq[dump_index][i].hdr);
+
+ for (i = 0; i < hisi_hba->queue_count; i++)
+ devm_kfree(dev,
+ hisi_hba->debugfs_cq[dump_index][i].complete_hdr);
+
+ for (i = 0; i < DEBUGFS_REGS_NUM; i++)
+ devm_kfree(dev, hisi_hba->debugfs_regs[dump_index][i].data);
+
+ for (i = 0; i < hisi_hba->n_phy; i++)
+ devm_kfree(dev, hisi_hba->debugfs_port_reg[dump_index][i].data);
+}
+
+static const struct hisi_sas_debugfs_reg *debugfs_reg_array_v3_hw[DEBUGFS_REGS_NUM] = {
+ [DEBUGFS_GLOBAL] = &debugfs_global_reg,
+ [DEBUGFS_AXI] = &debugfs_axi_reg,
+ [DEBUGFS_RAS] = &debugfs_ras_reg,
+};
+
+static int debugfs_alloc_v3_hw(struct hisi_hba *hisi_hba, int dump_index)
+{
+ const struct hisi_sas_hw *hw = hisi_hba->hw;
+ struct device *dev = hisi_hba->dev;
+ int p, c, d, r, i;
+ size_t sz;
+
+ for (r = 0; r < DEBUGFS_REGS_NUM; r++) {
+ struct hisi_sas_debugfs_regs *regs =
+ &hisi_hba->debugfs_regs[dump_index][r];
+
+ sz = debugfs_reg_array_v3_hw[r]->count * 4;
+ regs->data = devm_kmalloc(dev, sz, GFP_KERNEL);
+ if (!regs->data)
+ goto fail;
+ regs->hisi_hba = hisi_hba;
+ }
+
+ sz = debugfs_port_reg.count * 4;
+ for (p = 0; p < hisi_hba->n_phy; p++) {
+ struct hisi_sas_debugfs_port *port =
+ &hisi_hba->debugfs_port_reg[dump_index][p];
+
+ port->data = devm_kmalloc(dev, sz, GFP_KERNEL);
+ if (!port->data)
+ goto fail;
+ port->phy = &hisi_hba->phy[p];
+ }
+
+ sz = hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
+ for (c = 0; c < hisi_hba->queue_count; c++) {
+ struct hisi_sas_debugfs_cq *cq =
+ &hisi_hba->debugfs_cq[dump_index][c];
+
+ cq->complete_hdr = devm_kmalloc(dev, sz, GFP_KERNEL);
+ if (!cq->complete_hdr)
+ goto fail;
+ cq->cq = &hisi_hba->cq[c];
+ }
+
+ sz = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
+ for (d = 0; d < hisi_hba->queue_count; d++) {
+ struct hisi_sas_debugfs_dq *dq =
+ &hisi_hba->debugfs_dq[dump_index][d];
+
+ dq->hdr = devm_kmalloc(dev, sz, GFP_KERNEL);
+ if (!dq->hdr)
+ goto fail;
+ dq->dq = &hisi_hba->dq[d];
+ }
+
+ sz = HISI_SAS_MAX_COMMANDS * sizeof(struct hisi_sas_iost);
+
+ hisi_hba->debugfs_iost[dump_index].iost =
+ devm_kmalloc(dev, sz, GFP_KERNEL);
+ if (!hisi_hba->debugfs_iost[dump_index].iost)
+ goto fail;
+
+ sz = HISI_SAS_IOST_ITCT_CACHE_NUM *
+ sizeof(struct hisi_sas_iost_itct_cache);
+
+ hisi_hba->debugfs_iost_cache[dump_index].cache =
+ devm_kmalloc(dev, sz, GFP_KERNEL);
+ if (!hisi_hba->debugfs_iost_cache[dump_index].cache)
+ goto fail;
+
+ sz = HISI_SAS_IOST_ITCT_CACHE_NUM *
+ sizeof(struct hisi_sas_iost_itct_cache);
+
+ hisi_hba->debugfs_itct_cache[dump_index].cache =
+ devm_kmalloc(dev, sz, GFP_KERNEL);
+ if (!hisi_hba->debugfs_itct_cache[dump_index].cache)
+ goto fail;
+
+	/* New memory allocation must be located before the itct allocation */
+ sz = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
+
+ hisi_hba->debugfs_itct[dump_index].itct =
+ devm_kmalloc(dev, sz, GFP_KERNEL);
+ if (!hisi_hba->debugfs_itct[dump_index].itct)
+ goto fail;
+
+ return 0;
+fail:
+ for (i = 0; i < hisi_sas_debugfs_dump_count; i++)
+ debugfs_release_v3_hw(hisi_hba, i);
+ return -ENOMEM;
+}
+
+static void debugfs_phy_down_cnt_init_v3_hw(struct hisi_hba *hisi_hba)
+{
+ struct dentry *dir = debugfs_create_dir("phy_down_cnt",
+ hisi_hba->debugfs_dir);
+ char name[16];
+ int phy_no;
+
+ for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
+ snprintf(name, 16, "%d", phy_no);
+ debugfs_create_file(name, 0600, dir,
+ &hisi_hba->phy[phy_no],
+ &debugfs_phy_down_cnt_v3_hw_fops);
+ }
+}
+
+static void debugfs_bist_init_v3_hw(struct hisi_hba *hisi_hba)
+{
+ struct dentry *ports_dentry;
+ int phy_no;
+
+ hisi_hba->debugfs_bist_dentry =
+ debugfs_create_dir("bist", hisi_hba->debugfs_dir);
+ debugfs_create_file("link_rate", 0600,
+ hisi_hba->debugfs_bist_dentry, hisi_hba,
+ &debugfs_bist_linkrate_v3_hw_fops);
+
+ debugfs_create_file("code_mode", 0600,
+ hisi_hba->debugfs_bist_dentry, hisi_hba,
+ &debugfs_bist_code_mode_v3_hw_fops);
+
+ debugfs_create_file("fixed_code", 0600,
+ hisi_hba->debugfs_bist_dentry,
+ &hisi_hba->debugfs_bist_fixed_code[0],
+ &debugfs_v3_hw_fops);
+
+ debugfs_create_file("fixed_code_1", 0600,
+ hisi_hba->debugfs_bist_dentry,
+ &hisi_hba->debugfs_bist_fixed_code[1],
+ &debugfs_v3_hw_fops);
+
+ debugfs_create_file("phy_id", 0600, hisi_hba->debugfs_bist_dentry,
+ hisi_hba, &debugfs_bist_phy_v3_hw_fops);
+
+ debugfs_create_u32("cnt", 0600, hisi_hba->debugfs_bist_dentry,
+ &hisi_hba->debugfs_bist_cnt);
+
+ debugfs_create_file("loopback_mode", 0600,
+ hisi_hba->debugfs_bist_dentry,
+ hisi_hba, &debugfs_bist_mode_v3_hw_fops);
+
+ debugfs_create_file("enable", 0600, hisi_hba->debugfs_bist_dentry,
+ hisi_hba, &debugfs_bist_enable_v3_hw_fops);
+
+ ports_dentry = debugfs_create_dir("port", hisi_hba->debugfs_bist_dentry);
+
+ for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
+ struct dentry *port_dentry;
+ struct dentry *ffe_dentry;
+ char name[256];
+ int i;
+
+ snprintf(name, 256, "%d", phy_no);
+ port_dentry = debugfs_create_dir(name, ports_dentry);
+ ffe_dentry = debugfs_create_dir("ffe", port_dentry);
+ for (i = 0; i < FFE_CFG_MAX; i++) {
+ if (i == FFE_RESV)
+ continue;
+ debugfs_create_file(debugfs_ffe_name_v3_hw[i].name,
+ 0600, ffe_dentry,
+ &hisi_hba->debugfs_bist_ffe[phy_no][i],
+ &debugfs_v3_hw_fops);
+ }
+ }
+
+ hisi_hba->debugfs_bist_linkrate = SAS_LINK_RATE_1_5_GBPS;
+}
+
+static void debugfs_init_v3_hw(struct hisi_hba *hisi_hba)
+{
+ struct device *dev = hisi_hba->dev;
+ int i;
+
+ hisi_hba->debugfs_dir = debugfs_create_dir(dev_name(dev),
+ hisi_sas_debugfs_dir);
+ debugfs_create_file("trigger_dump", 0200,
+ hisi_hba->debugfs_dir,
+ hisi_hba,
+ &debugfs_trigger_dump_v3_hw_fops);
+
+ /* create bist structures */
+ debugfs_bist_init_v3_hw(hisi_hba);
+
+ hisi_hba->debugfs_dump_dentry =
+ debugfs_create_dir("dump", hisi_hba->debugfs_dir);
+
+ debugfs_phy_down_cnt_init_v3_hw(hisi_hba);
+
+ for (i = 0; i < hisi_sas_debugfs_dump_count; i++) {
+ if (debugfs_alloc_v3_hw(hisi_hba, i)) {
+ debugfs_remove_recursive(hisi_hba->debugfs_dir);
+ dev_dbg(dev, "failed to init debugfs!\n");
+ break;
+ }
+ }
+}
+
+static void debugfs_exit_v3_hw(struct hisi_hba *hisi_hba)
+{
+ debugfs_remove_recursive(hisi_hba->debugfs_dir);
+}
+
static int
hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
@@ -3313,21 +4497,21 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
if (hisi_sas_debugfs_enable)
- hisi_sas_debugfs_init(hisi_hba);
+ debugfs_init_v3_hw(hisi_hba);
rc = interrupt_preinit_v3_hw(hisi_hba);
if (rc)
- goto err_out_ha;
+ goto err_out_debugfs;
dev_err(dev, "%d hw queues\n", shost->nr_hw_queues);
rc = scsi_add_host(shost, dev);
if (rc)
- goto err_out_ha;
+ goto err_out_free_irq_vectors;
rc = sas_register_ha(sha);
if (rc)
goto err_out_register_ha;
- rc = hisi_hba->hw->hw_init(hisi_hba);
+ rc = hisi_sas_v3_init(hisi_hba);
if (rc)
goto err_out_register_ha;
@@ -3348,8 +4532,12 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err_out_register_ha:
scsi_remove_host(shost);
+err_out_free_irq_vectors:
+ pci_free_irq_vectors(pdev);
+err_out_debugfs:
+ debugfs_exit_v3_hw(hisi_hba);
err_out_ha:
- hisi_sas_debugfs_exit(hisi_hba);
+ hisi_sas_free(hisi_hba);
scsi_host_put(shost);
err_out_regions:
pci_release_regions(pdev);
@@ -3394,7 +4582,7 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev)
pci_release_regions(pdev);
pci_disable_device(pdev);
hisi_sas_free(hisi_hba);
- hisi_sas_debugfs_exit(hisi_hba);
+ debugfs_exit_v3_hw(hisi_hba);
scsi_host_put(shost);
}
@@ -3445,7 +4633,6 @@ static int _suspend_v3_hw(struct device *device)
struct hisi_hba *hisi_hba = sha->lldd_ha;
struct device *dev = hisi_hba->dev;
struct Scsi_Host *shost = hisi_hba->shost;
- pci_power_t device_state;
int rc;
if (!pdev->pm_cap) {
@@ -3471,12 +4658,7 @@ static int _suspend_v3_hw(struct device *device)
hisi_sas_init_mem(hisi_hba);
- device_state = pci_choose_state(pdev, PMSG_SUSPEND);
- dev_warn(dev, "entering operating state [D%d]\n",
- device_state);
- pci_save_state(pdev);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, device_state);
+ dev_warn(dev, "entering suspend state\n");
hisi_sas_release_tasks(hisi_hba);
@@ -3496,16 +4678,7 @@ static int _resume_v3_hw(struct device *device)
dev_warn(dev, "resuming from operating state [D%d]\n",
device_state);
- pci_set_power_state(pdev, PCI_D0);
- pci_enable_wake(pdev, PCI_D0, 0);
- pci_restore_state(pdev);
- rc = pci_enable_device(pdev);
- if (rc) {
- dev_err(dev, "enable device failed during resume (%d)\n", rc);
- return rc;
- }
- pci_set_master(pdev);
scsi_unblock_requests(shost);
clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
@@ -3513,17 +4686,16 @@ static int _resume_v3_hw(struct device *device)
rc = hw_init_v3_hw(hisi_hba);
if (rc) {
scsi_remove_host(shost);
- pci_disable_device(pdev);
return rc;
}
- hisi_hba->hw->phys_init(hisi_hba);
+ phys_init_v3_hw(hisi_hba);
sas_resume_ha(sha);
clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
return 0;
}
-static int suspend_v3_hw(struct device *device)
+static int __maybe_unused suspend_v3_hw(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct sas_ha_struct *sha = pci_get_drvdata(pdev);
@@ -3539,7 +4711,7 @@ static int suspend_v3_hw(struct device *device)
return rc;
}
-static int resume_v3_hw(struct device *device)
+static int __maybe_unused resume_v3_hw(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct sas_ha_struct *sha = pci_get_drvdata(pdev);
@@ -3562,21 +4734,10 @@ static const struct pci_error_handlers hisi_sas_err_handler = {
.reset_done = hisi_sas_reset_done_v3_hw,
};
-static int runtime_suspend_v3_hw(struct device *dev)
-{
- return suspend_v3_hw(dev);
-}
-
-static int runtime_resume_v3_hw(struct device *dev)
-{
- return resume_v3_hw(dev);
-}
-
-static const struct dev_pm_ops hisi_sas_v3_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(suspend_v3_hw, resume_v3_hw)
- SET_RUNTIME_PM_OPS(runtime_suspend_v3_hw,
- runtime_resume_v3_hw, NULL)
-};
+static UNIVERSAL_DEV_PM_OPS(hisi_sas_v3_pm_ops,
+ suspend_v3_hw,
+ resume_v3_hw,
+ NULL);
static struct pci_driver sas_v3_pci_driver = {
.name = DRV_NAME,
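The deleted runtime_suspend_v3_hw()/runtime_resume_v3_hw() wrappers were pure pass-throughs, so the driver now uses UNIVERSAL_DEV_PM_OPS, which wires the same two callbacks into both the system-sleep and runtime-PM slots. Roughly what the macro expands to in kernels of this vintage (a sketch from include/linux/pm.h, not part of the patch):

    static const struct dev_pm_ops hisi_sas_v3_pm_ops = {
            SET_SYSTEM_SLEEP_PM_OPS(suspend_v3_hw, resume_v3_hw)
            SET_RUNTIME_PM_OPS(suspend_v3_hw, resume_v3_hw, NULL)
    };

The __maybe_unused annotations added above go with this: with CONFIG_PM disabled, both SET_* macros expand to nothing and the callbacks would otherwise trigger unused-function warnings. The pci_save_state()/pci_set_power_state() calls removed from _suspend_v3_hw()/_resume_v3_hw() are likewise taken over by the PCI core once the driver relies on generic dev_pm_ops.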
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 8df70c92911d..f4d3747cfa0b 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -3881,8 +3881,6 @@ static unsigned char hpsa_volume_offline(struct ctlr_info *h,
u8 sense_key, asc, ascq;
int sense_len;
int rc, ldstat = 0;
- u16 cmd_status;
- u8 scsi_status;
#define ASC_LUN_NOT_READY 0x04
#define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
#define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02
@@ -3902,8 +3900,6 @@ static unsigned char hpsa_volume_offline(struct ctlr_info *h,
else
sense_len = c->err_info->SenseLen;
decode_sense_data(sense, sense_len, &sense_key, &asc, &ascq);
- cmd_status = c->err_info->CommandStatus;
- scsi_status = c->err_info->ScsiStatus;
cmd_free(h, c);
/* Determine the reason for not ready state */
@@ -4351,7 +4347,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
u32 ndev_allocated = 0;
struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
int ncurrent = 0;
- int i, n_ext_target_devs, ndevs_to_allocate;
+ int i, ndevs_to_allocate;
int raid_ctlr_position;
bool physical_device;
DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
@@ -4416,7 +4412,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
raid_ctlr_position = nphysicals + nlogicals;
/* adjust our table of devices */
- n_ext_target_devs = 0;
for (i = 0; i < nphysicals + nlogicals + 1; i++) {
u8 *lunaddrbytes, is_OBDR = 0;
int rc = 0;
@@ -4580,7 +4575,7 @@ static int hpsa_scatter_gather(struct ctlr_info *h,
struct scsi_cmnd *cmd)
{
struct scatterlist *sg;
- int use_sg, i, sg_limit, chained, last_sg;
+ int use_sg, i, sg_limit, chained;
struct SGDescriptor *curr_sg;
BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
@@ -4602,7 +4597,6 @@ static int hpsa_scatter_gather(struct ctlr_info *h,
curr_sg = cp->SG;
chained = use_sg > h->max_cmd_sg_entries;
sg_limit = chained ? h->max_cmd_sg_entries - 1 : use_sg;
- last_sg = scsi_sg_count(cmd) - 1;
scsi_for_each_sg(cmd, sg, sg_limit, i) {
hpsa_set_sg_descriptor(curr_sg, sg);
curr_sg++;
@@ -7442,7 +7436,6 @@ static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
dev_warn(&pdev->dev,
"base address is invalid\n");
return -1;
- break;
}
}
if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
@@ -8636,7 +8629,7 @@ static struct ctlr_info *hpda_alloc_ctlr_info(void)
static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- int dac, rc;
+ int rc;
struct ctlr_info *h;
int try_soft_reset = 0;
unsigned long flags;
@@ -8712,13 +8705,9 @@ reinit_after_soft_reset:
/* configure PCI DMA stuff */
rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
- if (rc == 0) {
- dac = 1;
- } else {
+ if (rc != 0) {
rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (rc == 0) {
- dac = 0;
- } else {
+ if (rc != 0) {
dev_err(&pdev->dev, "no suitable DMA available\n");
goto clean3; /* shost, pci, lu, aer/h */
}
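The reworked block keeps the usual 64-bit-then-32-bit DMA mask fallback and only drops the dac flag, which no caller consumed. For reference, the same pattern in condensed form (a sketch; dma_set_mask_and_coherent() is the common single-call variant, not what this patch uses):

    rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
    if (rc)
            rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
    if (rc)
            dev_err(&pdev->dev, "no suitable DMA available\n");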
@@ -9092,25 +9081,27 @@ static void hpsa_remove_one(struct pci_dev *pdev)
hpda_free_ctlr_info(h); /* init_one 1 */
}
-static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
- __attribute__((unused)) pm_message_t state)
+static int __maybe_unused hpsa_suspend(
+ __attribute__((unused)) struct device *dev)
{
return -ENOSYS;
}
-static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
+static int __maybe_unused hpsa_resume(
+	__attribute__((unused)) struct device *dev)
{
return -ENOSYS;
}
+static SIMPLE_DEV_PM_OPS(hpsa_pm_ops, hpsa_suspend, hpsa_resume);
+
static struct pci_driver hpsa_pci_driver = {
.name = HPSA,
.probe = hpsa_init_one,
.remove = hpsa_remove_one,
.id_table = hpsa_pci_device_id, /* id_table */
.shutdown = hpsa_shutdown,
- .suspend = hpsa_suspend,
- .resume = hpsa_resume,
+ .driver.pm = &hpsa_pm_ops,
};
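With the legacy struct pci_driver .suspend/.resume hooks, the callbacks took a pci_dev and a pm_message_t; with .driver.pm the PCI core performs config-space save/restore and power-state transitions itself and hands the callbacks a plain struct device. For reference, SIMPLE_DEV_PM_OPS is approximately (sketch from include/linux/pm.h):

    #define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
    const struct dev_pm_ops name = { \
            SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
    }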
/* Fill in bucket_map[], given nsgs (the max number of
@@ -9299,10 +9290,9 @@ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
} else if (trans_support & CFGTBL_Trans_io_accel2) {
u64 cfg_offset, cfg_base_addr_index;
u32 bft2_offset, cfg_base_addr;
- int rc;
- rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
- &cfg_base_addr_index, &cfg_offset);
+ hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
+ &cfg_base_addr_index, &cfg_offset);
BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index 6a2561f26e38..db4c7a7ff4dd 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -758,7 +758,6 @@ static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
scp->result = SAM_STAT_CHECK_CONDITION;
memcpy(scp->sense_buffer, &req->sg_list, SCSI_SENSE_BUFFERSIZE);
goto skip_resid;
- break;
default:
scp->result = DRIVER_INVALID << 24 | DID_ABORT << 16;
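This hunk, like the ipr.c and isci/phy.c ones below, removes a break that can never execute because control has already left the switch. The shape of the dead code, in miniature (hypothetical labels):

    switch (status) {
    case SOME_ERROR:
            goto skip_resid;        /* already leaves the switch */
            break;                  /* unreachable, hence removed */
    }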
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 070cf516b98f..65f168c41d23 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -138,6 +138,31 @@ static void ibmvfc_tgt_move_login(struct ibmvfc_target *);
static const char *unknown_error = "unknown error";
+static int ibmvfc_check_caps(struct ibmvfc_host *vhost, unsigned long cap_flags)
+{
+ u64 host_caps = be64_to_cpu(vhost->login_buf->resp.capabilities);
+
+ return (host_caps & cap_flags) ? 1 : 0;
+}
+
+static struct ibmvfc_fcp_cmd_iu *ibmvfc_get_fcp_iu(struct ibmvfc_host *vhost,
+ struct ibmvfc_cmd *vfc_cmd)
+{
+ if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
+ return &vfc_cmd->v2.iu;
+ else
+ return &vfc_cmd->v1.iu;
+}
+
+static struct ibmvfc_fcp_rsp *ibmvfc_get_fcp_rsp(struct ibmvfc_host *vhost,
+ struct ibmvfc_cmd *vfc_cmd)
+{
+ if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
+ return &vfc_cmd->v2.rsp;
+ else
+ return &vfc_cmd->v1.rsp;
+}
+
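ibmvfc_check_caps() folds the repeated capability test into one place, and the two accessors pick the v1 or v2 payload layout it implies. The effect at a call site, taken from the cancel path later in this patch:

    /* before */
    if (!(be64_to_cpu(vhost->login_buf->resp.capabilities) &
          IBMVFC_CAN_SUPPRESS_ABTS))
            type &= ~IBMVFC_TMF_SUPPRESS_ABTS;

    /* after */
    if (!ibmvfc_check_caps(vhost, IBMVFC_CAN_SUPPRESS_ABTS))
            type &= ~IBMVFC_TMF_SUPPRESS_ABTS;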
#ifdef CONFIG_SCSI_IBMVFC_TRACE
/**
* ibmvfc_trc_start - Log a start trace entry
@@ -149,6 +174,7 @@ static void ibmvfc_trc_start(struct ibmvfc_event *evt)
struct ibmvfc_host *vhost = evt->vhost;
struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd;
struct ibmvfc_mad_common *mad = &evt->iu.mad_common;
+ struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
struct ibmvfc_trace_entry *entry;
entry = &vhost->trace[vhost->trace_index++];
@@ -159,11 +185,11 @@ static void ibmvfc_trc_start(struct ibmvfc_event *evt)
switch (entry->fmt) {
case IBMVFC_CMD_FORMAT:
- entry->op_code = vfc_cmd->iu.cdb[0];
+ entry->op_code = iu->cdb[0];
entry->scsi_id = be64_to_cpu(vfc_cmd->tgt_scsi_id);
- entry->lun = scsilun_to_int(&vfc_cmd->iu.lun);
- entry->tmf_flags = vfc_cmd->iu.tmf_flags;
- entry->u.start.xfer_len = be32_to_cpu(vfc_cmd->iu.xfer_len);
+ entry->lun = scsilun_to_int(&iu->lun);
+ entry->tmf_flags = iu->tmf_flags;
+ entry->u.start.xfer_len = be32_to_cpu(iu->xfer_len);
break;
case IBMVFC_MAD_FORMAT:
entry->op_code = be32_to_cpu(mad->opcode);
@@ -183,6 +209,8 @@ static void ibmvfc_trc_end(struct ibmvfc_event *evt)
struct ibmvfc_host *vhost = evt->vhost;
struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
struct ibmvfc_mad_common *mad = &evt->xfer_iu->mad_common;
+ struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
+ struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
struct ibmvfc_trace_entry *entry = &vhost->trace[vhost->trace_index++];
entry->evt = evt;
@@ -192,15 +220,15 @@ static void ibmvfc_trc_end(struct ibmvfc_event *evt)
switch (entry->fmt) {
case IBMVFC_CMD_FORMAT:
- entry->op_code = vfc_cmd->iu.cdb[0];
+ entry->op_code = iu->cdb[0];
entry->scsi_id = be64_to_cpu(vfc_cmd->tgt_scsi_id);
- entry->lun = scsilun_to_int(&vfc_cmd->iu.lun);
- entry->tmf_flags = vfc_cmd->iu.tmf_flags;
+ entry->lun = scsilun_to_int(&iu->lun);
+ entry->tmf_flags = iu->tmf_flags;
entry->u.end.status = be16_to_cpu(vfc_cmd->status);
entry->u.end.error = be16_to_cpu(vfc_cmd->error);
- entry->u.end.fcp_rsp_flags = vfc_cmd->rsp.flags;
- entry->u.end.rsp_code = vfc_cmd->rsp.data.info.rsp_code;
- entry->u.end.scsi_status = vfc_cmd->rsp.scsi_status;
+ entry->u.end.fcp_rsp_flags = rsp->flags;
+ entry->u.end.rsp_code = rsp->data.info.rsp_code;
+ entry->u.end.scsi_status = rsp->scsi_status;
break;
case IBMVFC_MAD_FORMAT:
entry->op_code = be32_to_cpu(mad->opcode);
@@ -260,10 +288,10 @@ static const char *ibmvfc_get_cmd_error(u16 status, u16 error)
* Return value:
* SCSI result value to return for completed command
**/
-static int ibmvfc_get_err_result(struct ibmvfc_cmd *vfc_cmd)
+static int ibmvfc_get_err_result(struct ibmvfc_host *vhost, struct ibmvfc_cmd *vfc_cmd)
{
int err;
- struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp;
+ struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
int fc_rsp_len = be32_to_cpu(rsp->fcp_rsp_len);
if ((rsp->flags & FCP_RSP_LEN_VALID) &&
@@ -1227,7 +1255,7 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
login_info->flags |= cpu_to_be16(IBMVFC_CLIENT_MIGRATED);
login_info->max_cmds = cpu_to_be32(max_requests + IBMVFC_NUM_INTERNAL_REQ);
- login_info->capabilities = cpu_to_be64(IBMVFC_CAN_MIGRATE);
+ login_info->capabilities = cpu_to_be64(IBMVFC_CAN_MIGRATE | IBMVFC_CAN_SEND_VF_WWPN);
login_info->async.va = cpu_to_be64(vhost->async_crq.msg_token);
login_info->async.len = cpu_to_be32(vhost->async_crq.size * sizeof(*vhost->async_crq.msgs));
strncpy(login_info->partition_name, vhost->partition_name, IBMVFC_MAX_NAME);
@@ -1378,6 +1406,7 @@ static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd,
int sg_mapped;
struct srp_direct_buf *data = &vfc_cmd->ioba;
struct ibmvfc_host *vhost = dev_get_drvdata(dev);
+ struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(evt->vhost, vfc_cmd);
if (cls3_error)
vfc_cmd->flags |= cpu_to_be16(IBMVFC_CLASS_3_ERR);
@@ -1394,10 +1423,10 @@ static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd,
if (scmd->sc_data_direction == DMA_TO_DEVICE) {
vfc_cmd->flags |= cpu_to_be16(IBMVFC_WRITE);
- vfc_cmd->iu.add_cdb_len |= IBMVFC_WRDATA;
+ iu->add_cdb_len |= IBMVFC_WRDATA;
} else {
vfc_cmd->flags |= cpu_to_be16(IBMVFC_READ);
- vfc_cmd->iu.add_cdb_len |= IBMVFC_RDDATA;
+ iu->add_cdb_len |= IBMVFC_RDDATA;
}
if (sg_mapped == 1) {
@@ -1516,7 +1545,7 @@ static void ibmvfc_log_error(struct ibmvfc_event *evt)
{
struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
struct ibmvfc_host *vhost = evt->vhost;
- struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp;
+ struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
struct scsi_cmnd *cmnd = evt->cmnd;
const char *err = unknown_error;
int index = ibmvfc_get_err_index(be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error));
@@ -1570,7 +1599,7 @@ static void ibmvfc_relogin(struct scsi_device *sdev)
static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
{
struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
- struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp;
+ struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(evt->vhost, vfc_cmd);
struct scsi_cmnd *cmnd = evt->cmnd;
u32 rsp_len = 0;
u32 sense_len = be32_to_cpu(rsp->fcp_sense_len);
@@ -1584,7 +1613,7 @@ static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
scsi_set_resid(cmnd, 0);
if (vfc_cmd->status) {
- cmnd->result = ibmvfc_get_err_result(vfc_cmd);
+ cmnd->result = ibmvfc_get_err_result(evt->vhost, vfc_cmd);
if (rsp->flags & FCP_RSP_LEN_VALID)
rsp_len = be32_to_cpu(rsp->fcp_rsp_len);
@@ -1646,6 +1675,33 @@ static inline int ibmvfc_host_chkready(struct ibmvfc_host *vhost)
return result;
}
+static struct ibmvfc_cmd *ibmvfc_init_vfc_cmd(struct ibmvfc_event *evt, struct scsi_device *sdev)
+{
+ struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+ struct ibmvfc_host *vhost = evt->vhost;
+ struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd;
+ struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
+ struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
+ size_t offset;
+
+ memset(vfc_cmd, 0, sizeof(*vfc_cmd));
+ if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
+ offset = offsetof(struct ibmvfc_cmd, v2.rsp);
+ vfc_cmd->target_wwpn = cpu_to_be64(rport->port_name);
+	} else {
+		offset = offsetof(struct ibmvfc_cmd, v1.rsp);
+	}
+ vfc_cmd->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offset);
+ vfc_cmd->resp.len = cpu_to_be32(sizeof(*rsp));
+ vfc_cmd->frame_type = cpu_to_be32(IBMVFC_SCSI_FCP_TYPE);
+ vfc_cmd->payload_len = cpu_to_be32(sizeof(*iu));
+ vfc_cmd->resp_len = cpu_to_be32(sizeof(*rsp));
+ vfc_cmd->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
+ vfc_cmd->tgt_scsi_id = cpu_to_be64(rport->port_id);
+ int_to_scsilun(sdev->lun, &iu->lun);
+
+ return vfc_cmd;
+}
+
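ibmvfc_init_vfc_cmd() centralizes the command setup that queuecommand, device reset, and abort-task-set previously open-coded, including the version-dependent response offset. The resulting call pattern, as the TMF paths below use it:

    evt = ibmvfc_get_event(vhost);
    ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
    tmf = ibmvfc_init_vfc_cmd(evt, sdev);
    iu = ibmvfc_get_fcp_iu(vhost, tmf);
    iu->tmf_flags = type;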
/**
* ibmvfc_queuecommand - The queuecommand function of the scsi template
* @cmnd: struct scsi_cmnd to be executed
@@ -1660,6 +1716,7 @@ static int ibmvfc_queuecommand_lck(struct scsi_cmnd *cmnd,
struct ibmvfc_host *vhost = shost_priv(cmnd->device->host);
struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
struct ibmvfc_cmd *vfc_cmd;
+ struct ibmvfc_fcp_cmd_iu *iu;
struct ibmvfc_event *evt;
int rc;
@@ -1675,24 +1732,20 @@ static int ibmvfc_queuecommand_lck(struct scsi_cmnd *cmnd,
ibmvfc_init_event(evt, ibmvfc_scsi_done, IBMVFC_CMD_FORMAT);
evt->cmnd = cmnd;
cmnd->scsi_done = done;
- vfc_cmd = &evt->iu.cmd;
- memset(vfc_cmd, 0, sizeof(*vfc_cmd));
- vfc_cmd->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offsetof(struct ibmvfc_cmd, rsp));
- vfc_cmd->resp.len = cpu_to_be32(sizeof(vfc_cmd->rsp));
- vfc_cmd->frame_type = cpu_to_be32(IBMVFC_SCSI_FCP_TYPE);
- vfc_cmd->payload_len = cpu_to_be32(sizeof(vfc_cmd->iu));
- vfc_cmd->resp_len = cpu_to_be32(sizeof(vfc_cmd->rsp));
- vfc_cmd->cancel_key = cpu_to_be32((unsigned long)cmnd->device->hostdata);
- vfc_cmd->tgt_scsi_id = cpu_to_be64(rport->port_id);
- vfc_cmd->iu.xfer_len = cpu_to_be32(scsi_bufflen(cmnd));
- int_to_scsilun(cmnd->device->lun, &vfc_cmd->iu.lun);
- memcpy(vfc_cmd->iu.cdb, cmnd->cmnd, cmnd->cmd_len);
+
+ vfc_cmd = ibmvfc_init_vfc_cmd(evt, cmnd->device);
+ iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
+
+ iu->xfer_len = cpu_to_be32(scsi_bufflen(cmnd));
+ memcpy(iu->cdb, cmnd->cmnd, cmnd->cmd_len);
if (cmnd->flags & SCMD_TAGGED) {
vfc_cmd->task_tag = cpu_to_be64(cmnd->tag);
- vfc_cmd->iu.pri_task_attr = IBMVFC_SIMPLE_TASK;
+ iu->pri_task_attr = IBMVFC_SIMPLE_TASK;
}
+ vfc_cmd->correlation = cpu_to_be64((u64)evt);
+
if (likely(!(rc = ibmvfc_map_sg_data(cmnd, evt, vfc_cmd, vhost->dev))))
return ibmvfc_send_event(evt, vhost, 0);
@@ -2016,7 +2069,8 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
struct ibmvfc_cmd *tmf;
struct ibmvfc_event *evt = NULL;
union ibmvfc_iu rsp_iu;
- struct ibmvfc_fcp_rsp *fc_rsp = &rsp_iu.cmd.rsp;
+ struct ibmvfc_fcp_cmd_iu *iu;
+ struct ibmvfc_fcp_rsp *fc_rsp = ibmvfc_get_fcp_rsp(vhost, &rsp_iu.cmd);
int rsp_rc = -EBUSY;
unsigned long flags;
int rsp_code = 0;
@@ -2025,19 +2079,13 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
if (vhost->state == IBMVFC_ACTIVE) {
evt = ibmvfc_get_event(vhost);
ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
+ tmf = ibmvfc_init_vfc_cmd(evt, sdev);
+ iu = ibmvfc_get_fcp_iu(vhost, tmf);
- tmf = &evt->iu.cmd;
- memset(tmf, 0, sizeof(*tmf));
- tmf->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offsetof(struct ibmvfc_cmd, rsp));
- tmf->resp.len = cpu_to_be32(sizeof(tmf->rsp));
- tmf->frame_type = cpu_to_be32(IBMVFC_SCSI_FCP_TYPE);
- tmf->payload_len = cpu_to_be32(sizeof(tmf->iu));
- tmf->resp_len = cpu_to_be32(sizeof(tmf->rsp));
- tmf->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
- tmf->tgt_scsi_id = cpu_to_be64(rport->port_id);
- int_to_scsilun(sdev->lun, &tmf->iu.lun);
tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF));
- tmf->iu.tmf_flags = type;
+ if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
+ tmf->target_wwpn = cpu_to_be64(rport->port_name);
+ iu->tmf_flags = type;
evt->sync_iu = &rsp_iu;
init_completion(&evt->comp);
@@ -2055,7 +2103,7 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
wait_for_completion(&evt->comp);
if (rsp_iu.cmd.status)
- rsp_code = ibmvfc_get_err_result(&rsp_iu.cmd);
+ rsp_code = ibmvfc_get_err_result(vhost, &rsp_iu.cmd);
if (rsp_code) {
if (fc_rsp->flags & FCP_RSP_LEN_VALID)
@@ -2227,12 +2275,17 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
tmf = &evt->iu.tmf;
memset(tmf, 0, sizeof(*tmf));
- tmf->common.version = cpu_to_be32(1);
+ if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
+ tmf->common.version = cpu_to_be32(2);
+ tmf->target_wwpn = cpu_to_be64(rport->port_name);
+ } else {
+ tmf->common.version = cpu_to_be32(1);
+ }
tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
tmf->common.length = cpu_to_be16(sizeof(*tmf));
tmf->scsi_id = cpu_to_be64(rport->port_id);
int_to_scsilun(sdev->lun, &tmf->lun);
- if (!(be64_to_cpu(vhost->login_buf->resp.capabilities) & IBMVFC_CAN_SUPPRESS_ABTS))
+ if (!ibmvfc_check_caps(vhost, IBMVFC_CAN_SUPPRESS_ABTS))
type &= ~IBMVFC_TMF_SUPPRESS_ABTS;
if (vhost->state == IBMVFC_ACTIVE)
tmf->flags = cpu_to_be32((type | IBMVFC_TMF_LUA_VALID));
@@ -2331,7 +2384,8 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
struct ibmvfc_cmd *tmf;
struct ibmvfc_event *evt, *found_evt;
union ibmvfc_iu rsp_iu;
- struct ibmvfc_fcp_rsp *fc_rsp = &rsp_iu.cmd.rsp;
+ struct ibmvfc_fcp_cmd_iu *iu;
+ struct ibmvfc_fcp_rsp *fc_rsp = ibmvfc_get_fcp_rsp(vhost, &rsp_iu.cmd);
int rc, rsp_rc = -EBUSY;
unsigned long flags, timeout = IBMVFC_ABORT_TIMEOUT;
int rsp_code = 0;
@@ -2355,21 +2409,17 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
if (vhost->state == IBMVFC_ACTIVE) {
evt = ibmvfc_get_event(vhost);
ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
+ tmf = ibmvfc_init_vfc_cmd(evt, sdev);
+ iu = ibmvfc_get_fcp_iu(vhost, tmf);
- tmf = &evt->iu.cmd;
- memset(tmf, 0, sizeof(*tmf));
- tmf->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offsetof(struct ibmvfc_cmd, rsp));
- tmf->resp.len = cpu_to_be32(sizeof(tmf->rsp));
- tmf->frame_type = cpu_to_be32(IBMVFC_SCSI_FCP_TYPE);
- tmf->payload_len = cpu_to_be32(sizeof(tmf->iu));
- tmf->resp_len = cpu_to_be32(sizeof(tmf->rsp));
- tmf->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
- tmf->tgt_scsi_id = cpu_to_be64(rport->port_id);
- int_to_scsilun(sdev->lun, &tmf->iu.lun);
+ if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
+ tmf->target_wwpn = cpu_to_be64(rport->port_name);
+ iu->tmf_flags = IBMVFC_ABORT_TASK_SET;
tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF));
- tmf->iu.tmf_flags = IBMVFC_ABORT_TASK_SET;
evt->sync_iu = &rsp_iu;
+ tmf->correlation = cpu_to_be64((u64)evt);
+
init_completion(&evt->comp);
rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
}
@@ -2414,7 +2464,7 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
}
if (rsp_iu.cmd.status)
- rsp_code = ibmvfc_get_err_result(&rsp_iu.cmd);
+ rsp_code = ibmvfc_get_err_result(vhost, &rsp_iu.cmd);
if (rsp_code) {
if (fc_rsp->flags & FCP_RSP_LEN_VALID)
@@ -2957,8 +3007,10 @@ static int ibmvfc_slave_configure(struct scsi_device *sdev)
unsigned long flags = 0;
spin_lock_irqsave(shost->host_lock, flags);
- if (sdev->type == TYPE_DISK)
+ if (sdev->type == TYPE_DISK) {
sdev->allow_restart = 1;
+ blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
+ }
spin_unlock_irqrestore(shost->host_lock, flags);
return 0;
}
@@ -3025,7 +3077,7 @@ static ssize_t ibmvfc_show_host_npiv_version(struct device *dev,
{
struct Scsi_Host *shost = class_to_shost(dev);
struct ibmvfc_host *vhost = shost_priv(shost);
- return snprintf(buf, PAGE_SIZE, "%d\n", vhost->login_buf->resp.version);
+ return snprintf(buf, PAGE_SIZE, "%d\n", be32_to_cpu(vhost->login_buf->resp.version));
}
static ssize_t ibmvfc_show_host_capabilities(struct device *dev,
@@ -3033,7 +3085,7 @@ static ssize_t ibmvfc_show_host_capabilities(struct device *dev,
{
struct Scsi_Host *shost = class_to_shost(dev);
struct ibmvfc_host *vhost = shost_priv(shost);
- return snprintf(buf, PAGE_SIZE, "%llx\n", vhost->login_buf->resp.capabilities);
+ return snprintf(buf, PAGE_SIZE, "%llx\n", be64_to_cpu(vhost->login_buf->resp.capabilities));
}
/**
@@ -3445,7 +3497,12 @@ static void ibmvfc_tgt_send_prli(struct ibmvfc_target *tgt)
evt->tgt = tgt;
prli = &evt->iu.prli;
memset(prli, 0, sizeof(*prli));
- prli->common.version = cpu_to_be32(1);
+ if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
+ prli->common.version = cpu_to_be32(2);
+ prli->target_wwpn = cpu_to_be64(tgt->wwpn);
+ } else {
+ prli->common.version = cpu_to_be32(1);
+ }
prli->common.opcode = cpu_to_be32(IBMVFC_PROCESS_LOGIN);
prli->common.length = cpu_to_be16(sizeof(*prli));
prli->scsi_id = cpu_to_be64(tgt->scsi_id);
@@ -3548,7 +3605,12 @@ static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *tgt)
evt->tgt = tgt;
plogi = &evt->iu.plogi;
memset(plogi, 0, sizeof(*plogi));
- plogi->common.version = cpu_to_be32(1);
+ if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
+ plogi->common.version = cpu_to_be32(2);
+ plogi->target_wwpn = cpu_to_be64(tgt->wwpn);
+ } else {
+ plogi->common.version = cpu_to_be32(1);
+ }
plogi->common.opcode = cpu_to_be32(IBMVFC_PORT_LOGIN);
plogi->common.length = cpu_to_be16(sizeof(*plogi));
plogi->scsi_id = cpu_to_be64(tgt->scsi_id);
@@ -3948,7 +4010,12 @@ static void ibmvfc_adisc_timeout(struct timer_list *t)
evt->tgt = tgt;
tmf = &evt->iu.tmf;
memset(tmf, 0, sizeof(*tmf));
- tmf->common.version = cpu_to_be32(1);
+ if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
+ tmf->common.version = cpu_to_be32(2);
+ tmf->target_wwpn = cpu_to_be64(tgt->wwpn);
+ } else {
+ tmf->common.version = cpu_to_be32(1);
+ }
tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
tmf->common.length = cpu_to_be16(sizeof(*tmf));
tmf->scsi_id = cpu_to_be64(tgt->scsi_id);
@@ -4391,7 +4458,7 @@ static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
ibmvfc_dbg(vhost, "Sent NPIV login\n");
else
ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
-};
+}
/**
* ibmvfc_npiv_logout_done - Completion handler for NPIV Logout
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index 34debccfb142..9d58cfd774d3 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -54,6 +54,7 @@
#define IBMVFC_MAD_SUCCESS 0x00
#define IBMVFC_MAD_NOT_SUPPORTED 0xF1
+#define IBMVFC_MAD_VERSION_NOT_SUPP 0xF2
#define IBMVFC_MAD_FAILED 0xF7
#define IBMVFC_MAD_DRIVER_FAILED 0xEE
#define IBMVFC_MAD_CRQ_ERROR 0xEF
@@ -168,6 +169,8 @@ struct ibmvfc_npiv_login {
#define IBMVFC_CAN_MIGRATE 0x01
#define IBMVFC_CAN_USE_CHANNELS 0x02
#define IBMVFC_CAN_HANDLE_FPIN 0x04
+#define IBMVFC_CAN_USE_MAD_VERSION 0x08
+#define IBMVFC_CAN_SEND_VF_WWPN 0x10
__be64 node_name;
struct srp_direct_buf async;
u8 partition_name[IBMVFC_MAX_NAME];
@@ -211,7 +214,9 @@ struct ibmvfc_npiv_login_resp {
__be64 capabilities;
#define IBMVFC_CAN_FLUSH_ON_HALT 0x08
#define IBMVFC_CAN_SUPPRESS_ABTS 0x10
-#define IBMVFC_CAN_SUPPORT_CHANNELS 0x20
+#define IBMVFC_MAD_VERSION_CAP 0x20
+#define IBMVFC_HANDLE_VF_WWPN 0x40
+#define IBMVFC_CAN_SUPPORT_CHANNELS 0x80
__be32 max_cmds;
__be32 scsi_id_sz;
__be64 max_dma_len;
@@ -293,6 +298,7 @@ struct ibmvfc_port_login {
__be32 reserved2;
struct ibmvfc_service_parms service_parms;
struct ibmvfc_service_parms service_parms_change;
+ __be64 target_wwpn;
__be64 reserved3[2];
} __packed __aligned(8);
@@ -344,6 +350,7 @@ struct ibmvfc_process_login {
__be16 status;
__be16 error; /* also fc_reason */
__be32 reserved2;
+ __be64 target_wwpn;
__be64 reserved3[2];
} __packed __aligned(8);
@@ -378,6 +385,8 @@ struct ibmvfc_tmf {
__be32 cancel_key;
__be32 my_cancel_key;
__be32 pad;
+ __be64 target_wwpn;
+ __be64 task_tag;
__be64 reserved[2];
} __packed __aligned(8);
@@ -474,9 +483,19 @@ struct ibmvfc_cmd {
__be64 correlation;
__be64 tgt_scsi_id;
__be64 tag;
- __be64 reserved3[2];
- struct ibmvfc_fcp_cmd_iu iu;
- struct ibmvfc_fcp_rsp rsp;
+ __be64 target_wwpn;
+ __be64 reserved3;
+ union {
+ struct {
+ struct ibmvfc_fcp_cmd_iu iu;
+ struct ibmvfc_fcp_rsp rsp;
+ } v1;
+ struct {
+ __be64 reserved4;
+ struct ibmvfc_fcp_cmd_iu iu;
+ struct ibmvfc_fcp_rsp rsp;
+ } v2;
+ };
} __packed __aligned(8);
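The v2 arm inserts one reserved doubleword ahead of the FCP IU, so iu and rsp land at different offsets in the two layouts; this is why ibmvfc_init_vfc_cmd() computes the response address with offsetof() rather than a constant. A small stand-alone illustration of the idea (hypothetical struct, not the driver's):

    #include <stddef.h>
    #include <stdio.h>

    struct cmd {
            unsigned long long hdr;
            union {
                    struct { int iu; int rsp; } v1;
                    struct { unsigned long long pad; int iu; int rsp; } v2;
            };
    };

    int main(void)
    {
            /* the extra pad word shifts everything in the v2 layout */
            printf("v1.rsp at %zu, v2.rsp at %zu\n",
                   offsetof(struct cmd, v1.rsp),
                   offsetof(struct cmd, v2.rsp));
            return 0;
    }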
struct ibmvfc_passthru_fc_iu {
@@ -503,6 +522,7 @@ struct ibmvfc_passthru_iu {
__be64 correlation;
__be64 scsi_id;
__be64 tag;
+ __be64 target_wwpn;
__be64 reserved2[2];
} __packed __aligned(8);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index b0aa58d117cc..e451102b9a29 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -9487,7 +9487,6 @@ static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
case pci_channel_io_perm_failure:
ipr_pci_perm_failure(pdev);
return PCI_ERS_RESULT_DISCONNECT;
- break;
default:
break;
}
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 93bc9019667f..c452849e7bb4 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -715,10 +715,6 @@ static int isci_suspend(struct device *dev)
isci_host_deinit(ihost);
}
- pci_save_state(pdev);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, PCI_D3hot);
-
return 0;
}
@@ -726,19 +722,7 @@ static int isci_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct isci_host *ihost;
- int rc, i;
-
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
-
- rc = pcim_enable_device(pdev);
- if (rc) {
- dev_err(&pdev->dev,
- "enabling device failure after resume(%d)\n", rc);
- return rc;
- }
-
- pci_set_master(pdev);
+ int i;
for_each_isci_host(i, ihost, pdev) {
sas_prep_resume_ha(&ihost->sas_ha);
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c
index 7041e2e3ab48..1b87d9080ebe 100644
--- a/drivers/scsi/isci/phy.c
+++ b/drivers/scsi/isci/phy.c
@@ -753,7 +753,6 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
default:
phy_event_warn(iphy, state, event_code);
return SCI_FAILURE;
- break;
}
return SCI_SUCCESS;
case SCI_PHY_SUB_AWAIT_IAF_UF:
@@ -958,7 +957,6 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
default:
phy_event_warn(iphy, state, event_code);
return SCI_FAILURE_INVALID_STATE;
- break;
}
return SCI_SUCCESS;
default:
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index df47557a02a3..a9ce6298b935 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -180,7 +180,7 @@ static void iscsi_sw_tcp_state_change(struct sock *sk)
}
/**
- * iscsi_write_space - Called when more output buffer space is available
+ * iscsi_sw_tcp_write_space - Called when more output buffer space is available
* @sk: socket space is available for
**/
static void iscsi_sw_tcp_write_space(struct sock *sk)
@@ -353,7 +353,7 @@ error:
}
/**
- * iscsi_tcp_xmit_qlen - return the number of bytes queued for xmit
+ * iscsi_sw_tcp_xmit_qlen - return the number of bytes queued for xmit
* @conn: iscsi connection
*/
static inline int iscsi_sw_tcp_xmit_qlen(struct iscsi_conn *conn)
diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c
index 13a2e7c33cb1..8d3006edbe12 100644
--- a/drivers/scsi/libfc/fc_elsct.c
+++ b/drivers/scsi/libfc/fc_elsct.c
@@ -15,7 +15,7 @@
#include <scsi/fc/fc_ns.h>
#include <scsi/fc/fc_els.h>
#include <scsi/libfc.h>
-#include <scsi/fc_encode.h>
+#include "fc_encode.h"
#include "fc_libfc.h"
/**
diff --git a/drivers/scsi/libfc/fc_encode.h b/drivers/scsi/libfc/fc_encode.h
new file mode 100644
index 000000000000..602c97a651bc
--- /dev/null
+++ b/drivers/scsi/libfc/fc_encode.h
@@ -0,0 +1,707 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright(c) 2008 Intel Corporation. All rights reserved.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#ifndef _FC_ENCODE_H_
+#define _FC_ENCODE_H_
+#include <asm/unaligned.h>
+#include <linux/utsname.h>
+#include <scsi/fc/fc_ms.h>
+
+/*
+ * F_CTL values for simple requests and responses.
+ */
+#define FC_FCTL_REQ (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)
+#define FC_FCTL_RESP (FC_FC_EX_CTX | FC_FC_LAST_SEQ | \
+ FC_FC_END_SEQ | FC_FC_SEQ_INIT)
+
+struct fc_ns_rft {
+ struct fc_ns_fid fid; /* port ID object */
+ struct fc_ns_fts fts; /* FC4-types object */
+};
+
+struct fc_ct_req {
+ struct fc_ct_hdr hdr;
+ union {
+ struct fc_ns_gid_ft gid;
+ struct fc_ns_rn_id rn;
+ struct fc_ns_rft rft;
+ struct fc_ns_rff_id rff;
+ struct fc_ns_fid fid;
+ struct fc_ns_rsnn snn;
+ struct fc_ns_rspn spn;
+ struct fc_fdmi_rhba rhba;
+ struct fc_fdmi_rpa rpa;
+ struct fc_fdmi_dprt dprt;
+ struct fc_fdmi_dhba dhba;
+ } payload;
+};
+
+/**
+ * fc_adisc_fill() - Fill in adisc request frame
+ * @lport: local port.
+ * @fp: fc frame where payload will be placed.
+ */
+static inline void fc_adisc_fill(struct fc_lport *lport, struct fc_frame *fp)
+{
+ struct fc_els_adisc *adisc;
+
+ adisc = fc_frame_payload_get(fp, sizeof(*adisc));
+ memset(adisc, 0, sizeof(*adisc));
+ adisc->adisc_cmd = ELS_ADISC;
+ put_unaligned_be64(lport->wwpn, &adisc->adisc_wwpn);
+ put_unaligned_be64(lport->wwnn, &adisc->adisc_wwnn);
+ hton24(adisc->adisc_port_id, lport->port_id);
+}
+
+/**
+ * fc_ct_hdr_fill - fills ct header and resets ct payload
+ * returns pointer to ct request.
+ */
+static inline struct fc_ct_req *fc_ct_hdr_fill(const struct fc_frame *fp,
+ unsigned int op, size_t req_size,
+ enum fc_ct_fs_type fs_type,
+ u8 subtype)
+{
+ struct fc_ct_req *ct;
+ size_t ct_plen;
+
+ ct_plen = sizeof(struct fc_ct_hdr) + req_size;
+ ct = fc_frame_payload_get(fp, ct_plen);
+ memset(ct, 0, ct_plen);
+ ct->hdr.ct_rev = FC_CT_REV;
+ ct->hdr.ct_fs_type = fs_type;
+ ct->hdr.ct_fs_subtype = subtype;
+ ct->hdr.ct_cmd = htons((u16) op);
+ return ct;
+}
+
+/**
+ * fc_ct_ns_fill() - Fill in a name service request frame
+ * @lport: local port.
+ * @fc_id: FC_ID of non-destination rport for GPN_ID and similar inquiries.
+ * @fp: frame to contain payload.
+ * @op: CT opcode.
+ * @r_ctl: pointer to FC header R_CTL.
+ * @fh_type: pointer to FC-4 type.
+ */
+static inline int fc_ct_ns_fill(struct fc_lport *lport,
+ u32 fc_id, struct fc_frame *fp,
+ unsigned int op, enum fc_rctl *r_ctl,
+ enum fc_fh_type *fh_type)
+{
+ struct fc_ct_req *ct;
+ size_t len;
+
+ switch (op) {
+ case FC_NS_GPN_FT:
+ ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_gid_ft),
+ FC_FST_DIR, FC_NS_SUBTYPE);
+ ct->payload.gid.fn_fc4_type = FC_TYPE_FCP;
+ break;
+
+ case FC_NS_GPN_ID:
+ ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_fid),
+ FC_FST_DIR, FC_NS_SUBTYPE);
+ ct->payload.gid.fn_fc4_type = FC_TYPE_FCP;
+ hton24(ct->payload.fid.fp_fid, fc_id);
+ break;
+
+ case FC_NS_RFT_ID:
+ ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rft),
+ FC_FST_DIR, FC_NS_SUBTYPE);
+ hton24(ct->payload.rft.fid.fp_fid, lport->port_id);
+ ct->payload.rft.fts = lport->fcts;
+ break;
+
+ case FC_NS_RFF_ID:
+ ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rff_id),
+ FC_FST_DIR, FC_NS_SUBTYPE);
+ hton24(ct->payload.rff.fr_fid.fp_fid, lport->port_id);
+ ct->payload.rff.fr_type = FC_TYPE_FCP;
+ if (lport->service_params & FCP_SPPF_INIT_FCN)
+ ct->payload.rff.fr_feat = FCP_FEAT_INIT;
+ if (lport->service_params & FCP_SPPF_TARG_FCN)
+ ct->payload.rff.fr_feat |= FCP_FEAT_TARG;
+ break;
+
+ case FC_NS_RNN_ID:
+ ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rn_id),
+ FC_FST_DIR, FC_NS_SUBTYPE);
+ hton24(ct->payload.rn.fr_fid.fp_fid, lport->port_id);
+ put_unaligned_be64(lport->wwnn, &ct->payload.rn.fr_wwn);
+ break;
+
+ case FC_NS_RSPN_ID:
+ len = strnlen(fc_host_symbolic_name(lport->host), 255);
+ ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rspn) + len,
+ FC_FST_DIR, FC_NS_SUBTYPE);
+ hton24(ct->payload.spn.fr_fid.fp_fid, lport->port_id);
+ strncpy(ct->payload.spn.fr_name,
+ fc_host_symbolic_name(lport->host), len);
+ ct->payload.spn.fr_name_len = len;
+ break;
+
+ case FC_NS_RSNN_NN:
+ len = strnlen(fc_host_symbolic_name(lport->host), 255);
+ ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rsnn) + len,
+ FC_FST_DIR, FC_NS_SUBTYPE);
+ put_unaligned_be64(lport->wwnn, &ct->payload.snn.fr_wwn);
+ strncpy(ct->payload.snn.fr_name,
+ fc_host_symbolic_name(lport->host), len);
+ ct->payload.snn.fr_name_len = len;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ *r_ctl = FC_RCTL_DD_UNSOL_CTL;
+ *fh_type = FC_TYPE_CT;
+ return 0;
+}
+
+static inline void fc_ct_ms_fill_attr(struct fc_fdmi_attr_entry *entry,
+ const char *in, size_t len)
+{
+ int copied = strscpy(entry->value, in, len);
+ if (copied > 0)
+		memset(entry->value + copied, 0, len - copied);
+}
+
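strscpy() returns the number of characters copied (excluding the NUL) or -E2BIG on truncation, so the copied > 0 guard skips the padding for both an empty source and an overflowed one; the memset then zero-fills the fixed-width FDMI attribute out to len. The same idiom in isolation (sketch, with dst/src/len standing in for the helper's arguments):

    ssize_t copied = strscpy(dst, src, len);       /* < 0 means truncated */

    if (copied > 0)
            memset(dst + copied, 0, len - copied); /* zero-pad the tail */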
+/**
+ * fc_ct_ms_fill() - Fill in a mgmt service request frame
+ * @lport: local port.
+ * @fc_id: FC_ID of non-destination rport for GPN_ID and similar inquiries.
+ * @fp: frame to contain payload.
+ * @op: CT opcode.
+ * @r_ctl: pointer to FC header R_CTL.
+ * @fh_type: pointer to FC-4 type.
+ */
+static inline int fc_ct_ms_fill(struct fc_lport *lport,
+ u32 fc_id, struct fc_frame *fp,
+ unsigned int op, enum fc_rctl *r_ctl,
+ enum fc_fh_type *fh_type)
+{
+ struct fc_ct_req *ct;
+ size_t len;
+ struct fc_fdmi_attr_entry *entry;
+ struct fs_fdmi_attrs *hba_attrs;
+ int numattrs = 0;
+
+ switch (op) {
+ case FC_FDMI_RHBA:
+ numattrs = 10;
+ len = sizeof(struct fc_fdmi_rhba);
+ len -= sizeof(struct fc_fdmi_attr_entry);
+ len += (numattrs * FC_FDMI_ATTR_ENTRY_HEADER_LEN);
+ len += FC_FDMI_HBA_ATTR_NODENAME_LEN;
+ len += FC_FDMI_HBA_ATTR_MANUFACTURER_LEN;
+ len += FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN;
+ len += FC_FDMI_HBA_ATTR_MODEL_LEN;
+ len += FC_FDMI_HBA_ATTR_MODELDESCR_LEN;
+ len += FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN;
+ len += FC_FDMI_HBA_ATTR_DRIVERVERSION_LEN;
+ len += FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN;
+ len += FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN;
+ len += FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN;
+ ct = fc_ct_hdr_fill(fp, op, len, FC_FST_MGMT,
+ FC_FDMI_SUBTYPE);
+
+ /* HBA Identifier */
+ put_unaligned_be64(lport->wwpn, &ct->payload.rhba.hbaid.id);
+ /* Number of Ports - always 1 */
+ put_unaligned_be32(1, &ct->payload.rhba.port.numport);
+ /* Port Name */
+ put_unaligned_be64(lport->wwpn,
+ &ct->payload.rhba.port.port[0].portname);
+
+ /* HBA Attributes */
+ put_unaligned_be32(numattrs,
+ &ct->payload.rhba.hba_attrs.numattrs);
+ hba_attrs = &ct->payload.rhba.hba_attrs;
+ entry = (struct fc_fdmi_attr_entry *)hba_attrs->attr;
+ /* NodeName*/
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_NODENAME_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_NODENAME,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be64(lport->wwnn,
+ (__be64 *)&entry->value[0]);
+
+ /* Manufacturer */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_NODENAME_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_MANUFACTURER_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_MANUFACTURER,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ fc_ct_ms_fill_attr(entry,
+ fc_host_manufacturer(lport->host),
+ FC_FDMI_HBA_ATTR_MANUFACTURER_LEN);
+
+ /* SerialNumber */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_MANUFACTURER_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_SERIALNUMBER,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ fc_ct_ms_fill_attr(entry,
+ fc_host_serial_number(lport->host),
+ FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN);
+
+ /* Model */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_MODEL_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_MODEL,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ fc_ct_ms_fill_attr(entry,
+ fc_host_model(lport->host),
+ FC_FDMI_HBA_ATTR_MODEL_LEN);
+
+ /* Model Description */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_MODEL_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_MODELDESCR_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_MODELDESCRIPTION,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ fc_ct_ms_fill_attr(entry,
+ fc_host_model_description(lport->host),
+ FC_FDMI_HBA_ATTR_MODELDESCR_LEN);
+
+ /* Hardware Version */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_MODELDESCR_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_HARDWAREVERSION,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ fc_ct_ms_fill_attr(entry,
+ fc_host_hardware_version(lport->host),
+ FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN);
+
+ /* Driver Version */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_DRIVERVERSION_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_DRIVERVERSION,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ fc_ct_ms_fill_attr(entry,
+ fc_host_driver_version(lport->host),
+ FC_FDMI_HBA_ATTR_DRIVERVERSION_LEN);
+
+ /* OptionROM Version */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_DRIVERVERSION_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_OPTIONROMVERSION,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ fc_ct_ms_fill_attr(entry,
+ fc_host_optionrom_version(lport->host),
+ FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN);
+
+ /* Firmware Version */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_FIRMWAREVERSION,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ fc_ct_ms_fill_attr(entry,
+ fc_host_firmware_version(lport->host),
+ FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN);
+
+ /* OS Name and Version */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_OSNAMEVERSION,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ snprintf((char *)&entry->value,
+ FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN,
+ "%s v%s",
+ init_utsname()->sysname,
+ init_utsname()->release);
+ break;
+ case FC_FDMI_RPA:
+ numattrs = 6;
+ len = sizeof(struct fc_fdmi_rpa);
+ len -= sizeof(struct fc_fdmi_attr_entry);
+ len += (numattrs * FC_FDMI_ATTR_ENTRY_HEADER_LEN);
+ len += FC_FDMI_PORT_ATTR_FC4TYPES_LEN;
+ len += FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN;
+ len += FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN;
+ len += FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN;
+ len += FC_FDMI_PORT_ATTR_OSDEVICENAME_LEN;
+ len += FC_FDMI_PORT_ATTR_HOSTNAME_LEN;
+ ct = fc_ct_hdr_fill(fp, op, len, FC_FST_MGMT,
+ FC_FDMI_SUBTYPE);
+
+ /* Port Name */
+ put_unaligned_be64(lport->wwpn,
+ &ct->payload.rpa.port.portname);
+
+ /* Port Attributes */
+ put_unaligned_be32(numattrs,
+ &ct->payload.rpa.hba_attrs.numattrs);
+
+ hba_attrs = &ct->payload.rpa.hba_attrs;
+ entry = (struct fc_fdmi_attr_entry *)hba_attrs->attr;
+
+ /* FC4 types */
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_FC4TYPES_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_FC4TYPES,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ memcpy(&entry->value, fc_host_supported_fc4s(lport->host),
+ FC_FDMI_PORT_ATTR_FC4TYPES_LEN);
+
+ /* Supported Speed */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_PORT_ATTR_FC4TYPES_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_SUPPORTEDSPEED,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+
+ put_unaligned_be32(fc_host_supported_speeds(lport->host),
+ &entry->value);
+
+ /* Current Port Speed */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_CURRENTPORTSPEED,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be32(lport->link_speed,
+ &entry->value);
+
+ /* Max Frame Size */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_MAXFRAMESIZE,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be32(fc_host_maxframe_size(lport->host),
+ &entry->value);
+
+ /* OS Device Name */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_OSDEVICENAME_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_OSDEVICENAME,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ /* Use the sysfs device name */
+ fc_ct_ms_fill_attr(entry,
+ dev_name(&lport->host->shost_gendev),
+ strnlen(dev_name(&lport->host->shost_gendev),
+ FC_FDMI_PORT_ATTR_HOSTNAME_LEN));
+
+ /* Host Name */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_PORT_ATTR_OSDEVICENAME_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_HOSTNAME_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_HOSTNAME,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ if (strlen(fc_host_system_hostname(lport->host)))
+ fc_ct_ms_fill_attr(entry,
+ fc_host_system_hostname(lport->host),
+ strnlen(fc_host_system_hostname(lport->host),
+ FC_FDMI_PORT_ATTR_HOSTNAME_LEN));
+ else
+ fc_ct_ms_fill_attr(entry,
+ init_utsname()->nodename,
+ FC_FDMI_PORT_ATTR_HOSTNAME_LEN);
+ break;
+ case FC_FDMI_DPRT:
+ len = sizeof(struct fc_fdmi_dprt);
+ ct = fc_ct_hdr_fill(fp, op, len, FC_FST_MGMT,
+ FC_FDMI_SUBTYPE);
+ /* Port Name */
+ put_unaligned_be64(lport->wwpn,
+ &ct->payload.dprt.port.portname);
+ break;
+ case FC_FDMI_DHBA:
+ len = sizeof(struct fc_fdmi_dhba);
+ ct = fc_ct_hdr_fill(fp, op, len, FC_FST_MGMT,
+ FC_FDMI_SUBTYPE);
+ /* HBA Identifier */
+ put_unaligned_be64(lport->wwpn, &ct->payload.dhba.hbaid.id);
+ break;
+ default:
+ return -EINVAL;
+ }
+ *r_ctl = FC_RCTL_DD_UNSOL_CTL;
+ *fh_type = FC_TYPE_CT;
+ return 0;
+}
+
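Both the RHBA and RPA cases above advance through the attribute list by casting past the previous entry's value, which makes it easy to pair the wrong length constant with the wrong step. A refactor could hide the stride in a helper along these lines (hypothetical, not part of this patch):

    static struct fc_fdmi_attr_entry *
    next_attr(struct fc_fdmi_attr_entry *entry, size_t prev_value_len)
    {
            /* entries are packed back to back: header, then value */
            return (struct fc_fdmi_attr_entry *)((char *)entry->value +
                                                 prev_value_len);
    }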
+/**
+ * fc_ct_fill() - Fill in a common transport service request frame
+ * @lport: local port.
+ * @fc_id: FC_ID of non-destination rport for GPN_ID and similar inquiries.
+ * @fp: frame to contain payload.
+ * @op: CT opcode.
+ * @r_ctl: pointer to FC header R_CTL.
+ * @fh_type: pointer to FC-4 type.
+ */
+static inline int fc_ct_fill(struct fc_lport *lport,
+ u32 fc_id, struct fc_frame *fp,
+ unsigned int op, enum fc_rctl *r_ctl,
+ enum fc_fh_type *fh_type, u32 *did)
+{
+ int rc = -EINVAL;
+
+ switch (fc_id) {
+ case FC_FID_MGMT_SERV:
+ rc = fc_ct_ms_fill(lport, fc_id, fp, op, r_ctl, fh_type);
+ *did = FC_FID_MGMT_SERV;
+ break;
+ case FC_FID_DIR_SERV:
+ default:
+ rc = fc_ct_ns_fill(lport, fc_id, fp, op, r_ctl, fh_type);
+ *did = FC_FID_DIR_SERV;
+ break;
+ }
+
+ return rc;
+}
+
+/**
+ * fc_plogi_fill - Fill in plogi request frame
+ */
+static inline void fc_plogi_fill(struct fc_lport *lport, struct fc_frame *fp,
+ unsigned int op)
+{
+ struct fc_els_flogi *plogi;
+ struct fc_els_csp *csp;
+ struct fc_els_cssp *cp;
+
+ plogi = fc_frame_payload_get(fp, sizeof(*plogi));
+ memset(plogi, 0, sizeof(*plogi));
+ plogi->fl_cmd = (u8) op;
+ put_unaligned_be64(lport->wwpn, &plogi->fl_wwpn);
+ put_unaligned_be64(lport->wwnn, &plogi->fl_wwnn);
+
+ csp = &plogi->fl_csp;
+ csp->sp_hi_ver = 0x20;
+ csp->sp_lo_ver = 0x20;
+ csp->sp_bb_cred = htons(10); /* this gets set by gateway */
+ csp->sp_bb_data = htons((u16) lport->mfs);
+ cp = &plogi->fl_cssp[3 - 1]; /* class 3 parameters */
+ cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ);
+ csp->sp_features = htons(FC_SP_FT_CIRO);
+ csp->sp_tot_seq = htons(255); /* seq. we accept */
+ csp->sp_rel_off = htons(0x1f);
+ csp->sp_e_d_tov = htonl(lport->e_d_tov);
+
+ cp->cp_rdfs = htons((u16) lport->mfs);
+ cp->cp_con_seq = htons(255);
+ cp->cp_open_seq = 1;
+}
+
+/**
+ * fc_flogi_fill - Fill in a flogi request frame.
+ */
+static inline void fc_flogi_fill(struct fc_lport *lport, struct fc_frame *fp)
+{
+ struct fc_els_csp *sp;
+ struct fc_els_cssp *cp;
+ struct fc_els_flogi *flogi;
+
+ flogi = fc_frame_payload_get(fp, sizeof(*flogi));
+ memset(flogi, 0, sizeof(*flogi));
+ flogi->fl_cmd = (u8) ELS_FLOGI;
+ put_unaligned_be64(lport->wwpn, &flogi->fl_wwpn);
+ put_unaligned_be64(lport->wwnn, &flogi->fl_wwnn);
+ sp = &flogi->fl_csp;
+ sp->sp_hi_ver = 0x20;
+ sp->sp_lo_ver = 0x20;
+ sp->sp_bb_cred = htons(10); /* this gets set by gateway */
+ sp->sp_bb_data = htons((u16) lport->mfs);
+ cp = &flogi->fl_cssp[3 - 1]; /* class 3 parameters */
+ cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ);
+ if (lport->does_npiv)
+ sp->sp_features = htons(FC_SP_FT_NPIV);
+}
+
+/**
+ * fc_fdisc_fill - Fill in a fdisc request frame.
+ */
+static inline void fc_fdisc_fill(struct fc_lport *lport, struct fc_frame *fp)
+{
+ struct fc_els_csp *sp;
+ struct fc_els_cssp *cp;
+ struct fc_els_flogi *fdisc;
+
+ fdisc = fc_frame_payload_get(fp, sizeof(*fdisc));
+ memset(fdisc, 0, sizeof(*fdisc));
+ fdisc->fl_cmd = (u8) ELS_FDISC;
+ put_unaligned_be64(lport->wwpn, &fdisc->fl_wwpn);
+ put_unaligned_be64(lport->wwnn, &fdisc->fl_wwnn);
+ sp = &fdisc->fl_csp;
+ sp->sp_hi_ver = 0x20;
+ sp->sp_lo_ver = 0x20;
+ sp->sp_bb_cred = htons(10); /* this gets set by gateway */
+ sp->sp_bb_data = htons((u16) lport->mfs);
+ cp = &fdisc->fl_cssp[3 - 1]; /* class 3 parameters */
+ cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ);
+}
+
+/**
+ * fc_logo_fill - Fill in a logo request frame.
+ */
+static inline void fc_logo_fill(struct fc_lport *lport, struct fc_frame *fp)
+{
+ struct fc_els_logo *logo;
+
+ logo = fc_frame_payload_get(fp, sizeof(*logo));
+ memset(logo, 0, sizeof(*logo));
+ logo->fl_cmd = ELS_LOGO;
+ hton24(logo->fl_n_port_id, lport->port_id);
+ logo->fl_n_port_wwn = htonll(lport->wwpn);
+}
+
+/**
+ * fc_rtv_fill - Fill in RTV (read timeout value) request frame.
+ */
+static inline void fc_rtv_fill(struct fc_lport *lport, struct fc_frame *fp)
+{
+ struct fc_els_rtv *rtv;
+
+ rtv = fc_frame_payload_get(fp, sizeof(*rtv));
+ memset(rtv, 0, sizeof(*rtv));
+ rtv->rtv_cmd = ELS_RTV;
+}
+
+/**
+ * fc_rec_fill - Fill in rec request frame
+ */
+static inline void fc_rec_fill(struct fc_lport *lport, struct fc_frame *fp)
+{
+ struct fc_els_rec *rec;
+ struct fc_exch *ep = fc_seq_exch(fr_seq(fp));
+
+ rec = fc_frame_payload_get(fp, sizeof(*rec));
+ memset(rec, 0, sizeof(*rec));
+ rec->rec_cmd = ELS_REC;
+ hton24(rec->rec_s_id, lport->port_id);
+ rec->rec_ox_id = htons(ep->oxid);
+ rec->rec_rx_id = htons(ep->rxid);
+}
+
+/**
+ * fc_prli_fill - Fill in prli request frame
+ */
+static inline void fc_prli_fill(struct fc_lport *lport, struct fc_frame *fp)
+{
+ struct {
+ struct fc_els_prli prli;
+ struct fc_els_spp spp;
+ } *pp;
+
+ pp = fc_frame_payload_get(fp, sizeof(*pp));
+ memset(pp, 0, sizeof(*pp));
+ pp->prli.prli_cmd = ELS_PRLI;
+ pp->prli.prli_spp_len = sizeof(struct fc_els_spp);
+ pp->prli.prli_len = htons(sizeof(*pp));
+ pp->spp.spp_type = FC_TYPE_FCP;
+ pp->spp.spp_flags = FC_SPP_EST_IMG_PAIR;
+ pp->spp.spp_params = htonl(lport->service_params);
+}
+
+/**
+ * fc_scr_fill - Fill in a scr request frame.
+ */
+static inline void fc_scr_fill(struct fc_lport *lport, struct fc_frame *fp)
+{
+ struct fc_els_scr *scr;
+
+ scr = fc_frame_payload_get(fp, sizeof(*scr));
+ memset(scr, 0, sizeof(*scr));
+ scr->scr_cmd = ELS_SCR;
+ scr->scr_reg_func = ELS_SCRF_FULL;
+}
+
+/**
+ * fc_els_fill - Fill in an ELS request frame
+ */
+static inline int fc_els_fill(struct fc_lport *lport,
+ u32 did,
+ struct fc_frame *fp, unsigned int op,
+ enum fc_rctl *r_ctl, enum fc_fh_type *fh_type)
+{
+ switch (op) {
+ case ELS_ADISC:
+ fc_adisc_fill(lport, fp);
+ break;
+
+ case ELS_PLOGI:
+ fc_plogi_fill(lport, fp, ELS_PLOGI);
+ break;
+
+ case ELS_FLOGI:
+ fc_flogi_fill(lport, fp);
+ break;
+
+ case ELS_FDISC:
+ fc_fdisc_fill(lport, fp);
+ break;
+
+ case ELS_LOGO:
+ fc_logo_fill(lport, fp);
+ break;
+
+ case ELS_RTV:
+ fc_rtv_fill(lport, fp);
+ break;
+
+ case ELS_REC:
+ fc_rec_fill(lport, fp);
+ break;
+
+ case ELS_PRLI:
+ fc_prli_fill(lport, fp);
+ break;
+
+ case ELS_SCR:
+ fc_scr_fill(lport, fp);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ *r_ctl = FC_RCTL_ELS_REQ;
+ *fh_type = FC_TYPE_ELS;
+ return 0;
+}
+#endif /* _FC_ENCODE_H_ */
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 96a2952cf626..841000445b9a 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -20,7 +20,6 @@
#include <scsi/fc/fc_fc2.h>
#include <scsi/libfc.h>
-#include <scsi/fc_encode.h>
#include "fc_libfc.h"
@@ -272,7 +271,7 @@ static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp,
if (f_ctl & FC_FC_END_SEQ) {
fr_eof(fp) = FC_EOF_T;
- if (fc_sof_needs_ack(ep->class))
+ if (fc_sof_needs_ack((enum fc_sof)ep->class))
fr_eof(fp) = FC_EOF_N;
/*
* From F_CTL.
@@ -1624,8 +1623,13 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
rc = fc_exch_done_locked(ep);
WARN_ON(fc_seq_exch(sp) != ep);
spin_unlock_bh(&ep->ex_lock);
- if (!rc)
+ if (!rc) {
fc_exch_delete(ep);
+ } else {
+ FC_EXCH_DBG(ep, "ep is completed already,"
+ "hence skip calling the resp\n");
+ goto skip_resp;
+ }
}
/*
@@ -1644,6 +1648,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
if (!fc_invoke_resp(ep, sp, fp))
fc_frame_free(fp);
+skip_resp:
fc_exch_release(ep);
return;
rel:
@@ -1900,10 +1905,16 @@ static void fc_exch_reset(struct fc_exch *ep)
fc_exch_hold(ep);
- if (!rc)
+ if (!rc) {
fc_exch_delete(ep);
+ } else {
+ FC_EXCH_DBG(ep, "ep is completed already,"
+ "hence skip calling the resp\n");
+ goto skip_resp;
+ }
fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_CLOSED));
+skip_resp:
fc_seq_set_resp(sp, NULL, ep->arg);
fc_exch_release(ep);
}
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 7cfeb6886237..b43b5f62ee3e 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -26,8 +26,8 @@
#include <scsi/fc/fc_fc2.h>
#include <scsi/libfc.h>
-#include <scsi/fc_encode.h>
+#include "fc_encode.h"
#include "fc_libfc.h"
static struct kmem_cache *scsi_pkt_cachep;
diff --git a/drivers/scsi/libfc/fc_libfc.c b/drivers/scsi/libfc/fc_libfc.c
index 19c4ab4e0f4d..0e6a1355d020 100644
--- a/drivers/scsi/libfc/fc_libfc.c
+++ b/drivers/scsi/libfc/fc_libfc.c
@@ -12,8 +12,8 @@
#include <linux/module.h>
#include <scsi/libfc.h>
-#include <scsi/fc_encode.h>
+#include "fc_encode.h"
#include "fc_libfc.h"
MODULE_AUTHOR("Open-FCoE.org");
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index 6557fda85c5c..22826544da7e 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -84,9 +84,9 @@
#include <scsi/fc/fc_gs.h>
#include <scsi/libfc.h>
-#include <scsi/fc_encode.h>
#include <linux/scatterlist.h>
+#include "fc_encode.h"
#include "fc_libfc.h"
/* Fabric IDs to use for point-to-point mode, chosen on whims. */
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index a60b228d13f1..56003208d2e7 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -58,8 +58,8 @@
#include <asm/unaligned.h>
#include <scsi/libfc.h>
-#include <scsi/fc_encode.h>
+#include "fc_encode.h"
#include "fc_libfc.h"
static struct workqueue_struct *rport_event_queue;
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index f9314f1393fb..4e668aafbcca 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -780,7 +780,7 @@ int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
/**
- * iscsi_cmd_rsp - SCSI Command Response processing
+ * iscsi_scsi_cmd_rsp - SCSI Command Response processing
* @conn: iscsi connection
* @hdr: iscsi header
* @task: scsi command task
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 549adfaa97ce..a54c8da30273 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -664,11 +664,18 @@ struct lpfc_hba {
void (*lpfc_scsi_prep_cmnd)
(struct lpfc_vport *, struct lpfc_io_buf *,
struct lpfc_nodelist *);
+ int (*lpfc_scsi_prep_cmnd_buf)
+ (struct lpfc_vport *vport,
+ struct lpfc_io_buf *lpfc_cmd,
+ uint8_t tmo);
/* IOCB interface function jump table entries */
int (*__lpfc_sli_issue_iocb)
(struct lpfc_hba *, uint32_t,
struct lpfc_iocbq *, uint32_t);
+ int (*__lpfc_sli_issue_fcp_io)
+ (struct lpfc_hba *phba, uint32_t ring_number,
+ struct lpfc_iocbq *piocb, uint32_t flag);
void (*__lpfc_sli_release_iocbq)(struct lpfc_hba *,
struct lpfc_iocbq *);
int (*lpfc_hba_down_post)(struct lpfc_hba *phba);
@@ -744,7 +751,8 @@ struct lpfc_hba {
#define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */
#define LS_IGNORE_ERATT 0x4 /* intr handler should ignore ERATT */
#define LS_MDS_LINK_DOWN 0x8 /* MDS Diagnostics Link Down */
-#define LS_MDS_LOOPBACK 0x10 /* MDS Diagnostics Link Up (Loopback) */
+#define LS_MDS_LOOPBACK 0x10 /* MDS Diagnostics Link Up (Loopback) */
+#define LS_CT_VEN_RPA 0x20 /* Vendor RPA sent to switch */
uint32_t hba_flag; /* hba generic flags */
#define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */
@@ -753,7 +761,7 @@ struct lpfc_hba {
#define HBA_SP_QUEUE_EVT 0x8 /* Slow-path qevt posted to worker thread*/
#define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */
#define HBA_PERSISTENT_TOPO 0x20 /* Persistent topology support in hba */
-#define ELS_XRI_ABORT_EVENT 0x40
+#define ELS_XRI_ABORT_EVENT 0x40 /* ELS_XRI abort event was queued */
#define ASYNC_EVENT 0x80
#define LINK_DISABLED 0x100 /* Link disabled by user */
#define FCF_TS_INPROG 0x200 /* FCF table scan in progress */
@@ -922,6 +930,7 @@ struct lpfc_hba {
#define LPFC_ENABLE_NVME 2
#define LPFC_ENABLE_BOTH 3
uint32_t cfg_enable_pbde;
+ uint32_t cfg_enable_mi;
struct nvmet_fc_target_port *targetport;
lpfc_vpd_t vpd; /* vital product data */
@@ -1129,8 +1138,6 @@ struct lpfc_hba {
uint8_t hb_outstanding;
struct timer_list rrq_tmr;
enum hba_temp_state over_temp_state;
- /* ndlp reference management */
- spinlock_t ndlp_lock;
/*
* Following bit will be set for all buffer tags which are not
* associated with any HBQ.
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index e94eac194676..4528166dee36 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -57,10 +57,6 @@
#define LPFC_MIN_DEVLOSS_TMO 1
#define LPFC_MAX_DEVLOSS_TMO 255
-#define LPFC_DEF_MRQ_POST 512
-#define LPFC_MIN_MRQ_POST 512
-#define LPFC_MAX_MRQ_POST 2048
-
/*
* Write key size should be multiple of 4. If write key is changed
* make sure that library write key is also changed.
@@ -372,11 +368,11 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
nrport = NULL;
- spin_lock(&vport->phba->hbalock);
+ spin_lock(&ndlp->lock);
rport = lpfc_ndlp_get_nrport(ndlp);
if (rport)
nrport = rport->remoteport;
- spin_unlock(&vport->phba->hbalock);
+ spin_unlock(&ndlp->lock);
if (!nrport)
continue;
@@ -1505,6 +1501,7 @@ lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba)
/**
* lpfc_sli4_pdev_reg_request - Request physical dev to perform a register acc
* @phba: lpfc_hba pointer.
+ * @opcode: The sli4 config command opcode.
*
* Description:
* Request SLI4 interface type-2 device to perform a physical register set
@@ -2288,7 +2285,7 @@ lpfc_enable_bbcr_set(struct lpfc_hba *phba, uint val)
return -EINVAL;
}
-/**
+/*
* lpfc_param_show - Return a cfg attribute value in decimal
*
* Description:
@@ -2314,7 +2311,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
phba->cfg_##attr);\
}
-/**
+/*
* lpfc_param_hex_show - Return a cfg attribute value in hex
*
* Description:
@@ -2342,7 +2339,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
phba->cfg_##attr);\
}
-/**
+/*
* lpfc_param_init - Initializes a cfg attribute
*
* Description:
@@ -2376,7 +2373,7 @@ lpfc_##attr##_init(struct lpfc_hba *phba, uint val) \
return -EINVAL;\
}
-/**
+/*
* lpfc_param_set - Set a cfg attribute value
*
* Description:
@@ -2413,7 +2410,7 @@ lpfc_##attr##_set(struct lpfc_hba *phba, uint val) \
return -EINVAL;\
}
-/**
+/*
* lpfc_param_store - Set a vport attribute value
*
* Description:
@@ -2453,7 +2450,7 @@ lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
return -EINVAL;\
}
-/**
+/*
* lpfc_vport_param_show - Return decimal formatted cfg attribute value
*
* Description:
@@ -2477,7 +2474,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
return scnprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\
}
-/**
+/*
* lpfc_vport_param_hex_show - Return hex formatted attribute value
*
* Description:
@@ -2502,7 +2499,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
return scnprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\
}
-/**
+/*
* lpfc_vport_param_init - Initialize a vport cfg attribute
*
* Description:
@@ -2535,7 +2532,7 @@ lpfc_##attr##_init(struct lpfc_vport *vport, uint val) \
return -EINVAL;\
}
-/**
+/*
* lpfc_vport_param_set - Set a vport cfg attribute
*
* Description:
@@ -2571,7 +2568,7 @@ lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \
return -EINVAL;\
}
-/**
+/*
* lpfc_vport_param_store - Set a vport attribute
*
* Description:
@@ -2774,7 +2771,7 @@ lpfc_soft_wwpn_show(struct device *dev, struct device_attribute *attr,
/**
* lpfc_soft_wwpn_store - Set the ww port name of the adapter
- * @dev class device that is converted into a Scsi_host.
+ * @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
* @buf: contains the wwpn in hexadecimal.
* @count: number of wwpn bytes in buf
@@ -2871,7 +2868,8 @@ lpfc_soft_wwnn_show(struct device *dev, struct device_attribute *attr,
/**
* lpfc_soft_wwnn_store - sets the ww node name of the adapter
- * @cdev: class device that is converted into a Scsi_host.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
* @buf: contains the ww node name in hexadecimal.
* @count: number of wwnn bytes in buf.
*
@@ -3207,9 +3205,11 @@ static DEVICE_ATTR(lpfc_xlane_lun_status, S_IRUGO,
* lpfc_oas_lun_state_set - enable or disable a lun for Optimized Access Storage
* (OAS) operations.
* @phba: lpfc_hba pointer.
- * @ndlp: pointer to fcp target node.
+ * @vpt_wwpn: wwpn of the vport associated with the returned lun
+ * @tgt_wwpn: wwpn of the target associated with the returned lun
* @lun: the fc lun for setting oas state.
* @oas_state: the oas state to be set to the lun.
+ * @pri: priority
*
* Returns:
* SUCCESS : 0
@@ -3247,6 +3247,7 @@ lpfc_oas_lun_state_set(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
* @vpt_wwpn: wwpn of the vport associated with the returned lun
* @tgt_wwpn: wwpn of the target associated with the returned lun
* @lun_status: status of the lun returned lun
+ * @lun_pri: priority of the lun returned lun
*
* Returns the first or next lun enabled for OAS operations for the vport/target
* specified. If a lun is found, its vport wwpn, target wwpn and status is
@@ -3285,6 +3286,7 @@ lpfc_oas_lun_get_next(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
* @tgt_wwpn: target wwpn by reference.
* @lun: the fc lun for setting oas state.
* @oas_state: the oas state to be set to the oas_lun.
+ * @pri: priority
*
* This routine enables (OAS_LUN_ENABLE) or disables (OAS_LUN_DISABLE)
* a lun for OAS operations.
@@ -3359,6 +3361,7 @@ lpfc_oas_lun_show(struct device *dev, struct device_attribute *attr,
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
* @buf: buffer for passing information.
+ * @count: size of the formatting string
*
* This function sets the OAS state for lun. Before this function is called,
* the vport wwpn, target wwpn, and oas state need to be set.
@@ -3631,16 +3634,14 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
shost = lpfc_shost_from_vport(vport);
spin_lock_irq(shost->host_lock);
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp))
- continue;
if (ndlp->rport)
ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo;
#if (IS_ENABLED(CONFIG_NVME_FC))
- spin_lock(&vport->phba->hbalock);
+ spin_lock(&ndlp->lock);
rport = lpfc_ndlp_get_nrport(ndlp);
if (rport)
remoteport = rport->remoteport;
- spin_unlock(&vport->phba->hbalock);
+ spin_unlock(&ndlp->lock);
if (rport && remoteport)
nvme_fc_set_remoteport_devloss(remoteport,
vport->cfg_devloss_tmo);
@@ -3820,7 +3821,7 @@ lpfc_vport_param_init(tgt_queue_depth, LPFC_MAX_TGT_QDEPTH,
/**
* lpfc_tgt_queue_depth_store: Sets an attribute value.
- * @phba: pointer the the adapter structure.
+ * @vport: lpfc vport structure pointer.
* @val: integer attribute value.
*
* Description: Sets the parameter to the new value.
@@ -4005,8 +4006,10 @@ LPFC_ATTR(topology, 0, 0, 6,
/**
* lpfc_topology_set - Set the adapters topology field
- * @phba: lpfc_hba pointer.
- * @val: topology value.
+ * @dev: class device that is converted into a scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ * @count: size of the data buffer.
*
* Description:
* If val is in a valid range then set the adapter's topology field and
@@ -4125,6 +4128,7 @@ static DEVICE_ATTR_RO(lpfc_static_vport);
/**
* lpfc_stat_data_ctrl_store - write call back for lpfc_stat_data_ctrl sysfs file
* @dev: Pointer to class device.
+ * @attr: Unused.
* @buf: Data buffer.
* @count: Size of the data buffer.
*
@@ -4288,7 +4292,8 @@ lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
/**
* lpfc_stat_data_ctrl_show - Read function for lpfc_stat_data_ctrl sysfs file
- * @dev: Pointer to class device object.
+ * @dev: Pointer to class device.
+ * @attr: Unused.
* @buf: Data buffer.
*
* This function is the read call back function for
@@ -4367,7 +4372,7 @@ static DEVICE_ATTR_RW(lpfc_stat_data_ctrl);
* @filp: sysfs file
* @kobj: Pointer to the kernel object
* @bin_attr: Attribute object
- * @buff: Buffer pointer
+ * @buf: Buffer pointer
* @off: File offset
* @count: Buffer size
*
@@ -4397,7 +4402,7 @@ sysfs_drvr_stat_data_read(struct file *filp, struct kobject *kobj,
spin_lock_irq(shost->host_lock);
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp) || !ndlp->lat_data)
+ if (!ndlp->lat_data)
continue;
if (nport_index > 0) {
@@ -4454,8 +4459,10 @@ static struct bin_attribute sysfs_drvr_stat_data_attr = {
*/
/**
* lpfc_link_speed_set - Set the adapters link speed
- * @phba: lpfc_hba pointer.
- * @val: link speed value.
+ * @dev: Pointer to class device.
+ * @attr: Unused.
+ * @buf: Data buffer.
+ * @count: Size of the data buffer.
*
* Description:
* If val is in a valid range then set the adapter's link speed field and
@@ -5463,8 +5470,6 @@ lpfc_max_scsicmpl_time_set(struct lpfc_vport *vport, int val)
spin_lock_irq(shost->host_lock);
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp))
- continue;
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
continue;
ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
@@ -6138,6 +6143,14 @@ LPFC_BBCR_ATTR_RW(enable_bbcr, 1, 0, 1, "Enable BBC Recovery");
*/
LPFC_ATTR_RW(enable_dpp, 1, 0, 1, "Enable Direct Packet Push");
+/*
+ * lpfc_enable_mi: Enable FDMI MIB
+ * 0 = disabled
+ * 1 = enabled (default)
+ * Value range is [0,1].
+ */
+LPFC_ATTR_R(enable_mi, 1, 0, 1, "Enable MI");
+
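For readers tracing how the one-line LPFC_ATTR_R() above turns into a sysfs file and module parameter, a rough, illustrative expansion follows; the real macro is defined earlier in lpfc_attr.c and additionally emits an lpfc_enable_mi_init() helper with range checking, so treat this as a sketch of the shape, not the exact output:

	/* Illustrative expansion only; the real LPFC_ATTR_R macro also
	 * generates an _init helper that range-checks [0,1].
	 */
	static uint lpfc_enable_mi = 1;
	module_param(lpfc_enable_mi, uint, S_IRUGO);
	MODULE_PARM_DESC(lpfc_enable_mi, "Enable MI");

	static ssize_t
	lpfc_enable_mi_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
	{
		struct Scsi_Host *shost = class_to_shost(dev);
		struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;

		return scnprintf(buf, PAGE_SIZE, "%d\n",
				 vport->phba->cfg_enable_mi);
	}
	static DEVICE_ATTR_RO(lpfc_enable_mi);

DEVICE_ATTR_RO() is what produces the dev_attr_lpfc_enable_mi symbol added to lpfc_hba_attrs[] just below.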
struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_nvme_info,
&dev_attr_scsi_stat,
@@ -6255,6 +6268,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_ras_fwlog_func,
&dev_attr_lpfc_enable_bbcr,
&dev_attr_lpfc_enable_dpp,
+ &dev_attr_lpfc_enable_mi,
NULL,
};
@@ -6964,8 +6978,7 @@ lpfc_get_node_by_target(struct scsi_target *starget)
spin_lock_irq(shost->host_lock);
/* Search for this, mapped, target ID */
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
- if (NLP_CHK_NODE_ACT(ndlp) &&
- ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
+ if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
starget->id == ndlp->nlp_sid) {
spin_unlock_irq(shost->host_lock);
return ndlp;
@@ -7040,7 +7053,7 @@ lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
else
rport->dev_loss_tmo = 1;
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+ if (!ndlp) {
dev_info(&rport->dev, "Cannot find remote node to "
"set rport dev loss tmo, port_id x%x\n",
rport->port_id);
@@ -7056,7 +7069,7 @@ lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
#endif
}
-/**
+/*
* lpfc_rport_show_function - Return rport target information
*
* Description:
@@ -7105,6 +7118,7 @@ lpfc_set_vport_symbolic_name(struct fc_vport *fc_vport)
/**
* lpfc_hba_log_verbose_init - Set hba's log verbose level
* @phba: Pointer to lpfc_hba struct.
+ * @verbose: Verbose level to set.
*
* This function is called by the lpfc_get_cfgparam() routine to set the
* module lpfc_log_verbose into the @phba cfg_log_verbose for use with
@@ -7359,6 +7373,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_irq_chann_init(phba, lpfc_irq_chann);
lpfc_enable_bbcr_init(phba, lpfc_enable_bbcr);
lpfc_enable_dpp_init(phba, lpfc_enable_dpp);
+ lpfc_enable_mi_init(phba, lpfc_enable_mi);
if (phba->sli_rev != LPFC_SLI_REV4) {
/* NVME only supported on SLI4 */
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 6f9d648a9b9c..eed6ea5e0722 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2009-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -329,7 +329,7 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
spin_unlock_irqrestore(&phba->hbalock, flags);
iocb = &dd_data->context_un.iocb;
- ndlp = iocb->ndlp;
+ ndlp = iocb->cmdiocbq->context_un.ndlp;
rmp = iocb->rmp;
cmp = cmdiocbq->context2;
bmp = cmdiocbq->context3;
@@ -366,8 +366,8 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
lpfc_free_bsg_buffers(phba, rmp);
lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
kfree(bmp);
- lpfc_sli_release_iocbq(phba, cmdiocbq);
lpfc_nlp_put(ndlp);
+ lpfc_sli_release_iocbq(phba, cmdiocbq);
kfree(dd_data);
/* Complete the job if the job is still active */
@@ -408,6 +408,9 @@ lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
/* in case no data is transferred */
bsg_reply->reply_payload_rcv_len = 0;
+ if (ndlp->nlp_flag & NLP_ELS_SND_MASK)
+ return -ENODEV;
+
/* allocate our bsg tracking structure */
dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
if (!dd_data) {
@@ -417,20 +420,10 @@ lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
goto no_dd_data;
}
- if (!lpfc_nlp_get(ndlp)) {
- rc = -ENODEV;
- goto no_ndlp;
- }
-
- if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
- rc = -ENODEV;
- goto free_ndlp;
- }
-
cmdiocbq = lpfc_sli_get_iocbq(phba);
if (!cmdiocbq) {
rc = -ENOMEM;
- goto free_ndlp;
+ goto free_dd;
}
cmd = &cmdiocbq->iocb;
@@ -496,11 +489,10 @@ lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
cmdiocbq->context1 = dd_data;
cmdiocbq->context2 = cmp;
cmdiocbq->context3 = bmp;
- cmdiocbq->context_un.ndlp = ndlp;
+
dd_data->type = TYPE_IOCB;
dd_data->set_job = job;
dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
- dd_data->context_un.iocb.ndlp = ndlp;
dd_data->context_un.iocb.rmp = rmp;
job->dd_data = dd_data;
@@ -514,8 +506,13 @@ lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
readl(phba->HCregaddr); /* flush */
}
- iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
+ cmdiocbq->context_un.ndlp = lpfc_nlp_get(ndlp);
+ if (!cmdiocbq->context_un.ndlp) {
+ rc = -ENODEV;
+ goto free_rmp;
+ }
+ iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
if (iocb_stat == IOCB_SUCCESS) {
spin_lock_irqsave(&phba->hbalock, flags);
/* make sure the I/O had not been completed yet */
@@ -532,7 +529,7 @@ lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
}
/* iocb failed so cleanup */
- job->dd_data = NULL;
+ lpfc_nlp_put(ndlp);
free_rmp:
lpfc_free_bsg_buffers(phba, rmp);
@@ -544,9 +541,7 @@ free_bmp:
kfree(bmp);
free_cmdiocbq:
lpfc_sli_release_iocbq(phba, cmdiocbq);
-free_ndlp:
- lpfc_nlp_put(ndlp);
-no_ndlp:
+free_dd:
kfree(dd_data);
no_dd_data:
/* make error code available to userspace */
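The rework above is the recurring theme of this patch: lpfc_nlp_get() moves from the top of lpfc_bsg_send_mgmt_cmd() to the point just before the iocb is issued, so the node reference is held for exactly the lifetime of the outstanding I/O. Condensed to its essentials (a sketch, not the function's verbatim error unwinding):

	/* Take the node reference last; on issue failure the completion
	 * handler will never run, so the reference is dropped here.
	 */
	cmdiocbq->context_un.ndlp = lpfc_nlp_get(ndlp);	/* NULL if node dying */
	if (!cmdiocbq->context_un.ndlp)
		return -ENODEV;

	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0) != IOCB_SUCCESS)
		lpfc_nlp_put(ndlp);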
@@ -640,8 +635,9 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
}
}
- lpfc_nlp_put(ndlp);
lpfc_els_free_iocb(phba, cmdiocbq);
+
+ lpfc_nlp_put(ndlp);
kfree(dd_data);
/* Complete the job if the job is still active */
@@ -718,15 +714,14 @@ lpfc_bsg_rport_els(struct bsg_job *job)
goto release_ndlp;
}
- rpi = ndlp->nlp_rpi;
-
/* Transfer the request payload to allocated command dma buffer */
-
sg_copy_to_buffer(job->request_payload.sg_list,
job->request_payload.sg_cnt,
((struct lpfc_dmabuf *)cmdiocbq->context2)->virt,
cmdsize);
+ rpi = ndlp->nlp_rpi;
+
if (phba->sli_rev == LPFC_SLI_REV4)
cmdiocbq->iocb.ulpContext = phba->sli4_hba.rpi_ids[rpi];
else
@@ -752,8 +747,13 @@ lpfc_bsg_rport_els(struct bsg_job *job)
readl(phba->HCregaddr); /* flush */
}
- rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
+ cmdiocbq->context1 = lpfc_nlp_get(ndlp);
+ if (!cmdiocbq->context1) {
+ rc = -EIO;
+ goto linkdown_err;
+ }
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
if (rc == IOCB_SUCCESS) {
spin_lock_irqsave(&phba->hbalock, flags);
/* make sure the I/O had not been completed/released */
@@ -769,11 +769,9 @@ lpfc_bsg_rport_els(struct bsg_job *job)
rc = -EIO;
}
- /* iocb failed so cleanup */
- job->dd_data = NULL;
+ /* I/O issue failed. Cleanup resources. */
linkdown_err:
- cmdiocbq->context1 = ndlp;
lpfc_els_free_iocb(phba, cmdiocbq);
release_ndlp:
@@ -902,11 +900,8 @@ diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist)
return 0;
}
-/**
+/*
* lpfc_bsg_ct_unsol_event - process an unsolicited CT command
- * @phba:
- * @pring:
- * @piocbq:
*
* This function is called when an unsolicited CT command is received. It
* forwards the event to any processes registered to receive CT events.
@@ -939,28 +934,9 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
INIT_LIST_HEAD(&head);
list_add_tail(&head, &piocbq->list);
- if (piocbq->iocb.ulpBdeCount == 0 ||
- piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0)
- goto error_ct_unsol_exit;
-
- if (phba->link_state == LPFC_HBA_ERROR ||
- (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)))
- goto error_ct_unsol_exit;
-
- if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
- dmabuf = bdeBuf1;
- else {
- dma_addr = getPaddr(piocbq->iocb.un.cont64[0].addrHigh,
- piocbq->iocb.un.cont64[0].addrLow);
- dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
- }
- if (dmabuf == NULL)
- goto error_ct_unsol_exit;
- ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt;
+ ct_req = (struct lpfc_sli_ct_request *)bdeBuf1;
evt_req_id = ct_req->FsType;
cmd = ct_req->CommandResponse.bits.CmdRsp;
- if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
- lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);
spin_lock_irqsave(&phba->ct_ev_lock, flags);
list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
@@ -1474,7 +1450,8 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
* @phba: Pointer to HBA context object.
* @job: Pointer to the job object.
* @tag: tag index value into the ports context exchange array.
- * @bmp: Pointer to a dma buffer descriptor.
+ * @cmp: Pointer to a cmp dma buffer descriptor.
+ * @bmp: Pointer to a bmp dma buffer descriptor.
* @num_entry: Number of entries in the bde.
**/
static int
@@ -1490,6 +1467,15 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag,
unsigned long flags;
uint32_t creg_val;
+ ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
+ if (!ndlp) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
+ "2721 ndlp null for oxid %x SID %x\n",
+ phba->ct_ctx[tag].rxid,
+ phba->ct_ctx[tag].SID);
+ return IOCB_ERROR;
+ }
+
/* allocate our bsg tracking structure */
dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
if (!dd_data) {
@@ -1540,12 +1526,6 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag,
goto issue_ct_rsp_exit;
}
- /* Check if the ndlp is active */
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
- rc = IOCB_ERROR;
- goto issue_ct_rsp_exit;
- }
-
/* get a reference count so the ndlp doesn't go away while
* we respond
*/
@@ -1580,7 +1560,11 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag,
dd_data->type = TYPE_IOCB;
dd_data->set_job = job;
dd_data->context_un.iocb.cmdiocbq = ctiocb;
- dd_data->context_un.iocb.ndlp = ndlp;
+ dd_data->context_un.iocb.ndlp = lpfc_nlp_get(ndlp);
+ if (!dd_data->context_un.iocb.ndlp) {
+ rc = -IOCB_ERROR;
+ goto issue_ct_rsp_exit;
+ }
dd_data->context_un.iocb.rmp = NULL;
job->dd_data = dd_data;
@@ -1595,7 +1579,6 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag,
}
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
-
if (rc == IOCB_SUCCESS) {
spin_lock_irqsave(&phba->hbalock, flags);
/* make sure the I/O had not been completed/released */
@@ -1609,6 +1592,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag,
/* iocb failed so cleanup */
job->dd_data = NULL;
+ lpfc_nlp_put(ndlp);
issue_ct_rsp_exit:
lpfc_sli_release_iocbq(phba, ctiocb);
@@ -3535,6 +3519,7 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
mb->mbxCommand);
return -EPERM;
}
+ break;
case MBX_WRITE_NV:
case MBX_WRITE_VPARMS:
case MBX_LOAD_SM:
@@ -3886,9 +3871,9 @@ lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp,
/**
* lpfc_bsg_sli_cfg_mse_read_cmd_ext - sli_config non-embedded mailbox cmd read
* @phba: Pointer to HBA context object.
- * @mb: Pointer to a BSG mailbox object.
+ * @job: Pointer to the job object.
* @nemb_tp: Enumerate of non-embedded mailbox command type.
- * @dmabuff: Pointer to a DMA buffer descriptor.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
*
* This routine performs SLI_CONFIG (0x9B) read mailbox command operation with
* non-embedded external buffers.
@@ -4075,8 +4060,9 @@ job_error:
/**
* lpfc_bsg_sli_cfg_write_cmd_ext - sli_config non-embedded mailbox cmd write
* @phba: Pointer to HBA context object.
- * @mb: Pointer to a BSG mailbox object.
- * @dmabuff: Pointer to a DMA buffer descriptor.
+ * @job: Pointer to the job object.
+ * @nemb_tp: Enumerate of non-embedded mailbox command type.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
*
* This routine performs SLI_CONFIG (0x9B) write mailbox command operation with
* non-embedded external buffers.
@@ -4241,8 +4227,8 @@ job_error:
/**
* lpfc_bsg_handle_sli_cfg_mbox - handle sli-cfg mailbox cmd with ext buffer
* @phba: Pointer to HBA context object.
- * @mb: Pointer to a BSG mailbox object.
- * @dmabuff: Pointer to a DMA buffer descriptor.
+ * @job: Pointer to the job object.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
*
* This routine handles SLI_CONFIG (0x9B) mailbox command with non-embedded
* external buffers, including both 0x9B with non-embedded MSEs and 0x9B
@@ -4393,7 +4379,7 @@ lpfc_bsg_mbox_ext_abort(struct lpfc_hba *phba)
/**
* lpfc_bsg_read_ebuf_get - get the next mailbox read external buffer
* @phba: Pointer to HBA context object.
- * @dmabuf: Pointer to a DMA buffer descriptor.
+ * @job: Pointer to the job object.
*
* This routine extracts the next mailbox read external buffer back to
* user space through BSG.
@@ -4463,6 +4449,7 @@ lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct bsg_job *job)
/**
* lpfc_bsg_write_ebuf_set - set the next mailbox write external buffer
* @phba: Pointer to HBA context object.
+ * @job: Pointer to the job object.
* @dmabuf: Pointer to a DMA buffer descriptor.
*
* This routine sets up the next mailbox read external buffer obtained
@@ -4588,8 +4575,8 @@ job_error:
/**
* lpfc_bsg_handle_sli_cfg_ebuf - handle ext buffer with sli-cfg mailbox cmd
* @phba: Pointer to HBA context object.
- * @mb: Pointer to a BSG mailbox object.
- * @dmabuff: Pointer to a DMA buffer descriptor.
+ * @job: Pointer to the job object.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
*
* This routine handles the external buffer with SLI_CONFIG (0x9B) mailbox
* command with multiple non-embedded external buffers.
@@ -4633,8 +4620,8 @@ lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct bsg_job *job,
/**
* lpfc_bsg_handle_sli_cfg_ext - handle sli-cfg mailbox with external buffer
* @phba: Pointer to HBA context object.
- * @mb: Pointer to a BSG mailbox object.
- * @dmabuff: Pointer to a DMA buffer descriptor.
+ * @job: Pointer to the job object.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
*
* This routine checks and handles non-embedded multi-buffer SLI_CONFIG
* (0x9B) mailbox commands and external buffers.
@@ -4707,7 +4694,7 @@ sli_cfg_ext_error:
/**
* lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
* @phba: Pointer to HBA context object.
- * @mb: Pointer to a mailbox object.
+ * @job: Pointer to the job object.
* @vport: Pointer to a vport object.
*
* Allocate a tracking object, mailbox command memory, get a mailbox
@@ -5935,7 +5922,7 @@ lpfc_bsg_timeout(struct bsg_job *job)
}
}
if (list_empty(&completions))
- lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
+ lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb, NULL);
spin_unlock_irqrestore(&phba->hbalock, flags);
if (!list_empty(&completions)) {
lpfc_sli_cancel_iocbs(phba, &completions,
@@ -5972,7 +5959,7 @@ lpfc_bsg_timeout(struct bsg_job *job)
}
}
if (list_empty(&completions))
- lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
+ lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb, NULL);
spin_unlock_irqrestore(&phba->hbalock, flags);
if (!list_empty(&completions)) {
lpfc_sli_cancel_iocbs(phba, &completions,
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 782f6f76f18a..f78e52a18b0b 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -88,8 +88,6 @@ void lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_unregister_vfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_enqueue_node(struct lpfc_vport *, struct lpfc_nodelist *);
void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *);
-struct lpfc_nodelist *lpfc_enable_node(struct lpfc_vport *,
- struct lpfc_nodelist *, int);
void lpfc_nlp_set_state(struct lpfc_vport *, struct lpfc_nodelist *, int);
void lpfc_drop_node(struct lpfc_vport *, struct lpfc_nodelist *);
void lpfc_set_disctmo(struct lpfc_vport *);
@@ -322,8 +320,12 @@ void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
struct lpfc_iocbq *, uint32_t);
+int lpfc_sli_issue_fcp_io(struct lpfc_hba *phba, uint32_t ring_number,
+ struct lpfc_iocbq *piocb, uint32_t flag);
int lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
struct lpfc_iocbq *pwqe);
+int lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba,
+ struct lpfc_iocbq *cmdiocb, void *cmpl);
struct lpfc_sglq *__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xri);
struct lpfc_sglq *__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba,
struct lpfc_iocbq *piocbq);
@@ -348,7 +350,7 @@ int lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *, uint32_t);
void lpfc_sli_hbqbuf_free_all(struct lpfc_hba *);
int lpfc_sli_hbq_size(void);
int lpfc_sli_issue_abort_iotag(struct lpfc_hba *, struct lpfc_sli_ring *,
- struct lpfc_iocbq *);
+ struct lpfc_iocbq *, void *);
int lpfc_sli_sum_iocb(struct lpfc_vport *, uint16_t, uint64_t, lpfc_ctx_cmd);
int lpfc_sli_abort_iocb(struct lpfc_vport *, struct lpfc_sli_ring *, uint16_t,
uint64_t, lpfc_ctx_cmd);
@@ -371,6 +373,8 @@ int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, uint32_t,
uint32_t);
void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_iocbq *);
+void lpfc_sli4_abort_fcp_cmpl(struct lpfc_hba *h, struct lpfc_iocbq *i,
+ struct lpfc_wcqe_complete *w);
void lpfc_sli_free_hbq(struct lpfc_hba *, struct hbq_dmabuf *);
@@ -592,11 +596,13 @@ struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *ncmd,
struct lpfc_sli4_hdw_queue *qp);
void lpfc_io_ktime(struct lpfc_hba *phba, struct lpfc_io_buf *ncmd);
-void lpfc_nvme_cmd_template(void);
+void lpfc_wqe_cmd_template(void);
void lpfc_nvmet_cmd_template(void);
void lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn);
-void lpfc_nvme_prep_abort_wqe(struct lpfc_iocbq *pwqeq, u16 xritag, u8 opt);
extern int lpfc_enable_nvmet_cnt;
extern unsigned long long lpfc_enable_nvmet[];
extern int lpfc_no_hba_reset_cnt;
extern unsigned long lpfc_no_hba_reset[];
+extern union lpfc_wqe128 lpfc_iread_cmd_template;
+extern union lpfc_wqe128 lpfc_iwrite_cmd_template;
+extern union lpfc_wqe128 lpfc_icmnd_cmd_template;
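The prototype change above widens lpfc_sli_issue_abort_iotag() with a completion argument; the lpfc_bsg.c hunks earlier pass NULL to keep the default abort completion. Both intended call shapes, sketched under the assumption that the void *cmpl parameter accepts a wqe completion handler such as the lpfc_sli4_abort_fcp_cmpl() declared above:

	/* Default abort completion, as in the lpfc_bsg.c timeout paths. */
	lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb, NULL);

	/* Caller-supplied completion handler (sketch). */
	lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb, lpfc_sli4_abort_fcp_cmpl);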
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index a8bf4d0d58f0..dd0b432f7ac5 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -99,21 +99,265 @@ lpfc_ct_unsol_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
lpfc_ct_ignore_hbq_buffer(phba, piocbq, mp, size);
}
+/**
+ * lpfc_ct_unsol_cmpl - Completion callback function for unsol CT commands
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the completion callback for the unsolicited CT reject
+ * command. The memory allocated on the reject command path is freed here.
+ **/
+static void
+lpfc_ct_unsol_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ struct lpfc_nodelist *ndlp;
+ struct lpfc_dmabuf *mp, *bmp;
+
+ ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
+ if (ndlp)
+ lpfc_nlp_put(ndlp);
+
+ mp = cmdiocb->context2;
+ bmp = cmdiocb->context3;
+ if (mp) {
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ cmdiocb->context2 = NULL;
+ }
+
+ if (bmp) {
+ lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+ kfree(bmp);
+ cmdiocb->context3 = NULL;
+ }
+
+ lpfc_sli_release_iocbq(phba, cmdiocb);
+}
+
+/**
+ * lpfc_ct_reject_event - Issue reject for unhandled CT MIB commands
+ * @ndlp: pointer to a node-list data structure.
+ * @ct_req: pointer to the CT request data structure.
+ * @rx_id: rx_id of the received UNSOL CT command
+ * @ox_id: ox_id of the UNSOL CT command
+ *
+ * This routine is invoked by the lpfc_ct_handle_mibreq routine for sending
+ * a reject response. A reject response is sent for any unhandled command.
+ **/
+static void
+lpfc_ct_reject_event(struct lpfc_nodelist *ndlp,
+ struct lpfc_sli_ct_request *ct_req,
+ u16 rx_id, u16 ox_id)
+{
+ struct lpfc_vport *vport = ndlp->vport;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_sli_ct_request *ct_rsp;
+ struct lpfc_iocbq *cmdiocbq = NULL;
+ struct lpfc_dmabuf *bmp = NULL;
+ struct lpfc_dmabuf *mp = NULL;
+ struct ulp_bde64 *bpl;
+ IOCB_t *icmd;
+ u8 rc = 0;
+
+ /* fill in BDEs for command */
+ mp = kmalloc(sizeof(*mp), GFP_KERNEL);
+ if (!mp) {
+ rc = 1;
+ goto ct_exit;
+ }
+
+ mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp->phys);
+ if (!mp->virt) {
+ rc = 2;
+ goto ct_free_mp;
+ }
+
+ /* Allocate buffer for Buffer ptr list */
+ bmp = kmalloc(sizeof(*bmp), GFP_KERNEL);
+ if (!bmp) {
+ rc = 3;
+ goto ct_free_mpvirt;
+ }
+
+ bmp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &bmp->phys);
+ if (!bmp->virt) {
+ rc = 4;
+ goto ct_free_bmp;
+ }
+
+ INIT_LIST_HEAD(&mp->list);
+ INIT_LIST_HEAD(&bmp->list);
+
+ bpl = (struct ulp_bde64 *)bmp->virt;
+ memset(bpl, 0, sizeof(struct ulp_bde64));
+ bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
+ bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
+ bpl->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
+ bpl->tus.f.bdeSize = (LPFC_CT_PREAMBLE - 4);
+ bpl->tus.w = le32_to_cpu(bpl->tus.w);
+
+ ct_rsp = (struct lpfc_sli_ct_request *)mp->virt;
+ memset(ct_rsp, 0, sizeof(struct lpfc_sli_ct_request));
+
+ ct_rsp->RevisionId.bits.Revision = SLI_CT_REVISION;
+ ct_rsp->RevisionId.bits.InId = 0;
+ ct_rsp->FsType = ct_req->FsType;
+ ct_rsp->FsSubType = ct_req->FsSubType;
+ ct_rsp->CommandResponse.bits.Size = 0;
+ ct_rsp->CommandResponse.bits.CmdRsp =
+ cpu_to_be16(SLI_CT_RESPONSE_FS_RJT);
+ ct_rsp->ReasonCode = SLI_CT_REQ_NOT_SUPPORTED;
+ ct_rsp->Explanation = SLI_CT_NO_ADDITIONAL_EXPL;
+
+ cmdiocbq = lpfc_sli_get_iocbq(phba);
+ if (!cmdiocbq) {
+ rc = 5;
+ goto ct_free_bmpvirt;
+ }
+
+ icmd = &cmdiocbq->iocb;
+ icmd->un.genreq64.bdl.ulpIoTag32 = 0;
+ icmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
+ icmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
+ icmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+ icmd->un.genreq64.bdl.bdeSize = sizeof(struct ulp_bde64);
+ icmd->un.genreq64.w5.hcsw.Fctl = (LS | LA);
+ icmd->un.genreq64.w5.hcsw.Dfctl = 0;
+ icmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_SOL_CTL;
+ icmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
+ icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
+ icmd->ulpBdeCount = 1;
+ icmd->ulpLe = 1;
+ icmd->ulpClass = CLASS3;
+
+ /* Save for completion so we can release these resources */
+ cmdiocbq->context1 = lpfc_nlp_get(ndlp);
+ cmdiocbq->context2 = (uint8_t *)mp;
+ cmdiocbq->context3 = (uint8_t *)bmp;
+ cmdiocbq->iocb_cmpl = lpfc_ct_unsol_cmpl;
+ icmd->ulpContext = rx_id; /* Xri / rx_id */
+ icmd->unsli3.rcvsli3.ox_id = ox_id;
+ icmd->un.ulpWord[3] =
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+ icmd->ulpTimeout = (3 * phba->fc_ratov);
+
+ cmdiocbq->retry = 0;
+ cmdiocbq->vport = vport;
+ cmdiocbq->context_un.ndlp = NULL;
+ cmdiocbq->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT;
+
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
+ if (!rc)
+ return;
+
+ rc = 6;
+ lpfc_nlp_put(ndlp);
+ lpfc_sli_release_iocbq(phba, cmdiocbq);
+ct_free_bmpvirt:
+ lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+ct_free_bmp:
+ kfree(bmp);
+ct_free_mpvirt:
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ct_free_mp:
+ kfree(mp);
+ct_exit:
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "6440 Unsol CT: Rsp err %d Data: x%x\n",
+ rc, vport->fc_flag);
+}
+
+/**
+ * lpfc_ct_handle_mibreq - Process an unsolicited CT MIB request data buffer
+ * @phba: pointer to lpfc hba data structure.
+ * @ctiocbq: pointer to lpfc CT command iocb data structure.
+ *
+ * This routine is used for processing the IOCB associated with an unsolicited
+ * CT MIB request. It first determines whether there is an existing ndlp that
+ * matches the DID from the unsolicited IOCB. If not, it will return.
+ **/
+static void
+lpfc_ct_handle_mibreq(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocbq)
+{
+ struct lpfc_sli_ct_request *ct_req;
+ struct lpfc_nodelist *ndlp = NULL;
+ struct lpfc_vport *vport = NULL;
+ IOCB_t *icmd = &ctiocbq->iocb;
+ u32 mi_cmd, vpi;
+ u32 did = 0;
+
+ vpi = ctiocbq->iocb.unsli3.rcvsli3.vpi;
+ vport = lpfc_find_vport_by_vpid(phba, vpi);
+ if (!vport) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "6437 Unsol CT: VPORT NULL vpi : x%x\n",
+ vpi);
+ return;
+ }
+
+ did = ctiocbq->iocb.un.rcvels.remoteID;
+ if (icmd->ulpStatus) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "6438 Unsol CT: status:x%x/x%x did : x%x\n",
+ icmd->ulpStatus, icmd->un.ulpWord[4], did);
+ return;
+ }
+
+ /* Ignore traffic received during vport shutdown */
+ if (vport->fc_flag & FC_UNLOADING)
+ return;
+
+ ndlp = lpfc_findnode_did(vport, did);
+ if (!ndlp) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "6439 Unsol CT: NDLP Not Found for DID : x%x",
+ did);
+ return;
+ }
+
+ ct_req = ((struct lpfc_sli_ct_request *)
+ (((struct lpfc_dmabuf *)ctiocbq->context2)->virt));
+
+ mi_cmd = ct_req->CommandResponse.bits.CmdRsp;
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "6442 : MI Cmd : x%x Not Supported\n", mi_cmd);
+ lpfc_ct_reject_event(ndlp, ct_req,
+ ctiocbq->iocb.ulpContext,
+ ctiocbq->iocb.unsli3.rcvsli3.ox_id);
+}
+
+/**
+ * lpfc_ct_unsol_event - Process an unsolicited event from a ct sli ring
+ * @phba: pointer to lpfc hba data structure.
+ * @pring: pointer to a SLI ring.
+ * @ctiocbq: pointer to lpfc ct iocb data structure.
+ *
+ * This routine is used to process an unsolicited event received from a SLI
+ * (Service Level Interface) ring. The actual processing of the data buffer
+ * associated with the unsolicited event is done by invoking the appropriate
+ * routine after properly setting up the iocb buffer from the SLI ring on
+ * which the unsolicited event was received.
+ **/
void
lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
- struct lpfc_iocbq *piocbq)
+ struct lpfc_iocbq *ctiocbq)
{
struct lpfc_dmabuf *mp = NULL;
- IOCB_t *icmd = &piocbq->iocb;
+ IOCB_t *icmd = &ctiocbq->iocb;
int i;
struct lpfc_iocbq *iocbq;
- dma_addr_t paddr;
+ dma_addr_t dma_addr;
uint32_t size;
struct list_head head;
- struct lpfc_dmabuf *bdeBuf;
+ struct lpfc_sli_ct_request *ct_req;
+ struct lpfc_dmabuf *bdeBuf1 = ctiocbq->context2;
+ struct lpfc_dmabuf *bdeBuf2 = ctiocbq->context3;
- if (lpfc_bsg_ct_unsol_event(phba, pring, piocbq) == 0)
- return;
+ ctiocbq->context1 = NULL;
+ ctiocbq->context2 = NULL;
+ ctiocbq->context3 = NULL;
if (unlikely(icmd->ulpStatus == IOSTAT_NEED_BUFFER)) {
lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
@@ -127,46 +371,75 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
return;
}
- /* If there are no BDEs associated with this IOCB,
- * there is nothing to do.
+ /* If there are no BDEs associated
+ * with this IOCB, there is nothing to do.
*/
if (icmd->ulpBdeCount == 0)
return;
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
+ ctiocbq->context2 = bdeBuf1;
+ if (icmd->ulpBdeCount == 2)
+ ctiocbq->context3 = bdeBuf2;
+ } else {
+ dma_addr = getPaddr(icmd->un.cont64[0].addrHigh,
+ icmd->un.cont64[0].addrLow);
+ ctiocbq->context2 = lpfc_sli_ringpostbuf_get(phba, pring,
+ dma_addr);
+ if (icmd->ulpBdeCount == 2) {
+ dma_addr = getPaddr(icmd->un.cont64[1].addrHigh,
+ icmd->un.cont64[1].addrLow);
+ ctiocbq->context3 = lpfc_sli_ringpostbuf_get(phba,
+ pring,
+ dma_addr);
+ }
+ }
+
+ ct_req = ((struct lpfc_sli_ct_request *)
+ (((struct lpfc_dmabuf *)ctiocbq->context2)->virt));
+
+ if (ct_req->FsType == SLI_CT_MANAGEMENT_SERVICE &&
+ ct_req->FsSubType == SLI_CT_MIB_Subtypes) {
+ lpfc_ct_handle_mibreq(phba, ctiocbq);
+ } else {
+ if (!lpfc_bsg_ct_unsol_event(phba, pring, ctiocbq))
+ return;
+ }
+
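With the buffer mapping done up front, the routing decision above reduces to one peek at the CT preamble: management-service frames carrying the MIB subtype take the new lpfc_ct_handle_mibreq() path, while everything else still goes to the BSG event handler. Distilled (a sketch assuming context2 already holds the first payload buffer, as set up just above):

	struct lpfc_sli_ct_request *ct_req =
		((struct lpfc_dmabuf *)ctiocbq->context2)->virt;

	if (ct_req->FsType == SLI_CT_MANAGEMENT_SERVICE &&
	    ct_req->FsSubType == SLI_CT_MIB_Subtypes)
		lpfc_ct_handle_mibreq(phba, ctiocbq);	/* new MIB path */
	else if (!lpfc_bsg_ct_unsol_event(phba, pring, ctiocbq))
		return;					/* consumed by BSG */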
+ if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
INIT_LIST_HEAD(&head);
- list_add_tail(&head, &piocbq->list);
+ list_add_tail(&head, &ctiocbq->list);
list_for_each_entry(iocbq, &head, list) {
icmd = &iocbq->iocb;
if (icmd->ulpBdeCount == 0)
continue;
- bdeBuf = iocbq->context2;
+ bdeBuf1 = iocbq->context2;
iocbq->context2 = NULL;
size = icmd->un.cont64[0].tus.f.bdeSize;
- lpfc_ct_unsol_buffer(phba, piocbq, bdeBuf, size);
- lpfc_in_buf_free(phba, bdeBuf);
+ lpfc_ct_unsol_buffer(phba, ctiocbq, bdeBuf1, size);
+ lpfc_in_buf_free(phba, bdeBuf1);
if (icmd->ulpBdeCount == 2) {
- bdeBuf = iocbq->context3;
+ bdeBuf2 = iocbq->context3;
iocbq->context3 = NULL;
size = icmd->unsli3.rcvsli3.bde2.tus.f.bdeSize;
- lpfc_ct_unsol_buffer(phba, piocbq, bdeBuf,
+ lpfc_ct_unsol_buffer(phba, ctiocbq, bdeBuf2,
size);
- lpfc_in_buf_free(phba, bdeBuf);
+ lpfc_in_buf_free(phba, bdeBuf2);
}
}
list_del(&head);
} else {
INIT_LIST_HEAD(&head);
- list_add_tail(&head, &piocbq->list);
+ list_add_tail(&head, &ctiocbq->list);
list_for_each_entry(iocbq, &head, list) {
icmd = &iocbq->iocb;
if (icmd->ulpBdeCount == 0)
lpfc_ct_unsol_buffer(phba, iocbq, NULL, 0);
for (i = 0; i < icmd->ulpBdeCount; i++) {
- paddr = getPaddr(icmd->un.cont64[i].addrHigh,
- icmd->un.cont64[i].addrLow);
+ dma_addr = getPaddr(icmd->un.cont64[i].addrHigh,
+ icmd->un.cont64[i].addrLow);
mp = lpfc_sli_ringpostbuf_get(phba, pring,
- paddr);
+ dma_addr);
size = icmd->un.cont64[i].tus.f.bdeSize;
lpfc_ct_unsol_buffer(phba, iocbq, mp, size);
lpfc_in_buf_free(phba, mp);
@@ -275,10 +548,8 @@ lpfc_ct_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocb)
{
struct lpfc_dmabuf *buf_ptr;
- if (ctiocb->context_un.ndlp) {
- lpfc_nlp_put(ctiocb->context_un.ndlp);
- ctiocb->context_un.ndlp = NULL;
- }
+ /* I/O job is complete so context is now invalid */
+ ctiocb->context_un.ndlp = NULL;
if (ctiocb->context1) {
buf_ptr = (struct lpfc_dmabuf *) ctiocb->context1;
lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
@@ -345,7 +616,6 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
/* Save for completion so we can release these resources */
geniocb->context1 = (uint8_t *) inp;
geniocb->context2 = (uint8_t *) outp;
- geniocb->context_un.ndlp = lpfc_nlp_get(ndlp);
/* Fill in payload, bp points to frame payload */
icmd->ulpCommand = CMD_GEN_REQUEST64_CR;
@@ -384,16 +654,21 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
geniocb->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT;
geniocb->vport = vport;
geniocb->retry = retry;
+ geniocb->context_un.ndlp = lpfc_nlp_get(ndlp);
+ if (!geniocb->context_un.ndlp)
+ goto out;
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, geniocb, 0);
if (rc == IOCB_ERROR) {
geniocb->context_un.ndlp = NULL;
lpfc_nlp_put(ndlp);
- lpfc_sli_release_iocbq(phba, geniocb);
- return 1;
+ goto out;
}
return 0;
+out:
+ lpfc_sli_release_iocbq(phba, geniocb);
+ return 1;
}
/*
@@ -467,7 +742,7 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
ndlp = lpfc_setup_disc_node(vport, Did);
- if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
+ if (ndlp) {
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
"Parse GID_FTrsp: did:x%x flg:x%x x%x",
Did, ndlp->nlp_flag, vport->fc_flag);
@@ -518,7 +793,7 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
* Don't even bother to send GFF_ID.
*/
ndlp = lpfc_findnode_did(vport, Did);
- if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
+ if (ndlp &&
(ndlp->nlp_type &
(NLP_FCP_TARGET | NLP_NVME_TARGET))) {
if (fc4_type == FC_TYPE_FCP)
@@ -550,7 +825,6 @@ lpfc_ns_rsp_audit_did(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp = NULL;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
char *str;
if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_FT)
@@ -579,12 +853,12 @@ lpfc_ns_rsp_audit_did(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
if (ndlp->nlp_type != NLP_NVME_INITIATOR ||
ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
continue;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
if (ndlp->nlp_DID == Did)
ndlp->nlp_flag &= ~NLP_NVMET_RECOV;
else
ndlp->nlp_flag |= NLP_NVMET_RECOV;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
}
}
}
@@ -600,7 +874,6 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint8_t fc4_type,
uint32_t Did, CTentry;
int Cnt;
struct list_head head;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp = NULL;
lpfc_set_disctmo(vport);
@@ -646,9 +919,9 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint8_t fc4_type,
continue;
lpfc_disc_state_machine(vport, ndlp, NULL,
NLP_EVT_DEVICE_RECOVERY);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_NVMET_RECOV;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
}
}
@@ -861,8 +1134,8 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_disc_start(vport);
}
out:
- cmdiocb->context_un.ndlp = ndlp; /* Now restore ndlp for free */
lpfc_ct_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
return;
}
@@ -1068,8 +1341,8 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_disc_start(vport);
}
out:
- cmdiocb->context_un.ndlp = ndlp; /* Now restore ndlp for free */
lpfc_ct_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
}
static void
@@ -1084,7 +1357,7 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_sli_ct_request *CTrsp;
int did, rc, retry;
uint8_t fbits;
- struct lpfc_nodelist *ndlp;
+ struct lpfc_nodelist *ndlp = NULL, *free_ndlp = NULL;
did = ((struct lpfc_sli_ct_request *) inp->virt)->un.gff.PortId;
did = be32_to_cpu(did);
@@ -1150,7 +1423,9 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
cmdiocb->retry, did);
if (rc == 0) {
/* success */
+ free_ndlp = cmdiocb->context_un.ndlp;
lpfc_ct_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(free_ndlp);
return;
}
}
@@ -1164,7 +1439,7 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* This is a target port, unregistered port, or the GFF_ID failed */
ndlp = lpfc_setup_disc_node(vport, did);
- if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
+ if (ndlp) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0242 Process x%x GFF "
"NameServer Rsp Data: x%x x%x x%x\n",
@@ -1203,7 +1478,10 @@ out:
}
lpfc_disc_start(vport);
}
+
+ free_ndlp = cmdiocb->context_un.ndlp;
lpfc_ct_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(free_ndlp);
return;
}
@@ -1217,7 +1495,8 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_dmabuf *outp = (struct lpfc_dmabuf *)cmdiocb->context2;
struct lpfc_sli_ct_request *CTrsp;
int did;
- struct lpfc_nodelist *ndlp;
+ struct lpfc_nodelist *ndlp = NULL;
+ struct lpfc_nodelist *ns_ndlp = NULL;
uint32_t fc4_data_0, fc4_data_1;
did = ((struct lpfc_sli_ct_request *)inp->virt)->un.gft.PortId;
@@ -1227,6 +1506,9 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
"GFT_ID cmpl: status:x%x/x%x did:x%x",
irsp->ulpStatus, irsp->un.ulpWord[4], did);
+ /* Preserve the nameserver node to release the reference. */
+ ns_ndlp = cmdiocb->context_un.ndlp;
+
if (irsp->ulpStatus == IOSTAT_SUCCESS) {
/* Good status, continue checking */
CTrsp = (struct lpfc_sli_ct_request *)outp->virt;
@@ -1242,6 +1524,10 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
(fc4_data_1 & LPFC_FC4_TYPE_BITMASK) ?
"NVME" : " ");
+ /* Lookup the NPort_ID queried in the GFT_ID and find the
+ * driver's local node. It's an error if the driver
+ * doesn't have one.
+ */
ndlp = lpfc_findnode_did(vport, did);
if (ndlp) {
/* The bitmask value for FCP and NVME FCP types is
@@ -1287,6 +1573,7 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
"3065 GFT_ID failed x%08x\n", irsp->ulpStatus);
lpfc_ct_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ns_ndlp);
}
static void
@@ -1356,8 +1643,8 @@ lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
out:
- cmdiocb->context_un.ndlp = ndlp; /* Now restore ndlp for free */
lpfc_ct_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
return;
}
@@ -1599,8 +1886,7 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
int rc = 0;
ndlp = lpfc_findnode_did(vport, NameServer_DID);
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)
- || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) {
+ if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) {
rc=1;
goto ns_cmd_exit;
}
@@ -1841,11 +2127,6 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
}
rc=6;
- /* Decrement ndlp reference count to release ndlp reference held
- * for the failed command's callback function.
- */
- lpfc_nlp_put(ndlp);
-
ns_cmd_free_bmpvirt:
lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
ns_cmd_free_bmp:
@@ -1882,7 +2163,7 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
uint16_t fdmi_cmd = CTcmd->CommandResponse.bits.CmdRsp;
uint16_t fdmi_rsp = CTrsp->CommandResponse.bits.CmdRsp;
IOCB_t *irsp = &rspiocb->iocb;
- struct lpfc_nodelist *ndlp;
+ struct lpfc_nodelist *ndlp, *free_ndlp = NULL;
uint32_t latt, cmd, err;
latt = lpfc_els_chk_latt(vport);
@@ -1928,10 +2209,13 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
be16_to_cpu(fdmi_cmd), latt, irsp->ulpStatus,
irsp->un.ulpWord[4]);
}
+
+ free_ndlp = cmdiocb->context_un.ndlp;
lpfc_ct_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(free_ndlp);
ndlp = lpfc_findnode_did(vport, FDMI_DID);
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+ if (!ndlp)
return;
/* Check for a CT LS_RJT response */
@@ -1959,6 +2243,7 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
vport->fdmi_port_mask = LPFC_FDMI1_PORT_ATTR;
/* Start over */
lpfc_fdmi_cmd(vport, ndlp, cmd, 0);
+ return;
}
if (vport->fdmi_port_mask == LPFC_FDMI2_SMART_ATTR) {
vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
@@ -1968,12 +2253,21 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
return;
case SLI_MGMT_RPA:
+ /* No retry on Vendor RPA */
+ if (phba->link_flag & LS_CT_VEN_RPA) {
+ lpfc_printf_vlog(vport, KERN_ERR,
+ LOG_DISCOVERY | LOG_ELS,
+ "6460 VEN FDMI RPA failure\n");
+ phba->link_flag &= ~LS_CT_VEN_RPA;
+ return;
+ }
if (vport->fdmi_port_mask == LPFC_FDMI2_PORT_ATTR) {
/* Fallback to FDMI-1 */
vport->fdmi_hba_mask = LPFC_FDMI1_HBA_ATTR;
vport->fdmi_port_mask = LPFC_FDMI1_PORT_ATTR;
/* Start over */
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
+ return;
}
if (vport->fdmi_port_mask == LPFC_FDMI2_SMART_ATTR) {
vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
@@ -2004,6 +2298,33 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
else
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPRT, 0);
break;
+ case SLI_MGMT_RPA:
+ if (vport->port_type == LPFC_PHYSICAL_PORT &&
+ phba->cfg_enable_mi &&
+ phba->sli4_hba.pc_sli4_params.mi_ver > LPFC_MIB1_SUPPORT) {
+ /* MI is only for the physical port, no vports */
+ if (phba->link_flag & LS_CT_VEN_RPA) {
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_DISCOVERY | LOG_ELS,
+ "6449 VEN RPA Success\n");
+ break;
+ }
+
+ if (lpfc_fdmi_cmd(vport, ndlp, cmd,
+ LPFC_FDMI_VENDOR_ATTR_mi) == 0)
+ phba->link_flag |= LS_CT_VEN_RPA;
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_DISCOVERY | LOG_ELS,
+ "6458 Send MI FDMI:%x Flag x%x\n",
+ phba->sli4_hba.pc_sli4_params.mi_value,
+ phba->link_flag);
+ } else {
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_DISCOVERY | LOG_ELS,
+ "6459 No FDMI VEN MI support - "
+ "RPA Success\n");
+ }
+ break;
}
return;
}
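The LS_CT_VEN_RPA link flag defined in the lpfc.h hunk at the top of this patch is what makes the vendor RPA single-shot: set once the vendor-attribute command goes out, checked on the success leg above, and cleared without retry on the failure leg earlier in this function. Reduced to a sketch:

	if (phba->link_flag & LS_CT_VEN_RPA)
		return;	/* vendor RPA already sent this link-up; no retry */

	if (lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPA,
			  LPFC_FDMI_VENDOR_ATTR_mi) == 0)
		phba->link_flag |= LS_CT_VEN_RPA;	/* cleared on failure */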
@@ -2033,7 +2354,7 @@ lpfc_fdmi_change_check(struct lpfc_vport *vport)
return;
ndlp = lpfc_findnode_did(vport, FDMI_DID);
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+ if (!ndlp)
return;
/* Check if system hostname changed */
@@ -2974,6 +3295,28 @@ lpfc_fdmi_smart_attr_security(struct lpfc_vport *vport,
return size;
}
+static int
+lpfc_fdmi_vendor_attr_mi(struct lpfc_vport *vport,
+ struct lpfc_fdmi_attr_def *ad)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_fdmi_attr_entry *ae;
+ uint32_t len, size;
+ char mibrevision[16];
+
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, 256);
+ sprintf(mibrevision, "ELXE2EM:%04d",
+ phba->sli4_hba.pc_sli4_params.mi_value);
+ strncpy(ae->un.AttrString, &mibrevision[0], sizeof(ae->un.AttrString));
+ len = strnlen(ae->un.AttrString, sizeof(ae->un.AttrString));
+ len += (len & 3) ? (4 - (len & 3)) : 4;
+ size = FOURBYTES + len;
+ ad->AttrLen = cpu_to_be16(size);
+ ad->AttrType = cpu_to_be16(RPRT_VENDOR_MI);
+ return size;
+}
+
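One subtlety in lpfc_fdmi_vendor_attr_mi() above: the length expression pads the attribute string to a 4-byte boundary, and an already-aligned length still gains a full pad word. A worked check, with fdmi_pad() as a hypothetical helper name:

	/* fdmi_pad(12) == 16, fdmi_pad(13) == 16, fdmi_pad(15) == 16;
	 * a length that is already a multiple of 4 still grows by 4.
	 */
	static unsigned int fdmi_pad(unsigned int len)
	{
		return len + ((len & 3) ? (4 - (len & 3)) : 4);
	}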
/* RHBA attribute jump table */
int (*lpfc_fdmi_hba_action[])
(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) = {
@@ -3025,6 +3368,7 @@ int (*lpfc_fdmi_port_action[])
lpfc_fdmi_smart_attr_port_info, /* bit20 RPRT_SMART_PORT_INFO */
lpfc_fdmi_smart_attr_qos, /* bit21 RPRT_SMART_QOS */
lpfc_fdmi_smart_attr_security, /* bit22 RPRT_SMART_SECURITY */
+ lpfc_fdmi_vendor_attr_mi, /* bit23 RPRT_VENDOR_MI */
};
/**
@@ -3056,7 +3400,7 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_iocbq *);
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+ if (!ndlp)
return 0;
cmpl = lpfc_cmpl_ct_disc_fdmi; /* called from discovery */
@@ -3250,12 +3594,6 @@ port_out:
if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, rsp_size, 0))
return 0;
- /*
- * Decrement ndlp reference count to release ndlp reference held
- * for the failed command's callback function.
- */
- lpfc_nlp_put(ndlp);
-
fdmi_cmd_free_bmpvirt:
lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
fdmi_cmd_free_bmp:
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index c9a327b13e5c..bc79a017e1a2 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2007-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -895,8 +895,6 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
if (ndlp->nlp_type & NLP_NVME_INITIATOR)
len += scnprintf(buf + len,
size - len, "NVME_INITIATOR ");
- len += scnprintf(buf+len, size-len, "usgmap:%x ",
- ndlp->nlp_usg_map);
len += scnprintf(buf+len, size-len, "refcnt:%x",
kref_read(&ndlp->kref));
if (iocnt) {
@@ -957,13 +955,13 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
len += scnprintf(buf + len, size - len, "\tRport List:\n");
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
/* local short-hand pointer. */
- spin_lock(&phba->hbalock);
+ spin_lock(&ndlp->lock);
rport = lpfc_ndlp_get_nrport(ndlp);
if (rport)
nrport = rport->remoteport;
else
nrport = NULL;
- spin_unlock(&phba->hbalock);
+ spin_unlock(&ndlp->lock);
if (!nrport)
continue;
@@ -3341,7 +3339,6 @@ lpfc_idiag_pcicfg_read(struct file *file, char __user *buf, size_t nbytes,
break;
case LPFC_PCI_CFG_BROWSE: /* browse all */
goto pcicfg_browse;
- break;
default:
/* illegal count */
len = 0;
@@ -4187,6 +4184,7 @@ lpfc_idiag_que_param_check(struct lpfc_queue *q, int index, int count)
/**
* lpfc_idiag_queacc_read_qe - read a single entry from the given queue index
* @pbuffer: The pointer to buffer to copy the read data into.
+ * @len: Length of the buffer.
* @pque: The pointer to the queue to be read.
* @index: The index into the queue entry.
*
@@ -4381,7 +4379,7 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
}
}
goto error_out;
- break;
+
case LPFC_IDIAG_CQ:
/* MBX complete queue */
if (phba->sli4_hba.mbx_cq &&
@@ -4433,7 +4431,7 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
}
}
goto error_out;
- break;
+
case LPFC_IDIAG_MQ:
/* MBX work queue */
if (phba->sli4_hba.mbx_wq &&
@@ -4447,7 +4445,7 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
goto pass_check;
}
goto error_out;
- break;
+
case LPFC_IDIAG_WQ:
/* ELS work queue */
if (phba->sli4_hba.els_wq &&
@@ -4487,9 +4485,8 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
}
}
}
-
goto error_out;
- break;
+
case LPFC_IDIAG_RQ:
/* HDR queue */
if (phba->sli4_hba.hdr_rq &&
@@ -4514,10 +4511,8 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
goto pass_check;
}
goto error_out;
- break;
default:
goto error_out;
- break;
}
pass_check:
@@ -4766,7 +4761,7 @@ error_out:
* @phba: The pointer to hba structure.
* @pbuffer: The pointer to the buffer to copy the data to.
* @len: The length of bytes to copied.
- * @drbregid: The id to doorbell registers.
+ * @ctlregid: The id to doorbell registers.
*
* Description:
* This routine reads a control register and copies its content to the
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 482e4a888dae..ea07afcb750a 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2013 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -41,6 +41,7 @@ enum lpfc_work_type {
LPFC_EVT_DEV_LOSS,
LPFC_EVT_FASTPATH_MGMT_EVT,
LPFC_EVT_RESET_HBA,
+ LPFC_EVT_RECOVER_PORT
};
/* structure used to queue event to the discovery tasklet */
@@ -80,6 +81,9 @@ struct lpfc_nodelist {
struct list_head nlp_listp;
struct lpfc_name nlp_portname;
struct lpfc_name nlp_nodename;
+
+ spinlock_t lock; /* Node management lock */
+
uint32_t nlp_flag; /* entry flags */
uint32_t nlp_DID; /* FC D_ID of entry */
uint32_t nlp_last_elscmd; /* Last ELS cmd sent */
@@ -115,12 +119,6 @@ struct lpfc_nodelist {
u8 nlp_nvme_info; /* NVME NSLER Support */
#define NLP_NVME_NSLER 0x1 /* NVME NSLER device */
- uint16_t nlp_usg_map; /* ndlp management usage bitmap */
-#define NLP_USG_NODE_ACT_BIT 0x1 /* Indicate ndlp is actively used */
-#define NLP_USG_IACT_REQ_BIT 0x2 /* Request to inactivate ndlp */
-#define NLP_USG_FREE_REQ_BIT 0x4 /* Request to invoke ndlp memory free */
-#define NLP_USG_FREE_ACK_BIT 0x8 /* Indicate ndlp memory free invoked */
-
struct timer_list nlp_delayfunc; /* Used for delayed ELS cmds */
struct lpfc_hba *phba;
struct fc_rport *rport; /* scsi_transport_fc port structure */
@@ -128,6 +126,7 @@ struct lpfc_nodelist {
struct lpfc_vport *vport;
struct lpfc_work_evt els_retry_evt;
struct lpfc_work_evt dev_loss_evt;
+ struct lpfc_work_evt recovery_evt;
struct kref kref;
atomic_t cmd_pending;
uint32_t cmd_qdepth;
@@ -135,13 +134,17 @@ struct lpfc_nodelist {
unsigned long *active_rrqs_xri_bitmap;
struct lpfc_scsicmd_bkt *lat_data; /* Latency data */
uint32_t fc4_prli_sent;
- uint32_t upcall_flags;
+ uint32_t fc4_xpt_flags;
#define NLP_WAIT_FOR_UNREG 0x1
+#define SCSI_XPT_REGD 0x2
+#define NVME_XPT_REGD 0x4
+
uint32_t nvme_fb_size; /* NVME target's supported byte cnt */
#define NVME_FB_BIT_SHIFT 9 /* PRLI Rsp first burst in 512B units. */
uint32_t nlp_defer_did;
};
+
struct lpfc_node_rrq {
struct list_head list;
uint16_t xritag;
@@ -170,7 +173,7 @@ struct lpfc_node_rrq {
#define NLP_NVMET_RECOV 0x00001000 /* NVMET auditing node for recovery. */
#define NLP_FCP_PRLI_RJT 0x00002000 /* Rport does not support FCP PRLI. */
#define NLP_UNREG_INP 0x00008000 /* UNREG_RPI cmd is in progress */
-#define NLP_DEFER_RM 0x00010000 /* Remove this ndlp if no longer used */
+#define NLP_DROPPED 0x00010000 /* Init ref count has been dropped */
#define NLP_DELAY_TMO 0x00020000 /* delay timeout is running for node */
#define NLP_NPR_2B_DISC 0x00040000 /* node is included in num_disc_nodes */
#define NLP_RCV_PLOGI 0x00080000 /* Rcv'ed PLOGI from remote system */
@@ -189,32 +192,6 @@ struct lpfc_node_rrq {
#define NLP_FIRSTBURST 0x40000000 /* Target supports FirstBurst */
#define NLP_RPI_REGISTERED 0x80000000 /* nlp_rpi is valid */
-
-/* ndlp usage management macros */
-#define NLP_CHK_NODE_ACT(ndlp) (((ndlp)->nlp_usg_map \
- & NLP_USG_NODE_ACT_BIT) \
- && \
- !((ndlp)->nlp_usg_map \
- & NLP_USG_FREE_ACK_BIT))
-#define NLP_SET_NODE_ACT(ndlp) ((ndlp)->nlp_usg_map \
- |= NLP_USG_NODE_ACT_BIT)
-#define NLP_INT_NODE_ACT(ndlp) ((ndlp)->nlp_usg_map \
- = NLP_USG_NODE_ACT_BIT)
-#define NLP_CLR_NODE_ACT(ndlp) ((ndlp)->nlp_usg_map \
- &= ~NLP_USG_NODE_ACT_BIT)
-#define NLP_CHK_IACT_REQ(ndlp) ((ndlp)->nlp_usg_map \
- & NLP_USG_IACT_REQ_BIT)
-#define NLP_SET_IACT_REQ(ndlp) ((ndlp)->nlp_usg_map \
- |= NLP_USG_IACT_REQ_BIT)
-#define NLP_CHK_FREE_REQ(ndlp) ((ndlp)->nlp_usg_map \
- & NLP_USG_FREE_REQ_BIT)
-#define NLP_SET_FREE_REQ(ndlp) ((ndlp)->nlp_usg_map \
- |= NLP_USG_FREE_REQ_BIT)
-#define NLP_CHK_FREE_ACK(ndlp) ((ndlp)->nlp_usg_map \
- & NLP_USG_FREE_ACK_BIT)
-#define NLP_SET_FREE_ACK(ndlp) ((ndlp)->nlp_usg_map \
- |= NLP_USG_FREE_ACK_BIT)
-
/* There are 4 different double linked lists nodelist entries can reside on.
* The Port Login (PLOGI) list and Address Discovery (ADISC) list are used
* when Link Up discovery or Registered State Change Notification (RSCN)
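With the nlp_usg_map bitmap and the whole NLP_CHK_NODE_ACT() macro family deleted above, node lifetime rests solely on the kref already embedded in struct lpfc_nodelist, which is why so many hunks in this patch collapse "ndlp && NLP_CHK_NODE_ACT(ndlp)" tests down to a plain NULL check. The call-site idiom, sketched under the assumption that lpfc_nlp_get() returns NULL once the final reference is gone:

	if (!lpfc_nlp_get(ndlp))
		return;			/* node already being torn down */

	/* ... hand ndlp to an iocb or mailbox; its completion path ... */
	lpfc_nlp_put(ndlp);		/* drop the reference taken above */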
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index b60945182db8..96c087b8b474 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -300,10 +300,6 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
bpl->tus.w = le32_to_cpu(bpl->tus.w);
}
- /* prevent preparing iocb with NULL ndlp reference */
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1)
- goto els_iocb_free_pbuf_exit;
elsiocb->context2 = pcmd;
elsiocb->context3 = pbuflist;
elsiocb->retry = retry;
@@ -378,7 +374,7 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
sp = &phba->fc_fabparam;
ndlp = lpfc_findnode_did(vport, Fabric_DID);
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+ if (!ndlp) {
err = 1;
goto fail;
}
@@ -418,10 +414,14 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
* for the callback routine.
*/
mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
+ if (!mbox->ctx_ndlp) {
+ err = 6;
+ goto fail_no_ndlp;
+ }
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
- err = 6;
+ err = 7;
goto fail_issue_reg_login;
}
@@ -432,6 +432,7 @@ fail_issue_reg_login:
* for the failed mbox command.
*/
lpfc_nlp_put(ndlp);
+fail_no_ndlp:
mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
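The hunk above is one instance of the ordering the patch applies to mailbox commands: take the completion's node reference first, and bail out without touching the refcount when the node is already going away. A condensed sketch (labels as in the hunk; mailbox and buffer teardown elided):

	mbox->ctx_ndlp = lpfc_nlp_get(ndlp);	/* ref held for the mbox completion */
	if (!mbox->ctx_ndlp)
		goto fail_no_ndlp;		/* no ref taken, nothing to put */

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		goto fail_issue_reg_login;	/* completion will never run */
	return 0;

	fail_issue_reg_login:
		lpfc_nlp_put(ndlp);		/* drop the completion's reference */
	fail_no_ndlp:
		/* free the mailbox and its DMA buffer here */
		return 1;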
@@ -471,7 +472,7 @@ lpfc_issue_reg_vfi(struct lpfc_vport *vport)
!(phba->link_flag & LS_LOOPBACK_MODE) &&
!(vport->fc_flag & FC_PT2PT)) {
ndlp = lpfc_findnode_did(vport, Fabric_DID);
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+ if (!ndlp) {
rc = -ENODEV;
goto fail;
}
@@ -765,14 +766,12 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
*/
list_for_each_entry_safe(np, next_np,
&vport->fc_nodes, nlp_listp) {
- if (!NLP_CHK_NODE_ACT(np))
- continue;
if ((np->nlp_state != NLP_STE_NPR_NODE) ||
!(np->nlp_flag & NLP_NPR_ADISC))
continue;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&np->lock);
np->nlp_flag &= ~NLP_NPR_ADISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&np->lock);
lpfc_unreg_rpi(vport, np);
}
lpfc_cleanup_pending_mbox(vport);
@@ -908,11 +907,6 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp = lpfc_nlp_init(vport, PT2PT_RemoteID);
if (!ndlp)
goto fail;
- } else if (!NLP_CHK_NODE_ACT(ndlp)) {
- ndlp = lpfc_enable_node(vport, ndlp,
- NLP_STE_UNUSED_NODE);
- if(!ndlp)
- goto fail;
}
memcpy(&ndlp->nlp_portname, &sp->portName,
@@ -921,9 +915,9 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
sizeof(struct lpfc_name));
/* Set state will put ndlp onto node list if not already done */
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
@@ -1095,6 +1089,7 @@ stop_rr_fcf_flogi:
/* Do not register VFI if the driver aborted FLOGI */
if (!lpfc_error_lost_link(irsp))
lpfc_issue_reg_vfi(vport);
+
lpfc_nlp_put(ndlp);
goto out;
}
@@ -1156,6 +1151,7 @@ stop_rr_fcf_flogi:
phba->fcf.current_rec.fabric_name[5],
phba->fcf.current_rec.fabric_name[6],
phba->fcf.current_rec.fabric_name[7]);
+
lpfc_nlp_put(ndlp);
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
@@ -1187,7 +1183,6 @@ flogifail:
spin_unlock_irq(&phba->hbalock);
lpfc_nlp_put(ndlp);
-
if (!lpfc_error_lost_link(irsp)) {
/* FLOGI failed, so just use loop map to make discovery list */
lpfc_disc_list_loopmap(vport);
@@ -1205,6 +1200,7 @@ flogifail:
}
out:
lpfc_els_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
}
/**
@@ -1344,7 +1340,13 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
"Issue FLOGI: opt:x%x",
phba->sli3_options, 0, 0);
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1)
+ goto out;
+
rc = lpfc_issue_fabric_iocb(phba, elsiocb);
+ if (rc == IOCB_ERROR)
+ lpfc_nlp_put(ndlp);
phba->hba_flag |= HBA_FLOGI_ISSUED;
@@ -1374,11 +1376,11 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
vport->fc_myDID = did;
}
- if (rc == IOCB_ERROR) {
- lpfc_els_free_iocb(phba, elsiocb);
- return 1;
- }
- return 0;
+ if (!rc)
+ return 0;
+ out:
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
}
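The FLOGI path above sets the shape every ELS issue routine in this patch converges on: reference the node into context1 before the iocb is handed to the SLI layer, drop that reference only if the hand-off fails (the completion then never runs), and funnel both failure cases through a single free-and-fail exit. A minimal sketch, not a verbatim copy of any one routine:

	elsiocb->context1 = lpfc_nlp_get(ndlp);	/* ref travels with the iocb */
	if (!elsiocb->context1)
		goto node_err;			/* node dying: no ref to undo */

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
	if (rc == IOCB_ERROR)
		goto io_err;			/* completion will not fire */
	return 0;

	io_err:
		lpfc_nlp_put(ndlp);		/* undo the context1 reference */
	node_err:
		lpfc_els_free_iocb(phba, elsiocb);	/* free no longer puts the node */
		return 1;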
/**
@@ -1421,9 +1423,9 @@ lpfc_els_abort_flogi(struct lpfc_hba *phba)
icmd = &iocb->iocb;
if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
ndlp = (struct lpfc_nodelist *)(iocb->context1);
- if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
- (ndlp->nlp_DID == Fabric_DID))
- lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+ if (ndlp && (ndlp->nlp_DID == Fabric_DID))
+ lpfc_sli_issue_abort_iotag(phba, pring, iocb,
+ NULL);
}
}
spin_unlock_irq(&phba->hbalock);
@@ -1464,13 +1466,9 @@ lpfc_initial_flogi(struct lpfc_vport *vport)
return 0;
/* Set the node type */
ndlp->nlp_type |= NLP_FABRIC;
+
/* Put ndlp onto node list */
lpfc_enqueue_node(vport, ndlp);
- } else if (!NLP_CHK_NODE_ACT(ndlp)) {
- /* re-setup ndlp without removing from node list */
- ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
- if (!ndlp)
- return 0;
}
if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
@@ -1511,13 +1509,12 @@ lpfc_initial_fdisc(struct lpfc_vport *vport)
ndlp = lpfc_nlp_init(vport, Fabric_DID);
if (!ndlp)
return 0;
+
+ /* NPIV is only supported in Fabrics. */
+ ndlp->nlp_type |= NLP_FABRIC;
+
/* Put ndlp onto node list */
lpfc_enqueue_node(vport, ndlp);
- } else if (!NLP_CHK_NODE_ACT(ndlp)) {
- /* re-setup ndlp without removing from node list */
- ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
- if (!ndlp)
- return 0;
}
if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
@@ -1562,7 +1559,7 @@ lpfc_more_plogi(struct lpfc_vport *vport)
}
/**
- * lpfc_plogi_confirm_nport - Confirm pologi wwpn matches stored ndlp
+ * lpfc_plogi_confirm_nport - Confirm plogi wwpn matches stored ndlp
* @phba: pointer to lpfc hba data structure.
* @prsp: pointer to response IOCB payload.
* @ndlp: pointer to a node-list data structure.
@@ -1597,10 +1594,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
struct lpfc_nodelist *ndlp)
{
struct lpfc_vport *vport = ndlp->vport;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *new_ndlp;
- struct lpfc_rport_data *rdata;
- struct fc_rport *rport;
struct serv_parm *sp;
uint8_t name[sizeof(struct lpfc_name)];
uint32_t rc, keepDID = 0, keep_nlp_flag = 0;
@@ -1608,8 +1602,6 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
uint16_t keep_nlp_state;
u32 keep_nlp_fc4_type = 0;
struct lpfc_nvme_rport *keep_nrport = NULL;
- int put_node;
- int put_rport;
unsigned long *active_rrqs_xri_bitmap = NULL;
/* Fabric nodes can have the same WWPN so we don't bother searching
@@ -1627,7 +1619,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);
/* return immediately if the WWPN matches ndlp */
- if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp))
+ if (new_ndlp == ndlp)
return ndlp;
if (phba->sli_rev == LPFC_SLI_REV4) {
@@ -1662,28 +1654,6 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
phba->active_rrq_pool);
return ndlp;
}
- } else if (!NLP_CHK_NODE_ACT(new_ndlp)) {
- rc = memcmp(&ndlp->nlp_portname, name,
- sizeof(struct lpfc_name));
- if (!rc) {
- if (active_rrqs_xri_bitmap)
- mempool_free(active_rrqs_xri_bitmap,
- phba->active_rrq_pool);
- return ndlp;
- }
- new_ndlp = lpfc_enable_node(vport, new_ndlp,
- NLP_STE_UNUSED_NODE);
- if (!new_ndlp) {
- if (active_rrqs_xri_bitmap)
- mempool_free(active_rrqs_xri_bitmap,
- phba->active_rrq_pool);
- return ndlp;
- }
- keepDID = new_ndlp->nlp_DID;
- if ((phba->sli_rev == LPFC_SLI_REV4) && active_rrqs_xri_bitmap)
- memcpy(active_rrqs_xri_bitmap,
- new_ndlp->active_rrqs_xri_bitmap,
- phba->cfg_rrq_xri_bitmap_sz);
} else {
keepDID = new_ndlp->nlp_DID;
if (phba->sli_rev == LPFC_SLI_REV4 &&
@@ -1711,7 +1681,9 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
ndlp->active_rrqs_xri_bitmap,
phba->cfg_rrq_xri_bitmap_sz);
- spin_lock_irq(shost->host_lock);
+ /* Lock both ndlps */
+ spin_lock_irq(&ndlp->lock);
+ spin_lock_irq(&new_ndlp->lock);
keep_new_nlp_flag = new_ndlp->nlp_flag;
keep_nlp_flag = ndlp->nlp_flag;
new_ndlp->nlp_flag = ndlp->nlp_flag;
@@ -1742,7 +1714,8 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
else
ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&new_ndlp->lock);
+ spin_unlock_irq(&ndlp->lock);
/* Set nlp_states accordingly */
keep_nlp_state = new_ndlp->nlp_state;
@@ -1761,36 +1734,6 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
"3179 PLOGI confirm NEW: %x %x\n",
new_ndlp->nlp_DID, keepDID);
- /* Fix up the rport accordingly */
- rport = ndlp->rport;
- if (rport) {
- rdata = rport->dd_data;
- if (rdata->pnode == ndlp) {
- /* break the link before dropping the ref */
- ndlp->rport = NULL;
- lpfc_nlp_put(ndlp);
- rdata->pnode = lpfc_nlp_get(new_ndlp);
- new_ndlp->rport = rport;
- }
- new_ndlp->nlp_type = ndlp->nlp_type;
- }
-
- /* Fix up the nvme rport */
- if (ndlp->nrport) {
- ndlp->nrport = NULL;
- lpfc_nlp_put(ndlp);
- }
-
- /* We shall actually free the ndlp with both nlp_DID and
- * nlp_portname fields equals 0 to avoid any ndlp on the
- * nodelist never to be used.
- */
- if (ndlp->nlp_DID == 0) {
- spin_lock_irq(&phba->ndlp_lock);
- NLP_SET_FREE_REQ(ndlp);
- spin_unlock_irq(&phba->ndlp_lock);
- }
-
/* Two ndlps cannot have the same did on the nodelist.
* Note: for this case, ndlp has a NULL WWPN so setting
* the nlp_fc4_type isn't required.
@@ -1803,10 +1746,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
active_rrqs_xri_bitmap,
phba->cfg_rrq_xri_bitmap_sz);
- if (!NLP_CHK_NODE_ACT(ndlp))
- lpfc_drop_node(vport, ndlp);
- }
- else {
+ } else {
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"3180 PLOGI confirm SWAP: %x %x\n",
new_ndlp->nlp_DID, keepDID);
@@ -1833,29 +1773,16 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
(ndlp->nlp_state == NLP_STE_MAPPED_NODE))
keep_nlp_state = NLP_STE_NPR_NODE;
lpfc_nlp_set_state(vport, ndlp, keep_nlp_state);
-
- /* Previous ndlp no longer active with nvme host transport.
- * Remove reference from earlier registration unless the
- * nvme host took care of it.
- */
- if (ndlp->nrport)
- lpfc_nlp_put(ndlp);
ndlp->nrport = keep_nrport;
-
- /* Fix up the rport accordingly */
- rport = ndlp->rport;
- if (rport) {
- rdata = rport->dd_data;
- put_node = rdata->pnode != NULL;
- put_rport = ndlp->rport != NULL;
- rdata->pnode = NULL;
- ndlp->rport = NULL;
- if (put_node)
- lpfc_nlp_put(ndlp);
- if (put_rport)
- put_device(&rport->dev);
- }
}
+
+ /*
+	 * If ndlp is not associated with any rport, we can drop it here;
+	 * otherwise let dev_loss_tmo_callbk trigger the DEVICE_RM event.
+ */
+ if (!ndlp->rport && (ndlp->nlp_state == NLP_STE_NPR_NODE))
+ lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
+
if (phba->sli_rev == LPFC_SLI_REV4 &&
active_rrqs_xri_bitmap)
mempool_free(active_rrqs_xri_bitmap,
@@ -1933,7 +1860,7 @@ lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
irsp->un.elsreq64.remoteID);
ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || ndlp != rrq->ndlp) {
+ if (!ndlp || ndlp != rrq->ndlp) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"2882 RRQ completes to NPort x%x "
"with no ndlp. Data: x%x x%x x%x\n",
@@ -1966,7 +1893,9 @@ lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
out:
if (rrq)
lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
+
lpfc_els_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
return;
}
/**
@@ -1996,7 +1925,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_vport *vport = cmdiocb->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
IOCB_t *irsp;
- struct lpfc_nodelist *ndlp;
+ struct lpfc_nodelist *ndlp, *free_ndlp;
struct lpfc_dmabuf *prsp;
int disc;
@@ -2010,23 +1939,23 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
irsp->un.elsreq64.remoteID);
ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+ if (!ndlp) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0136 PLOGI completes to NPort x%x "
"with no ndlp. Data: x%x x%x x%x\n",
irsp->un.elsreq64.remoteID,
irsp->ulpStatus, irsp->un.ulpWord[4],
irsp->ulpIoTag);
- goto out;
+ goto out_freeiocb;
}
/* Since ndlp can be freed in the disc state machine, note if this node
* is being used during discovery.
*/
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
/* PLOGI completes to NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
@@ -2038,9 +1967,9 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* Check to see if link went down during discovery */
if (lpfc_els_chk_latt(vport)) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
goto out;
}
@@ -2049,9 +1978,9 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
/* ELS command is being retried */
if (disc) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
}
goto out;
}
@@ -2064,10 +1993,25 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
"2753 PLOGI failure DID:%06X Status:x%x/x%x\n",
ndlp->nlp_DID, irsp->ulpStatus,
irsp->un.ulpWord[4]);
+
/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
- if (!lpfc_error_lost_link(irsp))
+ if (lpfc_error_lost_link(irsp))
+ goto check_plogi;
+ else
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_PLOGI);
+
+	/* As long as this node is not registered with the SCSI or NVMe
+ * transport, it is no longer an active node. Otherwise
+ * devloss handles the final cleanup.
+ */
+ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+ spin_unlock_irq(&ndlp->lock);
+ lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+ NLP_EVT_DEVICE_RM);
+ }
} else {
/* Good status, call state machine */
prsp = list_entry(((struct lpfc_dmabuf *)
@@ -2075,9 +2019,10 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_dmabuf, list);
ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
- NLP_EVT_CMPL_PLOGI);
+ NLP_EVT_CMPL_PLOGI);
}
+ check_plogi:
if (disc && vport->num_disc_nodes) {
/* Check to see if there are more PLOGIs to be sent */
lpfc_more_plogi(vport);
@@ -2093,7 +2038,16 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
out:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
+ "PLOGI Cmpl PUT: did:x%x refcnt %d",
+ ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
+
+out_freeiocb:
+ /* Release the reference on the original I/O request. */
+ free_ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
+
lpfc_els_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(free_ndlp);
return;
}
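The completion side mirrors that contract. Because lpfc_els_free_iocb() now only clears context1 (see its hunk further down), each handler drops the issue-time reference itself, and a node that ended up registered with neither transport can be retired on the spot. Condensed from the PLOGI handler above:

	/* Discovery owns the node's fate when no transport registration
	 * exists; devloss handles the final cleanup otherwise.
	 */
	if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)))
		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
					NLP_EVT_DEVICE_RM);

	/* Common epilogue: release the iocb, then the reference that was
	 * taken when the command was issued.
	 */
	free_ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
	lpfc_els_free_iocb(phba, cmdiocb);
	lpfc_nlp_put(free_ndlp);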
@@ -2122,7 +2076,6 @@ int
lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
{
struct lpfc_hba *phba = vport->phba;
- struct Scsi_Host *shost;
struct serv_parm *sp;
struct lpfc_nodelist *ndlp;
struct lpfc_iocbq *elsiocb;
@@ -2151,8 +2104,6 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
ndlp->nlp_defer_did = did;
return 0;
}
- if (!NLP_CHK_NODE_ACT(ndlp))
- ndlp = NULL;
}
/* If ndlp is not NULL, we will bump the reference count on it */
@@ -2162,10 +2113,9 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
if (!elsiocb)
return 1;
- shost = lpfc_shost_from_vport(vport);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_FCP_PRLI_RJT;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
@@ -2207,13 +2157,24 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
phba->fc_stat.elsXmitPLOGI++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
- ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
- if (ret == IOCB_ERROR) {
- lpfc_els_free_iocb(phba, elsiocb);
- return 1;
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Issue PLOGI: did:x%x refcnt %d",
+ did, kref_read(&ndlp->kref), 0);
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1)
+ goto io_err;
+
+ ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (ret) {
+ lpfc_nlp_put(ndlp);
+ goto io_err;
}
return 0;
+
+ io_err:
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
}
/**
@@ -2234,7 +2195,6 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
IOCB_t *irsp;
struct lpfc_nodelist *ndlp;
char *mode;
@@ -2245,13 +2205,13 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
irsp = &(rspiocb->iocb);
ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_PRLI_SND;
/* Driver supports multiple FC4 types. Counters matter. */
vport->fc_prli_sent--;
ndlp->fc4_prli_sent--;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"PRLI cmpl: status:x%x/x%x did:x%x",
@@ -2301,6 +2261,20 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
else
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_PRLI);
+
+ /* As long as this node is not registered with the SCSI
+ * or NVMe transport and no other PRLIs are outstanding,
+ * it is no longer an active node. Otherwise devloss
+ * handles the final cleanup.
+ */
+ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) &&
+ !ndlp->fc4_prli_sent) {
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+ spin_unlock_irq(&ndlp->lock);
+ lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+ NLP_EVT_DEVICE_RM);
+ }
} else {
/* Good status, call state machine. However, if another
* PRLI is outstanding, don't call the state machine
@@ -2313,6 +2287,7 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
out:
lpfc_els_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
return;
}
@@ -2341,7 +2316,7 @@ int
lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint8_t retry)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ int rc = 0;
struct lpfc_hba *phba = vport->phba;
PRLI *npr;
struct lpfc_nvme_prli *npr_nvme;
@@ -2476,13 +2451,9 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
local_nlp_type &= ~NLP_FC4_NVME;
}
- lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
- "Issue PRLI: did:x%x",
- ndlp->nlp_DID, 0, 0);
-
phba->fc_stat.elsXmitPRLI++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_prli;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_PRLI_SND;
/* The vport counters are used for lpfc_scan_finished, but
@@ -2491,15 +2462,18 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
*/
vport->fc_prli_sent++;
ndlp->fc4_prli_sent++;
- spin_unlock_irq(shost->host_lock);
- if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
- IOCB_ERROR) {
- spin_lock_irq(shost->host_lock);
- ndlp->nlp_flag &= ~NLP_PRLI_SND;
- spin_unlock_irq(shost->host_lock);
- lpfc_els_free_iocb(phba, elsiocb);
- return 1;
- }
+ spin_unlock_irq(&ndlp->lock);
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Issue PRLI: did:x%x refcnt %d",
+ ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1)
+ goto io_err;
+
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (rc == IOCB_ERROR)
+ goto node_err;
/* The driver supports 2 FC4 types. Make sure
@@ -2508,8 +2482,17 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (phba->sli_rev == LPFC_SLI_REV4 &&
local_nlp_type & (NLP_FC4_FCP | NLP_FC4_NVME))
goto send_next_prli;
+ else
+ return 0;
- return 0;
+ node_err:
+ lpfc_nlp_put(ndlp);
+ io_err:
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag &= ~NLP_PRLI_SND;
+ spin_unlock_irq(&ndlp->lock);
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
}
/**
@@ -2650,7 +2633,6 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
IOCB_t *irsp;
struct lpfc_nodelist *ndlp;
int disc;
@@ -2669,10 +2651,10 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* Since ndlp can be freed in the disc state machine, note if this node
* is being used during discovery.
*/
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC);
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
/* ADISC completes to NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0104 ADISC completes to NPort x%x "
@@ -2681,9 +2663,9 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
irsp->ulpTimeout, disc, vport->num_disc_nodes);
/* Check to see if link went down during discovery */
if (lpfc_els_chk_latt(vport)) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
goto out;
}
@@ -2692,9 +2674,9 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
/* ELS command is being retried */
if (disc) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_set_disctmo(vport);
}
goto out;
@@ -2705,19 +2687,35 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
ndlp->nlp_DID, irsp->ulpStatus,
irsp->un.ulpWord[4]);
/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
- if (!lpfc_error_lost_link(irsp))
+ if (lpfc_error_lost_link(irsp))
+ goto check_adisc;
+ else
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_ADISC);
+
+ /* As long as this node is not registered with the SCSI or NVMe
+ * transport, it is no longer an active node. Otherwise
+ * devloss handles the final cleanup.
+ */
+ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+ spin_unlock_irq(&ndlp->lock);
+ lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+ NLP_EVT_DEVICE_RM);
+ }
} else
/* Good status, call state machine */
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_ADISC);
+ check_adisc:
/* Check to see if there are more ADISCs to be sent */
if (disc && vport->num_disc_nodes)
lpfc_more_adisc(vport);
out:
lpfc_els_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
return;
}
@@ -2745,7 +2743,7 @@ int
lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint8_t retry)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ int rc = 0;
struct lpfc_hba *phba = vport->phba;
ADISC *ap;
struct lpfc_iocbq *elsiocb;
@@ -2771,24 +2769,31 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
ap->DID = be32_to_cpu(vport->fc_myDID);
- lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
- "Issue ADISC: did:x%x",
- ndlp->nlp_DID, 0, 0);
-
phba->fc_stat.elsXmitADISC++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_ADISC_SND;
- spin_unlock_irq(shost->host_lock);
- if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
- IOCB_ERROR) {
- spin_lock_irq(shost->host_lock);
- ndlp->nlp_flag &= ~NLP_ADISC_SND;
- spin_unlock_irq(shost->host_lock);
- lpfc_els_free_iocb(phba, elsiocb);
- return 1;
- }
+ spin_unlock_irq(&ndlp->lock);
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1)
+ goto node_err;
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Issue ADISC: did:x%x refcnt %d",
+ ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (rc == IOCB_ERROR)
+ goto io_err;
return 0;
+
+ io_err:
+ lpfc_nlp_put(ndlp);
+ node_err:
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag &= ~NLP_ADISC_SND;
+ spin_unlock_irq(&ndlp->lock);
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
}
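The lock conversion running through these hunks is mechanical: updates that touch only one node's nlp_flag now take that node's own spinlock instead of serializing behind the Scsi_Host lock. Sketched before and after:

	/* before: every node update contends on the host-wide lock */
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_ADISC_SND;
	spin_unlock_irq(shost->host_lock);

	/* after: the per-node lock added to struct lpfc_nodelist suffices */
	spin_lock_irq(&ndlp->lock);
	ndlp->nlp_flag |= NLP_ADISC_SND;
	spin_unlock_irq(&ndlp->lock);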
/**
@@ -2809,7 +2814,6 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
{
struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
struct lpfc_vport *vport = ndlp->vport;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
IOCB_t *irsp;
struct lpfcMboxq *mbox;
unsigned long flags;
@@ -2819,9 +2823,9 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
cmdiocb->context_un.rsp_iocb = rspiocb;
irsp = &(rspiocb->iocb);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_LOGO_SND;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"LOGO cmpl: status:x%x/x%x did:x%x",
@@ -2831,8 +2835,9 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* LOGO completes to NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0105 LOGO completes to NPort x%x "
- "Data: x%x x%x x%x x%x\n",
- ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
+ "refcnt %d nflags x%x Data: x%x x%x x%x x%x\n",
+ ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag,
+ irsp->ulpStatus, irsp->un.ulpWord[4],
irsp->ulpTimeout, vport->num_disc_nodes);
if (lpfc_els_chk_latt(vport)) {
@@ -2840,17 +2845,6 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto out;
}
- /* Check to see if link went down during discovery */
- if (ndlp->nlp_flag & NLP_TARGET_REMOVE) {
- /* NLP_EVT_DEVICE_RM should unregister the RPI
- * which should abort all outstanding IOs.
- */
- lpfc_disc_state_machine(vport, ndlp, cmdiocb,
- NLP_EVT_DEVICE_RM);
- skip_recovery = 1;
- goto out;
- }
-
/* The LOGO will not be retried on failure. A LOGO was
 * issued to the remote rport and an ACC or RJT or no answer are
* all acceptable. Note the failure and move forward with
@@ -2872,8 +2866,24 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* Call state machine. This will unregister the rpi if needed. */
lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO);
+ /* The driver sets this flag for an NPIV instance that doesn't want to
+ * log into the remote port.
+ */
+ if (ndlp->nlp_flag & NLP_TARGET_REMOVE) {
+ lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+ NLP_EVT_DEVICE_RM);
+ lpfc_els_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
+
+ /* Presume the node was released. */
+ return;
+ }
+
out:
+ /* Driver is done with the IO. */
lpfc_els_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
+
/* If we are in pt2pt mode, we could rcv new S_ID on PLOGI */
if ((vport->fc_flag & FC_PT2PT) &&
!(vport->fc_flag & FC_PT2PT_PLOGI)) {
@@ -2908,9 +2918,9 @@ out:
if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) &&
skip_recovery == 0) {
lpfc_cancel_retry_delay_tmo(vport, ndlp);
- spin_lock_irqsave(shost->host_lock, flags);
+ spin_lock_irqsave(&ndlp->lock, flags);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
- spin_unlock_irqrestore(shost->host_lock, flags);
+ spin_unlock_irqrestore(&ndlp->lock, flags);
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"3187 LOGO completes to NPort x%x: Start "
@@ -2919,8 +2929,21 @@ out:
irsp->un.ulpWord[4], irsp->ulpTimeout,
vport->num_disc_nodes);
lpfc_disc_start(vport);
+ return;
+ }
+
+ /* Cleanup path for failed REG_RPI handling. If REG_RPI fails, the
+	 * driver sends a LOGO to the rport to clean up. For fabric and
+	 * initiator ports, clean up the node as long as the node is not
+	 * registered with the transport.
+ */
+ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+ spin_unlock_irq(&ndlp->lock);
+ lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+ NLP_EVT_DEVICE_RM);
}
- return;
}
/**
@@ -2949,19 +2972,18 @@ int
lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint8_t retry)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *elsiocb;
uint8_t *pcmd;
uint16_t cmdsize;
int rc;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
if (ndlp->nlp_flag & NLP_LOGO_SND) {
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
return 0;
}
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name);
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
@@ -2978,30 +3000,37 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
pcmd += sizeof(uint32_t);
memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
- lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
- "Issue LOGO: did:x%x",
- ndlp->nlp_DID, 0, 0);
-
phba->fc_stat.elsXmitLOGO++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_LOGO_SND;
ndlp->nlp_flag &= ~NLP_ISSUE_LOGO;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1)
+ goto node_err;
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Issue LOGO: did:x%x refcnt %d",
+ ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
- if (rc == IOCB_ERROR) {
- spin_lock_irq(shost->host_lock);
- ndlp->nlp_flag &= ~NLP_LOGO_SND;
- spin_unlock_irq(shost->host_lock);
- lpfc_els_free_iocb(phba, elsiocb);
- return 1;
- }
+ if (rc == IOCB_ERROR)
+ goto io_err;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_prev_state = ndlp->nlp_state;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
return 0;
+
+ io_err:
+ lpfc_nlp_put(ndlp);
+ node_err:
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag &= ~NLP_LOGO_SND;
+ spin_unlock_irq(&ndlp->lock);
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
}
/**
@@ -3024,6 +3053,7 @@ lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
+ struct lpfc_nodelist *free_ndlp;
IOCB_t *irsp;
irsp = &rspiocb->iocb;
@@ -3041,7 +3071,11 @@ lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* Check to see if link went down during discovery */
lpfc_els_chk_latt(vport);
+
+ free_ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
+
lpfc_els_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(free_ndlp);
}
/**
@@ -3065,6 +3099,7 @@ lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_dmabuf *pcmd, *prsp;
u32 *pdata;
u32 cmd;
+ struct lpfc_nodelist *ndlp = cmdiocb->context1;
irsp = &rspiocb->iocb;
@@ -3143,6 +3178,7 @@ out:
/* Check to see if link went down during discovery */
lpfc_els_chk_latt(vport);
lpfc_els_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
return;
}
@@ -3170,6 +3206,7 @@ out:
int
lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry)
{
+ int rc = 0;
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *elsiocb;
uint8_t *pcmd;
@@ -3184,22 +3221,13 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry)
if (!ndlp)
return 1;
lpfc_enqueue_node(vport, ndlp);
- } else if (!NLP_CHK_NODE_ACT(ndlp)) {
- ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
- if (!ndlp)
- return 1;
}
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
ndlp->nlp_DID, ELS_CMD_SCR);
- if (!elsiocb) {
- /* This will trigger the release of the node just
- * allocated
- */
- lpfc_nlp_put(ndlp);
+ if (!elsiocb)
return 1;
- }
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
@@ -3216,22 +3244,26 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry)
phba->fc_stat.elsXmitSCR++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_disc_cmd;
- if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
- IOCB_ERROR) {
- /* The additional lpfc_nlp_put will cause the following
- * lpfc_els_free_iocb routine to trigger the rlease of
- * the node.
- */
- lpfc_nlp_put(ndlp);
- lpfc_els_free_iocb(phba, elsiocb);
- return 1;
- }
- /* This will cause the callback-function lpfc_cmpl_els_cmd to
- * trigger the release of node.
- */
- if (!(vport->fc_flag & FC_PT2PT))
- lpfc_nlp_put(ndlp);
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1)
+ goto node_err;
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Issue SCR: did:x%x refcnt %d",
+ ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
+
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (rc == IOCB_ERROR)
+ goto io_err;
+
+ /* Keep the ndlp just in case RDF is being sent */
return 0;
+
+ io_err:
+ lpfc_nlp_put(ndlp);
+ node_err:
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
}
/**
@@ -3257,6 +3289,7 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry)
int
lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry)
{
+ int rc = 0;
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *elsiocb;
struct lpfc_nodelist *ndlp;
@@ -3287,24 +3320,14 @@ lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry)
if (!ndlp)
return 1;
lpfc_enqueue_node(vport, ndlp);
- } else if (!NLP_CHK_NODE_ACT(ndlp)) {
- ndlp = lpfc_enable_node(vport, ndlp,
- NLP_STE_UNUSED_NODE);
- if (!ndlp)
- return 1;
}
}
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
ndlp->nlp_DID, ELS_CMD_RSCN_XMT);
- if (!elsiocb) {
- /* This will trigger the release of the node just
- * allocated
- */
- lpfc_nlp_put(ndlp);
+ if (!elsiocb)
return 1;
- }
event = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
@@ -3319,29 +3342,31 @@ lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry)
event->portid.rscn_fid[1] = (nportid & 0x0000FF00) >> 8;
event->portid.rscn_fid[2] = nportid & 0x000000FF;
+ phba->fc_stat.elsXmitRSCN++;
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1)
+ goto node_err;
+
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"Issue RSCN: did:x%x",
ndlp->nlp_DID, 0, 0);
- phba->fc_stat.elsXmitRSCN++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
- if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
- IOCB_ERROR) {
- /* The additional lpfc_nlp_put will cause the following
- * lpfc_els_free_iocb routine to trigger the rlease of
- * the node.
- */
- lpfc_nlp_put(ndlp);
- lpfc_els_free_iocb(phba, elsiocb);
- return 1;
- }
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (rc == IOCB_ERROR)
+ goto io_err;
+
/* This will cause the callback-function lpfc_cmpl_els_cmd to
* trigger the release of node.
*/
if (!(vport->fc_flag & FC_PT2PT))
lpfc_nlp_put(ndlp);
-
return 0;
+io_err:
+ lpfc_nlp_put(ndlp);
+node_err:
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
}
/**
@@ -3369,6 +3394,7 @@ lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry)
static int
lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
{
+ int rc = 0;
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *elsiocb;
FARP *fp;
@@ -3386,21 +3412,12 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
if (!ndlp)
return 1;
lpfc_enqueue_node(vport, ndlp);
- } else if (!NLP_CHK_NODE_ACT(ndlp)) {
- ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
- if (!ndlp)
- return 1;
}
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
ndlp->nlp_DID, ELS_CMD_RNID);
- if (!elsiocb) {
- /* This will trigger the release of the node just
- * allocated
- */
- lpfc_nlp_put(ndlp);
+ if (!elsiocb)
return 1;
- }
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
@@ -3419,7 +3436,7 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name));
memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
ondlp = lpfc_findnode_did(vport, nportid);
- if (ondlp && NLP_CHK_NODE_ACT(ondlp)) {
+ if (ondlp) {
memcpy(&fp->OportName, &ondlp->nlp_portname,
sizeof(struct lpfc_name));
memcpy(&fp->OnodeName, &ondlp->nlp_nodename,
@@ -3432,8 +3449,14 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
phba->fc_stat.elsXmitFARPR++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
- if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
- IOCB_ERROR) {
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
+ }
+
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (rc == IOCB_ERROR) {
/* The additional lpfc_nlp_put will cause the following
* lpfc_els_free_iocb routine to trigger the release of
* the node.
@@ -3474,6 +3497,7 @@ lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
struct lpfc_els_rdf_req *prdf;
struct lpfc_nodelist *ndlp;
uint16_t cmdsize;
+ int rc;
cmdsize = sizeof(*prdf);
@@ -3483,10 +3507,6 @@ lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
if (!ndlp)
return -ENODEV;
lpfc_enqueue_node(vport, ndlp);
- } else if (!NLP_CHK_NODE_ACT(ndlp)) {
- ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
- if (!ndlp)
- return -ENODEV;
}
/* RDF ELS is not required on an NPIV VN_Port. */
@@ -3497,13 +3517,8 @@ lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
ndlp->nlp_DID, ELS_CMD_RDF);
- if (!elsiocb) {
- /* This will trigger the release of the node just
- * allocated
- */
- lpfc_nlp_put(ndlp);
+ if (!elsiocb)
return -ENOMEM;
- }
/* Configure the payload for the supported FPIN events. */
prdf = (struct lpfc_els_rdf_req *)
@@ -3521,31 +3536,29 @@ lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
prdf->reg_d1.desc_tags[2] = cpu_to_be32(ELS_DTAG_PEER_CONGEST);
prdf->reg_d1.desc_tags[3] = cpu_to_be32(ELS_DTAG_CONGESTION);
- lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
- "Issue RDF: did:x%x",
- ndlp->nlp_DID, 0, 0);
-
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"6444 Xmit RDF to remote NPORT x%x\n",
ndlp->nlp_DID);
elsiocb->iocb_cmpl = lpfc_cmpl_els_disc_cmd;
- if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
- IOCB_ERROR) {
- /* The additional lpfc_nlp_put will cause the following
- * lpfc_els_free_iocb routine to trigger the rlease of
- * the node.
- */
- lpfc_nlp_put(ndlp);
- lpfc_els_free_iocb(phba, elsiocb);
- return -EIO;
- }
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1)
+ goto node_err;
- /* An RDF was issued - this put ensures the ndlp is cleaned up
- * when the RDF completes.
- */
- lpfc_nlp_put(ndlp);
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Issue RDF: did:x%x refcnt %d",
+ ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
+
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (rc == IOCB_ERROR)
+ goto io_err;
return 0;
+
+ io_err:
+ lpfc_nlp_put(ndlp);
+ node_err:
+ lpfc_els_free_iocb(phba, elsiocb);
+ return -EIO;
}
/**
@@ -3568,9 +3581,9 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
if (!(nlp->nlp_flag & NLP_DELAY_TMO))
return;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&nlp->lock);
nlp->nlp_flag &= ~NLP_DELAY_TMO;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&nlp->lock);
del_timer_sync(&nlp->nlp_delayfunc);
nlp->nlp_last_elscmd = 0;
if (!list_empty(&nlp->els_retry_evt.evt_listp)) {
@@ -3580,9 +3593,9 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1);
}
if (nlp->nlp_flag & NLP_NPR_2B_DISC) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&nlp->lock);
nlp->nlp_flag &= ~NLP_NPR_2B_DISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&nlp->lock);
if (vport->num_disc_nodes) {
if (vport->port_state < LPFC_VPORT_READY) {
/* Check if there are more ADISCs to be sent */
@@ -3658,20 +3671,19 @@ void
lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
{
struct lpfc_vport *vport = ndlp->vport;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
uint32_t cmd, retry;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
cmd = ndlp->nlp_last_elscmd;
ndlp->nlp_last_elscmd = 0;
if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
return;
}
ndlp->nlp_flag &= ~NLP_DELAY_TMO;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
/*
* If a discovery event readded nlp_delayfunc after timer
* firing and before processing the timer, cancel the
@@ -3800,7 +3812,6 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
IOCB_t *irsp = &rspiocb->iocb;
struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
@@ -3822,14 +3833,13 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
cmd = *elscmd++;
}
- if (ndlp && NLP_CHK_NODE_ACT(ndlp))
+ if (ndlp)
did = ndlp->nlp_DID;
else {
/* We should only hit this case for retrying PLOGI */
did = irsp->un.elsreq64.remoteID;
ndlp = lpfc_findnode_did(vport, did);
- if ((!ndlp || !NLP_CHK_NODE_ACT(ndlp))
- && (cmd != ELS_CMD_PLOGI))
+ if (!ndlp && (cmd != ELS_CMD_PLOGI))
return 1;
}
@@ -4059,9 +4069,9 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
*/
if (stat.un.b.lsRjtRsnCodeExp ==
LSEXP_REQ_UNSUPPORTED && cmd == ELS_CMD_PRLI) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_FCP_PRLI_RJT;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
retry = 0;
goto out_retry;
}
@@ -4157,16 +4167,16 @@ out_retry:
}
phba->fc_stat.elsXmitRetry++;
- if (ndlp && NLP_CHK_NODE_ACT(ndlp) && delay) {
+ if (ndlp && delay) {
phba->fc_stat.elsDelayRetry++;
ndlp->nlp_retry = cmdiocb->retry;
/* delay is specified in milliseconds */
mod_timer(&ndlp->nlp_delayfunc,
jiffies + msecs_to_jiffies(delay));
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
ndlp->nlp_prev_state = ndlp->nlp_state;
if ((cmd == ELS_CMD_PRLI) ||
@@ -4188,7 +4198,7 @@ out_retry:
lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry);
return 1;
case ELS_CMD_PLOGI:
- if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
+ if (ndlp) {
ndlp->nlp_prev_state = ndlp->nlp_state;
lpfc_nlp_set_state(vport, ndlp,
NLP_STE_PLOGI_ISSUE);
@@ -4314,27 +4324,10 @@ int
lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
{
struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
- struct lpfc_nodelist *ndlp;
- ndlp = (struct lpfc_nodelist *)elsiocb->context1;
- if (ndlp) {
- if (ndlp->nlp_flag & NLP_DEFER_RM) {
- lpfc_nlp_put(ndlp);
+ /* The I/O job is complete. Clear the context1 data. */
+ elsiocb->context1 = NULL;
- /* If the ndlp is not being used by another discovery
- * thread, free it.
- */
- if (!lpfc_nlp_not_used(ndlp)) {
- /* If ndlp is being used by another discovery
- * thread, just clear NLP_DEFER_RM
- */
- ndlp->nlp_flag &= ~NLP_DEFER_RM;
- }
- }
- else
- lpfc_nlp_put(ndlp);
- elsiocb->context1 = NULL;
- }
/* context2 = cmd, context2->next = rsp, context3 = bpl */
if (elsiocb->context2) {
if (elsiocb->iocb_flag & LPFC_DELAY_MEM_FREE) {
@@ -4409,10 +4402,10 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
irsp->ulpStatus, irsp->un.ulpWord[4], ndlp->nlp_DID);
/* ACC to LOGO completes to NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
- "0109 ACC to LOGO completes to NPort x%x "
+ "0109 ACC to LOGO completes to NPort x%x refcnt %d"
"Data: x%x x%x x%x\n",
- ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
- ndlp->nlp_rpi);
+ ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag,
+ ndlp->nlp_state, ndlp->nlp_rpi);
if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
/* NPort Recovery mode or node is just allocated */
@@ -4434,6 +4427,7 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* At this point, the driver is done so release the IOCB
*/
lpfc_els_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
}
/**
@@ -4463,20 +4457,17 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
mempool_free(pmb, phba->mbox_mem_pool);
if (ndlp) {
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
- "0006 rpi%x DID:%x flg:%x %d map:%x x%px\n",
+ "0006 rpi x%x DID:%x flg:%x %d x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref),
- ndlp->nlp_usg_map, ndlp);
- if (NLP_CHK_NODE_ACT(ndlp)) {
- lpfc_nlp_put(ndlp);
- /* This is the end of the default RPI cleanup logic for
- * this ndlp. If no other discovery threads are using
- * this ndlp, free all resources associated with it.
- */
- lpfc_nlp_not_used(ndlp);
- } else {
- lpfc_drop_node(ndlp->vport, ndlp);
- }
+ ndlp);
+ /* This is the end of the default RPI cleanup logic for
+		 * this ndlp and it could get released. Clear NLP_REG_LOGIN_SEND
+		 * to prevent any further processing.
+ */
+ ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
+ lpfc_nlp_put(ndlp);
+ lpfc_nlp_not_used(ndlp);
}
return;
@@ -4525,8 +4516,7 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 * function can have cmdiocb->context1 (ndlp) field set to NULL.
*/
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
- if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
- (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) {
+ if (ndlp && (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) {
/* A LS_RJT associated with Default RPI cleanup has its own
* separate code path.
*/
@@ -4535,7 +4525,7 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
/* Check to see if link went down during discovery */
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || lpfc_els_chk_latt(vport)) {
+ if (!ndlp || lpfc_els_chk_latt(vport)) {
if (mbox) {
mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
if (mp) {
@@ -4544,8 +4534,7 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
mempool_free(mbox, phba->mbox_mem_pool);
}
- if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
- (ndlp->nlp_flag & NLP_RM_DFLT_RPI))
+ if (ndlp && (ndlp->nlp_flag & NLP_RM_DFLT_RPI))
if (lpfc_nlp_not_used(ndlp)) {
ndlp = NULL;
/* Indicate the node has already released,
@@ -4570,32 +4559,38 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
ndlp->nlp_rpi);
if (mbox) {
- if ((rspiocb->iocb.ulpStatus == 0)
- && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
+ if ((rspiocb->iocb.ulpStatus == 0) &&
+ (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
if (!lpfc_unreg_rpi(vport, ndlp) &&
- (!(vport->fc_flag & FC_PT2PT)) &&
- (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
- ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE)) {
- lpfc_printf_vlog(vport, KERN_INFO,
- LOG_DISCOVERY,
- "0314 PLOGI recov DID x%x "
- "Data: x%x x%x x%x\n",
- ndlp->nlp_DID, ndlp->nlp_state,
- ndlp->nlp_rpi, ndlp->nlp_flag);
- mp = mbox->ctx_buf;
- if (mp) {
- lpfc_mbuf_free(phba, mp->virt,
- mp->phys);
- kfree(mp);
+ (!(vport->fc_flag & FC_PT2PT))) {
+ if (ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE) {
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_DISCOVERY,
+ "0314 PLOGI recov "
+ "DID x%x "
+ "Data: x%x x%x x%x\n",
+ ndlp->nlp_DID,
+ ndlp->nlp_state,
+ ndlp->nlp_rpi,
+ ndlp->nlp_flag);
+ mp = mbox->ctx_buf;
+ if (mp) {
+ lpfc_mbuf_free(phba, mp->virt,
+ mp->phys);
+ kfree(mp);
+ }
+ mempool_free(mbox, phba->mbox_mem_pool);
+ goto out;
}
- mempool_free(mbox, phba->mbox_mem_pool);
- goto out;
}
/* Increment reference count to ndlp to hold the
* reference to ndlp for the callback function.
*/
mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
+ if (!mbox->ctx_ndlp)
+ goto out;
+
mbox->vport = vport;
if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) {
mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
@@ -4657,12 +4652,12 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
mempool_free(mbox, phba->mbox_mem_pool);
}
out:
- if (ndlp && NLP_CHK_NODE_ACT(ndlp) && shost) {
- spin_lock_irq(shost->host_lock);
+ if (ndlp && shost) {
+ spin_lock_irq(&ndlp->lock);
if (mbox)
ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN;
ndlp->nlp_flag &= ~NLP_RM_DFLT_RPI;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
/* If the node is not being used by another discovery thread,
* and we are sending a reject, we are done with it.
@@ -4676,10 +4671,11 @@ out:
* the routine lpfc_els_free_iocb.
*/
cmdiocb->context1 = NULL;
-
}
+ /* Release the originating I/O reference. */
lpfc_els_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
return;
}
@@ -4713,7 +4709,6 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
LPFC_MBOXQ_t *mbox)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
IOCB_t *icmd;
IOCB_t *oldcmd;
@@ -4732,9 +4727,9 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
if (!elsiocb) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_LOGO_ACC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
return 1;
}
@@ -4838,23 +4833,40 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
return 1;
}
if (ndlp->nlp_flag & NLP_LOGO_ACC) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED ||
ndlp->nlp_flag & NLP_REG_LOGIN_SEND))
ndlp->nlp_flag &= ~NLP_LOGO_ACC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc;
} else {
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
}
phba->fc_stat.elsXmitACC++;
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1)
+ goto node_err;
+
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
- if (rc == IOCB_ERROR) {
- lpfc_els_free_iocb(phba, elsiocb);
- return 1;
- }
+ if (rc == IOCB_ERROR)
+ goto io_err;
+
+ /* Xmit ELS ACC response tag <ulpIoTag> */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, "
+ "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x "
+ "RPI: x%x, fc_flag x%x\n",
+ rc, elsiocb->iotag, elsiocb->sli4_xritag,
+ ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+ ndlp->nlp_rpi, vport->fc_flag);
return 0;
+
+io_err:
+ lpfc_nlp_put(ndlp);
+node_err:
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
}
/**
@@ -4884,13 +4896,13 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
LPFC_MBOXQ_t *mbox)
{
+ int rc;
struct lpfc_hba *phba = vport->phba;
IOCB_t *icmd;
IOCB_t *oldcmd;
struct lpfc_iocbq *elsiocb;
uint8_t *pcmd;
uint16_t cmdsize;
- int rc;
cmdsize = 2 * sizeof(uint32_t);
elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
@@ -4925,13 +4937,21 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
phba->fc_stat.elsXmitLSRJT++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1)
+ goto node_err;
+
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (rc == IOCB_ERROR)
+ goto io_err;
- if (rc == IOCB_ERROR) {
- lpfc_els_free_iocb(phba, elsiocb);
- return 1;
- }
return 0;
+
+ io_err:
+ lpfc_nlp_put(ndlp);
+ node_err:
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
}
/**
@@ -4995,16 +5015,18 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
ap->DID = be32_to_cpu(vport->fc_myDID);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
- "Issue ACC ADISC: did:x%x flg:x%x",
- ndlp->nlp_DID, ndlp->nlp_flag, 0);
+ "Issue ACC ADISC: did:x%x flg:x%x refcnt %d",
+ ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref));
phba->fc_stat.elsXmitACC++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1)
+ goto node_err;
+
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
- if (rc == IOCB_ERROR) {
- lpfc_els_free_iocb(phba, elsiocb);
- return 1;
- }
+ if (rc == IOCB_ERROR)
+ goto io_err;
/* Xmit ELS ACC response tag <ulpIoTag> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
@@ -5015,6 +5037,12 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
ndlp->nlp_rpi, vport->fc_flag);
return 0;
+
+io_err:
+ lpfc_nlp_put(ndlp);
+node_err:
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
}
/**
@@ -5162,18 +5190,25 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
ndlp->nlp_DID);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
- "Issue ACC PRLI: did:x%x flg:x%x",
- ndlp->nlp_DID, ndlp->nlp_flag, 0);
+ "Issue ACC PRLI: did:x%x flg:x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref));
phba->fc_stat.elsXmitACC++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1)
+ goto node_err;
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
- if (rc == IOCB_ERROR) {
- lpfc_els_free_iocb(phba, elsiocb);
- return 1;
- }
+ if (rc == IOCB_ERROR)
+ goto io_err;
return 0;
+
+ io_err:
+ lpfc_nlp_put(ndlp);
+ node_err:
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
}
/**
@@ -5262,18 +5297,26 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
}
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
- "Issue ACC RNID: did:x%x flg:x%x",
- ndlp->nlp_DID, ndlp->nlp_flag, 0);
+ "Issue ACC RNID: did:x%x flg:x%x refcnt %d",
+ ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref));
phba->fc_stat.elsXmitACC++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1)
+ goto node_err;
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
- if (rc == IOCB_ERROR) {
- lpfc_els_free_iocb(phba, elsiocb);
- return 1;
- }
+ if (rc == IOCB_ERROR)
+ goto io_err;
+
return 0;
+
+ io_err:
+ lpfc_nlp_put(ndlp);
+ node_err:
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
}
/**
@@ -5369,18 +5412,25 @@ lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
memcpy(pcmd, data, cmdsize - sizeof(uint32_t));
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
- "Issue ACC ECHO: did:x%x flg:x%x",
- ndlp->nlp_DID, ndlp->nlp_flag, 0);
+ "Issue ACC ECHO: did:x%x flg:x%x refcnt %d",
+ ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref));
phba->fc_stat.elsXmitACC++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1)
+ goto node_err;
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
- if (rc == IOCB_ERROR) {
- lpfc_els_free_iocb(phba, elsiocb);
- return 1;
- }
+ if (rc == IOCB_ERROR)
+ goto io_err;
return 0;
+
+ io_err:
+ lpfc_nlp_put(ndlp);
+ node_err:
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
}
/**
@@ -5411,14 +5461,12 @@ lpfc_els_disc_adisc(struct lpfc_vport *vport)
/* go thru NPR nodes and issue any remaining ELS ADISCs */
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp))
- continue;
if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
(ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
(ndlp->nlp_flag & NLP_NPR_ADISC) != 0) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
ndlp->nlp_prev_state = ndlp->nlp_state;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
lpfc_issue_els_adisc(vport, ndlp, 0);
@@ -5469,8 +5517,6 @@ lpfc_els_disc_plogi(struct lpfc_vport *vport)
/* go thru NPR nodes and issue any remaining ELS PLOGIs */
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp))
- continue;
if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
(ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
(ndlp->nlp_flag & NLP_DELAY_TMO) == 0 &&
@@ -5947,7 +5993,6 @@ lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context,
elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize,
lpfc_max_els_tries, rdp_context->ndlp,
rdp_context->ndlp->nlp_DID, ELS_CMD_ACC);
- lpfc_nlp_put(ndlp);
if (!elsiocb)
goto free_rdp_context;
@@ -6021,18 +6066,24 @@ lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context,
bpl->tus.w = le32_to_cpu(bpl->tus.w);
phba->fc_stat.elsXmitACC++;
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ goto free_rdp_context;
+ }
+
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
- if (rc == IOCB_ERROR)
+ if (rc == IOCB_ERROR) {
+ lpfc_nlp_put(ndlp);
lpfc_els_free_iocb(phba, elsiocb);
+ }
- kfree(rdp_context);
+ goto free_rdp_context;
- return;
error:
cmdsize = 2 * sizeof(uint32_t);
elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, lpfc_max_els_tries,
ndlp, ndlp->nlp_DID, ELS_CMD_LS_RJT);
- lpfc_nlp_put(ndlp);
if (!elsiocb)
goto free_rdp_context;
@@ -6047,11 +6098,23 @@ error:
phba->fc_stat.elsXmitLSRJT++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
- rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ goto free_rdp_context;
+ }
- if (rc == IOCB_ERROR)
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (rc == IOCB_ERROR) {
+ lpfc_nlp_put(ndlp);
lpfc_els_free_iocb(phba, elsiocb);
+ }
+
free_rdp_context:
+ /* This reference put is for the original unsolicited RDP. If the
+ * iocb prep failed, there is no reference to remove.
+ */
+ lpfc_nlp_put(ndlp);
kfree(rdp_context);
}
@@ -6155,6 +6218,11 @@ lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
cmd = &cmdiocb->iocb;
rdp_context->ndlp = lpfc_nlp_get(ndlp);
+ if (!rdp_context->ndlp) {
+ kfree(rdp_context);
+ rjt_err = LSRJT_UNABLE_TPC;
+ goto error;
+ }
rdp_context->ox_id = cmd->unsli3.rcvsli3.ox_id;
rdp_context->rx_id = cmd->ulpContext;
rdp_context->cmpl = lpfc_els_rdp_cmpl;
@@ -6249,10 +6317,19 @@ lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lcb_res->lcb_duration = lcb_context->duration;
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
phba->fc_stat.elsXmitACC++;
+
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1)
+ goto node_err;
+
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
- if (rc == IOCB_ERROR)
- lpfc_els_free_iocb(phba, elsiocb);
+ if (!rc)
+ goto out;
+ lpfc_nlp_put(ndlp);
+ node_err:
+ lpfc_els_free_iocb(phba, elsiocb);
+ out:
kfree(lcb_context);
return;
@@ -6279,9 +6356,17 @@ error:
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
phba->fc_stat.elsXmitLSRJT++;
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ goto free_lcb_context;
+ }
+
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
- if (rc == IOCB_ERROR)
+ if (rc == IOCB_ERROR) {
+ lpfc_nlp_put(ndlp);
lpfc_els_free_iocb(phba, elsiocb);
+ }
free_lcb_context:
kfree(lcb_context);
}
@@ -6381,7 +6466,7 @@ lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
uint8_t *lp;
struct fc_lcb_request_frame *beacon;
struct lpfc_lcb_context *lcb_context;
- uint8_t state, rjt_err;
+ u8 state, rjt_err = 0;
struct ls_rjt stat;
pcmd = (struct lpfc_dmabuf *)cmdiocb->context2;
@@ -6427,15 +6512,22 @@ lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
lcb_context->ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id;
lcb_context->rx_id = cmdiocb->iocb.ulpContext;
lcb_context->ndlp = lpfc_nlp_get(ndlp);
+ if (!lcb_context->ndlp) {
+ rjt_err = LSRJT_UNABLE_TPC;
+ goto rjt_free;
+ }
+
if (lpfc_sli4_set_beacon(vport, lcb_context, state)) {
lpfc_printf_vlog(ndlp->vport, KERN_ERR, LOG_TRACE_EVENT,
"0193 failed to send mail box");
- kfree(lcb_context);
lpfc_nlp_put(ndlp);
rjt_err = LSRJT_UNABLE_TPC;
- goto rjt;
+ goto rjt_free;
}
return 0;
+
+rjt_free:
+ kfree(lcb_context);
rjt:
memset(&stat, 0, sizeof(stat));
stat.un.b.lsRjtRsnCode = rjt_err;
@@ -6578,8 +6670,7 @@ lpfc_rscn_recovery_check(struct lpfc_vport *vport)
/* Move all affected nodes by pending RSCNs to NPR state. */
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp) ||
- (ndlp->nlp_state == NLP_STE_UNUSED_NODE) ||
+ if ((ndlp->nlp_state == NLP_STE_UNUSED_NODE) ||
!lpfc_rscn_payload_check(vport, ndlp->nlp_DID))
continue;
@@ -6914,8 +7005,7 @@ lpfc_els_handle_rscn(struct lpfc_vport *vport)
vport->num_disc_nodes = 0;
ndlp = lpfc_findnode_did(vport, NameServer_DID);
- if (ndlp && NLP_CHK_NODE_ACT(ndlp)
- && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
+ if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
/* Good ndlp, issue CT Request to NameServer. Need to
* know how many gidfts were issued. If none, then just
* flush the RSCN. Otherwise, the outstanding requests
@@ -6933,13 +7023,8 @@ lpfc_els_handle_rscn(struct lpfc_vport *vport)
} else {
/* Nameserver login in question. Revalidate. */
if (ndlp) {
- ndlp = lpfc_enable_node(vport, ndlp,
- NLP_STE_PLOGI_ISSUE);
- if (!ndlp) {
- lpfc_els_flush_rscn(vport);
- return 0;
- }
ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
} else {
ndlp = lpfc_nlp_init(vport, NameServer_DID);
if (!ndlp) {
@@ -7283,6 +7368,7 @@ lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
static void
lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
+ int rc = 0;
MAILBOX_t *mb;
IOCB_t *icmd;
struct RLS_RSP *rls_rsp;
@@ -7344,8 +7430,19 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
ndlp->nlp_rpi);
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
phba->fc_stat.elsXmitACC++;
- if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
- lpfc_els_free_iocb(phba, elsiocb);
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1)
+ goto node_err;
+
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (rc == IOCB_ERROR)
+ goto io_err;
+ return;
+
+ io_err:
+ lpfc_nlp_put(ndlp);
+ node_err:
+ lpfc_els_free_iocb(phba, elsiocb);
}
/**
@@ -7386,6 +7483,8 @@ lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
cmdiocb->iocb.ulpContext)); /* rx_id */
mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
+ if (!mbox->ctx_ndlp)
+ goto node_err;
mbox->vport = vport;
mbox->mbox_cmpl = lpfc_els_rsp_rls_acc;
if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
@@ -7396,6 +7495,7 @@ lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
* command.
*/
lpfc_nlp_put(ndlp);
+node_err:
mempool_free(mbox, phba->mbox_mem_pool);
}
reject_out:
@@ -7433,6 +7533,7 @@ static int
lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
struct lpfc_nodelist *ndlp)
{
+ int rc = 0;
struct lpfc_hba *phba = vport->phba;
struct ls_rjt stat;
struct RTV_RSP *rtv_rsp;
@@ -7482,8 +7583,17 @@ lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov);
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
phba->fc_stat.elsXmitACC++;
- if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 0;
+ }
+
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (rc == IOCB_ERROR) {
+ lpfc_nlp_put(ndlp);
lpfc_els_free_iocb(phba, elsiocb);
+ }
return 0;
reject_out:
@@ -7523,7 +7633,7 @@ lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (ndlp != rrq->ndlp)
ndlp = rrq->ndlp;
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+ if (!ndlp)
return 1;
/* If ndlp is not NULL, we will bump the reference count on it */
@@ -7552,13 +7662,20 @@ lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
did, rrq->xritag, rrq->rxid);
elsiocb->context_un.rrq = rrq;
elsiocb->iocb_cmpl = lpfc_cmpl_els_rrq;
- ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1)
+ goto node_err;
- if (ret == IOCB_ERROR) {
- lpfc_els_free_iocb(phba, elsiocb);
- return 1;
- }
+ ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (ret == IOCB_ERROR)
+ goto io_err;
return 0;
+
+ io_err:
+ lpfc_nlp_put(ndlp);
+ node_err:
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
}
/**
@@ -7611,6 +7728,7 @@ static int
lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
{
+ int rc = 0;
struct lpfc_hba *phba = vport->phba;
IOCB_t *icmd, *oldcmd;
RPL_RSP rpl_rsp;
@@ -7652,12 +7770,20 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
ndlp->nlp_rpi);
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
phba->fc_stat.elsXmitACC++;
- if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
- IOCB_ERROR) {
- lpfc_els_free_iocb(phba, elsiocb);
- return 1;
- }
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1)
+ goto node_err;
+
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (rc == IOCB_ERROR)
+ goto io_err;
return 0;
+
+ io_err:
+ lpfc_nlp_put(ndlp);
+ node_err:
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
}
/**
@@ -7994,7 +8120,7 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
else {
struct lpfc_nodelist *ndlp;
ndlp = __lpfc_findnode_rpi(vport, cmd->ulpContext);
- if (ndlp && NLP_CHK_NODE_ACT(ndlp))
+ if (ndlp)
remote_ID = ndlp->nlp_DID;
}
list_add_tail(&piocb->dlist, &abort_list);
@@ -8011,7 +8137,7 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
remote_ID, cmd->ulpCommand, cmd->ulpIoTag);
spin_lock_irq(&phba->hbalock);
list_del_init(&piocb->dlist);
- lpfc_sli_issue_abort_iotag(phba, pring, piocb);
+ lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL);
spin_unlock_irq(&phba->hbalock);
}
@@ -8111,7 +8237,7 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
spin_lock_irqsave(&phba->hbalock, iflags);
list_del_init(&piocb->dlist);
- lpfc_sli_issue_abort_iotag(phba, pring, piocb);
+ lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL);
spin_unlock_irqrestore(&phba->hbalock, iflags);
}
if (!list_empty(&abort_list))
@@ -8221,7 +8347,7 @@ lpfc_send_els_failure_event(struct lpfc_hba *phba,
uint32_t *pcmd;
ndlp = cmdiocbp->context1;
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+ if (!ndlp)
return;
if (rspiocbp->iocb.ulpStatus == IOSTAT_LS_RJT) {
@@ -8447,7 +8573,6 @@ static void
lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb)
{
- struct Scsi_Host *shost;
struct lpfc_nodelist *ndlp;
struct ls_rjt stat;
uint32_t *payload, payload_len;
@@ -8497,20 +8622,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
newnode = 1;
if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
ndlp->nlp_type |= NLP_FABRIC;
- } else if (!NLP_CHK_NODE_ACT(ndlp)) {
- ndlp = lpfc_enable_node(vport, ndlp,
- NLP_STE_UNUSED_NODE);
- if (!ndlp)
- goto dropit;
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
- newnode = 1;
- if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
- ndlp->nlp_type |= NLP_FABRIC;
} else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
- /* This is similar to the new node path */
- ndlp = lpfc_nlp_get(ndlp);
- if (!ndlp)
- goto dropit;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
newnode = 1;
}
@@ -8521,17 +8633,18 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
* Do not process any unsolicited ELS commands
* if the ndlp is in DEV_LOSS
*/
- shost = lpfc_shost_from_vport(vport);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
if (ndlp->nlp_flag & NLP_IN_DEV_LOSS) {
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
if (newnode)
lpfc_nlp_put(ndlp);
goto dropit;
}
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1)
+ goto dropit;
elsiocb->vport = vport;
if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) {
@@ -8540,9 +8653,9 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/* ELS command <elsCmd> received from NPORT <did> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0112 ELS command x%x received from NPORT x%x "
- "Data: x%x x%x x%x x%x\n",
- cmd, did, vport->port_state, vport->fc_flag,
- vport->fc_myDID, vport->fc_prevDID);
+ "refcnt %d Data: x%x x%x x%x x%x\n",
+ cmd, did, kref_read(&ndlp->kref), vport->port_state,
+ vport->fc_flag, vport->fc_myDID, vport->fc_prevDID);
/* reject till our FLOGI completes or PLOGI assigned DID via PT2PT */
if ((vport->port_state < LPFC_FABRIC_CFG_LINK) &&
@@ -8593,9 +8706,9 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
}
}
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_TARGET_REMOVE;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_disc_state_machine(vport, ndlp, elsiocb,
NLP_EVT_RCV_PLOGI);
@@ -8622,7 +8735,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
lpfc_els_rcv_flogi(vport, elsiocb, ndlp);
if (newnode)
- lpfc_nlp_put(ndlp);
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RM);
break;
case ELS_CMD_LOGO:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -8664,7 +8778,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
phba->fc_stat.elsRcvRSCN++;
lpfc_els_rcv_rscn(vport, elsiocb, ndlp);
if (newnode)
- lpfc_nlp_put(ndlp);
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RM);
break;
case ELS_CMD_ADISC:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -8742,7 +8857,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
phba->fc_stat.elsRcvLIRR++;
lpfc_els_rcv_lirr(vport, elsiocb, ndlp);
if (newnode)
- lpfc_nlp_put(ndlp);
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RM);
break;
case ELS_CMD_RLS:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -8752,7 +8868,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
phba->fc_stat.elsRcvRLS++;
lpfc_els_rcv_rls(vport, elsiocb, ndlp);
if (newnode)
- lpfc_nlp_put(ndlp);
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RM);
break;
case ELS_CMD_RPL:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -8762,7 +8879,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
phba->fc_stat.elsRcvRPL++;
lpfc_els_rcv_rpl(vport, elsiocb, ndlp);
if (newnode)
- lpfc_nlp_put(ndlp);
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RM);
break;
case ELS_CMD_RNID:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -8772,7 +8890,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
phba->fc_stat.elsRcvRNID++;
lpfc_els_rcv_rnid(vport, elsiocb, ndlp);
if (newnode)
- lpfc_nlp_put(ndlp);
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RM);
break;
case ELS_CMD_RTV:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -8781,7 +8900,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
phba->fc_stat.elsRcvRTV++;
lpfc_els_rcv_rtv(vport, elsiocb, ndlp);
if (newnode)
- lpfc_nlp_put(ndlp);
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RM);
break;
case ELS_CMD_RRQ:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -8791,7 +8911,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
phba->fc_stat.elsRcvRRQ++;
lpfc_els_rcv_rrq(vport, elsiocb, ndlp);
if (newnode)
- lpfc_nlp_put(ndlp);
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RM);
break;
case ELS_CMD_ECHO:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -8801,7 +8922,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
phba->fc_stat.elsRcvECHO++;
lpfc_els_rcv_echo(vport, elsiocb, ndlp);
if (newnode)
- lpfc_nlp_put(ndlp);
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RM);
break;
case ELS_CMD_REC:
/* receive this due to exchange closed */
@@ -8832,7 +8954,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
"0115 Unknown ELS command x%x "
"received from NPORT x%x\n", cmd, did);
if (newnode)
- lpfc_nlp_put(ndlp);
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RM);
break;
}
@@ -8843,9 +8966,14 @@ lsrjt:
stat.un.b.lsRjtRsnCode = rjt_err;
stat.un.b.lsRjtRsnCodeExp = rjt_exp;
lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp,
- NULL);
+ NULL);
+ /* Remove the reference from above for new nodes. */
+ if (newnode)
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RM);
}
+ /* Release the reference on this elsiocb, not the ndlp. */
lpfc_nlp_put(elsiocb->context1);
elsiocb->context1 = NULL;
@@ -8987,13 +9115,9 @@ lpfc_start_fdmi(struct lpfc_vport *vport)
return;
}
}
- if (!NLP_CHK_NODE_ACT(ndlp))
- ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
- if (ndlp) {
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
- lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
- }
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
+ lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
}
/**
@@ -9023,9 +9147,9 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
spin_lock_irq(shost->host_lock);
if (vport->fc_flag & FC_DISC_DELAYED) {
spin_unlock_irq(shost->host_lock);
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
- "3334 Delay fc port discovery for %d seconds\n",
- phba->fc_ratov);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "3334 Delay fc port discovery for %d secs\n",
+ phba->fc_ratov);
mod_timer(&vport->delayed_disc_tmo,
jiffies + msecs_to_jiffies(1000 * phba->fc_ratov));
return;
@@ -9045,19 +9169,8 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
"0251 NameServer login: no memory\n");
return;
}
- } else if (!NLP_CHK_NODE_ACT(ndlp)) {
- ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
- if (!ndlp) {
- if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
- lpfc_disc_start(vport);
- return;
- }
- lpfc_vport_set_state(vport, FC_VPORT_FAILED);
- lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
- "0348 NameServer login: node freed\n");
- return;
- }
}
+
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
@@ -9177,8 +9290,9 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lpfc_start_fdiscs(phba);
lpfc_do_scr_ns_plogi(phba, vport);
}
- } else
+ } else {
lpfc_do_scr_ns_plogi(phba, vport);
+ }
}
mbox_err_exit:
/* Now, we decrement the ndlp reference count held for this
@@ -9211,6 +9325,11 @@ lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
lpfc_reg_vpi(vport, mbox);
mbox->vport = vport;
mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
+ if (!mbox->ctx_ndlp) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ goto mbox_err_exit;
+ }
+
mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
== MBX_NOT_FINISHED) {
@@ -9283,7 +9402,6 @@ void
lpfc_retry_pport_discovery(struct lpfc_hba *phba)
{
struct lpfc_nodelist *ndlp;
- struct Scsi_Host *shost;
/* Cancel the all vports retry delay retry timers */
lpfc_cancel_all_vport_retry_delay_timer(phba);
@@ -9293,11 +9411,10 @@ lpfc_retry_pport_discovery(struct lpfc_hba *phba)
if (!ndlp)
return;
- shost = lpfc_shost_from_vport(phba->pport);
mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
phba->pport->port_state = LPFC_FLOGI;
return;
@@ -9419,13 +9536,12 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
*/
list_for_each_entry_safe(np, next_np,
&vport->fc_nodes, nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp) ||
- (np->nlp_state != NLP_STE_NPR_NODE) ||
+ if ((np->nlp_state != NLP_STE_NPR_NODE) ||
!(np->nlp_flag & NLP_NPR_ADISC))
continue;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
np->nlp_flag &= ~NLP_NPR_ADISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_unreg_rpi(vport, np);
}
lpfc_cleanup_pending_mbox(vport);
@@ -9448,6 +9564,7 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* to update the MAC address.
*/
lpfc_register_new_vport(phba, vport, ndlp);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
goto out;
}
@@ -9457,16 +9574,22 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_register_new_vport(phba, vport, ndlp);
else
lpfc_do_scr_ns_plogi(phba, vport);
+
+ /* The FDISC completed successfully. Move the fabric ndlp to
+ * UNMAPPED state and register with the transport.
+ */
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
goto out;
+
fdisc_failed:
if (vport->fc_vport &&
(vport->fc_vport->vport_state != FC_VPORT_NO_FABRIC_RSCS))
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
/* Cancel discovery timer */
lpfc_can_disctmo(vport);
- lpfc_nlp_put(ndlp);
out:
lpfc_els_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
}
/**
@@ -9559,16 +9682,25 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
"Issue FDISC: did:x%x",
did, 0, 0);
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1)
+ goto err_out;
+
rc = lpfc_issue_fabric_iocb(phba, elsiocb);
if (rc == IOCB_ERROR) {
- lpfc_els_free_iocb(phba, elsiocb);
- lpfc_vport_set_state(vport, FC_VPORT_FAILED);
- lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
- "0256 Issue FDISC: Cannot send IOCB\n");
- return 1;
+ lpfc_nlp_put(ndlp);
+ goto err_out;
}
+
lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING);
return 0;
+
+ err_out:
+ lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "0256 Issue FDISC: Cannot send IOCB\n");
+ return 1;
}
/**
@@ -9600,18 +9732,14 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
"LOGO npiv cmpl: status:x%x/x%x did:x%x",
irsp->ulpStatus, irsp->un.ulpWord[4], irsp->un.rcvels.remoteID);
- lpfc_els_free_iocb(phba, cmdiocb);
- vport->unreg_vpi_cmpl = VPORT_ERROR;
-
- /* Trigger the release of the ndlp after logo */
- lpfc_nlp_put(ndlp);
-
/* NPIV LOGO completes to NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"2928 NPIV LOGO completes to NPort x%x "
- "Data: x%x x%x x%x x%x\n",
+ "Data: x%x x%x x%x x%x x%x x%x x%x\n",
ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->ulpTimeout, vport->num_disc_nodes);
+ irsp->ulpTimeout, vport->num_disc_nodes,
+ kref_read(&ndlp->kref), ndlp->nlp_flag,
+ ndlp->fc4_xpt_flags);
if (irsp->ulpStatus == IOSTAT_SUCCESS) {
spin_lock_irq(shost->host_lock);
@@ -9620,6 +9748,11 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
spin_unlock_irq(shost->host_lock);
lpfc_can_disctmo(vport);
}
+
+ /* Safe to release resources now. */
+ lpfc_els_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
+ vport->unreg_vpi_cmpl = VPORT_ERROR;
}
/**
@@ -9641,7 +9774,7 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
int
lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ int rc = 0;
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *elsiocb;
uint8_t *pcmd;
@@ -9667,18 +9800,25 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
ndlp->nlp_DID, ndlp->nlp_flag, 0);
elsiocb->iocb_cmpl = lpfc_cmpl_els_npiv_logo;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_LOGO_SND;
- spin_unlock_irq(shost->host_lock);
- if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
- IOCB_ERROR) {
- spin_lock_irq(shost->host_lock);
- ndlp->nlp_flag &= ~NLP_LOGO_SND;
- spin_unlock_irq(shost->host_lock);
- lpfc_els_free_iocb(phba, elsiocb);
- return 1;
- }
+ spin_unlock_irq(&ndlp->lock);
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1)
+ goto node_err;
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (rc == IOCB_ERROR)
+ goto io_err;
return 0;
+
+ io_err:
+ lpfc_nlp_put(ndlp);
+ node_err:
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag &= ~NLP_LOGO_SND;
+ spin_unlock_irq(&ndlp->lock);
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
}
/**
@@ -9764,8 +9904,6 @@ repeat:
goto repeat;
}
}
-
- return;
}
/**
@@ -10046,8 +10184,10 @@ lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport)
spin_lock(&phba->sli4_hba.sgl_list_lock);
list_for_each_entry_safe(sglq_entry, sglq_next,
&phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
- if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport)
+ if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport) {
+ lpfc_nlp_put(sglq_entry->ndlp);
sglq_entry->ndlp = NULL;
+ }
}
spin_unlock(&phba->sli4_hba.sgl_list_lock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
@@ -10090,9 +10230,13 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
sglq_entry->state = SGL_FREED;
spin_unlock(&phba->sli4_hba.sgl_list_lock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
- lpfc_set_rrq_active(phba, ndlp,
- sglq_entry->sli4_lxritag,
- rxid, 1);
+
+ if (ndlp) {
+ lpfc_set_rrq_active(phba, ndlp,
+ sglq_entry->sli4_lxritag,
+ rxid, 1);
+ lpfc_nlp_put(ndlp);
+ }
/* Check if TXQ queue needs to be serviced */
if (pring && !list_empty(&pring->txq))
@@ -10155,10 +10299,10 @@ lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
* The rport is not responding. Remove the FCP-2 flag to prevent
* an ADISC in the follow-up recovery code.
*/
- spin_lock_irqsave(shost->host_lock, flags);
+ spin_lock_irqsave(&ndlp->lock, flags);
ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
ndlp->nlp_flag |= NLP_ISSUE_LOGO;
- spin_unlock_irqrestore(shost->host_lock, flags);
+ spin_unlock_irqrestore(&ndlp->lock, flags);
lpfc_unreg_rpi(vport, ndlp);
}
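The spin_lock_irqsave(&ndlp->lock, ...) conversion just above recurs through the rest of the series: nlp_flag and related per-node state move from the host-wide shost->host_lock to a lock embedded in the node itself. A reduced sketch of the new discipline; the struct layout and flag value are illustrative, not the driver's definitions:

#include <linux/spinlock.h>

struct nodelist {
	spinlock_t lock;	/* guards per-node flag state only */
	unsigned int nlp_flag;
};

#define NLP_EXAMPLE_FLAG 0x1	/* illustrative flag bit */

static void node_set_flag(struct nodelist *ndlp)
{
	unsigned long iflags;

	/* Serialize on the node, not on the whole Scsi_Host. */
	spin_lock_irqsave(&ndlp->lock, iflags);
	ndlp->nlp_flag |= NLP_EXAMPLE_FLAG;
	spin_unlock_irqrestore(&ndlp->lock, iflags);
}

Narrowing the lock removes contention between unrelated nodes and lets each node carry its own synchronization state.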
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index bb02fd8bc2dd..2b6b5fc671fe 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -73,34 +73,67 @@ static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
static int lpfc_fcf_inuse(struct lpfc_hba *);
static void lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *);
-void
-lpfc_terminate_rport_io(struct fc_rport *rport)
+/* The source of a terminate rport I/O is either a dev_loss_tmo
+ * event or a call to fc_remove_host. While the rport should be
+ * valid during these downcalls, the transport can call twice
+ * in a single event. This routine provides some protection
+ * as the NDLP isn't really free, just released to the pool.
+ */
+static int
+lpfc_rport_invalid(struct fc_rport *rport)
{
struct lpfc_rport_data *rdata;
- struct lpfc_nodelist * ndlp;
- struct lpfc_hba *phba;
+ struct lpfc_nodelist *ndlp;
+
+ if (!rport) {
+ pr_err("**** %s: NULL rport, exit.\n", __func__);
+ return -EINVAL;
+ }
rdata = rport->dd_data;
+ if (!rdata) {
+ pr_err("**** %s: NULL dd_data on rport %p SID x%x\n",
+ __func__, rport, rport->scsi_target_id);
+ return -EINVAL;
+ }
+
ndlp = rdata->pnode;
+ if (!rdata->pnode) {
+ pr_err("**** %s: NULL ndlp on rport %p SID x%x\n",
+ __func__, rport, rport->scsi_target_id);
+ return -EINVAL;
+ }
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
- if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
- printk(KERN_ERR "Cannot find remote node"
- " to terminate I/O Data x%x\n",
- rport->port_id);
- return;
+ if (!ndlp->vport) {
+ pr_err("**** %s: Null vport on ndlp %p, DID x%x rport %p "
+ "SID x%x\n", __func__, ndlp, ndlp->nlp_DID, rport,
+ rport->scsi_target_id);
+ return -EINVAL;
}
+ return 0;
+}
+
+void
+lpfc_terminate_rport_io(struct fc_rport *rport)
+{
+ struct lpfc_rport_data *rdata;
+ struct lpfc_nodelist *ndlp;
+ struct lpfc_vport *vport;
- phba = ndlp->phba;
+ if (lpfc_rport_invalid(rport))
+ return;
- lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
- "rport terminate: sid:x%x did:x%x flg:x%x",
- ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
+ rdata = rport->dd_data;
+ ndlp = rdata->pnode;
+ vport = ndlp->vport;
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
+ "rport terminate: sid:x%x did:x%x flg:x%x",
+ ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
if (ndlp->nlp_sid != NLP_NO_SID) {
- lpfc_sli_abort_iocb(ndlp->vport,
- &phba->sli.sli3_ring[LPFC_FCP_RING],
- ndlp->nlp_sid, 0, LPFC_CTX_TGT);
+ lpfc_sli_abort_iocb(vport,
+ &vport->phba->sli.sli3_ring[LPFC_FCP_RING],
+ ndlp->nlp_sid, 0, LPFC_CTX_TGT);
}
}
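lpfc_rport_invalid() above reduces to one idea: validate every link of the rport -> dd_data -> pnode -> vport chain before dereferencing it, because the transport may invoke the callback twice for one event. A condensed sketch with stand-in types (the driver's structures carry far more fields):

#include <linux/errno.h>

struct vport;
struct node { struct vport *vport; };
struct rport_data { struct node *pnode; };
struct fc_rport_stub { void *dd_data; };

static int rport_chain_invalid(struct fc_rport_stub *rport)
{
	struct rport_data *rdata;
	struct node *ndlp;

	if (!rport)
		return -EINVAL;		/* callback raced with teardown */
	rdata = rport->dd_data;
	if (!rdata || !rdata->pnode)
		return -EINVAL;		/* association already broken */
	ndlp = rdata->pnode;
	if (!ndlp->vport)
		return -EINVAL;		/* node detached from its port */
	return 0;
}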
@@ -110,19 +143,14 @@ lpfc_terminate_rport_io(struct fc_rport *rport)
void
lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
{
- struct lpfc_rport_data *rdata;
- struct lpfc_nodelist * ndlp;
+ struct lpfc_nodelist *ndlp;
struct lpfc_vport *vport;
- struct Scsi_Host *shost;
struct lpfc_hba *phba;
struct lpfc_work_evt *evtp;
- int put_node;
- int put_rport;
unsigned long iflags;
- rdata = rport->dd_data;
- ndlp = rdata->pnode;
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+ ndlp = ((struct lpfc_rport_data *)rport->dd_data)->pnode;
+ if (!ndlp)
return;
vport = ndlp->vport;
@@ -133,22 +161,24 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
- "3181 dev_loss_callbk x%06x, rport x%px flg x%x\n",
- ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);
+ "3181 dev_loss_callbk x%06x, rport %p flg x%x "
+ "load_flag x%x refcnt %d\n",
+ ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag,
+ vport->load_flag, kref_read(&ndlp->kref));
- /* Don't defer this if we are in the process of deleting the vport
- * or unloading the driver. The unload will cleanup the node
- * appropriately we just need to cleanup the ndlp rport info here.
+ /* Don't schedule a worker thread event if the vport is going down.
+ * The teardown process cleans up the node via lpfc_drop_node.
*/
if (vport->load_flag & FC_UNLOADING) {
- put_node = rdata->pnode != NULL;
- put_rport = ndlp->rport != NULL;
- rdata->pnode = NULL;
+ ((struct lpfc_rport_data *)rport->dd_data)->pnode = NULL;
ndlp->rport = NULL;
- if (put_node)
- lpfc_nlp_put(ndlp);
- if (put_rport)
- put_device(&rport->dev);
+
+ ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD;
+
+ /* Remove the node reference from remote_port_add now.
+ * The driver will not call remote_port_delete.
+ */
+ lpfc_nlp_put(ndlp);
return;
}
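The NULL checks on lpfc_nlp_get() threaded through this patch only make sense if the get is conditional; presumably it is built on kref_get_unless_zero(), so a get against a node whose last reference is already gone fails instead of resurrecting the node. A sketch under that assumption, with node_release() standing in for the driver's lpfc_nlp_release():

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct node {
	struct kref kref;
	/* ... remaining node state ... */
};

static void node_release(struct kref *kref)
{
	/* Last reference gone: return the node's memory. */
	kfree(container_of(kref, struct node, kref));
}

static struct node *node_get(struct node *n)
{
	/* Fails (returns NULL) once the refcount has already hit zero. */
	if (n && kref_get_unless_zero(&n->kref))
		return n;
	return NULL;
}

static void node_put(struct node *n)
{
	if (n)
		kref_put(&n->kref, node_release);
}

Every async submission then pairs one successful node_get() with exactly one node_put(), either in the completion path or in the error unwind.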
@@ -165,20 +195,28 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
if (!list_empty(&evtp->evt_listp)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
- "6790 rport name %llx dev_loss_evt pending",
+ "6790 rport name %llx dev_loss_evt pending\n",
rport->port_name);
return;
}
- shost = lpfc_shost_from_vport(vport);
- spin_lock_irqsave(shost->host_lock, iflags);
+ spin_lock_irqsave(&ndlp->lock, iflags);
ndlp->nlp_flag |= NLP_IN_DEV_LOSS;
- spin_unlock_irqrestore(shost->host_lock, iflags);
+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+
+ /*
+ * The backend does not expect any more calls associated with this
+ * rport. Remove the association between rport and ndlp.
+ */
+ ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD;
+ ((struct lpfc_rport_data *)rport->dd_data)->pnode = NULL;
+ ndlp->rport = NULL;
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
/* We need to hold the node by incrementing the reference
* count until this queued work is done
*/
- evtp->evt_arg1 = lpfc_nlp_get(ndlp);
+ evtp->evt_arg1 = lpfc_nlp_get(ndlp);
spin_lock_irqsave(&phba->hbalock, iflags);
if (evtp->evt_arg1) {
@@ -204,70 +242,34 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
static int
lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
{
- struct lpfc_rport_data *rdata;
- struct fc_rport *rport;
struct lpfc_vport *vport;
struct lpfc_hba *phba;
- struct Scsi_Host *shost;
uint8_t *name;
- int put_node;
int warn_on = 0;
int fcf_inuse = 0;
unsigned long iflags;
- rport = ndlp->rport;
vport = ndlp->vport;
- shost = lpfc_shost_from_vport(vport);
+ name = (uint8_t *)&ndlp->nlp_portname;
+ phba = vport->phba;
- spin_lock_irqsave(shost->host_lock, iflags);
+ spin_lock_irqsave(&ndlp->lock, iflags);
ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
- spin_unlock_irqrestore(shost->host_lock, iflags);
-
- if (!rport)
- return fcf_inuse;
-
- name = (uint8_t *) &ndlp->nlp_portname;
- phba = vport->phba;
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
if (phba->sli_rev == LPFC_SLI_REV4)
fcf_inuse = lpfc_fcf_inuse(phba);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
- "rport devlosstmo:did:x%x type:x%x id:x%x",
- ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);
+ "rport devlosstmo:did:x%x type:x%x id:x%x",
+ ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_sid);
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
- "3182 dev_loss_tmo_handler x%06x, rport x%px flg x%x\n",
- ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);
-
- /*
- * lpfc_nlp_remove if reached with dangling rport drops the
- * reference. To make sure that does not happen clear rport
- * pointer in ndlp before lpfc_nlp_put.
- */
- rdata = rport->dd_data;
-
- /* Don't defer this if we are in the process of deleting the vport
- * or unloading the driver. The unload will cleanup the node
- * appropriately we just need to cleanup the ndlp rport info here.
- */
- if (vport->load_flag & FC_UNLOADING) {
- if (ndlp->nlp_sid != NLP_NO_SID) {
- /* flush the target */
- lpfc_sli_abort_iocb(vport,
- &phba->sli.sli3_ring[LPFC_FCP_RING],
- ndlp->nlp_sid, 0, LPFC_CTX_TGT);
- }
- put_node = rdata->pnode != NULL;
- rdata->pnode = NULL;
- ndlp->rport = NULL;
- if (put_node)
- lpfc_nlp_put(ndlp);
- put_device(&rport->dev);
-
- return fcf_inuse;
- }
+ "3182 %s x%06x, nflag x%x xflags x%x refcnt %d\n",
+ __func__, ndlp->nlp_DID, ndlp->nlp_flag,
+ ndlp->fc4_xpt_flags, kref_read(&ndlp->kref));
+ /* If the driver is recovering the rport, ignore devloss. */
if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0284 Devloss timeout Ignored on "
@@ -279,15 +281,11 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
return fcf_inuse;
}
- put_node = rdata->pnode != NULL;
- rdata->pnode = NULL;
- ndlp->rport = NULL;
- if (put_node)
+ /* Fabric nodes are done. */
+ if (ndlp->nlp_type & NLP_FABRIC) {
lpfc_nlp_put(ndlp);
- put_device(&rport->dev);
-
- if (ndlp->nlp_type & NLP_FABRIC)
return fcf_inuse;
+ }
if (ndlp->nlp_sid != NLP_NO_SID) {
warn_on = 1;
@@ -315,11 +313,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
ndlp->nlp_state, ndlp->nlp_rpi);
}
- if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
- !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
- (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
- (ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) &&
- (ndlp->nlp_state != NLP_STE_PRLI_ISSUE))
+ if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD))
lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
return fcf_inuse;
@@ -552,6 +546,15 @@ lpfc_work_list_done(struct lpfc_hba *phba)
fcf_inuse,
nlp_did);
break;
+ case LPFC_EVT_RECOVER_PORT:
+ ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
+ lpfc_sli_abts_recover_port(ndlp->vport, ndlp);
+ free_evt = 0;
+ /* decrement the node reference count held for
+ * this queued work
+ */
+ lpfc_nlp_put(ndlp);
+ break;
case LPFC_EVT_ONLINE:
if (phba->link_state < LPFC_LINK_DOWN)
*(int *) (evtp->evt_arg1) = lpfc_online(phba);
@@ -811,13 +814,13 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
struct lpfc_nodelist *ndlp, *next_ndlp;
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp))
- continue;
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
continue;
+
if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
- ((vport->port_type == LPFC_NPIV_PORT) &&
- (ndlp->nlp_DID == NameServer_DID)))
+ ((vport->port_type == LPFC_NPIV_PORT) &&
+ ((ndlp->nlp_DID == NameServer_DID) ||
+ (ndlp->nlp_DID == FDMI_DID))))
lpfc_unreg_rpi(vport, ndlp);
/* Leave Fabric nodes alone on link down */
@@ -983,8 +986,7 @@ lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
- if (!NLP_CHK_NODE_ACT(ndlp))
- continue;
+
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
continue;
if (ndlp->nlp_type & NLP_FABRIC) {
@@ -1032,9 +1034,7 @@ lpfc_linkup_port(struct lpfc_vport *vport)
vport->fc_ns_retry = 0;
spin_unlock_irq(shost->host_lock);
- if (vport->fc_flag & FC_LBIT)
- lpfc_linkup_cleanup_nodes(vport);
-
+ lpfc_linkup_cleanup_nodes(vport);
}
static int
@@ -3196,7 +3196,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
}
phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la);
- phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
+ phba->link_flag &= ~(LS_NPIV_FAB_SUPPORTED | LS_CT_VEN_RPA);
shost = lpfc_shost_from_vport(vport);
if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
@@ -3579,16 +3579,15 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
struct lpfc_vport *vport = pmb->vport;
struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
pmb->ctx_buf = NULL;
pmb->ctx_ndlp = NULL;
- lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
- "0002 rpi:%x DID:%x flg:%x %d map:%x x%px\n",
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI | LOG_NODE | LOG_DISCOVERY,
+ "0002 rpi:%x DID:%x flg:%x %d x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref),
- ndlp->nlp_usg_map, ndlp);
+ ndlp);
if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
@@ -3602,9 +3601,9 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
* there is another reg login in
* process.
*/
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
/*
* We cannot leave the RPI registered because
@@ -4076,8 +4075,16 @@ out:
kfree(mp);
mempool_free(pmb, phba->mbox_mem_pool);
- /* If no other thread is using the ndlp, free it */
- lpfc_nlp_not_used(ndlp);
+ /* If the node is not registered with the scsi or nvme
+ * transport, remove the fabric node. The failed reg_login
+ * is terminal.
+ */
+ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+ spin_unlock_irq(&ndlp->lock);
+ lpfc_nlp_not_used(ndlp);
+ }
if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
/*
@@ -4100,10 +4107,10 @@ out:
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
- "0003 rpi:%x DID:%x flg:%x %d map%x x%px\n",
+ "0003 rpi:%x DID:%x flg:%x %d x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref),
- ndlp->nlp_usg_map, ndlp);
+ ndlp);
if (vport->port_state < LPFC_VPORT_READY) {
/* Link up discovery requires Fabric registration. */
@@ -4124,7 +4131,9 @@ out:
/* Issue SCR just before NameServer GID_FT Query */
lpfc_issue_els_scr(vport, 0);
- lpfc_issue_els_rdf(vport, 0);
+ if (!phba->cfg_enable_mi ||
+ phba->sli4_hba.pc_sli4_params.mi_ver < LPFC_MIB3_SUPPORT)
+ lpfc_issue_els_rdf(vport, 0);
}
vport->fc_ns_retry = 0;
@@ -4154,6 +4163,7 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
struct lpfc_rport_data *rdata;
struct fc_rport_identifiers rport_ids;
struct lpfc_hba *phba = vport->phba;
+ unsigned long flags;
if (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
return;
@@ -4164,47 +4174,46 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
rport_ids.port_id = ndlp->nlp_DID;
rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
- /*
- * We leave our node pointer in rport->dd_data when we unregister a
- * FCP target port. But fc_remote_port_add zeros the space to which
- * rport->dd_data points. So, if we're reusing a previously
- * registered port, drop the reference that we took the last time we
- * registered the port.
- */
- rport = ndlp->rport;
- if (rport) {
- rdata = rport->dd_data;
- /* break the link before dropping the ref */
- ndlp->rport = NULL;
- if (rdata) {
- if (rdata->pnode == ndlp)
- lpfc_nlp_put(ndlp);
- rdata->pnode = NULL;
- }
- /* drop reference for earlier registeration */
- put_device(&rport->dev);
- }
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
- "rport add: did:x%x flg:x%x type x%x",
- ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
+ "rport add: did:x%x flg:x%x type x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
/* Don't add the remote port if unloading. */
if (vport->load_flag & FC_UNLOADING)
return;
+ /*
+ * Disassociate any older association between this ndlp and rport
+ */
+ if (ndlp->rport) {
+ rdata = ndlp->rport->dd_data;
+ rdata->pnode = NULL;
+ }
+
ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
- if (!rport || !get_device(&rport->dev)) {
+ if (!rport) {
dev_printk(KERN_WARNING, &phba->pcidev->dev,
"Warning: fc_remote_port_add failed\n");
return;
}
- /* initialize static port data */
+ /* Successful port add. Complete initializing node data */
rport->maxframe_size = ndlp->nlp_maxframe;
rport->supported_classes = ndlp->nlp_class_sup;
rdata = rport->dd_data;
rdata->pnode = lpfc_nlp_get(ndlp);
+ if (!rdata->pnode) {
+ dev_warn(&phba->pcidev->dev,
+ "Warning - node ref failed. Unreg rport\n");
+ fc_remote_port_delete(rport);
+ ndlp->rport = NULL;
+ return;
+ }
+
+ spin_lock_irqsave(&ndlp->lock, flags);
+ ndlp->fc4_xpt_flags |= SCSI_XPT_REGD;
+ spin_unlock_irqrestore(&ndlp->lock, flags);
if (ndlp->nlp_type & NLP_FCP_TARGET)
rport_ids.roles |= FC_PORT_ROLE_FCP_TARGET;
@@ -4221,13 +4230,14 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
fc_remote_port_rolechg(rport, rport_ids.roles);
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
- "3183 rport register x%06x, rport x%px role x%x\n",
- ndlp->nlp_DID, rport, rport_ids.roles);
+ "3183 %s rport x%px DID x%x, role x%x\n",
+ __func__, rport, rport->port_id, rport->roles);
if ((rport->scsi_target_id != -1) &&
(rport->scsi_target_id < LPFC_MAX_TARGET)) {
ndlp->nlp_sid = rport->scsi_target_id;
}
+
return;
}
@@ -4245,12 +4255,12 @@ lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
- "3184 rport unregister x%06x, rport x%px\n",
- ndlp->nlp_DID, rport);
+ "3184 rport unregister x%06x, rport x%px "
+ "xptflg x%x\n",
+ ndlp->nlp_DID, rport, ndlp->fc4_xpt_flags);
fc_remote_port_delete(rport);
-
- return;
+ lpfc_nlp_put(ndlp);
}
static void
@@ -4296,8 +4306,6 @@ static void
lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
int old_state, int new_state)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
-
if (new_state == NLP_STE_UNMAPPED_NODE) {
ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
ndlp->nlp_type |= NLP_FC_NODE;
@@ -4391,9 +4399,9 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
(!ndlp->rport ||
ndlp->rport->scsi_target_id == -1 ||
ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
}
}
@@ -4426,6 +4434,7 @@ lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
int old_state = ndlp->nlp_state;
+ int node_dropped = ndlp->nlp_flag & NLP_DROPPED;
char name1[16], name2[16];
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
@@ -4438,6 +4447,12 @@ lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
"node statechg did:x%x old:%d ste:%d",
ndlp->nlp_DID, old_state, state);
+ if (node_dropped && old_state == NLP_STE_UNUSED_NODE &&
+ state != NLP_STE_UNUSED_NODE) {
+ ndlp->nlp_flag &= ~NLP_DROPPED;
+ lpfc_nlp_get(ndlp);
+ }
+
if (old_state == NLP_STE_NPR_NODE &&
state != NLP_STE_NPR_NODE)
lpfc_cancel_retry_delay_tmo(vport, ndlp);
@@ -4485,15 +4500,6 @@ lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
NLP_STE_UNUSED_NODE);
}
-static void
-lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
-{
- lpfc_cancel_retry_delay_tmo(vport, ndlp);
- if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
- lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
- lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
- NLP_STE_UNUSED_NODE);
-}
/**
* lpfc_initialize_node - Initialize all fields of node object
* @vport: Pointer to Virtual Port object.
@@ -4515,128 +4521,19 @@ lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
timer_setup(&ndlp->nlp_delayfunc, lpfc_els_retry_delay, 0);
+ INIT_LIST_HEAD(&ndlp->recovery_evt.evt_listp);
+
ndlp->nlp_DID = did;
ndlp->vport = vport;
ndlp->phba = vport->phba;
ndlp->nlp_sid = NLP_NO_SID;
ndlp->nlp_fc4_type = NLP_FC4_NONE;
kref_init(&ndlp->kref);
- NLP_INT_NODE_ACT(ndlp);
atomic_set(&ndlp->cmd_pending, 0);
ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
}
-struct lpfc_nodelist *
-lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
- int state)
-{
- struct lpfc_hba *phba = vport->phba;
- uint32_t did, flag;
- unsigned long flags;
- unsigned long *active_rrqs_xri_bitmap = NULL;
- int rpi = LPFC_RPI_ALLOC_ERROR;
- uint32_t defer_did = 0;
-
- if (!ndlp)
- return NULL;
-
- if (phba->sli_rev == LPFC_SLI_REV4) {
- if (ndlp->nlp_rpi == LPFC_RPI_ALLOC_ERROR)
- rpi = lpfc_sli4_alloc_rpi(vport->phba);
- else
- rpi = ndlp->nlp_rpi;
-
- if (rpi == LPFC_RPI_ALLOC_ERROR) {
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
- "0359 %s: ndlp:x%px "
- "usgmap:x%x refcnt:%d FAILED RPI "
- " ALLOC\n",
- __func__,
- (void *)ndlp, ndlp->nlp_usg_map,
- kref_read(&ndlp->kref));
- return NULL;
- }
- }
-
- spin_lock_irqsave(&phba->ndlp_lock, flags);
- /* The ndlp should not be in memory free mode */
- if (NLP_CHK_FREE_REQ(ndlp)) {
- spin_unlock_irqrestore(&phba->ndlp_lock, flags);
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
- "0277 %s: ndlp:x%px "
- "usgmap:x%x refcnt:%d\n",
- __func__, (void *)ndlp, ndlp->nlp_usg_map,
- kref_read(&ndlp->kref));
- goto free_rpi;
- }
- /* The ndlp should not already be in active mode */
- if (NLP_CHK_NODE_ACT(ndlp)) {
- spin_unlock_irqrestore(&phba->ndlp_lock, flags);
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
- "0278 %s: ndlp:x%px "
- "usgmap:x%x refcnt:%d\n",
- __func__, (void *)ndlp, ndlp->nlp_usg_map,
- kref_read(&ndlp->kref));
- goto free_rpi;
- }
-
- /* First preserve the orginal DID, xri_bitmap and some flags */
- did = ndlp->nlp_DID;
- flag = (ndlp->nlp_flag & NLP_UNREG_INP);
- if (flag & NLP_UNREG_INP)
- defer_did = ndlp->nlp_defer_did;
- if (phba->sli_rev == LPFC_SLI_REV4)
- active_rrqs_xri_bitmap = ndlp->active_rrqs_xri_bitmap;
-
- /* Zero ndlp except of ndlp linked list pointer */
- memset((((char *)ndlp) + sizeof (struct list_head)), 0,
- sizeof (struct lpfc_nodelist) - sizeof (struct list_head));
-
- /* Next reinitialize and restore saved objects */
- lpfc_initialize_node(vport, ndlp, did);
- ndlp->nlp_flag |= flag;
- if (flag & NLP_UNREG_INP)
- ndlp->nlp_defer_did = defer_did;
- if (phba->sli_rev == LPFC_SLI_REV4)
- ndlp->active_rrqs_xri_bitmap = active_rrqs_xri_bitmap;
-
- spin_unlock_irqrestore(&phba->ndlp_lock, flags);
- if (vport->phba->sli_rev == LPFC_SLI_REV4) {
- ndlp->nlp_rpi = rpi;
- lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
- "0008 rpi:%x DID:%x flg:%x refcnt:%d "
- "map:%x x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID,
- ndlp->nlp_flag,
- kref_read(&ndlp->kref),
- ndlp->nlp_usg_map, ndlp);
- }
-
-
- if (state != NLP_STE_UNUSED_NODE)
- lpfc_nlp_set_state(vport, ndlp, state);
- else
- lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
- "0013 rpi:%x DID:%x flg:%x refcnt:%d "
- "map:%x x%px STATE=UNUSED\n",
- ndlp->nlp_rpi, ndlp->nlp_DID,
- ndlp->nlp_flag,
- kref_read(&ndlp->kref),
- ndlp->nlp_usg_map, ndlp);
-
- lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
- "node enable: did:x%x",
- ndlp->nlp_DID, 0, 0);
- return ndlp;
-
-free_rpi:
- if (phba->sli_rev == LPFC_SLI_REV4) {
- lpfc_sli4_free_rpi(vport->phba, rpi);
- ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
- }
- return NULL;
-}
-
void
lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
@@ -4650,6 +4547,7 @@ lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
return;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
+ ndlp->nlp_flag |= NLP_DROPPED;
if (vport->phba->sli_rev == LPFC_SLI_REV4) {
lpfc_cleanup_vports_rrqs(vport, ndlp);
lpfc_unreg_rpi(vport, ndlp);
@@ -4908,8 +4806,14 @@ lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport,
{
unsigned long iflags;
+ /* Driver always gets a reference on the mailbox job
+ * in support of async jobs.
+ */
+ mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
+ if (!mbox->ctx_ndlp)
+ return;
+
if (ndlp->nlp_flag & NLP_ISSUE_LOGO) {
- mbox->ctx_ndlp = ndlp;
mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
} else if (phba->sli_rev == LPFC_SLI_REV4 &&
@@ -4917,20 +4821,15 @@ lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport,
(bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
LPFC_SLI_INTF_IF_TYPE_2) &&
(kref_read(&ndlp->kref) > 0)) {
- mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
mbox->mbox_cmpl = lpfc_sli4_unreg_rpi_cmpl_clr;
} else {
if (vport->load_flag & FC_UNLOADING) {
if (phba->sli_rev == LPFC_SLI_REV4) {
- spin_lock_irqsave(&vport->phba->ndlp_lock,
- iflags);
+ spin_lock_irqsave(&ndlp->lock, iflags);
ndlp->nlp_flag |= NLP_RELEASE_RPI;
- spin_unlock_irqrestore(&vport->phba->ndlp_lock,
- iflags);
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
}
- lpfc_nlp_get(ndlp);
}
- mbox->ctx_ndlp = ndlp;
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
}
}
@@ -4988,6 +4887,11 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
mbox->vport = vport;
lpfc_set_unreg_login_mbx_cmpl(phba, vport, ndlp, mbox);
+ if (!mbox->ctx_ndlp) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return 1;
+ }
+
if (mbox->mbox_cmpl == lpfc_sli4_unreg_rpi_cmpl_clr)
/*
* accept PLOGIs after unreg_rpi_cmpl
@@ -5011,6 +4915,29 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
mempool_free(mbox, phba->mbox_mem_pool);
acc_plogi = 1;
}
+ } else {
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_NODE | LOG_DISCOVERY,
+ "1444 Failed to allocate mempool "
+ "unreg_rpi UNREG x%x, "
+ "DID x%x, flag x%x, "
+ "ndlp x%px\n",
+ ndlp->nlp_rpi, ndlp->nlp_DID,
+ ndlp->nlp_flag, ndlp);
+
+ /* Because mempool_alloc failed, we
+ * will issue a LOGO here and keep the rpi alive if
+ * not unloading.
+ */
+ if (!(vport->load_flag & FC_UNLOADING)) {
+ ndlp->nlp_flag &= ~NLP_UNREG_INP;
+ lpfc_issue_els_logo(vport, ndlp, 0);
+ ndlp->nlp_prev_state = ndlp->nlp_state;
+ lpfc_nlp_set_state(vport, ndlp,
+ NLP_STE_NPR_NODE);
+ }
+
+ return 1;
}
lpfc_no_rpi(phba, ndlp);
out:
@@ -5131,11 +5058,9 @@ lpfc_unreg_default_rpis(struct lpfc_vport *vport)
static int
lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *mb, *nextmb;
struct lpfc_dmabuf *mp;
- unsigned long iflags;
/* Cleanup node for NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
@@ -5143,22 +5068,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
"Data: x%x x%x x%x\n",
ndlp->nlp_DID, ndlp->nlp_flag,
ndlp->nlp_state, ndlp->nlp_rpi);
- if (NLP_CHK_FREE_REQ(ndlp)) {
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
- "0280 %s: ndlp:x%px "
- "usgmap:x%x refcnt:%d\n",
- __func__, (void *)ndlp, ndlp->nlp_usg_map,
- kref_read(&ndlp->kref));
- lpfc_dequeue_node(vport, ndlp);
- } else {
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
- "0281 %s: ndlp:x%px "
- "usgmap:x%x refcnt:%d\n",
- __func__, (void *)ndlp, ndlp->nlp_usg_map,
- kref_read(&ndlp->kref));
- lpfc_disable_node(vport, ndlp);
- }
-
+ lpfc_dequeue_node(vport, ndlp);
/* Don't need to clean up REG_LOGIN64 cmds for Default RPI cleanup */
@@ -5205,108 +5115,22 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
lpfc_els_abort(phba, ndlp);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_DELAY_TMO;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
ndlp->nlp_last_elscmd = 0;
del_timer_sync(&ndlp->nlp_delayfunc);
list_del_init(&ndlp->els_retry_evt.evt_listp);
list_del_init(&ndlp->dev_loss_evt.evt_listp);
+ list_del_init(&ndlp->recovery_evt.evt_listp);
lpfc_cleanup_vports_rrqs(vport, ndlp);
if (phba->sli_rev == LPFC_SLI_REV4)
ndlp->nlp_flag |= NLP_RELEASE_RPI;
- if (!lpfc_unreg_rpi(vport, ndlp)) {
- /* Clean up unregistered and non freed rpis */
- if ((ndlp->nlp_flag & NLP_RELEASE_RPI) &&
- !(ndlp->nlp_rpi == LPFC_RPI_ALLOC_ERROR)) {
- lpfc_sli4_free_rpi(vport->phba,
- ndlp->nlp_rpi);
- spin_lock_irqsave(&vport->phba->ndlp_lock,
- iflags);
- ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
- ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
- spin_unlock_irqrestore(&vport->phba->ndlp_lock,
- iflags);
- }
- }
return 0;
}
-/*
- * Check to see if we can free the nlp back to the freelist.
- * If we are in the middle of using the nlp in the discovery state
- * machine, defer the free till we reach the end of the state machine.
- */
-static void
-lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
-{
- struct lpfc_hba *phba = vport->phba;
- struct lpfc_rport_data *rdata;
- struct fc_rport *rport;
- LPFC_MBOXQ_t *mbox;
- int rc;
-
- lpfc_cancel_retry_delay_tmo(vport, ndlp);
- if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
- !(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) &&
- !(ndlp->nlp_flag & NLP_RPI_REGISTERED) &&
- phba->sli_rev != LPFC_SLI_REV4) {
- /* For this case we need to cleanup the default rpi
- * allocated by the firmware.
- */
- lpfc_printf_vlog(vport, KERN_INFO,
- LOG_NODE | LOG_DISCOVERY,
- "0005 Cleanup Default rpi:x%x DID:x%x flg:x%x "
- "ref %d map:x%x ndlp x%px\n",
- ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
- kref_read(&ndlp->kref),
- ndlp->nlp_usg_map, ndlp);
- if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
- != NULL) {
- rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID,
- (uint8_t *) &vport->fc_sparam, mbox, ndlp->nlp_rpi);
- if (rc) {
- mempool_free(mbox, phba->mbox_mem_pool);
- }
- else {
- mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
- mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
- mbox->vport = vport;
- mbox->ctx_ndlp = ndlp;
- rc =lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED) {
- mempool_free(mbox, phba->mbox_mem_pool);
- }
- }
- }
- }
- lpfc_cleanup_node(vport, ndlp);
-
- /*
- * ndlp->rport must be set to NULL before it reaches here
- * i.e. break rport/node link before doing lpfc_nlp_put for
- * registered rport and then drop the reference of rport.
- */
- if (ndlp->rport) {
- /*
- * extra lpfc_nlp_put dropped the reference of ndlp
- * for registered rport so need to cleanup rport
- */
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
- "0940 removed node x%px DID x%x "
- "rpi %d rport not null x%px\n",
- ndlp, ndlp->nlp_DID, ndlp->nlp_rpi,
- ndlp->rport);
- rport = ndlp->rport;
- rdata = rport->dd_data;
- rdata->pnode = NULL;
- ndlp->rport = NULL;
- put_device(&rport->dev);
- }
-}
-
static int
lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint32_t did)
@@ -5373,8 +5197,8 @@ __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
if (lpfc_matchdid(vport, ndlp, did)) {
data1 = (((uint32_t)ndlp->nlp_state << 24) |
((uint32_t)ndlp->nlp_xri << 16) |
- ((uint32_t)ndlp->nlp_type << 8) |
- ((uint32_t)ndlp->nlp_usg_map & 0xff));
+ ((uint32_t)ndlp->nlp_type << 8));
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"0929 FIND node DID "
"Data: x%px x%x x%x x%x x%x x%px\n",
@@ -5442,7 +5266,6 @@ lpfc_findnode_mapped(struct lpfc_vport *vport)
struct lpfc_nodelist *
lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp;
ndlp = lpfc_findnode_did(vport, did);
@@ -5463,28 +5286,9 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
ndlp->nlp_DID, ndlp->nlp_flag,
ndlp->nlp_state, vport->fc_flag);
- spin_lock_irq(shost->host_lock);
- ndlp->nlp_flag |= NLP_NPR_2B_DISC;
- spin_unlock_irq(shost->host_lock);
- return ndlp;
- } else if (!NLP_CHK_NODE_ACT(ndlp)) {
- if (vport->phba->nvmet_support)
- return NULL;
- ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
- if (!ndlp) {
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
- "0014 Could not enable ndlp\n");
- return NULL;
- }
- lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "6454 Setup Enabled Node 2B_DISC x%x "
- "Data:x%x x%x x%x\n",
- ndlp->nlp_DID, ndlp->nlp_flag,
- ndlp->nlp_state, vport->fc_flag);
-
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
return ndlp;
}
@@ -5526,9 +5330,9 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
ndlp->nlp_prev_state = ndlp->nlp_state;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
} else {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"6456 Skip Setup RSCN Node x%x "
@@ -5562,9 +5366,9 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
*/
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
}
return ndlp;
}
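
The hunks above, and the many like them throughout this series, make one mechanical
substitution: nlp_flag updates that previously serialized on the Scsi_Host-wide
host_lock now take a spinlock embedded in the node itself. A minimal sketch of the
pattern, with illustrative names rather than the driver's real types:

	#include <linux/spinlock.h>

	struct node {
		spinlock_t lock;	/* protects flags below */
		unsigned long flags;
	};

	static void node_set_flag(struct node *n, unsigned long bit)
	{
		/* contention is now per node, not per host */
		spin_lock_irq(&n->lock);
		n->flags |= bit;
		spin_unlock_irq(&n->lock);
	}

Per-object locking shortens the critical sections and keeps unrelated nodes from
contending on a single host-wide lock.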
@@ -5802,7 +5606,7 @@ lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
icmd = &iocb->iocb;
if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR ||
icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) {
- lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+ lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
}
}
spin_unlock_irq(&phba->hbalock);
@@ -5821,8 +5625,6 @@ lpfc_disc_flush_list(struct lpfc_vport *vport)
if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp))
- continue;
if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
lpfc_free_tx(phba, ndlp);
@@ -5910,8 +5712,6 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
/* Start discovery by sending FLOGI, clean up old rpis */
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp))
- continue;
if (ndlp->nlp_state != NLP_STE_NPR_NODE)
continue;
if (ndlp->nlp_type & NLP_FABRIC) {
@@ -5963,7 +5763,7 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
"NameServer login\n");
/* Next look for NameServer ndlp */
ndlp = lpfc_findnode_did(vport, NameServer_DID);
- if (ndlp && NLP_CHK_NODE_ACT(ndlp))
+ if (ndlp)
lpfc_els_abort(phba, ndlp);
/* ReStart discovery */
@@ -6136,10 +5936,10 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
- "0004 rpi:%x DID:%x flg:%x %d map:%x x%px\n",
+ "0004 rpi:%x DID:%x flg:%x %d x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref),
- ndlp->nlp_usg_map, ndlp);
+ ndlp);
/*
* Start issuing Fabric-Device Management Interface (FDMI) command to
* 0xfffffa (FDMI well known port).
@@ -6168,10 +5968,6 @@ lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
{
uint16_t *rpi = param;
- /* check for active node */
- if (!NLP_CHK_NODE_ACT(ndlp))
- return 0;
-
return ndlp->nlp_rpi == *rpi;
}
@@ -6320,16 +6116,17 @@ lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did)
memset(ndlp, 0, sizeof (struct lpfc_nodelist));
+ spin_lock_init(&ndlp->lock);
+
lpfc_initialize_node(vport, ndlp, did);
INIT_LIST_HEAD(&ndlp->nlp_listp);
if (vport->phba->sli_rev == LPFC_SLI_REV4) {
ndlp->nlp_rpi = rpi;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
"0007 Init New ndlp x%px, rpi:x%x DID:%x "
- "flg:x%x refcnt:%d map:x%x\n",
+ "flg:x%x refcnt:%d\n",
ndlp, ndlp->nlp_rpi, ndlp->nlp_DID,
- ndlp->nlp_flag, kref_read(&ndlp->kref),
- ndlp->nlp_usg_map);
+ ndlp->nlp_flag, kref_read(&ndlp->kref));
ndlp->active_rrqs_xri_bitmap =
mempool_alloc(vport->phba->active_rrq_pool,
@@ -6354,39 +6151,37 @@ lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did)
static void
lpfc_nlp_release(struct kref *kref)
{
- struct lpfc_hba *phba;
- unsigned long flags;
struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
kref);
+ struct lpfc_vport *vport = ndlp->vport;
lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
"node release: did:x%x flg:x%x type:x%x",
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
- lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
- "0279 %s: ndlp:x%px did %x "
- "usgmap:x%x refcnt:%d rpi:%x\n",
- __func__,
- (void *)ndlp, ndlp->nlp_DID, ndlp->nlp_usg_map,
- kref_read(&ndlp->kref), ndlp->nlp_rpi);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+ "0279 %s: ndlp:%p did %x refcnt:%d rpi:%x\n",
+ __func__, ndlp, ndlp->nlp_DID,
+ kref_read(&ndlp->kref), ndlp->nlp_rpi);
/* remove ndlp from action. */
- lpfc_nlp_remove(ndlp->vport, ndlp);
+ lpfc_cancel_retry_delay_tmo(vport, ndlp);
+ lpfc_cleanup_node(vport, ndlp);
- /* clear the ndlp active flag for all release cases */
- phba = ndlp->phba;
- spin_lock_irqsave(&phba->ndlp_lock, flags);
- NLP_CLR_NODE_ACT(ndlp);
- spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+ /* Clear Node key fields to give other threads notice
+ * that this node memory is not valid anymore.
+ */
+ ndlp->vport = NULL;
+ ndlp->nlp_state = NLP_STE_FREED_NODE;
+ ndlp->nlp_flag = 0;
+ ndlp->fc4_xpt_flags = 0;
/* free ndlp memory for final ndlp release */
- if (NLP_CHK_FREE_REQ(ndlp)) {
- kfree(ndlp->lat_data);
- if (phba->sli_rev == LPFC_SLI_REV4)
- mempool_free(ndlp->active_rrqs_xri_bitmap,
- ndlp->phba->active_rrq_pool);
- mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
- }
+ kfree(ndlp->lat_data);
+ if (ndlp->phba->sli_rev == LPFC_SLI_REV4)
+ mempool_free(ndlp->active_rrqs_xri_bitmap,
+ ndlp->phba->active_rrq_pool);
+ mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
}
/* This routine bumps the reference count for a ndlp structure to ensure
@@ -6396,7 +6191,6 @@ lpfc_nlp_release(struct kref *kref)
struct lpfc_nodelist *
lpfc_nlp_get(struct lpfc_nodelist *ndlp)
{
- struct lpfc_hba *phba;
unsigned long flags;
if (ndlp) {
@@ -6404,94 +6198,43 @@ lpfc_nlp_get(struct lpfc_nodelist *ndlp)
"node get: did:x%x flg:x%x refcnt:x%x",
ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref));
+
		/* Check ndlp usage so we do not increment a reference
		 * count that is already in the process of being
		 * released.
		 */
- phba = ndlp->phba;
- spin_lock_irqsave(&phba->ndlp_lock, flags);
- if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) {
- spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+ spin_lock_irqsave(&ndlp->lock, flags);
+ if (!kref_get_unless_zero(&ndlp->kref)) {
+ spin_unlock_irqrestore(&ndlp->lock, flags);
lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
- "0276 %s: ndlp:x%px "
- "usgmap:x%x refcnt:%d\n",
- __func__, (void *)ndlp, ndlp->nlp_usg_map,
- kref_read(&ndlp->kref));
+ "0276 %s: ndlp:x%px refcnt:%d\n",
+ __func__, (void *)ndlp, kref_read(&ndlp->kref));
return NULL;
- } else
- kref_get(&ndlp->kref);
- spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+ }
+ spin_unlock_irqrestore(&ndlp->lock, flags);
+ } else {
+ WARN_ONCE(!ndlp, "**** %s, get ref on NULL ndlp!", __func__);
}
+
return ndlp;
}
/* This routine decrements the reference count for a ndlp structure. If the
- * count goes to 0, this indicates the the associated nodelist should be
- * freed. Returning 1 indicates the ndlp resource has been released; on the
- * other hand, returning 0 indicates the ndlp resource has not been released
- * yet.
+ * count goes to 0, this indicates the associated nodelist should be freed.
*/
int
lpfc_nlp_put(struct lpfc_nodelist *ndlp)
{
- struct lpfc_hba *phba;
- unsigned long flags;
-
- if (!ndlp)
- return 1;
-
- lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
- "node put: did:x%x flg:x%x refcnt:x%x",
- ndlp->nlp_DID, ndlp->nlp_flag,
- kref_read(&ndlp->kref));
- phba = ndlp->phba;
- spin_lock_irqsave(&phba->ndlp_lock, flags);
- /* Check the ndlp memory free acknowledge flag to avoid the
- * possible race condition that kref_put got invoked again
- * after previous one has done ndlp memory free.
- */
- if (NLP_CHK_FREE_ACK(ndlp)) {
- spin_unlock_irqrestore(&phba->ndlp_lock, flags);
- lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
- "0274 %s: ndlp:x%px "
- "usgmap:x%x refcnt:%d\n",
- __func__, (void *)ndlp, ndlp->nlp_usg_map,
- kref_read(&ndlp->kref));
- return 1;
- }
- /* Check the ndlp inactivate log flag to avoid the possible
- * race condition that kref_put got invoked again after ndlp
- * is already in inactivating state.
- */
- if (NLP_CHK_IACT_REQ(ndlp)) {
- spin_unlock_irqrestore(&phba->ndlp_lock, flags);
- lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
- "0275 %s: ndlp:x%px "
- "usgmap:x%x refcnt:%d\n",
- __func__, (void *)ndlp, ndlp->nlp_usg_map,
+ if (ndlp) {
+ lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
+ "node put: did:x%x flg:x%x refcnt:x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref));
- return 1;
+ } else {
+ WARN_ONCE(!ndlp, "**** %s, put ref on NULL ndlp!", __func__);
}
- /* For last put, mark the ndlp usage flags to make sure no
- * other kref_get and kref_put on the same ndlp shall get
- * in between the process when the final kref_put has been
- * invoked on this ndlp.
- */
- if (kref_read(&ndlp->kref) == 1) {
- /* Indicate ndlp is put to inactive state. */
- NLP_SET_IACT_REQ(ndlp);
- /* Acknowledge ndlp memory free has been seen. */
- if (NLP_CHK_FREE_REQ(ndlp))
- NLP_SET_FREE_ACK(ndlp);
- }
- spin_unlock_irqrestore(&phba->ndlp_lock, flags);
- /* Note, the kref_put returns 1 when decrementing a reference
- * count that was 1, it invokes the release callback function,
- * but it still left the reference count as 1 (not actually
- * performs the last decrementation). Otherwise, it actually
- * decrements the reference count and returns 0.
- */
- return kref_put(&ndlp->kref, lpfc_nlp_release);
+
+ return ndlp ? kref_put(&ndlp->kref, lpfc_nlp_release) : 0;
}
/* This routine free's the specified nodelist if it is not in use
@@ -6551,7 +6294,7 @@ lpfc_fcf_inuse(struct lpfc_hba *phba)
goto out;
}
list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
- if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
+ if (ndlp->rport &&
(ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
ret = 1;
spin_unlock_irq(shost->host_lock);
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index c20034b3101c..42682d95af52 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1380,6 +1380,9 @@ struct lpfc_fdmi_reg_hba {
struct lpfc_fdmi_reg_port_list rpl;
};
+/******** MI MIB ********/
+#define SLI_CT_MIB_Subtypes 0x11
+
/*
* Register HBA Attributes (RHAT)
*/
@@ -1465,7 +1468,7 @@ struct lpfc_fdmi_reg_portattr {
#define LPFC_FDMI2_HBA_ATTR 0x0002efff
/*
- * Port Attrubute Types
+ * Port Attribute Types
*/
#define RPRT_SUPPORTED_FC4_TYPES 0x1 /* 32 byte binary array */
#define RPRT_SUPPORTED_SPEED 0x2 /* 32-bit unsigned int */
@@ -1483,6 +1486,7 @@ struct lpfc_fdmi_reg_portattr {
#define RPRT_PORT_STATE 0x101 /* 32-bit unsigned int */
#define RPRT_DISC_PORT 0x102 /* 32-bit unsigned int */
#define RPRT_PORT_ID 0x103 /* 32-bit unsigned int */
+#define RPRT_VENDOR_MI 0xf047 /* vendor ascii string */
#define RPRT_SMART_SERVICE 0xf100 /* 4 to 256 byte ASCII string */
#define RPRT_SMART_GUID 0xf101 /* 8 byte WWNN + 8 byte WWPN */
#define RPRT_SMART_VERSION 0xf102 /* 4 to 256 byte ASCII string */
@@ -1515,6 +1519,7 @@ struct lpfc_fdmi_reg_portattr {
#define LPFC_FDMI_SMART_ATTR_port_info 0x00100000 /* Vendor specific */
#define LPFC_FDMI_SMART_ATTR_qos 0x00200000 /* Vendor specific */
#define LPFC_FDMI_SMART_ATTR_security 0x00400000 /* Vendor specific */
+#define LPFC_FDMI_VENDOR_ATTR_mi 0x00800000 /* Vendor specific */
/* Bit mask for FDMI-1 defined PORT attributes */
#define LPFC_FDMI1_PORT_ATTR 0x0000003f
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 12e4e76233e6..541b9aef6bfe 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2009-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -3506,8 +3506,14 @@ struct lpfc_sli4_parameters {
#define cfg_max_tow_xri_MASK 0x0000ffff
#define cfg_max_tow_xri_WORD word20
- uint32_t word21; /* RESERVED */
- uint32_t word22; /* RESERVED */
+ uint32_t word21;
+#define cfg_mib_bde_cnt_SHIFT 16
+#define cfg_mib_bde_cnt_MASK 0x000000ff
+#define cfg_mib_bde_cnt_WORD word21
+#define cfg_mi_ver_SHIFT 0
+#define cfg_mi_ver_MASK 0x0000ffff
+#define cfg_mi_ver_WORD word21
+ uint32_t mib_size;
uint32_t word23; /* RESERVED */
uint32_t word24;
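
The new cfg_mi_ver_*/cfg_mib_bde_cnt_* definitions follow the driver's
SHIFT/MASK/WORD convention, which the bf_get()/bf_set() macros consume by token
pasting. Roughly (simplified from the definitions in lpfc_hw4.h):

	#define bf_get(name, ptr) \
		(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)

	/*
	 * So bf_get(cfg_mi_ver, params) expands to
	 *	(((params)->word21 >> 0) & 0x0000ffff)
	 */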
@@ -4380,9 +4386,11 @@ struct wqe_common {
#define wqe_ebde_cnt_SHIFT 0
#define wqe_ebde_cnt_MASK 0x0000000f
#define wqe_ebde_cnt_WORD word10
-#define wqe_nvme_SHIFT 4
-#define wqe_nvme_MASK 0x00000001
-#define wqe_nvme_WORD word10
+#define wqe_xchg_SHIFT 4
+#define wqe_xchg_MASK 0x00000001
+#define wqe_xchg_WORD word10
+#define LPFC_SCSI_XCHG 0x0
+#define LPFC_NVME_XCHG 0x1
#define wqe_oas_SHIFT 6
#define wqe_oas_MASK 0x00000001
#define wqe_oas_WORD word10
@@ -4880,6 +4888,8 @@ struct lpfc_grp_hdr {
#define NVME_READ_CMD 0x0
#define FCP_COMMAND_DATA_OUT 0x1
#define NVME_WRITE_CMD 0x1
+#define COMMAND_DATA_IN 0x0
+#define COMMAND_DATA_OUT 0x1
#define FCP_COMMAND_TRECEIVE 0x2
#define FCP_COMMAND_TRSP 0x3
#define FCP_COMMAND_TSEND 0x7
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index ca25e54bb782..ac67f420ec26 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -2844,28 +2844,6 @@ lpfc_cleanup(struct lpfc_vport *vport)
lpfc_port_link_failure(vport);
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp)) {
- ndlp = lpfc_enable_node(vport, ndlp,
- NLP_STE_UNUSED_NODE);
- if (!ndlp)
- continue;
- spin_lock_irq(&phba->ndlp_lock);
- NLP_SET_FREE_REQ(ndlp);
- spin_unlock_irq(&phba->ndlp_lock);
- /* Trigger the release of the ndlp memory */
- lpfc_nlp_put(ndlp);
- continue;
- }
- spin_lock_irq(&phba->ndlp_lock);
- if (NLP_CHK_FREE_REQ(ndlp)) {
- /* The ndlp should not be in memory free mode already */
- spin_unlock_irq(&phba->ndlp_lock);
- continue;
- } else
- /* Indicate request for freeing ndlp memory */
- NLP_SET_FREE_REQ(ndlp);
- spin_unlock_irq(&phba->ndlp_lock);
-
if (vport->port_type != LPFC_PHYSICAL_PORT &&
ndlp->nlp_DID == Fabric_DID) {
/* Just free up ndlp with Fabric_DID for vports */
@@ -2873,20 +2851,23 @@ lpfc_cleanup(struct lpfc_vport *vport)
continue;
}
- /* take care of nodes in unused state before the state
- * machine taking action.
- */
- if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
+ if (ndlp->nlp_DID == Fabric_Cntl_DID &&
+ ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
lpfc_nlp_put(ndlp);
continue;
}
- if (ndlp->nlp_type & NLP_FABRIC)
+ /* Fabric Ports not in UNMAPPED state are cleaned up in the
+ * DEVICE_RM event.
+ */
+ if (ndlp->nlp_type & NLP_FABRIC &&
+ ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
lpfc_disc_state_machine(vport, ndlp, NULL,
NLP_EVT_DEVICE_RECOVERY);
- lpfc_disc_state_machine(vport, ndlp, NULL,
- NLP_EVT_DEVICE_RM);
+ if (!(ndlp->fc4_xpt_flags & (NVME_XPT_REGD|SCSI_XPT_REGD)))
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RM);
}
/* At this point, ALL ndlp's should be gone
@@ -2901,12 +2882,13 @@ lpfc_cleanup(struct lpfc_vport *vport)
list_for_each_entry_safe(ndlp, next_ndlp,
&vport->fc_nodes, nlp_listp) {
lpfc_printf_vlog(ndlp->vport, KERN_ERR,
- LOG_TRACE_EVENT,
- "0282 did:x%x ndlp:x%px "
- "usgmap:x%x refcnt:%d\n",
- ndlp->nlp_DID, (void *)ndlp,
- ndlp->nlp_usg_map,
- kref_read(&ndlp->kref));
+ LOG_TRACE_EVENT,
+ "0282 did:x%x ndlp:x%px "
+ "refcnt:%d xflags x%x nflag x%x\n",
+ ndlp->nlp_DID, (void *)ndlp,
+ kref_read(&ndlp->kref),
+ ndlp->fc4_xpt_flags,
+ ndlp->nlp_flag);
}
break;
}
@@ -3080,7 +3062,6 @@ lpfc_sli4_node_prep(struct lpfc_hba *phba)
struct lpfc_nodelist *ndlp, *next_ndlp;
struct lpfc_vport **vports;
int i, rpi;
- unsigned long flags;
if (phba->sli_rev != LPFC_SLI_REV4)
return;
@@ -3096,22 +3077,18 @@ lpfc_sli4_node_prep(struct lpfc_hba *phba)
list_for_each_entry_safe(ndlp, next_ndlp,
&vports[i]->fc_nodes,
nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp))
- continue;
rpi = lpfc_sli4_alloc_rpi(phba);
if (rpi == LPFC_RPI_ALLOC_ERROR) {
- spin_lock_irqsave(&phba->ndlp_lock, flags);
- NLP_CLR_NODE_ACT(ndlp);
- spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+ /* TODO print log? */
continue;
}
ndlp->nlp_rpi = rpi;
lpfc_printf_vlog(ndlp->vport, KERN_INFO,
LOG_NODE | LOG_DISCOVERY,
"0009 Assign RPI x%x to ndlp x%px "
- "DID:x%06x flg:x%x map:x%x\n",
+ "DID:x%06x flg:x%x\n",
ndlp->nlp_rpi, ndlp, ndlp->nlp_DID,
- ndlp->nlp_flag, ndlp->nlp_usg_map);
+ ndlp->nlp_flag);
}
}
lpfc_destroy_vport_work_array(phba, vports);
@@ -3510,8 +3487,7 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
list_for_each_entry_safe(ndlp, next_ndlp,
&vports[i]->fc_nodes,
nlp_listp) {
- if ((!NLP_CHK_NODE_ACT(ndlp)) ||
- ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
+ if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
/* Driver must assume RPI is invalid for
* any unused or inactive node.
*/
@@ -3519,33 +3495,42 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
continue;
}
- if (ndlp->nlp_type & NLP_FABRIC) {
- lpfc_disc_state_machine(vports[i], ndlp,
- NULL, NLP_EVT_DEVICE_RECOVERY);
- lpfc_disc_state_machine(vports[i], ndlp,
- NULL, NLP_EVT_DEVICE_RM);
- }
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
/*
* Whenever an SLI4 port goes offline, free the
* RPI. Get a new RPI when the adapter port
* comes back online.
*/
if (phba->sli_rev == LPFC_SLI_REV4) {
- lpfc_printf_vlog(ndlp->vport, KERN_INFO,
+ lpfc_printf_vlog(vports[i], KERN_INFO,
LOG_NODE | LOG_DISCOVERY,
"0011 Free RPI x%x on "
- "ndlp:x%px did x%x "
- "usgmap:x%x\n",
+ "ndlp: %p did x%x\n",
ndlp->nlp_rpi, ndlp,
- ndlp->nlp_DID,
- ndlp->nlp_usg_map);
+ ndlp->nlp_DID);
lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
}
lpfc_unreg_rpi(vports[i], ndlp);
+
+ if (ndlp->nlp_type & NLP_FABRIC) {
+ lpfc_disc_state_machine(vports[i], ndlp,
+ NULL, NLP_EVT_DEVICE_RECOVERY);
+
+			/* Don't remove the node unless it
+			 * has been unregistered with the
+			 * transport. Otherwise, let dev_loss
+			 * take care of the node.
+			 */
+ if (!(ndlp->fc4_xpt_flags &
+ (NVME_XPT_REGD | SCSI_XPT_REGD)))
+ lpfc_disc_state_machine
+ (vports[i], ndlp,
+ NULL,
+ NLP_EVT_DEVICE_RM);
+ }
}
}
}
@@ -4343,16 +4328,13 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
/* Seed physical port template */
memcpy(template, &lpfc_template, sizeof(*template));
- if (use_no_reset_hba) {
+ if (use_no_reset_hba)
/* template is for a no reset SCSI Host */
- template->max_sectors = 0xffff;
template->eh_host_reset_handler = NULL;
- }
/* Template for all vports this physical port creates */
memcpy(&phba->vport_template, &lpfc_template,
sizeof(*template));
- phba->vport_template.max_sectors = 0xffff;
phba->vport_template.shost_attrs = lpfc_vport_attrs;
phba->vport_template.eh_bus_reset_handler = NULL;
phba->vport_template.eh_host_reset_handler = NULL;
@@ -5607,11 +5589,6 @@ lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
ndlp->nlp_type |= NLP_FABRIC;
/* Put ndlp onto node list */
lpfc_enqueue_node(vport, ndlp);
- } else if (!NLP_CHK_NODE_ACT(ndlp)) {
- /* re-setup ndlp without removing from node list */
- ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
- if (!ndlp)
- return 0;
}
if ((phba->pport->port_state < LPFC_FLOGI) &&
(phba->pport->port_state != LPFC_VPORT_FAILED))
@@ -5667,7 +5644,6 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
int rc;
struct lpfc_vport *vport;
struct lpfc_nodelist *ndlp;
- struct Scsi_Host *shost;
int active_vlink_present;
struct lpfc_vport **vports;
int i;
@@ -5848,10 +5824,9 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
*/
mod_timer(&ndlp->nlp_delayfunc,
jiffies + msecs_to_jiffies(1000));
- shost = lpfc_shost_from_vport(vport);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
vport->port_state = LPFC_FDISC;
} else {
@@ -5958,18 +5933,21 @@ lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
{
struct lpfc_cq_event *cq_event;
+ unsigned long iflags;
/* First, declare the async event has been handled */
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflags);
phba->hba_flag &= ~ASYNC_EVENT;
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+
/* Now, handle all the async events */
+ spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
- /* Get the first event from the head of the event queue */
- spin_lock_irq(&phba->hbalock);
list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
cq_event, struct lpfc_cq_event, list);
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock,
+ iflags);
+
/* Process the asynchronous event */
switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
case LPFC_TRAILER_CODE_LINK:
@@ -6001,9 +5979,12 @@ void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
&cq_event->cqe.mcqe_cmpl));
break;
}
+
/* Free the completion event processed to the free pool */
lpfc_sli4_cq_event_release(phba, cq_event);
+ spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
}
+ spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
}
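
The reworked loop is a standard producer/consumer drain: each event is unlinked
under the new asynce_list_lock, but processed with the lock dropped so
interrupt-context producers can keep queueing. A generic sketch, assuming a
hypothetical event type and handler:

	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct event {
		struct list_head list;
	};

	static void handle_event(struct event *ev);	/* hypothetical */

	static void drain_events(spinlock_t *lock, struct list_head *queue)
	{
		struct event *ev;
		unsigned long flags;

		spin_lock_irqsave(lock, flags);
		while (!list_empty(queue)) {
			ev = list_first_entry(queue, struct event, list);
			list_del_init(&ev->list);
			spin_unlock_irqrestore(lock, flags);

			handle_event(ev);	/* runs unlocked */

			spin_lock_irqsave(lock, flags);
		}
		spin_unlock_irqrestore(lock, flags);
	}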
/**
@@ -6295,9 +6276,6 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
atomic_set(&phba->dbg_log_dmping, 0);
spin_lock_init(&phba->hbalock);
- /* Initialize ndlp management spinlock */
- spin_lock_init(&phba->ndlp_lock);
-
/* Initialize port_list spinlock */
spin_lock_init(&phba->port_list_lock);
INIT_LIST_HEAD(&phba->port_list);
@@ -6630,6 +6608,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* This abort list used by worker thread */
spin_lock_init(&phba->sli4_hba.sgl_list_lock);
spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
+ spin_lock_init(&phba->sli4_hba.asynce_list_lock);
+ spin_lock_init(&phba->sli4_hba.els_xri_abrt_list_lock);
/*
* Initialize driver internal slow-path work queues
@@ -6641,8 +6621,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
/* Asynchronous event CQ Event work queue list */
INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
- /* Fast-path XRI aborted CQ Event work queue list */
- INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
/* Slow-path XRI aborted CQ Event work queue list */
INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
/* Receive queue CQ Event work queue list */
@@ -7196,7 +7174,6 @@ lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
"1431 Invalid HBA PCI-device group: 0x%x\n",
dev_grp);
return -ENODEV;
- break;
}
return 0;
}
@@ -10174,26 +10151,28 @@ lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
static void
lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
{
- LIST_HEAD(cqelist);
- struct lpfc_cq_event *cqe;
+ LIST_HEAD(cq_event_list);
+ struct lpfc_cq_event *cq_event;
unsigned long iflags;
/* Retrieve all the pending WCQEs from pending WCQE lists */
- spin_lock_irqsave(&phba->hbalock, iflags);
- /* Pending FCP XRI abort events */
- list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
- &cqelist);
+
/* Pending ELS XRI abort events */
+ spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
- &cqelist);
+ &cq_event_list);
+ spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
+
/* Pending asynnc events */
+ spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
- &cqelist);
- spin_unlock_irqrestore(&phba->hbalock, iflags);
+ &cq_event_list);
+ spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
- while (!list_empty(&cqelist)) {
- list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
- lpfc_sli4_cq_event_release(phba, cqe);
+ while (!list_empty(&cq_event_list)) {
+ list_remove_head(&cq_event_list, cq_event,
+ struct lpfc_cq_event, list);
+ lpfc_sli4_cq_event_release(phba, cq_event);
}
}
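
lpfc_sli4_cq_event_release_all() now uses the splice-then-free idiom: each source
list is emptied onto a private list under its own lock, and the private list is
then walked with no locks held. In outline (lock and list names here are
placeholders):

	LIST_HEAD(tmp);

	spin_lock_irqsave(&els_lock, flags);
	list_splice_init(&els_list, &tmp);	/* els_list is now empty */
	spin_unlock_irqrestore(&els_lock, flags);

	spin_lock_irqsave(&async_lock, flags);
	list_splice_init(&async_list, &tmp);
	spin_unlock_irqrestore(&async_lock, flags);

	/* tmp is private to this thread; release entries lock-free */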
@@ -12310,6 +12289,21 @@ fcponly:
else
phba->nsler = 0;
+	/* Save MIB info for use during HBA setup */
+ sli4_params->mi_ver = bf_get(cfg_mi_ver, mbx_sli4_parameters);
+ sli4_params->mib_bde_cnt = bf_get(cfg_mib_bde_cnt, mbx_sli4_parameters);
+ sli4_params->mib_size = mbx_sli4_parameters->mib_size;
+ sli4_params->mi_value = LPFC_DFLT_MIB_VAL;
+
+ /* Next we check for Vendor MIB support */
+ if (sli4_params->mi_ver && phba->cfg_enable_mi)
+ phba->cfg_fdmi_on = LPFC_FDMI_SUPPORT;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "6461 MIB attr %d enable %d FDMI %d buf %d:%d\n",
+ sli4_params->mi_ver, phba->cfg_enable_mi,
+ sli4_params->mi_value, sli4_params->mib_bde_cnt,
+ sli4_params->mib_size);
return 0;
}
@@ -12515,10 +12509,11 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
}
lpfc_destroy_vport_work_array(phba, vports);
- /* Remove FC host and then SCSI host with the physical port */
+ /* Remove FC host with the physical port */
fc_remove_host(shost);
scsi_remove_host(shost);
+ /* Clean up all nodes, mailboxes and IOs. */
lpfc_cleanup(vport);
/*
@@ -12581,8 +12576,7 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
/**
* lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
- * @pdev: pointer to PCI device
- * @msg: power management message
+ * @dev_d: pointer to device
*
* This routine is to be called from the kernel's PCI subsystem to support
* system Power Management (PM) to device with SLI-3 interface spec. When
@@ -12600,10 +12594,10 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
* 0 - driver suspended the device
* Error otherwise
**/
-static int
-lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
+static int __maybe_unused
+lpfc_pci_suspend_one_s3(struct device *dev_d)
{
- struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct Scsi_Host *shost = dev_get_drvdata(dev_d);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
@@ -12617,16 +12611,12 @@ lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
/* Disable interrupt from device */
lpfc_sli_disable_intr(phba);
- /* Save device state to PCI config space */
- pci_save_state(pdev);
- pci_set_power_state(pdev, PCI_D3hot);
-
return 0;
}
/**
* lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
- * @pdev: pointer to PCI device
+ * @dev_d: pointer to device
*
* This routine is to be called from the kernel's PCI subsystem to support
* system Power Management (PM) to device with SLI-3 interface spec. When PM
@@ -12643,10 +12633,10 @@ lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
* 0 - driver resumed the device
* Error otherwise
**/
-static int
-lpfc_pci_resume_one_s3(struct pci_dev *pdev)
+static int __maybe_unused
+lpfc_pci_resume_one_s3(struct device *dev_d)
{
- struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct Scsi_Host *shost = dev_get_drvdata(dev_d);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
uint32_t intr_mode;
int error;
@@ -12654,19 +12644,6 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev)
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0452 PCI device Power Management resume.\n");
- /* Restore device state from PCI config space */
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
-
- /*
- * As the new kernel behavior of pci_restore_state() API call clears
- * device saved_state flag, need to save the restored state again.
- */
- pci_save_state(pdev);
-
- if (pdev->is_busmaster)
- pci_set_master(pdev);
-
/* Startup the kernel thread for this host adapter. */
phba->worker_thread = kthread_run(lpfc_do_work, phba,
"lpfc_worker_%d", phba->brd_no);
@@ -13358,7 +13335,6 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
vport->load_flag |= FC_UNLOADING;
spin_unlock_irq(&phba->hbalock);
- /* Free the HBA sysfs attributes */
lpfc_free_sysfs_attr(vport);
/* Release all the vports against this physical port */
@@ -13371,7 +13347,7 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
}
lpfc_destroy_vport_work_array(phba, vports);
- /* Remove FC host and then SCSI host with the physical port */
+ /* Remove FC host with the physical port */
fc_remove_host(shost);
scsi_remove_host(shost);
@@ -13423,8 +13399,7 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
/**
* lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
- * @pdev: pointer to PCI device
- * @msg: power management message
+ * @dev_d: pointer to device
*
* This routine is called from the kernel's PCI subsystem to support system
* Power Management (PM) to device with SLI-4 interface spec. When PM invokes
@@ -13442,10 +13417,10 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
* 0 - driver suspended the device
* Error otherwise
**/
-static int
-lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
+static int __maybe_unused
+lpfc_pci_suspend_one_s4(struct device *dev_d)
{
- struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct Scsi_Host *shost = dev_get_drvdata(dev_d);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
@@ -13460,16 +13435,12 @@ lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
lpfc_sli4_disable_intr(phba);
lpfc_sli4_queue_destroy(phba);
- /* Save device state to PCI config space */
- pci_save_state(pdev);
- pci_set_power_state(pdev, PCI_D3hot);
-
return 0;
}
/**
* lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
- * @pdev: pointer to PCI device
+ * @dev_d: pointer to device
*
* This routine is called from the kernel's PCI subsystem to support system
* Power Management (PM) to device with SLI-4 interface spec. When PM invokes
@@ -13486,10 +13457,10 @@ lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
* 0 - driver resumed the device
* Error otherwise
**/
-static int
-lpfc_pci_resume_one_s4(struct pci_dev *pdev)
+static int __maybe_unused
+lpfc_pci_resume_one_s4(struct device *dev_d)
{
- struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct Scsi_Host *shost = dev_get_drvdata(dev_d);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
uint32_t intr_mode;
int error;
@@ -13497,19 +13468,6 @@ lpfc_pci_resume_one_s4(struct pci_dev *pdev)
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0292 PCI device Power Management resume.\n");
- /* Restore device state from PCI config space */
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
-
- /*
- * As the new kernel behavior of pci_restore_state() API call clears
- * device saved_state flag, need to save the restored state again.
- */
- pci_save_state(pdev);
-
- if (pdev->is_busmaster)
- pci_set_master(pdev);
-
/* Startup the kernel thread for this host adapter. */
phba->worker_thread = kthread_run(lpfc_do_work, phba,
"lpfc_worker_%d", phba->brd_no);
@@ -13825,8 +13783,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
/**
* lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
- * @pdev: pointer to PCI device
- * @msg: power management message
+ * @dev: pointer to device
*
* This routine is to be registered to the kernel's PCI subsystem to support
* system Power Management (PM). When PM invokes this method, it dispatches
@@ -13837,19 +13794,19 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
* 0 - driver suspended the device
* Error otherwise
**/
-static int
-lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
+static int __maybe_unused
+lpfc_pci_suspend_one(struct device *dev)
{
- struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct Scsi_Host *shost = dev_get_drvdata(dev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
int rc = -ENODEV;
switch (phba->pci_dev_grp) {
case LPFC_PCI_DEV_LP:
- rc = lpfc_pci_suspend_one_s3(pdev, msg);
+ rc = lpfc_pci_suspend_one_s3(dev);
break;
case LPFC_PCI_DEV_OC:
- rc = lpfc_pci_suspend_one_s4(pdev, msg);
+ rc = lpfc_pci_suspend_one_s4(dev);
break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
@@ -13862,7 +13819,7 @@ lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
/**
* lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
- * @pdev: pointer to PCI device
+ * @dev: pointer to device
*
* This routine is to be registered to the kernel's PCI subsystem to support
* system Power Management (PM). When PM invokes this method, it dispatches
@@ -13873,19 +13830,19 @@ lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
* 0 - driver resumed the device
* Error otherwise
**/
-static int
-lpfc_pci_resume_one(struct pci_dev *pdev)
+static int __maybe_unused
+lpfc_pci_resume_one(struct device *dev)
{
- struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct Scsi_Host *shost = dev_get_drvdata(dev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
int rc = -ENODEV;
switch (phba->pci_dev_grp) {
case LPFC_PCI_DEV_LP:
- rc = lpfc_pci_resume_one_s3(pdev);
+ rc = lpfc_pci_resume_one_s3(dev);
break;
case LPFC_PCI_DEV_OC:
- rc = lpfc_pci_resume_one_s4(pdev);
+ rc = lpfc_pci_resume_one_s4(dev);
break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
@@ -14065,14 +14022,17 @@ static const struct pci_error_handlers lpfc_err_handler = {
.resume = lpfc_io_resume,
};
+static SIMPLE_DEV_PM_OPS(lpfc_pci_pm_ops_one,
+ lpfc_pci_suspend_one,
+ lpfc_pci_resume_one);
+
static struct pci_driver lpfc_driver = {
.name = LPFC_DRIVER_NAME,
.id_table = lpfc_id_table,
.probe = lpfc_pci_probe_one,
.remove = lpfc_pci_remove_one,
.shutdown = lpfc_pci_remove_one,
- .suspend = lpfc_pci_suspend_one,
- .resume = lpfc_pci_resume_one,
+ .driver.pm = &lpfc_pci_pm_ops_one,
.err_handler = &lpfc_err_handler,
};
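
The legacy pci_driver .suspend/.resume hooks are converted to dev_pm_ops. Under
SIMPLE_DEV_PM_OPS the PCI core saves config space and manages D-states itself,
which is why the explicit pci_save_state() and pci_set_power_state() calls drop
out of the callbacks above, and __maybe_unused keeps !CONFIG_PM_SLEEP builds
warning-free. A skeleton of the conversion, with placeholder driver names:

	#include <linux/pci.h>
	#include <linux/pm.h>

	static int __maybe_unused mydrv_suspend(struct device *dev)
	{
		/* quiesce the device; the PCI core handles D-states */
		return 0;
	}

	static int __maybe_unused mydrv_resume(struct device *dev)
	{
		/* restart what suspend stopped */
		return 0;
	}

	static SIMPLE_DEV_PM_OPS(mydrv_pm_ops, mydrv_suspend, mydrv_resume);

	static struct pci_driver mydrv_driver = {
		.name		= "mydrv",
		.driver.pm	= &mydrv_pm_ops,
		/* .id_table, .probe, .remove as usual */
	};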
@@ -14124,7 +14084,7 @@ lpfc_init(void)
fc_release_transport(lpfc_transport_template);
goto unregister;
}
- lpfc_nvme_cmd_template();
+ lpfc_wqe_cmd_template();
lpfc_nvmet_cmd_template();
/* Initialize in case vector mapping is needed */
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 27ff67e9edae..be54fbf5146f 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -46,6 +46,7 @@
#define LPFC_MEM_POOL_SIZE 64 /* max elem in non-DMA safety pool */
#define LPFC_DEVICE_DATA_POOL_SIZE 64 /* max elements in device data pool */
#define LPFC_RRQ_POOL_SIZE 256 /* max elements in non-DMA pool */
+#define LPFC_MBX_POOL_SIZE 256 /* max elements in MBX non-DMA pool */
int
lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *phba) {
@@ -111,8 +112,8 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
pool->current_count++;
}
- phba->mbox_mem_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
- sizeof(LPFC_MBOXQ_t));
+ phba->mbox_mem_pool = mempool_create_kmalloc_pool(LPFC_MBX_POOL_SIZE,
+ sizeof(LPFC_MBOXQ_t));
if (!phba->mbox_mem_pool)
goto fail_free_mbuf_pool;
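
Context for the size bump: a kmalloc-backed mempool pre-allocates min_nr elements
so allocations can always fall back to the reserve under memory pressure; raising
the mailbox pool to LPFC_MBX_POOL_SIZE deepens that reserve. Basic shape of the
API, for reference:

	#include <linux/mempool.h>

	static mempool_t *pool;

	static int pool_init(size_t obj_size)
	{
		/* reserve 256 elements up front */
		pool = mempool_create_kmalloc_pool(256, obj_size);
		return pool ? 0 : -ENOMEM;
	}

	/* mempool_alloc(pool, GFP_KERNEL) may sleep but draws on the
	 * reserve when kmalloc fails; pair with mempool_free(obj, pool)
	 * and tear down with mempool_destroy(pool).
	 */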
@@ -588,8 +589,6 @@ lpfc_sli4_rb_free(struct lpfc_hba *phba, struct hbq_dmabuf *dmab)
* Description: Allocates a DMA-mapped receive buffer from the lpfc_hrb_pool PCI
* pool along with a non-DMA-mapped container for it.
*
- * Notes: Not interrupt-safe. Must be called with no locks held.
- *
* Returns:
* pointer to HBQ on success
* NULL on failure
@@ -599,7 +598,7 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
{
struct rqb_dmabuf *dma_buf;
- dma_buf = kzalloc(sizeof(struct rqb_dmabuf), GFP_KERNEL);
+ dma_buf = kzalloc(sizeof(*dma_buf), GFP_KERNEL);
if (!dma_buf)
return NULL;
@@ -722,7 +721,6 @@ lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
drqe.address_hi = putPaddrHigh(rqb_entry->dbuf.phys);
rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe);
if (rc < 0) {
- (rqbp->rqb_free_buffer)(phba, rqb_entry);
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"6409 Cannot post to HRQ %d: %x %x %x "
"DRQ %x %x\n",
@@ -732,6 +730,7 @@ lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
rqb_entry->hrq->entry_count,
rqb_entry->drq->host_index,
rqb_entry->drq->hba_index);
+ (rqbp->rqb_free_buffer)(phba, rqb_entry);
} else {
list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list);
rqbp->buffer_count++;
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 92d6e7b98770..1ac855640fc5 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -247,7 +247,7 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
list_for_each_entry_safe(iocb, next_iocb, &abort_list, dlist) {
spin_lock_irq(&phba->hbalock);
list_del_init(&iocb->dlist);
- lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+ lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
spin_unlock_irq(&phba->hbalock);
}
@@ -357,7 +357,6 @@ lpfc_defer_acc_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
* Complete the unreg rpi mbx request, and update flags.
* This will also restart any deferred events.
*/
- lpfc_nlp_get(ndlp);
lpfc_sli4_unreg_rpi_cmpl_clr(phba, pmb);
if (!piocb) {
@@ -365,7 +364,7 @@ lpfc_defer_acc_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
"4578 PLOGI ACC fail\n");
if (mbox)
mempool_free(mbox, phba->mbox_mem_pool);
- goto out;
+ return;
}
rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, piocb, ndlp, mbox);
@@ -376,15 +375,12 @@ lpfc_defer_acc_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
mempool_free(mbox, phba->mbox_mem_pool);
}
kfree(piocb);
-out:
- lpfc_nlp_put(ndlp);
}
static int
lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_iocbq *cmdiocb)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
struct lpfc_dmabuf *pcmd;
uint64_t nlp_portwwn = 0;
@@ -588,7 +584,10 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
lpfc_unreg_login(phba, vport->vpi, rpi, link_mbox);
link_mbox->vport = vport;
- link_mbox->ctx_ndlp = ndlp;
+ link_mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
+ if (!link_mbox->ctx_ndlp)
+ goto out;
+
link_mbox->mbox_cmpl = lpfc_defer_acc_rsp;
if (((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
@@ -617,9 +616,9 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
* command issued in lpfc_cmpl_els_acc().
*/
login_mbox->vport = vport;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
/*
* If there is an outstanding PLOGI issued, abort it before
@@ -648,9 +647,9 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
* this ELS request. The only way to do this is
* to register, then unregister the RPI.
*/
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_RM_DFLT_RPI;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
rc = lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
@@ -739,7 +738,6 @@ static int
lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_iocbq *cmdiocb)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_iocbq *elsiocb;
struct lpfc_dmabuf *pcmd;
struct serv_parm *sp;
@@ -821,9 +819,9 @@ out:
/* 1 sec timeout */
mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
ndlp->nlp_prev_state = ndlp->nlp_state;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
@@ -843,9 +841,9 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	/* Only call LOGO ACC for the first LOGO; this avoids sending
	 * unnecessary PLOGIs during LOGO storms from a device.
*/
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_LOGO_ACC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
if (els_cmd == ELS_CMD_PRLO)
lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
else
@@ -890,9 +888,9 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
*/
mod_timer(&ndlp->nlp_delayfunc,
jiffies + msecs_to_jiffies(1000));
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
vport->port_state = LPFC_FDISC;
} else {
@@ -908,9 +906,9 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* Only try to re-login if this is NOT a Fabric Node */
mod_timer(&ndlp->nlp_delayfunc,
jiffies + msecs_to_jiffies(1000 * 1));
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
}
@@ -918,9 +916,9 @@ out:
ndlp->nlp_prev_state = ndlp->nlp_state;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
/* The driver has to wait until the ACC completes before it continues
* processing the LOGO. The action will resume in
* lpfc_cmpl_els_logo_acc routine. Since part of processing includes an
@@ -1036,12 +1034,10 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
static uint32_t
lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
-
if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
return 0;
}
@@ -1050,16 +1046,16 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if (vport->cfg_use_adisc && ((vport->fc_flag & FC_RSCN_MODE) ||
((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) &&
(ndlp->nlp_type & NLP_FCP_TARGET)))) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_NPR_ADISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
return 1;
}
}
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_unreg_rpi(vport, ndlp);
return 0;
}
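
The next hunk, like the similar ones later in this file, closes a window in which
a mailbox completion could run against a node that had already been freed: a
reference is taken before the node pointer is stashed in the asynchronous
context, and a NULL return from lpfc_nlp_get() aborts cleanly when release is
already underway. The shape of the pattern (names are placeholders; obj_get() as
sketched earlier):

	/* ref-before-handoff: the completion path owns this reference */
	req->ctx = obj_get(node);
	if (!req->ctx) {
		free_req(req);		/* node is going away; bail out */
		return;
	}
	submit_async(req);		/* completion calls obj_put(req->ctx) */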
@@ -1104,7 +1100,11 @@ lpfc_release_rpi(struct lpfc_hba *phba, struct lpfc_vport *vport,
lpfc_unreg_login(phba, vport->vpi, rpi, pmb);
pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
pmb->vport = vport;
- pmb->ctx_ndlp = ndlp;
+ pmb->ctx_ndlp = lpfc_nlp_get(ndlp);
+ if (!pmb->ctx_ndlp) {
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return;
+ }
if (((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
(!(vport->fc_flag & FC_OFFLINE_MODE)))
@@ -1192,12 +1192,11 @@ static uint32_t
lpfc_rcv_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_LOGO_ACC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
return ndlp->nlp_state;
@@ -1258,9 +1257,9 @@ lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (lpfc_rcv_plogi(vport, ndlp, cmdiocb) &&
(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
(vport->num_disc_nodes)) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
/* Check if there are more PLOGIs to be sent */
lpfc_more_plogi(vport);
if (vport->num_disc_nodes == 0) {
@@ -1310,7 +1309,6 @@ static uint32_t
lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
@@ -1325,9 +1323,9 @@ lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* Put ndlp in npr state set plogi timer for 1 sec */
mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1));
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
@@ -1342,7 +1340,6 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
uint32_t evt)
{
struct lpfc_hba *phba = vport->phba;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_iocbq *cmdiocb, *rspiocb;
struct lpfc_dmabuf *pcmd, *prsp, *mp;
uint32_t *lp;
@@ -1488,7 +1485,11 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
ndlp->nlp_flag |= NLP_REG_LOGIN_SEND;
mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
}
+
mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
+ if (!mbox->ctx_ndlp)
+ goto out;
+
mbox->vport = vport;
if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
!= MBX_NOT_FINISHED) {
@@ -1537,9 +1538,6 @@ out:
ndlp->nlp_prev_state = ndlp->nlp_state;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
- spin_lock_irq(shost->host_lock);
- ndlp->nlp_flag |= NLP_DEFER_RM;
- spin_unlock_irq(shost->host_lock);
return NLP_STE_FREED_NODE;
}
@@ -1573,12 +1571,10 @@ static uint32_t
lpfc_device_rm_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
-
if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_NODEV_REMOVE;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
return ndlp->nlp_state;
} else {
/* software abort outstanding PLOGI */
@@ -1595,7 +1591,6 @@ lpfc_device_recov_plogi_issue(struct lpfc_vport *vport,
void *arg,
uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
/* Don't do anything that will mess up processing of the
@@ -1609,9 +1604,9 @@ lpfc_device_recov_plogi_issue(struct lpfc_vport *vport,
ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
return ndlp->nlp_state;
}
@@ -1620,7 +1615,6 @@ static uint32_t
lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocb;
@@ -1631,9 +1625,9 @@ lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
if (vport->num_disc_nodes)
lpfc_more_adisc(vport);
}
@@ -1704,7 +1698,6 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocb, *rspiocb;
IOCB_t *irsp;
@@ -1722,9 +1715,9 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
/* 1 sec timeout */
mod_timer(&ndlp->nlp_delayfunc,
jiffies + msecs_to_jiffies(1000));
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
memset(&ndlp->nlp_nodename, 0, sizeof(struct lpfc_name));
@@ -1766,12 +1759,10 @@ static uint32_t
lpfc_device_rm_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
-
if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_NODEV_REMOVE;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
return ndlp->nlp_state;
} else {
/* software abort outstanding ADISC */
@@ -1788,7 +1779,6 @@ lpfc_device_recov_adisc_issue(struct lpfc_vport *vport,
void *arg,
uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
/* Don't do anything that will mess up processing of the
@@ -1802,9 +1792,9 @@ lpfc_device_recov_adisc_issue(struct lpfc_vport *vport,
ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_disc_set_adisc(vport, ndlp);
return ndlp->nlp_state;
}
@@ -1907,7 +1897,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
/* software abort if any GID_FT is outstanding */
if (vport->cfg_enable_fc4_type != LPFC_ENABLE_FCP) {
ns_ndlp = lpfc_findnode_did(vport, NameServer_DID);
- if (ns_ndlp && NLP_CHK_NODE_ACT(ns_ndlp))
+ if (ns_ndlp)
lpfc_els_abort(phba, ns_ndlp);
}
@@ -1946,7 +1936,6 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
void *arg,
uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
MAILBOX_t *mb = &pmb->u.mb;
@@ -1973,9 +1962,9 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
/* Put ndlp in npr state set plogi timer for 1 sec */
mod_timer(&ndlp->nlp_delayfunc,
jiffies + msecs_to_jiffies(1000 * 1));
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
lpfc_issue_els_logo(vport, ndlp, 0);
@@ -2058,12 +2047,10 @@ lpfc_device_rm_reglogin_issue(struct lpfc_vport *vport,
void *arg,
uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
-
if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_NODEV_REMOVE;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
return ndlp->nlp_state;
} else {
lpfc_drop_node(vport, ndlp);
@@ -2077,8 +2064,6 @@ lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport,
void *arg,
uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
-
/* Don't do anything that will mess up processing of the
* previous RSCN.
*/
@@ -2087,7 +2072,7 @@ lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport,
ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
/* If we are a target we won't immediately transition into PRLI,
* so if REG_LOGIN already completed we don't need to ignore it.
@@ -2097,7 +2082,7 @@ lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport,
ndlp->nlp_flag |= NLP_IGNR_REG_CMPL;
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_disc_set_adisc(vport, ndlp);
return ndlp->nlp_state;
}
@@ -2168,7 +2153,6 @@ static uint32_t
lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_iocbq *cmdiocb, *rspiocb;
struct lpfc_hba *phba = vport->phba;
IOCB_t *irsp;
@@ -2290,9 +2274,9 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
(vport->port_type == LPFC_NPIV_PORT) &&
vport->cfg_restrict_login) {
out:
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_TARGET_REMOVE;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_issue_els_logo(vport, ndlp, 0);
ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
@@ -2344,12 +2328,10 @@ static uint32_t
lpfc_device_rm_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
-
if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_NODEV_REMOVE;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
return ndlp->nlp_state;
} else {
/* software abort outstanding PLOGI */
@@ -2383,7 +2365,6 @@ lpfc_device_recov_prli_issue(struct lpfc_vport *vport,
void *arg,
uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
/* Don't do anything that will mess up processing of the
@@ -2397,9 +2378,9 @@ lpfc_device_recov_prli_issue(struct lpfc_vport *vport,
ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_disc_set_adisc(vport, ndlp);
return ndlp->nlp_state;
}
@@ -2436,12 +2417,11 @@ static uint32_t
lpfc_rcv_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_LOGO_ACC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
return ndlp->nlp_state;
}
@@ -2478,13 +2458,11 @@ static uint32_t
lpfc_cmpl_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
-
ndlp->nlp_prev_state = NLP_STE_LOGO_ISSUE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_disc_set_adisc(vport, ndlp);
return ndlp->nlp_state;
}
@@ -2578,14 +2556,12 @@ lpfc_device_recov_unmap_node(struct lpfc_vport *vport,
void *arg,
uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
-
ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_disc_set_adisc(vport, ndlp);
return ndlp->nlp_state;
@@ -2656,14 +2632,12 @@ lpfc_device_recov_mapped_node(struct lpfc_vport *vport,
void *arg,
uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
-
ndlp->nlp_prev_state = NLP_STE_MAPPED_NODE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_disc_set_adisc(vport, ndlp);
return ndlp->nlp_state;
}
@@ -2672,7 +2646,6 @@ static uint32_t
lpfc_rcv_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
/* Ignore PLOGI if we have an outstanding LOGO */
@@ -2680,9 +2653,9 @@ lpfc_rcv_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return ndlp->nlp_state;
if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
lpfc_cancel_retry_delay_tmo(vport, ndlp);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~(NLP_NPR_ADISC | NLP_NPR_2B_DISC);
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
} else if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
/* send PLOGI immediately, move to PLOGI issue state */
if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
@@ -2698,7 +2671,6 @@ static uint32_t
lpfc_rcv_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
struct ls_rjt stat;
@@ -2709,10 +2681,10 @@ lpfc_rcv_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
if (ndlp->nlp_flag & NLP_NPR_ADISC) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
lpfc_issue_els_adisc(vport, ndlp, 0);
} else {
@@ -2766,27 +2738,26 @@ static uint32_t
lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_LOGO_ACC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) {
mod_timer(&ndlp->nlp_delayfunc,
jiffies + msecs_to_jiffies(1000 * 1));
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
} else {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
}
return ndlp->nlp_state;
}
@@ -2797,16 +2768,12 @@ lpfc_cmpl_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
struct lpfc_iocbq *cmdiocb, *rspiocb;
IOCB_t *irsp;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
cmdiocb = (struct lpfc_iocbq *) arg;
rspiocb = cmdiocb->context_un.rsp_iocb;
irsp = &rspiocb->iocb;
if (irsp->ulpStatus) {
- spin_lock_irq(shost->host_lock);
- ndlp->nlp_flag |= NLP_DEFER_RM;
- spin_unlock_irq(shost->host_lock);
return NLP_STE_FREED_NODE;
}
return ndlp->nlp_state;
@@ -2893,12 +2860,10 @@ static uint32_t
lpfc_device_rm_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
-
if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_NODEV_REMOVE;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
return ndlp->nlp_state;
}
lpfc_drop_node(vport, ndlp);
@@ -2909,8 +2874,6 @@ static uint32_t
lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
-
/* Don't do anything that will mess up processing of the
* previous RSCN.
*/
@@ -2918,10 +2881,10 @@ lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return ndlp->nlp_state;
lpfc_cancel_retry_delay_tmo(vport, ndlp);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
return ndlp->nlp_state;
}
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 0c39ed50998c..39d147e251bf 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -62,180 +62,9 @@ lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_io_buf *);
static struct nvme_fc_port_template lpfc_nvme_template;
-static union lpfc_wqe128 lpfc_iread_cmd_template;
-static union lpfc_wqe128 lpfc_iwrite_cmd_template;
-static union lpfc_wqe128 lpfc_icmnd_cmd_template;
-
-/* Setup WQE templates for NVME IOs */
-void
-lpfc_nvme_cmd_template(void)
-{
- union lpfc_wqe128 *wqe;
-
- /* IREAD template */
- wqe = &lpfc_iread_cmd_template;
- memset(wqe, 0, sizeof(union lpfc_wqe128));
-
- /* Word 0, 1, 2 - BDE is variable */
-
- /* Word 3 - cmd_buff_len, payload_offset_len is zero */
-
- /* Word 4 - total_xfer_len is variable */
-
- /* Word 5 - is zero */
-
- /* Word 6 - ctxt_tag, xri_tag is variable */
-
- /* Word 7 */
- bf_set(wqe_cmnd, &wqe->fcp_iread.wqe_com, CMD_FCP_IREAD64_WQE);
- bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, PARM_READ_CHECK);
- bf_set(wqe_class, &wqe->fcp_iread.wqe_com, CLASS3);
- bf_set(wqe_ct, &wqe->fcp_iread.wqe_com, SLI4_CT_RPI);
-
- /* Word 8 - abort_tag is variable */
-
- /* Word 9 - reqtag is variable */
-
- /* Word 10 - dbde, wqes is variable */
- bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0);
- bf_set(wqe_nvme, &wqe->fcp_iread.wqe_com, 1);
- bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
- bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, LPFC_WQE_LENLOC_WORD4);
- bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
- bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);
-
- /* Word 11 - pbde is variable */
- bf_set(wqe_cmd_type, &wqe->fcp_iread.wqe_com, NVME_READ_CMD);
- bf_set(wqe_cqid, &wqe->fcp_iread.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
- bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1);
-
- /* Word 12 - is zero */
-
- /* Word 13, 14, 15 - PBDE is variable */
-
- /* IWRITE template */
- wqe = &lpfc_iwrite_cmd_template;
- memset(wqe, 0, sizeof(union lpfc_wqe128));
-
- /* Word 0, 1, 2 - BDE is variable */
-
- /* Word 3 - cmd_buff_len, payload_offset_len is zero */
-
- /* Word 4 - total_xfer_len is variable */
-
- /* Word 5 - initial_xfer_len is variable */
-
- /* Word 6 - ctxt_tag, xri_tag is variable */
-
- /* Word 7 */
- bf_set(wqe_cmnd, &wqe->fcp_iwrite.wqe_com, CMD_FCP_IWRITE64_WQE);
- bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, PARM_READ_CHECK);
- bf_set(wqe_class, &wqe->fcp_iwrite.wqe_com, CLASS3);
- bf_set(wqe_ct, &wqe->fcp_iwrite.wqe_com, SLI4_CT_RPI);
-
- /* Word 8 - abort_tag is variable */
-
- /* Word 9 - reqtag is variable */
-
- /* Word 10 - dbde, wqes is variable */
- bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0);
- bf_set(wqe_nvme, &wqe->fcp_iwrite.wqe_com, 1);
- bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
- bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_LENLOC_WORD4);
- bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
- bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
-
- /* Word 11 - pbde is variable */
- bf_set(wqe_cmd_type, &wqe->fcp_iwrite.wqe_com, NVME_WRITE_CMD);
- bf_set(wqe_cqid, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
- bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1);
-
- /* Word 12 - is zero */
-
- /* Word 13, 14, 15 - PBDE is variable */
-
- /* ICMND template */
- wqe = &lpfc_icmnd_cmd_template;
- memset(wqe, 0, sizeof(union lpfc_wqe128));
-
- /* Word 0, 1, 2 - BDE is variable */
-
- /* Word 3 - payload_offset_len is variable */
-
- /* Word 4, 5 - is zero */
-
- /* Word 6 - ctxt_tag, xri_tag is variable */
-
- /* Word 7 */
- bf_set(wqe_cmnd, &wqe->fcp_icmd.wqe_com, CMD_FCP_ICMND64_WQE);
- bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
- bf_set(wqe_class, &wqe->fcp_icmd.wqe_com, CLASS3);
- bf_set(wqe_ct, &wqe->fcp_icmd.wqe_com, SLI4_CT_RPI);
-
- /* Word 8 - abort_tag is variable */
-
- /* Word 9 - reqtag is variable */
-
- /* Word 10 - dbde, wqes is variable */
- bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
- bf_set(wqe_nvme, &wqe->fcp_icmd.wqe_com, 1);
- bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_NONE);
- bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, LPFC_WQE_LENLOC_NONE);
- bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
- bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
-
- /* Word 11 */
- bf_set(wqe_cmd_type, &wqe->fcp_icmd.wqe_com, FCP_COMMAND);
- bf_set(wqe_cqid, &wqe->fcp_icmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
- bf_set(wqe_pbde, &wqe->fcp_icmd.wqe_com, 0);
-
- /* Word 12, 13, 14, 15 - is zero */
-}
-
-/**
- * lpfc_nvme_prep_abort_wqe - set up 'abort' work queue entry.
- * @pwqeq: Pointer to command iocb.
- * @xritag: Tag that uniqely identifies the local exchange resource.
- * @opt: Option bits -
- * bit 0 = inhibit sending abts on the link
- *
- * This function is called with hbalock held.
- **/
-void
-lpfc_nvme_prep_abort_wqe(struct lpfc_iocbq *pwqeq, u16 xritag, u8 opt)
-{
- union lpfc_wqe128 *wqe = &pwqeq->wqe;
-
- /* WQEs are reused. Clear stale data and set key fields to
- * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
- */
- memset(wqe, 0, sizeof(*wqe));
-
- if (opt & INHIBIT_ABORT)
- bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
- /* Abort specified xri tag, with the mask deliberately zeroed */
- bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
-
- bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
-
- /* Abort the IO associated with this outstanding exchange ID. */
- wqe->abort_cmd.wqe_com.abort_tag = xritag;
-
- /* iotag for the wqe completion. */
- bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, pwqeq->iotag);
-
- bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
- bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
-
- bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND);
- bf_set(wqe_wqec, &wqe->abort_cmd.wqe_com, 1);
- bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
-}
-
/**
* lpfc_nvme_create_queue -
* @pnvme_lport: Transport localport that LS is to be issued from
- * @lpfc_pnvme: Pointer to the driver's nvme instance data
 * @qidx: A CPU index used to affinitize IO queues and MSIX vectors.
* @qsize: Size of the queue in bytes
* @handle: An opaque driver handle used in follow-up calls.
@@ -357,39 +186,47 @@ lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
struct lpfc_nvme_rport *rport = remoteport->private;
struct lpfc_vport *vport;
struct lpfc_nodelist *ndlp;
+ u32 fc4_xpt_flags;
ndlp = rport->ndlp;
- if (!ndlp)
+ if (!ndlp) {
+ pr_err("**** %s: NULL ndlp on rport %p remoteport %p\n",
+ __func__, rport, remoteport);
goto rport_err;
+ }
vport = ndlp->vport;
- if (!vport)
+ if (!vport) {
+ pr_err("**** %s: Null vport on ndlp %p, ste x%x rport %p\n",
+ __func__, ndlp, ndlp->nlp_state, rport);
goto rport_err;
+ }
+
+ fc4_xpt_flags = NVME_XPT_REGD | SCSI_XPT_REGD;
/* Remove this rport from the lport's list - memory is owned by the
* transport. Remove the ndlp reference for the NVME transport before
* calling state machine to remove the node.
*/
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
- "6146 remoteport delete of remoteport x%px\n",
+ "6146 remoteport delete of remoteport %p\n",
remoteport);
- spin_lock_irq(&vport->phba->hbalock);
+ spin_lock_irq(&ndlp->lock);
/* The register rebind might have occurred before the delete
* downcall. Guard against this race.
*/
- if (ndlp->upcall_flags & NLP_WAIT_FOR_UNREG) {
- ndlp->nrport = NULL;
- ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
- spin_unlock_irq(&vport->phba->hbalock);
+ if (ndlp->fc4_xpt_flags & NLP_WAIT_FOR_UNREG)
+ ndlp->fc4_xpt_flags &= ~(NLP_WAIT_FOR_UNREG | NVME_XPT_REGD);
- /* Remove original register reference. The host transport
- * won't reference this rport/remoteport any further.
- */
- lpfc_nlp_put(ndlp);
- } else {
- spin_unlock_irq(&vport->phba->hbalock);
- }
+ spin_unlock_irq(&ndlp->lock);
+
+ /* On a devloss timeout event, one more put is executed provided the
+ * NVME and SCSI rport unregister requests are complete. If the vport
+ * is unloading, this extra put is executed by lpfc_drop_node.
+ */
+ if (!(ndlp->fc4_xpt_flags & fc4_xpt_flags))
+ lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
rport_err:
return;
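
Editor's note on the new teardown gate: NVME_XPT_REGD and SCSI_XPT_REGD track which transports still hold a registration on the node, so whichever unregister callback runs last sees both bits clear and drives the final removal. A hedged illustration of the sequence:

	/* Illustration only, not driver code:
	 *
	 *   start:            fc4_xpt_flags = NVME_XPT_REGD | SCSI_XPT_REGD
	 *   NVME delete runs: clears NVME_XPT_REGD -> SCSI bit still set,
	 *                     skip DEVICE_RM
	 *   SCSI delete runs: clears SCSI_XPT_REGD -> both bits clear,
	 *                     issue DEVICE_RM
	 */
	if (!(ndlp->fc4_xpt_flags & (NVME_XPT_REGD | SCSI_XPT_REGD)))
		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);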
@@ -567,6 +404,13 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
/* Save for completion so we can release these resources */
genwqe->context1 = lpfc_nlp_get(ndlp);
+ if (!genwqe->context1) {
+ dev_warn(&phba->pcidev->dev,
+ "Warning: Failed node ref, not sending LS_REQ\n");
+ lpfc_sli_release_iocbq(phba, genwqe);
+ return 1;
+ }
+
genwqe->context2 = (uint8_t *)pnvme_lsreq;
/* Fill in payload, bp points to frame payload */
@@ -654,6 +498,7 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
"Data: x%x x%x rc x%x\n",
ndlp->nlp_DID, genwqe->iotag,
vport->port_state, rc);
+ lpfc_nlp_put(ndlp);
lpfc_sli_release_iocbq(phba, genwqe);
return 1;
}
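
The two hunks above form one reference-counting pattern: take a node reference before handing the ndlp to an asynchronous WQE, and balance it on every early-exit path. A minimal sketch of the shape (assuming lpfc_nlp_get() now returns NULL once the node is already being released, as the guarded assignment above implies):

	genwqe->context1 = lpfc_nlp_get(ndlp);
	if (!genwqe->context1)
		return 1;               /* no reference taken, nothing to undo */

	if (lpfc_sli4_issue_wqe(phba, hdwq, genwqe) != WQE_SUCCESS) {
		lpfc_nlp_put(ndlp);     /* undo the get on the failure path */
		lpfc_sli_release_iocbq(phba, genwqe);
		return 1;
	}
	/* on success the completion handler owns the reference */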
@@ -695,7 +540,7 @@ __lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
int ret;
uint16_t ntype, nstate;
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+ if (!ndlp) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6051 NVMEx LS REQ: Bad NDLP x%px, Failing "
"LS Req\n",
@@ -714,6 +559,9 @@ __lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return -ENODEV;
}
+ if (!vport->phba->sli4_hba.nvmels_wq)
+ return -ENOMEM;
+
/*
 * there are two dma bufs in the request; actually there is one, and
 * the second one is just the start address + cmd size.
@@ -787,7 +635,7 @@ __lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/**
* lpfc_nvme_ls_req - Issue an NVME Link Service request
* @pnvme_lport: Transport localport that LS is to be issued from.
- * @nvme_rport: Transport remoteport that LS is to be sent to.
+ * @pnvme_rport: Transport remoteport that LS is to be sent to.
* @pnvme_lsreq: the transport nvme_ls_req structure for the LS
*
* Driver registers this routine to handle any link service request
@@ -881,7 +729,7 @@ __lpfc_nvme_ls_abort(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
spin_unlock(&pring->ring_lock);
if (foundit)
- lpfc_sli_issue_abort_iotag(phba, pring, wqe);
+ lpfc_sli_issue_abort_iotag(phba, pring, wqe, NULL);
spin_unlock_irq(&phba->hbalock);
if (foundit)
@@ -940,7 +788,6 @@ lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
{
struct lpfc_nvme_lport *lport;
struct lpfc_vport *vport;
- struct lpfc_hba *phba;
struct lpfc_nodelist *ndlp;
int ret;
@@ -948,7 +795,6 @@ lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
if (unlikely(!lport))
return;
vport = lport->vport;
- phba = vport->phba;
if (vport->load_flag & FC_UNLOADING)
return;
@@ -1134,7 +980,7 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
* transport is still transitioning.
*/
ndlp = lpfc_ncmd->ndlp;
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+ if (!ndlp) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6062 Ignoring NVME cmpl. No ndlp\n");
goto out_err;
@@ -1292,7 +1138,7 @@ out_err:
/**
* lpfc_nvme_prep_io_cmd - Issue an NVME-over-FCP IO
* @vport: pointer to a host virtual N_Port data structure
- * @lpfcn_cmd: Pointer to lpfc scsi command
+ * @lpfc_ncmd: Pointer to lpfc scsi command
* @pnode: pointer to a node-list data structure
* @cstat: pointer to the control status structure
*
@@ -1316,9 +1162,6 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
union lpfc_wqe128 *wqe = &pwqeq->wqe;
uint32_t req_len;
- if (!NLP_CHK_NODE_ACT(pnode))
- return -EINVAL;
-
/*
* There are three possibilities here - use scatter-gather segment, use
* the single mapping, or neither.
@@ -1390,6 +1233,9 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
/* Word 9 */
bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);
+ /* Word 10 */
+ bf_set(wqe_xchg, &wqe->fcp_iwrite.wqe_com, LPFC_NVME_XCHG);
+
/* Words 13 14 15 are for PBDE support */
pwqeq->vport = vport;
@@ -1400,7 +1246,7 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
/**
* lpfc_nvme_prep_io_dma - Issue an NVME-over-FCP IO
* @vport: pointer to a host virtual N_Port data structure
- * @lpfcn_cmd: Pointer to lpfc scsi command
+ * @lpfc_ncmd: Pointer to lpfc scsi command
*
 * Driver registers this routine as its io request handler. This
* routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq
@@ -1557,7 +1403,9 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
le32_to_cpu(first_data_sgl->sge_len);
bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
bde->tus.w = cpu_to_le32(bde->tus.w);
- /* wqe_pbde is 1 in template */
+
+ /* Word 11 */
+ bf_set(wqe_pbde, &wqe->generic.wqe_com, 1);
} else {
memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
bf_set(wqe_pbde, &wqe->generic.wqe_com, 0);
@@ -1582,16 +1430,14 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
/**
* lpfc_nvme_fcp_io_submit - Issue an NVME-over-FCP IO
- * @lpfc_pnvme: Pointer to the driver's nvme instance data
- * @lpfc_nvme_lport: Pointer to the driver's local port data
- * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
- * @lpfc_nvme_fcreq: IO request from nvme fc to driver.
+ * @pnvme_lport: Pointer to the driver's local port data
+ * @pnvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
* @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue
+ * @pnvme_fcreq: IO request from nvme fc to driver.
*
 * Driver registers this routine as its io request handler. This
* routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq
- * data structure to the rport
- indicated in @lpfc_nvme_rport.
+ * data structure to the rport indicated in @lpfc_nvme_rport.
*
* Return value :
* 0 - Success
@@ -1670,7 +1516,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
* transport is still transitioning.
*/
ndlp = rport->ndlp;
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+ if (!ndlp) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
"6053 Busy IO, ndlp not ready: rport x%px "
"ndlp x%px, DID x%06x\n",
@@ -1688,7 +1534,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
"IO. State x%x, Type x%x Flg x%x\n",
pnvme_rport->port_id,
ndlp->nlp_state, ndlp->nlp_type,
- ndlp->upcall_flags);
+ ndlp->fc4_xpt_flags);
atomic_inc(&lport->xmt_fcp_bad_ndlp);
ret = -EBUSY;
goto out_fail;
@@ -1839,7 +1685,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
* lpfc_nvme_abort_fcreq_cmpl - Complete an NVME FCP abort request.
* @phba: Pointer to HBA context object
* @cmdiocb: Pointer to command iocb object.
- * @rspiocb: Pointer to response iocb object.
+ * @abts_cmpl: Pointer to wcqe complete object.
*
* This is the callback function for any NVME FCP IO that was aborted.
*
@@ -1865,11 +1711,10 @@ lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/**
* lpfc_nvme_fcp_abort - Issue an NVME-over-FCP ABTS
- * @lpfc_pnvme: Pointer to the driver's nvme instance data
- * @lpfc_nvme_lport: Pointer to the driver's local port data
- * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
- * @lpfc_nvme_fcreq: IO request from nvme fc to driver.
+ * @pnvme_lport: Pointer to the driver's local port data
+ * @pnvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
* @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue
+ * @pnvme_fcreq: IO request from nvme fc to driver.
*
* Driver registers this routine as its nvme request io abort handler. This
* routine issues an fcp Abort WQE with data from the @lpfc_nvme_fcpreq
@@ -1890,7 +1735,6 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_vport *vport;
struct lpfc_hba *phba;
struct lpfc_io_buf *lpfc_nbuf;
- struct lpfc_iocbq *abts_buf;
struct lpfc_iocbq *nvmereq_wqe;
struct lpfc_nvme_fcpreq_priv *freqpriv;
unsigned long flags;
@@ -2001,42 +1845,23 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
goto out_unlock;
}
- abts_buf = __lpfc_sli_get_iocbq(phba);
- if (!abts_buf) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
- "6136 No available abort wqes. Skipping "
- "Abts req for nvme_fcreq x%px xri x%x\n",
- pnvme_fcreq, nvmereq_wqe->sli4_xritag);
- goto out_unlock;
- }
+ ret_val = lpfc_sli4_issue_abort_iotag(phba, nvmereq_wqe,
+ lpfc_nvme_abort_fcreq_cmpl);
- /* Ready - mark outstanding as aborted by driver. */
- nvmereq_wqe->iocb_flag |= LPFC_DRIVER_ABORTED;
-
- lpfc_nvme_prep_abort_wqe(abts_buf, nvmereq_wqe->sli4_xritag, 0);
-
- /* ABTS WQE must go to the same WQ as the WQE to be aborted */
- abts_buf->iocb_flag |= LPFC_IO_NVME;
- abts_buf->hba_wqidx = nvmereq_wqe->hba_wqidx;
- abts_buf->vport = vport;
- abts_buf->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
- ret_val = lpfc_sli4_issue_wqe(phba, lpfc_nbuf->hdwq, abts_buf);
spin_unlock(&lpfc_nbuf->buf_lock);
spin_unlock_irqrestore(&phba->hbalock, flags);
- if (ret_val) {
+ if (ret_val != WQE_SUCCESS) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6137 Failed abts issue_wqe with status x%x "
"for nvme_fcreq x%px.\n",
ret_val, pnvme_fcreq);
- lpfc_sli_release_iocbq(phba, abts_buf);
return;
}
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
"6138 Transport Abort NVME Request Issued for "
- "ox_id x%x on reqtag x%x\n",
- nvmereq_wqe->sli4_xritag,
- abts_buf->iotag);
+ "ox_id x%x\n",
+ nvmereq_wqe->sli4_xritag);
return;
out_unlock:
@@ -2072,9 +1897,8 @@ static struct nvme_fc_port_template lpfc_nvme_template = {
.fcprqst_priv_sz = sizeof(struct lpfc_nvme_fcpreq_priv),
};
-/**
+/*
* lpfc_get_nvme_buf - Get a nvme buffer from io_buf_list of the HBA
- * @phba: The HBA for which this call is being executed.
*
* This routine removes a nvme buffer from head of @hdwq io_buf_list
* and returns to caller.
@@ -2174,7 +1998,7 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd)
/**
* lpfc_nvme_create_localport - Create/Bind an nvme localport instance.
- * @pvport - the lpfc_vport instance requesting a localport.
+ * @vport: the lpfc_vport instance requesting a localport.
*
* This routine is invoked to create an nvme localport instance to bind
* to the nvme_fc_transport. It is called once during driver load
@@ -2280,6 +2104,8 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
int ret, i, pending = 0;
struct lpfc_sli_ring *pring;
struct lpfc_hba *phba = vport->phba;
+ struct lpfc_sli4_hdw_queue *qp;
+ int abts_scsi, abts_nvme;
/* Host transport has to clean up and confirm requiring an indefinite
* wait. Print a message if a 10 second wait expires and renew the
@@ -2290,17 +2116,23 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
ret = wait_for_completion_timeout(lport_unreg_cmp, wait_tmo);
if (unlikely(!ret)) {
pending = 0;
+ abts_scsi = 0;
+ abts_nvme = 0;
for (i = 0; i < phba->cfg_hdw_queue; i++) {
- pring = phba->sli4_hba.hdwq[i].io_wq->pring;
+ qp = &phba->sli4_hba.hdwq[i];
+ pring = qp->io_wq->pring;
if (!pring)
continue;
- if (pring->txcmplq_cnt)
- pending += pring->txcmplq_cnt;
+ pending += pring->txcmplq_cnt;
+ abts_scsi += qp->abts_scsi_io_bufs;
+ abts_nvme += qp->abts_nvme_io_bufs;
}
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6176 Lport x%px Localport x%px wait "
- "timed out. Pending %d. Renewing.\n",
- lport, vport->localport, pending);
+ "timed out. Pending %d [%d:%d]. "
+ "Renewing.\n",
+ lport, vport->localport, pending,
+ abts_scsi, abts_nvme);
continue;
}
break;
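
Editor's note: the loop above trades one unbounded wait for a series of bounded ones, so a wedged unregister shows up in the log every wait_tmo jiffies instead of hanging silently. The skeleton of the pattern, stripped of the lpfc-specific accounting:

	/* Pattern sketch: renewable bounded wait with progress logging */
	while (!wait_for_completion_timeout(lport_unreg_cmp, wait_tmo)) {
		/* timed out: tally outstanding IOs, log them, wait again */
		pr_warn("localport unregister still pending, renewing wait\n");
	}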
@@ -2313,7 +2145,7 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
/**
* lpfc_nvme_destroy_localport - Destroy lpfc_nvme bound to nvme transport.
- * @pnvme: pointer to lpfc nvme data structure.
+ * @vport: pointer to a host virtual N_Port data structure
*
* This routine is invoked to destroy all lports bound to the phba.
* The lport memory was allocated by the nvme fc transport and is
@@ -2454,14 +2286,18 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
else
rpinfo.dev_loss_tmo = vport->cfg_devloss_tmo;
- spin_lock_irq(&vport->phba->hbalock);
+ spin_lock_irq(&ndlp->lock);
oldrport = lpfc_ndlp_get_nrport(ndlp);
if (oldrport) {
prev_ndlp = oldrport->ndlp;
- spin_unlock_irq(&vport->phba->hbalock);
+ spin_unlock_irq(&ndlp->lock);
} else {
- spin_unlock_irq(&vport->phba->hbalock);
- lpfc_nlp_get(ndlp);
+ spin_unlock_irq(&ndlp->lock);
+ if (!lpfc_nlp_get(ndlp)) {
+ dev_warn(&vport->phba->pcidev->dev,
+ "Warning - No node ref - exit register\n");
+ return 0;
+ }
}
ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port);
@@ -2473,9 +2309,10 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
/* Guard against an unregister/reregister
* race that leaves the WAIT flag set.
*/
- spin_lock_irq(&vport->phba->hbalock);
- ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
- spin_unlock_irq(&vport->phba->hbalock);
+ spin_lock_irq(&ndlp->lock);
+ ndlp->fc4_xpt_flags &= ~NLP_WAIT_FOR_UNREG;
+ ndlp->fc4_xpt_flags |= NVME_XPT_REGD;
+ spin_unlock_irq(&ndlp->lock);
rport = remote_port->private;
if (oldrport) {
@@ -2483,10 +2320,10 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* before dropping the ndlp ref from
* register.
*/
- spin_lock_irq(&vport->phba->hbalock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nrport = NULL;
- ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
- spin_unlock_irq(&vport->phba->hbalock);
+ ndlp->fc4_xpt_flags &= ~NLP_WAIT_FOR_UNREG;
+ spin_unlock_irq(&ndlp->lock);
rport->ndlp = NULL;
rport->remoteport = NULL;
@@ -2495,8 +2332,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* reference would cause a premature cleanup.
*/
if (prev_ndlp && prev_ndlp != ndlp) {
- if ((!NLP_CHK_NODE_ACT(prev_ndlp)) ||
- (!prev_ndlp->nrport))
+ if (!prev_ndlp->nrport)
lpfc_nlp_put(prev_ndlp);
}
}
@@ -2505,9 +2341,9 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
rport->remoteport = remote_port;
rport->lport = lport;
rport->ndlp = ndlp;
- spin_lock_irq(&vport->phba->hbalock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nrport = rport;
- spin_unlock_irq(&vport->phba->hbalock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_printf_vlog(vport, KERN_INFO,
LOG_NVME_DISC | LOG_NODE,
"6022 Bind lport x%px to remoteport x%px "
@@ -2532,7 +2368,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
#endif
}
-/**
+/*
* lpfc_nvme_rescan_port - Check to see if we should rescan this remoteport
*
* If the ndlp represents an NVME Target, that we are logged into,
@@ -2546,11 +2382,11 @@ lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
struct lpfc_nvme_rport *nrport;
struct nvme_fc_remote_port *remoteport = NULL;
- spin_lock_irq(&vport->phba->hbalock);
+ spin_lock_irq(&ndlp->lock);
nrport = lpfc_ndlp_get_nrport(ndlp);
if (nrport)
remoteport = nrport->remoteport;
- spin_unlock_irq(&vport->phba->hbalock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
"6170 Rescan NPort DID x%06x type x%x "
@@ -2613,20 +2449,21 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if (!lport)
goto input_err;
- spin_lock_irq(&vport->phba->hbalock);
+ spin_lock_irq(&ndlp->lock);
rport = lpfc_ndlp_get_nrport(ndlp);
if (rport)
remoteport = rport->remoteport;
- spin_unlock_irq(&vport->phba->hbalock);
+ spin_unlock_irq(&ndlp->lock);
if (!remoteport)
goto input_err;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
"6033 Unreg nvme remoteport x%px, portname x%llx, "
- "port_id x%06x, portstate x%x port type x%x\n",
+ "port_id x%06x, portstate x%x port type x%x "
+ "refcnt %d\n",
remoteport, remoteport->port_name,
remoteport->port_id, remoteport->port_state,
- ndlp->nlp_type);
+ ndlp->nlp_type, kref_read(&ndlp->kref));
/* Sanity check ndlp type. Only call for NVME ports. Don't
* clear any rport state until the transport calls back.
@@ -2636,7 +2473,9 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
/* No concern about the role change on the nvme remoteport.
* The transport will update it.
*/
- ndlp->upcall_flags |= NLP_WAIT_FOR_UNREG;
+ spin_lock_irq(&vport->phba->hbalock);
+ ndlp->fc4_xpt_flags |= NLP_WAIT_FOR_UNREG;
+ spin_unlock_irq(&vport->phba->hbalock);
/* Don't let the host nvme transport keep sending keep-alives
* on this remoteport. Vport is unloading, no recovery. The
@@ -2647,8 +2486,15 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
(void)nvme_fc_set_remoteport_devloss(remoteport, 0);
ret = nvme_fc_unregister_remoteport(remoteport);
+
+ /* The driver no longer knows if the nrport memory is valid.
+ * because the controller teardown process has begun and
+ * is asynchronous. Break the binding in the ndlp. Also
+ * remove the register ndlp reference to setup node release.
+ */
+ ndlp->nrport = NULL;
+ lpfc_nlp_put(ndlp);
if (ret != 0) {
- lpfc_nlp_put(ndlp);
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6167 NVME unregister failed %d "
"port_state x%x\n",
diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h
index 4a4c3f780e1f..69a5a844c69c 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.h
+++ b/drivers/scsi/lpfc/lpfc_nvme.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -38,7 +38,7 @@
#define LPFC_NVME_INFO_MORE_STR "\nCould be more info...\n"
#define lpfc_ndlp_get_nrport(ndlp) \
- ((!ndlp->nrport || (ndlp->upcall_flags & NLP_WAIT_FOR_UNREG)) \
+ ((!ndlp->nrport || (ndlp->fc4_xpt_flags & NLP_WAIT_FOR_UNREG)) \
? NULL : ndlp->nrport)
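
A typical read of the macro (hedged, mirroring the callers later in this diff): the nrport must be sampled under the node lock, and a NULL result covers both "no rport" and "rport mid-unregister", since NLP_WAIT_FOR_UNREG forces NULL even while ndlp->nrport is still populated:

	spin_lock_irq(&ndlp->lock);
	nrport = lpfc_ndlp_get_nrport(ndlp);
	if (nrport)
		remoteport = nrport->remoteport;
	spin_unlock_irq(&ndlp->lock);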
struct lpfc_nvme_qhandle {
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index d4ade7cdb93a..a71df8788fff 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
 * Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -105,7 +105,7 @@ lpfc_nvmet_cmd_template(void)
/* Word 9 - reqtag, rcvoxid is variable */
/* Word 10 - wqes, xc is variable */
- bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
+ bf_set(wqe_xchg, &wqe->fcp_tsend.wqe_com, LPFC_NVME_XCHG);
bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);
bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1);
@@ -153,7 +153,7 @@ lpfc_nvmet_cmd_template(void)
/* Word 10 - xc is variable */
bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1);
bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0);
- bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
+ bf_set(wqe_xchg, &wqe->fcp_treceive.wqe_com, LPFC_NVME_XCHG);
bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ);
bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com, LPFC_WQE_LENLOC_WORD12);
bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1);
@@ -195,7 +195,7 @@ lpfc_nvmet_cmd_template(void)
/* Word 10 wqes, xc is variable */
bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 1);
- bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
+ bf_set(wqe_xchg, &wqe->fcp_trsp.wqe_com, LPFC_NVME_XCHG);
bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 0);
bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_NONE);
@@ -371,8 +371,7 @@ finish:
/**
* lpfc_nvmet_ctxbuf_post - Repost a NVMET RQ DMA buffer and clean up context
* @phba: HBA buffer is associated with
- * @ctxp: context to clean up
- * @mp: Buffer to free
+ * @ctx_buf: ctx buffer context
*
* Description: Frees the given DMA buffer in the appropriate way given by
* reposting it to its associated RQ so it can be reused.
@@ -1291,10 +1290,10 @@ lpfc_nvmet_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
/**
* lpfc_nvmet_ls_req - Issue an Link Service request
- * @targetport - pointer to target instance registered with nvmet transport.
- * @hosthandle - hosthandle set by the driver in a prior ls_rqst_rcv.
+ * @targetport: pointer to target instance registered with nvmet transport.
+ * @hosthandle: hosthandle set by the driver in a prior ls_rqst_rcv.
* Driver sets this value to the ndlp pointer.
- * @pnvme_lsreq - the transport nvme_ls_req structure for the LS
+ * @pnvme_lsreq: the transport nvme_ls_req structure for the LS
*
* Driver registers this routine to handle any link service request
* from the nvme_fc transport to a remote nvme-aware port.
@@ -1336,9 +1335,9 @@ lpfc_nvmet_ls_req(struct nvmet_fc_target_port *targetport,
/**
* lpfc_nvmet_ls_abort - Abort a prior NVME LS request
* @targetport: Transport targetport, that LS was issued from.
- * @hosthandle - hosthandle set by the driver in a prior ls_rqst_rcv.
+ * @hosthandle: hosthandle set by the driver in a prior ls_rqst_rcv.
* Driver sets this value to the ndlp pointer.
- * @pnvme_lsreq - the transport nvme_ls_req structure for LS to be aborted
+ * @pnvme_lsreq: the transport nvme_ls_req structure for LS to be aborted
*
* Driver registers this routine to abort an NVME LS request that is
 * in progress (from the transport's perspective).
@@ -1807,7 +1806,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
rrq_empty = list_empty(&phba->active_rrq_list);
spin_unlock_irqrestore(&phba->hbalock, iflag);
ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
- if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
+ if (ndlp &&
(ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
lpfc_set_rrq_active(phba, ndlp,
@@ -2597,7 +2596,7 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
}
ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
+ if (!ndlp ||
((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
(ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
@@ -2717,7 +2716,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
}
ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
+ if (!ndlp ||
((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
(ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
@@ -3249,7 +3248,7 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
ndlp = lpfc_findnode_did(phba->pport, sid);
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
+ if (!ndlp ||
((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
(ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
if (tgtp)
@@ -3328,6 +3327,46 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
return 1;
}
+/**
+ * lpfc_nvmet_prep_abort_wqe - set up 'abort' work queue entry.
+ * @pwqeq: Pointer to command iocb.
+ * @xritag: Tag that uniquely identifies the local exchange resource.
+ * @opt: Option bits -
+ * bit 0 = inhibit sending abts on the link
+ *
+ * This function is called with hbalock held.
+ **/
+static void
+lpfc_nvmet_prep_abort_wqe(struct lpfc_iocbq *pwqeq, u16 xritag, u8 opt)
+{
+ union lpfc_wqe128 *wqe = &pwqeq->wqe;
+
+ /* WQEs are reused. Clear stale data and set key fields to
+ * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
+ */
+ memset(wqe, 0, sizeof(*wqe));
+
+ if (opt & INHIBIT_ABORT)
+ bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
+ /* Abort specified xri tag, with the mask deliberately zeroed */
+ bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
+
+ bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
+
+ /* Abort the I/O associated with this outstanding exchange ID. */
+ wqe->abort_cmd.wqe_com.abort_tag = xritag;
+
+ /* iotag for the wqe completion. */
+ bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, pwqeq->iotag);
+
+ bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
+ bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
+
+ bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND);
+ bf_set(wqe_wqec, &wqe->abort_cmd.wqe_com, 1);
+ bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+}
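
All of the Word-N assignments above go through bf_set(), the driver's token-pasting bitfield writer. A hedged paraphrase of its shape (the real definition lives in lpfc_hw4.h; the _SHIFT, _MASK and _WORD companions are declared per field name):

	#define bf_set(name, ptr, value)                                \
		((ptr)->name##_WORD = ((((value) & name##_MASK)         \
					<< name##_SHIFT) |              \
				       ((ptr)->name##_WORD &            \
					~(name##_MASK << name##_SHIFT))))

Each call therefore updates only the named field of one 32-bit WQE word and leaves the neighboring bits intact.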
+
static int
lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
struct lpfc_async_xchg_ctx *ctxp,
@@ -3347,7 +3386,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
}
ndlp = lpfc_findnode_did(phba->pport, sid);
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
+ if (!ndlp ||
((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
(ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
atomic_inc(&tgtp->xmt_abort_rsp_error);
@@ -3423,7 +3462,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
/* Ready - mark outstanding as aborted by driver. */
abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;
- lpfc_nvme_prep_abort_wqe(abts_wqeq, ctxp->wqeq->sli4_xritag, opt);
+ lpfc_nvmet_prep_abort_wqe(abts_wqeq, ctxp->wqeq->sli4_xritag, opt);
/* ABTS WQE must go to the same WQ as the WQE to be aborted */
abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
@@ -3596,8 +3635,8 @@ out:
/**
* lpfc_nvmet_invalidate_host
*
- * @phba - pointer to the driver instance bound to an adapter port.
- * @ndlp - pointer to an lpfc_nodelist type
+ * @phba: pointer to the driver instance bound to an adapter port.
+ * @ndlp: pointer to an lpfc_nodelist type
*
* This routine upcalls the nvmet transport to invalidate an NVME
* host to which this target instance had active connections.
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 983eeb0e3d07..3b989f720937 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -313,7 +313,7 @@ lpfc_scsi_dev_block(struct lpfc_hba *phba)
/**
* lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
* @vport: The virtual port for which this call being executed.
- * @num_to_allocate: The requested number of buffers to allocate.
+ * @num_to_alloc: The requested number of buffers to allocate.
*
* This routine allocates a scsi buffer for device with SLI-3 interface spec,
* the scsi buffer contains all the necessary information needed to initiate
@@ -497,6 +497,7 @@ lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
* lpfc_sli4_io_xri_aborted - Fast-path process of fcp xri abort
* @phba: pointer to lpfc hba data structure.
* @axri: pointer to the fcp xri abort wcqe structure.
+ * @idx: index into hdwq
*
* This routine is invoked by the worker thread to process a SLI4 fast-path
* FCP or NVME aborted xri.
@@ -579,6 +580,8 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
/**
* lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
* @phba: The HBA for which this call is being executed.
+ * @ndlp: pointer to a node-list data structure.
+ * @cmnd: Pointer to scsi_cmnd data structure.
*
* This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
* and returns to caller.
@@ -618,6 +621,8 @@ lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
/**
* lpfc_get_scsi_buf_s4 - Get a scsi buffer from io_buf_list of the HBA
* @phba: The HBA for which this call is being executed.
+ * @ndlp: pointer to a node-list data structure.
+ * @cmnd: Pointer to scsi_cmnd data structure.
*
* This routine removes a scsi buffer from head of @hdwq io_buf_list
* and returns to caller.
@@ -633,7 +638,6 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
struct lpfc_io_buf *lpfc_cmd;
struct lpfc_sli4_hdw_queue *qp;
struct sli4_sge *sgl;
- IOCB_t *iocb;
dma_addr_t pdma_phys_fcp_rsp;
dma_addr_t pdma_phys_fcp_cmd;
uint32_t cpu, idx;
@@ -703,24 +707,6 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
sgl->word2 = cpu_to_le32(sgl->word2);
sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));
- /*
- * Since the IOCB for the FCP I/O is built into this
- * lpfc_io_buf, initialize it with all known data now.
- */
- iocb = &lpfc_cmd->cur_iocbq.iocb;
- iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
- iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
- /* setting the BLP size to 2 * sizeof BDE may not be correct.
- * We are setting the bpl to point to out sgl. An sgl's
- * entries are 16 bytes, a bpl entries are 12 bytes.
- */
- iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
- iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
- iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
- iocb->ulpBdeCount = 1;
- iocb->ulpLe = 1;
- iocb->ulpClass = CLASS3;
-
if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
atomic_inc(&ndlp->cmd_pending);
lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
@@ -730,6 +716,8 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
/**
* lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
* @phba: The HBA for which this call is being executed.
+ * @ndlp: pointer to a node-list data structure.
+ * @cmnd: Pointer to scsi_cmnd data structure.
*
* This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
* and returns to caller.
@@ -818,6 +806,25 @@ lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
}
/**
+ * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
+ * @data: A pointer to the immediate command data portion of the IOCB.
+ * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
+ *
+ * The routine copies the entire FCP command from @fcp_cmnd to @data while
+ * byte swapping the data to big endian format for transmission on the wire.
+ **/
+static void
+lpfc_fcpcmd_to_iocb(u8 *data, struct fcp_cmnd *fcp_cmnd)
+{
+ int i, j;
+
+ for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
+ i += sizeof(uint32_t), j++) {
+ ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
+ }
+}
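
A standalone illustration of what the copy loop does on a little-endian host (on big-endian the swap is a no-op; sizeof(struct fcp_cmnd) is 32 bytes, so the loop above runs eight times):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t host_word = 0x11223344;            /* CPU order */
		uint32_t wire_word = __builtin_bswap32(host_word);

		/* prints: host 0x11223344 -> wire 0x44332211 */
		printf("host 0x%08x -> wire 0x%08x\n", host_word, wire_word);
		return 0;
	}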
+
+/**
* lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
* @phba: The Hba for which this call is being executed.
* @lpfc_cmd: The scsi buffer which is going to be mapped.
@@ -953,6 +960,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
* we need to set word 4 of IOCB here
*/
iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
+ lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
return 0;
}
@@ -976,7 +984,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
* @sc: The SCSI command to examine
* @reftag: (out) BlockGuard reference tag for transmitted data
* @apptag: (out) BlockGuard application tag for transmitted data
- * @new_guard (in) Value to replace CRC with if needed
+ * @new_guard: (in) Value to replace CRC with if needed
*
* Returns BG_ERR_* bit mask or 0 if request ignored
**/
@@ -1381,8 +1389,8 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
* the specified SCSI command.
* @phba: The Hba for which this call is being executed.
* @sc: The SCSI command to examine
- * @txopt: (out) BlockGuard operation for transmitted data
- * @rxopt: (out) BlockGuard operation for received data
+ * @txop: (out) BlockGuard operation for transmitted data
+ * @rxop: (out) BlockGuard operation for received data
*
* Returns: zero on success; non-zero if tx and/or rx op cannot be determined
*
@@ -1461,8 +1469,8 @@ lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
* the specified SCSI command in order to force a guard tag error.
* @phba: The Hba for which this call is being executed.
* @sc: The SCSI command to examine
- * @txopt: (out) BlockGuard operation for transmitted data
- * @rxopt: (out) BlockGuard operation for received data
+ * @txop: (out) BlockGuard operation for transmitted data
+ * @rxop: (out) BlockGuard operation for received data
*
* Returns: zero on success; non-zero if tx and/or rx op cannot be determined
*
@@ -1533,7 +1541,7 @@ lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
* @phba: The Hba for which this call is being executed.
* @sc: pointer to scsi command we're working on
* @bpl: pointer to buffer list for protection groups
- * @datacnt: number of segments of data that have been dma mapped
+ * @datasegcnt: number of segments of data that have been dma mapped
*
* This function sets up BPL buffer list for protection groups of
* type LPFC_PG_TYPE_NO_DIF
@@ -1920,7 +1928,8 @@ out:
* @phba: The Hba for which this call is being executed.
* @sc: pointer to scsi command we're working on
* @sgl: pointer to buffer list for protection groups
- * @datacnt: number of segments of data that have been dma mapped
+ * @datasegcnt: number of segments of data that have been dma mapped
+ * @lpfc_cmd: lpfc scsi command object pointer.
*
* This function sets up SGL buffer list for protection groups of
* type LPFC_PG_TYPE_NO_DIF
@@ -2094,6 +2103,7 @@ out:
* @sgl: pointer to buffer list for protection groups
* @datacnt: number of segments of data that have been dma mapped
* @protcnt: number of segment of protection data that have been dma mapped
+ * @lpfc_cmd: lpfc scsi command object pointer.
*
* This function sets up SGL buffer list for protection groups of
* type LPFC_PG_TYPE_DIF
@@ -2881,6 +2891,150 @@ out:
}
}
+/*
+ * This function checks for BlockGuard errors detected by
+ * the HBA. In case of errors, the ASC/ASCQ fields in the
+ * sense buffer will be set accordingly, paired with
+ * ILLEGAL_REQUEST to signal to the kernel that the HBA
+ * detected corruption.
+ *
+ * Returns:
+ * 0 - No error found
+ * 1 - BlockGuard error found
+ * -1 - Internal error (bad profile, etc.)
+ */
+static int
+lpfc_sli4_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
+ struct lpfc_wcqe_complete *wcqe)
+{
+ struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
+ int ret = 0;
+ u32 status = bf_get(lpfc_wcqe_c_status, wcqe);
+ u32 bghm = 0;
+ u32 bgstat = 0;
+ u64 failing_sector = 0;
+
+ if (status == CQE_STATUS_DI_ERROR) {
+ if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
+ bgstat |= BGS_GUARD_ERR_MASK;
+ if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* AppTag Check failed */
+ bgstat |= BGS_APPTAG_ERR_MASK;
+ if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* RefTag Check failed */
+ bgstat |= BGS_REFTAG_ERR_MASK;
+
+ /* Check to see if there was any good data before the error */
+ if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
+ bgstat |= BGS_HI_WATER_MARK_PRESENT_MASK;
+ bghm = wcqe->total_data_placed;
+ }
+
+ /*
+ * Set ALL the error bits to indicate we don't know what
+ * type of error it is.
+ */
+ if (!bgstat)
+ bgstat |= (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
+ BGS_GUARD_ERR_MASK);
+ }
+
+ if (lpfc_bgs_get_guard_err(bgstat)) {
+ ret = 1;
+
+ scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+ 0x10, 0x1);
+ cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
+ SAM_STAT_CHECK_CONDITION;
+ phba->bg_guard_err_cnt++;
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+ "9059 BLKGRD: Guard Tag error in cmd"
+ " 0x%x lba 0x%llx blk cnt 0x%x "
+ "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
+ (unsigned long long)scsi_get_lba(cmd),
+ blk_rq_sectors(cmd->request), bgstat, bghm);
+ }
+
+ if (lpfc_bgs_get_reftag_err(bgstat)) {
+ ret = 1;
+
+ scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+ 0x10, 0x3);
+ cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
+ SAM_STAT_CHECK_CONDITION;
+
+ phba->bg_reftag_err_cnt++;
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+ "9060 BLKGRD: Ref Tag error in cmd"
+ " 0x%x lba 0x%llx blk cnt 0x%x "
+ "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
+ (unsigned long long)scsi_get_lba(cmd),
+ blk_rq_sectors(cmd->request), bgstat, bghm);
+ }
+
+ if (lpfc_bgs_get_apptag_err(bgstat)) {
+ ret = 1;
+
+ scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+ 0x10, 0x2);
+ cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
+ SAM_STAT_CHECK_CONDITION;
+
+ phba->bg_apptag_err_cnt++;
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+ "9062 BLKGRD: App Tag error in cmd"
+ " 0x%x lba 0x%llx blk cnt 0x%x "
+ "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
+ (unsigned long long)scsi_get_lba(cmd),
+ blk_rq_sectors(cmd->request), bgstat, bghm);
+ }
+
+ if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
+ /*
+ * setup sense data descriptor 0 per SPC-4 as an information
+ * field, and put the failing LBA in it.
+ * This code assumes there was also a guard/app/ref tag error
+ * indication.
+ */
+ cmd->sense_buffer[7] = 0xc; /* Additional sense length */
+ cmd->sense_buffer[8] = 0; /* Information descriptor type */
+ cmd->sense_buffer[9] = 0xa; /* Additional descriptor length */
+ cmd->sense_buffer[10] = 0x80; /* Validity bit */
+
+ /* bghm is an "on the wire" FC frame-based count */
+ switch (scsi_get_prot_op(cmd)) {
+ case SCSI_PROT_READ_INSERT:
+ case SCSI_PROT_WRITE_STRIP:
+ bghm /= cmd->device->sector_size;
+ break;
+ case SCSI_PROT_READ_STRIP:
+ case SCSI_PROT_WRITE_INSERT:
+ case SCSI_PROT_READ_PASS:
+ case SCSI_PROT_WRITE_PASS:
+ bghm /= (cmd->device->sector_size +
+ sizeof(struct scsi_dif_tuple));
+ break;
+ }
+
+ failing_sector = scsi_get_lba(cmd);
+ failing_sector += bghm;
+
+ /* Descriptor Information */
+ put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
+ }
+
+ if (!ret) {
+ /* No error was reported - problem in FW? */
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+ "9068 BLKGRD: Unknown error in cmd"
+ " 0x%x lba 0x%llx blk cnt 0x%x "
+ "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
+ (unsigned long long)scsi_get_lba(cmd),
+ blk_rq_sectors(cmd->request), bgstat, bghm);
+
+ /* Calculate what type of error it was */
+ lpfc_calc_bg_err(phba, lpfc_cmd);
+ }
+ return ret;
+}
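
The sector math at the end is the subtle part: bghm counts bytes as they appeared on the wire, so the divisor must include the 8-byte DIF tuple whenever the tuples actually traveled with the data. A worked example with illustrative numbers:

	/* WRITE_PASS, 512-byte sectors, tuples on the wire:
	 * 4160 wire bytes / (512 + 8) = 8 good sectors before the error,
	 * so the failing LBA is start_lba + 8.
	 * On WRITE_STRIP the tuples never reach the wire, so the divisor
	 * would be plain 512.
	 */
	u64 start_lba = 0x1000;
	u32 bghm      = 4160;
	u64 failing   = start_lba + bghm / (512 + 8);   /* 0x1008 */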
/*
* This function checks for BlockGuard errors detected by
@@ -3050,7 +3204,9 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
struct sli4_sge *first_data_sgl;
- IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
+ struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
+ struct lpfc_vport *vport = phba->pport;
+ union lpfc_wqe128 *wqe = &pwqeq->wqe;
dma_addr_t physaddr;
uint32_t num_bde = 0;
uint32_t dma_len;
@@ -3191,13 +3347,16 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
phba->cfg_enable_pbde) {
bde = (struct ulp_bde64 *)
- &(iocb_cmd->unsli3.sli3Words[5]);
+ &wqe->words[13];
bde->addrLow = first_data_sgl->addr_lo;
bde->addrHigh = first_data_sgl->addr_hi;
bde->tus.f.bdeSize =
le32_to_cpu(first_data_sgl->sge_len);
bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
bde->tus.w = cpu_to_le32(bde->tus.w);
+
+ } else {
+ memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
}
} else {
sgl += 1;
@@ -3209,11 +3368,15 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
phba->cfg_enable_pbde) {
bde = (struct ulp_bde64 *)
- &(iocb_cmd->unsli3.sli3Words[5]);
+ &wqe->words[13];
memset(bde, 0, (sizeof(uint32_t) * 3));
}
}
+ /* Word 11 */
+ if (phba->cfg_enable_pbde)
+ bf_set(wqe_pbde, &wqe->generic.wqe_com, 1);
+
/*
* Finish initializing those IOCB fields that are dependent on the
* scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
@@ -3221,12 +3384,23 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
* all iocb memory resources are reused.
*/
fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
-
- /*
- * Due to difference in data length between DIF/non-DIF paths,
- * we need to set word 4 of IOCB here
- */
- iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
+ /* Set first-burst provided it was successfully negotiated */
+ if (!(phba->hba_flag & HBA_FCOE_MODE) &&
+ vport->cfg_first_burst_size &&
+ scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) {
+ u32 init_len, total_len;
+
+ total_len = be32_to_cpu(fcp_cmnd->fcpDl);
+ init_len = min(total_len, vport->cfg_first_burst_size);
+
+ /* Word 4 & 5 */
+ wqe->fcp_iwrite.initial_xfer_len = init_len;
+ wqe->fcp_iwrite.total_xfer_len = total_len;
+ } else {
+ /* Word 4 */
+ wqe->fcp_iwrite.total_xfer_len =
+ be32_to_cpu(fcp_cmnd->fcpDl);
+ }
/*
* If the OAS driver feature is enabled and the lun is enabled for
@@ -3237,6 +3411,17 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
lpfc_cmd->cur_iocbq.priority = ((struct lpfc_device_data *)
scsi_cmnd->device->hostdata)->priority;
+
+ /* Word 10 */
+ bf_set(wqe_oas, &wqe->generic.wqe_com, 1);
+ bf_set(wqe_ccpe, &wqe->generic.wqe_com, 1);
+
+ if (lpfc_cmd->cur_iocbq.priority)
+ bf_set(wqe_ccp, &wqe->generic.wqe_com,
+ (lpfc_cmd->cur_iocbq.priority << 1));
+ else
+ bf_set(wqe_ccp, &wqe->generic.wqe_com,
+ (phba->cfg_XLanePriority << 1));
}
return 0;
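
For the first-burst branch a few illustrative numbers make the min() obvious: a 64 KiB write with cfg_first_burst_size of 16 KiB programs initial_xfer_len (Word 5) to 16384 and total_xfer_len (Word 4) to 65536, while a 4 KiB write is capped at its own length:

	/* Illustrative values only */
	u32 total_len = 65536;
	u32 init_len  = min(total_len, (u32)16384);   /* 16384 */

	total_len = 4096;
	init_len  = min(total_len, (u32)16384);       /* 4096: capped */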
@@ -3262,7 +3447,8 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->dma_sgl);
- IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
+ struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
+ union lpfc_wqe128 *wqe = &pwqeq->wqe;
uint32_t num_sge = 0;
int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
int prot_group_type = 0;
@@ -3394,28 +3580,50 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
- /*
- * Due to difference in data length between DIF/non-DIF paths,
- * we need to set word 4 of IOCB here
- */
- iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
+ /* Set first-burst provided it was successfully negotiated */
+ if (!(phba->hba_flag & HBA_FCOE_MODE) &&
+ vport->cfg_first_burst_size &&
+ scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) {
+ u32 init_len, total_len;
- /*
- * For First burst, we may need to adjust the initial transfer
- * length for DIF
- */
- if (iocb_cmd->un.fcpi.fcpi_XRdy &&
- (fcpdl < vport->cfg_first_burst_size))
- iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl;
+ total_len = be32_to_cpu(fcp_cmnd->fcpDl);
+ init_len = min(total_len, vport->cfg_first_burst_size);
+
+ /* Word 4 & 5 */
+ wqe->fcp_iwrite.initial_xfer_len = init_len;
+ wqe->fcp_iwrite.total_xfer_len = total_len;
+ } else {
+ /* Word 4 */
+ wqe->fcp_iwrite.total_xfer_len =
+ be32_to_cpu(fcp_cmnd->fcpDl);
+ }
/*
* If the OAS driver feature is enabled and the lun is enabled for
* OAS, set the oas iocb related flags.
*/
if ((phba->cfg_fof) && ((struct lpfc_device_data *)
- scsi_cmnd->device->hostdata)->oas_enabled)
+ scsi_cmnd->device->hostdata)->oas_enabled) {
lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
+ /* Word 10 */
+ bf_set(wqe_oas, &wqe->generic.wqe_com, 1);
+ bf_set(wqe_ccpe, &wqe->generic.wqe_com, 1);
+ bf_set(wqe_ccp, &wqe->generic.wqe_com,
+ (phba->cfg_XLanePriority << 1));
+ }
+
+ /* Word 7. DIF Flags */
+ if (lpfc_cmd->cur_iocbq.iocb_flag & LPFC_IO_DIF_PASS)
+ bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
+ else if (lpfc_cmd->cur_iocbq.iocb_flag & LPFC_IO_DIF_STRIP)
+ bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
+ else if (lpfc_cmd->cur_iocbq.iocb_flag & LPFC_IO_DIF_INSERT)
+ bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
+
+ lpfc_cmd->cur_iocbq.iocb_flag &= ~(LPFC_IO_DIF_PASS |
+ LPFC_IO_DIF_STRIP | LPFC_IO_DIF_INSERT);
+
return 0;
err:
if (lpfc_cmd->seg_cnt)
@@ -3475,6 +3683,26 @@ lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
}
/**
+ * lpfc_scsi_prep_cmnd_buf - Wrapper function for IOCB/WQE mapping of scsi
+ * buffer
+ * @phba: The Hba for which this call is being executed.
+ * @lpfc_cmd: The scsi buffer which is going to be mapped.
+ * @tmo: Timeout value for IO
+ *
+ * This routine initializes IOCB/WQE data structure from scsi command
+ *
+ * Return codes:
+ * 1 - Error
+ * 0 - Success
+ **/
+static inline int
+lpfc_scsi_prep_cmnd_buf(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
+ uint8_t tmo)
+{
+ return vport->phba->lpfc_scsi_prep_cmnd_buf(vport, lpfc_cmd, tmo);
+}
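
The wrapper above is plain function-pointer dispatch: the SLI-revision-specific body is selected once at probe time rather than branched on per IO. A sketch of the likely wiring, following the driver's existing api-table pattern (the _s3/_s4 handler names and setup location are assumptions here, not confirmed by this hunk):

	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:   /* SLI-3 HBA */
		phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s3;
		break;
	case LPFC_PCI_DEV_OC:   /* SLI-4 HBA */
		phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s4;
		break;
	}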
+
+/**
* lpfc_send_scsi_error_event - Posts an event when there is SCSI error
* @phba: Pointer to hba context object.
* @vport: Pointer to vport object.
@@ -3486,17 +3714,16 @@ lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
**/
static void
lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
- struct lpfc_io_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) {
+ struct lpfc_io_buf *lpfc_cmd, uint32_t fcpi_parm) {
struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
uint32_t resp_info = fcprsp->rspStatus2;
uint32_t scsi_status = fcprsp->rspStatus3;
- uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
struct lpfc_fast_path_event *fast_path_evt = NULL;
struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
unsigned long flags;
- if (!pnode || !NLP_CHK_NODE_ACT(pnode))
+ if (!pnode)
return;
/* If there is queuefull or busy condition send a scsi event */
@@ -3606,13 +3833,11 @@ lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
**/
static void
lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
- struct lpfc_iocbq *rsp_iocb)
+ uint32_t fcpi_parm)
{
- struct lpfc_hba *phba = vport->phba;
struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
- uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
uint32_t resp_info = fcprsp->rspStatus2;
uint32_t scsi_status = fcprsp->rspStatus3;
uint32_t *lp;
@@ -3747,13 +3972,10 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
*/
} else if (fcpi_parm) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
- "9029 FCP %s Check Error xri x%x Data: "
+ "9029 FCP %s Check Error Data: "
"x%x x%x x%x x%x x%x\n",
((cmnd->sc_data_direction == DMA_FROM_DEVICE) ?
"Read" : "Write"),
- ((phba->sli_rev == LPFC_SLI_REV4) ?
- lpfc_cmd->cur_iocbq.sli4_xritag :
- rsp_iocb->iocb.ulpContext),
fcpDl, be32_to_cpu(fcprsp->rspResId),
fcpi_parm, cmnd->cmnd[0], scsi_status);
@@ -3780,7 +4002,336 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
out:
cmnd->result = host_status << 16 | scsi_status;
- lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
+ lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, fcpi_parm);
+}
+
+/**
+ * lpfc_fcp_io_cmd_wqe_cmpl - Complete a FCP IO
+ * @phba: The hba for which this call is being executed.
+ * @pwqeIn: The command WQE for the scsi cmnd.
+ * @wcqe: Pointer to the work-queue completion entry for the scsi cmnd.
+ *
+ * This routine assigns scsi command result by looking into response WQE
+ * status field appropriately. This routine handles QUEUE FULL condition as
+ * well by ramping down device queue depth.
+ **/
+static void
+lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
+ struct lpfc_wcqe_complete *wcqe)
+{
+ struct lpfc_io_buf *lpfc_cmd =
+ (struct lpfc_io_buf *)pwqeIn->context1;
+ struct lpfc_vport *vport = pwqeIn->vport;
+ struct lpfc_rport_data *rdata;
+ struct lpfc_nodelist *ndlp;
+ struct scsi_cmnd *cmd;
+ unsigned long flags;
+ struct lpfc_fast_path_event *fast_path_evt;
+ struct Scsi_Host *shost;
+ u32 logit = LOG_FCP;
+ u32 status, idx;
+ unsigned long iflags = 0;
+
+ /* Sanity check on return of outstanding command */
+ if (!lpfc_cmd) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "9032 Null lpfc_cmd pointer. No "
+ "release, skip completion\n");
+ return;
+ }
+
+ rdata = lpfc_cmd->rdata;
+ ndlp = rdata->pnode;
+
+ if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
+ /* TOREMOVE - currently this flag is checked during
+ * the release of lpfc_iocbq. Remove once we move
+ * to lpfc_wqe_job construct.
+ *
+ * This needs to be done outside buf_lock
+ */
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_EXCHANGE_BUSY;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ }
+
+ /* Guard against abort handler being called at same time */
+ spin_lock(&lpfc_cmd->buf_lock);
+
+ /* Sanity check on return of outstanding command */
+ cmd = lpfc_cmd->pCmd;
+ if (!cmd || !phba) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "9042 I/O completion: Not an active IO\n");
+ spin_unlock(&lpfc_cmd->buf_lock);
+ lpfc_release_scsi_buf(phba, lpfc_cmd);
+ return;
+ }
+ idx = lpfc_cmd->cur_iocbq.hba_wqidx;
+ if (phba->sli4_hba.hdwq)
+ phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++;
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
+ this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
+#endif
+ shost = cmd->device->host;
+
+ status = bf_get(lpfc_wcqe_c_status, wcqe);
+ lpfc_cmd->status = (status & LPFC_IOCB_STATUS_MASK);
+ lpfc_cmd->result = (wcqe->parameter & IOERR_PARAM_MASK);
+
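+ /* Mirror the CQE XB bit into the buffer's exchange-busy flag */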
+ lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
+ if (bf_get(lpfc_wcqe_c_xb, wcqe))
+ lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ if (lpfc_cmd->prot_data_type) {
+ struct scsi_dif_tuple *src = NULL;
+
+ src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
+ /*
+ * Used to restore any changes to protection
+ * data for error injection.
+ */
+ switch (lpfc_cmd->prot_data_type) {
+ case LPFC_INJERR_REFTAG:
+ src->ref_tag =
+ lpfc_cmd->prot_data;
+ break;
+ case LPFC_INJERR_APPTAG:
+ src->app_tag =
+ (uint16_t)lpfc_cmd->prot_data;
+ break;
+ case LPFC_INJERR_GUARD:
+ src->guard_tag =
+ (uint16_t)lpfc_cmd->prot_data;
+ break;
+ default:
+ break;
+ }
+
+ lpfc_cmd->prot_data = 0;
+ lpfc_cmd->prot_data_type = 0;
+ lpfc_cmd->prot_data_segment = NULL;
+ }
+#endif
+ if (unlikely(lpfc_cmd->status)) {
+ if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
+ (lpfc_cmd->result & IOERR_DRVR_MASK))
+ lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
+ else if (lpfc_cmd->status >= IOSTAT_CNT)
+ lpfc_cmd->status = IOSTAT_DEFAULT;
+ if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
+ !lpfc_cmd->fcp_rsp->rspStatus3 &&
+ (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
+ !(vport->cfg_log_verbose & LOG_FCP_UNDER))
+ logit = 0;
+ else
+ logit = LOG_FCP | LOG_FCP_UNDER;
+ lpfc_printf_vlog(vport, KERN_WARNING, logit,
+ "9034 FCP cmd x%x failed <%d/%lld> "
+ "status: x%x result: x%x "
+ "sid: x%x did: x%x oxid: x%x "
+ "Data: x%x x%x x%x\n",
+ cmd->cmnd[0],
+ cmd->device ? cmd->device->id : 0xffff,
+ cmd->device ? cmd->device->lun : 0xffff,
+ lpfc_cmd->status, lpfc_cmd->result,
+ vport->fc_myDID,
+ (ndlp) ? ndlp->nlp_DID : 0,
+ lpfc_cmd->cur_iocbq.sli4_xritag,
+ wcqe->parameter, wcqe->total_data_placed,
+ lpfc_cmd->cur_iocbq.iotag);
+ }
+
+ switch (lpfc_cmd->status) {
+ case IOSTAT_SUCCESS:
+ cmd->result = DID_OK << 16;
+ break;
+ case IOSTAT_FCP_RSP_ERROR:
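+ /* Pass the underrun residual (total length minus data placed) */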
+ lpfc_handle_fcp_err(vport, lpfc_cmd,
+ pwqeIn->wqe.fcp_iread.total_xfer_len -
+ wcqe->total_data_placed);
+ break;
+ case IOSTAT_NPORT_BSY:
+ case IOSTAT_FABRIC_BSY:
+ cmd->result = DID_TRANSPORT_DISRUPTED << 16;
+ fast_path_evt = lpfc_alloc_fast_evt(phba);
+ if (!fast_path_evt)
+ break;
+ fast_path_evt->un.fabric_evt.event_type =
+ FC_REG_FABRIC_EVENT;
+ fast_path_evt->un.fabric_evt.subcategory =
+ (lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
+ LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
+ if (ndlp) {
+ memcpy(&fast_path_evt->un.fabric_evt.wwpn,
+ &ndlp->nlp_portname,
+ sizeof(struct lpfc_name));
+ memcpy(&fast_path_evt->un.fabric_evt.wwnn,
+ &ndlp->nlp_nodename,
+ sizeof(struct lpfc_name));
+ }
+ fast_path_evt->vport = vport;
+ fast_path_evt->work_evt.evt =
+ LPFC_EVT_FASTPATH_MGMT_EVT;
+ spin_lock_irqsave(&phba->hbalock, flags);
+ list_add_tail(&fast_path_evt->work_evt.evt_listp,
+ &phba->work_list);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ lpfc_worker_wake_up(phba);
+ lpfc_printf_vlog(vport, KERN_WARNING, logit,
+ "9035 Fabric/Node busy FCP cmd x%x failed"
+ " <%d/%lld> "
+ "status: x%x result: x%x "
+ "sid: x%x did: x%x oxid: x%x "
+ "Data: x%x x%x x%x\n",
+ cmd->cmnd[0],
+ cmd->device ? cmd->device->id : 0xffff,
+ cmd->device ? cmd->device->lun : 0xffff,
+ lpfc_cmd->status, lpfc_cmd->result,
+ vport->fc_myDID,
+ (ndlp) ? ndlp->nlp_DID : 0,
+ lpfc_cmd->cur_iocbq.sli4_xritag,
+ wcqe->parameter,
+ wcqe->total_data_placed,
+ lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
+ break;
+ case IOSTAT_REMOTE_STOP:
+ if (ndlp) {
+ /* This I/O was aborted by the target, we don't
+ * know the rxid and because we did not send the
+ * ABTS we cannot generate an RRQ.
+ */
+ lpfc_set_rrq_active(phba, ndlp,
+ lpfc_cmd->cur_iocbq.sli4_lxritag,
+ 0, 0);
+ }
+ fallthrough;
+ case IOSTAT_LOCAL_REJECT:
+ if (lpfc_cmd->result & IOERR_DRVR_MASK)
+ lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
+ if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
+ lpfc_cmd->result ==
+ IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
+ lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
+ lpfc_cmd->result ==
+ IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
+ cmd->result = DID_NO_CONNECT << 16;
+ break;
+ }
+ if (lpfc_cmd->result == IOERR_INVALID_RPI ||
+ lpfc_cmd->result == IOERR_NO_RESOURCES ||
+ lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
+ lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
+ cmd->result = DID_REQUEUE << 16;
+ break;
+ }
+ if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
+ lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
+ status == CQE_STATUS_DI_ERROR) {
+ if (scsi_get_prot_op(cmd) !=
+ SCSI_PROT_NORMAL) {
+ /*
+ * This is a response for a BG enabled
+ * cmd. Parse BG error
+ */
+ lpfc_sli4_parse_bg_err(phba, lpfc_cmd,
+ wcqe);
+ break;
+ }
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
+ "9040 non-zero BGSTAT on unprotected cmd\n");
+ }
+ lpfc_printf_vlog(vport, KERN_WARNING, logit,
+ "9036 Local Reject FCP cmd x%x failed"
+ " <%d/%lld> "
+ "status: x%x result: x%x "
+ "sid: x%x did: x%x oxid: x%x "
+ "Data: x%x x%x x%x\n",
+ cmd->cmnd[0],
+ cmd->device ? cmd->device->id : 0xffff,
+ cmd->device ? cmd->device->lun : 0xffff,
+ lpfc_cmd->status, lpfc_cmd->result,
+ vport->fc_myDID,
+ (ndlp) ? ndlp->nlp_DID : 0,
+ lpfc_cmd->cur_iocbq.sli4_xritag,
+ wcqe->parameter,
+ wcqe->total_data_placed,
+ lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
+ fallthrough;
+ default:
+ if (lpfc_cmd->status >= IOSTAT_CNT)
+ lpfc_cmd->status = IOSTAT_DEFAULT;
+ cmd->result = DID_ERROR << 16;
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
+ "9037 FCP Completion Error: xri %x "
+ "status x%x result x%x [x%x] "
+ "placed x%x\n",
+ lpfc_cmd->cur_iocbq.sli4_xritag,
+ lpfc_cmd->status, lpfc_cmd->result,
+ wcqe->parameter,
+ wcqe->total_data_placed);
+ }
+ if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
+ u32 *lp = (u32 *)cmd->sense_buffer;
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+ "9039 Iodone <%d/%llu> cmd x%p, error "
+ "x%x SNS x%x x%x Data: x%x x%x\n",
+ cmd->device->id, cmd->device->lun, cmd,
+ cmd->result, *lp, *(lp + 3), cmd->retries,
+ scsi_get_resid(cmd));
+ }
+
+ lpfc_update_stats(vport, lpfc_cmd);
+
+ if (vport->cfg_max_scsicmpl_time &&
+ time_after(jiffies, lpfc_cmd->start_time +
+ msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (ndlp) {
+ if (ndlp->cmd_qdepth >
+ atomic_read(&ndlp->cmd_pending) &&
+ (atomic_read(&ndlp->cmd_pending) >
+ LPFC_MIN_TGT_QDEPTH) &&
+ (cmd->cmnd[0] == READ_10 ||
+ cmd->cmnd[0] == WRITE_10))
+ ndlp->cmd_qdepth =
+ atomic_read(&ndlp->cmd_pending);
+
+ ndlp->last_change_time = jiffies;
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ }
+ lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ if (lpfc_cmd->ts_cmd_start) {
+ lpfc_cmd->ts_isr_cmpl = lpfc_cmd->cur_iocbq.isr_timestamp;
+ lpfc_cmd->ts_data_io = ktime_get_ns();
+ phba->ktime_last_cmd = lpfc_cmd->ts_data_io;
+ lpfc_io_ktime(phba, lpfc_cmd);
+ }
+#endif
+ lpfc_cmd->pCmd = NULL;
+ spin_unlock(&lpfc_cmd->buf_lock);
+
+ /* The sdev is not guaranteed to be valid post scsi_done upcall. */
+ cmd->scsi_done(cmd);
+
+ /*
+ * If there is an abort thread waiting for command completion
+ * wake up the thread.
+ */
+ spin_lock(&lpfc_cmd->buf_lock);
+ lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED;
+ if (lpfc_cmd->waitq)
+ wake_up(lpfc_cmd->waitq);
+ spin_unlock(&lpfc_cmd->buf_lock);
+
+ lpfc_release_scsi_buf(phba, lpfc_cmd);
}
/**
@@ -3903,7 +4454,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
switch (lpfc_cmd->status) {
case IOSTAT_FCP_RSP_ERROR:
/* Call FCP RSP handler to determine result */
- lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
+ lpfc_handle_fcp_err(vport, lpfc_cmd,
+ pIocbOut->iocb.un.fcpi.fcpi_parm);
break;
case IOSTAT_NPORT_BSY:
case IOSTAT_FABRIC_BSY:
@@ -3916,7 +4468,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
fast_path_evt->un.fabric_evt.subcategory =
(lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
- if (pnode && NLP_CHK_NODE_ACT(pnode)) {
+ if (pnode) {
memcpy(&fast_path_evt->un.fabric_evt.wwpn,
&pnode->nlp_portname,
sizeof(struct lpfc_name));
@@ -3971,7 +4523,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
}
if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP)
&& (phba->sli_rev == LPFC_SLI_REV4)
- && (pnode && NLP_CHK_NODE_ACT(pnode))) {
+ && pnode) {
/* This IO was aborted by the target, we don't
* know the rxid and because we did not send the
* ABTS we cannot generate an RRQ.
@@ -3986,8 +4538,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
break;
}
- if (!pnode || !NLP_CHK_NODE_ACT(pnode)
- || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
+ if (!pnode || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
cmd->result = DID_TRANSPORT_DISRUPTED << 16 |
SAM_STAT_BUSY;
} else
@@ -4009,7 +4560,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
time_after(jiffies, lpfc_cmd->start_time +
msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
spin_lock_irqsave(shost->host_lock, flags);
- if (pnode && NLP_CHK_NODE_ACT(pnode)) {
+ if (pnode) {
if (pnode->cmd_qdepth >
atomic_read(&pnode->cmd_pending) &&
(atomic_read(&pnode->cmd_pending) >
@@ -4053,72 +4604,30 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
}
/**
- * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
- * @data: A pointer to the immediate command data portion of the IOCB.
- * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
+ * lpfc_scsi_prep_cmnd_buf_s3 - SLI-3 IOCB init for the IO
+ * @vport: The virtual port for which this call is being executed.
+ * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
+ * @tmo: timeout value for the IO
*
- * The routine copies the entire FCP command from @fcp_cmnd to @data while
- * byte swapping the data to big endian format for transmission on the wire.
- **/
-static void
-lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
-{
- int i, j;
- for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
- i += sizeof(uint32_t), j++) {
- ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
- }
-}
-
-/**
- * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit
- * @vport: The virtual port for which this call is being executed.
- * @lpfc_cmd: The scsi command which needs to send.
- * @pnode: Pointer to lpfc_nodelist.
+ * Based on the data-direction of the command, initialize IOCB
+ * in the I/O buffer. Fill in the IOCB fields which are independent
+ * of the scsi buffer
*
- * This routine initializes fcp_cmnd and iocb data structure from scsi command
- * to transfer for device with SLI3 interface spec.
+ * RETURNS 0 - SUCCESS
**/
-static void
-lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
- struct lpfc_nodelist *pnode)
+static int lpfc_scsi_prep_cmnd_buf_s3(struct lpfc_vport *vport,
+ struct lpfc_io_buf *lpfc_cmd,
+ uint8_t tmo)
{
- struct lpfc_hba *phba = vport->phba;
+ IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
+ struct lpfc_iocbq *piocbq = &lpfc_cmd->cur_iocbq;
struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
- IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
- struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
- struct lpfc_sli4_hdw_queue *hdwq = NULL;
+ struct lpfc_nodelist *pnode = lpfc_cmd->ndlp;
int datadir = scsi_cmnd->sc_data_direction;
- int idx;
- uint8_t *ptr;
- bool sli4;
- uint32_t fcpdl;
-
- if (!pnode || !NLP_CHK_NODE_ACT(pnode))
- return;
-
- lpfc_cmd->fcp_rsp->rspSnsLen = 0;
- /* clear task management bits */
- lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
-
- int_to_scsilun(lpfc_cmd->pCmd->device->lun,
- &lpfc_cmd->fcp_cmnd->fcp_lun);
+ u32 fcpdl;
- ptr = &fcp_cmnd->fcpCdb[0];
- memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
- if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) {
- ptr += scsi_cmnd->cmd_len;
- memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len));
- }
-
- fcp_cmnd->fcpCntl1 = SIMPLE_Q;
-
- sli4 = (phba->sli_rev == LPFC_SLI_REV4);
piocbq->iocb.un.fcpi.fcpi_XRdy = 0;
- idx = lpfc_cmd->hdwq_no;
- if (phba->sli4_hba.hdwq)
- hdwq = &phba->sli4_hba.hdwq[idx];
/*
* There are three possibilities here - use scatter-gather segment, use
@@ -4132,42 +4641,31 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
iocb_cmd->ulpPU = PARM_READ_CHECK;
if (vport->cfg_first_burst_size &&
(pnode->nlp_flag & NLP_FIRSTBURST)) {
+ u32 xrdy_len;
+
fcpdl = scsi_bufflen(scsi_cmnd);
- if (fcpdl < vport->cfg_first_burst_size)
- piocbq->iocb.un.fcpi.fcpi_XRdy = fcpdl;
- else
- piocbq->iocb.un.fcpi.fcpi_XRdy =
- vport->cfg_first_burst_size;
+ xrdy_len = min(fcpdl,
+ vport->cfg_first_burst_size);
+ piocbq->iocb.un.fcpi.fcpi_XRdy = xrdy_len;
}
fcp_cmnd->fcpCntl3 = WRITE_DATA;
- if (hdwq)
- hdwq->scsi_cstat.output_requests++;
} else {
iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
iocb_cmd->ulpPU = PARM_READ_CHECK;
fcp_cmnd->fcpCntl3 = READ_DATA;
- if (hdwq)
- hdwq->scsi_cstat.input_requests++;
}
} else {
iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
iocb_cmd->un.fcpi.fcpi_parm = 0;
iocb_cmd->ulpPU = 0;
fcp_cmnd->fcpCntl3 = 0;
- if (hdwq)
- hdwq->scsi_cstat.control_requests++;
}
- if (phba->sli_rev == 3 &&
- !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
- lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
+
/*
* Finish initializing those IOCB fields that are independent
* of the scsi_cmnd request_buffer
*/
piocbq->iocb.ulpContext = pnode->nlp_rpi;
- if (sli4)
- piocbq->iocb.ulpContext =
- phba->sli4_hba.rpi_ids[pnode->nlp_rpi];
if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
piocbq->iocb.ulpFCP2Rcvy = 1;
else
@@ -4175,9 +4673,159 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
piocbq->context1 = lpfc_cmd;
- piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
- piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
+ if (!piocbq->iocb_cmpl)
+ piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
+ piocbq->iocb.ulpTimeout = tmo;
piocbq->vport = vport;
+ return 0;
+}
+
+/**
+ * lpfc_scsi_prep_cmnd_buf_s4 - SLI-4 WQE init for the IO
+ * @vport: The virtual port for which this call is being executed.
+ * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
+ * @tmo: timeout value for the IO
+ *
+ * Based on the data-direction of the command copy WQE template
+ * to I/O buffer WQE. Fill in the WQE fields which are independent
+ * of the scsi buffer
+ *
+ * RETURNS 0 - SUCCESS
+ **/
+static int lpfc_scsi_prep_cmnd_buf_s4(struct lpfc_vport *vport,
+ struct lpfc_io_buf *lpfc_cmd,
+ uint8_t tmo)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
+ struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
+ struct lpfc_sli4_hdw_queue *hdwq = NULL;
+ struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
+ struct lpfc_nodelist *pnode = lpfc_cmd->ndlp;
+ union lpfc_wqe128 *wqe = &pwqeq->wqe;
+ u16 idx = lpfc_cmd->hdwq_no;
+ int datadir = scsi_cmnd->sc_data_direction;
+
+ hdwq = &phba->sli4_hba.hdwq[idx];
+
+ /* Initialize the entire 128-byte WQE */
+ memset(wqe, 0, sizeof(union lpfc_wqe128));
+
+ /*
+ * There are three possibilities here - use scatter-gather segment, use
+ * the single mapping, or neither.
+ */
+ if (scsi_sg_count(scsi_cmnd)) {
+ if (datadir == DMA_TO_DEVICE) {
+ /* From the iwrite template, initialize words 7 - 11 */
+ memcpy(&wqe->words[7],
+ &lpfc_iwrite_cmd_template.words[7],
+ sizeof(uint32_t) * 5);
+
+ fcp_cmnd->fcpCntl3 = WRITE_DATA;
+ if (hdwq)
+ hdwq->scsi_cstat.output_requests++;
+ } else {
+ /* From the iread template, initialize words 7 - 11 */
+ memcpy(&wqe->words[7],
+ &lpfc_iread_cmd_template.words[7],
+ sizeof(uint32_t) * 5);
+
+ /* Word 7 */
+ bf_set(wqe_tmo, &wqe->fcp_iread.wqe_com, tmo);
+
+ fcp_cmnd->fcpCntl3 = READ_DATA;
+ if (hdwq)
+ hdwq->scsi_cstat.input_requests++;
+ }
+ } else {
+ /* From the icmnd template, initialize words 4 - 11 */
+ memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
+ sizeof(uint32_t) * 8);
+
+ /* Word 7 */
+ bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, tmo);
+
+ fcp_cmnd->fcpCntl3 = 0;
+ if (hdwq)
+ hdwq->scsi_cstat.control_requests++;
+ }
+
+ /*
+ * Finish initializing those WQE fields that are independent
+ * of the request_buffer
+ */
+
+ /* Word 3 */
+ bf_set(payload_offset_len, &wqe->fcp_icmd,
+ sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
+
+ /* Word 6 */
+ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
+ phba->sli4_hba.rpi_ids[pnode->nlp_rpi]);
+ bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);
+
+ /* Word 7*/
+ if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
+ bf_set(wqe_erp, &wqe->generic.wqe_com, 1);
+
+ bf_set(wqe_class, &wqe->generic.wqe_com,
+ (pnode->nlp_fcp_info & 0x0f));
+
+ /* Word 8 */
+ wqe->generic.wqe_com.abort_tag = pwqeq->iotag;
+
+ /* Word 9 */
+ bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);
+
+ pwqeq->vport = vport;
+ pwqeq->context1 = lpfc_cmd;
+ pwqeq->hba_wqidx = lpfc_cmd->hdwq_no;
+ pwqeq->wqe_cmpl = lpfc_fcp_io_cmd_wqe_cmpl;
+
+ return 0;
+}
+
+/**
+ * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit
+ * @vport: The virtual port for which this call is being executed.
+ * @lpfc_cmd: The scsi command which needs to send.
+ * @pnode: Pointer to lpfc_nodelist.
+ *
+ * This routine initializes the fcp_cmnd structure from the scsi command and
+ * invokes the SLI-3 or SLI-4 specific routine to prepare the IOCB/WQE.
+ **/
+static int
+lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
+ struct lpfc_nodelist *pnode)
+{
+ struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
+ struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
+ u8 *ptr;
+
+ if (!pnode)
+ return 0;
+
+ lpfc_cmd->fcp_rsp->rspSnsLen = 0;
+ /* clear task management bits */
+ lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
+
+ int_to_scsilun(lpfc_cmd->pCmd->device->lun,
+ &lpfc_cmd->fcp_cmnd->fcp_lun);
+
+ ptr = &fcp_cmnd->fcpCdb[0];
+ memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
+ if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) {
+ ptr += scsi_cmnd->cmd_len;
+ memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len));
+ }
+
+ fcp_cmnd->fcpCntl1 = SIMPLE_Q;
+
+ lpfc_scsi_prep_cmnd_buf(vport, lpfc_cmd, lpfc_cmd->timeout);
+
+ return 0;
}
/**
@@ -4206,8 +4854,7 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
struct lpfc_nodelist *ndlp = rdata->pnode;
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
- ndlp->nlp_state != NLP_STE_MAPPED_NODE)
+ if (!ndlp || ndlp->nlp_state != NLP_STE_MAPPED_NODE)
return 0;
piocbq = &(lpfc_cmd->cur_iocbq);
@@ -4264,7 +4911,6 @@ lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{
phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
- phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd;
switch (dev_grp) {
case LPFC_PCI_DEV_LP:
@@ -4272,19 +4918,20 @@ lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3;
phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
+ phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s3;
break;
case LPFC_PCI_DEV_OC:
phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4;
phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
+ phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s4;
break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"1418 Invalid HBA PCI-device group: 0x%x\n",
dev_grp);
return -ENODEV;
- break;
}
phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
@@ -4466,12 +5113,11 @@ void lpfc_poll_start_timer(struct lpfc_hba * phba)
/**
* lpfc_poll_timeout - Restart polling timer
- * @ptr: Map to lpfc_hba data structure pointer.
+ * @t: Timer construct where lpfc_hba data structure pointer is obtained.
*
* This routine restarts the fcp_poll timer when FCP ring polling is enabled
* and the FCP ring interrupt is disabled.
**/
-
void lpfc_poll_timeout(struct timer_list *t)
{
struct lpfc_hba *phba = from_timer(phba, t, fcp_poll_timer);
@@ -4487,8 +5133,8 @@ void lpfc_poll_timeout(struct timer_list *t)
/**
* lpfc_queuecommand - scsi_host_template queuecommand entry point
+ * @shost: kernel scsi host pointer.
* @cmnd: Pointer to scsi_cmnd data structure.
- * @done: Pointer to done routine.
*
* Driver registers this routine to scsi midlayer to submit a @cmd to process.
* This routine prepares an IOCB from scsi command and provides to firmware.
@@ -4544,7 +5190,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
* Catch race where our node has transitioned, but the
* transport is still transitioning.
*/
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+ if (!ndlp)
goto out_tgt_busy;
if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
@@ -4594,8 +5240,13 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
lpfc_cmd->pCmd = cmnd;
lpfc_cmd->rdata = rdata;
lpfc_cmd->ndlp = ndlp;
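+ /* Clear iocb_cmpl so the prep routine installs the default completion */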
+ lpfc_cmd->cur_iocbq.iocb_cmpl = NULL;
cmnd->host_scribble = (unsigned char *)lpfc_cmd;
+ err = lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
+ if (err)
+ goto out_host_busy_release_buf;
+
if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
if (vport->phba->cfg_enable_bg) {
lpfc_printf_vlog(vport,
@@ -4631,14 +5282,15 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
goto out_host_busy_free_buf;
}
- lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
#endif
- err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
- &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
+ /* Issue I/O to adapter */
+ err = lpfc_sli_issue_fcp_io(phba, LPFC_FCP_RING,
+ &lpfc_cmd->cur_iocbq,
+ SLI_IOCB_RET_IOCB);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (start) {
lpfc_cmd->ts_cmd_start = start;
@@ -4650,24 +5302,30 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
#endif
if (err) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
- "3376 FCP could not issue IOCB err %x"
- "FCP cmd x%x <%d/%llu> "
- "sid: x%x did: x%x oxid: x%x "
- "Data: x%x x%x x%x x%x\n",
- err, cmnd->cmnd[0],
- cmnd->device ? cmnd->device->id : 0xffff,
- cmnd->device ? cmnd->device->lun : (u64) -1,
- vport->fc_myDID, ndlp->nlp_DID,
- phba->sli_rev == LPFC_SLI_REV4 ?
- lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
- lpfc_cmd->cur_iocbq.iocb.ulpContext,
- lpfc_cmd->cur_iocbq.iocb.ulpIoTag,
- lpfc_cmd->cur_iocbq.iocb.ulpTimeout,
- (uint32_t)
- (cmnd->request->timeout / 1000));
+ "3376 FCP could not issue IOCB err %x "
+ "FCP cmd x%x <%d/%llu> "
+ "sid: x%x did: x%x oxid: x%x "
+ "Data: x%x x%x x%x x%x\n",
+ err, cmnd->cmnd[0],
+ cmnd->device ? cmnd->device->id : 0xffff,
+ cmnd->device ? cmnd->device->lun : (u64)-1,
+ vport->fc_myDID, ndlp->nlp_DID,
+ phba->sli_rev == LPFC_SLI_REV4 ?
+ lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
+ phba->sli_rev == LPFC_SLI_REV4 ?
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi] :
+ lpfc_cmd->cur_iocbq.iocb.ulpContext,
+ lpfc_cmd->cur_iocbq.iotag,
+ phba->sli_rev == LPFC_SLI_REV4 ?
+ bf_get(wqe_tmo,
+ &lpfc_cmd->cur_iocbq.wqe.generic.wqe_com) :
+ lpfc_cmd->cur_iocbq.iocb.ulpTimeout,
+ (uint32_t)
+ (cmnd->request->timeout / 1000));
goto out_host_busy_free_buf;
}
+
if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
lpfc_sli_handle_fast_ring_event(phba,
&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
@@ -4696,6 +5354,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
phba->sli4_hba.hdwq[idx].scsi_cstat.control_requests--;
}
}
+ out_host_busy_release_buf:
lpfc_release_scsi_buf(phba, lpfc_cmd);
out_host_busy:
return SCSI_MLQUEUE_HOST_BUSY;
@@ -4729,11 +5388,10 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *iocb;
- struct lpfc_iocbq *abtsiocb;
struct lpfc_io_buf *lpfc_cmd;
- IOCB_t *cmd, *icmd;
int ret = SUCCESS, status = 0;
struct lpfc_sli_ring *pring_s4 = NULL;
+ struct lpfc_sli_ring *pring = NULL;
int ret_val;
unsigned long flags;
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
@@ -4810,64 +5468,22 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
goto wait_for_cmpl;
}
- abtsiocb = __lpfc_sli_get_iocbq(phba);
- if (abtsiocb == NULL) {
- ret = FAILED;
- goto out_unlock_ring;
- }
-
- /* Indicate the IO is being aborted by the driver. */
- iocb->iocb_flag |= LPFC_DRIVER_ABORTED;
-
- /*
- * The scsi command can not be in txq and it is in flight because the
- * pCmd is still pointig at the SCSI command we have to abort. There
- * is no need to search the txcmplq. Just send an abort to the FW.
- */
-
- cmd = &iocb->iocb;
- icmd = &abtsiocb->iocb;
- icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
- icmd->un.acxri.abortContextTag = cmd->ulpContext;
- if (phba->sli_rev == LPFC_SLI_REV4)
- icmd->un.acxri.abortIoTag = iocb->sli4_xritag;
- else
- icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
-
- icmd->ulpLe = 1;
- icmd->ulpClass = cmd->ulpClass;
-
- /* ABTS WQE must go to the same WQ as the WQE to be aborted */
- abtsiocb->hba_wqidx = iocb->hba_wqidx;
- abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
- if (iocb->iocb_flag & LPFC_IO_FOF)
- abtsiocb->iocb_flag |= LPFC_IO_FOF;
-
- if (lpfc_is_link_up(phba))
- icmd->ulpCommand = CMD_ABORT_XRI_CN;
- else
- icmd->ulpCommand = CMD_CLOSE_XRI_CN;
-
- abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
- abtsiocb->vport = vport;
lpfc_cmd->waitq = &waitq;
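+ /* Hand the abort off to the SLI revision specific issue routine */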
if (phba->sli_rev == LPFC_SLI_REV4) {
- /* Note: both hbalock and ring_lock must be set here */
- ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
- abtsiocb, 0);
spin_unlock(&pring_s4->ring_lock);
+ ret_val = lpfc_sli4_issue_abort_iotag(phba, iocb,
+ lpfc_sli4_abort_fcp_cmpl);
} else {
- ret_val = __lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
- abtsiocb, 0);
+ pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
+ ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocb,
+ lpfc_sli_abort_fcp_cmpl);
}
- if (ret_val == IOCB_ERROR) {
+ if (ret_val != IOCB_SUCCESS) {
/* Indicate the IO is not being aborted by the driver. */
- iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
lpfc_cmd->waitq = NULL;
spin_unlock(&lpfc_cmd->buf_lock);
spin_unlock_irqrestore(&phba->hbalock, flags);
- lpfc_sli_release_iocbq(phba, abtsiocb);
ret = FAILED;
goto out;
}
@@ -4881,7 +5497,10 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
wait_for_cmpl:
- /* Wait for abort to complete */
+ /*
+ * iocb_flag is set to LPFC_DRIVER_ABORTED before we wait
+ * for abort to complete.
+ */
wait_event_timeout(waitq,
(lpfc_cmd->pCmd != cmnd),
msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));
@@ -5016,7 +5635,7 @@ lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
/**
* lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
* @vport: The virtual port for which this call is being executed.
- * @rdata: Pointer to remote port local data
+ * @cmnd: Pointer to scsi_cmnd data structure.
* @tgt_id: Target ID of remote device.
* @lun_id: Lun number for the TMF
* @task_mgmt_cmd: type of TMF to send
@@ -5043,7 +5662,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd,
int status;
rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
- if (!rdata || !rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
+ if (!rdata || !rdata->pnode)
return FAILED;
pnode = rdata->pnode;
@@ -5147,7 +5766,7 @@ lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
*/
later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
while (time_after(later, jiffies)) {
- if (!pnode || !NLP_CHK_NODE_ACT(pnode))
+ if (!pnode)
return FAILED;
if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
return SUCCESS;
@@ -5157,8 +5776,7 @@ lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
return FAILED;
pnode = rdata->pnode;
}
- if (!pnode || !NLP_CHK_NODE_ACT(pnode) ||
- (pnode->nlp_state != NLP_STE_MAPPED_NODE))
+ if (!pnode || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
return FAILED;
return SUCCESS;
}
@@ -5320,10 +5938,10 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0722 Target Reset rport failure: rdata x%px\n", rdata);
if (pnode) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&pnode->lock);
pnode->nlp_flag &= ~NLP_NPR_ADISC;
pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&pnode->lock);
}
lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
LPFC_CTX_TGT);
@@ -5402,8 +6020,7 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
match = 0;
spin_lock_irq(shost->host_lock);
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp))
- continue;
+
if (vport->phba->cfg_fcp2_no_tgt_reset &&
(ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE))
continue;
@@ -5665,10 +6282,11 @@ lpfc_slave_destroy(struct scsi_device *sdev)
/**
* lpfc_create_device_data - creates and initializes device data structure for OAS
- * @pha: Pointer to host bus adapter structure.
+ * @phba: Pointer to host bus adapter structure.
* @vport_wwpn: Pointer to vport's wwpn information
* @target_wwpn: Pointer to target's wwpn information
* @lun: Lun on target
+ * @pri: Priority
* @atomic_create: Flag to indicate if memory should be allocated using the
* GFP_ATOMIC flag or not.
*
@@ -5718,7 +6336,7 @@ lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
/**
* lpfc_delete_device_data - frees a device data structure for OAS
- * @pha: Pointer to host bus adapter structure.
+ * @phba: Pointer to host bus adapter structure.
* @lun_info: Pointer to device data structure to free.
*
* This routine frees the previously allocated device data structure passed.
@@ -5741,7 +6359,7 @@ lpfc_delete_device_data(struct lpfc_hba *phba,
/**
* __lpfc_get_device_data - returns the device data for the specified lun
- * @pha: Pointer to host bus adapter structure.
+ * @phba: Pointer to host bus adapter structure.
* @list: Point to list to search.
* @vport_wwpn: Pointer to vport's wwpn information
* @target_wwpn: Pointer to target's wwpn information
@@ -5783,7 +6401,7 @@ __lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list,
/**
* lpfc_find_next_oas_lun - searches for the next oas lun
- * @pha: Pointer to host bus adapter structure.
+ * @phba: Pointer to host bus adapter structure.
* @vport_wwpn: Pointer to vport's wwpn information
* @target_wwpn: Pointer to target's wwpn information
* @starting_lun: Pointer to the lun to start searching for
@@ -5791,6 +6409,7 @@ __lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list,
* @found_target_wwpn: Pointer to the found lun's target wwpn information
* @found_lun: Pointer to the found lun.
* @found_lun_status: Pointer to status of the found lun.
+ * @found_lun_pri: Pointer to priority of the found lun.
*
* This routine searches the luns list for the specified lun
* or the first lun for the vport/target. If the vport wwpn contains
@@ -5885,10 +6504,11 @@ lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
/**
* lpfc_enable_oas_lun - enables a lun for OAS operations
- * @pha: Pointer to host bus adapter structure.
+ * @phba: Pointer to host bus adapter structure.
* @vport_wwpn: Pointer to vport's wwpn information
* @target_wwpn: Pointer to target's wwpn information
* @lun: Lun
+ * @pri: Priority
*
* This routine enables a lun for oas operations. The routines does so by
* doing the following :
@@ -5945,10 +6565,11 @@ lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
/**
* lpfc_disable_oas_lun - disables a lun for OAS operations
- * @pha: Pointer to host bus adapter structure.
+ * @phba: Pointer to host bus adapter structure.
* @vport_wwpn: Pointer to vport's wwpn information
* @target_wwpn: Pointer to target's wwpn information
* @lun: Lun
+ * @pri: Priority
*
* This routine disables a lun for oas operations. The routines does so by
* doing the following :
@@ -6029,7 +6650,7 @@ struct scsi_host_template lpfc_template_nvme = {
.sg_tablesize = 1,
.cmd_per_lun = 1,
.shost_attrs = lpfc_hba_attrs,
- .max_sectors = 0xFFFF,
+ .max_sectors = 0xFFFFFFFF,
.vendor_id = LPFC_NL_VENDOR_ID,
.track_queue_depth = 0,
};
@@ -6054,7 +6675,7 @@ struct scsi_host_template lpfc_template = {
.sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
.cmd_per_lun = LPFC_CMD_PER_LUN,
.shost_attrs = lpfc_hba_attrs,
- .max_sectors = 0xFFFF,
+ .max_sectors = 0xFFFFFFFF,
.vendor_id = LPFC_NL_VENDOR_ID,
.change_queue_depth = scsi_change_queue_depth,
.track_queue_depth = 1,
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index e158cd77d387..95caad764fb7 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -90,12 +90,138 @@ static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba,
struct lpfc_queue *cq,
struct lpfc_cqe *cqe);
+union lpfc_wqe128 lpfc_iread_cmd_template;
+union lpfc_wqe128 lpfc_iwrite_cmd_template;
+union lpfc_wqe128 lpfc_icmnd_cmd_template;
+
static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
return &iocbq->iocb;
}
+/* Setup WQE templates for IOs */
+void lpfc_wqe_cmd_template(void)
+{
+ union lpfc_wqe128 *wqe;
+
+ /* IREAD template */
+ wqe = &lpfc_iread_cmd_template;
+ memset(wqe, 0, sizeof(union lpfc_wqe128));
+
+ /* Word 0, 1, 2 - BDE is variable */
+
+ /* Word 3 - cmd_buff_len, payload_offset_len is zero */
+
+ /* Word 4 - total_xfer_len is variable */
+
+ /* Word 5 - is zero */
+
+ /* Word 6 - ctxt_tag, xri_tag is variable */
+
+ /* Word 7 */
+ bf_set(wqe_cmnd, &wqe->fcp_iread.wqe_com, CMD_FCP_IREAD64_WQE);
+ bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, PARM_READ_CHECK);
+ bf_set(wqe_class, &wqe->fcp_iread.wqe_com, CLASS3);
+ bf_set(wqe_ct, &wqe->fcp_iread.wqe_com, SLI4_CT_RPI);
+
+ /* Word 8 - abort_tag is variable */
+
+ /* Word 9 - reqtag is variable */
+
+ /* Word 10 - dbde, wqes is variable */
+ bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0);
+ bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
+ bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, LPFC_WQE_LENLOC_WORD4);
+ bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
+ bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);
+
+ /* Word 11 - pbde is variable */
+ bf_set(wqe_cmd_type, &wqe->fcp_iread.wqe_com, COMMAND_DATA_IN);
+ bf_set(wqe_cqid, &wqe->fcp_iread.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+ bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);
+
+ /* Word 12 - is zero */
+
+ /* Word 13, 14, 15 - PBDE is variable */
+
+ /* IWRITE template */
+ wqe = &lpfc_iwrite_cmd_template;
+ memset(wqe, 0, sizeof(union lpfc_wqe128));
+
+ /* Word 0, 1, 2 - BDE is variable */
+
+ /* Word 3 - cmd_buff_len, payload_offset_len is zero */
+
+ /* Word 4 - total_xfer_len is variable */
+
+ /* Word 5 - initial_xfer_len is variable */
+
+ /* Word 6 - ctxt_tag, xri_tag is variable */
+
+ /* Word 7 */
+ bf_set(wqe_cmnd, &wqe->fcp_iwrite.wqe_com, CMD_FCP_IWRITE64_WQE);
+ bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, PARM_READ_CHECK);
+ bf_set(wqe_class, &wqe->fcp_iwrite.wqe_com, CLASS3);
+ bf_set(wqe_ct, &wqe->fcp_iwrite.wqe_com, SLI4_CT_RPI);
+
+ /* Word 8 - abort_tag is variable */
+
+ /* Word 9 - reqtag is variable */
+
+ /* Word 10 - dbde, wqes is variable */
+ bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0);
+ bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
+ bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_LENLOC_WORD4);
+ bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
+ bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
+
+ /* Word 11 - pbde is variable */
+ bf_set(wqe_cmd_type, &wqe->fcp_iwrite.wqe_com, COMMAND_DATA_OUT);
+ bf_set(wqe_cqid, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+ bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);
+
+ /* Word 12 - is zero */
+
+ /* Word 13, 14, 15 - PBDE is variable */
+
+ /* ICMND template */
+ wqe = &lpfc_icmnd_cmd_template;
+ memset(wqe, 0, sizeof(union lpfc_wqe128));
+
+ /* Word 0, 1, 2 - BDE is variable */
+
+ /* Word 3 - payload_offset_len is variable */
+
+ /* Word 4, 5 - is zero */
+
+ /* Word 6 - ctxt_tag, xri_tag is variable */
+
+ /* Word 7 */
+ bf_set(wqe_cmnd, &wqe->fcp_icmd.wqe_com, CMD_FCP_ICMND64_WQE);
+ bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
+ bf_set(wqe_class, &wqe->fcp_icmd.wqe_com, CLASS3);
+ bf_set(wqe_ct, &wqe->fcp_icmd.wqe_com, SLI4_CT_RPI);
+
+ /* Word 8 - abort_tag is variable */
+
+ /* Word 9 - reqtag is variable */
+
+ /* Word 10 - dbde, wqes is variable */
+ bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
+ bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_NONE);
+ bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, LPFC_WQE_LENLOC_NONE);
+ bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
+ bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
+
+ /* Word 11 */
+ bf_set(wqe_cmd_type, &wqe->fcp_icmd.wqe_com, COMMAND_DATA_IN);
+ bf_set(wqe_cqid, &wqe->fcp_icmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+ bf_set(wqe_pbde, &wqe->fcp_icmd.wqe_com, 0);
+
+ /* Word 12, 13, 14, 15 - is zero */
+}
+
#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
/**
* lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
@@ -150,6 +276,7 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
/* sanity check on queue memory */
if (unlikely(!q))
return -ENOMEM;
+
temp_wqe = lpfc_sli4_qe(q, q->host_index);
/* If the host has not yet processed the next entry then we are done */
@@ -860,7 +987,7 @@ lpfc_clr_rrq_active(struct lpfc_hba *phba,
{
struct lpfc_nodelist *ndlp = NULL;
- if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
+ if (rrq->vport)
ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);
/* The target DID could have been swapped (cable swap)
@@ -1061,12 +1188,6 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
goto out;
}
- /*
- * set the active bit even if there is no mem available.
- */
- if (NLP_CHK_FREE_REQ(ndlp))
- goto out;
-
if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
goto out;
@@ -1289,6 +1410,11 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
(sglq->state != SGL_XRI_ABORTED)) {
spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
iflag);
+
+ /* Check if we can get a reference on ndlp */
+ if (sglq->ndlp && !lpfc_nlp_get(sglq->ndlp))
+ sglq->ndlp = NULL;
+
list_add(&sglq->list,
&phba->sli4_hba.lpfc_abts_els_sgl_list);
spin_unlock_irqrestore(
@@ -2449,10 +2575,10 @@ __lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
- spin_lock_irqsave(&vport->phba->ndlp_lock, iflags);
+ spin_lock_irqsave(&ndlp->lock, iflags);
ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
- spin_unlock_irqrestore(&vport->phba->ndlp_lock, iflags);
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
}
ndlp->nlp_flag &= ~NLP_UNREG_INP;
}
@@ -2529,9 +2655,10 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
vport,
KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
"1438 UNREG cmpl deferred mbox x%x "
- "on NPort x%x Data: x%x x%x %px\n",
+ "on NPort x%x Data: x%x x%x %px x%x x%x\n",
ndlp->nlp_rpi, ndlp->nlp_DID,
- ndlp->nlp_flag, ndlp->nlp_defer_did, ndlp);
+ ndlp->nlp_flag, ndlp->nlp_defer_did,
+ ndlp, vport->load_flag, kref_read(&ndlp->kref));
if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
(ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
@@ -2541,8 +2668,12 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
} else {
__lpfc_sli_rpi_release(vport, ndlp);
}
- if (vport->load_flag & FC_UNLOADING)
- lpfc_nlp_put(ndlp);
+
+ /* The unreg_login mailbox is complete and had a
+ * reference that has to be released. The PLOGI
+ * got its own ref.
+ */
+ lpfc_nlp_put(ndlp);
pmb->ctx_ndlp = NULL;
}
}
@@ -2566,7 +2697,7 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
*
* This function is the unreg rpi mailbox completion handler. It
* frees the memory resources associated with the completed mailbox
- * command. An additional refrenece is put on the ndlp to prevent
+ * command. An additional reference is put on the ndlp to prevent
* lpfc_nlp_release from freeing the rpi bit in the bitmask before
* the unreg mailbox command completes, this routine puts the
* reference back.
@@ -2586,16 +2717,15 @@ lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
LPFC_SLI_INTF_IF_TYPE_2)) {
if (ndlp) {
lpfc_printf_vlog(
- vport, KERN_INFO, LOG_MBOX | LOG_SLI,
+ vport, KERN_INFO, LOG_MBOX | LOG_SLI,
"0010 UNREG_LOGIN vpi:%x "
"rpi:%x DID:%x defer x%x flg x%x "
- "map:%x %px\n",
+ "%px\n",
vport->vpi, ndlp->nlp_rpi,
ndlp->nlp_DID, ndlp->nlp_defer_did,
ndlp->nlp_flag,
- ndlp->nlp_usg_map, ndlp);
+ ndlp);
ndlp->nlp_flag &= ~NLP_LOGO_ACC;
- lpfc_nlp_put(ndlp);
/* Check to see if there are any deferred
* events to process
@@ -2618,6 +2748,8 @@ lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
} else {
__lpfc_sli_rpi_release(vport, ndlp);
}
+
+ lpfc_nlp_put(ndlp);
}
}
}
@@ -2852,7 +2984,7 @@ lpfc_nvme_unsol_ls_handler(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
/* validate the source of the LS is logged in */
ndlp = lpfc_findnode_did(phba->pport, sid);
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
+ if (!ndlp ||
((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
(ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
@@ -4077,7 +4209,7 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
spin_lock_irq(&phba->hbalock);
/* Next issue ABTS for everything on the txcmplq */
list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
- lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+ lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
spin_unlock_irq(&phba->hbalock);
} else {
spin_lock_irq(&phba->hbalock);
@@ -4086,7 +4218,7 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
/* Next issue ABTS for everything on the txcmplq */
list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
- lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+ lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
spin_unlock_irq(&phba->hbalock);
}
@@ -7248,12 +7380,16 @@ lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
struct rqb_dmabuf *rqb_buffer;
LIST_HEAD(rqb_buf_list);
- spin_lock_irqsave(&phba->hbalock, flags);
rqbp = hrq->rqbp;
for (i = 0; i < count; i++) {
+ spin_lock_irqsave(&phba->hbalock, flags);
/* IF RQ is already full, don't bother */
- if (rqbp->buffer_count + i >= rqbp->entry_count - 1)
+ if (rqbp->buffer_count + i >= rqbp->entry_count - 1) {
+ spin_unlock_irqrestore(&phba->hbalock, flags);
break;
+ }
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+
rqb_buffer = rqbp->rqb_alloc_buffer(phba);
if (!rqb_buffer)
break;
@@ -7262,6 +7398,8 @@ lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
rqb_buffer->idx = idx;
list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
}
+
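+ /* Re-acquire the lock to post the batch of allocated buffers */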
+ spin_lock_irqsave(&phba->hbalock, flags);
while (!list_empty(&rqb_buf_list)) {
list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
hbuf.list);
@@ -9189,7 +9327,6 @@ lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
"1420 Invalid HBA PCI-device group: 0x%x\n",
dev_grp);
return -ENODEV;
- break;
}
return 0;
}
@@ -10072,7 +10209,6 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
"2014 Invalid command 0x%x\n",
iocbq->iocb.ulpCommand);
return IOCB_ERROR;
- break;
}
if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
@@ -10094,6 +10230,96 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
}
/**
+ * __lpfc_sli_issue_fcp_io_s3 - SLI3 device for sending fcp io iocb
+ * @phba: Pointer to HBA context object.
+ * @ring_number: SLI ring number to issue iocb on.
+ * @piocb: Pointer to command iocb.
+ * @flag: Flag indicating if this command can be put into txq.
+ *
+ * __lpfc_sli_issue_fcp_io_s3 is a wrapper function that invokes the lockless
+ * version to send an iocb command to an HBA with SLI-3 interface spec.
+ *
+ * This function takes the hbalock before invoking the lockless version.
+ * The function returns success after it successfully submits the iocb to
+ * firmware or adds it to the txq.
+ **/
+static int
+__lpfc_sli_issue_fcp_io_s3(struct lpfc_hba *phba, uint32_t ring_number,
+ struct lpfc_iocbq *piocb, uint32_t flag)
+{
+ unsigned long iflags;
+ int rc;
+
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ rc = __lpfc_sli_issue_iocb_s3(phba, ring_number, piocb, flag);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+ return rc;
+}
+
+/**
+ * __lpfc_sli_issue_fcp_io_s4 - SLI4 device for sending fcp io wqe
+ * @phba: Pointer to HBA context object.
+ * @ring_number: SLI ring number to issue wqe on.
+ * @piocb: Pointer to command iocb.
+ * @flag: Flag indicating if this command can be put into txq.
+ *
+ * __lpfc_sli_issue_fcp_io_s4 is used by other functions in the driver to issue
+ * a wqe command to an HBA with SLI-4 interface spec.
+ *
+ * This function is a lockless version. The function returns success
+ * after it successfully submits the wqe to firmware or adds it to the
+ * txq.
+ **/
+static int
+__lpfc_sli_issue_fcp_io_s4(struct lpfc_hba *phba, uint32_t ring_number,
+ struct lpfc_iocbq *piocb, uint32_t flag)
+{
+ int rc;
+ struct lpfc_io_buf *lpfc_cmd =
+ (struct lpfc_io_buf *)piocb->context1;
+ union lpfc_wqe128 *wqe = &piocb->wqe;
+ struct sli4_sge *sgl;
+
+ /* 128 byte wqe support here */
+ sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
+
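+ /* Either embed the FCP_CMND payload in the WQE or reference it
+ * with an inline BDE.
+ */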
+ if (phba->fcp_embed_io) {
+ struct fcp_cmnd *fcp_cmnd;
+ u32 *ptr;
+
+ fcp_cmnd = lpfc_cmd->fcp_cmnd;
+
+ /* Word 0-2 - FCP_CMND */
+ wqe->generic.bde.tus.f.bdeFlags =
+ BUFF_TYPE_BDE_IMMED;
+ wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
+ wqe->generic.bde.addrHigh = 0;
+ wqe->generic.bde.addrLow = 88; /* Word 22 */
+
+ bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
+ bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
+
+ /* Word 22-29 FCP CMND Payload */
+ ptr = &wqe->words[22];
+ memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
+ } else {
+ /* Word 0-2 - Inline BDE */
+ wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ wqe->generic.bde.tus.f.bdeSize = sizeof(struct fcp_cmnd);
+ wqe->generic.bde.addrHigh = sgl->addr_hi;
+ wqe->generic.bde.addrLow = sgl->addr_lo;
+
+ /* Word 10 */
+ bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
+ bf_set(wqe_wqes, &wqe->generic.wqe_com, 0);
+ }
+
+ rc = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, piocb);
+ return rc;
+}
+
+/**
* __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
* @phba: Pointer to HBA context object.
* @ring_number: SLI ring number to issue iocb on.
@@ -10159,9 +10385,10 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
}
}
}
- } else if (piocb->iocb_flag & LPFC_IO_FCP)
+ } else if (piocb->iocb_flag & LPFC_IO_FCP) {
/* These IO's already have an XRI and a mapped sgl. */
sglq = NULL;
+ }
else {
/*
* This is a continuation of a command (CX), so this
@@ -10189,6 +10416,25 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
return 0;
}
+/**
+ * lpfc_sli_issue_fcp_io - Wrapper func for issuing fcp i/o
+ *
+ * This routine wraps the actual fcp i/o function, issuing a WQE for SLI-4
+ * or an IOCB for SLI-3 through the __lpfc_sli_issue_fcp_io function
+ * pointer in the lpfc_hba struct.
+ *
+ * Return codes:
+ * IOCB_ERROR - Error
+ * IOCB_SUCCESS - Success
+ * IOCB_BUSY - Busy
+ **/
+int
+lpfc_sli_issue_fcp_io(struct lpfc_hba *phba, uint32_t ring_number,
+ struct lpfc_iocbq *piocb, uint32_t flag)
+{
+ return phba->__lpfc_sli_issue_fcp_io(phba, ring_number, piocb, flag);
+}
+
/*
* __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
*
@@ -10224,17 +10470,18 @@ lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
case LPFC_PCI_DEV_LP:
phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
+ phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s3;
break;
case LPFC_PCI_DEV_OC:
phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
+ phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s4;
break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"1419 Invalid HBA PCI-device group: 0x%x\n",
dev_grp);
return -ENODEV;
- break;
}
phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
return 0;
@@ -10364,6 +10611,32 @@ lpfc_extra_ring_setup( struct lpfc_hba *phba)
return 0;
}
+static void
+lpfc_sli_post_recovery_event(struct lpfc_hba *phba,
+ struct lpfc_nodelist *ndlp)
+{
+ unsigned long iflags;
+ struct lpfc_work_evt *evtp = &ndlp->recovery_evt;
+
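+ /* Queue at most one recovery event per node at a time */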
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ if (!list_empty(&evtp->evt_listp)) {
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ return;
+ }
+
+ /* Incrementing the reference count until the queued work is done. */
+ evtp->evt_arg1 = lpfc_nlp_get(ndlp);
+ if (!evtp->evt_arg1) {
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ return;
+ }
+ evtp->evt = LPFC_EVT_RECOVER_PORT;
+ list_add_tail(&evtp->evt_listp, &phba->work_list);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+ lpfc_worker_wake_up(phba);
+}
+
/* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
* @phba: Pointer to HBA context object.
* @iocbq: Pointer to iocb object.
@@ -10397,7 +10670,7 @@ lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
if (!vport)
goto err_exit;
ndlp = lpfc_findnode_rpi(vport, rpi);
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+ if (!ndlp)
goto err_exit;
if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
@@ -10427,17 +10700,15 @@ lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
struct lpfc_nodelist *ndlp,
struct sli4_wcqe_xri_aborted *axri)
{
- struct lpfc_vport *vport;
uint32_t ext_status = 0;
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+ if (!ndlp) {
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"3115 Node Context not found, driver "
"ignoring abts err event\n");
return;
}
- vport = ndlp->vport;
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"3116 Port generated FCP XRI ABORT event on "
"vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
@@ -10454,7 +10725,7 @@ lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
ext_status = axri->parameter & IOERR_PARAM_MASK;
if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
- lpfc_sli_abts_recover_port(vport, ndlp);
+ lpfc_sli_post_recovery_event(phba, ndlp);
}
/**
@@ -10908,7 +11179,8 @@ lpfc_sli_host_down(struct lpfc_vport *vport)
&pring->txcmplq, list) {
if (iocb->vport != vport)
continue;
- lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+ lpfc_sli_issue_abort_iotag(phba, pring, iocb,
+ NULL);
}
pring->flag = prev_pring_flag;
}
@@ -10935,7 +11207,8 @@ lpfc_sli_host_down(struct lpfc_vport *vport)
&pring->txcmplq, list) {
if (iocb->vport != vport)
continue;
- lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+ lpfc_sli_issue_abort_iotag(phba, pring, iocb,
+ NULL);
}
pring->flag = prev_pring_flag;
}
@@ -11324,35 +11597,37 @@ lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
"x%x x%x x%x\n",
irsp->ulpIoTag, irsp->ulpStatus,
irsp->un.ulpWord[4], irsp->ulpTimeout);
+ lpfc_nlp_put((struct lpfc_nodelist *)cmdiocb->context1);
if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
lpfc_ct_free_iocb(phba, cmdiocb);
else
lpfc_els_free_iocb(phba, cmdiocb);
- return;
}
/**
- * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
+ * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
* @phba: Pointer to HBA context object.
* @pring: Pointer to driver SLI ring object.
* @cmdiocb: Pointer to driver command iocb object.
+ * @cmpl: completion function.
+ *
+ * This function issues an abort iocb for the provided command iocb. In case
+ * of unloading, the abort iocb will not be issued to commands on the ELS
+ * ring. Instead, the callback function is changed for those commands
+ * so that nothing happens when they finish. This function is called with
+ * hbalock held and no ring_lock held (SLI4). The function returns
+ * IOCB_ABORTING when the command iocb is already being aborted or is
+ * itself an abort request.
*
- * This function issues an abort iocb for the provided command iocb down to
- * the port. Other than the case the outstanding command iocb is an abort
- * request, this function issues abort out unconditionally. This function is
- * called with hbalock held. The function returns 0 when it fails due to
- * memory allocation failure or when the command iocb is an abort request.
- * The hbalock is asserted held in the code path calling this routine.
**/
-static int
-lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
- struct lpfc_iocbq *cmdiocb)
+int
+lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ struct lpfc_iocbq *cmdiocb, void *cmpl)
{
struct lpfc_vport *vport = cmdiocb->vport;
struct lpfc_iocbq *abtsiocbp;
IOCB_t *icmd = NULL;
IOCB_t *iabt = NULL;
- int retval;
+ int retval = IOCB_ERROR;
unsigned long iflags;
struct lpfc_nodelist *ndlp;
@@ -11365,12 +11640,33 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
(cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
- return 0;
+ return IOCB_ABORTING;
+
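+ /* No ring to issue the abort on; just detach the completion handler */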
+ if (!pring) {
+ if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
+ cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
+ else
+ cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
+ return retval;
+ }
+
+ /*
+ * If we're unloading, don't abort iocb on the ELS ring, but change
+ * the callback so that nothing happens when it finishes.
+ */
+ if ((vport->load_flag & FC_UNLOADING) &&
+ pring->ringno == LPFC_ELS_RING) {
+ if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
+ cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
+ else
+ cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
+ return retval;
+ }
/* issue ABTS for this IOCB based on iotag */
abtsiocbp = __lpfc_sli_get_iocbq(phba);
if (abtsiocbp == NULL)
- return 0;
+ return IOCB_NORESOURCE;
/* This signals the response to set the correct status
* before calling the completion handler
@@ -11382,7 +11678,8 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
iabt->un.acxri.abortContextTag = icmd->ulpContext;
if (phba->sli_rev == LPFC_SLI_REV4) {
iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
- iabt->un.acxri.abortContextTag = cmdiocb->iotag;
+ if (pring->ringno == LPFC_ELS_RING)
+ iabt->un.acxri.abortContextTag = cmdiocb->iotag;
} else {
iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
if (pring->ringno == LPFC_ELS_RING) {
@@ -11395,8 +11692,10 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/* ABTS WQE must go to the same WQ as the WQE to be aborted */
abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
- if (cmdiocb->iocb_flag & LPFC_IO_FCP)
+ if (cmdiocb->iocb_flag & LPFC_IO_FCP) {
+ abtsiocbp->iocb_flag |= LPFC_IO_FCP;
abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
+ }
if (cmdiocb->iocb_flag & LPFC_IO_FOF)
abtsiocbp->iocb_flag |= LPFC_IO_FOF;
@@ -11405,20 +11704,16 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
else
iabt->ulpCommand = CMD_CLOSE_XRI_CN;
- abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
+ if (cmpl)
+ abtsiocbp->iocb_cmpl = cmpl;
+ else
+ abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
abtsiocbp->vport = vport;
- lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
- "0339 Abort xri x%x, original iotag x%x, "
- "abort cmd iotag x%x\n",
- iabt->un.acxri.abortIoTag,
- iabt->un.acxri.abortContextTag,
- abtsiocbp->iotag);
-
if (phba->sli_rev == LPFC_SLI_REV4) {
pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
if (unlikely(pring == NULL))
- return 0;
+ goto abort_iotag_exit;
/* Note: both hbalock and ring_lock need to be set here */
spin_lock_irqsave(&pring->ring_lock, iflags);
retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
@@ -11429,76 +11724,20 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
abtsiocbp, 0);
}
- if (retval)
- __lpfc_sli_release_iocbq(phba, abtsiocbp);
-
- /*
- * Caller to this routine should check for IOCB_ERROR
- * and handle it properly. This routine no longer removes
- * iocb off txcmplq and call compl in case of IOCB_ERROR.
- */
- return retval;
-}
-
-/**
- * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
- * @phba: Pointer to HBA context object.
- * @pring: Pointer to driver SLI ring object.
- * @cmdiocb: Pointer to driver command iocb object.
- *
- * This function issues an abort iocb for the provided command iocb. In case
- * of unloading, the abort iocb will not be issued to commands on the ELS
- * ring. Instead, the callback function shall be changed to those commands
- * so that nothing happens when them finishes. This function is called with
- * hbalock held. The function returns 0 when the command iocb is an abort
- * request.
- **/
-int
-lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
- struct lpfc_iocbq *cmdiocb)
-{
- struct lpfc_vport *vport = cmdiocb->vport;
- int retval = IOCB_ERROR;
- IOCB_t *icmd = NULL;
-
- lockdep_assert_held(&phba->hbalock);
-
- /*
- * There are certain command types we don't want to abort. And we
- * don't want to abort commands that are already in the process of
- * being aborted.
- */
- icmd = &cmdiocb->iocb;
- if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
- icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
- (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
- return 0;
+abort_iotag_exit:
- if (!pring) {
- if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
- cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
- else
- cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
- goto abort_iotag_exit;
- }
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+ "0339 Abort xri x%x, original iotag x%x, "
+ "abort cmd iotag x%x retval x%x\n",
+ iabt->un.acxri.abortIoTag,
+ iabt->un.acxri.abortContextTag,
+ abtsiocbp->iotag, retval);
- /*
- * If we're unloading, don't abort iocb on the ELS ring, but change
- * the callback so that nothing happens when it finishes.
- */
- if ((vport->load_flag & FC_UNLOADING) &&
- (pring->ringno == LPFC_ELS_RING)) {
- if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
- cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
- else
- cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
- goto abort_iotag_exit;
+ if (retval) {
+ cmdiocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
+ __lpfc_sli_release_iocbq(phba, abtsiocbp);
}
- /* Now, we try to issue the abort to the cmdiocb out */
- retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
-
-abort_iotag_exit:
/*
* Caller to this routine should check for IOCB_ERROR
* and handle it properly. This routine no longer removes
@@ -11643,6 +11882,33 @@ lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
}
/**
+ * lpfc_sli4_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
+ * @phba: Pointer to HBA context object
+ * @cmdiocb: Pointer to command iocb object.
+ * @wcqe: Pointer to the completion wcqe.
+ *
+ * This function is called by the ring event handler, with no lock held,
+ * when an aborted FCP iocb completes on an SLI-4 adapter. It frees the
+ * iocb.
+ **/
+void
+lpfc_sli4_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_wcqe_complete *wcqe)
+{
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "3017 ABORT_XRI_CN completing on rpi x%x "
+ "original iotag x%x, abort cmd iotag x%x "
+ "status 0x%x, reason 0x%x\n",
+ cmdiocb->iocb.un.acxri.abortContextTag,
+ cmdiocb->iocb.un.acxri.abortIoTag,
+ cmdiocb->iotag,
+ (bf_get(lpfc_wcqe_c_status, wcqe)
+ & LPFC_IOCB_STATUS_MASK),
+ wcqe->parameter);
+ lpfc_sli_release_iocbq(phba, cmdiocb);
+}
+
+/**
* lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
* @phba: Pointer to HBA context object
* @cmdiocb: Pointer to command iocb object.
@@ -11695,10 +11961,8 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *iocbq;
- struct lpfc_iocbq *abtsiocb;
- struct lpfc_sli_ring *pring_s4;
- IOCB_t *cmd = NULL;
int errcnt = 0, ret_val = 0;
+ unsigned long iflags;
int i;
/* all I/Os are in process of being flushed */
@@ -11712,62 +11976,12 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
abort_cmd) != 0)
continue;
- /*
- * If the iocbq is already being aborted, don't take a second
- * action, but do count it.
- */
- if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
- continue;
-
- /* issue ABTS for this IOCB based on iotag */
- abtsiocb = lpfc_sli_get_iocbq(phba);
- if (abtsiocb == NULL) {
- errcnt++;
- continue;
- }
-
- /* indicate the IO is being aborted by the driver. */
- iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
-
- cmd = &iocbq->iocb;
- abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
- abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
- if (phba->sli_rev == LPFC_SLI_REV4)
- abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
- else
- abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
- abtsiocb->iocb.ulpLe = 1;
- abtsiocb->iocb.ulpClass = cmd->ulpClass;
- abtsiocb->vport = vport;
-
- /* ABTS WQE must go to the same WQ as the WQE to be aborted */
- abtsiocb->hba_wqidx = iocbq->hba_wqidx;
- if (iocbq->iocb_flag & LPFC_IO_FCP)
- abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
- if (iocbq->iocb_flag & LPFC_IO_FOF)
- abtsiocb->iocb_flag |= LPFC_IO_FOF;
-
- if (lpfc_is_link_up(phba))
- abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
- else
- abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
-
- /* Setup callback routine and issue the command. */
- abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
- if (phba->sli_rev == LPFC_SLI_REV4) {
- pring_s4 = lpfc_sli4_calc_ring(phba, iocbq);
- if (!pring_s4)
- continue;
- ret_val = lpfc_sli_issue_iocb(phba, pring_s4->ringno,
- abtsiocb, 0);
- } else
- ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
- abtsiocb, 0);
- if (ret_val == IOCB_ERROR) {
- lpfc_sli_release_iocbq(phba, abtsiocb);
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocbq,
+ lpfc_sli_abort_fcp_cmpl);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ if (ret_val != IOCB_SUCCESS)
errcnt++;
- continue;
- }
}
return errcnt;
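For reference, a minimal sketch (not part of the patch) of driving the consolidated abort entry point for a single in-flight iocb, mirroring the hbalock usage in lpfc_sli_abort_iocb() above; the wrapper name is hypothetical:

/* Hedged sketch: abort one iocb through the unified entry point.
 * hbalock must be held around the call; IOCB_ABORTING means an abort
 * is already pending for this iocb.
 */
static int example_abort_one(struct lpfc_hba *phba,
			     struct lpfc_sli_ring *pring,
			     struct lpfc_iocbq *iocbq)
{
	unsigned long iflags;
	int rc;

	spin_lock_irqsave(&phba->hbalock, iflags);
	rc = lpfc_sli_issue_abort_iotag(phba, pring, iocbq,
					lpfc_sli_abort_fcp_cmpl);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return (rc == IOCB_SUCCESS) ? 0 : -EIO;
}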
@@ -13062,23 +13276,30 @@ lpfc_sli_intr_handler(int irq, void *dev_id)
void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
{
struct lpfc_cq_event *cq_event;
+ unsigned long iflags;
/* First, declare the els xri abort event has been handled */
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflags);
phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+
/* Now, handle all the els xri abort events */
+ spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
/* Get the first event from the head of the event queue */
- spin_lock_irq(&phba->hbalock);
list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
cq_event, struct lpfc_cq_event, list);
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
+ iflags);
/* Notify aborted XRI for ELS work queue */
lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
+
/* Free the event processed back to the free pool */
lpfc_sli4_cq_event_release(phba, cq_event);
+ spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
+ iflags);
}
+ spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
}
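The locking change above follows the usual drain pattern: the new dedicated lock is held only across list manipulation and dropped while each event is handled. A generic, hedged sketch of the same pattern, using the driver's list_remove_head() helper and a hypothetical example_handle():

/* Hedged sketch: the lock protects only the list; the handler runs
 * unlocked so it may take other locks.
 */
static void example_drain(struct lpfc_hba *phba, struct list_head *q,
			  spinlock_t *lock)
{
	struct lpfc_cq_event *ev;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	while (!list_empty(q)) {
		list_remove_head(q, ev, struct lpfc_cq_event, list);
		spin_unlock_irqrestore(lock, flags);
		example_handle(phba, ev);	/* hypothetical, unlocked */
		spin_lock_irqsave(lock, flags);
	}
	spin_unlock_irqrestore(lock, flags);
}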
/**
@@ -13289,9 +13510,13 @@ lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
if (!cq_event)
return false;
- spin_lock_irqsave(&phba->hbalock, iflags);
+
+ spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
+ spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
+
/* Set the async event flag */
+ spin_lock_irqsave(&phba->hbalock, iflags);
phba->hba_flag |= ASYNC_EVENT;
spin_unlock_irqrestore(&phba->hbalock, iflags);
@@ -13375,6 +13600,12 @@ lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
pmbox->un.varWords[0], pmb);
pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
pmb->ctx_buf = mp;
+
+ /* No reference taken here. This is a default
+ * RPI reg/immediate unreg cycle. The reference was
+ * taken in the reg rpi path and is released when
+ * this mailbox completes.
+ */
pmb->ctx_ndlp = ndlp;
pmb->vport = vport;
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
@@ -13566,17 +13797,20 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
break;
case LPFC_NVME_LS: /* NVME LS uses ELS resources */
case LPFC_ELS:
- cq_event = lpfc_cq_event_setup(
- phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
- if (!cq_event)
- return false;
+ cq_event = lpfc_cq_event_setup(phba, wcqe, sizeof(*wcqe));
+ if (!cq_event) {
+ workposted = false;
+ break;
+ }
cq_event->hdwq = cq->hdwq;
- spin_lock_irqsave(&phba->hbalock, iflags);
+ spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
+ iflags);
list_add_tail(&cq_event->list,
&phba->sli4_hba.sp_els_xri_aborted_work_queue);
/* Set the els xri abort event flag */
phba->hba_flag |= ELS_XRI_ABORT_EVENT;
- spin_unlock_irqrestore(&phba->hbalock, iflags);
+ spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
+ iflags);
workposted = true;
break;
default:
@@ -14046,7 +14280,9 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
#endif
if (cmdiocbq->iocb_cmpl == NULL) {
if (cmdiocbq->wqe_cmpl) {
- if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
+ /* For FCP the flag is cleared in wqe_cmpl */
+ if (!(cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
+ cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
spin_lock_irqsave(&phba->hbalock, iflags);
cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
spin_unlock_irqrestore(&phba->hbalock, iflags);
@@ -14063,6 +14299,7 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
return;
}
+	/* Only SLI4 non-IO commands still use IOCB */
/* Fake the irspiocb and copy necessary response information */
lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
@@ -15850,12 +16087,6 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
else
wq_create_version = LPFC_Q_CREATE_VERSION_0;
-
- if (phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT)
- wq_create_version = LPFC_Q_CREATE_VERSION_1;
- else
- wq_create_version = LPFC_Q_CREATE_VERSION_0;
-
switch (wq_create_version) {
case LPFC_Q_CREATE_VERSION_1:
bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
@@ -17864,15 +18095,6 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
}
/* Put ndlp onto pport node list */
lpfc_enqueue_node(vport, ndlp);
- } else if (!NLP_CHK_NODE_ACT(ndlp)) {
- /* re-setup ndlp without removing from node list */
- ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
- if (!ndlp) {
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
- "3275 Failed to active ndlp found "
- "for oxid:x%x SID:x%x\n", oxid, sid);
- return;
- }
}
/* Allocate buffer for rsp iocb */
@@ -17897,6 +18119,10 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
icmd->ulpClass = CLASS3;
icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
ctiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!ctiocb->context1) {
+ lpfc_sli_release_iocbq(phba, ctiocb);
+ return;
+ }
ctiocb->vport = phba->pport;
ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
@@ -19840,7 +20066,6 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
struct lpfc_dmabuf *mp;
struct lpfc_nodelist *ndlp;
struct lpfc_nodelist *act_mbx_ndlp = NULL;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
LIST_HEAD(mbox_cmd_list);
uint8_t restart_loop;
@@ -19894,9 +20119,9 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
restart_loop = 1;
spin_unlock_irq(&phba->hbalock);
- spin_lock(shost->host_lock);
+ spin_lock(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
- spin_unlock(shost->host_lock);
+ spin_unlock(&ndlp->lock);
spin_lock_irq(&phba->hbalock);
break;
}
@@ -19918,9 +20143,9 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
mb->ctx_ndlp = NULL;
if (ndlp) {
- spin_lock(shost->host_lock);
+ spin_lock(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
- spin_unlock(shost->host_lock);
+ spin_unlock(&ndlp->lock);
lpfc_nlp_put(ndlp);
}
}
@@ -19929,9 +20154,9 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
/* Release the ndlp with the cleaned-up active mailbox command */
if (act_mbx_ndlp) {
- spin_lock(shost->host_lock);
+ spin_lock(&act_mbx_ndlp->lock);
act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
- spin_unlock(shost->host_lock);
+ spin_unlock(&act_mbx_ndlp->lock);
lpfc_nlp_put(act_mbx_ndlp);
}
}
@@ -20213,7 +20438,8 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
}
/* NVME_FCREQ and NVME_ABTS requests */
- if (pwqe->iocb_flag & LPFC_IO_NVME) {
+ if (pwqe->iocb_flag & LPFC_IO_NVME ||
+ pwqe->iocb_flag & LPFC_IO_FCP) {
/* Get the IO distribution (hba_wqidx) for WQ assignment. */
wq = qp->io_wq;
pring = wq->pring;
@@ -20266,6 +20492,88 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
return WQE_ERROR;
}
+/**
+ * lpfc_sli4_issue_abort_iotag - SLI-4 WQE init & issue for the Abort
+ * @phba: Pointer to HBA context object.
+ * @cmdiocb: Pointer to driver command iocb object.
+ * @cmpl: completion function.
+ *
+ * Fill in the appropriate fields for the abort WQE and call the
+ * internal routine lpfc_sli4_issue_wqe to send the WQE. This function
+ * is called with hbalock held and no ring_lock held.
+ *
+ * RETURNS 0 - SUCCESS
+ **/
+
+int
+lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ void *cmpl)
+{
+ struct lpfc_vport *vport = cmdiocb->vport;
+ struct lpfc_iocbq *abtsiocb = NULL;
+ union lpfc_wqe128 *abtswqe;
+ struct lpfc_io_buf *lpfc_cmd;
+ int retval = IOCB_ERROR;
+ u16 xritag = cmdiocb->sli4_xritag;
+
+ /*
+	 * The scsi command cannot be in the txq; it is in flight because
+	 * pCmd still points at the SCSI command we have to abort. There is
+	 * no need to search the txcmplq. Just send the abort to the FW.
+ */
+
+ abtsiocb = __lpfc_sli_get_iocbq(phba);
+ if (!abtsiocb)
+ return WQE_NORESOURCE;
+
+ /* Indicate the IO is being aborted by the driver. */
+ cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
+
+ abtswqe = &abtsiocb->wqe;
+ memset(abtswqe, 0, sizeof(*abtswqe));
+
+ if (lpfc_is_link_up(phba))
+ bf_set(abort_cmd_ia, &abtswqe->abort_cmd, 1);
+ else
+ bf_set(abort_cmd_ia, &abtswqe->abort_cmd, 0);
+ bf_set(abort_cmd_criteria, &abtswqe->abort_cmd, T_XRI_TAG);
+ abtswqe->abort_cmd.rsrvd5 = 0;
+ abtswqe->abort_cmd.wqe_com.abort_tag = xritag;
+ bf_set(wqe_reqtag, &abtswqe->abort_cmd.wqe_com, abtsiocb->iotag);
+ bf_set(wqe_cmnd, &abtswqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
+ bf_set(wqe_xri_tag, &abtswqe->generic.wqe_com, 0);
+ bf_set(wqe_qosd, &abtswqe->abort_cmd.wqe_com, 1);
+ bf_set(wqe_lenloc, &abtswqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
+ bf_set(wqe_cmd_type, &abtswqe->abort_cmd.wqe_com, OTHER_COMMAND);
+
+ /* ABTS WQE must go to the same WQ as the WQE to be aborted */
+ abtsiocb->hba_wqidx = cmdiocb->hba_wqidx;
+ abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
+ if (cmdiocb->iocb_flag & LPFC_IO_FCP)
+ abtsiocb->iocb_flag |= LPFC_IO_FCP;
+ if (cmdiocb->iocb_flag & LPFC_IO_NVME)
+ abtsiocb->iocb_flag |= LPFC_IO_NVME;
+ if (cmdiocb->iocb_flag & LPFC_IO_FOF)
+ abtsiocb->iocb_flag |= LPFC_IO_FOF;
+ abtsiocb->vport = vport;
+ abtsiocb->wqe_cmpl = cmpl;
+
+ lpfc_cmd = container_of(cmdiocb, struct lpfc_io_buf, cur_iocbq);
+ retval = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, abtsiocb);
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
+ "0359 Abort xri x%x, original iotag x%x, "
+ "abort cmd iotag x%x retval x%x\n",
+ xritag, cmdiocb->iotag, abtsiocb->iotag, retval);
+
+ if (retval) {
+ cmdiocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
+ __lpfc_sli_release_iocbq(phba, abtsiocb);
+ }
+
+ return retval;
+}
+
#ifdef LPFC_MXP_STAT
/**
* lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 93d976ea8c5d..4f6936014ff5 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -130,6 +130,9 @@ struct lpfc_iocbq {
#define IOCB_BUSY 1
#define IOCB_ERROR 2
#define IOCB_TIMEDOUT 3
+#define IOCB_ABORTED 4
+#define IOCB_ABORTING 5
+#define IOCB_NORESOURCE 6
#define SLI_WQE_RET_WQE 1 /* Return WQE if cmd ring full */
@@ -138,6 +141,8 @@ struct lpfc_iocbq {
#define WQE_ERROR 2
#define WQE_TIMEDOUT 3
#define WQE_ABORTED 4
+#define WQE_ABORTING 5
+#define WQE_NORESOURCE 6
#define LPFC_MBX_WAKE 1
#define LPFC_MBX_IMED_UNREG 2
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index a966cdeb52ee..26f19c95380f 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -549,6 +549,14 @@ struct lpfc_pc_sli4_params {
uint32_t hdr_pp_align;
uint32_t sgl_pages_max;
uint32_t sgl_pp_align;
+ uint32_t mib_size;
+ uint16_t mi_ver;
+#define LPFC_MIB1_SUPPORT 1
+#define LPFC_MIB2_SUPPORT 2
+#define LPFC_MIB3_SUPPORT 3
+ uint16_t mi_value;
+#define LPFC_DFLT_MIB_VAL 2
+ uint8_t mib_bde_cnt;
uint8_t cqv;
uint8_t mqv;
uint8_t wqv;
@@ -920,8 +928,9 @@ struct lpfc_sli4_hba {
struct list_head sp_queue_event;
struct list_head sp_cqe_event_pool;
struct list_head sp_asynce_work_queue;
- struct list_head sp_fcp_xri_aborted_work_queue;
+ spinlock_t asynce_list_lock; /* protect sp_asynce_work_queue list */
struct list_head sp_els_xri_aborted_work_queue;
+ spinlock_t els_xri_abrt_list_lock; /* protect els_xri_aborted list */
struct list_head sp_unsol_work_queue;
struct lpfc_sli4_link link_state;
struct lpfc_sli4_lnk_info lnk_info;
@@ -1103,8 +1112,7 @@ void lpfc_sli4_async_event_proc(struct lpfc_hba *);
void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *);
int lpfc_sli4_resume_rpi(struct lpfc_nodelist *,
void (*)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *);
-void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
-void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);
+void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba);
void lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
struct sli4_wcqe_xri_aborted *axri,
struct lpfc_io_buf *lpfc_ncmd);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index c657abf22b75..234dca60995b 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "12.8.0.4"
+#define LPFC_DRIVER_VERSION "12.8.0.6"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
@@ -32,6 +32,6 @@
#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
LPFC_DRIVER_VERSION
-#define LPFC_COPYRIGHT "Copyright (C) 2017-2019 Broadcom. All Rights " \
+#define LPFC_COPYRIGHT "Copyright (C) 2017-2020 Broadcom. All Rights " \
"Reserved. The term \"Broadcom\" refers to Broadcom Inc. " \
"and/or its subsidiaries."
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index aa4e451d5dc1..a99fdfba7d27 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -462,7 +462,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
* up and ready to FDISC.
*/
ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
- if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
+ if (ndlp &&
ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) {
lpfc_set_disctmo(vport);
@@ -495,8 +495,7 @@ disable_vport(struct fc_vport *fc_vport)
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
ndlp = lpfc_findnode_did(vport, Fabric_DID);
- if (ndlp && NLP_CHK_NODE_ACT(ndlp)
- && phba->link_state >= LPFC_LINK_UP) {
+ if (ndlp && phba->link_state >= LPFC_LINK_UP) {
vport->unreg_vpi_cmpl = VPORT_INVAL;
timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
if (!lpfc_issue_els_npiv_logo(vport, ndlp))
@@ -510,8 +509,6 @@ disable_vport(struct fc_vport *fc_vport)
* calling lpfc_cleanup_rpis(vport, 1)
*/
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp))
- continue;
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
continue;
lpfc_disc_state_machine(vport, ndlp, NULL,
@@ -568,8 +565,7 @@ enable_vport(struct fc_vport *fc_vport)
* up and ready to FDISC.
*/
ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
- if (ndlp && NLP_CHK_NODE_ACT(ndlp)
- && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
+ if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) {
lpfc_set_disctmo(vport);
lpfc_initial_fdisc(vport);
@@ -597,16 +593,14 @@ lpfc_vport_disable(struct fc_vport *fc_vport, bool disable)
return enable_vport(fc_vport);
}
-
int
lpfc_vport_delete(struct fc_vport *fc_vport)
{
struct lpfc_nodelist *ndlp = NULL;
struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- struct lpfc_hba *phba = vport->phba;
+ struct lpfc_hba *phba = vport->phba;
long timeout;
- bool ns_ndlp_referenced = false;
if (vport->port_type == LPFC_PHYSICAL_PORT) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
@@ -623,9 +617,11 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
"static vport.\n");
return VPORT_ERROR;
}
+
spin_lock_irq(&phba->hbalock);
vport->load_flag |= FC_UNLOADING;
spin_unlock_irq(&phba->hbalock);
+
/*
* If we are not unloading the driver then prevent the vport_delete
* from happening until after this vport's discovery is finished.
@@ -653,64 +649,22 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
return VPORT_INVAL;
lpfc_free_sysfs_attr(vport);
-
lpfc_debugfs_terminate(vport);
- /*
- * The call to fc_remove_host might release the NameServer ndlp. Since
- * we might need to use the ndlp to send the DA_ID CT command,
- * increment the reference for the NameServer ndlp to prevent it from
- * being released.
- */
- ndlp = lpfc_findnode_did(vport, NameServer_DID);
- if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
- lpfc_nlp_get(ndlp);
- ns_ndlp_referenced = true;
- }
-
- /* Remove FC host and then SCSI host with the vport */
+ /* Remove FC host to break driver binding. */
fc_remove_host(shost);
scsi_remove_host(shost);
- ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
-
- /* In case of driver unload, we shall not perform fabric logo as the
- * worker thread already stopped at this stage and, in this case, we
- * can safely skip the fabric logo.
- */
- if (phba->pport->load_flag & FC_UNLOADING) {
- if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
- ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
- phba->link_state >= LPFC_LINK_UP) {
- /* First look for the Fabric ndlp */
- ndlp = lpfc_findnode_did(vport, Fabric_DID);
- if (!ndlp)
- goto skip_logo;
- else if (!NLP_CHK_NODE_ACT(ndlp)) {
- ndlp = lpfc_enable_node(vport, ndlp,
- NLP_STE_UNUSED_NODE);
- if (!ndlp)
- goto skip_logo;
- }
- /* Remove ndlp from vport npld list */
- lpfc_dequeue_node(vport, ndlp);
-
- /* Indicate free memory when release */
- spin_lock_irq(&phba->ndlp_lock);
- NLP_SET_FREE_REQ(ndlp);
- spin_unlock_irq(&phba->ndlp_lock);
- /* Kick off release ndlp when it can be safely done */
- lpfc_nlp_put(ndlp);
- }
+	/* Send the DA_ID and Fabric LOGO to clean up Nameserver entries. */
+ ndlp = lpfc_findnode_did(vport, Fabric_DID);
+ if (!ndlp)
goto skip_logo;
- }
- /* Otherwise, we will perform fabric logo as needed */
- if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
- ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
+ if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
phba->link_state >= LPFC_LINK_UP &&
phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
if (vport->cfg_enable_da_id) {
+ /* Send DA_ID and wait for a completion. */
timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
if (!lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0))
while (vport->ct_flags && timeout)
@@ -721,47 +675,19 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
"1829 CT command failed to "
"delete objects on fabric\n");
}
- /* First look for the Fabric ndlp */
- ndlp = lpfc_findnode_did(vport, Fabric_DID);
- if (!ndlp) {
- /* Cannot find existing Fabric ndlp, allocate one */
- ndlp = lpfc_nlp_init(vport, Fabric_DID);
- if (!ndlp)
- goto skip_logo;
- /* Indicate free memory when release */
- NLP_SET_FREE_REQ(ndlp);
- } else {
- if (!NLP_CHK_NODE_ACT(ndlp)) {
- ndlp = lpfc_enable_node(vport, ndlp,
- NLP_STE_UNUSED_NODE);
- if (!ndlp)
- goto skip_logo;
- }
-
- /* Remove ndlp from vport list */
- lpfc_dequeue_node(vport, ndlp);
- spin_lock_irq(&phba->ndlp_lock);
- if (!NLP_CHK_FREE_REQ(ndlp))
- /* Indicate free memory when release */
- NLP_SET_FREE_REQ(ndlp);
- else {
- /* Skip this if ndlp is already in free mode */
- spin_unlock_irq(&phba->ndlp_lock);
- goto skip_logo;
- }
- spin_unlock_irq(&phba->ndlp_lock);
- }
/*
* If the vpi is not registered, then a valid FDISC doesn't
* exist and there is no need for a ELS LOGO. Just cleanup
* the ndlp.
*/
- if (!(vport->vpi_state & LPFC_VPI_REGISTERED)) {
- lpfc_nlp_put(ndlp);
+ if (!(vport->vpi_state & LPFC_VPI_REGISTERED))
goto skip_logo;
- }
+	/* Issue a Fabric LOGO to clean up fabric resources. */
+ ndlp = lpfc_findnode_did(vport, Fabric_DID);
+ if (!ndlp)
+ goto skip_logo;
vport->unreg_vpi_cmpl = VPORT_INVAL;
timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
if (!lpfc_issue_els_npiv_logo(vport, ndlp))
@@ -774,18 +700,10 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
skip_logo:
- /*
- * If the NameServer ndlp has been incremented to allow the DA_ID CT
- * command to be sent, decrement the ndlp now.
- */
- if (ns_ndlp_referenced) {
- ndlp = lpfc_findnode_did(vport, NameServer_DID);
- lpfc_nlp_put(ndlp);
- }
-
lpfc_cleanup(vport);
- lpfc_sli_host_down(vport);
+ /* Remove scsi host now. The nodes are cleaned up. */
+ lpfc_sli_host_down(vport);
lpfc_stop_vport_timers(vport);
if (!(phba->pport->load_flag & FC_UNLOADING)) {
@@ -865,8 +783,6 @@ lpfc_vport_reset_stat_data(struct lpfc_vport *vport)
struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp))
- continue;
if (ndlp->lat_data)
memset(ndlp->lat_data, 0, LPFC_MAX_BUCKET_COUNT *
sizeof(struct lpfc_scsicmd_bkt));
@@ -887,8 +803,6 @@ lpfc_alloc_bucket(struct lpfc_vport *vport)
struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp))
- continue;
kfree(ndlp->lat_data);
ndlp->lat_data = NULL;
@@ -921,8 +835,6 @@ lpfc_free_bucket(struct lpfc_vport *vport)
struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp))
- continue;
kfree(ndlp->lat_data);
ndlp->lat_data = NULL;
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
index b5dde9d0d054..5c808fbc6ce2 100644
--- a/drivers/scsi/mac_scsi.c
+++ b/drivers/scsi/mac_scsi.c
@@ -285,7 +285,7 @@ static inline int macscsi_pread(struct NCR5380_hostdata *hostdata,
while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
BASR_DRQ | BASR_PHASE_MATCH,
- BASR_DRQ | BASR_PHASE_MATCH, HZ / 64)) {
+ BASR_DRQ | BASR_PHASE_MATCH, 0)) {
int bytes;
if (macintosh_config->ident == MAC_MODEL_IIFX)
@@ -304,7 +304,7 @@ static inline int macscsi_pread(struct NCR5380_hostdata *hostdata,
if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ,
BUS_AND_STATUS_REG, BASR_ACK,
- BASR_ACK, HZ / 64) < 0)
+ BASR_ACK, 0) < 0)
scmd_printk(KERN_DEBUG, hostdata->connected,
"%s: !REQ and !ACK\n", __func__);
if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH))
@@ -344,7 +344,7 @@ static inline int macscsi_pwrite(struct NCR5380_hostdata *hostdata,
while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
BASR_DRQ | BASR_PHASE_MATCH,
- BASR_DRQ | BASR_PHASE_MATCH, HZ / 64)) {
+ BASR_DRQ | BASR_PHASE_MATCH, 0)) {
int bytes;
if (macintosh_config->ident == MAC_MODEL_IIFX)
@@ -362,7 +362,7 @@ static inline int macscsi_pwrite(struct NCR5380_hostdata *hostdata,
if (NCR5380_poll_politely(hostdata, TARGET_COMMAND_REG,
TCR_LAST_BYTE_SENT,
TCR_LAST_BYTE_SENT,
- HZ / 64) < 0) {
+ 0) < 0) {
scmd_printk(KERN_ERR, hostdata->connected,
"%s: Last Byte Sent timeout\n", __func__);
result = -1;
@@ -372,7 +372,7 @@ static inline int macscsi_pwrite(struct NCR5380_hostdata *hostdata,
if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ,
BUS_AND_STATUS_REG, BASR_ACK,
- BASR_ACK, HZ / 64) < 0)
+ BASR_ACK, 0) < 0)
scmd_printk(KERN_DEBUG, hostdata->connected,
"%s: !REQ and !ACK\n", __func__);
if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH))
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 5e4137f10e0e..0f808d63580e 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -2605,7 +2605,6 @@ struct megasas_aen {
u32 class_locale_word;
} __attribute__ ((packed));
-#ifdef CONFIG_COMPAT
struct compat_megasas_iocpacket {
u16 host_no;
u16 __pad1;
@@ -2621,7 +2620,6 @@ struct compat_megasas_iocpacket {
} __attribute__ ((packed));
#define MEGASAS_IOC_FIRMWARE32 _IOWR('M', 1, struct compat_megasas_iocpacket)
-#endif
#define MEGASAS_IOC_FIRMWARE _IOWR('M', 1, struct megasas_iocpacket)
#define MEGASAS_IOC_GET_AEN _IOW('M', 3, struct megasas_aen)
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index e158d3d62056..63a4f48bdc75 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -37,6 +37,7 @@
#include <linux/poll.h>
#include <linux/vmalloc.h>
#include <linux/irq_poll.h>
+#include <linux/blk-mq-pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -113,6 +114,10 @@ unsigned int enable_sdev_max_qd;
module_param(enable_sdev_max_qd, int, 0444);
MODULE_PARM_DESC(enable_sdev_max_qd, "Enable sdev max qd as can_queue. Default: 0");
+int host_tagset_enable = 1;
+module_param(host_tagset_enable, int, 0444);
+MODULE_PARM_DESC(host_tagset_enable, "Shared host tagset enable/disable Default: enable(1)");
+
MODULE_LICENSE("GPL");
MODULE_VERSION(MEGASAS_VERSION);
MODULE_AUTHOR("megaraidlinux.pdl@broadcom.com");
@@ -3119,6 +3124,19 @@ megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
return 0;
}
+static int megasas_map_queues(struct Scsi_Host *shost)
+{
+ struct megasas_instance *instance;
+
+ instance = (struct megasas_instance *)shost->hostdata;
+
+ if (shost->nr_hw_queues == 1)
+ return 0;
+
+ return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
+ instance->pdev, instance->low_latency_index_start);
+}
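Because the hardware queues are mapped at an offset of low_latency_index_start, the MSI-X reply vector for a command can later be recovered from its blk-mq tag. A hedged sketch of that reverse mapping (the same arithmetic appears in the megasas_get_msix_index() hunk further down; the function name here is made up):

/* Hedged sketch: derive the MSI-X reply vector for a command from its
 * blk-mq hardware queue, undoing the offset applied in map_queues.
 */
static u32 example_reply_vector(struct megasas_instance *instance,
				struct scsi_cmnd *scmd)
{
	u32 tag = blk_mq_unique_tag(scmd->request);

	return blk_mq_unique_tag_to_hwq(tag) +
	       instance->low_latency_index_start;
}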
+
static void megasas_aen_polling(struct work_struct *work);
/**
@@ -3427,6 +3445,7 @@ static struct scsi_host_template megasas_template = {
.eh_timed_out = megasas_reset_timer,
.shost_attrs = megaraid_host_attrs,
.bios_param = megasas_bios_param,
+ .map_queues = megasas_map_queues,
.change_queue_depth = scsi_change_queue_depth,
.max_segment_size = 0xffffffff,
};
@@ -6808,6 +6827,26 @@ static int megasas_io_attach(struct megasas_instance *instance)
host->max_lun = MEGASAS_MAX_LUN;
host->max_cmd_len = 16;
+	/* Use a shared host tagset only for fusion adapters when there
+	 * are managed interrupts (the smp affinity enabled case). kdump
+	 * runs with a single MSI-X vector, so the shared host tagset is
+	 * disabled there as well.
+	 */
+
+ host->host_tagset = 0;
+ host->nr_hw_queues = 1;
+
+ if ((instance->adapter_type != MFI_SERIES) &&
+ (instance->msix_vectors > instance->low_latency_index_start) &&
+ host_tagset_enable &&
+ instance->smp_affinity_enable) {
+ host->host_tagset = 1;
+ host->nr_hw_queues = instance->msix_vectors -
+ instance->low_latency_index_start;
+ }
+
+ dev_info(&instance->pdev->dev,
+ "Max firmware commands: %d shared with nr_hw_queues = %d\n",
+ instance->max_fw_cmds, host->nr_hw_queues);
/*
* Notify the mid-layer about the new controller
*/
@@ -7554,25 +7593,23 @@ static void megasas_shutdown_controller(struct megasas_instance *instance,
megasas_return_cmd(instance, cmd);
}
-#ifdef CONFIG_PM
/**
* megasas_suspend - driver suspend entry point
- * @pdev: PCI device structure
- * @state: PCI power state to suspend routine
+ * @dev: Device structure
*/
-static int
-megasas_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused
+megasas_suspend(struct device *dev)
{
struct megasas_instance *instance;
- instance = pci_get_drvdata(pdev);
+ instance = dev_get_drvdata(dev);
if (!instance)
return 0;
instance->unload = 1;
- dev_info(&pdev->dev, "%s is called\n", __func__);
+ dev_info(dev, "%s is called\n", __func__);
/* Shutdown SR-IOV heartbeat timer */
if (instance->requestorId && !instance->skip_heartbeat_timer_del)
@@ -7602,48 +7639,29 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
if (instance->msix_vectors)
pci_free_irq_vectors(instance->pdev);
- pci_save_state(pdev);
- pci_disable_device(pdev);
-
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
-
return 0;
}
/**
* megasas_resume- driver resume entry point
- * @pdev: PCI device structure
+ * @dev: Device structure
*/
-static int
-megasas_resume(struct pci_dev *pdev)
+static int __maybe_unused
+megasas_resume(struct device *dev)
{
int rval;
struct Scsi_Host *host;
struct megasas_instance *instance;
u32 status_reg;
- instance = pci_get_drvdata(pdev);
+ instance = dev_get_drvdata(dev);
if (!instance)
return 0;
host = instance->host;
- pci_set_power_state(pdev, PCI_D0);
- pci_enable_wake(pdev, PCI_D0, 0);
- pci_restore_state(pdev);
-
- dev_info(&pdev->dev, "%s is called\n", __func__);
- /*
- * PCI prepping: enable device set bus mastering and dma mask
- */
- rval = pci_enable_device_mem(pdev);
-
- if (rval) {
- dev_err(&pdev->dev, "Enable device failed\n");
- return rval;
- }
- pci_set_master(pdev);
+ dev_info(dev, "%s is called\n", __func__);
/*
* We expect the FW state to be READY
@@ -7769,14 +7787,8 @@ fail_reenable_msix:
fail_set_dma_mask:
fail_ready_state:
- pci_disable_device(pdev);
-
return -ENODEV;
}
-#else
-#define megasas_suspend NULL
-#define megasas_resume NULL
-#endif
static inline int
megasas_wait_for_adapter_operational(struct megasas_instance *instance)
@@ -7946,7 +7958,7 @@ skip_firing_dcmds:
/**
* megasas_shutdown - Shutdown entry point
- * @pdev: Generic device structure
+ * @pdev: PCI device structure
*/
static void megasas_shutdown(struct pci_dev *pdev)
{
@@ -8095,7 +8107,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
int error = 0, i;
void *sense = NULL;
dma_addr_t sense_handle;
- unsigned long *sense_ptr;
+ void *sense_ptr;
u32 opcode = 0;
int ret = DCMD_SUCCESS;
@@ -8218,6 +8230,13 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
}
if (ioc->sense_len) {
+ /* make sure the pointer is part of the frame */
+ if (ioc->sense_off >
+ (sizeof(union megasas_frame) - sizeof(__le64))) {
+ error = -EINVAL;
+ goto out;
+ }
+
sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len,
&sense_handle, GFP_KERNEL);
if (!sense) {
@@ -8225,12 +8244,9 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
goto out;
}
- sense_ptr =
- (unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off);
- if (instance->consistent_mask_64bit)
- *sense_ptr = cpu_to_le64(sense_handle);
- else
- *sense_ptr = cpu_to_le32(sense_handle);
+ /* always store 64 bits regardless of addressing */
+ sense_ptr = (void *)cmd->frame + ioc->sense_off;
+ put_unaligned_le64(sense_handle, sense_ptr);
}
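The unconditional 64-bit store is safe for both addressing modes because the layout is little-endian: the low word lands first, so a firmware reading only 32 bits sees the correct value whenever the handle fits. A hedged illustration (assumes put_unaligned_le64() from <asm/unaligned.h>; the offset and handle values are made up):

/* Hedged illustration: with handle = 0x12345678 and off = 0x60, bytes
 * frame[0x60..0x63] become 78 56 34 12 and frame[0x64..0x67] are zero,
 * so a 32-bit LE reader and a 64-bit LE reader agree on the value.
 */
static void example_store_handle(void *frame, u32 off, dma_addr_t handle)
{
	put_unaligned_le64(handle, frame + off);
}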
/*
@@ -8274,16 +8290,19 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
* copy out the sense
*/
if (ioc->sense_len) {
+ void __user *uptr;
/*
* sense_ptr points to the location that has the user
* sense buffer address
*/
- sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw +
- ioc->sense_off);
+ sense_ptr = (void *)ioc->frame.raw + ioc->sense_off;
+ if (in_compat_syscall())
+ uptr = compat_ptr(get_unaligned((compat_uptr_t *)
+ sense_ptr));
+ else
+ uptr = get_unaligned((void __user **)sense_ptr);
- if (copy_to_user((void __user *)((unsigned long)
- get_unaligned((unsigned long *)sense_ptr)),
- sense, ioc->sense_len)) {
+ if (copy_to_user(uptr, sense, ioc->sense_len)) {
dev_err(&instance->pdev->dev, "Failed to copy out to user "
"sense data\n");
error = -EFAULT;
@@ -8326,6 +8345,38 @@ out:
return error;
}
+static struct megasas_iocpacket *
+megasas_compat_iocpacket_get_user(void __user *arg)
+{
+ struct megasas_iocpacket *ioc;
+ struct compat_megasas_iocpacket __user *cioc = arg;
+ size_t size;
+ int err = -EFAULT;
+ int i;
+
+ ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
+ if (!ioc)
+ return ERR_PTR(-ENOMEM);
+ size = offsetof(struct megasas_iocpacket, frame) + sizeof(ioc->frame);
+ if (copy_from_user(ioc, arg, size))
+ goto out;
+
+ for (i = 0; i < MAX_IOCTL_SGE; i++) {
+ compat_uptr_t iov_base;
+
+ if (get_user(iov_base, &cioc->sgl[i].iov_base) ||
+ get_user(ioc->sgl[i].iov_len, &cioc->sgl[i].iov_len))
+ goto out;
+
+ ioc->sgl[i].iov_base = compat_ptr(iov_base);
+ }
+
+ return ioc;
+out:
+ kfree(ioc);
+ return ERR_PTR(err);
+}
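A hedged usage note: the helper returns a kmalloc'd packet in native layout (or an ERR_PTR), so the caller selects it with in_compat_syscall() and must kfree() the result when the command completes; the free happens in the existing ioctl path, not in this hunk:

/* Hedged sketch of the caller contract */
if (in_compat_syscall())
	ioc = megasas_compat_iocpacket_get_user(user_ioc);
else
	ioc = memdup_user(user_ioc, sizeof(*ioc));
if (IS_ERR(ioc))
	return PTR_ERR(ioc);
/* ... issue the firmware command ... */
kfree(ioc);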
+
static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
{
struct megasas_iocpacket __user *user_ioc =
@@ -8334,7 +8385,11 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
struct megasas_instance *instance;
int error;
- ioc = memdup_user(user_ioc, sizeof(*ioc));
+ if (in_compat_syscall())
+ ioc = megasas_compat_iocpacket_get_user(user_ioc);
+ else
+ ioc = memdup_user(user_ioc, sizeof(struct megasas_iocpacket));
+
if (IS_ERR(ioc))
return PTR_ERR(ioc);
@@ -8439,78 +8494,13 @@ megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
#ifdef CONFIG_COMPAT
-static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
-{
- struct compat_megasas_iocpacket __user *cioc =
- (struct compat_megasas_iocpacket __user *)arg;
- struct megasas_iocpacket __user *ioc =
- compat_alloc_user_space(sizeof(struct megasas_iocpacket));
- int i;
- int error = 0;
- compat_uptr_t ptr;
- u32 local_sense_off;
- u32 local_sense_len;
- u32 user_sense_off;
-
- if (clear_user(ioc, sizeof(*ioc)))
- return -EFAULT;
-
- if (copy_in_user(&ioc->host_no, &cioc->host_no, sizeof(u16)) ||
- copy_in_user(&ioc->sgl_off, &cioc->sgl_off, sizeof(u32)) ||
- copy_in_user(&ioc->sense_off, &cioc->sense_off, sizeof(u32)) ||
- copy_in_user(&ioc->sense_len, &cioc->sense_len, sizeof(u32)) ||
- copy_in_user(ioc->frame.raw, cioc->frame.raw, 128) ||
- copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32)))
- return -EFAULT;
-
- /*
- * The sense_ptr is used in megasas_mgmt_fw_ioctl only when
- * sense_len is not null, so prepare the 64bit value under
- * the same condition.
- */
- if (get_user(local_sense_off, &ioc->sense_off) ||
- get_user(local_sense_len, &ioc->sense_len) ||
- get_user(user_sense_off, &cioc->sense_off))
- return -EFAULT;
-
- if (local_sense_off != user_sense_off)
- return -EINVAL;
-
- if (local_sense_len) {
- void __user **sense_ioc_ptr =
- (void __user **)((u8 *)((unsigned long)&ioc->frame.raw) + local_sense_off);
- compat_uptr_t *sense_cioc_ptr =
- (compat_uptr_t *)(((unsigned long)&cioc->frame.raw) + user_sense_off);
- if (get_user(ptr, sense_cioc_ptr) ||
- put_user(compat_ptr(ptr), sense_ioc_ptr))
- return -EFAULT;
- }
-
- for (i = 0; i < MAX_IOCTL_SGE; i++) {
- if (get_user(ptr, &cioc->sgl[i].iov_base) ||
- put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) ||
- copy_in_user(&ioc->sgl[i].iov_len,
- &cioc->sgl[i].iov_len, sizeof(compat_size_t)))
- return -EFAULT;
- }
-
- error = megasas_mgmt_ioctl_fw(file, (unsigned long)ioc);
-
- if (copy_in_user(&cioc->frame.hdr.cmd_status,
- &ioc->frame.hdr.cmd_status, sizeof(u8))) {
- printk(KERN_DEBUG "megasas: error copy_in_user cmd_status\n");
- return -EFAULT;
- }
- return error;
-}
-
static long
megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
switch (cmd) {
case MEGASAS_IOC_FIRMWARE32:
- return megasas_mgmt_compat_ioctl_fw(file, arg);
+ return megasas_mgmt_ioctl_fw(file, arg);
case MEGASAS_IOC_GET_AEN:
return megasas_mgmt_ioctl_aen(file, arg);
}
@@ -8534,6 +8524,8 @@ static const struct file_operations megasas_mgmt_fops = {
.llseek = noop_llseek,
};
+static SIMPLE_DEV_PM_OPS(megasas_pm_ops, megasas_suspend, megasas_resume);
+
/*
* PCI hotplug support registration structure
*/
@@ -8543,8 +8535,7 @@ static struct pci_driver megasas_pci_driver = {
.id_table = megasas_pci_table,
.probe = megasas_probe_one,
.remove = megasas_detach_one,
- .suspend = megasas_suspend,
- .resume = megasas_resume,
+ .driver.pm = &megasas_pm_ops,
.shutdown = megasas_shutdown,
};
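The SIMPLE_DEV_PM_OPS conversion moves config-space save/restore and power-state changes into the PCI core, so the driver callbacks shrink to device-specific quiesce and revive work. A hedged sketch of the same shape on a hypothetical driver (struct mydrv and its helpers are made up):

struct mydrv;					/* hypothetical state */
static void mydrv_quiesce(struct mydrv *md);	/* hypothetical */
static int mydrv_reinit(struct mydrv *md);	/* hypothetical */

static int __maybe_unused mydrv_suspend(struct device *dev)
{
	struct mydrv *md = dev_get_drvdata(dev);

	mydrv_quiesce(md);	/* stop DMA, free IRQs, etc. */
	return 0;
}

static int __maybe_unused mydrv_resume(struct device *dev)
{
	struct mydrv *md = dev_get_drvdata(dev);

	return mydrv_reinit(md);	/* bring the device back up */
}

static SIMPLE_DEV_PM_OPS(mydrv_pm_ops, mydrv_suspend, mydrv_resume);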
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index b0c01cf0428f..fd607287608e 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -359,24 +359,29 @@ megasas_get_msix_index(struct megasas_instance *instance,
{
int sdev_busy;
- /* nr_hw_queue = 1 for MegaRAID */
- struct blk_mq_hw_ctx *hctx =
- scmd->device->request_queue->queue_hw_ctx[0];
-
- sdev_busy = atomic_read(&hctx->nr_active);
+	/* TBD - if the SCSI midlayer removes device_busy in the future,
+	 * the driver should track this counter in an internal structure.
+	 */
+ sdev_busy = atomic_read(&scmd->device->device_busy);
if (instance->perf_mode == MR_BALANCED_PERF_MODE &&
- sdev_busy > (data_arms * MR_DEVICE_HIGH_IOPS_DEPTH))
+ sdev_busy > (data_arms * MR_DEVICE_HIGH_IOPS_DEPTH)) {
cmd->request_desc->SCSIIO.MSIxIndex =
mega_mod64((atomic64_add_return(1, &instance->high_iops_outstanding) /
MR_HIGH_IOPS_BATCH_COUNT), instance->low_latency_index_start);
- else if (instance->msix_load_balance)
+ } else if (instance->msix_load_balance) {
cmd->request_desc->SCSIIO.MSIxIndex =
(mega_mod64(atomic64_add_return(1, &instance->total_io_count),
instance->msix_vectors));
- else
+ } else if (instance->host->nr_hw_queues > 1) {
+ u32 tag = blk_mq_unique_tag(scmd->request);
+
+ cmd->request_desc->SCSIIO.MSIxIndex = blk_mq_unique_tag_to_hwq(tag) +
+ instance->low_latency_index_start;
+ } else {
cmd->request_desc->SCSIIO.MSIxIndex =
instance->reply_map[raw_smp_processor_id()];
+ }
}
/**
@@ -956,9 +961,6 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
if (megasas_alloc_cmdlist_fusion(instance))
goto fail_exit;
- dev_info(&instance->pdev->dev, "Configured max firmware commands: %d\n",
- instance->max_fw_cmds);
-
/* The first 256 bytes (SMID 0) is not used. Don't add to the cmd list */
io_req_base = fusion->io_request_frames + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
io_req_base_phys = fusion->io_request_frames_phys + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
@@ -1102,8 +1104,9 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
MR_HIGH_IOPS_QUEUE_COUNT) && cur_intr_coalescing)
instance->perf_mode = MR_BALANCED_PERF_MODE;
- dev_info(&instance->pdev->dev, "Performance mode :%s\n",
- MEGASAS_PERF_MODE_2STR(instance->perf_mode));
+ dev_info(&instance->pdev->dev, "Performance mode :%s (latency index = %d)\n",
+ MEGASAS_PERF_MODE_2STR(instance->perf_mode),
+ instance->low_latency_index_start);
instance->fw_sync_cache_support = (scratch_pad_1 &
MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0;
diff --git a/drivers/scsi/mpt3sas/Kconfig b/drivers/scsi/mpt3sas/Kconfig
index 86209455172d..c299f7e078fb 100644
--- a/drivers/scsi/mpt3sas/Kconfig
+++ b/drivers/scsi/mpt3sas/Kconfig
@@ -79,5 +79,5 @@ config SCSI_MPT2SAS
select SCSI_MPT3SAS
depends on PCI && SCSI
help
- Dummy config option for backwards compatiblity: configure the MPT3SAS
+ Dummy config option for backwards compatibility: configure the MPT3SAS
driver instead.
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index bb940cbcbb5d..6e23dc3209fe 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -597,6 +597,71 @@ static int mpt3sas_remove_dead_ioc_func(void *arg)
}
/**
+ * _base_sync_drv_fw_timestamp - Sync Driver-FW TimeStamp.
+ * @ioc: Per Adapter Object
+ *
+ * Return nothing.
+ */
+static void _base_sync_drv_fw_timestamp(struct MPT3SAS_ADAPTER *ioc)
+{
+ Mpi26IoUnitControlRequest_t *mpi_request;
+ Mpi26IoUnitControlReply_t *mpi_reply;
+ u16 smid;
+ ktime_t current_time;
+ u64 TimeStamp = 0;
+ u8 issue_reset = 0;
+
+ mutex_lock(&ioc->scsih_cmds.mutex);
+ if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
+ ioc_err(ioc, "scsih_cmd in use %s\n", __func__);
+ goto out;
+ }
+ ioc->scsih_cmds.status = MPT3_CMD_PENDING;
+ smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
+ if (!smid) {
+ ioc_err(ioc, "Failed obtaining a smid %s\n", __func__);
+ ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
+ goto out;
+ }
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->scsih_cmds.smid = smid;
+ memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL;
+ mpi_request->Operation = MPI26_CTRL_OP_SET_IOC_PARAMETER;
+ mpi_request->IOCParameter = MPI26_SET_IOC_PARAMETER_SYNC_TIMESTAMP;
+ current_time = ktime_get_real();
+ TimeStamp = ktime_to_ms(current_time);
+ mpi_request->Reserved7 = cpu_to_le32(TimeStamp & 0xFFFFFFFF);
+ mpi_request->IOCParameterValue = cpu_to_le32(TimeStamp >> 32);
+ init_completion(&ioc->scsih_cmds.done);
+ ioc->put_smid_default(ioc, smid);
+ dinitprintk(ioc, ioc_info(ioc,
+ "Io Unit Control Sync TimeStamp (sending), @time %lld ms\n",
+ TimeStamp));
+ wait_for_completion_timeout(&ioc->scsih_cmds.done,
+ MPT3SAS_TIMESYNC_TIMEOUT_SECONDS*HZ);
+ if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
+ mpt3sas_check_cmd_timeout(ioc,
+ ioc->scsih_cmds.status, mpi_request,
+ sizeof(Mpi2SasIoUnitControlRequest_t)/4, issue_reset);
+ goto issue_host_reset;
+ }
+ if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
+ mpi_reply = ioc->scsih_cmds.reply;
+ dinitprintk(ioc, ioc_info(ioc,
+ "Io Unit Control sync timestamp (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
+ le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo)));
+ }
+issue_host_reset:
+ if (issue_reset)
+ mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
+ ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
+out:
+ mutex_unlock(&ioc->scsih_cmds.mutex);
+}
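The timestamp travels as a 64-bit millisecond count split across two little-endian 32-bit request fields (Reserved7 carries the low word, IOCParameterValue the high word). A hedged sketch of the split in isolation (the helper name is made up; assumes <linux/ktime.h>):

/* Hedged sketch: split a 64-bit ms timestamp into the two LE32 fields
 * used by the IO Unit Control request above.
 */
static void example_split_timestamp(__le32 *lo, __le32 *hi)
{
	u64 ts = ktime_to_ms(ktime_get_real());

	*lo = cpu_to_le32(ts & 0xFFFFFFFF);
	*hi = cpu_to_le32(ts >> 32);
}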
+
+/**
* _base_fault_reset_work - workq handling ioc fault conditions
* @work: input argument, used to derive ioc
*
@@ -720,7 +785,11 @@ _base_fault_reset_work(struct work_struct *work)
return; /* don't rearm timer */
}
ioc->ioc_coredump_loop = 0;
-
+ if (ioc->time_sync_interval &&
+ ++ioc->timestamp_update_count >= ioc->time_sync_interval) {
+ ioc->timestamp_update_count = 0;
+ _base_sync_drv_fw_timestamp(ioc);
+ }
spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
rearm_timer:
if (ioc->fault_reset_work_q)
@@ -744,6 +813,7 @@ mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
if (ioc->fault_reset_work_q)
return;
+ ioc->timestamp_update_count = 0;
/* initialize fault polling */
INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
@@ -905,6 +975,20 @@ _base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
return;
+ /*
+	 * Older firmware versions don't support driver trigger pages, so
+	 * skip displaying the 'config invalid type' error message for
+	 * them.
+ */
+ if (request_hdr->Function == MPI2_FUNCTION_CONFIG) {
+ Mpi2ConfigRequest_t *rqst = (Mpi2ConfigRequest_t *)request_hdr;
+
+ if ((rqst->ExtPageType ==
+ MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER) &&
+ !(ioc->logging_level & MPT_DEBUG_CONFIG)) {
+ return;
+ }
+ }
switch (ioc_status) {
@@ -4721,6 +4805,311 @@ _base_update_ioc_page1_inlinewith_perf_mode(struct MPT3SAS_ADAPTER *ioc)
}
/**
+ * _base_get_event_diag_triggers - get event diag trigger values from
+ * persistent pages
+ * @ioc : per adapter object
+ *
+ * Return nothing.
+ */
+static void
+_base_get_event_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
+{
+ Mpi26DriverTriggerPage2_t trigger_pg2;
+ struct SL_WH_EVENT_TRIGGER_T *event_tg;
+ MPI26_DRIVER_MPI_EVENT_TIGGER_ENTRY *mpi_event_tg;
+ Mpi2ConfigReply_t mpi_reply;
+ int r = 0, i = 0;
+ u16 count = 0;
+ u16 ioc_status;
+
+ r = mpt3sas_config_get_driver_trigger_pg2(ioc, &mpi_reply,
+ &trigger_pg2);
+ if (r)
+ return;
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ dinitprintk(ioc,
+ ioc_err(ioc,
+ "%s: Failed to get trigger pg2, ioc_status(0x%04x)\n",
+ __func__, ioc_status));
+ return;
+ }
+
+ if (le16_to_cpu(trigger_pg2.NumMPIEventTrigger)) {
+ count = le16_to_cpu(trigger_pg2.NumMPIEventTrigger);
+ count = min_t(u16, NUM_VALID_ENTRIES, count);
+ ioc->diag_trigger_event.ValidEntries = count;
+
+ event_tg = &ioc->diag_trigger_event.EventTriggerEntry[0];
+ mpi_event_tg = &trigger_pg2.MPIEventTriggers[0];
+ for (i = 0; i < count; i++) {
+ event_tg->EventValue = le16_to_cpu(
+ mpi_event_tg->MPIEventCode);
+ event_tg->LogEntryQualifier = le16_to_cpu(
+ mpi_event_tg->MPIEventCodeSpecific);
+ event_tg++;
+ mpi_event_tg++;
+ }
+ }
+}
+
+/**
+ * _base_get_scsi_diag_triggers - get scsi diag trigger values from
+ * persistent pages
+ * @ioc : per adapter object
+ *
+ * Return nothing.
+ */
+static void
+_base_get_scsi_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
+{
+ Mpi26DriverTriggerPage3_t trigger_pg3;
+ struct SL_WH_SCSI_TRIGGER_T *scsi_tg;
+ MPI26_DRIVER_SCSI_SENSE_TIGGER_ENTRY *mpi_scsi_tg;
+ Mpi2ConfigReply_t mpi_reply;
+ int r = 0, i = 0;
+ u16 count = 0;
+ u16 ioc_status;
+
+ r = mpt3sas_config_get_driver_trigger_pg3(ioc, &mpi_reply,
+ &trigger_pg3);
+ if (r)
+ return;
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ dinitprintk(ioc,
+ ioc_err(ioc,
+ "%s: Failed to get trigger pg3, ioc_status(0x%04x)\n",
+ __func__, ioc_status));
+ return;
+ }
+
+ if (le16_to_cpu(trigger_pg3.NumSCSISenseTrigger)) {
+ count = le16_to_cpu(trigger_pg3.NumSCSISenseTrigger);
+ count = min_t(u16, NUM_VALID_ENTRIES, count);
+ ioc->diag_trigger_scsi.ValidEntries = count;
+
+ scsi_tg = &ioc->diag_trigger_scsi.SCSITriggerEntry[0];
+ mpi_scsi_tg = &trigger_pg3.SCSISenseTriggers[0];
+ for (i = 0; i < count; i++) {
+ scsi_tg->ASCQ = mpi_scsi_tg->ASCQ;
+ scsi_tg->ASC = mpi_scsi_tg->ASC;
+ scsi_tg->SenseKey = mpi_scsi_tg->SenseKey;
+
+ scsi_tg++;
+ mpi_scsi_tg++;
+ }
+ }
+}
+
+/**
+ * _base_get_mpi_diag_triggers - get mpi diag trigger values from
+ * persistent pages
+ * @ioc : per adapter object
+ *
+ * Return nothing.
+ */
+static void
+_base_get_mpi_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
+{
+ Mpi26DriverTriggerPage4_t trigger_pg4;
+ struct SL_WH_MPI_TRIGGER_T *status_tg;
+ MPI26_DRIVER_IOCSTATUS_LOGINFO_TIGGER_ENTRY *mpi_status_tg;
+ Mpi2ConfigReply_t mpi_reply;
+ int r = 0, i = 0;
+ u16 count = 0;
+ u16 ioc_status;
+
+ r = mpt3sas_config_get_driver_trigger_pg4(ioc, &mpi_reply,
+ &trigger_pg4);
+ if (r)
+ return;
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ dinitprintk(ioc,
+ ioc_err(ioc,
+ "%s: Failed to get trigger pg4, ioc_status(0x%04x)\n",
+ __func__, ioc_status));
+ return;
+ }
+
+ if (le16_to_cpu(trigger_pg4.NumIOCStatusLogInfoTrigger)) {
+ count = le16_to_cpu(trigger_pg4.NumIOCStatusLogInfoTrigger);
+ count = min_t(u16, NUM_VALID_ENTRIES, count);
+ ioc->diag_trigger_mpi.ValidEntries = count;
+
+ status_tg = &ioc->diag_trigger_mpi.MPITriggerEntry[0];
+ mpi_status_tg = &trigger_pg4.IOCStatusLoginfoTriggers[0];
+
+ for (i = 0; i < count; i++) {
+ status_tg->IOCStatus = le16_to_cpu(
+ mpi_status_tg->IOCStatus);
+ status_tg->IocLogInfo = le32_to_cpu(
+ mpi_status_tg->LogInfo);
+
+ status_tg++;
+ mpi_status_tg++;
+ }
+ }
+}
+
+/**
+ * _base_get_master_diag_triggers - get master diag trigger values from
+ * persistent pages
+ * @ioc : per adapter object
+ *
+ * Return nothing.
+ */
+static void
+_base_get_master_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
+{
+ Mpi26DriverTriggerPage1_t trigger_pg1;
+ Mpi2ConfigReply_t mpi_reply;
+ int r;
+ u16 ioc_status;
+
+ r = mpt3sas_config_get_driver_trigger_pg1(ioc, &mpi_reply,
+ &trigger_pg1);
+ if (r)
+ return;
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ dinitprintk(ioc,
+ ioc_err(ioc,
+ "%s: Failed to get trigger pg1, ioc_status(0x%04x)\n",
+ __func__, ioc_status));
+ return;
+ }
+
+ if (le16_to_cpu(trigger_pg1.NumMasterTrigger))
+ ioc->diag_trigger_master.MasterData |=
+ le32_to_cpu(
+ trigger_pg1.MasterTriggers[0].MasterTriggerFlags);
+}
+
+/**
+ * _base_check_for_trigger_pages_support - checks whether HBA FW supports
+ * driver trigger pages or not
+ * @ioc : per adapter object
+ *
+ * Returns trigger flags mask if HBA FW supports driver trigger pages,
+ * otherwise returns -EFAULT.
+ */
+static int
+_base_check_for_trigger_pages_support(struct MPT3SAS_ADAPTER *ioc)
+{
+ Mpi26DriverTriggerPage0_t trigger_pg0;
+ int r = 0;
+ Mpi2ConfigReply_t mpi_reply;
+ u16 ioc_status;
+
+ r = mpt3sas_config_get_driver_trigger_pg0(ioc, &mpi_reply,
+ &trigger_pg0);
+ if (r)
+ return -EFAULT;
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+ return -EFAULT;
+
+ return le16_to_cpu(trigger_pg0.TriggerFlags);
+}
+
+/**
+ * _base_get_diag_triggers - Retrieve diag trigger values from
+ * persistent pages.
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+static void
+_base_get_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
+{
+ int trigger_flags;
+
+ /*
+ * Default setting of master trigger.
+ */
+ ioc->diag_trigger_master.MasterData =
+ (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET);
+
+ trigger_flags = _base_check_for_trigger_pages_support(ioc);
+ if (trigger_flags < 0)
+ return;
+
+ ioc->supports_trigger_pages = 1;
+
+ /*
+ * Retrieve master diag trigger values from driver trigger pg1
+ * if master trigger bit enabled in TriggerFlags.
+ */
+ if ((u16)trigger_flags &
+ MPI26_DRIVER_TRIGGER0_FLAG_MASTER_TRIGGER_VALID)
+ _base_get_master_diag_triggers(ioc);
+
+ /*
+ * Retrieve event diag trigger values from driver trigger pg2
+ * if event trigger bit enabled in TriggerFlags.
+ */
+ if ((u16)trigger_flags &
+ MPI26_DRIVER_TRIGGER0_FLAG_MPI_EVENT_TRIGGER_VALID)
+ _base_get_event_diag_triggers(ioc);
+
+ /*
+ * Retrieve scsi diag trigger values from driver trigger pg3
+ * if scsi trigger bit enabled in TriggerFlags.
+ */
+ if ((u16)trigger_flags &
+ MPI26_DRIVER_TRIGGER0_FLAG_SCSI_SENSE_TRIGGER_VALID)
+ _base_get_scsi_diag_triggers(ioc);
+
+ /*
+ * Retrieve mpi error diag trigger values from driver trigger pg4
+ * if loginfo trigger bit enabled in TriggerFlags.
+ */
+ if ((u16)trigger_flags &
+ MPI26_DRIVER_TRIGGER0_FLAG_LOGINFO_TRIGGER_VALID)
+ _base_get_mpi_diag_triggers(ioc);
+}
+
+/**
+ * _base_update_diag_trigger_pages - Update the driver trigger pages after
+ * an online FW update, in case the updated FW supports driver
+ * trigger pages.
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+static void
+_base_update_diag_trigger_pages(struct MPT3SAS_ADAPTER *ioc)
+{
+
+ if (ioc->diag_trigger_master.MasterData)
+ mpt3sas_config_update_driver_trigger_pg1(ioc,
+ &ioc->diag_trigger_master, 1);
+
+ if (ioc->diag_trigger_event.ValidEntries)
+ mpt3sas_config_update_driver_trigger_pg2(ioc,
+ &ioc->diag_trigger_event, 1);
+
+ if (ioc->diag_trigger_scsi.ValidEntries)
+ mpt3sas_config_update_driver_trigger_pg3(ioc,
+ &ioc->diag_trigger_scsi, 1);
+
+ if (ioc->diag_trigger_mpi.ValidEntries)
+ mpt3sas_config_update_driver_trigger_pg4(ioc,
+ &ioc->diag_trigger_mpi, 1);
+}
+
+/**
* _base_static_config_pages - static start of day config pages
* @ioc: per adapter object
*/
@@ -4729,7 +5118,7 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
{
Mpi2ConfigReply_t mpi_reply;
u32 iounit_pg1_flags;
-
+ int tg_flags = 0;
+
ioc->nvme_abort_timeout = 30;
mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0);
if (ioc->ir_firmware)
@@ -4761,7 +5150,24 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
else
ioc->nvme_abort_timeout = ioc->manu_pg11.NVMeAbortTO;
}
-
+ ioc->time_sync_interval =
+ ioc->manu_pg11.TimeSyncInterval & MPT3SAS_TIMESYNC_MASK;
+ if (ioc->time_sync_interval) {
+ if (ioc->manu_pg11.TimeSyncInterval & MPT3SAS_TIMESYNC_UNIT_MASK)
+ ioc->time_sync_interval =
+ ioc->time_sync_interval * SECONDS_PER_HOUR;
+ else
+ ioc->time_sync_interval =
+ ioc->time_sync_interval * SECONDS_PER_MIN;
+ dinitprintk(ioc, ioc_info(ioc,
+ "Driver-FW TimeSync interval is %d seconds. ManuPg11 TimeSync Unit is in %s\n",
+ ioc->time_sync_interval, (ioc->manu_pg11.TimeSyncInterval &
+ MPT3SAS_TIMESYNC_UNIT_MASK) ? "Hour" : "Minute"));
+ } else {
+ if (ioc->is_gen35_ioc)
+ ioc_warn(ioc,
+ "TimeSync Interval in Manuf page-11 is not enabled. Periodic Time-Sync will be disabled\n");
+ }
mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
@@ -4789,6 +5195,29 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors;
if (ioc->is_aero_ioc)
_base_update_ioc_page1_inlinewith_perf_mode(ioc);
+ if (ioc->is_gen35_ioc) {
+ if (ioc->is_driver_loading)
+ _base_get_diag_triggers(ioc);
+ else {
+ /*
+ * In case of an online HBA FW update operation,
+ * check whether the updated FW supports the driver
+ * trigger pages or not.
+ * - If the previous FW did not support driver trigger
+ * pages and the newer FW supports them, then update
+ * these pages with the current diag trigger values.
+ * - If the previous FW supported driver trigger pages
+ * and the new FW doesn't support them, then clear
+ * the supports_trigger_pages flag.
+ */
+ tg_flags = _base_check_for_trigger_pages_support(ioc);
+ if (!ioc->supports_trigger_pages && tg_flags != -EFAULT)
+ _base_update_diag_trigger_pages(ioc);
+ else if (ioc->supports_trigger_pages &&
+ tg_flags == -EFAULT)
+ ioc->supports_trigger_pages = 0;
+ }
+ }
}
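
For the TimeSync decode in the hunk above: bit 7 of ManuPg11's
TimeSyncInterval byte selects the unit and bits 0-6 carry the count, so a raw
byte of 0x83 decodes to 3 hours (3 * 3600 = 10800 seconds) and 0x0A to 10
minutes (10 * 60 = 600 seconds). A standalone sketch of the same arithmetic,
with the mask values matching the header hunk further below:

#include <stdio.h>

#define TIMESYNC_UNIT_MASK 0x80	/* bit 7: 1 = hours, 0 = minutes */
#define TIMESYNC_MASK      0x7F	/* bits 0-6: interval count */

static unsigned int timesync_seconds(unsigned char raw)
{
	unsigned int count = raw & TIMESYNC_MASK;

	return count * ((raw & TIMESYNC_UNIT_MASK) ? 3600 : 60);
}

int main(void)
{
	printf("%u\n", timesync_seconds(0x83));	/* 10800 */
	printf("%u\n", timesync_seconds(0x0A));	/* 600 */
	return 0;
}
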
/**
@@ -6473,6 +6902,8 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
r = -EIO;
}
+ /* Reset TimeSync Counter */
+ ioc->timestamp_update_count = 0;
return r;
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index bc8beb10f3fc..2def7a340616 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -71,13 +71,14 @@
#include "mpt3sas_debug.h"
#include "mpt3sas_trigger_diag.h"
+#include "mpt3sas_trigger_pages.h"
/* driver versioning info */
#define MPT3SAS_DRIVER_NAME "mpt3sas"
#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION "35.100.00.00"
-#define MPT3SAS_MAJOR_VERSION 35
+#define MPT3SAS_DRIVER_VERSION "36.100.00.00"
+#define MPT3SAS_MAJOR_VERSION 36
#define MPT3SAS_MINOR_VERSION 100
#define MPT3SAS_BUILD_VERSION 0
#define MPT3SAS_RELEASE_VERSION 00
@@ -93,6 +94,14 @@
/* CoreDump: Default timeout */
#define MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS (15) /*15 seconds*/
#define MPT3SAS_COREDUMP_LOOP_DONE (0xFF)
+#define MPT3SAS_TIMESYNC_TIMEOUT_SECONDS (10) /* 10 seconds */
+#define MPT3SAS_TIMESYNC_UPDATE_INTERVAL (900) /* 15 minutes */
+#define MPT3SAS_TIMESYNC_UNIT_MASK (0x80) /* bit 7 */
+#define MPT3SAS_TIMESYNC_MASK (0x7F) /* 0 - 6 bits */
+#define SECONDS_PER_MIN (60)
+#define SECONDS_PER_HOUR (3600)
+#define MPI26_SET_IOC_PARAMETER_SYNC_TIMESTAMP (0x81)
/*
* Set MPT3SAS_SG_DEPTH value based on user input.
@@ -405,7 +414,7 @@ struct Mpi2ManufacturingPage11_t {
u16 HostTraceBufferMaxSizeKB; /* 50h */
u16 HostTraceBufferMinSizeKB; /* 52h */
u8 CoreDumpTOSec; /* 54h */
- u8 Reserved8; /* 55h */
+ u8 TimeSyncInterval; /* 55h */
u16 Reserved9; /* 56h */
__le32 Reserved10; /* 58h */
};
@@ -420,6 +429,7 @@ struct Mpi2ManufacturingPage11_t {
* @flags: MPT_TARGET_FLAGS_XXX flags
* @deleted: target flaged for deletion
* @tm_busy: target is busy with TM request.
+ * @port: hba port entry containing target's port number info
* @sas_dev: The sas_device associated with this target
* @pcie_dev: The pcie device associated with this target
*/
@@ -432,6 +442,7 @@ struct MPT3SAS_TARGET {
u32 flags;
u8 deleted;
u8 tm_busy;
+ struct hba_port *port;
struct _sas_device *sas_dev;
struct _pcie_device *pcie_dev;
};
@@ -534,6 +545,9 @@ struct _internal_cmd {
* addition routine.
* @chassis_slot: chassis slot
* @is_chassis_slot_valid: chassis slot valid or not
+ * @port: hba port entry containing device's port number info
+ * @rphy: device's sas_rphy address used to identify this device structure in
+ * target_alloc callback function
*/
struct _sas_device {
struct list_head list;
@@ -560,6 +574,8 @@ struct _sas_device {
u8 is_chassis_slot_valid;
u8 connector_name[5];
struct kref refcount;
+ struct hba_port *port;
+ struct sas_rphy *rphy;
};
static inline void sas_device_get(struct _sas_device *s)
@@ -730,6 +746,7 @@ struct _boot_device {
* @remote_identify: attached device identification
* @rphy: sas transport rphy object
* @port: sas transport wide/narrow port object
+ * @hba_port: hba port entry containing port's port number info
* @phy_list: _sas_phy list objects belonging to this port
*/
struct _sas_port {
@@ -738,6 +755,7 @@ struct _sas_port {
struct sas_identify remote_identify;
struct sas_rphy *rphy;
struct sas_port *port;
+ struct hba_port *hba_port;
struct list_head phy_list;
};
@@ -751,6 +769,7 @@ struct _sas_port {
* @handle: device handle for this phy
* @attached_handle: device handle for attached device
* @phy_belongs_to_port: port has been created for this phy
+ * @port: hba port entry containing port number info
*/
struct _sas_phy {
struct list_head port_siblings;
@@ -761,6 +780,8 @@ struct _sas_phy {
u16 handle;
u16 attached_handle;
u8 phy_belongs_to_port;
+ u8 hba_vphy;
+ struct hba_port *port;
};
/**
@@ -776,6 +797,8 @@ struct _sas_phy {
* @responding: used in _scsih_expander_device_mark_responding
* @phy: a list of phys that make up this sas_host/expander
* @sas_port_list: list of ports attached to this sas_host/expander
+ * @port: hba port entry containing node's port number info
+ * @rphy: sas_rphy object of this expander
*/
struct _sas_node {
struct list_head list;
@@ -787,11 +810,12 @@ struct _sas_node {
u16 enclosure_handle;
u64 enclosure_logical_id;
u8 responding;
+ struct hba_port *port;
struct _sas_phy *phy;
struct list_head sas_port_list;
+ struct sas_rphy *rphy;
};
-
/**
* struct _enclosure_node - enclosure information
* @list: list of enclosures
@@ -1009,6 +1033,46 @@ struct reply_post_struct {
dma_addr_t reply_post_free_dma;
};
+/**
+ * struct virtual_phy - vSES phy structure
+ * @sas_address: SAS Address of vSES device
+ * @phy_mask: vSES device's phy number
+ * @flags: flags used to manage this structure
+ */
+struct virtual_phy {
+ struct list_head list;
+ u64 sas_address;
+ u32 phy_mask;
+ u8 flags;
+};
+
+#define MPT_VPHY_FLAG_DIRTY_PHY 0x01
+
+/**
+ * struct hba_port - Saves each HBA's Wide/Narrow port info
+ * @sas_address: sas address of this wide/narrow port's attached device
+ * @phy_mask: HBA PHY's belonging to this port
+ * @port_id: port number
+ * @flags: hba port flags
+ * @vphys_mask: mask of vSES devices' phy numbers
+ * @vphys_list: list containing vSES device structures
+ */
+struct hba_port {
+ struct list_head list;
+ u64 sas_address;
+ u32 phy_mask;
+ u8 port_id;
+ u8 flags;
+ u32 vphys_mask;
+ struct list_head vphys_list;
+};
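
In struct hba_port, phy_mask holds one bit per HBA phy, so a wide port built
from phys 0-3 carries phy_mask 0x0F. A standalone model of the membership
test (not driver code):

#include <stdio.h>

static int phy_belongs_to_port(unsigned int phy_mask, unsigned int phy)
{
	return (phy_mask >> phy) & 1;
}

int main(void)
{
	unsigned int wide_port_mask = 0x0F;	/* phys 0-3 */

	printf("phy 2: %d\n", phy_belongs_to_port(wide_port_mask, 2)); /* 1 */
	printf("phy 5: %d\n", phy_belongs_to_port(wide_port_mask, 5)); /* 0 */
	return 0;
}
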
+
+/* hba port flags */
+#define HBA_PORT_FLAG_DIRTY_PORT 0x01
+#define HBA_PORT_FLAG_NEW_PORT 0x02
+
+#define MULTIPATH_DISABLED_PORT_ID 0xFF
+
typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
/**
* struct MPT3SAS_ADAPTER - per adapter struct
@@ -1058,6 +1122,8 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
* @cpu_msix_table_sz: table size
* @total_io_cnt: Gives total IO count, used to load balance the interrupts
* @ioc_coredump_loop: will have non-zero value when FW is in CoreDump state
+ * @timestamp_update_count: Counter to fire timeSync command
+ * @time_sync_interval: Time sync interval read from manufacturing page 11
* @high_iops_outstanding: used to load balance the interrupts
* within high iops reply queues
* @msix_load_balance: Enables load balancing of interrupts across
@@ -1191,6 +1257,8 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
* which ensures the syncrhonization between cli/sysfs_show path.
* @atomic_desc_capable: Atomic Request Descriptor support.
* @GET_MSIX_INDEX: Get the msix index of high iops queues.
+ * @multipath_on_hba: flag to determine multipath on hba is enabled or not
+ * @port_table_list: list containing HBA's wide/narrow port's info
*/
struct MPT3SAS_ADAPTER {
struct list_head list;
@@ -1251,6 +1319,8 @@ struct MPT3SAS_ADAPTER {
MPT3SAS_FLUSH_RUNNING_CMDS schedule_dead_ioc_flush_running_cmds;
u32 non_operational_loop;
u8 ioc_coredump_loop;
+ u32 timestamp_update_count;
+ u32 time_sync_interval;
atomic64_t total_io_cnt;
atomic64_t high_iops_outstanding;
bool msix_load_balance;
@@ -1472,6 +1542,7 @@ struct MPT3SAS_ADAPTER {
struct SL_WH_EVENT_TRIGGERS_T diag_trigger_event;
struct SL_WH_SCSI_TRIGGERS_T diag_trigger_scsi;
struct SL_WH_MPI_TRIGGERS_T diag_trigger_mpi;
+ u8 supports_trigger_pages;
void *device_remove_in_progress;
u16 device_remove_in_progress_sz;
u8 is_gen35_ioc;
@@ -1483,6 +1554,9 @@ struct MPT3SAS_ADAPTER {
PUT_SMID_IO_FP_HIP put_smid_hi_priority;
PUT_SMID_DEFAULT put_smid_default;
GET_MSIX_INDEX get_msix_index_for_smlio;
+
+ u8 multipath_on_hba;
+ struct list_head port_table_list;
};
struct mpt3sas_debugfs_buffer {
@@ -1619,20 +1693,27 @@ int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
void mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);
void mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);
-void mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address);
+void mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
+ struct hba_port *port);
void mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
- u64 sas_address);
+ u64 sas_address, struct hba_port *port);
u8 mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc,
u16 smid);
+struct hba_port *
+mpt3sas_get_port_by_id(struct MPT3SAS_ADAPTER *ioc, u8 port,
+ u8 bypass_dirty_port_flag);
struct _sas_node *mpt3sas_scsih_expander_find_by_handle(
struct MPT3SAS_ADAPTER *ioc, u16 handle);
struct _sas_node *mpt3sas_scsih_expander_find_by_sas_address(
- struct MPT3SAS_ADAPTER *ioc, u64 sas_address);
+ struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
+ struct hba_port *port);
struct _sas_device *mpt3sas_get_sdev_by_addr(
- struct MPT3SAS_ADAPTER *ioc, u64 sas_address);
+ struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
+ struct hba_port *port);
struct _sas_device *__mpt3sas_get_sdev_by_addr(
- struct MPT3SAS_ADAPTER *ioc, u64 sas_address);
+ struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
+ struct hba_port *port);
struct _sas_device *mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc,
u16 handle);
struct _pcie_device *mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc,
@@ -1642,6 +1723,11 @@ void mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc);
struct _raid_device *
mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle);
void mpt3sas_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth);
+struct _sas_device *
+__mpt3sas_get_sdev_by_rphy(struct MPT3SAS_ADAPTER *ioc, struct sas_rphy *rphy);
+struct virtual_phy *
+mpt3sas_get_vphy_by_phy(struct MPT3SAS_ADAPTER *ioc,
+ struct hba_port *port, u32 phy);
/* config shared API */
u8 mpt3sas_config_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
@@ -1733,6 +1819,33 @@ int mpt3sas_config_get_volume_handle(struct MPT3SAS_ADAPTER *ioc, u16 pd_handle,
u16 *volume_handle);
int mpt3sas_config_get_volume_wwid(struct MPT3SAS_ADAPTER *ioc,
u16 volume_handle, u64 *wwid);
+int
+mpt3sas_config_get_driver_trigger_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage0_t *config_page);
+int
+mpt3sas_config_get_driver_trigger_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage1_t *config_page);
+int
+mpt3sas_config_get_driver_trigger_pg2(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage2_t *config_page);
+int
+mpt3sas_config_get_driver_trigger_pg3(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage3_t *config_page);
+int
+mpt3sas_config_get_driver_trigger_pg4(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage4_t *config_page);
+int
+mpt3sas_config_update_driver_trigger_pg1(struct MPT3SAS_ADAPTER *ioc,
+ struct SL_WH_MASTER_TRIGGER_T *master_tg, bool set);
+int
+mpt3sas_config_update_driver_trigger_pg2(struct MPT3SAS_ADAPTER *ioc,
+ struct SL_WH_EVENT_TRIGGERS_T *event_tg, bool set);
+int
+mpt3sas_config_update_driver_trigger_pg3(struct MPT3SAS_ADAPTER *ioc,
+ struct SL_WH_SCSI_TRIGGERS_T *scsi_tg, bool set);
+int
+mpt3sas_config_update_driver_trigger_pg4(struct MPT3SAS_ADAPTER *ioc,
+ struct SL_WH_MPI_TRIGGERS_T *mpi_tg, bool set);
/* ctl shared API */
extern struct device_attribute *mpt3sas_host_attrs[];
@@ -1759,18 +1872,26 @@ extern struct scsi_transport_template *mpt3sas_transport_template;
u8 mpt3sas_transport_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
u32 reply);
struct _sas_port *mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc,
- u16 handle, u64 sas_address);
+ u16 handle, u64 sas_address, struct hba_port *port);
void mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
- u64 sas_address_parent);
+ u64 sas_address_parent, struct hba_port *port);
int mpt3sas_transport_add_host_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy
*mpt3sas_phy, Mpi2SasPhyPage0_t phy_pg0, struct device *parent_dev);
int mpt3sas_transport_add_expander_phy(struct MPT3SAS_ADAPTER *ioc,
struct _sas_phy *mpt3sas_phy, Mpi2ExpanderPage1_t expander_pg1,
struct device *parent_dev);
void mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc,
- u64 sas_address, u16 handle, u8 phy_number, u8 link_rate);
+ u64 sas_address, u16 handle, u8 phy_number, u8 link_rate,
+ struct hba_port *port);
extern struct sas_function_template mpt3sas_transport_functions;
extern struct scsi_transport_template *mpt3sas_transport_template;
+void
+mpt3sas_transport_del_phy_from_an_existing_port(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_node *sas_node, struct _sas_phy *mpt3sas_phy);
+void
+mpt3sas_transport_add_phy_to_an_existing_port(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_node *sas_node, struct _sas_phy *mpt3sas_phy,
+ u64 sas_address, struct hba_port *port);
/* trigger data externs */
void mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
index 4a0ddc7c95e4..8238843523b5 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_config.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -1743,6 +1743,766 @@ mpt3sas_config_get_phys_disk_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
}
/**
+ * mpt3sas_config_get_driver_trigger_pg0 - obtain driver trigger page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_driver_trigger_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage0_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType =
+ MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER;
+ mpi_request.Header.PageNumber = 0;
+ mpi_request.Header.PageVersion = MPI26_DRIVER_TRIGGER_PAGE0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * _config_set_driver_trigger_pg0 - write driver trigger page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_config_set_driver_trigger_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage0_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType =
+ MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER;
+ mpi_request.Header.PageNumber = 0;
+ mpi_request.Header.PageVersion = MPI26_DRIVER_TRIGGER_PAGE0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
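+ /*
+ * Write the CURRENT copy of the page first and then the NVRAM copy,
+ * so the updated trigger settings also persist across reboots.
+ */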
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT;
+ _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_update_driver_trigger_pg0 - update driver trigger page 0
+ * @ioc: per adapter object
+ * @trigger_flag: trigger type bit map
+ * @set: set or clear trigger values
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+mpt3sas_config_update_driver_trigger_pg0(struct MPT3SAS_ADAPTER *ioc,
+ u16 trigger_flag, bool set)
+{
+ Mpi26DriverTriggerPage0_t tg_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+ int rc;
+ u16 flags, ioc_status;
+
+ rc = mpt3sas_config_get_driver_trigger_pg0(ioc, &mpi_reply, &tg_pg0);
+ if (rc)
+ return rc;
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ dcprintk(ioc,
+ ioc_err(ioc,
+ "%s: Failed to get trigger pg0, ioc_status(0x%04x)\n",
+ __func__, ioc_status));
+ return -EFAULT;
+ }
+
+ if (set)
+ flags = le16_to_cpu(tg_pg0.TriggerFlags) | trigger_flag;
+ else
+ flags = le16_to_cpu(tg_pg0.TriggerFlags) & ~trigger_flag;
+
+ tg_pg0.TriggerFlags = cpu_to_le16(flags);
+
+ rc = _config_set_driver_trigger_pg0(ioc, &mpi_reply, &tg_pg0);
+ if (rc)
+ return rc;
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ dcprintk(ioc,
+ ioc_err(ioc,
+ "%s: Failed to update trigger pg0, ioc_status(0x%04x)\n",
+ __func__, ioc_status));
+ return -EFAULT;
+ }
+
+ return 0;
+}
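
The updater above is a read-modify-write on TriggerFlags: OR the requested
bit in when @set is true, mask it out otherwise, then write the page back. A
standalone model of that flag update (values illustrative):

#include <stdbool.h>
#include <stdio.h>

static unsigned short update_flags(unsigned short flags, unsigned short bit,
				   bool set)
{
	return set ? (flags | bit) : (flags & (unsigned short)~bit);
}

int main(void)
{
	unsigned short flags = 0x0005;

	flags = update_flags(flags, 0x0002, true);	/* -> 0x0007 */
	printf("0x%04x\n", flags);
	flags = update_flags(flags, 0x0004, false);	/* -> 0x0003 */
	printf("0x%04x\n", flags);
	return 0;
}
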
+
+/**
+ * mpt3sas_config_get_driver_trigger_pg1 - obtain driver trigger page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_driver_trigger_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage1_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType =
+ MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER;
+ mpi_request.Header.PageNumber = 1;
+ mpi_request.Header.PageVersion = MPI26_DRIVER_TRIGGER_PAGE1_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * _config_set_driver_trigger_pg1 - write driver trigger page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_config_set_driver_trigger_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage1_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType =
+ MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER;
+ mpi_request.Header.PageNumber = 1;
+ mpi_request.Header.PageVersion = MPI26_DRIVER_TRIGGER_PAGE1_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT;
+ _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_update_driver_trigger_pg1 - update driver trigger page 1
+ * @ioc: per adapter object
+ * @master_tg: Master trigger bit map
+ * @set: set or clear trigger values
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_update_driver_trigger_pg1(struct MPT3SAS_ADAPTER *ioc,
+ struct SL_WH_MASTER_TRIGGER_T *master_tg, bool set)
+{
+ Mpi26DriverTriggerPage1_t tg_pg1;
+ Mpi2ConfigReply_t mpi_reply;
+ int rc;
+ u16 ioc_status;
+
+ rc = mpt3sas_config_update_driver_trigger_pg0(ioc,
+ MPI26_DRIVER_TRIGGER0_FLAG_MASTER_TRIGGER_VALID, set);
+ if (rc)
+ return rc;
+
+ rc = mpt3sas_config_get_driver_trigger_pg1(ioc, &mpi_reply, &tg_pg1);
+ if (rc)
+ goto out;
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ dcprintk(ioc,
+ ioc_err(ioc,
+ "%s: Failed to get trigger pg1, ioc_status(0x%04x)\n",
+ __func__, ioc_status));
+ rc = -EFAULT;
+ goto out;
+ }
+
+ if (set) {
+ tg_pg1.NumMasterTrigger = cpu_to_le16(1);
+ tg_pg1.MasterTriggers[0].MasterTriggerFlags = cpu_to_le32(
+ master_tg->MasterData);
+ } else {
+ tg_pg1.NumMasterTrigger = 0;
+ tg_pg1.MasterTriggers[0].MasterTriggerFlags = 0;
+ }
+
+ rc = _config_set_driver_trigger_pg1(ioc, &mpi_reply, &tg_pg1);
+ if (rc)
+ goto out;
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ dcprintk(ioc,
+ ioc_err(ioc,
+ "%s: Failed to get trigger pg1, ioc_status(0x%04x)\n",
+ __func__, ioc_status));
+ rc = -EFAULT;
+ goto out;
+ }
+
+ return 0;
+
+out:
+ mpt3sas_config_update_driver_trigger_pg0(ioc,
+ MPI26_DRIVER_TRIGGER0_FLAG_MASTER_TRIGGER_VALID, !set);
+
+ return rc;
+}
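
Note the compensation pattern above: the pg1 updater first flips the
MASTER_TRIGGER_VALID bit in page 0, and every later failure path lands on the
out: label, which re-invokes the pg0 update with !set so that page 0 never
advertises trigger data that was not actually committed. The pg2-pg4 updaters
below follow the same shape. A compact standalone model of this
compensate-on-failure flow (all names illustrative):

#include <stdbool.h>
#include <stdio.h>

static int set_valid_bit(bool set)
{
	printf("valid bit = %d\n", set);  /* stands in for the pg0 update */
	return 0;
}

static int write_page(bool fail)
{
	return fail ? -1 : 0;             /* stands in for the pg1 write */
}

static int update_page(bool set, bool simulate_failure)
{
	int rc = set_valid_bit(set);

	if (rc)
		return rc;
	rc = write_page(simulate_failure);
	if (rc)
		set_valid_bit(!set);	/* roll the valid bit back */
	return rc;
}

int main(void)
{
	update_page(true, false);	/* commits */
	update_page(true, true);	/* write fails, valid bit rolled back */
	return 0;
}
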
+
+/**
+ * mpt3sas_config_get_driver_trigger_pg2 - obtain driver trigger page 2
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_driver_trigger_pg2(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage2_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType =
+ MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER;
+ mpi_request.Header.PageNumber = 2;
+ mpi_request.Header.PageVersion = MPI26_DRIVER_TRIGGER_PAGE2_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * _config_set_driver_trigger_pg2 - write driver trigger page 2
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_config_set_driver_trigger_pg2(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage2_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType =
+ MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER;
+ mpi_request.Header.PageNumber = 2;
+ mpi_request.Header.PageVersion = MPI26_DRIVER_TRIGGER_PAGE2_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT;
+ _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_update_driver_trigger_pg2 - update driver trigger page 2
+ * @ioc: per adapter object
+ * @event_tg: list of Event Triggers
+ * @set: set or clear trigger values
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_update_driver_trigger_pg2(struct MPT3SAS_ADAPTER *ioc,
+ struct SL_WH_EVENT_TRIGGERS_T *event_tg, bool set)
+{
+ Mpi26DriverTriggerPage2_t tg_pg2;
+ Mpi2ConfigReply_t mpi_reply;
+ int rc, i, count;
+ u16 ioc_status;
+
+ rc = mpt3sas_config_update_driver_trigger_pg0(ioc,
+ MPI26_DRIVER_TRIGGER0_FLAG_MPI_EVENT_TRIGGER_VALID, set);
+ if (rc)
+ return rc;
+
+ rc = mpt3sas_config_get_driver_trigger_pg2(ioc, &mpi_reply, &tg_pg2);
+ if (rc)
+ goto out;
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ dcprintk(ioc,
+ ioc_err(ioc,
+ "%s: Failed to get trigger pg2, ioc_status(0x%04x)\n",
+ __func__, ioc_status));
+ rc = -EFAULT;
+ goto out;
+ }
+
+ if (set) {
+ count = event_tg->ValidEntries;
+ tg_pg2.NumMPIEventTrigger = cpu_to_le16(count);
+ for (i = 0; i < count; i++) {
+ tg_pg2.MPIEventTriggers[i].MPIEventCode =
+ cpu_to_le16(
+ event_tg->EventTriggerEntry[i].EventValue);
+ tg_pg2.MPIEventTriggers[i].MPIEventCodeSpecific =
+ cpu_to_le16(
+ event_tg->EventTriggerEntry[i].LogEntryQualifier);
+ }
+ } else {
+ tg_pg2.NumMPIEventTrigger = 0;
+ memset(&tg_pg2.MPIEventTriggers[0], 0,
+ NUM_VALID_ENTRIES * sizeof(
+ MPI26_DRIVER_MPI_EVENT_TIGGER_ENTRY));
+ }
+
+ rc = _config_set_driver_trigger_pg2(ioc, &mpi_reply, &tg_pg2);
+ if (rc)
+ goto out;
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ dcprintk(ioc,
+ ioc_err(ioc,
+ "%s: Failed to get trigger pg2, ioc_status(0x%04x)\n",
+ __func__, ioc_status));
+ rc = -EFAULT;
+ goto out;
+ }
+
+ return 0;
+
+out:
+ mpt3sas_config_update_driver_trigger_pg0(ioc,
+ MPI26_DRIVER_TRIGGER0_FLAG_MPI_EVENT_TRIGGER_VALID, !set);
+
+ return rc;
+}
+
+/**
+ * mpt3sas_config_get_driver_trigger_pg3 - obtain driver trigger page 3
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_driver_trigger_pg3(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage3_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType =
+ MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER;
+ mpi_request.Header.PageNumber = 3;
+ mpi_request.Header.PageVersion = MPI26_DRIVER_TRIGGER_PAGE3_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * _config_set_driver_trigger_pg3 - write driver trigger page 3
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_config_set_driver_trigger_pg3(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage3_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType =
+ MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER;
+ mpi_request.Header.PageNumber = 3;
+ mpi_request.Header.PageVersion = MPI26_DRIVER_TRIGGER_PAGE3_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT;
+ _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_update_driver_trigger_pg3 - update driver trigger page 3
+ * @ioc: per adapter object
+ * @scsi_tg: scsi trigger list
+ * @set: set or clear trigger values
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_update_driver_trigger_pg3(struct MPT3SAS_ADAPTER *ioc,
+ struct SL_WH_SCSI_TRIGGERS_T *scsi_tg, bool set)
+{
+ Mpi26DriverTriggerPage3_t tg_pg3;
+ Mpi2ConfigReply_t mpi_reply;
+ int rc, i, count;
+ u16 ioc_status;
+
+ rc = mpt3sas_config_update_driver_trigger_pg0(ioc,
+ MPI26_DRIVER_TRIGGER0_FLAG_SCSI_SENSE_TRIGGER_VALID, set);
+ if (rc)
+ return rc;
+
+ rc = mpt3sas_config_get_driver_trigger_pg3(ioc, &mpi_reply, &tg_pg3);
+ if (rc)
+ goto out;
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ dcprintk(ioc,
+ ioc_err(ioc,
+ "%s: Failed to get trigger pg3, ioc_status(0x%04x)\n",
+ __func__, ioc_status));
+ rc = -EFAULT;
+ goto out;
+ }
+
+ if (set) {
+ count = scsi_tg->ValidEntries;
+ tg_pg3.NumSCSISenseTrigger = cpu_to_le16(count);
+ for (i = 0; i < count; i++) {
+ tg_pg3.SCSISenseTriggers[i].ASCQ =
+ scsi_tg->SCSITriggerEntry[i].ASCQ;
+ tg_pg3.SCSISenseTriggers[i].ASC =
+ scsi_tg->SCSITriggerEntry[i].ASC;
+ tg_pg3.SCSISenseTriggers[i].SenseKey =
+ scsi_tg->SCSITriggerEntry[i].SenseKey;
+ }
+ } else {
+ tg_pg3.NumSCSISenseTrigger = 0;
+ memset(&tg_pg3.SCSISenseTriggers[0], 0,
+ NUM_VALID_ENTRIES * sizeof(
+ MPI26_DRIVER_SCSI_SENSE_TIGGER_ENTRY));
+ }
+
+ rc = _config_set_driver_trigger_pg3(ioc, &mpi_reply, &tg_pg3);
+ if (rc)
+ goto out;
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ dcprintk(ioc,
+ ioc_err(ioc,
+ "%s: Failed to get trigger pg3, ioc_status(0x%04x)\n",
+ __func__, ioc_status));
+ return -EFAULT;
+ }
+
+ return 0;
+out:
+ mpt3sas_config_update_driver_trigger_pg0(ioc,
+ MPI26_DRIVER_TRIGGER0_FLAG_SCSI_SENSE_TRIGGER_VALID, !set);
+
+ return rc;
+}
+
+/**
+ * mpt3sas_config_get_driver_trigger_pg4 - obtain driver trigger page 4
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_driver_trigger_pg4(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage4_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType =
+ MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER;
+ mpi_request.Header.PageNumber = 4;
+ mpi_request.Header.PageVersion = MPI26_DRIVER_TRIGGER_PAGE4_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * _config_set_driver_trigger_pg4 - write driver trigger page 4
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_config_set_driver_trigger_pg4(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage4_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType =
+ MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER;
+ mpi_request.Header.PageNumber = 4;
+ mpi_request.Header.PageVersion = MPI26_DRIVER_TRIGGER_PAGE4_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT;
+ _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_update_driver_trigger_pg4 - update driver trigger page 4
+ * @ioc: per adapter object
+ * @mpi_tg: mpi trigger list
+ * @set: set or clear trigger values
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_update_driver_trigger_pg4(struct MPT3SAS_ADAPTER *ioc,
+ struct SL_WH_MPI_TRIGGERS_T *mpi_tg, bool set)
+{
+ Mpi26DriverTriggerPage4_t tg_pg4;
+ Mpi2ConfigReply_t mpi_reply;
+ int rc, i, count;
+ u16 ioc_status;
+
+ rc = mpt3sas_config_update_driver_trigger_pg0(ioc,
+ MPI26_DRIVER_TRIGGER0_FLAG_LOGINFO_TRIGGER_VALID, set);
+ if (rc)
+ return rc;
+
+ rc = mpt3sas_config_get_driver_trigger_pg4(ioc, &mpi_reply, &tg_pg4);
+ if (rc)
+ goto out;
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ dcprintk(ioc,
+ ioc_err(ioc,
+ "%s: Failed to get trigger pg4, ioc_status(0x%04x)\n",
+ __func__, ioc_status));
+ rc = -EFAULT;
+ goto out;
+ }
+
+ if (set) {
+ count = mpi_tg->ValidEntries;
+ tg_pg4.NumIOCStatusLogInfoTrigger = cpu_to_le16(count);
+ for (i = 0; i < count; i++) {
+ tg_pg4.IOCStatusLoginfoTriggers[i].IOCStatus =
+ cpu_to_le16(mpi_tg->MPITriggerEntry[i].IOCStatus);
+ tg_pg4.IOCStatusLoginfoTriggers[i].LogInfo =
+ cpu_to_le32(mpi_tg->MPITriggerEntry[i].IocLogInfo);
+ }
+ } else {
+ tg_pg4.NumIOCStatusLogInfoTrigger = 0;
+ memset(&tg_pg4.IOCStatusLoginfoTriggers[0], 0,
+ NUM_VALID_ENTRIES * sizeof(
+ MPI26_DRIVER_IOCSTATUS_LOGINFO_TIGGER_ENTRY));
+ }
+
+ rc = _config_set_driver_trigger_pg4(ioc, &mpi_reply, &tg_pg4);
+ if (rc)
+ goto out;
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ dcprintk(ioc,
+ ioc_err(ioc,
+ "%s: Failed to get trigger pg4, ioc_status(0x%04x)\n",
+ __func__, ioc_status));
+ rc = -EFAULT;
+ goto out;
+ }
+
+ return 0;
+
+out:
+ mpt3sas_config_update_driver_trigger_pg0(ioc,
+ MPI26_DRIVER_TRIGGER0_FLAG_LOGINFO_TRIGGER_VALID, !set);
+
+ return rc;
+}
+
+/**
* mpt3sas_config_get_volume_handle - returns volume handle for give hidden
* raid components
* @ioc: per adapter object
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index edd26a2570fa..c8a0ce18f2c5 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -902,8 +902,10 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
(Mpi2SmpPassthroughRequest_t *)mpi_request;
u8 *data;
- /* ioc determines which port to use */
- smp_request->PhysicalPort = 0xFF;
+ if (!ioc->multipath_on_hba) {
+ /* ioc determines which port to use */
+ smp_request->PhysicalPort = 0xFF;
+ }
if (smp_request->PassthroughFlags &
MPI2_SMP_PT_REQ_PT_FLAGS_IMMEDIATE)
data = (u8 *)&smp_request->SGL;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 5f845d7094fc..c8b09a81834d 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -159,6 +159,15 @@ module_param(enable_sdev_max_qd, bool, 0444);
MODULE_PARM_DESC(enable_sdev_max_qd,
"Enable sdev max qd as can_queue, def=disabled(0)");
+static int multipath_on_hba = -1;
+module_param(multipath_on_hba, int, 0);
+MODULE_PARM_DESC(multipath_on_hba,
+ "Multipath support to add same target device\n\t\t"
+ "as many times as it is visible to HBA from various paths\n\t\t"
+ "(by default:\n\t\t"
+ "\t SAS 2.0 & SAS 3.0 HBA - This will be disabled,\n\t\t"
+ "\t SAS 3.5 HBA - This will be enabled)");
+
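
Since multipath_on_hba defaults to -1, the per-generation defaults described
above apply unless the behavior is forced at load time, e.g.:

    modprobe mpt3sas multipath_on_hba=1

or multipath_on_hba=0 to keep the single-path behavior on SAS 3.5 HBAs.
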
/* raid transport support */
static struct raid_template *mpt3sas_raid_template;
static struct raid_template *mpt2sas_raid_template;
@@ -357,6 +366,87 @@ _scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number,
}
/**
+ * mpt3sas_get_port_by_id - get hba port entry corresponding to provided
+ * port number from port list
+ * @ioc: per adapter object
+ * @port_id: port number
+ * @bypass_dirty_port_flag: when set, return the matching hba port entry
+ * even if it is marked as dirty.
+ *
+ * Search for the hba port entry corresponding to the provided port
+ * number; if found, return the port object, otherwise return NULL.
+ */
+struct hba_port *
+mpt3sas_get_port_by_id(struct MPT3SAS_ADAPTER *ioc,
+ u8 port_id, u8 bypass_dirty_port_flag)
+{
+ struct hba_port *port, *port_next;
+
+ /*
+ * When multipath_on_hba is disabled, search
+ * for the hba_port entry using the default
+ * port id, i.e. 255.
+ */
+ if (!ioc->multipath_on_hba)
+ port_id = MULTIPATH_DISABLED_PORT_ID;
+
+ list_for_each_entry_safe(port, port_next,
+ &ioc->port_table_list, list) {
+ if (port->port_id != port_id)
+ continue;
+ if (bypass_dirty_port_flag)
+ return port;
+ if (port->flags & HBA_PORT_FLAG_DIRTY_PORT)
+ continue;
+ return port;
+ }
+
+ /*
+ * Allocate hba_port object for default port id (i.e. 255)
+ * when multipath_on_hba is disabled for the HBA.
+ * And add this object to port_table_list.
+ */
+ if (!ioc->multipath_on_hba) {
+ port = kzalloc(sizeof(struct hba_port), GFP_KERNEL);
+ if (!port)
+ return NULL;
+
+ port->port_id = port_id;
+ ioc_info(ioc,
+ "hba_port entry: %p, port: %d is added to hba_port list\n",
+ port, port->port_id);
+ list_add_tail(&port->list,
+ &ioc->port_table_list);
+ return port;
+ }
+ return NULL;
+}
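
When multipath_on_hba is off, every lookup above collapses onto the single
synthetic port id 255 (MULTIPATH_DISABLED_PORT_ID), whose hba_port object is
allocated on first use. A standalone model of just that port-id
normalization:

#include <stdio.h>

#define MULTIPATH_DISABLED_PORT_ID 0xFF

/* Model of the port-id override done before the port list is walked. */
static unsigned char effective_port_id(int multipath_on_hba,
				       unsigned char port_id)
{
	return multipath_on_hba ? port_id : MULTIPATH_DISABLED_PORT_ID;
}

int main(void)
{
	printf("0x%02x\n", effective_port_id(0, 3));	/* 0xff */
	printf("0x%02x\n", effective_port_id(1, 3));	/* 0x03 */
	return 0;
}
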
+
+/**
+ * mpt3sas_get_vphy_by_phy - get virtual_phy object corresponding to phy number
+ * @ioc: per adapter object
+ * @port: hba_port object
+ * @phy: phy number
+ *
+ * Return the virtual_phy object corresponding to the phy number,
+ * or NULL if no matching virtual phy is found.
+ */
+struct virtual_phy *
+mpt3sas_get_vphy_by_phy(struct MPT3SAS_ADAPTER *ioc,
+ struct hba_port *port, u32 phy)
+{
+ struct virtual_phy *vphy, *vphy_next;
+
+ if (!port->vphys_mask)
+ return NULL;
+
+ list_for_each_entry_safe(vphy, vphy_next, &port->vphys_list, list) {
+ if (vphy->phy_mask & (1 << phy))
+ return vphy;
+ }
+ return NULL;
+}
+
+/**
* _scsih_is_boot_device - search for matching boot device.
* @sas_address: sas address
* @device_name: device name specified in INDENTIFY fram
@@ -614,48 +704,105 @@ mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
return ret;
}
+
+/**
+ * __mpt3sas_get_sdev_by_rphy - sas device search
+ * @ioc: per adapter object
+ * @rphy: sas_rphy pointer
+ *
+ * Context: Calling function should acquire ioc->sas_device_lock.
+ *
+ * This searches for the sas_device matching the given rphy object and,
+ * if found, returns the sas_device object.
+ */
struct _sas_device *
-__mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
- u64 sas_address)
+__mpt3sas_get_sdev_by_rphy(struct MPT3SAS_ADAPTER *ioc,
+ struct sas_rphy *rphy)
{
struct _sas_device *sas_device;
assert_spin_locked(&ioc->sas_device_lock);
- list_for_each_entry(sas_device, &ioc->sas_device_list, list)
- if (sas_device->sas_address == sas_address)
- goto found_device;
+ list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
+ if (sas_device->rphy != rphy)
+ continue;
+ sas_device_get(sas_device);
+ return sas_device;
+ }
- list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
- if (sas_device->sas_address == sas_address)
- goto found_device;
+ sas_device = NULL;
+ list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) {
+ if (sas_device->rphy != rphy)
+ continue;
+ sas_device_get(sas_device);
+ return sas_device;
+ }
return NULL;
+}
-found_device:
- sas_device_get(sas_device);
- return sas_device;
+/**
+ * __mpt3sas_get_sdev_by_addr - get the _sas_device object corresponding to
+ * the provided sas address and hba port
+ * @ioc: per adapter object
+ * @sas_address: sas address
+ * @port: hba port entry
+ * Context: Calling function should acquire ioc->sas_device_lock.
+ *
+ * Search for the _sas_device object matching the provided sas address and
+ * port; if found, return the _sas_device object address, otherwise NULL.
+ */
+struct _sas_device *
+__mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
+ u64 sas_address, struct hba_port *port)
+{
+ struct _sas_device *sas_device;
+
+ if (!port)
+ return NULL;
+
+ assert_spin_locked(&ioc->sas_device_lock);
+
+ list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
+ if (sas_device->sas_address != sas_address)
+ continue;
+ if (sas_device->port != port)
+ continue;
+ sas_device_get(sas_device);
+ return sas_device;
+ }
+
+ list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) {
+ if (sas_device->sas_address != sas_address)
+ continue;
+ if (sas_device->port != port)
+ continue;
+ sas_device_get(sas_device);
+ return sas_device;
+ }
+
+ return NULL;
}
/**
* mpt3sas_get_sdev_by_addr - sas device search
* @ioc: per adapter object
* @sas_address: sas address
+ * @port: hba port entry
* Context: Calling function should acquire ioc->sas_device_lock
*
- * This searches for sas_device based on sas_address, then return sas_device
- * object.
+ * This searches for sas_device based on sas_address & port number,
+ * then return sas_device object.
*/
struct _sas_device *
mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
- u64 sas_address)
+ u64 sas_address, struct hba_port *port)
{
struct _sas_device *sas_device;
unsigned long flags;
spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_device = __mpt3sas_get_sdev_by_addr(ioc,
- sas_address);
+ sas_address, port);
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
return sas_device;
@@ -824,13 +971,17 @@ _scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
}
/**
- * mpt3sas_device_remove_by_sas_address - removing device object by sas address
+ * mpt3sas_device_remove_by_sas_address - removing device object by
+ * sas address & port number
* @ioc: per adapter object
* @sas_address: device sas_address
+ * @port: hba port entry
+ *
+ * Return nothing.
*/
void
mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
- u64 sas_address)
+ u64 sas_address, struct hba_port *port)
{
struct _sas_device *sas_device;
unsigned long flags;
@@ -839,7 +990,7 @@ mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
return;
spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = __mpt3sas_get_sdev_by_addr(ioc, sas_address);
+ sas_device = __mpt3sas_get_sdev_by_addr(ioc, sas_address, port);
if (sas_device) {
list_del_init(&sas_device->list);
sas_device_put(sas_device);
@@ -884,7 +1035,7 @@ _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
}
if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
- sas_device->sas_address_parent)) {
+ sas_device->sas_address_parent, sas_device->port)) {
_scsih_sas_device_remove(ioc, sas_device);
} else if (!sas_device->starget) {
/*
@@ -895,7 +1046,8 @@ _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
if (!ioc->is_driver_loading) {
mpt3sas_transport_port_remove(ioc,
sas_device->sas_address,
- sas_device->sas_address_parent);
+ sas_device->sas_address_parent,
+ sas_device->port);
_scsih_sas_device_remove(ioc, sas_device);
}
} else
@@ -1432,21 +1584,26 @@ out:
* mpt3sas_scsih_expander_find_by_sas_address - expander device search
* @ioc: per adapter object
* @sas_address: sas address
+ * @port: hba port entry
* Context: Calling function should acquire ioc->sas_node_lock.
*
- * This searches for expander device based on sas_address, then returns the
- * sas_node object.
+ * This searches for expander device based on sas_address & port number,
+ * then returns the sas_node object.
*/
struct _sas_node *
mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
- u64 sas_address)
+ u64 sas_address, struct hba_port *port)
{
- struct _sas_node *sas_expander, *r;
+ struct _sas_node *sas_expander, *r = NULL;
+
+ if (!port)
+ return r;
- r = NULL;
list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
if (sas_expander->sas_address != sas_address)
continue;
+ if (sas_expander->port != port)
+ continue;
r = sas_expander;
goto out;
}
@@ -1744,6 +1901,7 @@ scsih_target_alloc(struct scsi_target *starget)
if (pcie_device) {
sas_target_priv_data->handle = pcie_device->handle;
sas_target_priv_data->sas_address = pcie_device->wwid;
+ sas_target_priv_data->port = NULL;
sas_target_priv_data->pcie_dev = pcie_device;
pcie_device->starget = starget;
pcie_device->id = starget->id;
@@ -1761,12 +1919,12 @@ scsih_target_alloc(struct scsi_target *starget)
/* sas/sata devices */
spin_lock_irqsave(&ioc->sas_device_lock, flags);
rphy = dev_to_rphy(starget->dev.parent);
- sas_device = __mpt3sas_get_sdev_by_addr(ioc,
- rphy->identify.sas_address);
+ sas_device = __mpt3sas_get_sdev_by_rphy(ioc, rphy);
if (sas_device) {
sas_target_priv_data->handle = sas_device->handle;
sas_target_priv_data->sas_address = sas_device->sas_address;
+ sas_target_priv_data->port = sas_device->port;
sas_target_priv_data->sas_dev = sas_device;
sas_device->starget = starget;
sas_device->id = starget->id;
@@ -1922,7 +2080,8 @@ scsih_slave_alloc(struct scsi_device *sdev)
} else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_device = __mpt3sas_get_sdev_by_addr(ioc,
- sas_target_priv_data->sas_address);
+ sas_target_priv_data->sas_address,
+ sas_target_priv_data->port);
if (sas_device && (sas_device->starget == NULL)) {
sdev_printk(KERN_INFO, sdev,
"%s : sas_device->starget set to starget @ %d\n",
@@ -2527,7 +2686,8 @@ scsih_slave_configure(struct scsi_device *sdev)
spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_device = __mpt3sas_get_sdev_by_addr(ioc,
- sas_device_priv_data->sas_target->sas_address);
+ sas_device_priv_data->sas_target->sas_address,
+ sas_device_priv_data->sas_target->port);
if (!sas_device) {
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
dfailprintk(ioc,
@@ -2762,8 +2922,8 @@ mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
/**
* scsih_tm_cmd_map_status - map the target reset & LUN reset TM status
- * @ioc - per adapter object
- * @channel - the channel assigned by the OS
+ * @ioc: per adapter object
+ * @channel: the channel assigned by the OS
* @id: the id assigned by the OS
* @lun: lun number
* @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
@@ -2808,9 +2968,9 @@ scsih_tm_cmd_map_status(struct MPT3SAS_ADAPTER *ioc, uint channel,
/**
* scsih_tm_post_processing - post processing of target & LUN reset
- * @ioc - per adapter object
+ * @ioc: per adapter object
* @handle: device handle
- * @channel - the channel assigned by the OS
+ * @channel: the channel assigned by the OS
* @id: the id assigned by the OS
* @lun: lun number
* @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
@@ -3513,6 +3673,8 @@ static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc)
*
* Walk the firmware event queue, either killing timers, or waiting
* for outstanding events to complete
+ *
+ * Context: task, can sleep
*/
static void
_scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
@@ -3520,7 +3682,7 @@ _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
struct fw_event_work *fw_event;
if ((list_empty(&ioc->fw_event_list) && !ioc->current_event) ||
- !ioc->firmware_event_thread || in_interrupt())
+ !ioc->firmware_event_thread)
return;
ioc->fw_events_cleanup = 1;
@@ -3642,11 +3804,13 @@ _scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc)
* _scsih_ublock_io_device - prepare device to be deleted
* @ioc: per adapter object
* @sas_address: sas address
+ * @port: hba port entry
*
* unblock then put device in offline state
*/
static void
-_scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
+_scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc,
+ u64 sas_address, struct hba_port *port)
{
struct MPT3SAS_DEVICE *sas_device_priv_data;
struct scsi_device *sdev;
@@ -3658,6 +3822,8 @@ _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
if (sas_device_priv_data->sas_target->sas_address
!= sas_address)
continue;
+ if (sas_device_priv_data->sas_target->port != port)
+ continue;
if (sas_device_priv_data->block)
_scsih_internal_device_unblock(sdev,
sas_device_priv_data);
@@ -3758,7 +3924,8 @@ _scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc,
SAS_END_DEVICE) {
spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_device = __mpt3sas_get_sdev_by_addr(ioc,
- mpt3sas_port->remote_identify.sas_address);
+ mpt3sas_port->remote_identify.sas_address,
+ mpt3sas_port->hba_port);
if (sas_device) {
set_bit(sas_device->handle,
ioc->blocking_handles);
@@ -3777,7 +3944,8 @@ _scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc,
SAS_FANOUT_EXPANDER_DEVICE) {
expander_sibling =
mpt3sas_scsih_expander_find_by_sas_address(
- ioc, mpt3sas_port->remote_identify.sas_address);
+ ioc, mpt3sas_port->remote_identify.sas_address,
+ mpt3sas_port->hba_port);
_scsih_block_io_to_children_attached_to_ex(ioc,
expander_sibling);
}
@@ -3866,6 +4034,7 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
struct _tr_list *delayed_tr;
u32 ioc_state;
u8 tr_method = 0;
+ struct hba_port *port = NULL;
if (ioc->pci_error_recovery) {
dewtprintk(ioc,
@@ -3894,6 +4063,7 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
sas_target_priv_data = sas_device->starget->hostdata;
sas_target_priv_data->deleted = 1;
sas_address = sas_device->sas_address;
+ port = sas_device->port;
}
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
if (!sas_device) {
@@ -3941,7 +4111,7 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
pcie_device->enclosure_level,
pcie_device->connector_name));
}
- _scsih_ublock_io_device(ioc, sas_address);
+ _scsih_ublock_io_device(ioc, sas_address, port);
sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
}
@@ -5715,6 +5885,614 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
}
/**
+ * _scsih_update_vphys_after_reset - update the Port's
+ * vphys_list after reset
+ * @ioc: per adapter object
+ *
+ * Returns nothing.
+ */
+static void
+_scsih_update_vphys_after_reset(struct MPT3SAS_ADAPTER *ioc)
+{
+ u16 sz, ioc_status;
+ int i;
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
+ u16 attached_handle;
+ u64 attached_sas_addr;
+ u8 found = 0, port_id;
+ Mpi2SasPhyPage0_t phy_pg0;
+ struct hba_port *port, *port_next, *mport;
+ struct virtual_phy *vphy, *vphy_next;
+ struct _sas_device *sas_device;
+
+ /*
+ * Mark all the vphys objects as dirty.
+ */
+ list_for_each_entry_safe(port, port_next,
+ &ioc->port_table_list, list) {
+ if (!port->vphys_mask)
+ continue;
+ list_for_each_entry_safe(vphy, vphy_next,
+ &port->vphys_list, list) {
+ vphy->flags |= MPT_VPHY_FLAG_DIRTY_PHY;
+ }
+ }
+
+ /*
+ * Read SASIOUnitPage0 to get each HBA Phy's data.
+ */
+ sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) +
+ (ioc->sas_hba.num_phys * sizeof(Mpi2SasIOUnit0PhyData_t));
+ sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_iounit_pg0) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return;
+ }
+ if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
+ sas_iounit_pg0, sz)) != 0)
+ goto out;
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+ goto out;
+ /*
+ * Loop over each HBA Phy.
+ */
+ for (i = 0; i < ioc->sas_hba.num_phys; i++) {
+ /*
+		 * Check whether the Phy's Negotiated Link Rate is at least 1.5G.
+ */
+ if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) <
+ MPI2_SAS_NEG_LINK_RATE_1_5)
+ continue;
+ /*
+		 * Check whether the Phy is connected to a SEP device. If it
+		 * is, read the Phy's SASPHYPage0 data to determine whether
+		 * the Phy is a virtual Phy. If it is virtual, that confirms
+		 * the attached remote device is the HBA's vSES device.
+ */
+ if (!(le32_to_cpu(
+ sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) &
+ MPI2_SAS_DEVICE_INFO_SEP))
+ continue;
+
+ if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
+ i))) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ continue;
+ }
+
+ if (!(le32_to_cpu(phy_pg0.PhyInfo) &
+ MPI2_SAS_PHYINFO_VIRTUAL_PHY))
+ continue;
+ /*
+ * Get the vSES device's SAS Address.
+ */
+ attached_handle = le16_to_cpu(
+ sas_iounit_pg0->PhyData[i].AttachedDevHandle);
+ if (_scsih_get_sas_address(ioc, attached_handle,
+ &attached_sas_addr) != 0) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ continue;
+ }
+
+ found = 0;
+ port = port_next = NULL;
+ /*
+ * Loop over each virtual_phy object from
+ * each port's vphys_list.
+ */
+ list_for_each_entry_safe(port,
+ port_next, &ioc->port_table_list, list) {
+ if (!port->vphys_mask)
+ continue;
+ list_for_each_entry_safe(vphy, vphy_next,
+ &port->vphys_list, list) {
+ /*
+ * Continue with next virtual_phy object
+ * if the object is not marked as dirty.
+ */
+ if (!(vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY))
+ continue;
+
+ /*
+ * Continue with next virtual_phy object
+				 * if the object's SAS Address does not equal
+				 * the current Phy's vSES device SAS Address.
+ */
+ if (vphy->sas_address != attached_sas_addr)
+ continue;
+ /*
+ * Enable current Phy number bit in object's
+ * phy_mask field.
+ */
+ if (!(vphy->phy_mask & (1 << i)))
+ vphy->phy_mask = (1 << i);
+ /*
+				 * Get the hba_port object corresponding to the
+				 * current Phy's Port ID. If there is no such
+				 * hba_port object, create a new one and add it
+				 * to the hba_port table.
+ */
+ port_id = sas_iounit_pg0->PhyData[i].Port;
+ mport = mpt3sas_get_port_by_id(ioc, port_id, 1);
+ if (!mport) {
+ mport = kzalloc(
+ sizeof(struct hba_port), GFP_KERNEL);
+ if (!mport)
+ break;
+ mport->port_id = port_id;
+ ioc_info(ioc,
+ "%s: hba_port entry: %p, port: %d is added to hba_port list\n",
+ __func__, mport, mport->port_id);
+ list_add_tail(&mport->list,
+ &ioc->port_table_list);
+ }
+ /*
+				 * If mport and port point to different hba_port
+				 * objects, the vSES device's Port ID changed
+				 * across the reset; move the current virtual_phy
+				 * object from port's vphys_list to mport's
+				 * vphys_list.
+ */
+ if (port != mport) {
+ if (!mport->vphys_mask)
+ INIT_LIST_HEAD(
+ &mport->vphys_list);
+ mport->vphys_mask |= (1 << i);
+ port->vphys_mask &= ~(1 << i);
+ list_move(&vphy->list,
+ &mport->vphys_list);
+ sas_device = mpt3sas_get_sdev_by_addr(
+ ioc, attached_sas_addr, port);
+ if (sas_device)
+ sas_device->port = mport;
+ }
+ /*
+				 * Earlier, while updating the hba_port table,
+				 * it was determined that no other directly
+				 * attached device uses mport's Port ID, so
+				 * mport was marked as dirty. Only the vSES
+				 * device has this Port ID, so unmark mport
+				 * as dirty.
+ */
+ if (mport->flags & HBA_PORT_FLAG_DIRTY_PORT) {
+ mport->sas_address = 0;
+ mport->phy_mask = 0;
+ mport->flags &=
+ ~HBA_PORT_FLAG_DIRTY_PORT;
+ }
+ /*
+ * Unmark current virtual_phy object as dirty.
+ */
+ vphy->flags &= ~MPT_VPHY_FLAG_DIRTY_PHY;
+ found = 1;
+ break;
+ }
+ if (found)
+ break;
+ }
+ }
+out:
+ kfree(sas_iounit_pg0);
+}
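/*
 * A minimal, self-contained model of the mark-and-sweep pattern used by
 * _scsih_update_vphys_after_reset() above: mark every vphy dirty, clear
 * the flag on entries rediscovered from firmware, and let the later
 * sweep (_scsih_del_dirty_vphy()) free whatever stayed dirty. Types and
 * names below are illustrative only, not driver API.
 */
#include <stdint.h>

#define MODEL_DIRTY 0x1

struct model_vphy {
	uint32_t flags;
	uint64_t sas_address;
};

static void model_mark_and_rescan(struct model_vphy *v, int n,
				  const uint64_t *seen, int nseen)
{
	int i, j;

	for (i = 0; i < n; i++)			/* mark phase */
		v[i].flags |= MODEL_DIRTY;

	for (i = 0; i < n; i++)			/* rediscover phase */
		for (j = 0; j < nseen; j++)
			if (v[i].sas_address == seen[j])
				v[i].flags &= ~MODEL_DIRTY;

	/* entries still flagged MODEL_DIRTY here are stale: sweep them */
}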
+
+/**
+ * _scsih_get_port_table_after_reset - Construct temporary port table
+ * @ioc: per adapter object
+ * @port_table: address where port table needs to be constructed
+ *
+ * Return: number of HBA port entries available after reset.
+ */
+static int
+_scsih_get_port_table_after_reset(struct MPT3SAS_ADAPTER *ioc,
+ struct hba_port *port_table)
+{
+ u16 sz, ioc_status;
+ int i, j;
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
+ u16 attached_handle;
+ u64 attached_sas_addr;
+ u8 found = 0, port_count = 0, port_id;
+
+ sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
+ * sizeof(Mpi2SasIOUnit0PhyData_t));
+ sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_iounit_pg0) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return port_count;
+ }
+
+ if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
+ sas_iounit_pg0, sz)) != 0)
+ goto out;
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+ goto out;
+ for (i = 0; i < ioc->sas_hba.num_phys; i++) {
+ found = 0;
+ if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) <
+ MPI2_SAS_NEG_LINK_RATE_1_5)
+ continue;
+ attached_handle =
+ le16_to_cpu(sas_iounit_pg0->PhyData[i].AttachedDevHandle);
+ if (_scsih_get_sas_address(
+ ioc, attached_handle, &attached_sas_addr) != 0) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ continue;
+ }
+
+ for (j = 0; j < port_count; j++) {
+ port_id = sas_iounit_pg0->PhyData[i].Port;
+ if (port_table[j].port_id == port_id &&
+ port_table[j].sas_address == attached_sas_addr) {
+ port_table[j].phy_mask |= (1 << i);
+ found = 1;
+ break;
+ }
+ }
+
+ if (found)
+ continue;
+
+ port_id = sas_iounit_pg0->PhyData[i].Port;
+ port_table[port_count].port_id = port_id;
+ port_table[port_count].phy_mask = (1 << i);
+ port_table[port_count].sas_address = attached_sas_addr;
+ port_count++;
+ }
+out:
+ kfree(sas_iounit_pg0);
+ return port_count;
+}
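/*
 * A compact model of the grouping performed above, with illustrative
 * types: phys that report the same (port_id, attached SAS address) pair
 * coalesce into one port entry whose phy_mask accumulates their bit
 * positions, e.g. phys 0-3 on port 0 behind one expander end up as a
 * single entry with phy_mask 0x0F.
 */
#include <stdint.h>

struct model_port {
	uint8_t port_id;
	uint64_t sas_address;
	uint32_t phy_mask;
};

static int model_group_phy(struct model_port *tbl, int count,
			   int phy, uint8_t port_id, uint64_t addr)
{
	int j;

	for (j = 0; j < count; j++) {
		if (tbl[j].port_id == port_id &&
		    tbl[j].sas_address == addr) {
			tbl[j].phy_mask |= 1u << phy;	/* merge */
			return count;
		}
	}
	tbl[count].port_id = port_id;			/* new entry */
	tbl[count].sas_address = addr;
	tbl[count].phy_mask = 1u << phy;
	return count + 1;
}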
+
+enum hba_port_matched_codes {
+ NOT_MATCHED = 0,
+ MATCHED_WITH_ADDR_AND_PHYMASK,
+ MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT,
+ MATCHED_WITH_ADDR_AND_SUBPHYMASK,
+ MATCHED_WITH_ADDR,
+};
+
+/**
+ * _scsih_look_and_get_matched_port_entry - Get matched hba port entry
+ * from HBA port table
+ * @ioc: per adapter object
+ * @port_entry: hba port entry from the temporary port table which needs to
+ *		be searched for a matching entry in the HBA port table
+ * @matched_port_entry: save the matched hba port entry here
+ * @count: count of matched entries
+ *
+ * Return: type of matched entry found.
+ */
+static enum hba_port_matched_codes
+_scsih_look_and_get_matched_port_entry(struct MPT3SAS_ADAPTER *ioc,
+ struct hba_port *port_entry,
+ struct hba_port **matched_port_entry, int *count)
+{
+ struct hba_port *port_table_entry, *matched_port = NULL;
+ enum hba_port_matched_codes matched_code = NOT_MATCHED;
+ int lcount = 0;
+ *matched_port_entry = NULL;
+
+ list_for_each_entry(port_table_entry, &ioc->port_table_list, list) {
+ if (!(port_table_entry->flags & HBA_PORT_FLAG_DIRTY_PORT))
+ continue;
+
+ if ((port_table_entry->sas_address == port_entry->sas_address)
+ && (port_table_entry->phy_mask == port_entry->phy_mask)) {
+ matched_code = MATCHED_WITH_ADDR_AND_PHYMASK;
+ matched_port = port_table_entry;
+ break;
+ }
+
+ if ((port_table_entry->sas_address == port_entry->sas_address)
+ && (port_table_entry->phy_mask & port_entry->phy_mask)
+ && (port_table_entry->port_id == port_entry->port_id)) {
+ matched_code = MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT;
+ matched_port = port_table_entry;
+ continue;
+ }
+
+ if ((port_table_entry->sas_address == port_entry->sas_address)
+ && (port_table_entry->phy_mask & port_entry->phy_mask)) {
+ if (matched_code ==
+ MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT)
+ continue;
+ matched_code = MATCHED_WITH_ADDR_AND_SUBPHYMASK;
+ matched_port = port_table_entry;
+ continue;
+ }
+
+ if (port_table_entry->sas_address == port_entry->sas_address) {
+ if (matched_code ==
+ MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT)
+ continue;
+ if (matched_code == MATCHED_WITH_ADDR_AND_SUBPHYMASK)
+ continue;
+ matched_code = MATCHED_WITH_ADDR;
+ matched_port = port_table_entry;
+ lcount++;
+ }
+ }
+
+ *matched_port_entry = matched_port;
+ if (matched_code == MATCHED_WITH_ADDR)
+ *count = lcount;
+ return matched_code;
+}
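/*
 * Matching precedence implemented above, strongest first:
 *   1. MATCHED_WITH_ADDR_AND_PHYMASK         - same address, identical
 *      phy_mask (terminates the search immediately);
 *   2. MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT - same address, overlapping
 *      phy_mask and same port_id;
 *   3. MATCHED_WITH_ADDR_AND_SUBPHYMASK      - same address, overlapping
 *      phy_mask;
 *   4. MATCHED_WITH_ADDR                     - address only; *count
 *      reports how many such entries exist so callers can treat
 *      ambiguous (count > 1) matches as no match.
 * E.g. an old entry {port 0, phy_mask 0x0F} checked against a new entry
 * {port 0, phy_mask 0x03} lands in case 2: the masks overlap and the
 * port ids agree.
 */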
+
+/**
+ * _scsih_del_phy_part_of_anther_port - remove phy if it
+ * is part of another port
+ * @ioc: per adapter object
+ * @port_table: port table after reset
+ * @index: hba port entry index
+ * @port_count: number of ports available after host reset
+ * @offset: HBA phy bit offset
+ *
+ */
+static void
+_scsih_del_phy_part_of_anther_port(struct MPT3SAS_ADAPTER *ioc,
+ struct hba_port *port_table,
+ int index, u8 port_count, int offset)
+{
+ struct _sas_node *sas_node = &ioc->sas_hba;
+ u32 i, found = 0;
+
+ for (i = 0; i < port_count; i++) {
+ if (i == index)
+ continue;
+
+ if (port_table[i].phy_mask & (1 << offset)) {
+ mpt3sas_transport_del_phy_from_an_existing_port(
+ ioc, sas_node, &sas_node->phy[offset]);
+ found = 1;
+ break;
+ }
+ }
+ if (!found)
+ port_table[index].phy_mask |= (1 << offset);
+}
+
+/**
+ * _scsih_add_or_del_phys_from_existing_port - add/remove phy to/from
+ * right port
+ * @ioc: per adapter object
+ * @hba_port_entry: hba port table entry
+ * @port_table: temporary port table
+ * @index: hba port entry index
+ * @port_count: number of ports available after host reset
+ *
+ */
+static void
+_scsih_add_or_del_phys_from_existing_port(struct MPT3SAS_ADAPTER *ioc,
+ struct hba_port *hba_port_entry, struct hba_port *port_table,
+ int index, int port_count)
+{
+ u32 phy_mask, offset = 0;
+ struct _sas_node *sas_node = &ioc->sas_hba;
+
+ phy_mask = hba_port_entry->phy_mask ^ port_table[index].phy_mask;
+
+ for (offset = 0; offset < ioc->sas_hba.num_phys; offset++) {
+ if (phy_mask & (1 << offset)) {
+ if (!(port_table[index].phy_mask & (1 << offset))) {
+ _scsih_del_phy_part_of_anther_port(
+ ioc, port_table, index, port_count,
+ offset);
+ continue;
+ }
+ if (sas_node->phy[offset].phy_belongs_to_port)
+ mpt3sas_transport_del_phy_from_an_existing_port(
+ ioc, sas_node, &sas_node->phy[offset]);
+ mpt3sas_transport_add_phy_to_an_existing_port(
+ ioc, sas_node, &sas_node->phy[offset],
+ hba_port_entry->sas_address,
+ hba_port_entry);
+ }
+ }
+}
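/*
 * The XOR above isolates exactly the phys whose port membership
 * changed. A runnable model with illustrative values: old mask 0b0111
 * vs. new mask 0b1101 gives a diff of 0b1010, so phy 1 left the port
 * (its bit is clear in the new mask) and phy 3 joined it.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t old_mask = 0x7, new_mask = 0xd;
	uint32_t diff = old_mask ^ new_mask;
	int phy;

	for (phy = 0; phy < 4; phy++) {
		if (!(diff & (1u << phy)))
			continue;
		if (new_mask & (1u << phy))
			printf("phy %d added to the port\n", phy);
		else
			printf("phy %d removed from the port\n", phy);
	}
	return 0;
}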
+
+/**
+ * _scsih_del_dirty_vphy - delete virtual_phy objects marked as dirty.
+ * @ioc: per adapter object
+ *
+ * Returns nothing.
+ */
+static void
+_scsih_del_dirty_vphy(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct hba_port *port, *port_next;
+ struct virtual_phy *vphy, *vphy_next;
+
+ list_for_each_entry_safe(port, port_next,
+ &ioc->port_table_list, list) {
+ if (!port->vphys_mask)
+ continue;
+ list_for_each_entry_safe(vphy, vphy_next,
+ &port->vphys_list, list) {
+ if (vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY) {
+ drsprintk(ioc, ioc_info(ioc,
+ "Deleting vphy %p entry from port id: %d\t, Phy_mask 0x%08x\n",
+ vphy, port->port_id,
+ vphy->phy_mask));
+ port->vphys_mask &= ~vphy->phy_mask;
+ list_del(&vphy->list);
+ kfree(vphy);
+ }
+ }
+ if (!port->vphys_mask && !port->sas_address)
+ port->flags |= HBA_PORT_FLAG_DIRTY_PORT;
+ }
+}
+
+/**
+ * _scsih_del_dirty_port_entries - delete dirty port entries from port list
+ * after host reset
+ * @ioc: per adapter object
+ *
+ */
+static void
+_scsih_del_dirty_port_entries(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct hba_port *port, *port_next;
+
+ list_for_each_entry_safe(port, port_next,
+ &ioc->port_table_list, list) {
+ if (!(port->flags & HBA_PORT_FLAG_DIRTY_PORT) ||
+ port->flags & HBA_PORT_FLAG_NEW_PORT)
+ continue;
+
+ drsprintk(ioc, ioc_info(ioc,
+ "Deleting port table entry %p having Port: %d\t Phy_mask 0x%08x\n",
+ port, port->port_id, port->phy_mask));
+ list_del(&port->list);
+ kfree(port);
+ }
+}
+
+/**
+ * _scsih_sas_port_refresh - Update HBA port table after host reset
+ * @ioc: per adapter object
+ */
+static void
+_scsih_sas_port_refresh(struct MPT3SAS_ADAPTER *ioc)
+{
+ u32 port_count = 0;
+ struct hba_port *port_table;
+ struct hba_port *port_table_entry;
+ struct hba_port *port_entry = NULL;
+ int i, j, count = 0, lcount = 0;
+ int ret;
+ u64 sas_addr;
+
+ drsprintk(ioc, ioc_info(ioc,
+ "updating ports for sas_host(0x%016llx)\n",
+ (unsigned long long)ioc->sas_hba.sas_address));
+
+ port_table = kcalloc(ioc->sas_hba.num_phys,
+ sizeof(struct hba_port), GFP_KERNEL);
+ if (!port_table)
+ return;
+
+	port_count = _scsih_get_port_table_after_reset(ioc, port_table);
+	if (!port_count) {
+		kfree(port_table);
+		return;
+	}
+
+ drsprintk(ioc, ioc_info(ioc, "New Port table\n"));
+ for (j = 0; j < port_count; j++)
+ drsprintk(ioc, ioc_info(ioc,
+ "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n",
+ port_table[j].port_id,
+ port_table[j].phy_mask, port_table[j].sas_address));
+
+ list_for_each_entry(port_table_entry, &ioc->port_table_list, list)
+ port_table_entry->flags |= HBA_PORT_FLAG_DIRTY_PORT;
+
+ drsprintk(ioc, ioc_info(ioc, "Old Port table\n"));
+ port_table_entry = NULL;
+ list_for_each_entry(port_table_entry, &ioc->port_table_list, list) {
+ drsprintk(ioc, ioc_info(ioc,
+ "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n",
+ port_table_entry->port_id,
+ port_table_entry->phy_mask,
+ port_table_entry->sas_address));
+ }
+
+ for (j = 0; j < port_count; j++) {
+ ret = _scsih_look_and_get_matched_port_entry(ioc,
+ &port_table[j], &port_entry, &count);
+ if (!port_entry) {
+ drsprintk(ioc, ioc_info(ioc,
+			    "No matched entry for sas_addr(0x%016llx), Port:%d\n",
+ port_table[j].sas_address,
+ port_table[j].port_id));
+ continue;
+ }
+
+ switch (ret) {
+ case MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT:
+ case MATCHED_WITH_ADDR_AND_SUBPHYMASK:
+ _scsih_add_or_del_phys_from_existing_port(ioc,
+ port_entry, port_table, j, port_count);
+ break;
+ case MATCHED_WITH_ADDR:
+ sas_addr = port_table[j].sas_address;
+ for (i = 0; i < port_count; i++) {
+ if (port_table[i].sas_address == sas_addr)
+ lcount++;
+ }
+
+ if (count > 1 || lcount > 1)
+ port_entry = NULL;
+ else
+ _scsih_add_or_del_phys_from_existing_port(ioc,
+ port_entry, port_table, j, port_count);
+ }
+
+ if (!port_entry)
+ continue;
+
+ if (port_entry->port_id != port_table[j].port_id)
+ port_entry->port_id = port_table[j].port_id;
+ port_entry->flags &= ~HBA_PORT_FLAG_DIRTY_PORT;
+ port_entry->phy_mask = port_table[j].phy_mask;
+ }
+
+	kfree(port_table);
+}
+
+/**
+ * _scsih_alloc_vphy - allocate virtual_phy object
+ * @ioc: per adapter object
+ * @port_id: Port ID number
+ * @phy_num: HBA Phy number
+ *
+ * Return: the existing or newly allocated virtual_phy object, or NULL.
+ */
+static struct virtual_phy *
+_scsih_alloc_vphy(struct MPT3SAS_ADAPTER *ioc, u8 port_id, u8 phy_num)
+{
+ struct virtual_phy *vphy;
+ struct hba_port *port;
+
+ port = mpt3sas_get_port_by_id(ioc, port_id, 0);
+ if (!port)
+ return NULL;
+
+ vphy = mpt3sas_get_vphy_by_phy(ioc, port, phy_num);
+ if (!vphy) {
+ vphy = kzalloc(sizeof(struct virtual_phy), GFP_KERNEL);
+ if (!vphy)
+ return NULL;
+
+		/*
+		 * Initialize vphys_list only when the first virtual_phy
+		 * is added to this hba_port object, so that entries
+		 * already on the list are not lost.
+		 */
+		if (!port->vphys_mask)
+			INIT_LIST_HEAD(&port->vphys_list);
+
+		/*
+		 * Enable bit corresponding to HBA phy number on its
+		 * parent hba_port object's vphys_mask field.
+		 */
+		port->vphys_mask |= (1 << phy_num);
+		vphy->phy_mask |= (1 << phy_num);
+
+		list_add_tail(&vphy->list, &port->vphys_list);
+
+ ioc_info(ioc,
+ "vphy entry: %p, port id: %d, phy:%d is added to port's vphys_list\n",
+ vphy, port->port_id, phy_num);
+ }
+ return vphy;
+}
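/*
 * Note on the two bitmasks touched above, as used throughout this
 * patch: port->vphys_mask records, per hba_port, which HBA phy numbers
 * carry a virtual phy, while vphy->phy_mask records the phy number(s)
 * owned by one virtual_phy object. Both are indexed by HBA phy bit
 * position.
 */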
+
+/**
* _scsih_sas_host_refresh - refreshing sas host object contents
* @ioc: per adapter object
* Context: user
@@ -5732,7 +6510,9 @@ _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
Mpi2ConfigReply_t mpi_reply;
Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
u16 attached_handle;
- u8 link_rate;
+ u8 link_rate, port_id;
+ struct hba_port *port;
+ Mpi2SasPhyPage0_t phy_pg0;
dtmprintk(ioc,
ioc_info(ioc, "updating handles for sas_host(0x%016llx)\n",
@@ -5756,15 +6536,57 @@ _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4;
if (i == 0)
- ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
- PhyData[0].ControllerDevHandle);
+ ioc->sas_hba.handle = le16_to_cpu(
+ sas_iounit_pg0->PhyData[0].ControllerDevHandle);
+ port_id = sas_iounit_pg0->PhyData[i].Port;
+ if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) {
+ port = kzalloc(sizeof(struct hba_port), GFP_KERNEL);
+ if (!port)
+ goto out;
+
+ port->port_id = port_id;
+ ioc_info(ioc,
+ "hba_port entry: %p, port: %d is added to hba_port list\n",
+ port, port->port_id);
+ if (ioc->shost_recovery)
+ port->flags = HBA_PORT_FLAG_NEW_PORT;
+ list_add_tail(&port->list, &ioc->port_table_list);
+ }
+ /*
+ * Check whether current Phy belongs to HBA vSES device or not.
+ */
+ if (le32_to_cpu(sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) &
+ MPI2_SAS_DEVICE_INFO_SEP &&
+ (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)) {
+ if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply,
+ &phy_pg0, i))) {
+ ioc_err(ioc,
+ "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ if (!(le32_to_cpu(phy_pg0.PhyInfo) &
+ MPI2_SAS_PHYINFO_VIRTUAL_PHY))
+ continue;
+ /*
+ * Allocate a virtual_phy object for vSES device, if
+ * this vSES device is hot added.
+ */
+ if (!_scsih_alloc_vphy(ioc, port_id, i))
+ goto out;
+ ioc->sas_hba.phy[i].hba_vphy = 1;
+ }
+
ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i].
AttachedDevHandle);
if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
link_rate = MPI2_SAS_NEG_LINK_RATE_1_5;
+ ioc->sas_hba.phy[i].port =
+ mpt3sas_get_port_by_id(ioc, port_id, 0);
mpt3sas_transport_update_links(ioc, ioc->sas_hba.sas_address,
- attached_handle, i, link_rate);
+ attached_handle, i, link_rate,
+ ioc->sas_hba.phy[i].port);
}
out:
kfree(sas_iounit_pg0);
@@ -5789,7 +6611,8 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
u16 ioc_status;
u16 sz;
u8 device_missing_delay;
- u8 num_phys;
+ u8 num_phys, port_id;
+ struct hba_port *port;
mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
if (!num_phys) {
@@ -5882,8 +6705,40 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
if (i == 0)
ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
PhyData[0].ControllerDevHandle);
+
+ port_id = sas_iounit_pg0->PhyData[i].Port;
+ if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) {
+ port = kzalloc(sizeof(struct hba_port), GFP_KERNEL);
+ if (!port)
+ goto out;
+
+ port->port_id = port_id;
+ ioc_info(ioc,
+ "hba_port entry: %p, port: %d is added to hba_port list\n",
+ port, port->port_id);
+ list_add_tail(&port->list,
+ &ioc->port_table_list);
+ }
+
+ /*
+ * Check whether current Phy belongs to HBA vSES device or not.
+ */
+ if ((le32_to_cpu(phy_pg0.PhyInfo) &
+ MPI2_SAS_PHYINFO_VIRTUAL_PHY) &&
+ (phy_pg0.NegotiatedLinkRate >> 4) >=
+ MPI2_SAS_NEG_LINK_RATE_1_5) {
+ /*
+ * Allocate a virtual_phy object for vSES device.
+ */
+ if (!_scsih_alloc_vphy(ioc, port_id, i))
+ goto out;
+ ioc->sas_hba.phy[i].hba_vphy = 1;
+ }
+
ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
ioc->sas_hba.phy[i].phy_id = i;
+ ioc->sas_hba.phy[i].port =
+ mpt3sas_get_port_by_id(ioc, port_id, 0);
mpt3sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i],
phy_pg0, ioc->sas_hba.parent_dev);
}
@@ -5937,6 +6792,7 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
int i;
unsigned long flags;
struct _sas_port *mpt3sas_port = NULL;
+ u8 port_id;
int rc = 0;
@@ -5969,10 +6825,13 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
__FILE__, __LINE__, __func__);
return -1;
}
+
+ port_id = expander_pg0.PhysicalPort;
if (sas_address_parent != ioc->sas_hba.sas_address) {
spin_lock_irqsave(&ioc->sas_node_lock, flags);
sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
- sas_address_parent);
+ sas_address_parent,
+ mpt3sas_get_port_by_id(ioc, port_id, 0));
spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
if (!sas_expander) {
rc = _scsih_expander_add(ioc, parent_handle);
@@ -5984,7 +6843,7 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
spin_lock_irqsave(&ioc->sas_node_lock, flags);
sas_address = le64_to_cpu(expander_pg0.SASAddress);
sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
- sas_address);
+ sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0));
spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
if (sas_expander)
@@ -6002,6 +6861,13 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
sas_expander->num_phys = expander_pg0.NumPhys;
sas_expander->sas_address_parent = sas_address_parent;
sas_expander->sas_address = sas_address;
+ sas_expander->port = mpt3sas_get_port_by_id(ioc, port_id, 0);
+ if (!sas_expander->port) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -1;
+ goto out_fail;
+ }
ioc_info(ioc, "expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
handle, parent_handle,
@@ -6020,7 +6886,7 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
INIT_LIST_HEAD(&sas_expander->sas_port_list);
mpt3sas_port = mpt3sas_transport_port_add(ioc, handle,
- sas_address_parent);
+ sas_address_parent, sas_expander->port);
if (!mpt3sas_port) {
ioc_err(ioc, "failure at %s:%d/%s()!\n",
__FILE__, __LINE__, __func__);
@@ -6028,6 +6894,7 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
goto out_fail;
}
sas_expander->parent_dev = &mpt3sas_port->rphy->dev;
+ sas_expander->rphy = mpt3sas_port->rphy;
for (i = 0 ; i < sas_expander->num_phys ; i++) {
if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
@@ -6039,6 +6906,8 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
}
sas_expander->phy[i].handle = handle;
sas_expander->phy[i].phy_id = i;
+ sas_expander->phy[i].port =
+ mpt3sas_get_port_by_id(ioc, port_id, 0);
if ((mpt3sas_transport_add_expander_phy(ioc,
&sas_expander->phy[i], expander_pg1,
@@ -6066,7 +6935,7 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
if (mpt3sas_port)
mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
- sas_address_parent);
+ sas_address_parent, sas_expander->port);
kfree(sas_expander);
return rc;
}
@@ -6077,7 +6946,8 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
* @sas_address: expander sas_address
*/
void
-mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
+mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
+ struct hba_port *port)
{
struct _sas_node *sas_expander;
unsigned long flags;
@@ -6085,9 +6955,12 @@ mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
if (ioc->shost_recovery)
return;
+ if (!port)
+ return;
+
spin_lock_irqsave(&ioc->sas_node_lock, flags);
sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
- sas_address);
+ sas_address, port);
spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
if (sas_expander)
_scsih_expander_node_remove(ioc, sas_expander);
@@ -6210,7 +7083,7 @@ _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
{
Mpi2ConfigReply_t mpi_reply;
Mpi2SasDevicePage0_t sas_device_pg0;
- struct _sas_device *sas_device;
+ struct _sas_device *sas_device = NULL;
struct _enclosure_node *enclosure_dev = NULL;
u32 ioc_status;
unsigned long flags;
@@ -6218,6 +7091,7 @@ _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
struct scsi_target *starget;
struct MPT3SAS_TARGET *sas_target_priv_data;
u32 device_info;
+ struct hba_port *port;
if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)))
@@ -6240,8 +7114,11 @@ _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
+ port = mpt3sas_get_port_by_id(ioc, sas_device_pg0.PhysicalPort, 0);
+ if (!port)
+ goto out_unlock;
sas_device = __mpt3sas_get_sdev_by_addr(ioc,
- sas_address);
+ sas_address, port);
if (!sas_device)
goto out_unlock;
@@ -6297,7 +7174,7 @@ _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
goto out_unlock;
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- _scsih_ublock_io_device(ioc, sas_address);
+ _scsih_ublock_io_device(ioc, sas_address, port);
if (sas_device)
sas_device_put(sas_device);
@@ -6331,6 +7208,7 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
u32 ioc_status;
u64 sas_address;
u32 device_info;
+ u8 port_id;
if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
@@ -6367,8 +7245,9 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
sas_device_pg0.AccessStatus))
return -1;
+ port_id = sas_device_pg0.PhysicalPort;
sas_device = mpt3sas_get_sdev_by_addr(ioc,
- sas_address);
+ sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0));
if (sas_device) {
clear_bit(handle, ioc->pend_os_device_add);
sas_device_put(sas_device);
@@ -6409,6 +7288,12 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
sas_device->phy = sas_device_pg0.PhyNum;
sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) &
MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
+ sas_device->port = mpt3sas_get_port_by_id(ioc, port_id, 0);
+ if (!sas_device->port) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
if (le16_to_cpu(sas_device_pg0.Flags)
& MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
@@ -6442,6 +7327,7 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
else
_scsih_sas_device_add(ioc, sas_device);
+out:
sas_device_put(sas_device);
return 0;
}
@@ -6474,7 +7360,8 @@ _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
if (sas_device->starget && sas_device->starget->hostdata) {
sas_target_priv_data = sas_device->starget->hostdata;
sas_target_priv_data->deleted = 1;
- _scsih_ublock_io_device(ioc, sas_device->sas_address);
+ _scsih_ublock_io_device(ioc, sas_device->sas_address,
+ sas_device->port);
sas_target_priv_data->handle =
MPT3SAS_INVALID_DEVICE_HANDLE;
}
@@ -6482,7 +7369,8 @@ _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
if (!ioc->hide_drives)
mpt3sas_transport_port_remove(ioc,
sas_device->sas_address,
- sas_device->sas_address_parent);
+ sas_device->sas_address_parent,
+ sas_device->port);
ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
sas_device->handle, (u64)sas_device->sas_address);
@@ -6593,6 +7481,7 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
u64 sas_address;
unsigned long flags;
u8 link_rate, prev_link_rate;
+ struct hba_port *port;
Mpi2EventDataSasTopologyChangeList_t *event_data =
(Mpi2EventDataSasTopologyChangeList_t *)
fw_event->event_data;
@@ -6614,6 +7503,7 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
}
parent_handle = le16_to_cpu(event_data->ExpanderDevHandle);
+ port = mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0);
/* handle expander add */
if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED)
@@ -6626,6 +7516,7 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
if (sas_expander) {
sas_address = sas_expander->sas_address;
max_phys = sas_expander->num_phys;
+ port = sas_expander->port;
} else if (parent_handle < ioc->sas_hba.num_phys) {
sas_address = ioc->sas_hba.sas_address;
max_phys = ioc->sas_hba.num_phys;
@@ -6668,7 +7559,7 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
break;
mpt3sas_transport_update_links(ioc, sas_address,
- handle, phy_number, link_rate);
+ handle, phy_number, link_rate, port);
if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
break;
@@ -6687,7 +7578,7 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
break;
mpt3sas_transport_update_links(ioc, sas_address,
- handle, phy_number, link_rate);
+ handle, phy_number, link_rate, port);
_scsih_add_device(ioc, handle, phy_number, 0);
@@ -6702,7 +7593,7 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
/* handle expander removal */
if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING &&
sas_expander)
- mpt3sas_expander_remove(ioc, sas_address);
+ mpt3sas_expander_remove(ioc, sas_address, port);
return 0;
}
@@ -6803,7 +7694,8 @@ _scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_address = le64_to_cpu(event_data->SASAddress);
sas_device = __mpt3sas_get_sdev_by_addr(ioc,
- sas_address);
+ sas_address,
+ mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0));
if (!sas_device || !sas_device->starget)
goto out;
@@ -6952,7 +7844,7 @@ _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
if (pcie_device->starget && pcie_device->starget->hostdata) {
sas_target_priv_data = pcie_device->starget->hostdata;
sas_target_priv_data->deleted = 1;
- _scsih_ublock_io_device(ioc, pcie_device->wwid);
+ _scsih_ublock_io_device(ioc, pcie_device->wwid, NULL);
sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
}
@@ -7074,7 +7966,7 @@ _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
pcie_device_put(pcie_device);
- _scsih_ublock_io_device(ioc, wwid);
+ _scsih_ublock_io_device(ioc, wwid, NULL);
return;
}
@@ -8243,7 +9135,9 @@ _scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc,
parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
mpt3sas_transport_update_links(ioc, sas_address, handle,
- sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
+ sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
+ mpt3sas_get_port_by_id(ioc,
+ sas_device_pg0.PhysicalPort, 0));
_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
_scsih_add_device(ioc, handle, 0, 1);
@@ -8549,7 +9443,9 @@ _scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
mpt3sas_transport_update_links(ioc, sas_address, handle,
- sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
+ sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
+ mpt3sas_get_port_by_id(ioc,
+ sas_device_pg0.PhysicalPort, 0));
_scsih_add_device(ioc, handle, 0, 1);
@@ -8674,6 +9570,8 @@ Mpi2SasDevicePage0_t *sas_device_pg0)
struct _sas_device *sas_device = NULL;
struct _enclosure_node *enclosure_dev = NULL;
unsigned long flags;
+ struct hba_port *port = mpt3sas_get_port_by_id(
+ ioc, sas_device_pg0->PhysicalPort, 0);
if (sas_device_pg0->EnclosureHandle) {
enclosure_dev =
@@ -8685,69 +9583,71 @@ Mpi2SasDevicePage0_t *sas_device_pg0)
}
spin_lock_irqsave(&ioc->sas_device_lock, flags);
list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
- if ((sas_device->sas_address == le64_to_cpu(
- sas_device_pg0->SASAddress)) && (sas_device->slot ==
- le16_to_cpu(sas_device_pg0->Slot))) {
- sas_device->responding = 1;
- starget = sas_device->starget;
- if (starget && starget->hostdata) {
- sas_target_priv_data = starget->hostdata;
- sas_target_priv_data->tm_busy = 0;
- sas_target_priv_data->deleted = 0;
- } else
- sas_target_priv_data = NULL;
- if (starget) {
- starget_printk(KERN_INFO, starget,
- "handle(0x%04x), sas_addr(0x%016llx)\n",
- le16_to_cpu(sas_device_pg0->DevHandle),
- (unsigned long long)
- sas_device->sas_address);
+ if (sas_device->sas_address != le64_to_cpu(
+ sas_device_pg0->SASAddress))
+ continue;
+ if (sas_device->slot != le16_to_cpu(sas_device_pg0->Slot))
+ continue;
+ if (sas_device->port != port)
+ continue;
+ sas_device->responding = 1;
+ starget = sas_device->starget;
+ if (starget && starget->hostdata) {
+ sas_target_priv_data = starget->hostdata;
+ sas_target_priv_data->tm_busy = 0;
+ sas_target_priv_data->deleted = 0;
+ } else
+ sas_target_priv_data = NULL;
+ if (starget) {
+ starget_printk(KERN_INFO, starget,
+ "handle(0x%04x), sas_addr(0x%016llx)\n",
+ le16_to_cpu(sas_device_pg0->DevHandle),
+ (unsigned long long)
+ sas_device->sas_address);
- if (sas_device->enclosure_handle != 0)
- starget_printk(KERN_INFO, starget,
- "enclosure logical id(0x%016llx),"
- " slot(%d)\n",
- (unsigned long long)
- sas_device->enclosure_logical_id,
- sas_device->slot);
- }
- if (le16_to_cpu(sas_device_pg0->Flags) &
- MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
- sas_device->enclosure_level =
- sas_device_pg0->EnclosureLevel;
- memcpy(&sas_device->connector_name[0],
- &sas_device_pg0->ConnectorName[0], 4);
- } else {
- sas_device->enclosure_level = 0;
- sas_device->connector_name[0] = '\0';
- }
+ if (sas_device->enclosure_handle != 0)
+ starget_printk(KERN_INFO, starget,
+ "enclosure logical id(0x%016llx), slot(%d)\n",
+ (unsigned long long)
+ sas_device->enclosure_logical_id,
+ sas_device->slot);
+ }
+ if (le16_to_cpu(sas_device_pg0->Flags) &
+ MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
+ sas_device->enclosure_level =
+ sas_device_pg0->EnclosureLevel;
+ memcpy(&sas_device->connector_name[0],
+ &sas_device_pg0->ConnectorName[0], 4);
+ } else {
+ sas_device->enclosure_level = 0;
+ sas_device->connector_name[0] = '\0';
+ }
- sas_device->enclosure_handle =
- le16_to_cpu(sas_device_pg0->EnclosureHandle);
- sas_device->is_chassis_slot_valid = 0;
- if (enclosure_dev) {
- sas_device->enclosure_logical_id = le64_to_cpu(
- enclosure_dev->pg0.EnclosureLogicalID);
- if (le16_to_cpu(enclosure_dev->pg0.Flags) &
- MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
- sas_device->is_chassis_slot_valid = 1;
- sas_device->chassis_slot =
- enclosure_dev->pg0.ChassisSlot;
- }
+ sas_device->enclosure_handle =
+ le16_to_cpu(sas_device_pg0->EnclosureHandle);
+ sas_device->is_chassis_slot_valid = 0;
+ if (enclosure_dev) {
+ sas_device->enclosure_logical_id = le64_to_cpu(
+ enclosure_dev->pg0.EnclosureLogicalID);
+ if (le16_to_cpu(enclosure_dev->pg0.Flags) &
+ MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
+ sas_device->is_chassis_slot_valid = 1;
+ sas_device->chassis_slot =
+ enclosure_dev->pg0.ChassisSlot;
}
+ }
- if (sas_device->handle == le16_to_cpu(
- sas_device_pg0->DevHandle))
- goto out;
- pr_info("\thandle changed from(0x%04x)!!!\n",
- sas_device->handle);
- sas_device->handle = le16_to_cpu(
- sas_device_pg0->DevHandle);
- if (sas_target_priv_data)
- sas_target_priv_data->handle =
- le16_to_cpu(sas_device_pg0->DevHandle);
+ if (sas_device->handle == le16_to_cpu(
+ sas_device_pg0->DevHandle))
goto out;
- }
+ pr_info("\thandle changed from(0x%04x)!!!\n",
+ sas_device->handle);
+ sas_device->handle = le16_to_cpu(
+ sas_device_pg0->DevHandle);
+ if (sas_target_priv_data)
+ sas_target_priv_data->handle =
+ le16_to_cpu(sas_device_pg0->DevHandle);
+ goto out;
}
out:
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
@@ -9098,6 +9998,8 @@ _scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
u16 handle = le16_to_cpu(expander_pg0->DevHandle);
u16 enclosure_handle = le16_to_cpu(expander_pg0->EnclosureHandle);
u64 sas_address = le64_to_cpu(expander_pg0->SASAddress);
+ struct hba_port *port = mpt3sas_get_port_by_id(
+ ioc, expander_pg0->PhysicalPort, 0);
if (enclosure_handle)
enclosure_dev =
@@ -9108,6 +10010,8 @@ _scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
if (sas_expander->sas_address != sas_address)
continue;
+ if (sas_expander->port != port)
+ continue;
sas_expander->responding = 1;
if (enclosure_dev) {
@@ -9147,6 +10051,7 @@ _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
u16 ioc_status;
u64 sas_address;
u16 handle;
+ u8 port;
ioc_info(ioc, "search for expanders: start\n");
@@ -9164,9 +10069,12 @@ _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
handle = le16_to_cpu(expander_pg0.DevHandle);
sas_address = le64_to_cpu(expander_pg0.SASAddress);
- pr_info("\texpander present: handle(0x%04x), sas_addr(0x%016llx)\n",
- handle,
- (unsigned long long)sas_address);
+ port = expander_pg0.PhysicalPort;
+ pr_info(
+ "\texpander present: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
+ handle, (unsigned long long)sas_address,
+ (ioc->multipath_on_hba ?
+ port : MULTIPATH_DISABLED_PORT_ID));
_scsih_mark_responding_expander(ioc, &expander_pg0);
}
@@ -9288,7 +10196,8 @@ _scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc,
mpt3sas_transport_update_links(ioc, sas_expander->sas_address,
le16_to_cpu(expander_pg1.AttachedDevHandle), i,
- expander_pg1.NegotiatedLinkRate >> 4);
+ expander_pg1.NegotiatedLinkRate >> 4,
+ sas_expander->port);
}
}
@@ -9307,7 +10216,7 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
Mpi2RaidPhysDiskPage0_t pd_pg0;
Mpi2EventIrConfigElement_t element;
Mpi2ConfigReply_t mpi_reply;
- u8 phys_disk_num;
+ u8 phys_disk_num, port_id;
u16 ioc_status;
u16 handle, parent_handle;
u64 sas_address;
@@ -9337,8 +10246,10 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
}
handle = le16_to_cpu(expander_pg0.DevHandle);
spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ port_id = expander_pg0.PhysicalPort;
expander_device = mpt3sas_scsih_expander_find_by_sas_address(
- ioc, le64_to_cpu(expander_pg0.SASAddress));
+ ioc, le64_to_cpu(expander_pg0.SASAddress),
+ mpt3sas_get_port_by_id(ioc, port_id, 0));
spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
if (expander_device)
_scsih_refresh_expander_links(ioc, expander_device,
@@ -9397,9 +10308,11 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
ioc_info(ioc, "\tBEFORE adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
handle,
(u64)le64_to_cpu(sas_device_pg0.SASAddress));
+ port_id = sas_device_pg0.PhysicalPort;
mpt3sas_transport_update_links(ioc, sas_address,
handle, sas_device_pg0.PhyNum,
- MPI2_SAS_NEG_LINK_RATE_1_5);
+ MPI2_SAS_NEG_LINK_RATE_1_5,
+ mpt3sas_get_port_by_id(ioc, port_id, 0));
set_bit(handle, ioc->pd_handles);
retry_count = 0;
/* This will retry adding the end device.
@@ -9485,8 +10398,10 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
if (!(_scsih_is_end_device(
le32_to_cpu(sas_device_pg0.DeviceInfo))))
continue;
+ port_id = sas_device_pg0.PhysicalPort;
sas_device = mpt3sas_get_sdev_by_addr(ioc,
- le64_to_cpu(sas_device_pg0.SASAddress));
+ le64_to_cpu(sas_device_pg0.SASAddress),
+ mpt3sas_get_port_by_id(ioc, port_id, 0));
if (sas_device) {
sas_device_put(sas_device);
continue;
@@ -9497,7 +10412,8 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
handle,
(u64)le64_to_cpu(sas_device_pg0.SASAddress));
mpt3sas_transport_update_links(ioc, sas_address, handle,
- sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
+ sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
+ mpt3sas_get_port_by_id(ioc, port_id, 0));
retry_count = 0;
/* This will retry adding the end device.
* _scsih_add_device() will decide on retries and
@@ -9601,6 +10517,10 @@ mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
if ((!ioc->is_driver_loading) && !(disable_discovery > 0 &&
!ioc->sas_hba.num_phys)) {
+ if (ioc->multipath_on_hba) {
+ _scsih_sas_port_refresh(ioc);
+ _scsih_update_vphys_after_reset(ioc);
+ }
_scsih_prep_device_scan(ioc);
_scsih_create_enclosure_list_after_reset(ioc);
_scsih_search_responding_sas_devices(ioc);
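/*
 * Ordering assumed by the two calls added above: the hba_port table
 * must be refreshed, and the vphys re-validated, before the device
 * scan runs, so that re-discovered devices resolve to the correct
 * (possibly renumbered) hba_port entries.
 */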
@@ -9648,6 +10568,8 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
ssleep(1);
}
_scsih_remove_unresponding_devices(ioc);
+ _scsih_del_dirty_vphy(ioc);
+ _scsih_del_dirty_port_entries(ioc);
_scsih_scan_for_devices_after_reset(ioc);
_scsih_set_nvme_max_shutdown_latency(ioc);
break;
@@ -9930,21 +10852,25 @@ _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
if (mpt3sas_port->remote_identify.device_type ==
SAS_END_DEVICE)
mpt3sas_device_remove_by_sas_address(ioc,
- mpt3sas_port->remote_identify.sas_address);
+ mpt3sas_port->remote_identify.sas_address,
+ mpt3sas_port->hba_port);
else if (mpt3sas_port->remote_identify.device_type ==
SAS_EDGE_EXPANDER_DEVICE ||
mpt3sas_port->remote_identify.device_type ==
SAS_FANOUT_EXPANDER_DEVICE)
mpt3sas_expander_remove(ioc,
- mpt3sas_port->remote_identify.sas_address);
+ mpt3sas_port->remote_identify.sas_address,
+ mpt3sas_port->hba_port);
}
mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
- sas_expander->sas_address_parent);
+ sas_expander->sas_address_parent, sas_expander->port);
- ioc_info(ioc, "expander_remove: handle(0x%04x), sas_addr(0x%016llx)\n",
- sas_expander->handle, (unsigned long long)
- sas_expander->sas_address);
+ ioc_info(ioc,
+ "expander_remove: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
+ sas_expander->handle, (unsigned long long)
+ sas_expander->sas_address,
+ sas_expander->port->port_id);
spin_lock_irqsave(&ioc->sas_node_lock, flags);
list_del(&sas_expander->list);
@@ -10136,6 +11062,7 @@ static void scsih_remove(struct pci_dev *pdev)
struct workqueue_struct *wq;
unsigned long flags;
Mpi2ConfigReply_t mpi_reply;
+ struct hba_port *port, *port_next;
if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
return;
@@ -10189,13 +11116,21 @@ static void scsih_remove(struct pci_dev *pdev)
if (mpt3sas_port->remote_identify.device_type ==
SAS_END_DEVICE)
mpt3sas_device_remove_by_sas_address(ioc,
- mpt3sas_port->remote_identify.sas_address);
+ mpt3sas_port->remote_identify.sas_address,
+ mpt3sas_port->hba_port);
else if (mpt3sas_port->remote_identify.device_type ==
SAS_EDGE_EXPANDER_DEVICE ||
mpt3sas_port->remote_identify.device_type ==
SAS_FANOUT_EXPANDER_DEVICE)
mpt3sas_expander_remove(ioc,
- mpt3sas_port->remote_identify.sas_address);
+ mpt3sas_port->remote_identify.sas_address,
+ mpt3sas_port->hba_port);
+ }
+
+ list_for_each_entry_safe(port, port_next,
+ &ioc->port_table_list, list) {
+ list_del(&port->list);
+ kfree(port);
}
/* free phys attached to the sas_host */
@@ -10277,6 +11212,7 @@ _scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc)
unsigned long flags;
int rc;
int tid;
+ struct hba_port *port;
/* no Bios, return immediately */
if (!ioc->bios_pg3.BiosVersion)
@@ -10318,19 +11254,24 @@ _scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc)
handle = sas_device->handle;
sas_address_parent = sas_device->sas_address_parent;
sas_address = sas_device->sas_address;
+ port = sas_device->port;
list_move_tail(&sas_device->list, &ioc->sas_device_list);
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
if (ioc->hide_drives)
return;
+
+ if (!port)
+ return;
+
if (!mpt3sas_transport_port_add(ioc, handle,
- sas_address_parent)) {
+ sas_address_parent, port)) {
_scsih_sas_device_remove(ioc, sas_device);
} else if (!sas_device->starget) {
if (!ioc->is_driver_loading) {
mpt3sas_transport_port_remove(ioc,
sas_address,
- sas_address_parent);
+ sas_address_parent, port);
_scsih_sas_device_remove(ioc, sas_device);
}
}
@@ -10418,7 +11359,7 @@ _scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc)
while ((sas_device = get_next_sas_device(ioc))) {
if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
- sas_device->sas_address_parent)) {
+ sas_device->sas_address_parent, sas_device->port)) {
_scsih_sas_device_remove(ioc, sas_device);
sas_device_put(sas_device);
continue;
@@ -10432,7 +11373,8 @@ _scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc)
if (!ioc->is_driver_loading) {
mpt3sas_transport_port_remove(ioc,
sas_device->sas_address,
- sas_device->sas_address_parent);
+ sas_device->sas_address_parent,
+ sas_device->port);
_scsih_sas_device_remove(ioc, sas_device);
sas_device_put(sas_device);
continue;
@@ -10866,6 +11808,12 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ioc->mfg_pg10_hide_flag = MFG_PAGE10_EXPOSE_ALL_DISKS;
break;
}
+
+ if (multipath_on_hba == -1 || multipath_on_hba == 0)
+ ioc->multipath_on_hba = 0;
+ else
+ ioc->multipath_on_hba = 1;
+
break;
case MPI25_VERSION:
case MPI26_VERSION:
@@ -10927,6 +11875,23 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ioc->combined_reply_index_count =
MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3;
}
+
+ switch (ioc->is_gen35_ioc) {
+ case 0:
+ if (multipath_on_hba == -1 || multipath_on_hba == 0)
+ ioc->multipath_on_hba = 0;
+ else
+ ioc->multipath_on_hba = 1;
+ break;
+ case 1:
+ if (multipath_on_hba == -1 || multipath_on_hba > 0)
+ ioc->multipath_on_hba = 1;
+ else
+			ioc->multipath_on_hba = 0;
+		break;
+	default:
+ break;
+ }
+
break;
default:
return -ENODEV;
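/*
 * A minimal model of the tri-state module parameter convention used in
 * the two multipath_on_hba blocks above (illustrative helper, not
 * driver API): -1 defers to the per-generation default, 0 forces the
 * feature off, and a positive value forces it on.
 */
static int model_resolve_multipath(int param, int is_gen35)
{
	if (is_gen35)
		return (param == -1 || param > 0) ? 1 : 0;
	return (param == -1 || param == 0) ? 0 : 1;
}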
@@ -10987,6 +11952,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_LIST_HEAD(&ioc->delayed_event_ack_list);
INIT_LIST_HEAD(&ioc->delayed_tr_volume_list);
INIT_LIST_HEAD(&ioc->reply_queue_list);
+ INIT_LIST_HEAD(&ioc->port_table_list);
sprintf(ioc->name, "%s_cm%d", ioc->driver_name, ioc->id);
@@ -11084,20 +12050,18 @@ out_add_shost_fail:
return rv;
}
-#ifdef CONFIG_PM
/**
* scsih_suspend - power management suspend main entry point
- * @pdev: PCI device struct
- * @state: PM state change to (usually PCI_D3)
+ * @dev: Device struct
*
* Return: 0 success, anything else error.
*/
-static int
-scsih_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused
+scsih_suspend(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct Scsi_Host *shost;
struct MPT3SAS_ADAPTER *ioc;
- pci_power_t device_state;
int rc;
rc = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
@@ -11108,25 +12072,23 @@ scsih_suspend(struct pci_dev *pdev, pm_message_t state)
flush_scheduled_work();
scsi_block_requests(shost);
_scsih_nvme_shutdown(ioc);
- device_state = pci_choose_state(pdev, state);
- ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state [D%d]\n",
- pdev, pci_name(pdev), device_state);
+ ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state\n",
+ pdev, pci_name(pdev));
- pci_save_state(pdev);
mpt3sas_base_free_resources(ioc);
- pci_set_power_state(pdev, device_state);
return 0;
}
/**
* scsih_resume - power management resume main entry point
- * @pdev: PCI device struct
+ * @dev: Device struct
*
* Return: 0 success, anything else error.
*/
-static int
-scsih_resume(struct pci_dev *pdev)
+static int __maybe_unused
+scsih_resume(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct Scsi_Host *shost;
struct MPT3SAS_ADAPTER *ioc;
pci_power_t device_state = pdev->current_state;
@@ -11139,9 +12101,6 @@ scsih_resume(struct pci_dev *pdev)
ioc_info(ioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
pdev, pci_name(pdev), device_state);
- pci_set_power_state(pdev, PCI_D0);
- pci_enable_wake(pdev, PCI_D0, 0);
- pci_restore_state(pdev);
ioc->pdev = pdev;
r = mpt3sas_base_map_resources(ioc);
if (r)
@@ -11152,7 +12111,6 @@ scsih_resume(struct pci_dev *pdev)
mpt3sas_base_start_watchdog(ioc);
return 0;
}
-#endif /* CONFIG_PM */
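/*
 * Shape of the power-management migration in the hunks above, as a
 * generic sketch for any PCI driver (names below are illustrative, not
 * mpt3sas API): legacy pci_driver.suspend/.resume callbacks, which had
 * to call pci_save_state()/pci_set_power_state() themselves, become
 * dev_pm_ops callbacks where the PCI core manages the device's power
 * state, and the #ifdef CONFIG_PM guards are replaced by
 * __maybe_unused plus SIMPLE_DEV_PM_OPS().
 */
#include <linux/pci.h>
#include <linux/pm.h>

static int __maybe_unused mydrv_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	/* quiesce the device; no explicit PCI state handling needed */
	dev_info(&pdev->dev, "suspending\n");
	return 0;
}

static int __maybe_unused mydrv_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	/* config space is already restored by the PCI core here */
	dev_info(&pdev->dev, "resuming\n");
	return 0;
}

static SIMPLE_DEV_PM_OPS(mydrv_pm_ops, mydrv_suspend, mydrv_resume);

/* in struct pci_driver: .driver.pm = &mydrv_pm_ops, no #ifdef needed */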
/**
* scsih_pci_error_detected - Called when a PCI error is detected.
@@ -11454,6 +12412,8 @@ static struct pci_error_handlers _mpt3sas_err_handler = {
.resume = scsih_pci_resume,
};
+static SIMPLE_DEV_PM_OPS(scsih_pm_ops, scsih_suspend, scsih_resume);
+
static struct pci_driver mpt3sas_driver = {
.name = MPT3SAS_DRIVER_NAME,
.id_table = mpt3sas_pci_table,
@@ -11461,10 +12421,7 @@ static struct pci_driver mpt3sas_driver = {
.remove = scsih_remove,
.shutdown = scsih_shutdown,
.err_handler = &_mpt3sas_err_handler,
-#ifdef CONFIG_PM
- .suspend = scsih_suspend,
- .resume = scsih_resume,
-#endif
+ .driver.pm = &scsih_pm_ops,
};
/**
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index 6ec5b7f33dfd..6f4708224755 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -61,9 +61,28 @@
#include "mpt3sas_base.h"
/**
+ * _transport_get_port_id_by_sas_phy - get the zone's port id that a Phy belongs to
+ * @phy: sas_phy object
+ *
+ * Return: Port number
+ */
+static inline u8
+_transport_get_port_id_by_sas_phy(struct sas_phy *phy)
+{
+ u8 port_id = 0xFF;
+ struct hba_port *port = phy->hostdata;
+
+ if (port)
+ port_id = port->port_id;
+
+ return port_id;
+}
+
+/**
* _transport_sas_node_find_by_sas_address - sas node search
* @ioc: per adapter object
* @sas_address: sas address of expander or sas host
+ * @port: hba port entry
* Context: Calling function should acquire ioc->sas_node_lock.
*
* Search for either hba phys or expander device based on handle, then returns
@@ -71,13 +90,56 @@
*/
static struct _sas_node *
_transport_sas_node_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
- u64 sas_address)
+ u64 sas_address, struct hba_port *port)
{
if (ioc->sas_hba.sas_address == sas_address)
return &ioc->sas_hba;
else
return mpt3sas_scsih_expander_find_by_sas_address(ioc,
- sas_address);
+ sas_address, port);
+}
+
+/**
+ * _transport_get_port_id_by_rphy - Get Port number from rphy object
+ * @ioc: per adapter object
+ * @rphy: sas_rphy object
+ *
+ * Returns Port number.
+ */
+static u8
+_transport_get_port_id_by_rphy(struct MPT3SAS_ADAPTER *ioc,
+ struct sas_rphy *rphy)
+{
+ struct _sas_node *sas_expander;
+ struct _sas_device *sas_device;
+ unsigned long flags;
+ u8 port_id = 0xFF;
+
+ if (!rphy)
+ return port_id;
+
+ if (rphy->identify.device_type == SAS_EDGE_EXPANDER_DEVICE ||
+ rphy->identify.device_type == SAS_FANOUT_EXPANDER_DEVICE) {
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ list_for_each_entry(sas_expander,
+ &ioc->sas_expander_list, list) {
+ if (sas_expander->rphy == rphy) {
+ port_id = sas_expander->port->port_id;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ } else if (rphy->identify.device_type == SAS_END_DEVICE) {
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = __mpt3sas_get_sdev_by_rphy(ioc, rphy);
+ if (sas_device) {
+ port_id = sas_device->port->port_id;
+ sas_device_put(sas_device);
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ }
+
+ return port_id;
}
/**
@@ -288,7 +350,7 @@ struct rep_manu_reply {
*/
static int
_transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
- u64 sas_address, struct sas_expander_device *edev)
+ u64 sas_address, struct sas_expander_device *edev, u8 port_id)
{
Mpi2SmpPassthroughRequest_t *mpi_request;
Mpi2SmpPassthroughReply_t *mpi_reply;
@@ -355,7 +417,7 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t));
mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
- mpi_request->PhysicalPort = 0xFF;
+ mpi_request->PhysicalPort = port_id;
mpi_request->SASAddress = cpu_to_le64(sas_address);
mpi_request->RequestDataLength = cpu_to_le16(data_out_sz);
psge = &mpi_request->SGL;
@@ -439,6 +501,7 @@ _transport_delete_port(struct MPT3SAS_ADAPTER *ioc,
struct _sas_port *mpt3sas_port)
{
u64 sas_address = mpt3sas_port->remote_identify.sas_address;
+ struct hba_port *port = mpt3sas_port->hba_port;
enum sas_device_type device_type =
mpt3sas_port->remote_identify.device_type;
@@ -448,10 +511,11 @@ _transport_delete_port(struct MPT3SAS_ADAPTER *ioc,
ioc->logging_level |= MPT_DEBUG_TRANSPORT;
if (device_type == SAS_END_DEVICE)
- mpt3sas_device_remove_by_sas_address(ioc, sas_address);
+ mpt3sas_device_remove_by_sas_address(ioc,
+ sas_address, port);
else if (device_type == SAS_EDGE_EXPANDER_DEVICE ||
device_type == SAS_FANOUT_EXPANDER_DEVICE)
- mpt3sas_expander_remove(ioc, sas_address);
+ mpt3sas_expander_remove(ioc, sas_address, port);
ioc->logging_level &= ~MPT_DEBUG_TRANSPORT;
}
@@ -500,16 +564,17 @@ _transport_add_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_port *mpt3sas_port,
}
/**
- * _transport_add_phy_to_an_existing_port - adding new phy to existing port
+ * mpt3sas_transport_add_phy_to_an_existing_port - adding new phy to existing port
* @ioc: per adapter object
* @sas_node: sas node object (either expander or sas host)
* @mpt3sas_phy: mpt3sas per phy object
 * @sas_address: sas address of device/expander where phy needs to be added to
+ * @port: hba port entry
*/
-static void
-_transport_add_phy_to_an_existing_port(struct MPT3SAS_ADAPTER *ioc,
+void
+mpt3sas_transport_add_phy_to_an_existing_port(struct MPT3SAS_ADAPTER *ioc,
struct _sas_node *sas_node, struct _sas_phy *mpt3sas_phy,
- u64 sas_address)
+ u64 sas_address, struct hba_port *port)
{
struct _sas_port *mpt3sas_port;
struct _sas_phy *phy_srch;
@@ -517,11 +582,16 @@ _transport_add_phy_to_an_existing_port(struct MPT3SAS_ADAPTER *ioc,
if (mpt3sas_phy->phy_belongs_to_port == 1)
return;
+ if (!port)
+ return;
+
list_for_each_entry(mpt3sas_port, &sas_node->sas_port_list,
port_list) {
if (mpt3sas_port->remote_identify.sas_address !=
sas_address)
continue;
+ if (mpt3sas_port->hba_port != port)
+ continue;
list_for_each_entry(phy_srch, &mpt3sas_port->phy_list,
port_siblings) {
if (phy_srch == mpt3sas_phy)
@@ -534,13 +604,13 @@ _transport_add_phy_to_an_existing_port(struct MPT3SAS_ADAPTER *ioc,
}
/**
- * _transport_del_phy_from_an_existing_port - delete phy from existing port
+ * mpt3sas_transport_del_phy_from_an_existing_port - delete phy from existing port
* @ioc: per adapter object
* @sas_node: sas node object (either expander or sas host)
* @mpt3sas_phy: mpt3sas per phy object
*/
-static void
-_transport_del_phy_from_an_existing_port(struct MPT3SAS_ADAPTER *ioc,
+void
+mpt3sas_transport_del_phy_from_an_existing_port(struct MPT3SAS_ADAPTER *ioc,
struct _sas_node *sas_node, struct _sas_phy *mpt3sas_phy)
{
struct _sas_port *mpt3sas_port, *next;
@@ -556,7 +626,11 @@ _transport_del_phy_from_an_existing_port(struct MPT3SAS_ADAPTER *ioc,
if (phy_srch != mpt3sas_phy)
continue;
- if (mpt3sas_port->num_phys == 1)
+ /*
+ * Don't delete port during host reset,
+ * just delete phy.
+ */
+ if (mpt3sas_port->num_phys == 1 && !ioc->shost_recovery)
_transport_delete_port(ioc, mpt3sas_port);
else
_transport_delete_phy(ioc, mpt3sas_port,
@@ -571,21 +645,24 @@ _transport_del_phy_from_an_existing_port(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @sas_node: sas node object (either expander or sas host)
* @sas_address: sas address of device being added
+ * @port: hba port entry
*
* See the explanation above from _transport_delete_duplicate_port
*/
static void
_transport_sanity_check(struct MPT3SAS_ADAPTER *ioc, struct _sas_node *sas_node,
- u64 sas_address)
+ u64 sas_address, struct hba_port *port)
{
int i;
for (i = 0; i < sas_node->num_phys; i++) {
if (sas_node->phy[i].remote_identify.sas_address != sas_address)
continue;
+ if (sas_node->phy[i].port != port)
+ continue;
if (sas_node->phy[i].phy_belongs_to_port == 1)
- _transport_del_phy_from_an_existing_port(ioc, sas_node,
- &sas_node->phy[i]);
+ mpt3sas_transport_del_phy_from_an_existing_port(ioc,
+ sas_node, &sas_node->phy[i]);
}
}
@@ -594,6 +671,7 @@ _transport_sanity_check(struct MPT3SAS_ADAPTER *ioc, struct _sas_node *sas_node,
* @ioc: per adapter object
* @handle: handle of attached device
* @sas_address: sas address of parent expander or sas host
+ * @port: hba port entry
* Context: This function will acquire ioc->sas_node_lock.
*
* Adding new port object to the sas_node->sas_port_list.
@@ -602,7 +680,7 @@ _transport_sanity_check(struct MPT3SAS_ADAPTER *ioc, struct _sas_node *sas_node,
*/
struct _sas_port *
mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
- u64 sas_address)
+ u64 sas_address, struct hba_port *hba_port)
{
struct _sas_phy *mpt3sas_phy, *next;
struct _sas_port *mpt3sas_port;
@@ -612,6 +690,13 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
struct _sas_device *sas_device = NULL;
int i;
struct sas_port *port;
+ struct virtual_phy *vphy = NULL;
+
+ if (!hba_port) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return NULL;
+ }
mpt3sas_port = kzalloc(sizeof(struct _sas_port),
GFP_KERNEL);
@@ -624,7 +709,8 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
INIT_LIST_HEAD(&mpt3sas_port->port_list);
INIT_LIST_HEAD(&mpt3sas_port->phy_list);
spin_lock_irqsave(&ioc->sas_node_lock, flags);
- sas_node = _transport_sas_node_find_by_sas_address(ioc, sas_address);
+ sas_node = _transport_sas_node_find_by_sas_address(ioc,
+ sas_address, hba_port);
spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
if (!sas_node) {
@@ -646,16 +732,32 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
goto out_fail;
}
+ mpt3sas_port->hba_port = hba_port;
_transport_sanity_check(ioc, sas_node,
- mpt3sas_port->remote_identify.sas_address);
+ mpt3sas_port->remote_identify.sas_address, hba_port);
for (i = 0; i < sas_node->num_phys; i++) {
if (sas_node->phy[i].remote_identify.sas_address !=
mpt3sas_port->remote_identify.sas_address)
continue;
+ if (sas_node->phy[i].port != hba_port)
+ continue;
list_add_tail(&sas_node->phy[i].port_siblings,
&mpt3sas_port->phy_list);
mpt3sas_port->num_phys++;
+ if (sas_node->handle <= ioc->sas_hba.num_phys) {
+ if (!sas_node->phy[i].hba_vphy) {
+ hba_port->phy_mask |= (1 << i);
+ continue;
+ }
+
+ vphy = mpt3sas_get_vphy_by_phy(ioc, hba_port, i);
+ if (!vphy) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out_fail;
+ }
+ }
}
if (!mpt3sas_port->num_phys) {
@@ -664,6 +766,18 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
goto out_fail;
}
+ if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) {
+ sas_device = mpt3sas_get_sdev_by_addr(ioc,
+ mpt3sas_port->remote_identify.sas_address,
+ mpt3sas_port->hba_port);
+ if (!sas_device) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out_fail;
+ }
+ sas_device->pend_sas_rphy_add = 1;
+ }
+
if (!sas_node->parent_dev) {
ioc_err(ioc, "failure at %s:%d/%s()!\n",
__FILE__, __LINE__, __func__);
@@ -686,29 +800,31 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
mpt3sas_phy->phy_id);
sas_port_add_phy(port, mpt3sas_phy->phy);
mpt3sas_phy->phy_belongs_to_port = 1;
+ mpt3sas_phy->port = hba_port;
}
mpt3sas_port->port = port;
- if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE)
+ if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) {
rphy = sas_end_device_alloc(port);
- else
+ sas_device->rphy = rphy;
+ if (sas_node->handle <= ioc->sas_hba.num_phys) {
+ if (!vphy)
+ hba_port->sas_address =
+ sas_device->sas_address;
+ else
+ vphy->sas_address =
+ sas_device->sas_address;
+ }
+ } else {
rphy = sas_expander_alloc(port,
mpt3sas_port->remote_identify.device_type);
+ if (sas_node->handle <= ioc->sas_hba.num_phys)
+ hba_port->sas_address =
+ mpt3sas_port->remote_identify.sas_address;
+ }
rphy->identify = mpt3sas_port->remote_identify;
- if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) {
- sas_device = mpt3sas_get_sdev_by_addr(ioc,
- mpt3sas_port->remote_identify.sas_address);
- if (!sas_device) {
- dfailprintk(ioc,
- ioc_info(ioc, "failure at %s:%d/%s()!\n",
- __FILE__, __LINE__, __func__));
- goto out_fail;
- }
- sas_device->pend_sas_rphy_add = 1;
- }
-
if ((sas_rphy_add(rphy))) {
ioc_err(ioc, "failure at %s:%d/%s()!\n",
__FILE__, __LINE__, __func__);
@@ -735,7 +851,7 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER)
_transport_expander_report_manufacture(ioc,
mpt3sas_port->remote_identify.sas_address,
- rphy_to_expander_device(rphy));
+ rphy_to_expander_device(rphy), hba_port->port_id);
return mpt3sas_port;
out_fail:
@@ -751,6 +867,7 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
* @ioc: per adapter object
* @sas_address: sas address of attached device
* @sas_address_parent: sas address of parent expander or sas host
+ * @port: hba port entry
* Context: This function will acquire ioc->sas_node_lock.
*
* Removing object and freeing associated memory from the
@@ -758,7 +875,7 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
*/
void
mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
- u64 sas_address_parent)
+ u64 sas_address_parent, struct hba_port *port)
{
int i;
unsigned long flags;
@@ -766,10 +883,15 @@ mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
struct _sas_node *sas_node;
u8 found = 0;
struct _sas_phy *mpt3sas_phy, *next_phy;
+ struct hba_port *hba_port_next, *hba_port = NULL;
+ struct virtual_phy *vphy, *vphy_next = NULL;
+
+ if (!port)
+ return;
spin_lock_irqsave(&ioc->sas_node_lock, flags);
sas_node = _transport_sas_node_find_by_sas_address(ioc,
- sas_address_parent);
+ sas_address_parent, port);
if (!sas_node) {
spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
return;
@@ -778,6 +900,8 @@ mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
port_list) {
if (mpt3sas_port->remote_identify.sas_address != sas_address)
continue;
+ if (mpt3sas_port->hba_port != port)
+ continue;
found = 1;
list_del(&mpt3sas_port->port_list);
goto out;
@@ -788,6 +912,61 @@ mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
return;
}
+ if (sas_node->handle <= ioc->sas_hba.num_phys &&
+ (ioc->multipath_on_hba)) {
+ if (port->vphys_mask) {
+ list_for_each_entry_safe(vphy, vphy_next,
+ &port->vphys_list, list) {
+ if (vphy->sas_address != sas_address)
+ continue;
+ ioc_info(ioc,
+ "remove vphy entry: %p of port:%p,from %d port's vphys list\n",
+ vphy, port, port->port_id);
+ port->vphys_mask &= ~vphy->phy_mask;
+ list_del(&vphy->list);
+ kfree(vphy);
+ }
+ }
+
+ list_for_each_entry_safe(hba_port, hba_port_next,
+ &ioc->port_table_list, list) {
+ if (hba_port != port)
+ continue;
+ /*
+ * Delete the hba_port object if
+ * - the hba_port object's sas address matches the
+ *    removed device's sas address and no vphys are
+ *    associated with it, or
+ * - the removed device is a vSES device and none of
+ *    the other directly attached devices share this
+ *    vSES device's port number (hence the hba_port
+ *    object's sas_address field will be zero).
+ */
+ if ((hba_port->sas_address == sas_address ||
+ !hba_port->sas_address) && !hba_port->vphys_mask) {
+ ioc_info(ioc,
+ "remove hba_port entry: %p port: %d from hba_port list\n",
+ hba_port, hba_port->port_id);
+ list_del(&hba_port->list);
+ kfree(hba_port);
+ } else if (hba_port->sas_address == sas_address &&
+ hba_port->vphys_mask) {
+ /*
+ * The removed device is a non-vSES device and a
+ * vSES device has the same port number as the
+ * removed device. Hence only clear the sas_address
+ * field; don't delete the hba_port object.
+ */
+ ioc_info(ioc,
+ "clearing sas_address from hba_port entry: %p port: %d from hba_port list\n",
+ hba_port, hba_port->port_id);
+ port->sas_address = 0;
+ }
+ break;
+ }
+ }
+
for (i = 0; i < sas_node->num_phys; i++) {
if (sas_node->phy[i].remote_identify.sas_address == sas_address)
memset(&sas_node->phy[i].remote_identify, 0 ,
@@ -864,6 +1043,7 @@ mpt3sas_transport_add_host_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy
phy_pg0.ProgrammedLinkRate & MPI2_SAS_PRATE_MIN_RATE_MASK);
phy->maximum_linkrate = _transport_convert_phy_link_rate(
phy_pg0.ProgrammedLinkRate >> 4);
+ phy->hostdata = mpt3sas_phy->port;
if ((sas_phy_add(phy))) {
ioc_err(ioc, "failure at %s:%d/%s()!\n",
@@ -934,6 +1114,7 @@ mpt3sas_transport_add_expander_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy
expander_pg1.ProgrammedLinkRate & MPI2_SAS_PRATE_MIN_RATE_MASK);
phy->maximum_linkrate = _transport_convert_phy_link_rate(
expander_pg1.ProgrammedLinkRate >> 4);
+ phy->hostdata = mpt3sas_phy->port;
if ((sas_phy_add(phy))) {
ioc_err(ioc, "failure at %s:%d/%s()!\n",
@@ -961,20 +1142,26 @@ mpt3sas_transport_add_expander_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy
* @handle: attached device handle
* @phy_number: phy number
* @link_rate: new link rate
+ * @port: hba port entry
+ *
+ * Return nothing.
*/
void
mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc,
- u64 sas_address, u16 handle, u8 phy_number, u8 link_rate)
+ u64 sas_address, u16 handle, u8 phy_number, u8 link_rate,
+ struct hba_port *port)
{
unsigned long flags;
struct _sas_node *sas_node;
struct _sas_phy *mpt3sas_phy;
+ struct hba_port *hba_port = NULL;
if (ioc->shost_recovery || ioc->pci_error_recovery)
return;
spin_lock_irqsave(&ioc->sas_node_lock, flags);
- sas_node = _transport_sas_node_find_by_sas_address(ioc, sas_address);
+ sas_node = _transport_sas_node_find_by_sas_address(ioc,
+ sas_address, port);
if (!sas_node) {
spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
return;
@@ -986,8 +1173,19 @@ mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc,
if (handle && (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)) {
_transport_set_identify(ioc, handle,
&mpt3sas_phy->remote_identify);
- _transport_add_phy_to_an_existing_port(ioc, sas_node,
- mpt3sas_phy, mpt3sas_phy->remote_identify.sas_address);
+ if ((sas_node->handle <= ioc->sas_hba.num_phys) &&
+ (ioc->multipath_on_hba)) {
+ list_for_each_entry(hba_port,
+ &ioc->port_table_list, list) {
+ if (hba_port->sas_address == sas_address &&
+ hba_port == port)
+ hba_port->phy_mask |=
+ (1 << mpt3sas_phy->phy_id);
+ }
+ }
+ mpt3sas_transport_add_phy_to_an_existing_port(ioc, sas_node,
+ mpt3sas_phy, mpt3sas_phy->remote_identify.sas_address,
+ port);
} else
memset(&mpt3sas_phy->remote_identify, 0 , sizeof(struct
sas_identify));
@@ -1122,7 +1320,7 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t));
mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
- mpi_request->PhysicalPort = 0xFF;
+ mpi_request->PhysicalPort = _transport_get_port_id_by_sas_phy(phy);
mpi_request->VF_ID = 0; /* TODO */
mpi_request->VP_ID = 0;
mpi_request->SASAddress = cpu_to_le64(phy->identify.sas_address);
@@ -1212,10 +1410,13 @@ _transport_get_linkerrors(struct sas_phy *phy)
unsigned long flags;
Mpi2ConfigReply_t mpi_reply;
Mpi2SasPhyPage1_t phy_pg1;
+ struct hba_port *port = phy->hostdata;
+ int port_id = port->port_id;
spin_lock_irqsave(&ioc->sas_node_lock, flags);
if (_transport_sas_node_find_by_sas_address(ioc,
- phy->identify.sas_address) == NULL) {
+ phy->identify.sas_address,
+ mpt3sas_get_port_by_id(ioc, port_id, 0)) == NULL) {
spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
return -EINVAL;
}
@@ -1265,8 +1466,7 @@ _transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
int rc;
spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = __mpt3sas_get_sdev_by_addr(ioc,
- rphy->identify.sas_address);
+ sas_device = __mpt3sas_get_sdev_by_rphy(ioc, rphy);
if (sas_device) {
*identifier = sas_device->enclosure_logical_id;
rc = 0;
@@ -1295,8 +1495,7 @@ _transport_get_bay_identifier(struct sas_rphy *rphy)
int rc;
spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = __mpt3sas_get_sdev_by_addr(ioc,
- rphy->identify.sas_address);
+ sas_device = __mpt3sas_get_sdev_by_rphy(ioc, rphy);
if (sas_device) {
rc = sas_device->slot;
sas_device_put(sas_device);
@@ -1417,7 +1616,7 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t));
mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
- mpi_request->PhysicalPort = 0xFF;
+ mpi_request->PhysicalPort = _transport_get_port_id_by_sas_phy(phy);
mpi_request->VF_ID = 0; /* TODO */
mpi_request->VP_ID = 0;
mpi_request->SASAddress = cpu_to_le64(phy->identify.sas_address);
@@ -1499,11 +1698,14 @@ _transport_phy_reset(struct sas_phy *phy, int hard_reset)
struct MPT3SAS_ADAPTER *ioc = phy_to_ioc(phy);
Mpi2SasIoUnitControlReply_t mpi_reply;
Mpi2SasIoUnitControlRequest_t mpi_request;
+ struct hba_port *port = phy->hostdata;
+ int port_id = port->port_id;
unsigned long flags;
spin_lock_irqsave(&ioc->sas_node_lock, flags);
if (_transport_sas_node_find_by_sas_address(ioc,
- phy->identify.sas_address) == NULL) {
+ phy->identify.sas_address,
+ mpt3sas_get_port_by_id(ioc, port_id, 0)) == NULL) {
spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
return -EINVAL;
}
@@ -1556,10 +1758,13 @@ _transport_phy_enable(struct sas_phy *phy, int enable)
int rc = 0;
unsigned long flags;
int i, discovery_active;
+ struct hba_port *port = phy->hostdata;
+ int port_id = port->port_id;
spin_lock_irqsave(&ioc->sas_node_lock, flags);
if (_transport_sas_node_find_by_sas_address(ioc,
- phy->identify.sas_address) == NULL) {
+ phy->identify.sas_address,
+ mpt3sas_get_port_by_id(ioc, port_id, 0)) == NULL) {
spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
return -EINVAL;
}
@@ -1693,10 +1898,13 @@ _transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
int i;
int rc = 0;
unsigned long flags;
+ struct hba_port *port = phy->hostdata;
+ int port_id = port->port_id;
spin_lock_irqsave(&ioc->sas_node_lock, flags);
if (_transport_sas_node_find_by_sas_address(ioc,
- phy->identify.sas_address) == NULL) {
+ phy->identify.sas_address,
+ mpt3sas_get_port_by_id(ioc, port_id, 0)) == NULL) {
spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
return -EINVAL;
}
@@ -1898,7 +2106,7 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t));
mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
- mpi_request->PhysicalPort = 0xFF;
+ mpi_request->PhysicalPort = _transport_get_port_id_by_rphy(ioc, rphy);
mpi_request->SASAddress = (rphy) ?
cpu_to_le64(rphy->identify.sas_address) :
cpu_to_le64(ioc->sas_hba.sas_address);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_pages.h b/drivers/scsi/mpt3sas/mpt3sas_trigger_pages.h
new file mode 100644
index 000000000000..5f3328f011a2
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_pages.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+/*
+ * This is the Fusion MPT base driver providing common API layer interface
+ * to store diag trigger values into persistent driver triggers pages
+ * for MPT (Message Passing Technology) based controllers.
+ *
+ * Copyright (C) 2020 Broadcom Inc.
+ *
+ * Authors: Broadcom Inc.
+ * Sreekanth Reddy <sreekanth.reddy@broadcom.com>
+ *
+ * Send feedback to: MPT-FusionLinux.pdl@broadcom.com
+ */
+
+#include "mpi/mpi2_cnfg.h"
+
+#ifndef MPI2_TRIGGER_PAGES_H
+#define MPI2_TRIGGER_PAGES_H
+
+#define MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER (0xE0)
+#define MPI26_DRIVER_TRIGGER_PAGE0_PAGEVERSION (0x01)
+typedef struct _MPI26_CONFIG_PAGE_DRIVER_TIGGER_0 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U16 TriggerFlags; /* 0x08 */
+ U16 Reserved0xA; /* 0x0A */
+ U32 Reserved0xC[61]; /* 0x0C */
+} _MPI26_CONFIG_PAGE_DRIVER_TIGGER_0, Mpi26DriverTriggerPage0_t;
+
+/* Trigger Flags */
+#define MPI26_DRIVER_TRIGGER0_FLAG_MASTER_TRIGGER_VALID (0x0001)
+#define MPI26_DRIVER_TRIGGER0_FLAG_MPI_EVENT_TRIGGER_VALID (0x0002)
+#define MPI26_DRIVER_TRIGGER0_FLAG_SCSI_SENSE_TRIGGER_VALID (0x0004)
+#define MPI26_DRIVER_TRIGGER0_FLAG_LOGINFO_TRIGGER_VALID (0x0008)
+
+#define MPI26_DRIVER_TRIGGER_PAGE1_PAGEVERSION (0x01)
+typedef struct _MPI26_DRIVER_MASTER_TIGGER_ENTRY {
+ U32 MasterTriggerFlags;
+} MPI26_DRIVER_MASTER_TIGGER_ENTRY;
+
+#define MPI26_MAX_MASTER_TRIGGERS (1)
+typedef struct _MPI26_CONFIG_PAGE_DRIVER_TIGGER_1 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U16 NumMasterTrigger; /* 0x08 */
+ U16 Reserved0xA; /* 0x0A */
+ MPI26_DRIVER_MASTER_TIGGER_ENTRY MasterTriggers[MPI26_MAX_MASTER_TRIGGERS]; /* 0x0C */
+} MPI26_CONFIG_PAGE_DRIVER_TIGGER_1, Mpi26DriverTriggerPage1_t;
+
+#define MPI26_DRIVER_TRIGGER_PAGE2_PAGEVERSION (0x01)
+typedef struct _MPI26_DRIVER_MPI_EVENT_TIGGER_ENTRY {
+ U16 MPIEventCode; /* 0x00 */
+ U16 MPIEventCodeSpecific; /* 0x02 */
+} MPI26_DRIVER_MPI_EVENT_TIGGER_ENTRY;
+
+#define MPI26_MAX_MPI_EVENT_TRIGGERS (20)
+typedef struct _MPI26_CONFIG_PAGE_DRIVER_TIGGER_2 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U16 NumMPIEventTrigger; /* 0x08 */
+ U16 Reserved0xA; /* 0x0A */
+ MPI26_DRIVER_MPI_EVENT_TIGGER_ENTRY MPIEventTriggers[MPI26_MAX_MPI_EVENT_TRIGGERS]; /* 0x0C */
+} MPI26_CONFIG_PAGE_DRIVER_TIGGER_2, Mpi26DriverTriggerPage2_t;
+
+#define MPI26_DRIVER_TRIGGER_PAGE3_PAGEVERSION (0x01)
+typedef struct _MPI26_DRIVER_SCSI_SENSE_TIGGER_ENTRY {
+ U8 ASCQ; /* 0x00 */
+ U8 ASC; /* 0x01 */
+ U8 SenseKey; /* 0x02 */
+ U8 Reserved; /* 0x03 */
+} MPI26_DRIVER_SCSI_SENSE_TIGGER_ENTRY;
+
+#define MPI26_MAX_SCSI_SENSE_TRIGGERS (20)
+typedef struct _MPI26_CONFIG_PAGE_DRIVER_TIGGER_3 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U16 NumSCSISenseTrigger; /* 0x08 */
+ U16 Reserved0xA; /* 0x0A */
+ MPI26_DRIVER_SCSI_SENSE_TIGGER_ENTRY SCSISenseTriggers[MPI26_MAX_SCSI_SENSE_TRIGGERS]; /* 0x0C */
+} MPI26_CONFIG_PAGE_DRIVER_TIGGER_3, Mpi26DriverTriggerPage3_t;
+
+#define MPI26_DRIVER_TRIGGER_PAGE4_PAGEVERSION (0x01)
+typedef struct _MPI26_DRIVER_IOCSTATUS_LOGINFO_TIGGER_ENTRY {
+ U16 IOCStatus; /* 0x00 */
+ U16 Reserved; /* 0x02 */
+ U32 LogInfo; /* 0x04 */
+} MPI26_DRIVER_IOCSTATUS_LOGINFO_TIGGER_ENTRY;
+
+#define MPI26_MAX_LOGINFO_TRIGGERS (20)
+typedef struct _MPI26_CONFIG_PAGE_DRIVER_TIGGER_4 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U16 NumIOCStatusLogInfoTrigger; /* 0x08 */
+ U16 Reserved0xA; /* 0x0A */
+ MPI26_DRIVER_IOCSTATUS_LOGINFO_TIGGER_ENTRY IOCStatusLoginfoTriggers[MPI26_MAX_LOGINFO_TRIGGERS]; /* 0x0C */
+} MPI26_CONFIG_PAGE_DRIVER_TIGGER_4, Mpi26DriverTriggerPage4_t;
+
+#endif
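
The header above only lays out the persistent trigger page formats; the driver fills them elsewhere. As a hedged illustration of how one MPI event trigger slot in page 2 could be populated (the helper name and values are invented for this sketch, and a production caller would also convert multi-byte fields with cpu_to_le16() since config pages are little-endian):

	#include "mpt3sas_trigger_pages.h"

	/* Illustrative only: fill the first event-trigger slot of page 2. */
	static void example_fill_event_trigger(Mpi26DriverTriggerPage2_t *pg2,
					       U16 event_code, U16 specific)
	{
		pg2->NumMPIEventTrigger = 1;
		pg2->MPIEventTriggers[0].MPIEventCode = event_code;
		pg2->MPIEventTriggers[0].MPIEventCodeSpecific = specific;
	}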
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index 0354898d7cac..71b6a1f834cd 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -2296,7 +2296,6 @@ static int mvumi_cfg_hw_reg(struct mvumi_hba *mhba)
break;
default:
return -1;
- break;
}
return 0;
@@ -2559,7 +2558,7 @@ static void mvumi_detach_one(struct pci_dev *pdev)
/**
* mvumi_shutdown - Shutdown entry point
- * @device: Generic device structure
+ * @pdev: PCI device structure
*/
static void mvumi_shutdown(struct pci_dev *pdev)
{
@@ -2568,49 +2567,28 @@ static void mvumi_shutdown(struct pci_dev *pdev)
mvumi_flush_cache(mhba);
}
-static int __maybe_unused mvumi_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused mvumi_suspend(struct device *dev)
{
- struct mvumi_hba *mhba = NULL;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct mvumi_hba *mhba = pci_get_drvdata(pdev);
- mhba = pci_get_drvdata(pdev);
mvumi_flush_cache(mhba);
- pci_set_drvdata(pdev, mhba);
mhba->instancet->disable_intr(mhba);
- free_irq(mhba->pdev->irq, mhba);
mvumi_unmap_pci_addr(pdev, mhba->base_addr);
- pci_release_regions(pdev);
- pci_save_state(pdev);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
return 0;
}
-static int __maybe_unused mvumi_resume(struct pci_dev *pdev)
+static int __maybe_unused mvumi_resume(struct device *dev)
{
int ret;
- struct mvumi_hba *mhba = NULL;
-
- mhba = pci_get_drvdata(pdev);
-
- pci_set_power_state(pdev, PCI_D0);
- pci_enable_wake(pdev, PCI_D0, 0);
- pci_restore_state(pdev);
-
- ret = pci_enable_device(pdev);
- if (ret) {
- dev_err(&pdev->dev, "enable device failed\n");
- return ret;
- }
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct mvumi_hba *mhba = pci_get_drvdata(pdev);
- ret = mvumi_pci_set_master(pdev);
ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (ret)
goto fail;
- ret = pci_request_regions(mhba->pdev, MV_DRIVER_NAME);
- if (ret)
- goto fail;
ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr);
if (ret)
goto release_regions;
@@ -2628,12 +2606,6 @@ static int __maybe_unused mvumi_resume(struct pci_dev *pdev)
goto unmap_pci_addr;
}
- ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED,
- "mvumi", mhba);
- if (ret) {
- dev_err(&pdev->dev, "failed to register IRQ\n");
- goto unmap_pci_addr;
- }
mhba->instancet->enable_intr(mhba);
return 0;
@@ -2643,11 +2615,12 @@ unmap_pci_addr:
release_regions:
pci_release_regions(pdev);
fail:
- pci_disable_device(pdev);
return ret;
}
+static SIMPLE_DEV_PM_OPS(mvumi_pm_ops, mvumi_suspend, mvumi_resume);
+
static struct pci_driver mvumi_pci_driver = {
.name = MV_DRIVER_NAME,
@@ -2655,10 +2628,7 @@ static struct pci_driver mvumi_pci_driver = {
.probe = mvumi_probe_one,
.remove = mvumi_detach_one,
.shutdown = mvumi_shutdown,
-#ifdef CONFIG_PM
- .suspend = mvumi_suspend,
- .resume = mvumi_resume,
-#endif
+ .driver.pm = &mvumi_pm_ops,
};
module_pci_driver(mvumi_pci_driver);
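
The mvumi conversion above is the standard migration from the legacy pci_driver .suspend/.resume hooks to dev_pm_ops: the PCI core now handles power-state transitions, config-space save/restore, and device enable/disable, so the driver-side callbacks shrink to device-specific work. A minimal skeleton of the same pattern for a hypothetical driver (names are illustrative; SIMPLE_DEV_PM_OPS and .driver.pm are the real interfaces used by the patch):

	#include <linux/pci.h>
	#include <linux/pm.h>

	static int __maybe_unused example_suspend(struct device *dev)
	{
		struct pci_dev *pdev = to_pci_dev(dev);

		/* quiesce the device; the PCI core then saves state
		 * and powers it down */
		(void)pdev;
		return 0;
	}

	static int __maybe_unused example_resume(struct device *dev)
	{
		/* re-initialize; config space is already restored */
		return 0;
	}

	static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);

	static struct pci_driver example_driver = {
		.name		= "example",
		.driver.pm	= &example_pm_ops,
	};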
diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c
index 5fa0f4ed6565..3d8e91c07dc7 100644
--- a/drivers/scsi/myrb.c
+++ b/drivers/scsi/myrb.c
@@ -194,7 +194,6 @@ static unsigned short myrb_exec_cmd(struct myrb_hba *cb,
cb->qcmd(cb, cmd_blk);
spin_unlock_irqrestore(&cb->queue_lock, flags);
- WARN_ON(in_interrupt());
wait_for_completion(&cmpl);
return cmd_blk->status;
}
diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c
index 7a3ade765ce3..4adf9ded296a 100644
--- a/drivers/scsi/myrs.c
+++ b/drivers/scsi/myrs.c
@@ -136,7 +136,6 @@ static void myrs_exec_cmd(struct myrs_hba *cs,
myrs_qcmd(cs, cmd_blk);
spin_unlock_irqrestore(&cs->queue_lock, flags);
- WARN_ON(in_interrupt());
wait_for_completion(&complete);
}
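
Both the myrb and myrs hunks drop a WARN_ON(in_interrupt()) that sat directly in front of wait_for_completion(). The wait itself can sleep, so it already catches atomic-context callers more reliably than the in_interrupt() check (which misses, for example, sections that merely hold a spinlock). If an explicit annotation is still wanted, might_sleep() is the canonical helper, as in this sketch:

	#include <linux/completion.h>
	#include <linux/kernel.h>

	static void example_exec_and_wait(struct completion *done)
	{
		might_sleep();	/* debug-time check: process context only */
		wait_for_completion(done);
	}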
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index bc5a623519e7..bb3b3884f968 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -1102,8 +1102,6 @@ static irqreturn_t nspintr(int irq, void *dev_id)
nsp_index_write(base, SCSIBUSCTRL, SCSI_ATN | AUTODIRECTION | ACKENB);
return IRQ_HANDLED;
- break;
-
case PH_RESELECT:
//nsp_dbg(NSP_DEBUG_INTR, "phase reselect");
// *sync_neg = SYNC_NOT_YET;
diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c
index 3587f7c8a428..12035baf0997 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.c
+++ b/drivers/scsi/pm8001/pm8001_ctl.c
@@ -841,10 +841,9 @@ static ssize_t pm8001_store_update_fw(struct device *cdev,
pm8001_ha->dev);
if (ret) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk(
- "Failed to load firmware image file %s, error %d\n",
- filename_ptr, ret));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "Failed to load firmware image file %s, error %d\n",
+ filename_ptr, ret);
pm8001_ha->fw_status = FAIL_OPEN_BIOS_FILE;
goto out;
}
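
This and the remaining pm80xx hunks collapse the nested PM8001_*_DBG(ha, pm8001_printk(...)) pattern into a single pm8001_dbg(ha, LEVEL, fmt, ...) call, which keeps the format string on one line and greppable. The macro itself is not shown in this section; a plausible shape for such a level-gated wrapper (an assumption, not the driver's verbatim definition) is:

	/* Assumed sketch: emit only when the level's logging bit is set. */
	#define example_dbg(ha, level, fmt, ...)				\
	do {									\
		if (unlikely((ha)->logging_level & PM8001_##level##_LOGGING))	\
			pm8001_printk(fmt, ##__VA_ARGS__);			\
	} while (0)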
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 2b7b2954ec31..c8d4d87c5473 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -400,9 +400,9 @@ int pm8001_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue)
} while ((regVal != shiftValue) && time_before(jiffies, start));
if (regVal != shiftValue) {
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("TIMEOUT:SPC_IBW_AXI_TRANSLATION_LOW"
- " = 0x%x\n", regVal));
+ pm8001_dbg(pm8001_ha, INIT,
+ "TIMEOUT:SPC_IBW_AXI_TRANSLATION_LOW = 0x%x\n",
+ regVal);
return -1;
}
return 0;
@@ -416,7 +416,7 @@ int pm8001_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue)
static void mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha,
u32 SSCbit)
{
- u32 value, offset, i;
+ u32 offset, i;
unsigned long flags;
#define SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR 0x00030000
@@ -467,7 +467,7 @@ static void mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha,
so that the written value will be 0x8090c016.
This will ensure only down-spreading SSC is enabled on the SPC.
*************************************************************/
- value = pm8001_cr32(pm8001_ha, 2, 0xd8);
+ pm8001_cr32(pm8001_ha, 2, 0xd8);
pm8001_cw32(pm8001_ha, 2, 0xd8, 0x8000C016);
/*set the shifted destination address to 0x0 to avoid error operation */
@@ -623,12 +623,10 @@ static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
value = pm8001_cr32(pm8001_ha, 0, 0x44);
offset = value & 0x03FFFFFF;
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("Scratchpad 0 Offset: %x\n", offset));
+ pm8001_dbg(pm8001_ha, INIT, "Scratchpad 0 Offset: %x\n", offset);
pcilogic = (value & 0xFC000000) >> 26;
pcibar = get_pci_bar_index(pcilogic);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("Scratchpad 0 PCI BAR: %d\n", pcibar));
+ pm8001_dbg(pm8001_ha, INIT, "Scratchpad 0 PCI BAR: %d\n", pcibar);
pm8001_ha->main_cfg_tbl_addr = base_addr =
pm8001_ha->io_mem[pcibar].memvirtaddr + offset;
pm8001_ha->general_stat_tbl_addr =
@@ -652,16 +650,15 @@ static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
* as this is shared with BIOS data */
if (deviceid == 0x8081 || deviceid == 0x0042) {
if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_SM_BASE)) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("Shift Bar4 to 0x%x failed\n",
- GSM_SM_BASE));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "Shift Bar4 to 0x%x failed\n",
+ GSM_SM_BASE);
return -1;
}
}
/* check the firmware status */
if (-1 == check_fw_ready(pm8001_ha)) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("Firmware is not ready!\n"));
+ pm8001_dbg(pm8001_ha, FAIL, "Firmware is not ready!\n");
return -EBUSY;
}
@@ -686,8 +683,7 @@ static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
}
/* notify firmware update finished and check initialization status */
if (0 == mpi_init_check(pm8001_ha)) {
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("MPI initialize successful!\n"));
+ pm8001_dbg(pm8001_ha, INIT, "MPI initialize successful!\n");
} else
return -EBUSY;
/*This register is a 16-bit timer with a resolution of 1us. This is the
@@ -709,9 +705,9 @@ static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
if (deviceid == 0x8081 || deviceid == 0x0042) {
if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_SM_BASE)) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("Shift Bar4 to 0x%x failed\n",
- GSM_SM_BASE));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "Shift Bar4 to 0x%x failed\n",
+ GSM_SM_BASE);
return -1;
}
}
@@ -729,8 +725,8 @@ static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
} while ((value != 0) && (--max_wait_count));
if (!max_wait_count) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("TIMEOUT:IBDB value/=0x%x\n", value));
+ pm8001_dbg(pm8001_ha, FAIL, "TIMEOUT:IBDB value/=0x%x\n",
+ value);
return -1;
}
@@ -747,9 +743,8 @@ static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
break;
} while (--max_wait_count);
if (!max_wait_count) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk(" TIME OUT MPI State = 0x%x\n",
- gst_len_mpistate & GST_MPI_STATE_MASK));
+ pm8001_dbg(pm8001_ha, FAIL, " TIME OUT MPI State = 0x%x\n",
+ gst_len_mpistate & GST_MPI_STATE_MASK);
return -1;
}
return 0;
@@ -763,25 +758,23 @@ static u32 soft_reset_ready_check(struct pm8001_hba_info *pm8001_ha)
{
u32 regVal, regVal1, regVal2;
if (mpi_uninit_check(pm8001_ha) != 0) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("MPI state is not ready\n"));
+ pm8001_dbg(pm8001_ha, FAIL, "MPI state is not ready\n");
return -1;
}
/* read the scratch pad 2 register bit 2 */
regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2)
& SCRATCH_PAD2_FWRDY_RST;
if (regVal == SCRATCH_PAD2_FWRDY_RST) {
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("Firmware is ready for reset .\n"));
+ pm8001_dbg(pm8001_ha, INIT, "Firmware is ready for reset.\n");
} else {
unsigned long flags;
/* Trigger NMI twice via RB6 */
spin_lock_irqsave(&pm8001_ha->lock, flags);
if (-1 == pm8001_bar4_shift(pm8001_ha, RB6_ACCESS_REG)) {
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("Shift Bar4 to 0x%x failed\n",
- RB6_ACCESS_REG));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "Shift Bar4 to 0x%x failed\n",
+ RB6_ACCESS_REG);
return -1;
}
pm8001_cw32(pm8001_ha, 2, SPC_RB6_OFFSET,
@@ -794,16 +787,14 @@ static u32 soft_reset_ready_check(struct pm8001_hba_info *pm8001_ha)
if (regVal != SCRATCH_PAD2_FWRDY_RST) {
regVal1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
regVal2 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2);
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("TIMEOUT:MSGU_SCRATCH_PAD1"
- "=0x%x, MSGU_SCRATCH_PAD2=0x%x\n",
- regVal1, regVal2));
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("SCRATCH_PAD0 value = 0x%x\n",
- pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0)));
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("SCRATCH_PAD3 value = 0x%x\n",
- pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3)));
+ pm8001_dbg(pm8001_ha, FAIL, "TIMEOUT:MSGU_SCRATCH_PAD1=0x%x, MSGU_SCRATCH_PAD2=0x%x\n",
+ regVal1, regVal2);
+ pm8001_dbg(pm8001_ha, FAIL,
+ "SCRATCH_PAD0 value = 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "SCRATCH_PAD3 value = 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3));
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
return -1;
}
@@ -828,7 +819,7 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
/* step1: Check FW is ready for soft reset */
if (soft_reset_ready_check(pm8001_ha) != 0) {
- PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("FW is not ready\n"));
+ pm8001_dbg(pm8001_ha, FAIL, "FW is not ready\n");
return -1;
}
@@ -838,46 +829,43 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
spin_lock_irqsave(&pm8001_ha->lock, flags);
if (-1 == pm8001_bar4_shift(pm8001_ha, MBIC_AAP1_ADDR_BASE)) {
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("Shift Bar4 to 0x%x failed\n",
- MBIC_AAP1_ADDR_BASE));
+ pm8001_dbg(pm8001_ha, FAIL, "Shift Bar4 to 0x%x failed\n",
+ MBIC_AAP1_ADDR_BASE);
return -1;
}
regVal = pm8001_cr32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_IOP);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("MBIC - NMI Enable VPE0 (IOP)= 0x%x\n", regVal));
+ pm8001_dbg(pm8001_ha, INIT, "MBIC - NMI Enable VPE0 (IOP)= 0x%x\n",
+ regVal);
pm8001_cw32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_IOP, 0x0);
/* map 0x70000 to BAR4(0x20), BAR2(win) */
if (-1 == pm8001_bar4_shift(pm8001_ha, MBIC_IOP_ADDR_BASE)) {
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("Shift Bar4 to 0x%x failed\n",
- MBIC_IOP_ADDR_BASE));
+ pm8001_dbg(pm8001_ha, FAIL, "Shift Bar4 to 0x%x failed\n",
+ MBIC_IOP_ADDR_BASE);
return -1;
}
regVal = pm8001_cr32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_AAP1);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("MBIC - NMI Enable VPE0 (AAP1)= 0x%x\n", regVal));
+ pm8001_dbg(pm8001_ha, INIT, "MBIC - NMI Enable VPE0 (AAP1)= 0x%x\n",
+ regVal);
pm8001_cw32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_AAP1, 0x0);
regVal = pm8001_cr32(pm8001_ha, 1, PCIE_EVENT_INTERRUPT_ENABLE);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("PCIE -Event Interrupt Enable = 0x%x\n", regVal));
+ pm8001_dbg(pm8001_ha, INIT, "PCIE -Event Interrupt Enable = 0x%x\n",
+ regVal);
pm8001_cw32(pm8001_ha, 1, PCIE_EVENT_INTERRUPT_ENABLE, 0x0);
regVal = pm8001_cr32(pm8001_ha, 1, PCIE_EVENT_INTERRUPT);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("PCIE - Event Interrupt = 0x%x\n", regVal));
+ pm8001_dbg(pm8001_ha, INIT, "PCIE - Event Interrupt = 0x%x\n",
+ regVal);
pm8001_cw32(pm8001_ha, 1, PCIE_EVENT_INTERRUPT, regVal);
regVal = pm8001_cr32(pm8001_ha, 1, PCIE_ERROR_INTERRUPT_ENABLE);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("PCIE -Error Interrupt Enable = 0x%x\n", regVal));
+ pm8001_dbg(pm8001_ha, INIT, "PCIE -Error Interrupt Enable = 0x%x\n",
+ regVal);
pm8001_cw32(pm8001_ha, 1, PCIE_ERROR_INTERRUPT_ENABLE, 0x0);
regVal = pm8001_cr32(pm8001_ha, 1, PCIE_ERROR_INTERRUPT);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("PCIE - Error Interrupt = 0x%x\n", regVal));
+ pm8001_dbg(pm8001_ha, INIT, "PCIE - Error Interrupt = 0x%x\n", regVal);
pm8001_cw32(pm8001_ha, 1, PCIE_ERROR_INTERRUPT, regVal);
/* read the scratch pad 1 register bit 2 */
@@ -893,15 +881,13 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
/* map 0x0700000 to BAR4(0x20), BAR2(win) */
if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_ADDR_BASE)) {
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("Shift Bar4 to 0x%x failed\n",
- GSM_ADDR_BASE));
+ pm8001_dbg(pm8001_ha, FAIL, "Shift Bar4 to 0x%x failed\n",
+ GSM_ADDR_BASE);
return -1;
}
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("GSM 0x0(0x00007b88)-GSM Configuration and"
- " Reset = 0x%x\n",
- pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET)));
+ pm8001_dbg(pm8001_ha, INIT,
+ "GSM 0x0(0x00007b88)-GSM Configuration and Reset = 0x%x\n",
+ pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET));
/* step 3: host read GSM Configuration and Reset register */
regVal = pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET);
@@ -916,59 +902,52 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
regVal &= ~(0x00003b00);
/* host write GSM Configuration and Reset register */
pm8001_cw32(pm8001_ha, 2, GSM_CONFIG_RESET, regVal);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("GSM 0x0 (0x00007b88 ==> 0x00004088) - GSM "
- "Configuration and Reset is set to = 0x%x\n",
- pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET)));
+ pm8001_dbg(pm8001_ha, INIT,
+ "GSM 0x0 (0x00007b88 ==> 0x00004088) - GSM Configuration and Reset is set to = 0x%x\n",
+ pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET));
/* step 4: */
/* disable GSM - Read Address Parity Check */
regVal1 = pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("GSM 0x700038 - Read Address Parity Check "
- "Enable = 0x%x\n", regVal1));
+ pm8001_dbg(pm8001_ha, INIT,
+ "GSM 0x700038 - Read Address Parity Check Enable = 0x%x\n",
+ regVal1);
pm8001_cw32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK, 0x0);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("GSM 0x700038 - Read Address Parity Check Enable"
- "is set to = 0x%x\n",
- pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK)));
+ pm8001_dbg(pm8001_ha, INIT,
+ "GSM 0x700038 - Read Address Parity Check Enable is set to = 0x%x\n",
+ pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK));
/* disable GSM - Write Address Parity Check */
regVal2 = pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("GSM 0x700040 - Write Address Parity Check"
- " Enable = 0x%x\n", regVal2));
+ pm8001_dbg(pm8001_ha, INIT,
+ "GSM 0x700040 - Write Address Parity Check Enable = 0x%x\n",
+ regVal2);
pm8001_cw32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK, 0x0);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("GSM 0x700040 - Write Address Parity Check "
- "Enable is set to = 0x%x\n",
- pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK)));
+ pm8001_dbg(pm8001_ha, INIT,
+ "GSM 0x700040 - Write Address Parity Check Enable is set to = 0x%x\n",
+ pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK));
/* disable GSM - Write Data Parity Check */
regVal3 = pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("GSM 0x300048 - Write Data Parity Check"
- " Enable = 0x%x\n", regVal3));
+ pm8001_dbg(pm8001_ha, INIT, "GSM 0x300048 - Write Data Parity Check Enable = 0x%x\n",
+ regVal3);
pm8001_cw32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK, 0x0);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("GSM 0x300048 - Write Data Parity Check Enable"
- "is set to = 0x%x\n",
- pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK)));
+ pm8001_dbg(pm8001_ha, INIT,
+ "GSM 0x300048 - Write Data Parity Check Enable is set to = 0x%x\n",
+ pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK));
/* step 5: delay 10 usec */
udelay(10);
/* step 5-b: set GPIO-0 output control to tristate anyway */
if (-1 == pm8001_bar4_shift(pm8001_ha, GPIO_ADDR_BASE)) {
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("Shift Bar4 to 0x%x failed\n",
- GPIO_ADDR_BASE));
+ pm8001_dbg(pm8001_ha, INIT, "Shift Bar4 to 0x%x failed\n",
+ GPIO_ADDR_BASE);
return -1;
}
regVal = pm8001_cr32(pm8001_ha, 2, GPIO_GPIO_0_0UTPUT_CTL_OFFSET);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("GPIO Output Control Register:"
- " = 0x%x\n", regVal));
+ pm8001_dbg(pm8001_ha, INIT, "GPIO Output Control Register: = 0x%x\n",
+ regVal);
/* set GPIO-0 output control to tri-state */
regVal &= 0xFFFFFFFC;
pm8001_cw32(pm8001_ha, 2, GPIO_GPIO_0_0UTPUT_CTL_OFFSET, regVal);
@@ -977,23 +956,20 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
/* map 0x00000 to BAR4(0x20), BAR2(win) */
if (-1 == pm8001_bar4_shift(pm8001_ha, SPC_TOP_LEVEL_ADDR_BASE)) {
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("SPC Shift Bar4 to 0x%x failed\n",
- SPC_TOP_LEVEL_ADDR_BASE));
+ pm8001_dbg(pm8001_ha, FAIL, "SPC Shift Bar4 to 0x%x failed\n",
+ SPC_TOP_LEVEL_ADDR_BASE);
return -1;
}
regVal = pm8001_cr32(pm8001_ha, 2, SPC_REG_RESET);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("Top Register before resetting IOP/AAP1"
- ":= 0x%x\n", regVal));
+ pm8001_dbg(pm8001_ha, INIT, "Top Register before resetting IOP/AAP1:= 0x%x\n",
+ regVal);
regVal &= ~(SPC_REG_RESET_PCS_IOP_SS | SPC_REG_RESET_PCS_AAP1_SS);
pm8001_cw32(pm8001_ha, 2, SPC_REG_RESET, regVal);
/* step 7: Reset the BDMA/OSSP */
regVal = pm8001_cr32(pm8001_ha, 2, SPC_REG_RESET);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("Top Register before resetting BDMA/OSSP"
- ": = 0x%x\n", regVal));
+ pm8001_dbg(pm8001_ha, INIT, "Top Register before resetting BDMA/OSSP: = 0x%x\n",
+ regVal);
regVal &= ~(SPC_REG_RESET_BDMA_CORE | SPC_REG_RESET_OSSP);
pm8001_cw32(pm8001_ha, 2, SPC_REG_RESET, regVal);
@@ -1002,9 +978,9 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
/* step 9: bring the BDMA and OSSP out of reset */
regVal = pm8001_cr32(pm8001_ha, 2, SPC_REG_RESET);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("Top Register before bringing up BDMA/OSSP"
- ":= 0x%x\n", regVal));
+ pm8001_dbg(pm8001_ha, INIT,
+ "Top Register before bringing up BDMA/OSSP:= 0x%x\n",
+ regVal);
regVal |= (SPC_REG_RESET_BDMA_CORE | SPC_REG_RESET_OSSP);
pm8001_cw32(pm8001_ha, 2, SPC_REG_RESET, regVal);
@@ -1015,14 +991,13 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
/* map 0x0700000 to BAR4(0x20), BAR2(win) */
if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_ADDR_BASE)) {
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("SPC Shift Bar4 to 0x%x failed\n",
- GSM_ADDR_BASE));
+ pm8001_dbg(pm8001_ha, FAIL, "SPC Shift Bar4 to 0x%x failed\n",
+ GSM_ADDR_BASE);
return -1;
}
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("GSM 0x0 (0x00007b88)-GSM Configuration and "
- "Reset = 0x%x\n", pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET)));
+ pm8001_dbg(pm8001_ha, INIT,
+ "GSM 0x0 (0x00007b88)-GSM Configuration and Reset = 0x%x\n",
+ pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET));
regVal = pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET);
/* Put those bits to high */
/* GSM XCBI offset = 0x70 0000
@@ -1034,44 +1009,37 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
*/
regVal |= (GSM_CONFIG_RESET_VALUE);
pm8001_cw32(pm8001_ha, 2, GSM_CONFIG_RESET, regVal);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("GSM (0x00004088 ==> 0x00007b88) - GSM"
- " Configuration and Reset is set to = 0x%x\n",
- pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET)));
+ pm8001_dbg(pm8001_ha, INIT, "GSM (0x00004088 ==> 0x00007b88) - GSM Configuration and Reset is set to = 0x%x\n",
+ pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET));
/* step 12: Restore GSM - Read Address Parity Check */
regVal = pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK);
/* just for debugging */
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("GSM 0x700038 - Read Address Parity Check Enable"
- " = 0x%x\n", regVal));
+ pm8001_dbg(pm8001_ha, INIT,
+ "GSM 0x700038 - Read Address Parity Check Enable = 0x%x\n",
+ regVal);
pm8001_cw32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK, regVal1);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("GSM 0x700038 - Read Address Parity"
- " Check Enable is set to = 0x%x\n",
- pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK)));
+ pm8001_dbg(pm8001_ha, INIT, "GSM 0x700038 - Read Address Parity Check Enable is set to = 0x%x\n",
+ pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK));
/* Restore GSM - Write Address Parity Check */
regVal = pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK);
pm8001_cw32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK, regVal2);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("GSM 0x700040 - Write Address Parity Check"
- " Enable is set to = 0x%x\n",
- pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK)));
+ pm8001_dbg(pm8001_ha, INIT,
+ "GSM 0x700040 - Write Address Parity Check Enable is set to = 0x%x\n",
+ pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK));
/* Restore GSM - Write Data Parity Check */
regVal = pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK);
pm8001_cw32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK, regVal3);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("GSM 0x700048 - Write Data Parity Check Enable"
- "is set to = 0x%x\n",
- pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK)));
+ pm8001_dbg(pm8001_ha, INIT,
+ "GSM 0x700048 - Write Data Parity Check Enable is set to = 0x%x\n",
+ pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK));
/* step 13: bring the IOP and AAP1 out of reset */
/* map 0x00000 to BAR4(0x20), BAR2(win) */
if (-1 == pm8001_bar4_shift(pm8001_ha, SPC_TOP_LEVEL_ADDR_BASE)) {
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("Shift Bar4 to 0x%x failed\n",
- SPC_TOP_LEVEL_ADDR_BASE));
+ pm8001_dbg(pm8001_ha, FAIL, "Shift Bar4 to 0x%x failed\n",
+ SPC_TOP_LEVEL_ADDR_BASE);
return -1;
}
regVal = pm8001_cr32(pm8001_ha, 2, SPC_REG_RESET);
@@ -1094,22 +1062,20 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
if (!max_wait_count) {
regVal = pm8001_cr32(pm8001_ha, 0,
MSGU_SCRATCH_PAD_1);
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("TIMEOUT : ToggleVal 0x%x,"
- "MSGU_SCRATCH_PAD1 = 0x%x\n",
- toggleVal, regVal));
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("SCRATCH_PAD0 value = 0x%x\n",
- pm8001_cr32(pm8001_ha, 0,
- MSGU_SCRATCH_PAD_0)));
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("SCRATCH_PAD2 value = 0x%x\n",
- pm8001_cr32(pm8001_ha, 0,
- MSGU_SCRATCH_PAD_2)));
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("SCRATCH_PAD3 value = 0x%x\n",
- pm8001_cr32(pm8001_ha, 0,
- MSGU_SCRATCH_PAD_3)));
+ pm8001_dbg(pm8001_ha, FAIL, "TIMEOUT : ToggleVal 0x%x,MSGU_SCRATCH_PAD1 = 0x%x\n",
+ toggleVal, regVal);
+ pm8001_dbg(pm8001_ha, FAIL,
+ "SCRATCH_PAD0 value = 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0,
+ MSGU_SCRATCH_PAD_0));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "SCRATCH_PAD2 value = 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0,
+ MSGU_SCRATCH_PAD_2));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "SCRATCH_PAD3 value = 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0,
+ MSGU_SCRATCH_PAD_3));
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
return -1;
}
@@ -1124,22 +1090,22 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
if (check_fw_ready(pm8001_ha) == -1) {
regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
/* return error if MPI Configuration Table not ready */
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("FW not ready SCRATCH_PAD1"
- " = 0x%x\n", regVal));
+ pm8001_dbg(pm8001_ha, INIT,
+ "FW not ready SCRATCH_PAD1 = 0x%x\n",
+ regVal);
regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2);
/* return error if MPI Configuration Table not ready */
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("FW not ready SCRATCH_PAD2"
- " = 0x%x\n", regVal));
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("SCRATCH_PAD0 value = 0x%x\n",
- pm8001_cr32(pm8001_ha, 0,
- MSGU_SCRATCH_PAD_0)));
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("SCRATCH_PAD3 value = 0x%x\n",
- pm8001_cr32(pm8001_ha, 0,
- MSGU_SCRATCH_PAD_3)));
+ pm8001_dbg(pm8001_ha, INIT,
+ "FW not ready SCRATCH_PAD2 = 0x%x\n",
+ regVal);
+ pm8001_dbg(pm8001_ha, INIT,
+ "SCRATCH_PAD0 value = 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0,
+ MSGU_SCRATCH_PAD_0));
+ pm8001_dbg(pm8001_ha, INIT,
+ "SCRATCH_PAD3 value = 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0,
+ MSGU_SCRATCH_PAD_3));
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
return -1;
}
@@ -1147,8 +1113,7 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
pm8001_bar4_shift(pm8001_ha, 0);
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("SPC soft reset Complete\n"));
+ pm8001_dbg(pm8001_ha, INIT, "SPC soft reset Complete\n");
return 0;
}
@@ -1156,8 +1121,7 @@ static void pm8001_hw_chip_rst(struct pm8001_hba_info *pm8001_ha)
{
u32 i;
u32 regVal;
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("chip reset start\n"));
+ pm8001_dbg(pm8001_ha, INIT, "chip reset start\n");
/* do SPC chip reset. */
regVal = pm8001_cr32(pm8001_ha, 1, SPC_REG_RESET);
@@ -1181,8 +1145,7 @@ static void pm8001_hw_chip_rst(struct pm8001_hba_info *pm8001_ha)
mdelay(1);
} while ((--i) != 0);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("chip reset finished\n"));
+ pm8001_dbg(pm8001_ha, INIT, "chip reset finished\n");
}
/**
@@ -1356,12 +1319,18 @@ int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
{
u32 Header = 0, hpriority = 0, bc = 1, category = 0x02;
void *pMessage;
-
- if (pm8001_mpi_msg_free_get(circularQ, pm8001_ha->iomb_size,
- &pMessage) < 0) {
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("No free mpi buffer\n"));
- return -ENOMEM;
+ unsigned long flags;
+ int q_index = circularQ - pm8001_ha->inbnd_q_tbl;
+ int rv = -1;
+
+ WARN_ON(q_index >= PM8001_MAX_INB_NUM);
+ spin_lock_irqsave(&circularQ->iq_lock, flags);
+ rv = pm8001_mpi_msg_free_get(circularQ, pm8001_ha->iomb_size,
+ &pMessage);
+ if (rv < 0) {
+ pm8001_dbg(pm8001_ha, IO, "No free mpi buffer\n");
+ rv = -ENOMEM;
+ goto done;
}
if (nb > (pm8001_ha->iomb_size - sizeof(struct mpi_msg_hdr)))
@@ -1380,11 +1349,13 @@ int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
/*Update the PI to the firmware*/
pm8001_cw32(pm8001_ha, circularQ->pi_pci_bar,
circularQ->pi_offset, circularQ->producer_idx);
- PM8001_DEVIO_DBG(pm8001_ha,
- pm8001_printk("INB Q %x OPCODE:%x , UPDATED PI=%d CI=%d\n",
- responseQueue, opCode, circularQ->producer_idx,
- circularQ->consumer_index));
- return 0;
+ pm8001_dbg(pm8001_ha, DEVIO,
+ "INB Q %x OPCODE:%x , UPDATED PI=%d CI=%d\n",
+ responseQueue, opCode, circularQ->producer_idx,
+ circularQ->consumer_index);
+done:
+ spin_unlock_irqrestore(&circularQ->iq_lock, flags);
+ return rv;
}
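
The pm8001_mpi_build_cmd change above gives each inbound queue its own iq_lock so that slot allocation, message construction, and the producer-index doorbell write happen as one atomic unit per queue; without it, two submitters could claim the same slot or publish a half-built message. A reduced sketch of that producer-side critical section (all types and helpers here are stand-ins, and queue-full handling is omitted):

	#include <linux/io.h>
	#include <linux/spinlock.h>
	#include <linux/string.h>
	#include <linux/types.h>

	struct example_inbound_q {
		spinlock_t iq_lock;		/* serializes producers */
		void *slots[64];		/* message slots (stand-in) */
		u32 producer_idx;
		void __iomem *pi_doorbell;	/* tells hardware the new PI */
	};

	static int example_post(struct example_inbound_q *q,
				const void *msg, size_t len)
	{
		unsigned long flags;
		void *slot;

		spin_lock_irqsave(&q->iq_lock, flags);
		slot = q->slots[q->producer_idx % 64];
		memcpy(slot, msg, len);			 /* build in place */
		q->producer_idx++;
		writel(q->producer_idx, q->pi_doorbell); /* publish PI */
		spin_unlock_irqrestore(&q->iq_lock, flags);
		return 0;
	}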
u32 pm8001_mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg,
@@ -1398,17 +1369,17 @@ u32 pm8001_mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg,
pOutBoundMsgHeader = (struct mpi_msg_hdr *)(circularQ->base_virt +
circularQ->consumer_idx * pm8001_ha->iomb_size);
if (pOutBoundMsgHeader != msgHeader) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("consumer_idx = %d msgHeader = %p\n",
- circularQ->consumer_idx, msgHeader));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "consumer_idx = %d msgHeader = %p\n",
+ circularQ->consumer_idx, msgHeader);
/* Update the producer index from SPC */
producer_index = pm8001_read_32(circularQ->pi_virt);
circularQ->producer_index = cpu_to_le32(producer_index);
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("consumer_idx = %d producer_index = %d"
- "msgHeader = %p\n", circularQ->consumer_idx,
- circularQ->producer_index, msgHeader));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "consumer_idx = %d producer_index = %dmsgHeader = %p\n",
+ circularQ->consumer_idx,
+ circularQ->producer_index, msgHeader);
return 0;
}
/* free the circular queue buffer elements associated with the message*/
@@ -1420,9 +1391,8 @@ u32 pm8001_mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg,
/* Update the producer index from SPC*/
producer_index = pm8001_read_32(circularQ->pi_virt);
circularQ->producer_index = cpu_to_le32(producer_index);
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk(" CI=%d PI=%d\n", circularQ->consumer_idx,
- circularQ->producer_index));
+ pm8001_dbg(pm8001_ha, IO, " CI=%d PI=%d\n",
+ circularQ->consumer_idx, circularQ->producer_index);
return 0;
}
@@ -1452,10 +1422,10 @@ u32 pm8001_mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
/* read header */
header_tmp = pm8001_read_32(msgHeader);
msgHeader_tmp = cpu_to_le32(header_tmp);
- PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
- "outbound opcode msgheader:%x ci=%d pi=%d\n",
- msgHeader_tmp, circularQ->consumer_idx,
- circularQ->producer_index));
+ pm8001_dbg(pm8001_ha, DEVIO,
+ "outbound opcode msgheader:%x ci=%d pi=%d\n",
+ msgHeader_tmp, circularQ->consumer_idx,
+ circularQ->producer_index);
if (0 != (le32_to_cpu(msgHeader_tmp) & 0x80000000)) {
if (OPC_OUB_SKIP_ENTRY !=
(le32_to_cpu(msgHeader_tmp) & 0xfff)) {
@@ -1464,12 +1434,11 @@ u32 pm8001_mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
sizeof(struct mpi_msg_hdr);
*pBC = (u8)((le32_to_cpu(msgHeader_tmp)
>> 24) & 0x1f);
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk(": CI=%d PI=%d "
- "msgHeader=%x\n",
- circularQ->consumer_idx,
- circularQ->producer_index,
- msgHeader_tmp));
+ pm8001_dbg(pm8001_ha, IO,
+ ": CI=%d PI=%d msgHeader=%x\n",
+ circularQ->consumer_idx,
+ circularQ->producer_index,
+ msgHeader_tmp);
return MPI_IO_STATUS_SUCCESS;
} else {
circularQ->consumer_idx =
@@ -1578,17 +1547,15 @@ void pm8001_work_fn(struct work_struct *work)
ts->stat = SAS_QUEUE_FULL;
pm8001_dev = ccb->device;
if (pm8001_dev)
- pm8001_dev->running_req--;
+ atomic_dec(&pm8001_dev->running_req);
spin_lock_irqsave(&t->task_state_lock, flags1);
t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
spin_unlock_irqrestore(&t->task_state_lock, flags1);
- PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("task 0x%p"
- " done with event 0x%x resp 0x%x stat 0x%x but"
- " aborted by upper layer!\n",
- t, pw->handler, ts->resp, ts->stat));
+ pm8001_dbg(pm8001_ha, FAIL, "task 0x%p done with event 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
+ t, pw->handler, ts->resp, ts->stat);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
} else {
@@ -1608,26 +1575,16 @@ void pm8001_work_fn(struct work_struct *work)
unsigned long flags, flags1;
int i, ret = 0;
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n");
ret = pm8001_query_task(t);
- PM8001_IO_DBG(pm8001_ha,
- switch (ret) {
- case TMF_RESP_FUNC_SUCC:
- pm8001_printk("...Task on lu\n");
- break;
-
- case TMF_RESP_FUNC_COMPLETE:
- pm8001_printk("...Task NOT on lu\n");
- break;
-
- default:
- PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
- "...query task failed!!!\n"));
- break;
- });
+ if (ret == TMF_RESP_FUNC_SUCC)
+ pm8001_dbg(pm8001_ha, IO, "...Task on lu\n");
+ else if (ret == TMF_RESP_FUNC_COMPLETE)
+ pm8001_dbg(pm8001_ha, IO, "...Task NOT on lu\n");
+ else
+ pm8001_dbg(pm8001_ha, DEVIO, "...query task failed!!!\n");
spin_lock_irqsave(&pm8001_ha->lock, flags);
@@ -1672,8 +1629,7 @@ void pm8001_work_fn(struct work_struct *work)
break;
default: /* device misbehavior */
ret = TMF_RESP_FUNC_FAILED;
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("...Reset phy\n"));
+ pm8001_dbg(pm8001_ha, IO, "...Reset phy\n");
pm8001_I_T_nexus_reset(dev);
break;
}
@@ -1687,15 +1643,14 @@ void pm8001_work_fn(struct work_struct *work)
default: /* device misbehavior */
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
ret = TMF_RESP_FUNC_FAILED;
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("...Reset phy\n"));
+ pm8001_dbg(pm8001_ha, IO, "...Reset phy\n");
pm8001_I_T_nexus_reset(dev);
}
if (ret == TMF_RESP_FUNC_FAILED)
t = NULL;
pm8001_open_reject_retry(pm8001_ha, t, pm8001_dev);
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("...Complete\n"));
+ pm8001_dbg(pm8001_ha, IO, "...Complete\n");
} break;
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
dev = pm8001_dev->sas_device;
@@ -1749,15 +1704,14 @@ static void pm8001_send_abort_all(struct pm8001_hba_info *pm8001_ha,
int ret;
if (!pm8001_ha_dev) {
- PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("dev is null\n"));
+ pm8001_dbg(pm8001_ha, FAIL, "dev is null\n");
return;
}
task = sas_alloc_slow_task(GFP_ATOMIC);
if (!task) {
- PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("cannot "
- "allocate task\n"));
+ pm8001_dbg(pm8001_ha, FAIL, "cannot allocate task\n");
return;
}
@@ -1802,8 +1756,7 @@ static void pm8001_send_read_log(struct pm8001_hba_info *pm8001_ha,
task = sas_alloc_slow_task(GFP_ATOMIC);
if (!task) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("cannot allocate task !!!\n"));
+ pm8001_dbg(pm8001_ha, FAIL, "cannot allocate task !!!\n");
return;
}
task->task_done = pm8001_task_done;
@@ -1811,8 +1764,7 @@ static void pm8001_send_read_log(struct pm8001_hba_info *pm8001_ha,
res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
if (res) {
sas_free_task(task);
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("cannot allocate tag !!!\n"));
+ pm8001_dbg(pm8001_ha, FAIL, "cannot allocate tag !!!\n");
return;
}
@@ -1823,8 +1775,8 @@ static void pm8001_send_read_log(struct pm8001_hba_info *pm8001_ha,
if (!dev) {
sas_free_task(task);
pm8001_tag_free(pm8001_ha, ccb_tag);
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("Domain device cannot be allocated\n"));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "Domain device cannot be allocated\n");
return;
}
task->dev = dev;
@@ -1901,27 +1853,25 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
t = ccb->task;
if (status && status != IO_UNDERFLOW)
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("sas IO status 0x%x\n", status));
+ pm8001_dbg(pm8001_ha, FAIL, "sas IO status 0x%x\n", status);
if (unlikely(!t || !t->lldd_task || !t->dev))
return;
ts = &t->task_status;
/* Print sas address of IO failed device */
if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
(status != IO_UNDERFLOW))
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("SAS Address of IO Failure Drive:"
- "%016llx", SAS_ADDR(t->dev->sas_addr)));
+ pm8001_dbg(pm8001_ha, FAIL, "SAS Address of IO Failure Drive:%016llx\n",
+ SAS_ADDR(t->dev->sas_addr));
if (status)
- PM8001_IOERR_DBG(pm8001_ha, pm8001_printk(
- "status:0x%x, tag:0x%x, task:0x%p\n",
- status, tag, t));
+ pm8001_dbg(pm8001_ha, IOERR,
+ "status:0x%x, tag:0x%x, task:0x%p\n",
+ status, tag, t);
switch (status) {
case IO_SUCCESS:
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS"
- ",param = %d\n", param));
+ pm8001_dbg(pm8001_ha, IO, "IO_SUCCESS,param = %d\n",
+ param);
if (param == 0) {
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAM_STAT_GOOD;
@@ -1933,69 +1883,63 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
sas_ssp_task_response(pm8001_ha->dev, t, iu);
}
if (pm8001_dev)
- pm8001_dev->running_req--;
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_ABORTED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_ABORTED IOMB Tag\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_ABORTED IOMB Tag\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_ABORTED_TASK;
break;
case IO_UNDERFLOW:
/* SSP Completion with error */
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW"
- ",param = %d\n", param));
+ pm8001_dbg(pm8001_ha, IO, "IO_UNDERFLOW,param = %d\n",
+ param);
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_UNDERRUN;
ts->residual = param;
if (pm8001_dev)
- pm8001_dev->running_req--;
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_NO_DEVICE:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_NO_DEVICE\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_NO_DEVICE\n");
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_PHY_DOWN;
break;
case IO_XFER_ERROR_BREAK:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
/* Force the midlayer to retry */
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
case IO_XFER_ERROR_PHY_NOT_READY:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_EPROTO;
break;
case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_UNKNOWN;
break;
case IO_OPEN_CNX_ERROR_BREAK:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_UNKNOWN;
@@ -2005,68 +1949,59 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
break;
case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_BAD_DESTINATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_BAD_DEST;
break;
case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_CONNECTION_RATE_"
- "NOT_SUPPORTED\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_CONN_RATE;
break;
case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n");
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
break;
case IO_XFER_ERROR_NAK_RECEIVED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_NAK_RECEIVED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_ACK_NAK_TIMEOUT\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_NAK_R_ERR;
break;
case IO_XFER_ERROR_DMA:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_DMA\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_DMA\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
break;
case IO_XFER_OPEN_RETRY_TIMEOUT:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
case IO_XFER_ERROR_OFFSET_MISMATCH:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_OFFSET_MISMATCH\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
break;
case IO_PORT_IN_RESET:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_PORT_IN_RESET\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_PORT_IN_RESET\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
break;
case IO_DS_NON_OPERATIONAL:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_DS_NON_OPERATIONAL\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_DS_NON_OPERATIONAL\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
if (!t->uldd_task)
@@ -2075,51 +2010,44 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
IO_DS_NON_OPERATIONAL);
break;
case IO_DS_IN_RECOVERY:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_DS_IN_RECOVERY\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_DS_IN_RECOVERY\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
break;
case IO_TM_TAG_NOT_FOUND:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_TM_TAG_NOT_FOUND\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_TM_TAG_NOT_FOUND\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
break;
case IO_SSP_EXT_IU_ZERO_LEN_ERROR:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_SSP_EXT_IU_ZERO_LEN_ERROR\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_SSP_EXT_IU_ZERO_LEN_ERROR\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
break;
case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
default:
- PM8001_DEVIO_DBG(pm8001_ha,
- pm8001_printk("Unknown status 0x%x\n", status));
+ pm8001_dbg(pm8001_ha, DEVIO, "Unknown status 0x%x\n", status);
/* not allowed case. Therefore, return failed status */
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
break;
}
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("scsi_status = %x\n ",
- psspPayload->ssp_resp_iu.status));
+ pm8001_dbg(pm8001_ha, IO, "scsi_status = %x\n",
+ psspPayload->ssp_resp_iu.status);
spin_lock_irqsave(&t->task_state_lock, flags);
t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
spin_unlock_irqrestore(&t->task_state_lock, flags);
- PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("task 0x%p done with"
- " io_status 0x%x resp 0x%x "
- "stat 0x%x but aborted by upper layer!\n",
- t, status, ts->resp, ts->stat));
+ pm8001_dbg(pm8001_ha, FAIL, "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
+ t, status, ts->resp, ts->stat);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
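
Every logging hunk in this file collapses the old two-macro form (PM8001_IO_DBG wrapping pm8001_printk) into a single pm8001_dbg(ha, LEVEL, fmt, ...) call. A hedged sketch of how such a level-filtered, single-call macro can be assembled in plain C; the mask values and helper names are illustrative assumptions, not the driver's definitions:

#include <stdarg.h>
#include <stdio.h>

/* Illustrative logging levels; the driver keys these off a module mask. */
enum dbg_level { DBG_IO = 1 << 0, DBG_FAIL = 1 << 1, DBG_MSG = 1 << 2 };

static unsigned int logging_level = DBG_FAIL | DBG_MSG;

static void dbg_printf(const char *prefix, const char *fmt, ...)
{
	va_list ap;

	fprintf(stderr, "%s: ", prefix);
	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);
	va_end(ap);
}

/* One call site carries both the level check and the format string. */
#define demo_dbg(level, fmt, ...)					\
	do {								\
		if (logging_level & DBG_##level)			\
			dbg_printf(#level, fmt, ##__VA_ARGS__);		\
	} while (0)

int main(void)
{
	demo_dbg(FAIL, "sas IO status 0x%x\n", 0x47);
	demo_dbg(IO, "filtered out at this logging_level\n");
	return 0;
}

Token-pasting the level name (DBG_##level) is what lets call sites pass a bare IO or FAIL, mirroring the converted lines above.
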
@@ -2148,60 +2076,52 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
t = ccb->task;
pm8001_dev = ccb->device;
if (event)
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("sas IO status 0x%x\n", event));
+ pm8001_dbg(pm8001_ha, FAIL, "sas IO status 0x%x\n", event);
if (unlikely(!t || !t->lldd_task || !t->dev))
return;
ts = &t->task_status;
- PM8001_DEVIO_DBG(pm8001_ha,
- pm8001_printk("port_id = %x,device_id = %x\n",
- port_id, dev_id));
+ pm8001_dbg(pm8001_ha, DEVIO, "port_id = %x,device_id = %x\n",
+ port_id, dev_id);
switch (event) {
case IO_OVERFLOW:
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n");)
+ pm8001_dbg(pm8001_ha, IO, "IO_UNDERFLOW\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
ts->residual = 0;
if (pm8001_dev)
- pm8001_dev->running_req--;
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_BREAK:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n");
pm8001_handle_event(pm8001_ha, t, IO_XFER_ERROR_BREAK);
return;
case IO_XFER_ERROR_PHY_NOT_READY:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT"
- "_SUPPORTED\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_EPROTO;
break;
case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_UNKNOWN;
break;
case IO_OPEN_CNX_ERROR_BREAK:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_UNKNOWN;
@@ -2211,88 +2131,78 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
break;
case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_BAD_DESTINATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_BAD_DEST;
break;
case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_CONNECTION_RATE_"
- "NOT_SUPPORTED\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_CONN_RATE;
break;
case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
break;
case IO_XFER_ERROR_NAK_RECEIVED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_NAK_RECEIVED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_ACK_NAK_TIMEOUT\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_NAK_R_ERR;
break;
case IO_XFER_OPEN_RETRY_TIMEOUT:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n");
pm8001_handle_event(pm8001_ha, t, IO_XFER_OPEN_RETRY_TIMEOUT);
return;
case IO_XFER_ERROR_UNEXPECTED_PHASE:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_UNEXPECTED_PHASE\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_UNEXPECTED_PHASE\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
break;
case IO_XFER_ERROR_XFER_RDY_OVERRUN:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_XFER_RDY_OVERRUN\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_XFER_RDY_OVERRUN\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
break;
case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
break;
case IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
break;
case IO_XFER_ERROR_OFFSET_MISMATCH:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_OFFSET_MISMATCH\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
break;
case IO_XFER_ERROR_XFER_ZERO_DATA_LEN:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
break;
case IO_XFER_CMD_FRAME_ISSUED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk(" IO_XFER_CMD_FRAME_ISSUED\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_CMD_FRAME_ISSUED\n");
return;
default:
- PM8001_DEVIO_DBG(pm8001_ha,
- pm8001_printk("Unknown status 0x%x\n", event));
+ pm8001_dbg(pm8001_ha, DEVIO, "Unknown status 0x%x\n", event);
/* not allowed case. Therefore, return failed status */
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
@@ -2304,10 +2214,8 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
t->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
spin_unlock_irqrestore(&t->task_state_lock, flags);
- PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("task 0x%p done with"
- " event 0x%x resp 0x%x "
- "stat 0x%x but aborted by upper layer!\n",
- t, event, ts->resp, ts->stat));
+ pm8001_dbg(pm8001_ha, FAIL, "task 0x%p done with event 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
+ t, event, ts->resp, ts->stat);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
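
Both mpi_ssp_completion and mpi_ssp_event end the same way: take task_state_lock, set SAS_TASK_STATE_DONE, and only then test SAS_TASK_STATE_ABORTED, so a late abort from the upper layer cannot slip in between the check and the callback. A pthread sketch of that check-under-lock shape (flag names mirror the driver; the structure is a simplified assumption):

#include <pthread.h>
#include <stdio.h>

#define TASK_PENDING	(1u << 0)
#define TASK_DONE	(1u << 1)
#define TASK_ABORTED	(1u << 2)

struct demo_task {
	pthread_mutex_t lock;
	unsigned int state;
};

/* Completion path: mark DONE and decide the task's fate under one lock. */
static void complete_task(struct demo_task *t)
{
	pthread_mutex_lock(&t->lock);
	t->state &= ~TASK_PENDING;
	t->state |= TASK_DONE;
	if (t->state & TASK_ABORTED) {
		pthread_mutex_unlock(&t->lock);
		/* Aborted by the upper layer: free local resources only. */
		printf("task done but aborted; freeing ccb only\n");
		return;
	}
	pthread_mutex_unlock(&t->lock);
	/* Normal path: hand the result back to the submitter. */
	printf("task done; invoking task_done callback\n");
}

int main(void)
{
	struct demo_task t = { PTHREAD_MUTEX_INITIALIZER, TASK_PENDING };

	complete_task(&t);
	return 0;
}
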
@@ -2343,8 +2251,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
tag = le32_to_cpu(psataPayload->tag);
if (!tag) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("tag null\n"));
+ pm8001_dbg(pm8001_ha, FAIL, "tag null\n");
return;
}
ccb = &pm8001_ha->ccb_info[tag];
@@ -2353,8 +2260,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
t = ccb->task;
pm8001_dev = ccb->device;
} else {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("ccb null\n"));
+ pm8001_dbg(pm8001_ha, FAIL, "ccb null\n");
return;
}
@@ -2362,29 +2268,26 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
if (t->dev && (t->dev->lldd_dev))
pm8001_dev = t->dev->lldd_dev;
} else {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("task null\n"));
+ pm8001_dbg(pm8001_ha, FAIL, "task null\n");
return;
}
if ((pm8001_dev && !(pm8001_dev->id & NCQ_READ_LOG_FLAG))
&& unlikely(!t || !t->lldd_task || !t->dev)) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("task or dev null\n"));
+ pm8001_dbg(pm8001_ha, FAIL, "task or dev null\n");
return;
}
ts = &t->task_status;
if (!ts) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("ts null\n"));
+ pm8001_dbg(pm8001_ha, FAIL, "ts null\n");
return;
}
if (status)
- PM8001_IOERR_DBG(pm8001_ha, pm8001_printk(
- "status:0x%x, tag:0x%x, task::0x%p\n",
- status, tag, t));
+ pm8001_dbg(pm8001_ha, IOERR,
+ "status:0x%x, tag:0x%x, task::0x%p\n",
+ status, tag, t);
/* Print sas address of IO failed device */
if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
@@ -2416,19 +2319,19 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
& 0xff000000)) +
pm8001_dev->attached_phy +
0x10);
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("SAS Address of IO Failure Drive:"
- "%08x%08x", temp_sata_addr_hi,
- temp_sata_addr_low));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "SAS Address of IO Failure Drive:%08x%08x\n",
+ temp_sata_addr_hi,
+ temp_sata_addr_low);
} else {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("SAS Address of IO Failure Drive:"
- "%016llx", SAS_ADDR(t->dev->sas_addr)));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "SAS Address of IO Failure Drive:%016llx\n",
+ SAS_ADDR(t->dev->sas_addr));
}
}
switch (status) {
case IO_SUCCESS:
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_SUCCESS\n");
if (param == 0) {
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAM_STAT_GOOD;
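
The failure-path logging above prints the drive's SAS address either as two 32-bit halves pulled from the device context or as a single 64-bit value via SAS_ADDR(). A small sketch showing how the halves combine into the %016llx form of the second branch; the sample values are made up:

#include <inttypes.h>
#include <stdio.h>

/* Combine the hi/lo words the way the single-value branch expects. */
static uint64_t sas_addr_from_halves(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	uint32_t hi = 0x50010c60, lo = 0x0a827e9f;	/* invented example */

	printf("SAS Address of IO Failure Drive:%08" PRIx32 "%08" PRIx32 "\n",
	       hi, lo);
	printf("SAS Address of IO Failure Drive:%016" PRIx64 "\n",
	       sas_addr_from_halves(hi, lo));
	return 0;
}
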
@@ -2450,99 +2353,102 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_PROTO_RESPONSE;
ts->residual = param;
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("SAS_PROTO_RESPONSE len = %d\n",
- param));
+ pm8001_dbg(pm8001_ha, IO,
+ "SAS_PROTO_RESPONSE len = %d\n",
+ param);
sata_resp = &psataPayload->sata_resp[0];
resp = (struct ata_task_resp *)ts->buf;
if (t->ata_task.dma_xfer == 0 &&
t->data_dir == DMA_FROM_DEVICE) {
len = sizeof(struct pio_setup_fis);
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("PIO read len = %d\n", len));
+ pm8001_dbg(pm8001_ha, IO,
+ "PIO read len = %d\n", len);
} else if (t->ata_task.use_ncq) {
len = sizeof(struct set_dev_bits_fis);
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("FPDMA len = %d\n", len));
+ pm8001_dbg(pm8001_ha, IO, "FPDMA len = %d\n",
+ len);
} else {
len = sizeof(struct dev_to_host_fis);
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("other len = %d\n", len));
+ pm8001_dbg(pm8001_ha, IO, "other len = %d\n",
+ len);
}
if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) {
resp->frame_len = len;
memcpy(&resp->ending_fis[0], sata_resp, len);
ts->buf_valid_size = sizeof(*resp);
} else
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("response to large\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "response too large\n");
}
if (pm8001_dev)
- pm8001_dev->running_req--;
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_ABORTED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_ABORTED IOMB Tag\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_ABORTED IOMB Tag\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_ABORTED_TASK;
if (pm8001_dev)
- pm8001_dev->running_req--;
+ atomic_dec(&pm8001_dev->running_req);
break;
/* following cases are to do cases */
case IO_UNDERFLOW:
/* SATA Completion with error */
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_UNDERFLOW param = %d\n", param));
+ pm8001_dbg(pm8001_ha, IO, "IO_UNDERFLOW param = %d\n", param);
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_UNDERRUN;
ts->residual = param;
if (pm8001_dev)
- pm8001_dev->running_req--;
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_NO_DEVICE:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_NO_DEVICE\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_NO_DEVICE\n");
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_PHY_DOWN;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_BREAK:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_INTERRUPTED;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_PHY_NOT_READY:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT"
- "_SUPPORTED\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_EPROTO;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_OPEN_CNX_ERROR_BREAK:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
if (!t->uldd_task) {
@@ -2556,8 +2462,8 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
}
break;
case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_BAD_DESTINATION\n");
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_BAD_DEST;
@@ -2572,17 +2478,15 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
}
break;
case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_CONNECTION_RATE_"
- "NOT_SUPPORTED\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_CONN_RATE;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_STP_RESOURCES"
- "_BUSY\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
if (!t->uldd_task) {
@@ -2596,57 +2500,65 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
}
break;
case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_NAK_RECEIVED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_NAK_RECEIVED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_NAK_R_ERR;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_ACK_NAK_TIMEOUT\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_NAK_R_ERR;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_DMA:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_DMA\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_DMA\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_ABORTED_TASK;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_SATA_LINK_TIMEOUT:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_SATA_LINK_TIMEOUT\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_SATA_LINK_TIMEOUT\n");
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_DEV_NO_RESPONSE;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_REJECTED_NCQ_MODE:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_REJECTED_NCQ_MODE\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_REJECTED_NCQ_MODE\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_UNDERRUN;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_OPEN_RETRY_TIMEOUT:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_TO;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_PORT_IN_RESET:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_PORT_IN_RESET\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_PORT_IN_RESET\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_DS_NON_OPERATIONAL:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_DS_NON_OPERATIONAL\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_DS_NON_OPERATIONAL\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
if (!t->uldd_task) {
@@ -2659,14 +2571,14 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
}
break;
case IO_DS_IN_RECOVERY:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk(" IO_DS_IN_RECOVERY\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_DS_IN_RECOVERY\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_DS_IN_ERROR:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_DS_IN_ERROR\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_DS_IN_ERROR\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
if (!t->uldd_task) {
@@ -2679,18 +2591,21 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
}
break;
case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
default:
- PM8001_DEVIO_DBG(pm8001_ha,
- pm8001_printk("Unknown status 0x%x\n", status));
+ pm8001_dbg(pm8001_ha, DEVIO, "Unknown status 0x%x\n", status);
/* not allowed case. Therefore, return failed status */
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
}
spin_lock_irqsave(&t->task_state_lock, flags);
@@ -2699,10 +2614,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
t->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
spin_unlock_irqrestore(&t->task_state_lock, flags);
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("task 0x%p done with io_status 0x%x"
- " resp 0x%x stat 0x%x but aborted by upper layer!\n",
- t, status, ts->resp, ts->stat));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
+ t, status, ts->resp, ts->stat);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
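
Notice how this SATA completion path now adds `if (pm8001_dev) atomic_dec(&pm8001_dev->running_req);` to nearly every terminal case, so the in-flight count drops exactly once per finished command regardless of status. One way to keep that invariant in a single place is a tiny helper called from each terminal path; a hedged sketch with an invented helper name:

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct demo_dev {
	atomic_int running_req;
};

/* Single exit point for "this command is finished" bookkeeping. */
static void dec_running_req(struct demo_dev *dev)
{
	if (dev)	/* a completion may arrive with no device context */
		atomic_fetch_sub(&dev->running_req, 1);
}

int main(void)
{
	struct demo_dev dev = { .running_req = 3 };

	dec_running_req(&dev);	/* e.g. IO_SUCCESS */
	dec_running_req(&dev);	/* e.g. IO_XFER_ERROR_DMA */
	dec_running_req(NULL);	/* tolerated: no device bound to the ccb */
	printf("in flight: %d\n", atomic_load(&dev.running_req));
	return 0;
}
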
@@ -2731,12 +2645,10 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
t = ccb->task;
pm8001_dev = ccb->device;
} else {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("No CCB !!!. returning\n"));
+ pm8001_dbg(pm8001_ha, FAIL, "No CCB !!!. returning\n");
}
if (event)
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("SATA EVENT 0x%x\n", event));
+ pm8001_dbg(pm8001_ha, FAIL, "SATA EVENT 0x%x\n", event);
/* Check if this is NCQ error */
if (event == IO_XFER_ERROR_ABORTED_NCQ_MODE) {
@@ -2752,61 +2664,54 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
t = ccb->task;
pm8001_dev = ccb->device;
if (event)
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("sata IO status 0x%x\n", event));
+ pm8001_dbg(pm8001_ha, FAIL, "sata IO status 0x%x\n", event);
if (unlikely(!t || !t->lldd_task || !t->dev))
return;
ts = &t->task_status;
- PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
- "port_id:0x%x, device_id:0x%x, tag:0x%x, event:0x%x\n",
- port_id, dev_id, tag, event));
+ pm8001_dbg(pm8001_ha, DEVIO,
+ "port_id:0x%x, device_id:0x%x, tag:0x%x, event:0x%x\n",
+ port_id, dev_id, tag, event);
switch (event) {
case IO_OVERFLOW:
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_UNDERFLOW\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
ts->residual = 0;
if (pm8001_dev)
- pm8001_dev->running_req--;
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_BREAK:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_INTERRUPTED;
break;
case IO_XFER_ERROR_PHY_NOT_READY:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT"
- "_SUPPORTED\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_EPROTO;
break;
case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_UNKNOWN;
break;
case IO_OPEN_CNX_ERROR_BREAK:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
break;
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n");
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_DEV_NO_RESPONSE;
if (!t->uldd_task) {
@@ -2820,94 +2725,82 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
}
break;
case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_BAD_DESTINATION\n");
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_BAD_DEST;
break;
case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_CONNECTION_RATE_"
- "NOT_SUPPORTED\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_CONN_RATE;
break;
case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
break;
case IO_XFER_ERROR_NAK_RECEIVED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_NAK_RECEIVED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_NAK_R_ERR;
break;
case IO_XFER_ERROR_PEER_ABORTED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_PEER_ABORTED\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PEER_ABORTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_NAK_R_ERR;
break;
case IO_XFER_ERROR_REJECTED_NCQ_MODE:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_REJECTED_NCQ_MODE\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_REJECTED_NCQ_MODE\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_UNDERRUN;
break;
case IO_XFER_OPEN_RETRY_TIMEOUT:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_TO;
break;
case IO_XFER_ERROR_UNEXPECTED_PHASE:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_UNEXPECTED_PHASE\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_UNEXPECTED_PHASE\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_TO;
break;
case IO_XFER_ERROR_XFER_RDY_OVERRUN:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_XFER_RDY_OVERRUN\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_XFER_RDY_OVERRUN\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_TO;
break;
case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_TO;
break;
case IO_XFER_ERROR_OFFSET_MISMATCH:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_OFFSET_MISMATCH\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_TO;
break;
case IO_XFER_ERROR_XFER_ZERO_DATA_LEN:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_TO;
break;
case IO_XFER_CMD_FRAME_ISSUED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_CMD_FRAME_ISSUED\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_CMD_FRAME_ISSUED\n");
break;
case IO_XFER_PIO_SETUP_ERROR:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_PIO_SETUP_ERROR\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_PIO_SETUP_ERROR\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_TO;
break;
default:
- PM8001_DEVIO_DBG(pm8001_ha,
- pm8001_printk("Unknown status 0x%x\n", event));
+ pm8001_dbg(pm8001_ha, DEVIO, "Unknown status 0x%x\n", event);
/* not allowed case. Therefore, return failed status */
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_TO;
@@ -2919,10 +2812,9 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
t->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
spin_unlock_irqrestore(&t->task_state_lock, flags);
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("task 0x%p done with io_status 0x%x"
- " resp 0x%x stat 0x%x but aborted by upper layer!\n",
- t, event, ts->resp, ts->stat));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
+ t, event, ts->resp, ts->stat);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
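
Most arms of these switches exist only to log the event's name before setting resp/stat. Where the codes are dense enough, a designated-initializer lookup table can carry the names instead of one case per string; a sketch of that alternative with an abbreviated, partly invented table:

#include <stdio.h>

/* Small illustrative subset of the firmware event codes. */
enum io_event {
	IO_OVERFLOW,
	IO_XFER_ERROR_BREAK,
	IO_XFER_OPEN_RETRY_TIMEOUT,
	IO_EVENT_MAX,
};

static const char *const io_event_name[IO_EVENT_MAX] = {
	[IO_OVERFLOW]			= "IO_OVERFLOW",
	[IO_XFER_ERROR_BREAK]		= "IO_XFER_ERROR_BREAK",
	[IO_XFER_OPEN_RETRY_TIMEOUT]	= "IO_XFER_OPEN_RETRY_TIMEOUT",
};

static const char *event_name(unsigned int ev)
{
	if (ev < IO_EVENT_MAX && io_event_name[ev])
		return io_event_name[ev];
	return "Unknown status";
}

int main(void)
{
	printf("%s\n", event_name(IO_XFER_ERROR_BREAK));
	printf("%s\n", event_name(0xff));	/* falls back, like default: */
	return 0;
}
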
@@ -2952,86 +2844,79 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts = &t->task_status;
pm8001_dev = ccb->device;
if (status) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("smp IO status 0x%x\n", status));
- PM8001_IOERR_DBG(pm8001_ha,
- pm8001_printk("status:0x%x, tag:0x%x, task:0x%p\n",
- status, tag, t));
+ pm8001_dbg(pm8001_ha, FAIL, "smp IO status 0x%x\n", status);
+ pm8001_dbg(pm8001_ha, IOERR,
+ "status:0x%x, tag:0x%x, task:0x%p\n",
+ status, tag, t);
}
if (unlikely(!t || !t->lldd_task || !t->dev))
return;
switch (status) {
case IO_SUCCESS:
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_SUCCESS\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAM_STAT_GOOD;
if (pm8001_dev)
- pm8001_dev->running_req--;
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_ABORTED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_ABORTED IOMB\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_ABORTED IOMB\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_ABORTED_TASK;
if (pm8001_dev)
- pm8001_dev->running_req--;
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_OVERFLOW:
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_UNDERFLOW\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
ts->residual = 0;
if (pm8001_dev)
- pm8001_dev->running_req--;
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_NO_DEVICE:
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_NO_DEVICE\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_NO_DEVICE\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_PHY_DOWN;
break;
case IO_ERROR_HW_TIMEOUT:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_ERROR_HW_TIMEOUT\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_ERROR_HW_TIMEOUT\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAM_STAT_BUSY;
break;
case IO_XFER_ERROR_BREAK:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAM_STAT_BUSY;
break;
case IO_XFER_ERROR_PHY_NOT_READY:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAM_STAT_BUSY;
break;
case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_UNKNOWN;
break;
case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_UNKNOWN;
break;
case IO_OPEN_CNX_ERROR_BREAK:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
break;
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_UNKNOWN;
@@ -3040,76 +2925,67 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
break;
case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_BAD_DESTINATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_BAD_DEST;
break;
case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_CONNECTION_RATE_"
- "NOT_SUPPORTED\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_CONN_RATE;
break;
case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
break;
case IO_XFER_ERROR_RX_FRAME:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_RX_FRAME\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_RX_FRAME\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
break;
case IO_XFER_OPEN_RETRY_TIMEOUT:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
case IO_ERROR_INTERNAL_SMP_RESOURCE:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_ERROR_INTERNAL_SMP_RESOURCE\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_ERROR_INTERNAL_SMP_RESOURCE\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_QUEUE_FULL;
break;
case IO_PORT_IN_RESET:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_PORT_IN_RESET\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_PORT_IN_RESET\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
case IO_DS_NON_OPERATIONAL:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_DS_NON_OPERATIONAL\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_DS_NON_OPERATIONAL\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
break;
case IO_DS_IN_RECOVERY:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_DS_IN_RECOVERY\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_DS_IN_RECOVERY\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
default:
- PM8001_DEVIO_DBG(pm8001_ha,
- pm8001_printk("Unknown status 0x%x\n", status));
+ pm8001_dbg(pm8001_ha, DEVIO, "Unknown status 0x%x\n", status);
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
/* not allowed case. Therefore, return failed status */
@@ -3121,10 +2997,8 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
t->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
spin_unlock_irqrestore(&t->task_state_lock, flags);
- PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("task 0x%p done with"
- " io_status 0x%x resp 0x%x "
- "stat 0x%x but aborted by upper layer!\n",
- t, status, ts->resp, ts->stat));
+ pm8001_dbg(pm8001_ha, FAIL, "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
+ t, status, ts->resp, ts->stat);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
@@ -3146,9 +3020,8 @@ void pm8001_mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha,
u32 device_id = le32_to_cpu(pPayload->device_id);
u8 pds = le32_to_cpu(pPayload->pds_nds) & PDS_BITS;
u8 nds = le32_to_cpu(pPayload->pds_nds) & NDS_BITS;
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk("Set device id = 0x%x state "
- "from 0x%x to 0x%x status = 0x%x!\n",
- device_id, pds, nds, status));
+ pm8001_dbg(pm8001_ha, MSG, "Set device id = 0x%x state from 0x%x to 0x%x status = 0x%x!\n",
+ device_id, pds, nds, status);
complete(pm8001_dev->setds_completion);
ccb->task = NULL;
ccb->ccb_tag = 0xFFFFFFFF;
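
The response handlers all retire their ccb the same way: clear ccb->task, park ccb->ccb_tag at 0xFFFFFFFF, and hand the tag back via pm8001_tag_free() so the slot can be reused. A minimal bitmap tag allocator in the same spirit (sizes and names are illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

#define NTAGS 32

static uint32_t tag_bitmap;	/* bit set = tag in use */

static int tag_alloc(uint32_t *tag)
{
	uint32_t i;

	for (i = 0; i < NTAGS; i++) {
		if (!(tag_bitmap & (1u << i))) {
			tag_bitmap |= 1u << i;
			*tag = i;
			return 0;
		}
	}
	return -1;	/* pool exhausted */
}

static void tag_free(uint32_t tag)
{
	tag_bitmap &= ~(1u << tag);
}

int main(void)
{
	uint32_t tag;

	if (tag_alloc(&tag) == 0) {
		printf("got tag %u\n", tag);
		tag_free(tag);	/* counterpart of pm8001_tag_free() */
	}
	return 0;
}
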
@@ -3163,10 +3036,9 @@ void pm8001_mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag];
u32 dlen_status = le32_to_cpu(pPayload->dlen_status);
complete(pm8001_ha->nvmd_completion);
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk("Set nvm data complete!\n"));
+ pm8001_dbg(pm8001_ha, MSG, "Set nvm data complete!\n");
if ((dlen_status & NVMD_STAT) != 0) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("Set nvm data error!\n"));
+ pm8001_dbg(pm8001_ha, FAIL, "Set nvm data error!\n");
return;
}
ccb->task = NULL;
@@ -3188,26 +3060,22 @@ pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
void *virt_addr = pm8001_ha->memoryMap.region[NVMD].virt_ptr;
fw_control_context = ccb->fw_control_context;
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk("Get nvm data complete!\n"));
+ pm8001_dbg(pm8001_ha, MSG, "Get nvm data complete!\n");
if ((dlen_status & NVMD_STAT) != 0) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("Get nvm data error!\n"));
+ pm8001_dbg(pm8001_ha, FAIL, "Get nvm data error!\n");
complete(pm8001_ha->nvmd_completion);
return;
}
if (ir_tds_bn_dps_das_nvm & IPMode) {
/* indirect mode - IR bit set */
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("Get NVMD success, IR=1\n"));
+ pm8001_dbg(pm8001_ha, MSG, "Get NVMD success, IR=1\n");
if ((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == TWI_DEVICE) {
if (ir_tds_bn_dps_das_nvm == 0x80a80200) {
memcpy(pm8001_ha->sas_addr,
((u8 *)virt_addr + 4),
SAS_ADDR_SIZE);
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("Get SAS address"
- " from VPD successfully!\n"));
+ pm8001_dbg(pm8001_ha, MSG, "Get SAS address from VPD successfully!\n");
}
} else if (((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == C_SEEPROM)
|| ((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == VPD_FLASH) ||
@@ -3218,14 +3086,14 @@ pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
;
} else {
/* Should not be happened*/
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("(IR=1)Wrong Device type 0x%x\n",
- ir_tds_bn_dps_das_nvm));
+ pm8001_dbg(pm8001_ha, MSG,
+ "(IR=1)Wrong Device type 0x%x\n",
+ ir_tds_bn_dps_das_nvm);
}
} else /* direct mode */{
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("Get NVMD success, IR=0, dataLen=%d\n",
- (dlen_status & NVMD_LEN) >> 24));
+ pm8001_dbg(pm8001_ha, MSG,
+ "Get NVMD success, IR=0, dataLen=%d\n",
+ (dlen_status & NVMD_LEN) >> 24);
}
/* Though fw_control_context is freed below, usrAddr still needs
* to be updated as this holds the response to the request function
@@ -3234,10 +3102,15 @@ pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_ha->memoryMap.region[NVMD].virt_ptr,
fw_control_context->len);
kfree(ccb->fw_control_context);
+ /* To avoid race condition, complete should be
+ * called after the message is copied to
+ * fw_control_context->usrAddr
+ */
+ complete(pm8001_ha->nvmd_completion);
+ pm8001_dbg(pm8001_ha, MSG, "Get nvm data complete!\n");
ccb->task = NULL;
ccb->ccb_tag = 0xFFFFFFFF;
pm8001_tag_free(pm8001_ha, tag);
- complete(pm8001_ha->nvmd_completion);
}
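
The reordering in this hunk is a functional fix, not just a logging cleanup: complete() now runs only after the NVMD response has been copied into fw_control_context->usrAddr, so the woken waiter can no longer read (or free) the buffer mid-copy. A condition-variable sketch of the same publish-then-signal ordering, with illustrative names:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct demo_completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
	char usr_addr[64];	/* buffer the waiter will read */
};

static void responder(struct demo_completion *c, const char *resp)
{
	pthread_mutex_lock(&c->lock);
	/* 1. Publish the data first... */
	strncpy(c->usr_addr, resp, sizeof(c->usr_addr) - 1);
	/* 2. ...then mark completion and wake the waiter. */
	c->done = 1;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void waiter(struct demo_completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	/* Safe: the copy is guaranteed to have finished before done = 1. */
	printf("got: %s\n", c->usr_addr);
	pthread_mutex_unlock(&c->lock);
}

int main(void)
{
	struct demo_completion c = {
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0, ""
	};

	responder(&c, "nvm data");
	waiter(&c);
	return 0;
}
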
int pm8001_mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb)
@@ -3250,13 +3123,13 @@ int pm8001_mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb)
u32 phy_op = le32_to_cpu(pPayload->phyop_phyid) & OP_BITS;
tag = le32_to_cpu(pPayload->tag);
if (status != 0) {
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("%x phy execute %x phy op failed!\n",
- phy_id, phy_op));
+ pm8001_dbg(pm8001_ha, MSG,
+ "%x phy execute %x phy op failed!\n",
+ phy_id, phy_op);
} else {
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("%x phy execute %x phy op success!\n",
- phy_id, phy_op));
+ pm8001_dbg(pm8001_ha, MSG,
+ "%x phy execute %x phy op success!\n",
+ phy_id, phy_op);
pm8001_ha->phy[phy_id].reset_success = true;
}
if (pm8001_ha->phy[phy_id].enable_completion) {
@@ -3303,7 +3176,7 @@ void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i)
} else if (phy->phy_type & PORT_TYPE_SATA) {
/*Nothing*/
}
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk("phy %d byte dmaded.\n", i));
+ pm8001_dbg(pm8001_ha, MSG, "phy %d byte dmaded.\n", i);
sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
pm8001_ha->sas->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
@@ -3426,37 +3299,34 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
u8 deviceType = pPayload->sas_identify.dev_type;
port->port_state = portstate;
phy->phy_state = PHY_STATE_LINK_UP_SPC;
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_SAS_PHY_UP port id = %d, phy id = %d\n",
- port_id, phy_id));
+ pm8001_dbg(pm8001_ha, MSG,
+ "HW_EVENT_SAS_PHY_UP port id = %d, phy id = %d\n",
+ port_id, phy_id);
switch (deviceType) {
case SAS_PHY_UNUSED:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("device type no device.\n"));
+ pm8001_dbg(pm8001_ha, MSG, "device type no device.\n");
break;
case SAS_END_DEVICE:
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk("end device.\n"));
+ pm8001_dbg(pm8001_ha, MSG, "end device.\n");
pm8001_chip_phy_ctl_req(pm8001_ha, phy_id,
PHY_NOTIFY_ENABLE_SPINUP);
port->port_attached = 1;
pm8001_get_lrate_mode(phy, link_rate);
break;
case SAS_EDGE_EXPANDER_DEVICE:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("expander device.\n"));
+ pm8001_dbg(pm8001_ha, MSG, "expander device.\n");
port->port_attached = 1;
pm8001_get_lrate_mode(phy, link_rate);
break;
case SAS_FANOUT_EXPANDER_DEVICE:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("fanout expander device.\n"));
+ pm8001_dbg(pm8001_ha, MSG, "fanout expander device.\n");
port->port_attached = 1;
pm8001_get_lrate_mode(phy, link_rate);
break;
default:
- PM8001_DEVIO_DBG(pm8001_ha,
- pm8001_printk("unknown device type(%x)\n", deviceType));
+ pm8001_dbg(pm8001_ha, DEVIO, "unknown device type(%x)\n",
+ deviceType);
break;
}
phy->phy_type |= PORT_TYPE_SAS;
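
hw_event_sas_phy_up keys three decisions off the identify frame's device type: whether to mark the port attached, whether to record the negotiated link rate, and, for end devices only, whether to request spin-up notification. A compact sketch of that decision as a pure function over an invented enum:

#include <stdbool.h>
#include <stdio.h>

enum demo_dev_type {
	DEV_PHY_UNUSED,
	DEV_END_DEVICE,
	DEV_EDGE_EXPANDER,
	DEV_FANOUT_EXPANDER,
};

struct phy_up_action {
	bool attach_port;	/* set port->port_attached */
	bool notify_spinup;	/* only SAS end devices need this */
};

static struct phy_up_action decode_phy_up(enum demo_dev_type t)
{
	switch (t) {
	case DEV_END_DEVICE:
		return (struct phy_up_action){ true, true };
	case DEV_EDGE_EXPANDER:
	case DEV_FANOUT_EXPANDER:
		return (struct phy_up_action){ true, false };
	default:	/* unused phy or unknown type: nothing to attach */
		return (struct phy_up_action){ false, false };
	}
}

int main(void)
{
	struct phy_up_action a = decode_phy_up(DEV_END_DEVICE);

	printf("attach=%d spinup=%d\n", a.attach_port, a.notify_spinup);
	return 0;
}
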
@@ -3502,9 +3372,8 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
struct sas_ha_struct *sas_ha = pm8001_ha->sas;
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
unsigned long flags;
- PM8001_DEVIO_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_SATA_PHY_UP port id = %d,"
- " phy id = %d\n", port_id, phy_id));
+ pm8001_dbg(pm8001_ha, DEVIO, "HW_EVENT_SATA_PHY_UP port id = %d, phy id = %d\n",
+ port_id, phy_id);
port->port_state = portstate;
phy->phy_state = PHY_STATE_LINK_UP_SPC;
port->port_attached = 1;
@@ -3552,37 +3421,35 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
case PORT_VALID:
break;
case PORT_INVALID:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" PortInvalid portID %d\n", port_id));
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" Last phy Down and port invalid\n"));
+ pm8001_dbg(pm8001_ha, MSG, " PortInvalid portID %d\n",
+ port_id);
+ pm8001_dbg(pm8001_ha, MSG,
+ " Last phy Down and port invalid\n");
port->port_attached = 0;
pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
port_id, phy_id, 0, 0);
break;
case PORT_IN_RESET:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" Port In Reset portID %d\n", port_id));
+ pm8001_dbg(pm8001_ha, MSG, " Port In Reset portID %d\n",
+ port_id);
break;
case PORT_NOT_ESTABLISHED:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" phy Down and PORT_NOT_ESTABLISHED\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ " phy Down and PORT_NOT_ESTABLISHED\n");
port->port_attached = 0;
break;
case PORT_LOSTCOMM:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" phy Down and PORT_LOSTCOMM\n"));
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" Last phy Down and port invalid\n"));
+ pm8001_dbg(pm8001_ha, MSG, " phy Down and PORT_LOSTCOMM\n");
+ pm8001_dbg(pm8001_ha, MSG,
+ " Last phy Down and port invalid\n");
port->port_attached = 0;
pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
port_id, phy_id, 0, 0);
break;
default:
port->port_attached = 0;
- PM8001_DEVIO_DBG(pm8001_ha,
- pm8001_printk(" phy Down and(default) = %x\n",
- portstate));
+ pm8001_dbg(pm8001_ha, DEVIO, " phy Down and(default) = %x\n",
+ portstate);
break;
}
@@ -3613,44 +3480,42 @@ int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_dev = ccb->device;
status = le32_to_cpu(registerRespPayload->status);
device_id = le32_to_cpu(registerRespPayload->device_id);
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" register device is status = %d\n", status));
+ pm8001_dbg(pm8001_ha, MSG, " register device is status = %d\n",
+ status);
switch (status) {
case DEVREG_SUCCESS:
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk("DEVREG_SUCCESS\n"));
+ pm8001_dbg(pm8001_ha, MSG, "DEVREG_SUCCESS\n");
pm8001_dev->device_id = device_id;
break;
case DEVREG_FAILURE_OUT_OF_RESOURCE:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("DEVREG_FAILURE_OUT_OF_RESOURCE\n"));
+ pm8001_dbg(pm8001_ha, MSG, "DEVREG_FAILURE_OUT_OF_RESOURCE\n");
break;
case DEVREG_FAILURE_DEVICE_ALREADY_REGISTERED:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("DEVREG_FAILURE_DEVICE_ALREADY_REGISTERED\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ "DEVREG_FAILURE_DEVICE_ALREADY_REGISTERED\n");
break;
case DEVREG_FAILURE_INVALID_PHY_ID:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("DEVREG_FAILURE_INVALID_PHY_ID\n"));
+ pm8001_dbg(pm8001_ha, MSG, "DEVREG_FAILURE_INVALID_PHY_ID\n");
break;
case DEVREG_FAILURE_PHY_ID_ALREADY_REGISTERED:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("DEVREG_FAILURE_PHY_ID_ALREADY_REGISTERED\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ "DEVREG_FAILURE_PHY_ID_ALREADY_REGISTERED\n");
break;
case DEVREG_FAILURE_PORT_ID_OUT_OF_RANGE:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("DEVREG_FAILURE_PORT_ID_OUT_OF_RANGE\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ "DEVREG_FAILURE_PORT_ID_OUT_OF_RANGE\n");
break;
case DEVREG_FAILURE_PORT_NOT_VALID_STATE:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("DEVREG_FAILURE_PORT_NOT_VALID_STATE\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ "DEVREG_FAILURE_PORT_NOT_VALID_STATE\n");
break;
case DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ "DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID\n");
break;
default:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("DEVREG_FAILURE_DEVICE_TYPE_NOT_SUPPORTED\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ "DEVREG_FAILURE_DEVICE_TYPE_NOT_SUPPORTED\n");
break;
}
complete(pm8001_dev->dcompletion);
@@ -3670,9 +3535,9 @@ int pm8001_mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
status = le32_to_cpu(registerRespPayload->status);
device_id = le32_to_cpu(registerRespPayload->device_id);
if (status != 0)
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" deregister device failed ,status = %x"
- ", device_id = %x\n", status, device_id));
+ pm8001_dbg(pm8001_ha, MSG,
+ " deregister device failed ,status = %x, device_id = %x\n",
+ status, device_id);
return 0;
}
@@ -3692,44 +3557,37 @@ int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha,
status = le32_to_cpu(ppayload->status);
switch (status) {
case FLASH_UPDATE_COMPLETE_PENDING_REBOOT:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(": FLASH_UPDATE_COMPLETE_PENDING_REBOOT\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ ": FLASH_UPDATE_COMPLETE_PENDING_REBOOT\n");
break;
case FLASH_UPDATE_IN_PROGRESS:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(": FLASH_UPDATE_IN_PROGRESS\n"));
+ pm8001_dbg(pm8001_ha, MSG, ": FLASH_UPDATE_IN_PROGRESS\n");
break;
case FLASH_UPDATE_HDR_ERR:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(": FLASH_UPDATE_HDR_ERR\n"));
+ pm8001_dbg(pm8001_ha, MSG, ": FLASH_UPDATE_HDR_ERR\n");
break;
case FLASH_UPDATE_OFFSET_ERR:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(": FLASH_UPDATE_OFFSET_ERR\n"));
+ pm8001_dbg(pm8001_ha, MSG, ": FLASH_UPDATE_OFFSET_ERR\n");
break;
case FLASH_UPDATE_CRC_ERR:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(": FLASH_UPDATE_CRC_ERR\n"));
+ pm8001_dbg(pm8001_ha, MSG, ": FLASH_UPDATE_CRC_ERR\n");
break;
case FLASH_UPDATE_LENGTH_ERR:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(": FLASH_UPDATE_LENGTH_ERR\n"));
+ pm8001_dbg(pm8001_ha, MSG, ": FLASH_UPDATE_LENGTH_ERR\n");
break;
case FLASH_UPDATE_HW_ERR:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(": FLASH_UPDATE_HW_ERR\n"));
+ pm8001_dbg(pm8001_ha, MSG, ": FLASH_UPDATE_HW_ERR\n");
break;
case FLASH_UPDATE_DNLD_NOT_SUPPORTED:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(": FLASH_UPDATE_DNLD_NOT_SUPPORTED\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ ": FLASH_UPDATE_DNLD_NOT_SUPPORTED\n");
break;
case FLASH_UPDATE_DISABLED:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(": FLASH_UPDATE_DISABLED\n"));
+ pm8001_dbg(pm8001_ha, MSG, ": FLASH_UPDATE_DISABLED\n");
break;
default:
- PM8001_DEVIO_DBG(pm8001_ha,
- pm8001_printk("No matched status = %d\n", status));
+ pm8001_dbg(pm8001_ha, DEVIO, "No matched status = %d\n",
+ status);
break;
}
kfree(ccb->fw_control_context);
@@ -3747,12 +3605,11 @@ int pm8001_mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
struct general_event_resp *pPayload =
(struct general_event_resp *)(piomb + 4);
status = le32_to_cpu(pPayload->status);
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" status = 0x%x\n", status));
+ pm8001_dbg(pm8001_ha, MSG, " status = 0x%x\n", status);
for (i = 0; i < GENERAL_EVENT_PAYLOAD; i++)
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("inb_IOMB_payload[0x%x] 0x%x,\n", i,
- pPayload->inb_IOMB_payload[i]));
+ pm8001_dbg(pm8001_ha, MSG, "inb_IOMB_payload[0x%x] 0x%x,\n",
+ i,
+ pPayload->inb_IOMB_payload[i]);
return 0;
}
@@ -3772,8 +3629,7 @@ int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
status = le32_to_cpu(pPayload->status);
tag = le32_to_cpu(pPayload->tag);
if (!tag) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk(" TAG NULL. RETURNING !!!"));
+ pm8001_dbg(pm8001_ha, FAIL, " TAG NULL. RETURNING !!!\n");
return -1;
}
@@ -3783,23 +3639,21 @@ int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_dev = ccb->device; /* retrieve device */
if (!t) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk(" TASK NULL. RETURNING !!!"));
+ pm8001_dbg(pm8001_ha, FAIL, " TASK NULL. RETURNING !!!\n");
return -1;
}
ts = &t->task_status;
if (status != 0)
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("task abort failed status 0x%x ,"
- "tag = 0x%x, scp= 0x%x\n", status, tag, scp));
+ pm8001_dbg(pm8001_ha, FAIL, "task abort failed status 0x%x ,tag = 0x%x, scp= 0x%x\n",
+ status, tag, scp);
switch (status) {
case IO_SUCCESS:
- PM8001_EH_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
+ pm8001_dbg(pm8001_ha, EH, "IO_SUCCESS\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAM_STAT_GOOD;
break;
case IO_NOT_VALID:
- PM8001_EH_DBG(pm8001_ha, pm8001_printk("IO_NOT_VALID\n"));
+ pm8001_dbg(pm8001_ha, EH, "IO_NOT_VALID\n");
ts->resp = TMF_RESP_FUNC_FAILED;
break;
}
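
Every handler above starts by pulling fields out of the IOMB with le32_to_cpu(), because the firmware writes little-endian words regardless of host byte order. A userspace sketch of the same decode using le32toh(); the packed layout is an invented example, not the real IOMB format:

#define _DEFAULT_SOURCE
#include <endian.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Invented response layout; the real IOMBs are defined by the firmware. */
struct demo_resp {
	uint32_t tag;
	uint32_t status;
} __attribute__((packed));

int main(void)
{
	/* Bytes as they arrive from the device: little-endian words. */
	const uint8_t wire[8] = { 0x2a, 0, 0, 0, 0x01, 0, 0, 0 };
	struct demo_resp resp;

	memcpy(&resp, wire, sizeof(resp));
	printf("tag 0x%" PRIx32 " status 0x%" PRIx32 "\n",
	       le32toh(resp.tag), le32toh(resp.status));
	return 0;
}
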
@@ -3844,14 +3698,13 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
struct sas_ha_struct *sas_ha = pm8001_ha->sas;
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id];
- PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
- "SPC HW event for portid:%d, phyid:%d, event:%x, status:%x\n",
- port_id, phy_id, eventType, status));
+ pm8001_dbg(pm8001_ha, DEVIO,
+ "SPC HW event for portid:%d, phyid:%d, event:%x, status:%x\n",
+ port_id, phy_id, eventType, status);
switch (eventType) {
case HW_EVENT_PHY_START_STATUS:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_PHY_START_STATUS"
- " status = %x\n", status));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_START_STATUS status = %x\n",
+ status);
if (status == 0) {
phy->phy_state = 1;
if (pm8001_ha->flags == PM8001F_RUN_TIME &&
@@ -3860,38 +3713,32 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
}
break;
case HW_EVENT_SAS_PHY_UP:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_PHY_START_STATUS\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_START_STATUS\n");
hw_event_sas_phy_up(pm8001_ha, piomb);
break;
case HW_EVENT_SATA_PHY_UP:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_SATA_PHY_UP\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_SATA_PHY_UP\n");
hw_event_sata_phy_up(pm8001_ha, piomb);
break;
case HW_EVENT_PHY_STOP_STATUS:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_PHY_STOP_STATUS "
- "status = %x\n", status));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_STOP_STATUS status = %x\n",
+ status);
if (status == 0)
phy->phy_state = 0;
break;
case HW_EVENT_SATA_SPINUP_HOLD:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_SATA_SPINUP_HOLD\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_SATA_SPINUP_HOLD\n");
sas_ha->notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD);
break;
case HW_EVENT_PHY_DOWN:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_PHY_DOWN\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_DOWN\n");
sas_ha->notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL);
phy->phy_attached = 0;
phy->phy_state = 0;
hw_event_phy_down(pm8001_ha, piomb);
break;
case HW_EVENT_PORT_INVALID:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_PORT_INVALID\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_INVALID\n");
sas_phy_disconnected(sas_phy);
phy->phy_attached = 0;
sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
@@ -3899,8 +3746,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
/* the broadcast change primitive received, tell the LIBSAS this event
to revalidate the sas domain*/
case HW_EVENT_BROADCAST_CHANGE:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_BROADCAST_CHANGE\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_BROADCAST_CHANGE\n");
pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_BROADCAST_CHANGE,
port_id, phy_id, 1, 0);
spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
@@ -3909,23 +3755,21 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
break;
case HW_EVENT_PHY_ERROR:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_PHY_ERROR\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_ERROR\n");
sas_phy_disconnected(&phy->sas_phy);
phy->phy_attached = 0;
sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR);
break;
case HW_EVENT_BROADCAST_EXP:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_BROADCAST_EXP\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_BROADCAST_EXP\n");
spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
sas_phy->sas_prim = HW_EVENT_BROADCAST_EXP;
spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
break;
case HW_EVENT_LINK_ERR_INVALID_DWORD:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_LINK_ERR_INVALID_DWORD\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ "HW_EVENT_LINK_ERR_INVALID_DWORD\n");
pm8001_hw_event_ack_req(pm8001_ha, 0,
HW_EVENT_LINK_ERR_INVALID_DWORD, port_id, phy_id, 0, 0);
sas_phy_disconnected(sas_phy);
@@ -3933,8 +3777,8 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
break;
case HW_EVENT_LINK_ERR_DISPARITY_ERROR:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_LINK_ERR_DISPARITY_ERROR\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ "HW_EVENT_LINK_ERR_DISPARITY_ERROR\n");
pm8001_hw_event_ack_req(pm8001_ha, 0,
HW_EVENT_LINK_ERR_DISPARITY_ERROR,
port_id, phy_id, 0, 0);
@@ -3943,8 +3787,8 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
break;
case HW_EVENT_LINK_ERR_CODE_VIOLATION:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_LINK_ERR_CODE_VIOLATION\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ "HW_EVENT_LINK_ERR_CODE_VIOLATION\n");
pm8001_hw_event_ack_req(pm8001_ha, 0,
HW_EVENT_LINK_ERR_CODE_VIOLATION,
port_id, phy_id, 0, 0);
@@ -3953,8 +3797,8 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
break;
case HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ "HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH\n");
pm8001_hw_event_ack_req(pm8001_ha, 0,
HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH,
port_id, phy_id, 0, 0);
@@ -3963,39 +3807,34 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
break;
case HW_EVENT_MALFUNCTION:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_MALFUNCTION\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_MALFUNCTION\n");
break;
case HW_EVENT_BROADCAST_SES:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_BROADCAST_SES\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_BROADCAST_SES\n");
spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
sas_phy->sas_prim = HW_EVENT_BROADCAST_SES;
spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
break;
case HW_EVENT_INBOUND_CRC_ERROR:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_INBOUND_CRC_ERROR\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_INBOUND_CRC_ERROR\n");
pm8001_hw_event_ack_req(pm8001_ha, 0,
HW_EVENT_INBOUND_CRC_ERROR,
port_id, phy_id, 0, 0);
break;
case HW_EVENT_HARD_RESET_RECEIVED:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_HARD_RESET_RECEIVED\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_HARD_RESET_RECEIVED\n");
sas_ha->notify_port_event(sas_phy, PORTE_HARD_RESET);
break;
case HW_EVENT_ID_FRAME_TIMEOUT:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_ID_FRAME_TIMEOUT\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_ID_FRAME_TIMEOUT\n");
sas_phy_disconnected(sas_phy);
phy->phy_attached = 0;
sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
break;
case HW_EVENT_LINK_ERR_PHY_RESET_FAILED:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_LINK_ERR_PHY_RESET_FAILED\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ "HW_EVENT_LINK_ERR_PHY_RESET_FAILED\n");
pm8001_hw_event_ack_req(pm8001_ha, 0,
HW_EVENT_LINK_ERR_PHY_RESET_FAILED,
port_id, phy_id, 0, 0);
@@ -4004,34 +3843,30 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
break;
case HW_EVENT_PORT_RESET_TIMER_TMO:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_PORT_RESET_TIMER_TMO\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_RESET_TIMER_TMO\n");
sas_phy_disconnected(sas_phy);
phy->phy_attached = 0;
sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
break;
case HW_EVENT_PORT_RECOVERY_TIMER_TMO:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_PORT_RECOVERY_TIMER_TMO\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ "HW_EVENT_PORT_RECOVERY_TIMER_TMO\n");
sas_phy_disconnected(sas_phy);
phy->phy_attached = 0;
sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
break;
case HW_EVENT_PORT_RECOVER:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_PORT_RECOVER\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_RECOVER\n");
break;
case HW_EVENT_PORT_RESET_COMPLETE:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_PORT_RESET_COMPLETE\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_RESET_COMPLETE\n");
break;
case EVENT_BROADCAST_ASYNCH_EVENT:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("EVENT_BROADCAST_ASYNCH_EVENT\n"));
+ pm8001_dbg(pm8001_ha, MSG, "EVENT_BROADCAST_ASYNCH_EVENT\n");
break;
default:
- PM8001_DEVIO_DBG(pm8001_ha,
- pm8001_printk("Unknown event type = %x\n", eventType));
+ pm8001_dbg(pm8001_ha, DEVIO, "Unknown event type = %x\n",
+ eventType);
break;
}
return 0;
@@ -4047,163 +3882,132 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
__le32 pHeader = *(__le32 *)piomb;
u8 opc = (u8)((le32_to_cpu(pHeader)) & 0xFFF);
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk("process_one_iomb:"));
+ pm8001_dbg(pm8001_ha, MSG, "process_one_iomb:\n");
switch (opc) {
case OPC_OUB_ECHO:
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_ECHO\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_ECHO\n");
break;
case OPC_OUB_HW_EVENT:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_HW_EVENT\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_HW_EVENT\n");
mpi_hw_event(pm8001_ha, piomb);
break;
case OPC_OUB_SSP_COMP:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SSP_COMP\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SSP_COMP\n");
mpi_ssp_completion(pm8001_ha, piomb);
break;
case OPC_OUB_SMP_COMP:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SMP_COMP\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SMP_COMP\n");
mpi_smp_completion(pm8001_ha, piomb);
break;
case OPC_OUB_LOCAL_PHY_CNTRL:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_LOCAL_PHY_CNTRL\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_LOCAL_PHY_CNTRL\n");
pm8001_mpi_local_phy_ctl(pm8001_ha, piomb);
break;
case OPC_OUB_DEV_REGIST:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_DEV_REGIST\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_DEV_REGIST\n");
pm8001_mpi_reg_resp(pm8001_ha, piomb);
break;
case OPC_OUB_DEREG_DEV:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("unregister the device\n"));
+ pm8001_dbg(pm8001_ha, MSG, "unregister the device\n");
pm8001_mpi_dereg_resp(pm8001_ha, piomb);
break;
case OPC_OUB_GET_DEV_HANDLE:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_GET_DEV_HANDLE\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GET_DEV_HANDLE\n");
break;
case OPC_OUB_SATA_COMP:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SATA_COMP\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SATA_COMP\n");
mpi_sata_completion(pm8001_ha, piomb);
break;
case OPC_OUB_SATA_EVENT:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SATA_EVENT\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SATA_EVENT\n");
mpi_sata_event(pm8001_ha, piomb);
break;
case OPC_OUB_SSP_EVENT:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SSP_EVENT\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SSP_EVENT\n");
mpi_ssp_event(pm8001_ha, piomb);
break;
case OPC_OUB_DEV_HANDLE_ARRIV:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_DEV_HANDLE_ARRIV\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_DEV_HANDLE_ARRIV\n");
/*This is for target*/
break;
case OPC_OUB_SSP_RECV_EVENT:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SSP_RECV_EVENT\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SSP_RECV_EVENT\n");
/*This is for target*/
break;
case OPC_OUB_DEV_INFO:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_DEV_INFO\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_DEV_INFO\n");
break;
case OPC_OUB_FW_FLASH_UPDATE:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_FW_FLASH_UPDATE\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_FW_FLASH_UPDATE\n");
pm8001_mpi_fw_flash_update_resp(pm8001_ha, piomb);
break;
case OPC_OUB_GPIO_RESPONSE:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_GPIO_RESPONSE\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GPIO_RESPONSE\n");
break;
case OPC_OUB_GPIO_EVENT:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_GPIO_EVENT\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GPIO_EVENT\n");
break;
case OPC_OUB_GENERAL_EVENT:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_GENERAL_EVENT\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GENERAL_EVENT\n");
pm8001_mpi_general_event(pm8001_ha, piomb);
break;
case OPC_OUB_SSP_ABORT_RSP:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SSP_ABORT_RSP\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SSP_ABORT_RSP\n");
pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
break;
case OPC_OUB_SATA_ABORT_RSP:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SATA_ABORT_RSP\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SATA_ABORT_RSP\n");
pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
break;
case OPC_OUB_SAS_DIAG_MODE_START_END:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SAS_DIAG_MODE_START_END\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ "OPC_OUB_SAS_DIAG_MODE_START_END\n");
break;
case OPC_OUB_SAS_DIAG_EXECUTE:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SAS_DIAG_EXECUTE\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SAS_DIAG_EXECUTE\n");
break;
case OPC_OUB_GET_TIME_STAMP:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_GET_TIME_STAMP\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GET_TIME_STAMP\n");
break;
case OPC_OUB_SAS_HW_EVENT_ACK:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SAS_HW_EVENT_ACK\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SAS_HW_EVENT_ACK\n");
break;
case OPC_OUB_PORT_CONTROL:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_PORT_CONTROL\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_PORT_CONTROL\n");
break;
case OPC_OUB_SMP_ABORT_RSP:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SMP_ABORT_RSP\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SMP_ABORT_RSP\n");
pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
break;
case OPC_OUB_GET_NVMD_DATA:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_GET_NVMD_DATA\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GET_NVMD_DATA\n");
pm8001_mpi_get_nvmd_resp(pm8001_ha, piomb);
break;
case OPC_OUB_SET_NVMD_DATA:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SET_NVMD_DATA\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SET_NVMD_DATA\n");
pm8001_mpi_set_nvmd_resp(pm8001_ha, piomb);
break;
case OPC_OUB_DEVICE_HANDLE_REMOVAL:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_DEVICE_HANDLE_REMOVAL\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_DEVICE_HANDLE_REMOVAL\n");
break;
case OPC_OUB_SET_DEVICE_STATE:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SET_DEVICE_STATE\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SET_DEVICE_STATE\n");
pm8001_mpi_set_dev_state_resp(pm8001_ha, piomb);
break;
case OPC_OUB_GET_DEVICE_STATE:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_GET_DEVICE_STATE\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GET_DEVICE_STATE\n");
break;
case OPC_OUB_SET_DEV_INFO:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SET_DEV_INFO\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SET_DEV_INFO\n");
break;
case OPC_OUB_SAS_RE_INITIALIZE:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SAS_RE_INITIALIZE\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SAS_RE_INITIALIZE\n");
break;
default:
- PM8001_DEVIO_DBG(pm8001_ha,
- pm8001_printk("Unknown outbound Queue IOMB OPC = %x\n",
- opc));
+ pm8001_dbg(pm8001_ha, DEVIO,
+ "Unknown outbound Queue IOMB OPC = %x\n",
+ opc);
break;
}
}
@@ -4416,19 +4220,19 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
circularQ = &pm8001_ha->inbnd_q_tbl[0];
if (task->data_dir == DMA_NONE) {
ATAP = 0x04; /* no data*/
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("no data\n"));
+ pm8001_dbg(pm8001_ha, IO, "no data\n");
} else if (likely(!task->ata_task.device_control_reg_update)) {
if (task->ata_task.dma_xfer) {
ATAP = 0x06; /* DMA */
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("DMA\n"));
+ pm8001_dbg(pm8001_ha, IO, "DMA\n");
} else {
ATAP = 0x05; /* PIO*/
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("PIO\n"));
+ pm8001_dbg(pm8001_ha, IO, "PIO\n");
}
if (task->ata_task.use_ncq &&
dev->sata_dev.class != ATA_DEV_ATAPI) {
ATAP = 0x07; /* FPDMA */
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("FPDMA\n"));
+ pm8001_dbg(pm8001_ha, IO, "FPDMA\n");
}
}
if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) {
@@ -4485,10 +4289,10 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
SAS_TASK_STATE_ABORTED))) {
spin_unlock_irqrestore(&task->task_state_lock,
flags);
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("task 0x%p resp 0x%x "
- " stat 0x%x but aborted by upper layer "
- "\n", task, ts->resp, ts->stat));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "task 0x%p resp 0x%x stat 0x%x but aborted by upper layer\n",
+ task, ts->resp, ts->stat);
pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
} else {
spin_unlock_irqrestore(&task->task_state_lock,
@@ -4637,8 +4441,8 @@ int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha,
memset(&payload, 0, sizeof(payload));
payload.tag = cpu_to_le32(1);
payload.device_id = cpu_to_le32(device_id);
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("unregister device device_id = %d\n", device_id));
+ pm8001_dbg(pm8001_ha, MSG, "unregister device device_id = %d\n",
+ device_id);
ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
sizeof(payload), 0);
return ret;
@@ -4690,9 +4494,9 @@ static irqreturn_t
pm8001_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
pm8001_chip_interrupt_disable(pm8001_ha, vec);
- PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
- "irq vec %d, ODMR:0x%x\n",
- vec, pm8001_cr32(pm8001_ha, 0, 0x30)));
+ pm8001_dbg(pm8001_ha, DEVIO,
+ "irq vec %d, ODMR:0x%x\n",
+ vec, pm8001_cr32(pm8001_ha, 0, 0x30));
process_oq(pm8001_ha, vec);
pm8001_chip_interrupt_enable(pm8001_ha, vec);
return IRQ_HANDLED;
@@ -4729,9 +4533,8 @@ int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha,
{
u32 opc, device_id;
int rc = TMF_RESP_FUNC_FAILED;
- PM8001_EH_DBG(pm8001_ha,
- pm8001_printk("cmd_tag = %x, abort task tag = 0x%x",
- cmd_tag, task_tag));
+ pm8001_dbg(pm8001_ha, EH, "cmd_tag = %x, abort task tag = 0x%x\n",
+ cmd_tag, task_tag);
if (pm8001_dev->dev_type == SAS_END_DEVICE)
opc = OPC_INB_SSP_ABORT;
else if (pm8001_dev->dev_type == SAS_SATA_DEV)
@@ -4742,7 +4545,7 @@ int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha,
rc = send_task_abort(pm8001_ha, opc, device_id, flag,
task_tag, cmd_tag);
if (rc != TMF_RESP_FUNC_COMPLETE)
- PM8001_EH_DBG(pm8001_ha, pm8001_printk("rc= %d\n", rc));
+ pm8001_dbg(pm8001_ha, EH, "rc= %d\n", rc);
return rc;
}
@@ -5008,8 +4811,9 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
if (!fw_control_context)
return -ENOMEM;
fw_control = (struct fw_control_info *)&ioctl_payload->func_specific;
- PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
- "dma fw_control context input length :%x\n", fw_control->len));
+ pm8001_dbg(pm8001_ha, DEVIO,
+ "dma fw_control context input length :%x\n",
+ fw_control->len);
memcpy(buffer, fw_control->buffer, fw_control->len);
flash_update_info.sgl.addr = cpu_to_le64(phys_addr);
flash_update_info.sgl.im_len.len = cpu_to_le32(fw_control->len);
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 3cf3e58b6979..ee2de177d0d0 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -271,15 +271,14 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha,
spin_lock_init(&pm8001_ha->lock);
spin_lock_init(&pm8001_ha->bitmap_lock);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("pm8001_alloc: PHY:%x\n",
- pm8001_ha->chip->n_phy));
+ pm8001_dbg(pm8001_ha, INIT, "pm8001_alloc: PHY:%x\n",
+ pm8001_ha->chip->n_phy);
/* Setup Interrupt */
rc = pm8001_setup_irq(pm8001_ha);
if (rc) {
- PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
- "pm8001_setup_irq failed [ret: %d]\n", rc));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "pm8001_setup_irq failed [ret: %d]\n", rc);
goto err_out_shost;
}
/* Request Interrupt */
@@ -387,17 +386,17 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha,
pm8001_ha->memoryMap.region[FORENSIC_MEM].element_size = 0x10000;
pm8001_ha->memoryMap.region[FORENSIC_MEM].alignment = 0x10000;
for (i = 0; i < pm8001_ha->max_memcnt; i++) {
+ struct mpi_mem *region = &pm8001_ha->memoryMap.region[i];
+
if (pm8001_mem_alloc(pm8001_ha->pdev,
- &pm8001_ha->memoryMap.region[i].virt_ptr,
- &pm8001_ha->memoryMap.region[i].phys_addr,
- &pm8001_ha->memoryMap.region[i].phys_addr_hi,
- &pm8001_ha->memoryMap.region[i].phys_addr_lo,
- pm8001_ha->memoryMap.region[i].total_len,
- pm8001_ha->memoryMap.region[i].alignment) != 0) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("Mem%d alloc failed\n",
- i));
- goto err_out;
+ &region->virt_ptr,
+ &region->phys_addr,
+ &region->phys_addr_hi,
+ &region->phys_addr_lo,
+ region->total_len,
+ region->alignment) != 0) {
+ pm8001_dbg(pm8001_ha, FAIL, "Mem%d alloc failed\n", i);
+ goto err_out;
}
}
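
The hunk above is a common readability refactor: hoist a long, repeated member chain into a local alias pointer. A minimal sketch of the pattern (types and the array size are invented for illustration):

#include <stddef.h>

struct mem_region { void *virt_ptr; unsigned long total_len; };
struct memory_map { struct mem_region region[8]; };

static void reset_regions(struct memory_map *map)
{
	for (int i = 0; i < 8; i++) {
		struct mem_region *region = &map->region[i]; /* alias, as in the diff */

		region->virt_ptr = NULL;
		region->total_len = 0;
	}
}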
@@ -412,7 +411,7 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha,
pm8001_ha->devices[i].dev_type = SAS_PHY_UNUSED;
pm8001_ha->devices[i].id = i;
pm8001_ha->devices[i].device_id = PM8001_MAX_DEVICES;
- pm8001_ha->devices[i].running_req = 0;
+ atomic_set(&pm8001_ha->devices[i].running_req, 0);
}
pm8001_ha->flags = PM8001F_INIT_TIME;
/* Initialize tags */
@@ -467,15 +466,15 @@ static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->io_mem[logicalBar].memvirtaddr =
ioremap(pm8001_ha->io_mem[logicalBar].membase,
pm8001_ha->io_mem[logicalBar].memsize);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("PCI: bar %d, logicalBar %d ",
- bar, logicalBar));
- PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
- "base addr %llx virt_addr=%llx len=%d\n",
- (u64)pm8001_ha->io_mem[logicalBar].membase,
- (u64)(unsigned long)
- pm8001_ha->io_mem[logicalBar].memvirtaddr,
- pm8001_ha->io_mem[logicalBar].memsize));
+ pm8001_dbg(pm8001_ha, INIT,
+ "PCI: bar %d, logicalBar %d\n",
+ bar, logicalBar);
+ pm8001_dbg(pm8001_ha, INIT,
+ "base addr %llx virt_addr=%llx len=%d\n",
+ (u64)pm8001_ha->io_mem[logicalBar].membase,
+ (u64)(unsigned long)
+ pm8001_ha->io_mem[logicalBar].memvirtaddr,
+ pm8001_ha->io_mem[logicalBar].memsize);
} else {
pm8001_ha->io_mem[logicalBar].membase = 0;
pm8001_ha->io_mem[logicalBar].memsize = 0;
@@ -520,8 +519,8 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev,
else {
pm8001_ha->link_rate = LINKRATE_15 | LINKRATE_30 |
LINKRATE_60 | LINKRATE_120;
- PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
- "Setting link rate to default value\n"));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "Setting link rate to default value\n");
}
sprintf(pm8001_ha->name, "%s%d", DRV_NAME, pm8001_ha->id);
/* IOMB size is 128 for 8088/89 controllers */
@@ -684,13 +683,13 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
payload.offset = 0;
payload.func_specific = kzalloc(payload.rd_length, GFP_KERNEL);
if (!payload.func_specific) {
- PM8001_INIT_DBG(pm8001_ha, pm8001_printk("mem alloc fail\n"));
+ pm8001_dbg(pm8001_ha, INIT, "mem alloc fail\n");
return;
}
rc = PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
if (rc) {
kfree(payload.func_specific);
- PM8001_INIT_DBG(pm8001_ha, pm8001_printk("nvmd failed\n"));
+ pm8001_dbg(pm8001_ha, INIT, "nvmd failed\n");
return;
}
wait_for_completion(&completion);
@@ -718,9 +717,8 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
sas_add[7] = sas_add[7] + 4;
memcpy(&pm8001_ha->phy[i].dev_sas_addr,
sas_add, SAS_ADDR_SIZE);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("phy %d sas_addr = %016llx\n", i,
- pm8001_ha->phy[i].dev_sas_addr));
+ pm8001_dbg(pm8001_ha, INIT, "phy %d sas_addr = %016llx\n", i,
+ pm8001_ha->phy[i].dev_sas_addr);
}
kfree(payload.func_specific);
#else
@@ -760,7 +758,7 @@ static int pm8001_get_phy_settings_info(struct pm8001_hba_info *pm8001_ha)
rc = PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
if (rc) {
kfree(payload.func_specific);
- PM8001_INIT_DBG(pm8001_ha, pm8001_printk("nvmd failed\n"));
+ pm8001_dbg(pm8001_ha, INIT, "nvmd failed\n");
return -ENOMEM;
}
wait_for_completion(&completion);
@@ -854,9 +852,9 @@ void pm8001_get_phy_mask(struct pm8001_hba_info *pm8001_ha, int *phymask)
break;
default:
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("Unknown subsystem device=0x%.04x",
- pm8001_ha->pdev->subsystem_device));
+ pm8001_dbg(pm8001_ha, INIT,
+ "Unknown subsystem device=0x%.04x\n",
+ pm8001_ha->pdev->subsystem_device);
}
}
@@ -950,9 +948,9 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
/* Maximum queue number updating in HBA structure */
pm8001_ha->max_q_num = number_of_intr;
- PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
- "pci_alloc_irq_vectors request ret:%d no of intr %d\n",
- rc, pm8001_ha->number_of_intr));
+ pm8001_dbg(pm8001_ha, INIT,
+ "pci_alloc_irq_vectors request ret:%d no of intr %d\n",
+ rc, pm8001_ha->number_of_intr);
return 0;
}
@@ -964,9 +962,9 @@ static u32 pm8001_request_msix(struct pm8001_hba_info *pm8001_ha)
if (pm8001_ha->chip_id != chip_8001)
flag &= ~IRQF_SHARED;
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("pci_enable_msix request number of intr %d\n",
- pm8001_ha->number_of_intr));
+ pm8001_dbg(pm8001_ha, INIT,
+ "pci_enable_msix request number of intr %d\n",
+ pm8001_ha->number_of_intr);
for (i = 0; i < pm8001_ha->number_of_intr; i++) {
snprintf(pm8001_ha->intr_drvname[i],
@@ -1002,8 +1000,7 @@ static u32 pm8001_setup_irq(struct pm8001_hba_info *pm8001_ha)
#ifdef PM8001_USE_MSIX
if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
return pm8001_setup_msix(pm8001_ha);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("MSIX not supported!!!\n"));
+ pm8001_dbg(pm8001_ha, INIT, "MSIX not supported!!!\n");
#endif
return 0;
}
@@ -1023,8 +1020,7 @@ static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha)
if (pdev->msix_cap && pci_msi_enabled())
return pm8001_request_msix(pm8001_ha);
else {
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("MSIX not supported!!!\n"));
+ pm8001_dbg(pm8001_ha, INIT, "MSIX not supported!!!\n");
goto intx;
}
#endif
@@ -1108,8 +1104,8 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
rc = PM8001_CHIP_DISP->chip_init(pm8001_ha);
if (rc) {
- PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
- "chip_init failed [ret: %d]\n", rc));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "chip_init failed [ret: %d]\n", rc);
goto err_out_ha_free;
}
@@ -1131,14 +1127,15 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
pm8001_init_sas_add(pm8001_ha);
/* phy setting support for motherboard controller */
- if (pm8001_configure_phy_settings(pm8001_ha))
+ rc = pm8001_configure_phy_settings(pm8001_ha);
+ if (rc)
goto err_out_shost;
pm8001_post_sas_ha_init(shost, chip);
rc = sas_register_ha(SHOST_TO_SAS_HA(shost));
if (rc) {
- PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
- "sas_register_ha failed [ret: %d]\n", rc));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "sas_register_ha failed [ret: %d]\n", rc);
goto err_out_shost;
}
list_add_tail(&pm8001_ha->list, &hba_list);
@@ -1187,11 +1184,11 @@ pm8001_init_ccb_tag(struct pm8001_hba_info *pm8001_ha, struct Scsi_Host *shost,
goto err_out;
/* Memory region for ccb_info*/
- pm8001_ha->ccb_info = (struct pm8001_ccb_info *)
+ pm8001_ha->ccb_info =
kcalloc(ccb_count, sizeof(struct pm8001_ccb_info), GFP_KERNEL);
if (!pm8001_ha->ccb_info) {
- PM8001_FAIL_DBG(pm8001_ha, pm8001_printk
- ("Unable to allocate memory for ccb\n"));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "Unable to allocate memory for ccb\n");
goto err_out_noccb;
}
for (i = 0; i < ccb_count; i++) {
@@ -1199,8 +1196,8 @@ pm8001_init_ccb_tag(struct pm8001_hba_info *pm8001_ha, struct Scsi_Host *shost,
sizeof(struct pm8001_prd) * PM8001_MAX_DMA_SG,
&pm8001_ha->ccb_info[i].ccb_dma_handle);
if (!pm8001_ha->ccb_info[i].buf_prd) {
- PM8001_FAIL_DBG(pm8001_ha, pm8001_printk
- ("pm80xx: ccb prd memory allocation error\n"));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "pm80xx: ccb prd memory allocation error\n");
goto err_out;
}
pm8001_ha->ccb_info[i].task = NULL;
@@ -1257,23 +1254,21 @@ static void pm8001_pci_remove(struct pci_dev *pdev)
/**
* pm8001_pci_suspend - power management suspend main entry point
- * @pdev: PCI device struct
- * @state: PM state change to (usually PCI_D3)
+ * @dev: Device struct
*
* Returns 0 success, anything else error.
*/
-static int pm8001_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused pm8001_pci_suspend(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct sas_ha_struct *sha = pci_get_drvdata(pdev);
- struct pm8001_hba_info *pm8001_ha;
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
int i, j;
- u32 device_state;
- pm8001_ha = sha->lldd_ha;
sas_suspend_ha(sha);
flush_workqueue(pm8001_wq);
scsi_block_requests(pm8001_ha->shost);
if (!pdev->pm_cap) {
- dev_err(&pdev->dev, " PCI PM not supported\n");
+ dev_err(dev, " PCI PM not supported\n");
return -ENODEV;
}
PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF);
@@ -1296,24 +1291,21 @@ static int pm8001_pci_suspend(struct pci_dev *pdev, pm_message_t state)
for (j = 0; j < PM8001_MAX_MSIX_VEC; j++)
tasklet_kill(&pm8001_ha->tasklet[j]);
#endif
- device_state = pci_choose_state(pdev, state);
- pm8001_printk("pdev=0x%p, slot=%s, entering "
- "operating state [D%d]\n", pdev,
- pm8001_ha->name, device_state);
- pci_save_state(pdev);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, device_state);
+ pm8001_info(pm8001_ha, "pdev=0x%p, slot=%s, entering "
+ "suspended state\n", pdev,
+ pm8001_ha->name);
return 0;
}
/**
* pm8001_pci_resume - power management resume main entry point
- * @pdev: PCI device struct
+ * @dev: Device struct
*
* Returns 0 success, anything else error.
*/
-static int pm8001_pci_resume(struct pci_dev *pdev)
+static int __maybe_unused pm8001_pci_resume(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct sas_ha_struct *sha = pci_get_drvdata(pdev);
struct pm8001_hba_info *pm8001_ha;
int rc;
@@ -1323,20 +1315,9 @@ static int pm8001_pci_resume(struct pci_dev *pdev)
pm8001_ha = sha->lldd_ha;
device_state = pdev->current_state;
- pm8001_printk("pdev=0x%p, slot=%s, resuming from previous "
- "operating state [D%d]\n", pdev, pm8001_ha->name, device_state);
+ pm8001_info(pm8001_ha, "pdev=0x%p, slot=%s, resuming from previous operating state [D%d]\n",
+ pdev, pm8001_ha->name, device_state);
- pci_set_power_state(pdev, PCI_D0);
- pci_enable_wake(pdev, PCI_D0, 0);
- pci_restore_state(pdev);
- rc = pci_enable_device(pdev);
- if (rc) {
- pm8001_printk("slot=%s Enable device failed during resume\n",
- pm8001_ha->name);
- goto err_out_enable;
- }
-
- pci_set_master(pdev);
rc = pci_go_44(pdev);
if (rc)
goto err_out_disable;
@@ -1344,8 +1325,7 @@ static int pm8001_pci_resume(struct pci_dev *pdev)
/* chip soft rst only for spc */
if (pm8001_ha->chip_id == chip_8001) {
PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("chip soft reset successful\n"));
+ pm8001_dbg(pm8001_ha, INIT, "chip soft reset successful\n");
}
rc = PM8001_CHIP_DISP->chip_init(pm8001_ha);
if (rc)
@@ -1397,8 +1377,7 @@ static int pm8001_pci_resume(struct pci_dev *pdev)
err_out_disable:
scsi_remove_host(pm8001_ha->shost);
- pci_disable_device(pdev);
-err_out_enable:
+
return rc;
}
@@ -1481,13 +1460,16 @@ static struct pci_device_id pm8001_pci_table[] = {
{} /* terminate list */
};
+static SIMPLE_DEV_PM_OPS(pm8001_pci_pm_ops,
+ pm8001_pci_suspend,
+ pm8001_pci_resume);
+
static struct pci_driver pm8001_pci_driver = {
.name = DRV_NAME,
.id_table = pm8001_pci_table,
.probe = pm8001_pci_probe,
.remove = pm8001_pci_remove,
- .suspend = pm8001_pci_suspend,
- .resume = pm8001_pci_resume,
+ .driver.pm = &pm8001_pci_pm_ops,
};
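
The conversion above drops the manual pci_save_state()/pci_set_power_state() calls because, for drivers using dev_pm_ops, the PCI core performs config-space save/restore and D-state transitions around the callbacks. A minimal skeleton of the wiring, assuming a kernel build environment (all `ex_` names are illustrative):

#include <linux/pci.h>
#include <linux/pm.h>

static int ex_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	dev_dbg(&pdev->dev, "quiescing device\n");
	/* No pci_save_state()/pci_set_power_state() here: the PCI core
	 * performs them around dev_pm_ops callbacks. */
	return 0;
}

static int ex_resume(struct device *dev)
{
	dev_dbg(dev, "reinitializing device\n");
	return 0;
}

static SIMPLE_DEV_PM_OPS(ex_pm_ops, ex_suspend, ex_resume);

static struct pci_driver ex_driver = {
	.name      = "ex",
	.driver.pm = &ex_pm_ops,
	/* .id_table, .probe and .remove omitted for brevity */
};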
/**
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index 9889bab7d31c..d1e9dba2ef19 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -250,8 +250,7 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
return 0;
default:
- PM8001_DEVIO_DBG(pm8001_ha,
- pm8001_printk("func 0x%x\n", func));
+ pm8001_dbg(pm8001_ha, DEVIO, "func 0x%x\n", func);
rc = -EOPNOTSUPP;
}
msleep(300);
@@ -405,7 +404,7 @@ static int pm8001_task_exec(struct sas_task *task,
t->task_done(t);
return 0;
}
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("pm8001_task_exec device \n "));
+ pm8001_dbg(pm8001_ha, IO, "pm8001_task_exec device\n");
spin_lock_irqsave(&pm8001_ha->lock, flags);
do {
dev = t->dev;
@@ -456,9 +455,11 @@ static int pm8001_task_exec(struct sas_task *task,
ccb->device = pm8001_dev;
switch (task_proto) {
case SAS_PROTOCOL_SMP:
+ atomic_inc(&pm8001_dev->running_req);
rc = pm8001_task_prep_smp(pm8001_ha, ccb);
break;
case SAS_PROTOCOL_SSP:
+ atomic_inc(&pm8001_dev->running_req);
if (is_tmf)
rc = pm8001_task_prep_ssp_tm(pm8001_ha,
ccb, tmf);
@@ -467,6 +468,7 @@ static int pm8001_task_exec(struct sas_task *task,
break;
case SAS_PROTOCOL_SATA:
case SAS_PROTOCOL_STP:
+ atomic_inc(&pm8001_dev->running_req);
rc = pm8001_task_prep_ata(pm8001_ha, ccb);
break;
default:
@@ -477,15 +479,14 @@ static int pm8001_task_exec(struct sas_task *task,
}
if (rc) {
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("rc is %x\n", rc));
+ pm8001_dbg(pm8001_ha, IO, "rc is %x\n", rc);
+ atomic_dec(&pm8001_dev->running_req);
goto err_out_tag;
}
/* TODO: select normal or high priority */
spin_lock(&t->task_state_lock);
t->task_state_flags |= SAS_TASK_AT_INITIATOR;
spin_unlock(&t->task_state_lock);
- pm8001_dev->running_req++;
} while (0);
rc = 0;
goto out_done;
@@ -567,9 +568,9 @@ static struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha)
}
}
if (dev == PM8001_MAX_DEVICES) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("max support %d devices, ignore ..\n",
- PM8001_MAX_DEVICES));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "max support %d devices, ignore ..\n",
+ PM8001_MAX_DEVICES);
}
return NULL;
}
@@ -587,8 +588,7 @@ struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha,
return &pm8001_ha->devices[dev];
}
if (dev == PM8001_MAX_DEVICES) {
- PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("NO MATCHING "
- "DEVICE FOUND !!!\n"));
+ pm8001_dbg(pm8001_ha, FAIL, "NO MATCHING DEVICE FOUND !!!\n");
}
return NULL;
}
@@ -649,10 +649,10 @@ static int pm8001_dev_found_notify(struct domain_device *dev)
}
}
if (phy_id == parent_dev->ex_dev.num_phys) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("Error: no attached dev:%016llx"
- " at ex:%016llx.\n", SAS_ADDR(dev->sas_addr),
- SAS_ADDR(parent_dev->sas_addr)));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "Error: no attached dev:%016llx at ex:%016llx.\n",
+ SAS_ADDR(dev->sas_addr),
+ SAS_ADDR(parent_dev->sas_addr));
res = -1;
}
} else {
@@ -662,7 +662,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev)
flag = 1; /* directly sata */
}
} /*register this device to HBA*/
- PM8001_DISC_DBG(pm8001_ha, pm8001_printk("Found device\n"));
+ pm8001_dbg(pm8001_ha, DISC, "Found device\n");
PM8001_CHIP_DISP->reg_dev_req(pm8001_ha, pm8001_device, flag);
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
wait_for_completion(&completion);
@@ -734,9 +734,7 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
if (res) {
del_timer(&task->slow_task->timer);
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("Executing internal task "
- "failed\n"));
+ pm8001_dbg(pm8001_ha, FAIL, "Executing internal task failed\n");
goto ex_err;
}
wait_for_completion(&task->slow_task->completion);
@@ -750,9 +748,9 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
/* Even TMF timed out, return direct. */
if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("TMF task[%x]timeout.\n",
- tmf->tmf));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "TMF task[%x]timeout.\n",
+ tmf->tmf);
goto ex_err;
}
}
@@ -773,17 +771,15 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
if (task->task_status.resp == SAS_TASK_COMPLETE &&
task->task_status.stat == SAS_DATA_OVERRUN) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("Blocked task error.\n"));
+ pm8001_dbg(pm8001_ha, FAIL, "Blocked task error.\n");
res = -EMSGSIZE;
break;
} else {
- PM8001_EH_DBG(pm8001_ha,
- pm8001_printk(" Task to dev %016llx response:"
- "0x%x status 0x%x\n",
- SAS_ADDR(dev->sas_addr),
- task->task_status.resp,
- task->task_status.stat));
+ pm8001_dbg(pm8001_ha, EH,
+ " Task to dev %016llx response:0x%x status 0x%x\n",
+ SAS_ADDR(dev->sas_addr),
+ task->task_status.resp,
+ task->task_status.stat);
sas_free_task(task);
task = NULL;
}
@@ -830,9 +826,7 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
if (res) {
del_timer(&task->slow_task->timer);
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("Executing internal task "
- "failed\n"));
+ pm8001_dbg(pm8001_ha, FAIL, "Executing internal task failed\n");
goto ex_err;
}
wait_for_completion(&task->slow_task->completion);
@@ -840,8 +834,8 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
/* Even TMF timed out, return direct. */
if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("TMF task timeout.\n"));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "TMF task timeout.\n");
goto ex_err;
}
}
@@ -852,12 +846,11 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
break;
} else {
- PM8001_EH_DBG(pm8001_ha,
- pm8001_printk(" Task to dev %016llx response: "
- "0x%x status 0x%x\n",
- SAS_ADDR(dev->sas_addr),
- task->task_status.resp,
- task->task_status.stat));
+ pm8001_dbg(pm8001_ha, EH,
+ " Task to dev %016llx response: 0x%x status 0x%x\n",
+ SAS_ADDR(dev->sas_addr),
+ task->task_status.resp,
+ task->task_status.stat);
sas_free_task(task);
task = NULL;
}
@@ -883,22 +876,20 @@ static void pm8001_dev_gone_notify(struct domain_device *dev)
if (pm8001_dev) {
u32 device_id = pm8001_dev->device_id;
- PM8001_DISC_DBG(pm8001_ha,
- pm8001_printk("found dev[%d:%x] is gone.\n",
- pm8001_dev->device_id, pm8001_dev->dev_type));
- if (pm8001_dev->running_req) {
+ pm8001_dbg(pm8001_ha, DISC, "found dev[%d:%x] is gone.\n",
+ pm8001_dev->device_id, pm8001_dev->dev_type);
+ if (atomic_read(&pm8001_dev->running_req)) {
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
dev, 1, 0);
- while (pm8001_dev->running_req)
+ while (atomic_read(&pm8001_dev->running_req))
msleep(20);
spin_lock_irqsave(&pm8001_ha->lock, flags);
}
PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id);
pm8001_free_dev(pm8001_dev);
} else {
- PM8001_DISC_DBG(pm8001_ha,
- pm8001_printk("Found dev has gone.\n"));
+ pm8001_dbg(pm8001_ha, DISC, "Found dev has gone.\n");
}
dev->lldd_dev = NULL;
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
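
Switching running_req to atomic_t lets the submit and completion paths count in-flight requests without the HBA lock, while the gone-notify path drains by polling the counter. A runnable userspace sketch of the same protocol using C11 atomics (the completion call inside the loop stands in for real I/O completion):

#include <stdatomic.h>
#include <stdio.h>

/* Per-device in-flight request counter, mirroring running_req. */
static atomic_int running_req = 0;

static void submit_one(void)   { atomic_fetch_add(&running_req, 1); }
static void complete_one(void) { atomic_fetch_sub(&running_req, 1); }

int main(void)
{
	submit_one();
	submit_one();
	complete_one();

	/* Drain loop as in pm8001_dev_gone_notify(): wait until the
	 * counter hits zero before tearing the device down. */
	while (atomic_load(&running_req) > 0) {
		complete_one();  /* stands in for an I/O completing */
		printf("draining, %d left\n", atomic_load(&running_req));
	}
	return 0;
}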
@@ -916,7 +907,7 @@ static int pm8001_issue_ssp_tmf(struct domain_device *dev,
if (!(dev->tproto & SAS_PROTOCOL_SSP))
return TMF_RESP_FUNC_ESUPP;
- strncpy((u8 *)&ssp_task.LUN, lun, 8);
+ memcpy((u8 *)&ssp_task.LUN, lun, 8);
return pm8001_exec_internal_tmf_task(dev, &ssp_task, sizeof(ssp_task),
tmf);
}
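
The strncpy-to-memcpy change above matters because a SCSI LUN is a fixed 8-byte field, not a NUL-terminated string: strncpy stops copying at the first zero byte and pads the rest, silently corrupting LUNs with embedded zeros. A small userspace demonstration (values are illustrative):

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* LUNs are 8 raw bytes; an embedded 0x00 is legal data. */
	unsigned char lun[8] = { 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
	unsigned char a[8], b[8];

	strncpy((char *)a, (char *)lun, 8); /* stops at the first 0x00, pads with zeros */
	memcpy(b, lun, 8);                  /* copies all 8 bytes verbatim */

	printf("strncpy byte[1]=0x%02x memcpy byte[1]=0x%02x\n", a[1], b[1]);
	return 0;
}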
@@ -968,7 +959,7 @@ void pm8001_open_reject_retry(
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
if (pm8001_dev)
- pm8001_dev->running_req--;
+ atomic_dec(&pm8001_dev->running_req);
spin_lock_irqsave(&task->task_state_lock, flags1);
task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
@@ -1018,9 +1009,9 @@ int pm8001_I_T_nexus_reset(struct domain_device *dev)
}
rc = sas_phy_reset(phy, 1);
if (rc) {
- PM8001_EH_DBG(pm8001_ha,
- pm8001_printk("phy reset failed for device %x\n"
- "with rc %d\n", pm8001_dev->device_id, rc));
+ pm8001_dbg(pm8001_ha, EH,
+ "phy reset failed for device %x with rc %d\n",
+ pm8001_dev->device_id, rc);
rc = TMF_RESP_FUNC_FAILED;
goto out;
}
@@ -1028,17 +1019,16 @@ int pm8001_I_T_nexus_reset(struct domain_device *dev)
rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
dev, 1, 0);
if (rc) {
- PM8001_EH_DBG(pm8001_ha,
- pm8001_printk("task abort failed %x\n"
- "with rc %d\n", pm8001_dev->device_id, rc));
+ pm8001_dbg(pm8001_ha, EH, "task abort failed %x\n"
+ "with rc %d\n", pm8001_dev->device_id, rc);
rc = TMF_RESP_FUNC_FAILED;
}
} else {
rc = sas_phy_reset(phy, 1);
msleep(2000);
}
- PM8001_EH_DBG(pm8001_ha, pm8001_printk(" for device[%x]:rc=%d\n",
- pm8001_dev->device_id, rc));
+ pm8001_dbg(pm8001_ha, EH, " for device[%x]:rc=%d\n",
+ pm8001_dev->device_id, rc);
out:
sas_put_local_phy(phy);
return rc;
@@ -1061,8 +1051,7 @@ int pm8001_I_T_nexus_event_handler(struct domain_device *dev)
pm8001_dev = dev->lldd_dev;
pm8001_ha = pm8001_find_ha_by_dev(dev);
- PM8001_EH_DBG(pm8001_ha,
- pm8001_printk("I_T_Nexus handler invoked !!"));
+ pm8001_dbg(pm8001_ha, EH, "I_T_Nexus handler invoked !!\n");
phy = sas_get_local_phy(dev);
@@ -1101,8 +1090,8 @@ int pm8001_I_T_nexus_event_handler(struct domain_device *dev)
rc = sas_phy_reset(phy, 1);
msleep(2000);
}
- PM8001_EH_DBG(pm8001_ha, pm8001_printk(" for device[%x]:rc=%d\n",
- pm8001_dev->device_id, rc));
+ pm8001_dbg(pm8001_ha, EH, " for device[%x]:rc=%d\n",
+ pm8001_dev->device_id, rc);
out:
sas_put_local_phy(phy);
@@ -1131,8 +1120,8 @@ int pm8001_lu_reset(struct domain_device *dev, u8 *lun)
rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
}
/* If failed, fall-through I_T_Nexus reset */
- PM8001_EH_DBG(pm8001_ha, pm8001_printk("for device[%x]:rc=%d\n",
- pm8001_dev->device_id, rc));
+ pm8001_dbg(pm8001_ha, EH, "for device[%x]:rc=%d\n",
+ pm8001_dev->device_id, rc);
return rc;
}
@@ -1140,7 +1129,6 @@ int pm8001_lu_reset(struct domain_device *dev, u8 *lun)
int pm8001_query_task(struct sas_task *task)
{
u32 tag = 0xdeadbeef;
- int i = 0;
struct scsi_lun lun;
struct pm8001_tmf_task tmf_task;
int rc = TMF_RESP_FUNC_FAILED;
@@ -1159,10 +1147,7 @@ int pm8001_query_task(struct sas_task *task)
rc = TMF_RESP_FUNC_FAILED;
return rc;
}
- PM8001_EH_DBG(pm8001_ha, pm8001_printk("Query:["));
- for (i = 0; i < 16; i++)
- printk(KERN_INFO "%02x ", cmnd->cmnd[i]);
- printk(KERN_INFO "]\n");
+ pm8001_dbg(pm8001_ha, EH, "Query:[%16ph]\n", cmnd->cmnd);
tmf_task.tmf = TMF_QUERY_TASK;
tmf_task.tag_of_task_to_be_managed = tag;
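
The %16ph conversion above uses the kernel's printk %ph extension, which dumps a buffer as space-separated hex bytes (up to 64, with the field width giving the byte count), replacing the old hand-rolled printk loop. A kernel-context fragment showing the usage (not standard C; cdb contents are illustrative):

#include <linux/printk.h>
#include <linux/types.h>

static void ex_dump_cdb(const u8 *cdb)
{
	/* %*ph prints the given number of bytes as hex pairs;
	 * here the width 16 dumps a full 16-byte CDB. */
	pr_info("Query:[%16ph]\n", cdb);
}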
@@ -1170,15 +1155,14 @@ int pm8001_query_task(struct sas_task *task)
switch (rc) {
/* The task is still in Lun, release it then */
case TMF_RESP_FUNC_SUCC:
- PM8001_EH_DBG(pm8001_ha,
- pm8001_printk("The task is still in Lun\n"));
+ pm8001_dbg(pm8001_ha, EH, "The task is still in Lun\n");
break;
/* The task is not in Lun or failed, reset the phy */
case TMF_RESP_FUNC_FAILED:
case TMF_RESP_FUNC_COMPLETE:
- PM8001_EH_DBG(pm8001_ha,
- pm8001_printk("The task is not in Lun or failed,"
- " reset the phy\n"));
+ pm8001_dbg(pm8001_ha, EH,
+ "The task is not in Lun or failed, reset the phy\n");
break;
}
}
@@ -1207,7 +1191,7 @@ int pm8001_abort_task(struct sas_task *task)
phy_id = pm8001_dev->attached_phy;
ret = pm8001_find_tag(task, &tag);
if (ret == 0) {
- pm8001_printk("no tag for task:%p\n", task);
+ pm8001_info(pm8001_ha, "no tag for task:%p\n", task);
return TMF_RESP_FUNC_FAILED;
}
spin_lock_irqsave(&task->task_state_lock, flags);
@@ -1264,8 +1248,8 @@ int pm8001_abort_task(struct sas_task *task)
* leaking the task in libsas or losing the race and
* getting a double free.
*/
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("Waiting for local phy ctl\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ "Waiting for local phy ctl\n");
ret = wait_for_completion_timeout(&completion,
PM8001_TASK_TIMEOUT * HZ);
if (!ret || !phy->reset_success) {
@@ -1275,8 +1259,8 @@ int pm8001_abort_task(struct sas_task *task)
/* 3. Wait for Port Reset complete or
* Port reset TMO
*/
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("Waiting for Port reset\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ "Waiting for Port reset\n");
ret = wait_for_completion_timeout(
&completion_reset,
PM8001_TASK_TIMEOUT * HZ);
@@ -1329,7 +1313,7 @@ out:
task->slow_task = NULL;
spin_unlock_irqrestore(&task->task_state_lock, flags);
if (rc != TMF_RESP_FUNC_COMPLETE)
- pm8001_printk("rc= %d\n", rc);
+ pm8001_info(pm8001_ha, "rc= %d\n", rc);
return rc;
}
@@ -1355,9 +1339,8 @@ int pm8001_clear_task_set(struct domain_device *dev, u8 *lun)
struct pm8001_device *pm8001_dev = dev->lldd_dev;
struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
- PM8001_EH_DBG(pm8001_ha,
- pm8001_printk("I_T_L_Q clear task set[%x]\n",
- pm8001_dev->device_id));
+ pm8001_dbg(pm8001_ha, EH, "I_T_L_Q clear task set[%x]\n",
+ pm8001_dev->device_id);
tmf_task.tmf = TMF_CLEAR_TASK_SET;
return pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
}
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 95663e138083..f2c8cbad3853 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -69,45 +69,16 @@
#define PM8001_DEV_LOGGING 0x80 /* development message logging */
#define PM8001_DEVIO_LOGGING 0x100 /* development io message logging */
#define PM8001_IOERR_LOGGING 0x200 /* development io err message logging */
-#define pm8001_printk(format, arg...) pr_info("%s:: %s %d:" \
- format, pm8001_ha->name, __func__, __LINE__, ## arg)
-#define PM8001_CHECK_LOGGING(HBA, LEVEL, CMD) \
-do { \
- if (unlikely(HBA->logging_level & LEVEL)) \
- do { \
- CMD; \
- } while (0); \
-} while (0);
-#define PM8001_EH_DBG(HBA, CMD) \
- PM8001_CHECK_LOGGING(HBA, PM8001_EH_LOGGING, CMD)
+#define pm8001_info(HBA, fmt, ...) \
+ pr_info("%s:: %s %d:" fmt, \
+ (HBA)->name, __func__, __LINE__, ##__VA_ARGS__)
-#define PM8001_INIT_DBG(HBA, CMD) \
- PM8001_CHECK_LOGGING(HBA, PM8001_INIT_LOGGING, CMD)
-
-#define PM8001_DISC_DBG(HBA, CMD) \
- PM8001_CHECK_LOGGING(HBA, PM8001_DISC_LOGGING, CMD)
-
-#define PM8001_IO_DBG(HBA, CMD) \
- PM8001_CHECK_LOGGING(HBA, PM8001_IO_LOGGING, CMD)
-
-#define PM8001_FAIL_DBG(HBA, CMD) \
- PM8001_CHECK_LOGGING(HBA, PM8001_FAIL_LOGGING, CMD)
-
-#define PM8001_IOCTL_DBG(HBA, CMD) \
- PM8001_CHECK_LOGGING(HBA, PM8001_IOCTL_LOGGING, CMD)
-
-#define PM8001_MSG_DBG(HBA, CMD) \
- PM8001_CHECK_LOGGING(HBA, PM8001_MSG_LOGGING, CMD)
-
-#define PM8001_DEV_DBG(HBA, CMD) \
- PM8001_CHECK_LOGGING(HBA, PM8001_DEV_LOGGING, CMD)
-
-#define PM8001_DEVIO_DBG(HBA, CMD) \
- PM8001_CHECK_LOGGING(HBA, PM8001_DEVIO_LOGGING, CMD)
-
-#define PM8001_IOERR_DBG(HBA, CMD) \
- PM8001_CHECK_LOGGING(HBA, PM8001_IOERR_LOGGING, CMD)
+#define pm8001_dbg(HBA, level, fmt, ...) \
+do { \
+ if (unlikely((HBA)->logging_level & PM8001_##level##_LOGGING)) \
+ pm8001_info(HBA, fmt, ##__VA_ARGS__); \
+} while (0)
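
The new pm8001_dbg() keeps the old per-level gating but hides the pm8001_printk() boilerplate by token-pasting the level name into its bitmask. A minimal userspace sketch of the same pattern (struct fields and level bits are illustrative, not the driver's real layout):

#include <stdio.h>

#define EX_INIT_LOGGING 0x01
#define EX_FAIL_LOGGING 0x02

struct ex_hba {
	const char *name;
	unsigned int logging_level;
};

#define ex_info(hba, fmt, ...) \
	printf("%s:: %s %d:" fmt, (hba)->name, __func__, __LINE__, ##__VA_ARGS__)

/* Token-pasting turns the level name into the corresponding bitmask. */
#define ex_dbg(hba, level, fmt, ...)                              \
do {                                                              \
	if ((hba)->logging_level & EX_##level##_LOGGING)          \
		ex_info(hba, fmt, ##__VA_ARGS__);                 \
} while (0)

int main(void)
{
	struct ex_hba hba = { .name = "pm8001_0", .logging_level = EX_FAIL_LOGGING };

	ex_dbg(&hba, INIT, "suppressed: INIT logging is off\n");
	ex_dbg(&hba, FAIL, "printed: FAIL logging is on, rc=%d\n", -5);
	return 0;
}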
#define PM8001_USE_TASKLET
#define PM8001_USE_MSIX
@@ -293,7 +264,7 @@ struct pm8001_device {
struct completion *dcompletion;
struct completion *setds_completion;
u32 device_id;
- u32 running_req;
+ atomic_t running_req;
};
struct pm8001_prd_imt {
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 7593f248afb2..6772b0924dac 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -58,9 +58,8 @@ int pm80xx_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shift_value)
reg_val = pm8001_cr32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER);
} while ((reg_val != shift_value) && time_before(jiffies, start));
if (reg_val != shift_value) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("TIMEOUT:MEMBASE_II_SHIFT_REGISTER"
- " = 0x%x\n", reg_val));
+ pm8001_dbg(pm8001_ha, FAIL, "TIMEOUT:MEMBASE_II_SHIFT_REGISTER = 0x%x\n",
+ reg_val);
return -1;
}
return 0;
@@ -109,8 +108,8 @@ ssize_t pm80xx_get_fatal_dump(struct device *cdev,
}
/* initialize variables for very first call from host application */
if (pm8001_ha->forensic_info.data_buf.direct_offset == 0) {
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("forensic_info TYPE_NON_FATAL..............\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "forensic_info TYPE_NON_FATAL..............\n");
direct_data = (u8 *)fatal_error_data;
pm8001_ha->forensic_info.data_type = TYPE_NON_FATAL;
pm8001_ha->forensic_info.data_buf.direct_len = SYSFS_OFFSET;
@@ -123,17 +122,13 @@ ssize_t pm80xx_get_fatal_dump(struct device *cdev,
MPI_FATAL_EDUMP_TABLE_SIGNATURE, 0x1234abcd);
pm8001_ha->forensic_info.data_buf.direct_data = direct_data;
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("ossaHwCB: status1 %d\n", status));
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("ossaHwCB: read_len 0x%x\n",
- pm8001_ha->forensic_info.data_buf.read_len));
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("ossaHwCB: direct_len 0x%x\n",
- pm8001_ha->forensic_info.data_buf.direct_len));
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("ossaHwCB: direct_offset 0x%x\n",
- pm8001_ha->forensic_info.data_buf.direct_offset));
+ pm8001_dbg(pm8001_ha, IO, "ossaHwCB: status1 %d\n", status);
+ pm8001_dbg(pm8001_ha, IO, "ossaHwCB: read_len 0x%x\n",
+ pm8001_ha->forensic_info.data_buf.read_len);
+ pm8001_dbg(pm8001_ha, IO, "ossaHwCB: direct_len 0x%x\n",
+ pm8001_ha->forensic_info.data_buf.direct_len);
+ pm8001_dbg(pm8001_ha, IO, "ossaHwCB: direct_offset 0x%x\n",
+ pm8001_ha->forensic_info.data_buf.direct_offset);
}
if (pm8001_ha->forensic_info.data_buf.direct_offset == 0) {
/* start to get data */
@@ -153,29 +148,24 @@ ssize_t pm80xx_get_fatal_dump(struct device *cdev,
*/
length_to_read =
accum_len - pm8001_ha->forensic_preserved_accumulated_transfer;
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("get_fatal_spcv: accum_len 0x%x\n", accum_len));
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("get_fatal_spcv: length_to_read 0x%x\n",
- length_to_read));
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("get_fatal_spcv: last_offset 0x%x\n",
- pm8001_ha->forensic_last_offset));
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("get_fatal_spcv: read_len 0x%x\n",
- pm8001_ha->forensic_info.data_buf.read_len));
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("get_fatal_spcv:: direct_len 0x%x\n",
- pm8001_ha->forensic_info.data_buf.direct_len));
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("get_fatal_spcv:: direct_offset 0x%x\n",
- pm8001_ha->forensic_info.data_buf.direct_offset));
+ pm8001_dbg(pm8001_ha, IO, "get_fatal_spcv: accum_len 0x%x\n",
+ accum_len);
+ pm8001_dbg(pm8001_ha, IO, "get_fatal_spcv: length_to_read 0x%x\n",
+ length_to_read);
+ pm8001_dbg(pm8001_ha, IO, "get_fatal_spcv: last_offset 0x%x\n",
+ pm8001_ha->forensic_last_offset);
+ pm8001_dbg(pm8001_ha, IO, "get_fatal_spcv: read_len 0x%x\n",
+ pm8001_ha->forensic_info.data_buf.read_len);
+ pm8001_dbg(pm8001_ha, IO, "get_fatal_spcv:: direct_len 0x%x\n",
+ pm8001_ha->forensic_info.data_buf.direct_len);
+ pm8001_dbg(pm8001_ha, IO, "get_fatal_spcv:: direct_offset 0x%x\n",
+ pm8001_ha->forensic_info.data_buf.direct_offset);
/* If accumulated length failed to read correctly fail the attempt.*/
if (accum_len == 0xFFFFFFFF) {
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("Possible PCI issue 0x%x not expected\n",
- accum_len));
+ pm8001_dbg(pm8001_ha, IO,
+ "Possible PCI issue 0x%x not expected\n",
+ accum_len);
return status;
}
/* If accumulated length is zero fail the attempt */
@@ -239,8 +229,8 @@ moreData:
offset = (int)
((char *)pm8001_ha->forensic_info.data_buf.direct_data
- (char *)buf);
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("get_fatal_spcv:return1 0x%x\n", offset));
+ pm8001_dbg(pm8001_ha, IO,
+ "get_fatal_spcv:return1 0x%x\n", offset);
return (char *)pm8001_ha->
forensic_info.data_buf.direct_data -
(char *)buf;
@@ -262,8 +252,8 @@ moreData:
offset = (int)
((char *)pm8001_ha->forensic_info.data_buf.direct_data
- (char *)buf);
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("get_fatal_spcv:return2 0x%x\n", offset));
+ pm8001_dbg(pm8001_ha, IO,
+ "get_fatal_spcv:return2 0x%x\n", offset);
return (char *)pm8001_ha->
forensic_info.data_buf.direct_data -
(char *)buf;
@@ -289,8 +279,8 @@ moreData:
offset = (int)
((char *)pm8001_ha->forensic_info.data_buf.direct_data
- (char *)buf);
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("get_fatal_spcv: return3 0x%x\n", offset));
+ pm8001_dbg(pm8001_ha, IO, "get_fatal_spcv: return3 0x%x\n",
+ offset);
return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
(char *)buf;
}
@@ -327,9 +317,9 @@ moreData:
} while ((reg_val) && time_before(jiffies, start));
if (reg_val != 0) {
- PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
- "TIMEOUT:MPI_FATAL_EDUMP_TABLE_HDSHAKE 0x%x\n",
- reg_val));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "TIMEOUT:MPI_FATAL_EDUMP_TABLE_HDSHAKE 0x%x\n",
+ reg_val);
/* Fail the dump if a timeout occurs */
pm8001_ha->forensic_info.data_buf.direct_data +=
sprintf(
@@ -351,9 +341,9 @@ moreData:
time_before(jiffies, start));
if (reg_val < 2) {
- PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
- "TIMEOUT:MPI_FATAL_EDUMP_TABLE_STATUS = 0x%x\n",
- reg_val));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "TIMEOUT:MPI_FATAL_EDUMP_TABLE_STATUS = 0x%x\n",
+ reg_val);
/* Fail the dump if a timeout occurs */
pm8001_ha->forensic_info.data_buf.direct_data +=
sprintf(
@@ -387,8 +377,7 @@ moreData:
}
offset = (int)((char *)pm8001_ha->forensic_info.data_buf.direct_data
- (char *)buf);
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("get_fatal_spcv: return4 0x%x\n", offset));
+ pm8001_dbg(pm8001_ha, IO, "get_fatal_spcv: return4 0x%x\n", offset);
return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
(char *)buf;
}
@@ -419,8 +408,7 @@ ssize_t pm80xx_get_non_fatal_dump(struct device *cdev,
PAGE_SIZE, "Not supported for SPC controller");
return 0;
}
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("forensic_info TYPE_NON_FATAL...\n"));
+ pm8001_dbg(pm8001_ha, IO, "forensic_info TYPE_NON_FATAL...\n");
/*
* Step 1: Write the host buffer parameters in the MPI Fatal and
* Non-Fatal Error Dump Capture Table.This is the buffer
@@ -581,24 +569,24 @@ static void read_main_config_table(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version =
pm8001_mr32(address, MAIN_MPI_INACTIVE_FW_VERSION);
- PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
- "Main cfg table: sign:%x interface rev:%x fw_rev:%x\n",
- pm8001_ha->main_cfg_tbl.pm80xx_tbl.signature,
- pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev,
- pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev));
-
- PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
- "table offset: gst:%x iq:%x oq:%x int vec:%x phy attr:%x\n",
- pm8001_ha->main_cfg_tbl.pm80xx_tbl.gst_offset,
- pm8001_ha->main_cfg_tbl.pm80xx_tbl.inbound_queue_offset,
- pm8001_ha->main_cfg_tbl.pm80xx_tbl.outbound_queue_offset,
- pm8001_ha->main_cfg_tbl.pm80xx_tbl.int_vec_table_offset,
- pm8001_ha->main_cfg_tbl.pm80xx_tbl.phy_attr_table_offset));
-
- PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
- "Main cfg table; ila rev:%x Inactive fw rev:%x\n",
- pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version,
- pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version));
+ pm8001_dbg(pm8001_ha, DEV,
+ "Main cfg table: sign:%x interface rev:%x fw_rev:%x\n",
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.signature,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev);
+
+ pm8001_dbg(pm8001_ha, DEV,
+ "table offset: gst:%x iq:%x oq:%x int vec:%x phy attr:%x\n",
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.gst_offset,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.inbound_queue_offset,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.outbound_queue_offset,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.int_vec_table_offset,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.phy_attr_table_offset);
+
+ pm8001_dbg(pm8001_ha, DEV,
+ "Main cfg table; ila rev:%x Inactive fw rev:%x\n",
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version);
}
/**
@@ -808,10 +796,10 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->inbnd_q_tbl[i].producer_idx = 0;
pm8001_ha->inbnd_q_tbl[i].consumer_index = 0;
- PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
- "IQ %d pi_bar 0x%x pi_offset 0x%x\n", i,
- pm8001_ha->inbnd_q_tbl[i].pi_pci_bar,
- pm8001_ha->inbnd_q_tbl[i].pi_offset));
+ pm8001_dbg(pm8001_ha, DEV,
+ "IQ %d pi_bar 0x%x pi_offset 0x%x\n", i,
+ pm8001_ha->inbnd_q_tbl[i].pi_pci_bar,
+ pm8001_ha->inbnd_q_tbl[i].pi_offset);
}
for (i = 0; i < pm8001_ha->max_q_num; i++) {
pm8001_ha->outbnd_q_tbl[i].element_size_cnt =
@@ -841,10 +829,10 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->outbnd_q_tbl[i].consumer_idx = 0;
pm8001_ha->outbnd_q_tbl[i].producer_index = 0;
- PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
- "OQ %d ci_bar 0x%x ci_offset 0x%x\n", i,
- pm8001_ha->outbnd_q_tbl[i].ci_pci_bar,
- pm8001_ha->outbnd_q_tbl[i].ci_offset));
+ pm8001_dbg(pm8001_ha, DEV,
+ "OQ %d ci_bar 0x%x ci_offset 0x%x\n", i,
+ pm8001_ha->outbnd_q_tbl[i].ci_pci_bar,
+ pm8001_ha->outbnd_q_tbl[i].ci_offset);
}
}
@@ -878,9 +866,9 @@ static void update_main_config_table(struct pm8001_hba_info *pm8001_ha)
((pm8001_ha->max_q_num - 1) << 8);
pm8001_mw32(address, MAIN_FATAL_ERROR_INTERRUPT,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt);
- PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
- "Updated Fatal error interrupt vector 0x%x\n",
- pm8001_mr32(address, MAIN_FATAL_ERROR_INTERRUPT)));
+ pm8001_dbg(pm8001_ha, DEV,
+ "Updated Fatal error interrupt vector 0x%x\n",
+ pm8001_mr32(address, MAIN_FATAL_ERROR_INTERRUPT));
pm8001_mw32(address, MAIN_EVENT_CRC_CHECK,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump);
@@ -891,9 +879,9 @@ static void update_main_config_table(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping |= 0x20000000;
pm8001_mw32(address, MAIN_GPIO_LED_FLAGS_OFFSET,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping);
- PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
- "Programming DW 0x21 in main cfg table with 0x%x\n",
- pm8001_mr32(address, MAIN_GPIO_LED_FLAGS_OFFSET)));
+ pm8001_dbg(pm8001_ha, DEV,
+ "Programming DW 0x21 in main cfg table with 0x%x\n",
+ pm8001_mr32(address, MAIN_GPIO_LED_FLAGS_OFFSET));
pm8001_mw32(address, MAIN_PORT_RECOVERY_TIMER,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer);
@@ -934,20 +922,20 @@ static void update_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha,
pm8001_mw32(address, offset + IB_CI_BASE_ADDR_LO_OFFSET,
pm8001_ha->inbnd_q_tbl[number].ci_lower_base_addr);
- PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
- "IQ %d: Element pri size 0x%x\n",
- number,
- pm8001_ha->inbnd_q_tbl[number].element_pri_size_cnt));
+ pm8001_dbg(pm8001_ha, DEV,
+ "IQ %d: Element pri size 0x%x\n",
+ number,
+ pm8001_ha->inbnd_q_tbl[number].element_pri_size_cnt);
- PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
- "IQ upr base addr 0x%x IQ lwr base addr 0x%x\n",
- pm8001_ha->inbnd_q_tbl[number].upper_base_addr,
- pm8001_ha->inbnd_q_tbl[number].lower_base_addr));
+ pm8001_dbg(pm8001_ha, DEV,
+ "IQ upr base addr 0x%x IQ lwr base addr 0x%x\n",
+ pm8001_ha->inbnd_q_tbl[number].upper_base_addr,
+ pm8001_ha->inbnd_q_tbl[number].lower_base_addr);
- PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
- "CI upper base addr 0x%x CI lower base addr 0x%x\n",
- pm8001_ha->inbnd_q_tbl[number].ci_upper_base_addr,
- pm8001_ha->inbnd_q_tbl[number].ci_lower_base_addr));
+ pm8001_dbg(pm8001_ha, DEV,
+ "CI upper base addr 0x%x CI lower base addr 0x%x\n",
+ pm8001_ha->inbnd_q_tbl[number].ci_upper_base_addr,
+ pm8001_ha->inbnd_q_tbl[number].ci_lower_base_addr);
}
/**
@@ -973,20 +961,20 @@ static void update_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha,
pm8001_mw32(address, offset + OB_INTERRUPT_COALES_OFFSET,
pm8001_ha->outbnd_q_tbl[number].interrup_vec_cnt_delay);
- PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
- "OQ %d: Element pri size 0x%x\n",
- number,
- pm8001_ha->outbnd_q_tbl[number].element_size_cnt));
+ pm8001_dbg(pm8001_ha, DEV,
+ "OQ %d: Element pri size 0x%x\n",
+ number,
+ pm8001_ha->outbnd_q_tbl[number].element_size_cnt);
- PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
- "OQ upr base addr 0x%x OQ lwr base addr 0x%x\n",
- pm8001_ha->outbnd_q_tbl[number].upper_base_addr,
- pm8001_ha->outbnd_q_tbl[number].lower_base_addr));
+ pm8001_dbg(pm8001_ha, DEV,
+ "OQ upr base addr 0x%x OQ lwr base addr 0x%x\n",
+ pm8001_ha->outbnd_q_tbl[number].upper_base_addr,
+ pm8001_ha->outbnd_q_tbl[number].lower_base_addr);
- PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
- "PI upper base addr 0x%x PI lower base addr 0x%x\n",
- pm8001_ha->outbnd_q_tbl[number].pi_upper_base_addr,
- pm8001_ha->outbnd_q_tbl[number].pi_lower_base_addr));
+ pm8001_dbg(pm8001_ha, DEV,
+ "PI upper base addr 0x%x PI lower base addr 0x%x\n",
+ pm8001_ha->outbnd_q_tbl[number].pi_upper_base_addr,
+ pm8001_ha->outbnd_q_tbl[number].pi_lower_base_addr);
}
/**
@@ -1016,8 +1004,9 @@ static int mpi_init_check(struct pm8001_hba_info *pm8001_ha)
if (!max_wait_count) {
/* additional check */
- PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
- "Inb doorbell clear not toggled[value:%x]\n", value));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "Inb doorbell clear not toggled[value:%x]\n",
+ value);
return -EBUSY;
}
/* check the MPI-State for initialization upto 100ms*/
@@ -1042,6 +1031,7 @@ static int mpi_init_check(struct pm8001_hba_info *pm8001_ha)
/**
* check_fw_ready - The LLDD check if the FW is ready, if not, return error.
+ * This function sleeps, hence it must not be used in atomic context.
* @pm8001_ha: our hba card information
*/
static int check_fw_ready(struct pm8001_hba_info *pm8001_ha)
@@ -1052,73 +1042,73 @@ static int check_fw_ready(struct pm8001_hba_info *pm8001_ha)
int ret = 0;
/* reset / PCIe ready */
- max_wait_time = max_wait_count = 100 * 1000; /* 100 milli sec */
+ max_wait_time = max_wait_count = 5; /* 100 milli sec */
do {
- udelay(1);
+ msleep(FW_READY_INTERVAL);
value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
} while ((value == 0xFFFFFFFF) && (--max_wait_count));
/* check ila status */
- max_wait_time = max_wait_count = 1000 * 1000; /* 1000 milli sec */
+ max_wait_time = max_wait_count = 50; /* 1000 milli sec */
do {
- udelay(1);
+ msleep(FW_READY_INTERVAL);
value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
} while (((value & SCRATCH_PAD_ILA_READY) !=
SCRATCH_PAD_ILA_READY) && (--max_wait_count));
if (!max_wait_count)
ret = -1;
else {
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" ila ready status in %d millisec\n",
- (max_wait_time - max_wait_count)));
+ pm8001_dbg(pm8001_ha, MSG,
+ " ila ready status in %d millisec\n",
+ (max_wait_time - max_wait_count));
}
/* check RAAE status */
- max_wait_time = max_wait_count = 1800 * 1000; /* 1800 milli sec */
+ max_wait_time = max_wait_count = 90; /* 1800 milli sec */
do {
- udelay(1);
+ msleep(FW_READY_INTERVAL);
value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
} while (((value & SCRATCH_PAD_RAAE_READY) !=
SCRATCH_PAD_RAAE_READY) && (--max_wait_count));
if (!max_wait_count)
ret = -1;
else {
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" raae ready status in %d millisec\n",
- (max_wait_time - max_wait_count)));
+ pm8001_dbg(pm8001_ha, MSG,
+ " raae ready status in %d millisec\n",
+ (max_wait_time - max_wait_count));
}
/* check iop0 status */
- max_wait_time = max_wait_count = 600 * 1000; /* 600 milli sec */
+ max_wait_time = max_wait_count = 30; /* 600 milli sec */
do {
- udelay(1);
+ msleep(FW_READY_INTERVAL);
value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
} while (((value & SCRATCH_PAD_IOP0_READY) != SCRATCH_PAD_IOP0_READY) &&
(--max_wait_count));
if (!max_wait_count)
ret = -1;
else {
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" iop0 ready status in %d millisec\n",
- (max_wait_time - max_wait_count)));
+ pm8001_dbg(pm8001_ha, MSG,
+ " iop0 ready status in %d millisec\n",
+ (max_wait_time - max_wait_count));
}
/* check iop1 status only for 16 port controllers */
if ((pm8001_ha->chip_id != chip_8008) &&
(pm8001_ha->chip_id != chip_8009)) {
/* 200 milli sec */
- max_wait_time = max_wait_count = 200 * 1000;
+ max_wait_time = max_wait_count = 10;
do {
- udelay(1);
+ msleep(FW_READY_INTERVAL);
value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
} while (((value & SCRATCH_PAD_IOP1_READY) !=
SCRATCH_PAD_IOP1_READY) && (--max_wait_count));
if (!max_wait_count)
ret = -1;
else {
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
- "iop1 ready status in %d millisec\n",
- (max_wait_time - max_wait_count)));
+ pm8001_dbg(pm8001_ha, MSG,
+ "iop1 ready status in %d millisec\n",
+ (max_wait_time - max_wait_count));
}
}
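Each wait loop above trades a 1 µs busy-wait (udelay) for a sleeping wait of FW_READY_INTERVAL ms (msleep), so the loop counts shrink by the same factor while the commented time budgets stay the same; the new counts are consistent with FW_READY_INTERVAL being 20 ms (5 × 20 = 100 ms, 50 × 20 = 1000 ms, 90 × 20 = 1800 ms, 30 × 20 = 600 ms, 10 × 20 = 200 ms). The repeated pattern, written as a hypothetical helper:

/*
 * Sketch of the polling shape used for each SCRATCH_PAD_1 ready
 * bit above; this helper is illustrative and not part of the
 * driver.  Because it calls msleep(), any caller -- like
 * check_fw_ready() -- must not run in atomic context.
 */
static int wait_scratch_pad1_ready(struct pm8001_hba_info *pm8001_ha,
				   u32 ready_bits, u32 max_wait_count)
{
	u32 value;

	do {
		msleep(FW_READY_INTERVAL);	/* assumed to be 20 ms */
		value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
	} while (((value & ready_bits) != ready_bits) && (--max_wait_count));

	return max_wait_count ? 0 : -EBUSY;
}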
@@ -1136,13 +1126,11 @@ static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0);
offset = value & 0x03FFFFFF; /* scratch pad 0 TBL address */
- PM8001_DEV_DBG(pm8001_ha,
- pm8001_printk("Scratchpad 0 Offset: 0x%x value 0x%x\n",
- offset, value));
+ pm8001_dbg(pm8001_ha, DEV, "Scratchpad 0 Offset: 0x%x value 0x%x\n",
+ offset, value);
pcilogic = (value & 0xFC000000) >> 26;
pcibar = get_pci_bar_index(pcilogic);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("Scratchpad 0 PCI BAR: %d\n", pcibar));
+ pm8001_dbg(pm8001_ha, INIT, "Scratchpad 0 PCI BAR: %d\n", pcibar);
pm8001_ha->main_cfg_tbl_addr = base_addr =
pm8001_ha->io_mem[pcibar].memvirtaddr + offset;
pm8001_ha->general_stat_tbl_addr =
@@ -1164,33 +1152,25 @@ static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0xA0) &
0xFFFFFF);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("GST OFFSET 0x%x\n",
- pm8001_cr32(pm8001_ha, pcibar, offset + 0x18)));
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("INBND OFFSET 0x%x\n",
- pm8001_cr32(pm8001_ha, pcibar, offset + 0x1C)));
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("OBND OFFSET 0x%x\n",
- pm8001_cr32(pm8001_ha, pcibar, offset + 0x20)));
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("IVT OFFSET 0x%x\n",
- pm8001_cr32(pm8001_ha, pcibar, offset + 0x8C)));
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("PSPA OFFSET 0x%x\n",
- pm8001_cr32(pm8001_ha, pcibar, offset + 0x90)));
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("addr - main cfg %p general status %p\n",
- pm8001_ha->main_cfg_tbl_addr,
- pm8001_ha->general_stat_tbl_addr));
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("addr - inbnd %p obnd %p\n",
- pm8001_ha->inbnd_q_tbl_addr,
- pm8001_ha->outbnd_q_tbl_addr));
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("addr - pspa %p ivt %p\n",
- pm8001_ha->pspa_q_tbl_addr,
- pm8001_ha->ivt_tbl_addr));
+ pm8001_dbg(pm8001_ha, INIT, "GST OFFSET 0x%x\n",
+ pm8001_cr32(pm8001_ha, pcibar, offset + 0x18));
+ pm8001_dbg(pm8001_ha, INIT, "INBND OFFSET 0x%x\n",
+ pm8001_cr32(pm8001_ha, pcibar, offset + 0x1C));
+ pm8001_dbg(pm8001_ha, INIT, "OBND OFFSET 0x%x\n",
+ pm8001_cr32(pm8001_ha, pcibar, offset + 0x20));
+ pm8001_dbg(pm8001_ha, INIT, "IVT OFFSET 0x%x\n",
+ pm8001_cr32(pm8001_ha, pcibar, offset + 0x8C));
+ pm8001_dbg(pm8001_ha, INIT, "PSPA OFFSET 0x%x\n",
+ pm8001_cr32(pm8001_ha, pcibar, offset + 0x90));
+ pm8001_dbg(pm8001_ha, INIT, "addr - main cfg %p general status %p\n",
+ pm8001_ha->main_cfg_tbl_addr,
+ pm8001_ha->general_stat_tbl_addr);
+ pm8001_dbg(pm8001_ha, INIT, "addr - inbnd %p obnd %p\n",
+ pm8001_ha->inbnd_q_tbl_addr,
+ pm8001_ha->outbnd_q_tbl_addr);
+ pm8001_dbg(pm8001_ha, INIT, "addr - pspa %p ivt %p\n",
+ pm8001_ha->pspa_q_tbl_addr,
+ pm8001_ha->ivt_tbl_addr);
}
/**
@@ -1224,9 +1204,9 @@ pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha)
(THERMAL_ENABLE << 8) | page_code;
payload.cfg_pg[1] = (LTEMPHIL << 24) | (RTEMPHIL << 8);
- PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
- "Setting up thermal config. cfg_pg 0 0x%x cfg_pg 1 0x%x\n",
- payload.cfg_pg[0], payload.cfg_pg[1]));
+ pm8001_dbg(pm8001_ha, DEV,
+ "Setting up thermal config. cfg_pg 0 0x%x cfg_pg 1 0x%x\n",
+ payload.cfg_pg[0], payload.cfg_pg[1]);
rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
sizeof(payload), 0);
@@ -1281,32 +1261,24 @@ pm80xx_set_sas_protocol_timer_config(struct pm8001_hba_info *pm8001_ha)
| SAS_COPNRJT_RTRY_THR;
SASConfigPage.MAX_AIP = SAS_MAX_AIP;
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("SASConfigPage.pageCode "
- "0x%08x\n", SASConfigPage.pageCode));
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("SASConfigPage.MST_MSI "
- " 0x%08x\n", SASConfigPage.MST_MSI));
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("SASConfigPage.STP_SSP_MCT_TMO "
- " 0x%08x\n", SASConfigPage.STP_SSP_MCT_TMO));
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("SASConfigPage.STP_FRM_TMO "
- " 0x%08x\n", SASConfigPage.STP_FRM_TMO));
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("SASConfigPage.STP_IDLE_TMO "
- " 0x%08x\n", SASConfigPage.STP_IDLE_TMO));
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("SASConfigPage.OPNRJT_RTRY_INTVL "
- " 0x%08x\n", SASConfigPage.OPNRJT_RTRY_INTVL));
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO "
- " 0x%08x\n", SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO));
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR "
- " 0x%08x\n", SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR));
- PM8001_INIT_DBG(pm8001_ha, pm8001_printk("SASConfigPage.MAX_AIP "
- " 0x%08x\n", SASConfigPage.MAX_AIP));
+ pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.pageCode 0x%08x\n",
+ SASConfigPage.pageCode);
+ pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.MST_MSI 0x%08x\n",
+ SASConfigPage.MST_MSI);
+ pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.STP_SSP_MCT_TMO 0x%08x\n",
+ SASConfigPage.STP_SSP_MCT_TMO);
+ pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.STP_FRM_TMO 0x%08x\n",
+ SASConfigPage.STP_FRM_TMO);
+ pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.STP_IDLE_TMO 0x%08x\n",
+ SASConfigPage.STP_IDLE_TMO);
+ pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.OPNRJT_RTRY_INTVL 0x%08x\n",
+ SASConfigPage.OPNRJT_RTRY_INTVL);
+ pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO 0x%08x\n",
+ SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO);
+ pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR 0x%08x\n",
+ SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR);
+ pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.MAX_AIP 0x%08x\n",
+ SASConfigPage.MAX_AIP);
memcpy(&payload.cfg_pg, &SASConfigPage,
sizeof(SASProtocolTimerConfig_t));
@@ -1346,18 +1318,18 @@ pm80xx_get_encrypt_info(struct pm8001_hba_info *pm8001_ha)
SCRATCH_PAD3_SMB_ENABLED)
pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB;
pm8001_ha->encrypt_info.status = 0;
- PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
- "Encryption: SCRATCH_PAD3_ENC_READY 0x%08X."
- "Cipher mode 0x%x Sec mode 0x%x status 0x%x\n",
- scratch3_value, pm8001_ha->encrypt_info.cipher_mode,
- pm8001_ha->encrypt_info.sec_mode,
- pm8001_ha->encrypt_info.status));
+ pm8001_dbg(pm8001_ha, INIT,
+ "Encryption: SCRATCH_PAD3_ENC_READY 0x%08X.Cipher mode 0x%x Sec mode 0x%x status 0x%x\n",
+ scratch3_value,
+ pm8001_ha->encrypt_info.cipher_mode,
+ pm8001_ha->encrypt_info.sec_mode,
+ pm8001_ha->encrypt_info.status);
ret = 0;
} else if ((scratch3_value & SCRATCH_PAD3_ENC_READY) ==
SCRATCH_PAD3_ENC_DISABLED) {
- PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
- "Encryption: SCRATCH_PAD3_ENC_DISABLED 0x%08X\n",
- scratch3_value));
+ pm8001_dbg(pm8001_ha, INIT,
+ "Encryption: SCRATCH_PAD3_ENC_DISABLED 0x%08X\n",
+ scratch3_value);
pm8001_ha->encrypt_info.status = 0xFFFFFFFF;
pm8001_ha->encrypt_info.cipher_mode = 0;
pm8001_ha->encrypt_info.sec_mode = 0;
@@ -1377,12 +1349,12 @@ pm80xx_get_encrypt_info(struct pm8001_hba_info *pm8001_ha)
if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
SCRATCH_PAD3_SMB_ENABLED)
pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB;
- PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
- "Encryption: SCRATCH_PAD3_DIS_ERR 0x%08X."
- "Cipher mode 0x%x sec mode 0x%x status 0x%x\n",
- scratch3_value, pm8001_ha->encrypt_info.cipher_mode,
- pm8001_ha->encrypt_info.sec_mode,
- pm8001_ha->encrypt_info.status));
+ pm8001_dbg(pm8001_ha, INIT,
+ "Encryption: SCRATCH_PAD3_DIS_ERR 0x%08X.Cipher mode 0x%x sec mode 0x%x status 0x%x\n",
+ scratch3_value,
+ pm8001_ha->encrypt_info.cipher_mode,
+ pm8001_ha->encrypt_info.sec_mode,
+ pm8001_ha->encrypt_info.status);
} else if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) ==
SCRATCH_PAD3_ENC_ENA_ERR) {
@@ -1400,12 +1372,12 @@ pm80xx_get_encrypt_info(struct pm8001_hba_info *pm8001_ha)
SCRATCH_PAD3_SMB_ENABLED)
pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB;
- PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
- "Encryption: SCRATCH_PAD3_ENA_ERR 0x%08X."
- "Cipher mode 0x%x sec mode 0x%x status 0x%x\n",
- scratch3_value, pm8001_ha->encrypt_info.cipher_mode,
- pm8001_ha->encrypt_info.sec_mode,
- pm8001_ha->encrypt_info.status));
+ pm8001_dbg(pm8001_ha, INIT,
+ "Encryption: SCRATCH_PAD3_ENA_ERR 0x%08X.Cipher mode 0x%x sec mode 0x%x status 0x%x\n",
+ scratch3_value,
+ pm8001_ha->encrypt_info.cipher_mode,
+ pm8001_ha->encrypt_info.sec_mode,
+ pm8001_ha->encrypt_info.status);
}
return ret;
}
@@ -1435,9 +1407,9 @@ static int pm80xx_encrypt_update(struct pm8001_hba_info *pm8001_ha)
payload.new_curidx_ksop = ((1 << 24) | (1 << 16) | (1 << 8) |
KEK_MGMT_SUBOP_KEYCARDUPDATE);
- PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
- "Saving Encryption info to flash. payload 0x%x\n",
- payload.new_curidx_ksop));
+ pm8001_dbg(pm8001_ha, DEV,
+ "Saving Encryption info to flash. payload 0x%x\n",
+ payload.new_curidx_ksop);
rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
sizeof(payload), 0);
@@ -1458,8 +1430,7 @@ static int pm80xx_chip_init(struct pm8001_hba_info *pm8001_ha)
/* check the firmware status */
if (-1 == check_fw_ready(pm8001_ha)) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("Firmware is not ready!\n"));
+ pm8001_dbg(pm8001_ha, FAIL, "Firmware is not ready!\n");
return -EBUSY;
}
@@ -1483,8 +1454,7 @@ static int pm80xx_chip_init(struct pm8001_hba_info *pm8001_ha)
}
/* notify firmware update finished and check initialization status */
if (0 == mpi_init_check(pm8001_ha)) {
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("MPI initialize successful!\n"));
+ pm8001_dbg(pm8001_ha, INIT, "MPI initialize successful!\n");
} else
return -EBUSY;
@@ -1493,16 +1463,13 @@ static int pm80xx_chip_init(struct pm8001_hba_info *pm8001_ha)
/* Check for encryption */
if (pm8001_ha->chip->encrypt) {
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("Checking for encryption\n"));
+ pm8001_dbg(pm8001_ha, INIT, "Checking for encryption\n");
ret = pm80xx_get_encrypt_info(pm8001_ha);
if (ret == -1) {
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("Encryption error !!\n"));
+ pm8001_dbg(pm8001_ha, INIT, "Encryption error !!\n");
if (pm8001_ha->encrypt_info.status == 0x81) {
- PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
- "Encryption enabled with error."
- "Saving encryption key to flash\n"));
+ pm8001_dbg(pm8001_ha, INIT,
+ "Encryption enabled with error.Saving encryption key to flash\n");
pm80xx_encrypt_update(pm8001_ha);
}
}
@@ -1533,8 +1500,7 @@ static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
} while ((value != 0) && (--max_wait_count));
if (!max_wait_count) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("TIMEOUT:IBDB value/=%x\n", value));
+ pm8001_dbg(pm8001_ha, FAIL, "TIMEOUT:IBDB value/=%x\n", value);
return -1;
}
@@ -1551,9 +1517,8 @@ static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
break;
} while (--max_wait_count);
if (!max_wait_count) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk(" TIME OUT MPI State = 0x%x\n",
- gst_len_mpistate & GST_MPI_STATE_MASK));
+ pm8001_dbg(pm8001_ha, FAIL, " TIME OUT MPI State = 0x%x\n",
+ gst_len_mpistate & GST_MPI_STATE_MASK);
return -1;
}
@@ -1581,9 +1546,9 @@ pm80xx_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
u32 r1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
u32 r2 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2);
u32 r3 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3);
- PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
- "MPI state is not ready scratch: %x:%x:%x:%x\n",
- r0, r1, r2, r3));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "MPI state is not ready scratch: %x:%x:%x:%x\n",
+ r0, r1, r2, r3);
/* if things aren't ready but the bootloader is ok then
* try the reset anyway.
*/
@@ -1593,25 +1558,25 @@ pm80xx_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
}
/* checked for reset register normal state; 0x0 */
regval = pm8001_cr32(pm8001_ha, 0, SPC_REG_SOFT_RESET);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("reset register before write : 0x%x\n", regval));
+ pm8001_dbg(pm8001_ha, INIT, "reset register before write : 0x%x\n",
+ regval);
pm8001_cw32(pm8001_ha, 0, SPC_REG_SOFT_RESET, SPCv_NORMAL_RESET_VALUE);
msleep(500);
regval = pm8001_cr32(pm8001_ha, 0, SPC_REG_SOFT_RESET);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("reset register after write 0x%x\n", regval));
+ pm8001_dbg(pm8001_ha, INIT, "reset register after write 0x%x\n",
+ regval);
if ((regval & SPCv_SOFT_RESET_READ_MASK) ==
SPCv_SOFT_RESET_NORMAL_RESET_OCCURED) {
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" soft reset successful [regval: 0x%x]\n",
- regval));
+ pm8001_dbg(pm8001_ha, MSG,
+ " soft reset successful [regval: 0x%x]\n",
+ regval);
} else {
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" soft reset failed [regval: 0x%x]\n",
- regval));
+ pm8001_dbg(pm8001_ha, MSG,
+ " soft reset failed [regval: 0x%x]\n",
+ regval);
/* check bootloader is successfully executed or in HDA mode */
bootloader_state =
@@ -1619,28 +1584,27 @@ pm80xx_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
SCRATCH_PAD1_BOOTSTATE_MASK;
if (bootloader_state == SCRATCH_PAD1_BOOTSTATE_HDA_SEEPROM) {
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
- "Bootloader state - HDA mode SEEPROM\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ "Bootloader state - HDA mode SEEPROM\n");
} else if (bootloader_state ==
SCRATCH_PAD1_BOOTSTATE_HDA_BOOTSTRAP) {
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
- "Bootloader state - HDA mode Bootstrap Pin\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ "Bootloader state - HDA mode Bootstrap Pin\n");
} else if (bootloader_state ==
SCRATCH_PAD1_BOOTSTATE_HDA_SOFTRESET) {
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
- "Bootloader state - HDA mode soft reset\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ "Bootloader state - HDA mode soft reset\n");
} else if (bootloader_state ==
SCRATCH_PAD1_BOOTSTATE_CRIT_ERROR) {
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
- "Bootloader state-HDA mode critical error\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ "Bootloader state-HDA mode critical error\n");
}
return -EBUSY;
}
/* check the firmware status after reset */
if (-1 == check_fw_ready(pm8001_ha)) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("Firmware is not ready!\n"));
+ pm8001_dbg(pm8001_ha, FAIL, "Firmware is not ready!\n");
/* check iButton feature support for motherboard controller */
if (pm8001_ha->pdev->subsystem_vendor !=
PCI_VENDOR_ID_ADAPTEC2 &&
@@ -1652,21 +1616,18 @@ pm80xx_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
ibutton1 = pm8001_cr32(pm8001_ha, 0,
MSGU_HOST_SCRATCH_PAD_7);
if (!ibutton0 && !ibutton1) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("iButton Feature is"
- " not Available!!!\n"));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "iButton Feature is not Available!!!\n");
return -EBUSY;
}
if (ibutton0 == 0xdeadbeef && ibutton1 == 0xdeadbeef) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("CRC Check for iButton"
- " Feature Failed!!!\n"));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "CRC Check for iButton Feature Failed!!!\n");
return -EBUSY;
}
}
}
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("SPCv soft reset Complete\n"));
+ pm8001_dbg(pm8001_ha, INIT, "SPCv soft reset Complete\n");
return 0;
}
@@ -1674,13 +1635,11 @@ static void pm80xx_hw_chip_rst(struct pm8001_hba_info *pm8001_ha)
{
u32 i;
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("chip reset start\n"));
+ pm8001_dbg(pm8001_ha, INIT, "chip reset start\n");
/* do SPCv chip reset. */
pm8001_cw32(pm8001_ha, 0, SPC_REG_SOFT_RESET, 0x11);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("SPC soft reset Complete\n"));
+ pm8001_dbg(pm8001_ha, INIT, "SPC soft reset Complete\n");
/* Check this ..whether delay is required or no */
/* delay 10 usec */
@@ -1692,8 +1651,7 @@ static void pm80xx_hw_chip_rst(struct pm8001_hba_info *pm8001_ha)
mdelay(1);
} while ((--i) != 0);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("chip reset finished\n"));
+ pm8001_dbg(pm8001_ha, INIT, "chip reset finished\n");
}
/**
@@ -1769,15 +1727,14 @@ static void pm80xx_send_abort_all(struct pm8001_hba_info *pm8001_ha,
int ret;
if (!pm8001_ha_dev) {
- PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("dev is null\n"));
+ pm8001_dbg(pm8001_ha, FAIL, "dev is null\n");
return;
}
task = sas_alloc_slow_task(GFP_ATOMIC);
if (!task) {
- PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("cannot "
- "allocate task\n"));
+ pm8001_dbg(pm8001_ha, FAIL, "cannot allocate task\n");
return;
}
@@ -1803,8 +1760,7 @@ static void pm80xx_send_abort_all(struct pm8001_hba_info *pm8001_ha,
ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort,
sizeof(task_abort), 0);
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("Executing abort task end\n"));
+ pm8001_dbg(pm8001_ha, FAIL, "Executing abort task end\n");
if (ret) {
sas_free_task(task);
pm8001_tag_free(pm8001_ha, ccb_tag);
@@ -1827,8 +1783,7 @@ static void pm80xx_send_read_log(struct pm8001_hba_info *pm8001_ha,
task = sas_alloc_slow_task(GFP_ATOMIC);
if (!task) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("cannot allocate task !!!\n"));
+ pm8001_dbg(pm8001_ha, FAIL, "cannot allocate task !!!\n");
return;
}
task->task_done = pm8001_task_done;
@@ -1836,8 +1791,7 @@ static void pm80xx_send_read_log(struct pm8001_hba_info *pm8001_ha,
res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
if (res) {
sas_free_task(task);
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("cannot allocate tag !!!\n"));
+ pm8001_dbg(pm8001_ha, FAIL, "cannot allocate tag !!!\n");
return;
}
@@ -1848,8 +1802,8 @@ static void pm80xx_send_read_log(struct pm8001_hba_info *pm8001_ha,
if (!dev) {
sas_free_task(task);
pm8001_tag_free(pm8001_ha, ccb_tag);
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("Domain device cannot be allocated\n"));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "Domain device cannot be allocated\n");
return;
}
@@ -1882,7 +1836,7 @@ static void pm80xx_send_read_log(struct pm8001_hba_info *pm8001_ha,
res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd,
sizeof(sata_cmd), 0);
- PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("Executing read log end\n"));
+ pm8001_dbg(pm8001_ha, FAIL, "Executing read log end\n");
if (res) {
sas_free_task(task);
pm8001_tag_free(pm8001_ha, ccb_tag);
@@ -1928,27 +1882,24 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
t = ccb->task;
if (status && status != IO_UNDERFLOW)
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("sas IO status 0x%x\n", status));
+ pm8001_dbg(pm8001_ha, FAIL, "sas IO status 0x%x\n", status);
if (unlikely(!t || !t->lldd_task || !t->dev))
return;
ts = &t->task_status;
- PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
- "tag::0x%x, status::0x%x task::0x%p\n", tag, status, t));
+ pm8001_dbg(pm8001_ha, DEV,
+ "tag::0x%x, status::0x%x task::0x%p\n", tag, status, t);
/* Print sas address of IO failed device */
if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
(status != IO_UNDERFLOW))
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("SAS Address of IO Failure Drive"
- ":%016llx", SAS_ADDR(t->dev->sas_addr)));
+ pm8001_dbg(pm8001_ha, FAIL, "SAS Address of IO Failure Drive:%016llx\n",
+ SAS_ADDR(t->dev->sas_addr));
switch (status) {
case IO_SUCCESS:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_SUCCESS ,param = 0x%x\n",
- param));
+ pm8001_dbg(pm8001_ha, IO, "IO_SUCCESS ,param = 0x%x\n",
+ param);
if (param == 0) {
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAM_STAT_GOOD;
@@ -1960,73 +1911,83 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
sas_ssp_task_response(pm8001_ha->dev, t, iu);
}
if (pm8001_dev)
- pm8001_dev->running_req--;
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_ABORTED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_ABORTED IOMB Tag\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_ABORTED IOMB Tag\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_ABORTED_TASK;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_UNDERFLOW:
/* SSP Completion with error */
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_UNDERFLOW ,param = 0x%x\n",
- param));
+ pm8001_dbg(pm8001_ha, IO, "IO_UNDERFLOW ,param = 0x%x\n",
+ param);
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_UNDERRUN;
ts->residual = param;
if (pm8001_dev)
- pm8001_dev->running_req--;
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_NO_DEVICE:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_NO_DEVICE\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_NO_DEVICE\n");
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_PHY_DOWN;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_BREAK:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
/* Force the midlayer to retry */
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_PHY_NOT_READY:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_INVALID_SSP_RSP_FRAME:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_INVALID_SSP_RSP_FRAME\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_XFER_ERROR_INVALID_SSP_RSP_FRAME\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_EPROTO;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_OPEN_CNX_ERROR_BREAK:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED:
@@ -2034,8 +1995,7 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST:
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE:
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_UNKNOWN;
@@ -2045,67 +2005,78 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
break;
case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_BAD_DESTINATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_BAD_DEST;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
- PM8001_IO_DBG(pm8001_ha, pm8001_printk(
- "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_CONN_RATE;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n");
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_NAK_RECEIVED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_NAK_RECEIVED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_ACK_NAK_TIMEOUT\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_NAK_R_ERR;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_DMA:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_DMA\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_DMA\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_OPEN_RETRY_TIMEOUT:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_OFFSET_MISMATCH:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_OFFSET_MISMATCH\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_PORT_IN_RESET:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_PORT_IN_RESET\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_PORT_IN_RESET\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_DS_NON_OPERATIONAL:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_DS_NON_OPERATIONAL\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_DS_NON_OPERATIONAL\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
if (!t->uldd_task)
@@ -2114,51 +2085,55 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
IO_DS_NON_OPERATIONAL);
break;
case IO_DS_IN_RECOVERY:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_DS_IN_RECOVERY\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_DS_IN_RECOVERY\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_TM_TAG_NOT_FOUND:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_TM_TAG_NOT_FOUND\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_TM_TAG_NOT_FOUND\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_SSP_EXT_IU_ZERO_LEN_ERROR:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_SSP_EXT_IU_ZERO_LEN_ERROR\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_SSP_EXT_IU_ZERO_LEN_ERROR\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
default:
- PM8001_DEVIO_DBG(pm8001_ha,
- pm8001_printk("Unknown status 0x%x\n", status));
+ pm8001_dbg(pm8001_ha, DEVIO, "Unknown status 0x%x\n", status);
/* not allowed case. Therefore, return failed status */
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
}
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("scsi_status = 0x%x\n ",
- psspPayload->ssp_resp_iu.status));
+ pm8001_dbg(pm8001_ha, IO, "scsi_status = 0x%x\n ",
+ psspPayload->ssp_resp_iu.status);
spin_lock_irqsave(&t->task_state_lock, flags);
t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
spin_unlock_irqrestore(&t->task_state_lock, flags);
- PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
- "task 0x%p done with io_status 0x%x resp 0x%x "
- "stat 0x%x but aborted by upper layer!\n",
- t, status, ts->resp, ts->stat));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
+ t, status, ts->resp, ts->stat);
if (t->slow_task)
complete(&t->slow_task->completion);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
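Alongside the logging conversion, every completion leg in mpi_ssp_completion() now balances the per-device request count with atomic_dec(&pm8001_dev->running_req) instead of a plain pm8001_dev->running_req--, and the legs that previously leaked the count (IO_ABORTED, IO_NO_DEVICE, the open-reject cases, and the default case) gain the decrement. This presumes running_req was converted from a plain integer to atomic_t in struct pm8001_device, in a hunk not shown here; roughly:

/*
 * Illustrative sketch (not driver code) of the type change the
 * atomic_dec() calls above depend on.  The real field sits in
 * struct pm8001_device in pm8001_sas.h.
 */
#include <linux/atomic.h>

struct pm8001_device_sketch {
	atomic_t running_req;	/* was: u32 running_req; */
};

static void io_completed(struct pm8001_device_sketch *dev)
{
	if (dev)	/* lock-free decrement replaces dev->running_req-- */
		atomic_dec(&dev->running_req);
}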
@@ -2188,52 +2163,47 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
t = ccb->task;
pm8001_dev = ccb->device;
if (event)
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("sas IO status 0x%x\n", event));
+ pm8001_dbg(pm8001_ha, FAIL, "sas IO status 0x%x\n", event);
if (unlikely(!t || !t->lldd_task || !t->dev))
return;
ts = &t->task_status;
- PM8001_IOERR_DBG(pm8001_ha,
- pm8001_printk("port_id:0x%x, tag:0x%x, event:0x%x\n",
- port_id, tag, event));
+ pm8001_dbg(pm8001_ha, IOERR, "port_id:0x%x, tag:0x%x, event:0x%x\n",
+ port_id, tag, event);
switch (event) {
case IO_OVERFLOW:
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n");)
+ pm8001_dbg(pm8001_ha, IO, "IO_UNDERFLOW\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
ts->residual = 0;
if (pm8001_dev)
- pm8001_dev->running_req--;
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_BREAK:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n");
pm8001_handle_event(pm8001_ha, t, IO_XFER_ERROR_BREAK);
return;
case IO_XFER_ERROR_PHY_NOT_READY:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
- PM8001_IO_DBG(pm8001_ha, pm8001_printk(
- "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_EPROTO;
break;
case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_UNKNOWN;
break;
case IO_OPEN_CNX_ERROR_BREAK:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
@@ -2244,8 +2214,7 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST:
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE:
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_UNKNOWN;
@@ -2255,94 +2224,86 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
break;
case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_BAD_DESTINATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_BAD_DEST;
break;
case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
- PM8001_IO_DBG(pm8001_ha, pm8001_printk(
- "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_CONN_RATE;
break;
case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
break;
case IO_XFER_ERROR_NAK_RECEIVED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_NAK_RECEIVED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_ACK_NAK_TIMEOUT\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_NAK_R_ERR;
break;
case IO_XFER_OPEN_RETRY_TIMEOUT:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n");
pm8001_handle_event(pm8001_ha, t, IO_XFER_OPEN_RETRY_TIMEOUT);
return;
case IO_XFER_ERROR_UNEXPECTED_PHASE:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_UNEXPECTED_PHASE\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_UNEXPECTED_PHASE\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
break;
case IO_XFER_ERROR_XFER_RDY_OVERRUN:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_XFER_RDY_OVERRUN\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_XFER_RDY_OVERRUN\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
break;
case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
break;
case IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
break;
case IO_XFER_ERROR_OFFSET_MISMATCH:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_OFFSET_MISMATCH\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
break;
case IO_XFER_ERROR_XFER_ZERO_DATA_LEN:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
break;
case IO_XFER_ERROR_INTERNAL_CRC_ERROR:
- PM8001_IOERR_DBG(pm8001_ha,
- pm8001_printk("IO_XFR_ERROR_INTERNAL_CRC_ERROR\n"));
+ pm8001_dbg(pm8001_ha, IOERR,
+ "IO_XFR_ERROR_INTERNAL_CRC_ERROR\n");
/* TBC: used default set values */
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
break;
case IO_XFER_CMD_FRAME_ISSUED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_CMD_FRAME_ISSUED\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_CMD_FRAME_ISSUED\n");
return;
default:
- PM8001_DEVIO_DBG(pm8001_ha,
- pm8001_printk("Unknown status 0x%x\n", event));
+ pm8001_dbg(pm8001_ha, DEVIO, "Unknown status 0x%x\n", event);
/* not allowed case. Therefore, return failed status */
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
@@ -2354,10 +2315,9 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
t->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
spin_unlock_irqrestore(&t->task_state_lock, flags);
- PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
- "task 0x%p done with event 0x%x resp 0x%x "
- "stat 0x%x but aborted by upper layer!\n",
- t, event, ts->resp, ts->stat));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "task 0x%p done with event 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
+ t, event, ts->resp, ts->stat);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
@@ -2392,8 +2352,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
tag = le32_to_cpu(psataPayload->tag);
if (!tag) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("tag null\n"));
+ pm8001_dbg(pm8001_ha, FAIL, "tag null\n");
return;
}
ccb = &pm8001_ha->ccb_info[tag];
@@ -2402,8 +2361,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
t = ccb->task;
pm8001_dev = ccb->device;
} else {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("ccb null\n"));
+ pm8001_dbg(pm8001_ha, FAIL, "ccb null\n");
return;
}
@@ -2411,29 +2369,26 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
if (t->dev && (t->dev->lldd_dev))
pm8001_dev = t->dev->lldd_dev;
} else {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("task null\n"));
+ pm8001_dbg(pm8001_ha, FAIL, "task null\n");
return;
}
if ((pm8001_dev && !(pm8001_dev->id & NCQ_READ_LOG_FLAG))
&& unlikely(!t || !t->lldd_task || !t->dev)) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("task or dev null\n"));
+ pm8001_dbg(pm8001_ha, FAIL, "task or dev null\n");
return;
}
ts = &t->task_status;
if (!ts) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("ts null\n"));
+ pm8001_dbg(pm8001_ha, FAIL, "ts null\n");
return;
}
if (unlikely(status))
- PM8001_IOERR_DBG(pm8001_ha, pm8001_printk(
- "status:0x%x, tag:0x%x, task::0x%p\n",
- status, tag, t));
+ pm8001_dbg(pm8001_ha, IOERR,
+ "status:0x%x, tag:0x%x, task::0x%p\n",
+ status, tag, t);
/* Print sas address of IO failed device */
if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
@@ -2465,20 +2420,20 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
& 0xff000000)) +
pm8001_dev->attached_phy +
0x10);
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("SAS Address of IO Failure Drive:"
- "%08x%08x", temp_sata_addr_hi,
- temp_sata_addr_low));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "SAS Address of IO Failure Drive:%08x%08x\n",
+ temp_sata_addr_hi,
+ temp_sata_addr_low);
} else {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("SAS Address of IO Failure Drive:"
- "%016llx", SAS_ADDR(t->dev->sas_addr)));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "SAS Address of IO Failure Drive:%016llx\n",
+ SAS_ADDR(t->dev->sas_addr));
}
}
switch (status) {
case IO_SUCCESS:
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_SUCCESS\n");
if (param == 0) {
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAM_STAT_GOOD;
@@ -2500,94 +2455,100 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_PROTO_RESPONSE;
ts->residual = param;
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("SAS_PROTO_RESPONSE len = %d\n",
- param));
+ pm8001_dbg(pm8001_ha, IO,
+ "SAS_PROTO_RESPONSE len = %d\n",
+ param);
sata_resp = &psataPayload->sata_resp[0];
resp = (struct ata_task_resp *)ts->buf;
if (t->ata_task.dma_xfer == 0 &&
t->data_dir == DMA_FROM_DEVICE) {
len = sizeof(struct pio_setup_fis);
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("PIO read len = %d\n", len));
+ pm8001_dbg(pm8001_ha, IO,
+ "PIO read len = %d\n", len);
} else if (t->ata_task.use_ncq) {
len = sizeof(struct set_dev_bits_fis);
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("FPDMA len = %d\n", len));
+ pm8001_dbg(pm8001_ha, IO, "FPDMA len = %d\n",
+ len);
} else {
len = sizeof(struct dev_to_host_fis);
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("other len = %d\n", len));
+ pm8001_dbg(pm8001_ha, IO, "other len = %d\n",
+ len);
}
if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) {
resp->frame_len = len;
memcpy(&resp->ending_fis[0], sata_resp, len);
ts->buf_valid_size = sizeof(*resp);
} else
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("response too large\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "response too large\n");
}
if (pm8001_dev)
- pm8001_dev->running_req--;
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_ABORTED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_ABORTED IOMB Tag\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_ABORTED IOMB Tag\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_ABORTED_TASK;
if (pm8001_dev)
- pm8001_dev->running_req--;
+ atomic_dec(&pm8001_dev->running_req);
break;
/* following cases are to do cases */
case IO_UNDERFLOW:
/* SATA Completion with error */
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_UNDERFLOW param = %d\n", param));
+ pm8001_dbg(pm8001_ha, IO, "IO_UNDERFLOW param = %d\n", param);
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_UNDERRUN;
ts->residual = param;
if (pm8001_dev)
- pm8001_dev->running_req--;
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_NO_DEVICE:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_NO_DEVICE\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_NO_DEVICE\n");
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_PHY_DOWN;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_BREAK:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_INTERRUPTED;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_PHY_NOT_READY:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
- PM8001_IO_DBG(pm8001_ha, pm8001_printk(
- "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_EPROTO;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_OPEN_CNX_ERROR_BREAK:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED:
@@ -2595,8 +2556,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST:
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE:
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
if (!t->uldd_task) {
@@ -2610,8 +2570,8 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
}
break;
case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_BAD_DESTINATION\n");
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_BAD_DEST;
@@ -2626,15 +2586,17 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
}
break;
case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
- PM8001_IO_DBG(pm8001_ha, pm8001_printk(
- "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_CONN_RATE;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
- PM8001_IO_DBG(pm8001_ha, pm8001_printk(
- "IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
if (!t->uldd_task) {
@@ -2648,57 +2610,65 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
}
break;
case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_NAK_RECEIVED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_NAK_RECEIVED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_NAK_R_ERR;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_ACK_NAK_TIMEOUT\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_NAK_R_ERR;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_DMA:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_DMA\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_DMA\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_ABORTED_TASK;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_SATA_LINK_TIMEOUT:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_SATA_LINK_TIMEOUT\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_SATA_LINK_TIMEOUT\n");
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_DEV_NO_RESPONSE;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_REJECTED_NCQ_MODE:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_REJECTED_NCQ_MODE\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_REJECTED_NCQ_MODE\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_UNDERRUN;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_OPEN_RETRY_TIMEOUT:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_TO;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_PORT_IN_RESET:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_PORT_IN_RESET\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_PORT_IN_RESET\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_DS_NON_OPERATIONAL:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_DS_NON_OPERATIONAL\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_DS_NON_OPERATIONAL\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
if (!t->uldd_task) {
@@ -2711,14 +2681,14 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
}
break;
case IO_DS_IN_RECOVERY:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_DS_IN_RECOVERY\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_DS_IN_RECOVERY\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_DS_IN_ERROR:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_DS_IN_ERROR\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_DS_IN_ERROR\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
if (!t->uldd_task) {
@@ -2731,18 +2701,21 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
}
break;
case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
default:
- PM8001_DEVIO_DBG(pm8001_ha,
- pm8001_printk("Unknown status 0x%x\n", status));
+ pm8001_dbg(pm8001_ha, DEVIO, "Unknown status 0x%x\n", status);
/* not allowed case. Therefore, return failed status */
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
+ if (pm8001_dev)
+ atomic_dec(&pm8001_dev->running_req);
break;
}
spin_lock_irqsave(&t->task_state_lock, flags);
@@ -2751,10 +2724,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
t->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
spin_unlock_irqrestore(&t->task_state_lock, flags);
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("task 0x%p done with io_status 0x%x"
- " resp 0x%x stat 0x%x but aborted by upper layer!\n",
- t, status, ts->resp, ts->stat));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
+ t, status, ts->resp, ts->stat);
if (t->slow_task)
complete(&t->slow_task->completion);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
@@ -2785,13 +2757,11 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
t = ccb->task;
pm8001_dev = ccb->device;
} else {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("No CCB !!!. returning\n"));
+ pm8001_dbg(pm8001_ha, FAIL, "No CCB !!!. returning\n");
return;
}
if (event)
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("SATA EVENT 0x%x\n", event));
+ pm8001_dbg(pm8001_ha, FAIL, "SATA EVENT 0x%x\n", event);
/* Check if this is NCQ error */
if (event == IO_XFER_ERROR_ABORTED_NCQ_MODE) {
@@ -2804,54 +2774,49 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
}
if (unlikely(!t || !t->lldd_task || !t->dev)) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("task or dev null\n"));
+ pm8001_dbg(pm8001_ha, FAIL, "task or dev null\n");
return;
}
ts = &t->task_status;
- PM8001_IOERR_DBG(pm8001_ha,
- pm8001_printk("port_id:0x%x, tag:0x%x, event:0x%x\n",
- port_id, tag, event));
+ pm8001_dbg(pm8001_ha, IOERR, "port_id:0x%x, tag:0x%x, event:0x%x\n",
+ port_id, tag, event);
switch (event) {
case IO_OVERFLOW:
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_UNDERFLOW\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
ts->residual = 0;
if (pm8001_dev)
- pm8001_dev->running_req--;
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_BREAK:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_INTERRUPTED;
break;
case IO_XFER_ERROR_PHY_NOT_READY:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
- PM8001_IO_DBG(pm8001_ha, pm8001_printk(
- "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_EPROTO;
break;
case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_UNKNOWN;
break;
case IO_OPEN_CNX_ERROR_BREAK:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
@@ -2862,8 +2827,8 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST:
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE:
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED:
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n");
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_DEV_NO_RESPONSE;
if (!t->uldd_task) {
@@ -2877,107 +2842,96 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
}
break;
case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_BAD_DESTINATION\n");
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_BAD_DEST;
break;
case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
- PM8001_IO_DBG(pm8001_ha, pm8001_printk(
- "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_CONN_RATE;
break;
case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
break;
case IO_XFER_ERROR_NAK_RECEIVED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_NAK_RECEIVED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_NAK_R_ERR;
break;
case IO_XFER_ERROR_PEER_ABORTED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_PEER_ABORTED\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PEER_ABORTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_NAK_R_ERR;
break;
case IO_XFER_ERROR_REJECTED_NCQ_MODE:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_REJECTED_NCQ_MODE\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_REJECTED_NCQ_MODE\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_UNDERRUN;
break;
case IO_XFER_OPEN_RETRY_TIMEOUT:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_TO;
break;
case IO_XFER_ERROR_UNEXPECTED_PHASE:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_UNEXPECTED_PHASE\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_UNEXPECTED_PHASE\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_TO;
break;
case IO_XFER_ERROR_XFER_RDY_OVERRUN:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_XFER_RDY_OVERRUN\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_XFER_RDY_OVERRUN\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_TO;
break;
case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_TO;
break;
case IO_XFER_ERROR_OFFSET_MISMATCH:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_OFFSET_MISMATCH\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_TO;
break;
case IO_XFER_ERROR_XFER_ZERO_DATA_LEN:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_TO;
break;
case IO_XFER_CMD_FRAME_ISSUED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_CMD_FRAME_ISSUED\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_CMD_FRAME_ISSUED\n");
break;
case IO_XFER_PIO_SETUP_ERROR:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_PIO_SETUP_ERROR\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_PIO_SETUP_ERROR\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_TO;
break;
case IO_XFER_ERROR_INTERNAL_CRC_ERROR:
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("IO_XFR_ERROR_INTERNAL_CRC_ERROR\n"));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "IO_XFR_ERROR_INTERNAL_CRC_ERROR\n");
/* TBC: used default set values */
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_TO;
break;
case IO_XFER_DMA_ACTIVATE_TIMEOUT:
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("IO_XFR_DMA_ACTIVATE_TIMEOUT\n"));
+ pm8001_dbg(pm8001_ha, FAIL, "IO_XFR_DMA_ACTIVATE_TIMEOUT\n");
/* TBC: used default set values */
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_TO;
break;
default:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("Unknown status 0x%x\n", event));
+ pm8001_dbg(pm8001_ha, IO, "Unknown status 0x%x\n", event);
/* not allowed case. Therefore, return failed status */
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_TO;
@@ -2989,10 +2943,9 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
t->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
spin_unlock_irqrestore(&t->task_state_lock, flags);
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("task 0x%p done with io_status 0x%x"
- " resp 0x%x stat 0x%x but aborted by upper layer!\n",
- t, event, ts->resp, ts->stat));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
+ t, event, ts->resp, ts->stat);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
@@ -3025,94 +2978,87 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts = &t->task_status;
pm8001_dev = ccb->device;
if (status)
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("smp IO status 0x%x\n", status));
+ pm8001_dbg(pm8001_ha, FAIL, "smp IO status 0x%x\n", status);
if (unlikely(!t || !t->lldd_task || !t->dev))
return;
- PM8001_DEV_DBG(pm8001_ha,
- pm8001_printk("tag::0x%x status::0x%x\n", tag, status));
+ pm8001_dbg(pm8001_ha, DEV, "tag::0x%x status::0x%x\n", tag, status);
switch (status) {
case IO_SUCCESS:
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_SUCCESS\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAM_STAT_GOOD;
if (pm8001_dev)
- pm8001_dev->running_req--;
+ atomic_dec(&pm8001_dev->running_req);
if (pm8001_ha->smp_exp_mode == SMP_DIRECT) {
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("DIRECT RESPONSE Length:%d\n",
- param));
+ pm8001_dbg(pm8001_ha, IO,
+ "DIRECT RESPONSE Length:%d\n",
+ param);
pdma_respaddr = (char *)(phys_to_virt(cpu_to_le64
((u64)sg_dma_address
(&t->smp_task.smp_resp))));
for (i = 0; i < param; i++) {
*(pdma_respaddr+i) = psmpPayload->_r_a[i];
- PM8001_IO_DBG(pm8001_ha, pm8001_printk(
- "SMP Byte%d DMA data 0x%x psmp 0x%x\n",
- i, *(pdma_respaddr+i),
- psmpPayload->_r_a[i]));
+ pm8001_dbg(pm8001_ha, IO,
+ "SMP Byte%d DMA data 0x%x psmp 0x%x\n",
+ i, *(pdma_respaddr + i),
+ psmpPayload->_r_a[i]);
}
}
break;
case IO_ABORTED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_ABORTED IOMB\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_ABORTED IOMB\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_ABORTED_TASK;
if (pm8001_dev)
- pm8001_dev->running_req--;
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_OVERFLOW:
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_UNDERFLOW\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
ts->residual = 0;
if (pm8001_dev)
- pm8001_dev->running_req--;
+ atomic_dec(&pm8001_dev->running_req);
break;
case IO_NO_DEVICE:
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_NO_DEVICE\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_NO_DEVICE\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_PHY_DOWN;
break;
case IO_ERROR_HW_TIMEOUT:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_ERROR_HW_TIMEOUT\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_ERROR_HW_TIMEOUT\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAM_STAT_BUSY;
break;
case IO_XFER_ERROR_BREAK:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAM_STAT_BUSY;
break;
case IO_XFER_ERROR_PHY_NOT_READY:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAM_STAT_BUSY;
break;
case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_UNKNOWN;
break;
case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_UNKNOWN;
break;
case IO_OPEN_CNX_ERROR_BREAK:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
@@ -3123,8 +3069,7 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST:
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE:
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_UNKNOWN;
@@ -3133,75 +3078,68 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
break;
case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_BAD_DESTINATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_BAD_DEST;
break;
case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
- PM8001_IO_DBG(pm8001_ha, pm8001_printk(\
- "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_CONN_RATE;
break;
case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
break;
case IO_XFER_ERROR_RX_FRAME:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_ERROR_RX_FRAME\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_RX_FRAME\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
break;
case IO_XFER_OPEN_RETRY_TIMEOUT:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
case IO_ERROR_INTERNAL_SMP_RESOURCE:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_ERROR_INTERNAL_SMP_RESOURCE\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_ERROR_INTERNAL_SMP_RESOURCE\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_QUEUE_FULL;
break;
case IO_PORT_IN_RESET:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_PORT_IN_RESET\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_PORT_IN_RESET\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
case IO_DS_NON_OPERATIONAL:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_DS_NON_OPERATIONAL\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_DS_NON_OPERATIONAL\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
break;
case IO_DS_IN_RECOVERY:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_DS_IN_RECOVERY\n"));
+ pm8001_dbg(pm8001_ha, IO, "IO_DS_IN_RECOVERY\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"));
+ pm8001_dbg(pm8001_ha, IO,
+ "IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n");
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
default:
- PM8001_DEVIO_DBG(pm8001_ha,
- pm8001_printk("Unknown status 0x%x\n", status));
+ pm8001_dbg(pm8001_ha, DEVIO, "Unknown status 0x%x\n", status);
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
/* not allowed case. Therefore, return failed status */
@@ -3213,10 +3151,9 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
t->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
spin_unlock_irqrestore(&t->task_state_lock, flags);
- PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
- "task 0x%p done with io_status 0x%x resp 0x%x"
- "stat 0x%x but aborted by upper layer!\n",
- t, status, ts->resp, ts->stat));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "task 0x%p done with io_status 0x%x resp 0x%xstat 0x%x but aborted by upper layer!\n",
+ t, status, ts->resp, ts->stat);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
@@ -3313,38 +3250,34 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
port->port_state = portstate;
port->wide_port_phymap |= (1U << phy_id);
phy->phy_state = PHY_STATE_LINK_UP_SPCV;
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
- "portid:%d; phyid:%d; linkrate:%d; "
- "portstate:%x; devicetype:%x\n",
- port_id, phy_id, link_rate, portstate, deviceType));
+ pm8001_dbg(pm8001_ha, MSG,
+ "portid:%d; phyid:%d; linkrate:%d; portstate:%x; devicetype:%x\n",
+ port_id, phy_id, link_rate, portstate, deviceType);
switch (deviceType) {
case SAS_PHY_UNUSED:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("device type no device.\n"));
+ pm8001_dbg(pm8001_ha, MSG, "device type no device.\n");
break;
case SAS_END_DEVICE:
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk("end device.\n"));
+ pm8001_dbg(pm8001_ha, MSG, "end device.\n");
pm80xx_chip_phy_ctl_req(pm8001_ha, phy_id,
PHY_NOTIFY_ENABLE_SPINUP);
port->port_attached = 1;
pm8001_get_lrate_mode(phy, link_rate);
break;
case SAS_EDGE_EXPANDER_DEVICE:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("expander device.\n"));
+ pm8001_dbg(pm8001_ha, MSG, "expander device.\n");
port->port_attached = 1;
pm8001_get_lrate_mode(phy, link_rate);
break;
case SAS_FANOUT_EXPANDER_DEVICE:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("fanout expander device.\n"));
+ pm8001_dbg(pm8001_ha, MSG, "fanout expander device.\n");
port->port_attached = 1;
pm8001_get_lrate_mode(phy, link_rate);
break;
default:
- PM8001_DEVIO_DBG(pm8001_ha,
- pm8001_printk("unknown device type(%x)\n", deviceType));
+ pm8001_dbg(pm8001_ha, DEVIO, "unknown device type(%x)\n",
+ deviceType);
break;
}
phy->phy_type |= PORT_TYPE_SAS;
@@ -3363,7 +3296,7 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr);
spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
if (pm8001_ha->flags == PM8001F_RUN_TIME)
- msleep(200);/*delay a moment to wait disk to spinup*/
+ mdelay(200); /* delay a moment to wait for disk to spin up */
pm8001_bytes_dmaed(pm8001_ha, phy_id);
}
@@ -3392,9 +3325,9 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
struct sas_ha_struct *sas_ha = pm8001_ha->sas;
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
unsigned long flags;
- PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
- "port id %d, phy id %d link_rate %d portstate 0x%x\n",
- port_id, phy_id, link_rate, portstate));
+ pm8001_dbg(pm8001_ha, DEVIO,
+ "port id %d, phy id %d link_rate %d portstate 0x%x\n",
+ port_id, phy_id, link_rate, portstate);
port->port_state = portstate;
phy->phy_state = PHY_STATE_LINK_UP_SPCV;
@@ -3444,10 +3377,10 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
case PORT_VALID:
break;
case PORT_INVALID:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" PortInvalid portID %d\n", port_id));
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" Last phy Down and port invalid\n"));
+ pm8001_dbg(pm8001_ha, MSG, " PortInvalid portID %d\n",
+ port_id);
+ pm8001_dbg(pm8001_ha, MSG,
+ " Last phy Down and port invalid\n");
if (port_sata) {
phy->phy_type = 0;
port->port_attached = 0;
@@ -3457,19 +3390,18 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
sas_phy_disconnected(&phy->sas_phy);
break;
case PORT_IN_RESET:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" Port In Reset portID %d\n", port_id));
+ pm8001_dbg(pm8001_ha, MSG, " Port In Reset portID %d\n",
+ port_id);
break;
case PORT_NOT_ESTABLISHED:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" Phy Down and PORT_NOT_ESTABLISHED\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ " Phy Down and PORT_NOT_ESTABLISHED\n");
port->port_attached = 0;
break;
case PORT_LOSTCOMM:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" Phy Down and PORT_LOSTCOMM\n"));
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" Last phy Down and port invalid\n"));
+ pm8001_dbg(pm8001_ha, MSG, " Phy Down and PORT_LOSTCOMM\n");
+ pm8001_dbg(pm8001_ha, MSG,
+ " Last phy Down and port invalid\n");
if (port_sata) {
port->port_attached = 0;
phy->phy_type = 0;
@@ -3480,9 +3412,9 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
break;
default:
port->port_attached = 0;
- PM8001_DEVIO_DBG(pm8001_ha,
- pm8001_printk(" Phy Down and(default) = 0x%x\n",
- portstate));
+ pm8001_dbg(pm8001_ha, DEVIO,
+ " Phy Down and(default) = 0x%x\n",
+ portstate);
break;
}
@@ -3503,9 +3435,9 @@ static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
le32_to_cpu(pPayload->phyid);
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("phy start resp status:0x%x, phyid:0x%x\n",
- status, phy_id));
+ pm8001_dbg(pm8001_ha, INIT,
+ "phy start resp status:0x%x, phyid:0x%x\n",
+ status, phy_id);
if (status == 0) {
phy->phy_state = PHY_LINK_DOWN;
if (pm8001_ha->flags == PM8001F_RUN_TIME &&
@@ -3532,18 +3464,18 @@ static int mpi_thermal_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
u32 rht_lht = le32_to_cpu(pPayload->rht_lht);
if (thermal_event & 0x40) {
- PM8001_IO_DBG(pm8001_ha, pm8001_printk(
- "Thermal Event: Local high temperature violated!\n"));
- PM8001_IO_DBG(pm8001_ha, pm8001_printk(
- "Thermal Event: Measured local high temperature %d\n",
- ((rht_lht & 0xFF00) >> 8)));
+ pm8001_dbg(pm8001_ha, IO,
+ "Thermal Event: Local high temperature violated!\n");
+ pm8001_dbg(pm8001_ha, IO,
+ "Thermal Event: Measured local high temperature %d\n",
+ ((rht_lht & 0xFF00) >> 8));
}
if (thermal_event & 0x10) {
- PM8001_IO_DBG(pm8001_ha, pm8001_printk(
- "Thermal Event: Remote high temperature violated!\n"));
- PM8001_IO_DBG(pm8001_ha, pm8001_printk(
- "Thermal Event: Measured remote high temperature %d\n",
- ((rht_lht & 0xFF000000) >> 24)));
+ pm8001_dbg(pm8001_ha, IO,
+ "Thermal Event: Remote high temperature violated!\n");
+ pm8001_dbg(pm8001_ha, IO,
+ "Thermal Event: Measured remote high temperature %d\n",
+ ((rht_lht & 0xFF000000) >> 24));
}
return 0;
}
@@ -3572,42 +3504,36 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
struct pm8001_port *port = &pm8001_ha->port[port_id];
struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id];
- PM8001_DEV_DBG(pm8001_ha,
- pm8001_printk("portid:%d phyid:%d event:0x%x status:0x%x\n",
- port_id, phy_id, eventType, status));
+ pm8001_dbg(pm8001_ha, DEV,
+ "portid:%d phyid:%d event:0x%x status:0x%x\n",
+ port_id, phy_id, eventType, status);
switch (eventType) {
case HW_EVENT_SAS_PHY_UP:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_PHY_START_STATUS\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_START_STATUS\n");
hw_event_sas_phy_up(pm8001_ha, piomb);
break;
case HW_EVENT_SATA_PHY_UP:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_SATA_PHY_UP\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_SATA_PHY_UP\n");
hw_event_sata_phy_up(pm8001_ha, piomb);
break;
case HW_EVENT_SATA_SPINUP_HOLD:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_SATA_SPINUP_HOLD\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_SATA_SPINUP_HOLD\n");
sas_ha->notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD);
break;
case HW_EVENT_PHY_DOWN:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_PHY_DOWN\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_DOWN\n");
hw_event_phy_down(pm8001_ha, piomb);
if (pm8001_ha->reset_in_progress) {
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("Reset in progress\n"));
+ pm8001_dbg(pm8001_ha, MSG, "Reset in progress\n");
return 0;
}
phy->phy_attached = 0;
phy->phy_state = PHY_LINK_DISABLE;
break;
case HW_EVENT_PORT_INVALID:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_PORT_INVALID\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_INVALID\n");
sas_phy_disconnected(sas_phy);
phy->phy_attached = 0;
sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
@@ -3615,8 +3541,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
/* the broadcast change primitive received, tell the LIBSAS this event
to revalidate the sas domain*/
case HW_EVENT_BROADCAST_CHANGE:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_BROADCAST_CHANGE\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_BROADCAST_CHANGE\n");
pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_BROADCAST_CHANGE,
port_id, phy_id, 1, 0);
spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
@@ -3625,81 +3550,74 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
break;
case HW_EVENT_PHY_ERROR:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_PHY_ERROR\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_ERROR\n");
sas_phy_disconnected(&phy->sas_phy);
phy->phy_attached = 0;
sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR);
break;
case HW_EVENT_BROADCAST_EXP:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_BROADCAST_EXP\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_BROADCAST_EXP\n");
spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
sas_phy->sas_prim = HW_EVENT_BROADCAST_EXP;
spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
break;
case HW_EVENT_LINK_ERR_INVALID_DWORD:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_LINK_ERR_INVALID_DWORD\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ "HW_EVENT_LINK_ERR_INVALID_DWORD\n");
pm80xx_hw_event_ack_req(pm8001_ha, 0,
HW_EVENT_LINK_ERR_INVALID_DWORD, port_id, phy_id, 0, 0);
break;
case HW_EVENT_LINK_ERR_DISPARITY_ERROR:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_LINK_ERR_DISPARITY_ERROR\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ "HW_EVENT_LINK_ERR_DISPARITY_ERROR\n");
pm80xx_hw_event_ack_req(pm8001_ha, 0,
HW_EVENT_LINK_ERR_DISPARITY_ERROR,
port_id, phy_id, 0, 0);
break;
case HW_EVENT_LINK_ERR_CODE_VIOLATION:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_LINK_ERR_CODE_VIOLATION\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ "HW_EVENT_LINK_ERR_CODE_VIOLATION\n");
pm80xx_hw_event_ack_req(pm8001_ha, 0,
HW_EVENT_LINK_ERR_CODE_VIOLATION,
port_id, phy_id, 0, 0);
break;
case HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH:
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
- "HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ "HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH\n");
pm80xx_hw_event_ack_req(pm8001_ha, 0,
HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH,
port_id, phy_id, 0, 0);
break;
case HW_EVENT_MALFUNCTION:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_MALFUNCTION\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_MALFUNCTION\n");
break;
case HW_EVENT_BROADCAST_SES:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_BROADCAST_SES\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_BROADCAST_SES\n");
spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
sas_phy->sas_prim = HW_EVENT_BROADCAST_SES;
spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
break;
case HW_EVENT_INBOUND_CRC_ERROR:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_INBOUND_CRC_ERROR\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_INBOUND_CRC_ERROR\n");
pm80xx_hw_event_ack_req(pm8001_ha, 0,
HW_EVENT_INBOUND_CRC_ERROR,
port_id, phy_id, 0, 0);
break;
case HW_EVENT_HARD_RESET_RECEIVED:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_HARD_RESET_RECEIVED\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_HARD_RESET_RECEIVED\n");
sas_ha->notify_port_event(sas_phy, PORTE_HARD_RESET);
break;
case HW_EVENT_ID_FRAME_TIMEOUT:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_ID_FRAME_TIMEOUT\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_ID_FRAME_TIMEOUT\n");
sas_phy_disconnected(sas_phy);
phy->phy_attached = 0;
sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
break;
case HW_EVENT_LINK_ERR_PHY_RESET_FAILED:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_LINK_ERR_PHY_RESET_FAILED\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ "HW_EVENT_LINK_ERR_PHY_RESET_FAILED\n");
pm80xx_hw_event_ack_req(pm8001_ha, 0,
HW_EVENT_LINK_ERR_PHY_RESET_FAILED,
port_id, phy_id, 0, 0);
@@ -3708,8 +3626,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
break;
case HW_EVENT_PORT_RESET_TIMER_TMO:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_PORT_RESET_TIMER_TMO\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_RESET_TIMER_TMO\n");
pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
port_id, phy_id, 0, 0);
sas_phy_disconnected(sas_phy);
@@ -3723,8 +3640,8 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
}
break;
case HW_EVENT_PORT_RECOVERY_TIMER_TMO:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_PORT_RECOVERY_TIMER_TMO\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ "HW_EVENT_PORT_RECOVERY_TIMER_TMO\n");
pm80xx_hw_event_ack_req(pm8001_ha, 0,
HW_EVENT_PORT_RECOVERY_TIMER_TMO,
port_id, phy_id, 0, 0);
@@ -3738,13 +3655,11 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
}
break;
case HW_EVENT_PORT_RECOVER:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_PORT_RECOVER\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_RECOVER\n");
hw_event_port_recover(pm8001_ha, piomb);
break;
case HW_EVENT_PORT_RESET_COMPLETE:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_PORT_RESET_COMPLETE\n"));
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_RESET_COMPLETE\n");
if (pm8001_ha->phy[phy_id].reset_completion) {
pm8001_ha->phy[phy_id].port_reset_status =
PORT_RESET_SUCCESS;
@@ -3753,12 +3668,11 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
}
break;
case EVENT_BROADCAST_ASYNCH_EVENT:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("EVENT_BROADCAST_ASYNCH_EVENT\n"));
+ pm8001_dbg(pm8001_ha, MSG, "EVENT_BROADCAST_ASYNCH_EVENT\n");
break;
default:
- PM8001_DEVIO_DBG(pm8001_ha,
- pm8001_printk("Unknown event type 0x%x\n", eventType));
+ pm8001_dbg(pm8001_ha, DEVIO, "Unknown event type 0x%x\n",
+ eventType);
break;
}
return 0;
@@ -3778,9 +3692,8 @@ static int mpi_phy_stop_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
u32 phyid =
le32_to_cpu(pPayload->phyid) & 0xFF;
struct pm8001_phy *phy = &pm8001_ha->phy[phyid];
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("phy:0x%x status:0x%x\n",
- phyid, status));
+ pm8001_dbg(pm8001_ha, MSG, "phy:0x%x status:0x%x\n",
+ phyid, status);
if (status == PHY_STOP_SUCCESS ||
status == PHY_STOP_ERR_DEVICE_ATTACHED)
phy->phy_state = PHY_LINK_DISABLE;
@@ -3800,9 +3713,9 @@ static int mpi_set_controller_config_resp(struct pm8001_hba_info *pm8001_ha,
u32 status = le32_to_cpu(pPayload->status);
u32 err_qlfr_pgcd = le32_to_cpu(pPayload->err_qlfr_pgcd);
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
- "SET CONTROLLER RESP: status 0x%x qlfr_pgcd 0x%x\n",
- status, err_qlfr_pgcd));
+ pm8001_dbg(pm8001_ha, MSG,
+ "SET CONTROLLER RESP: status 0x%x qlfr_pgcd 0x%x\n",
+ status, err_qlfr_pgcd);
return 0;
}
@@ -3815,8 +3728,7 @@ static int mpi_set_controller_config_resp(struct pm8001_hba_info *pm8001_ha,
static int mpi_get_controller_config_resp(struct pm8001_hba_info *pm8001_ha,
void *piomb)
{
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" pm80xx_addition_functionality\n"));
+ pm8001_dbg(pm8001_ha, MSG, " pm80xx_addition_functionality\n");
return 0;
}
@@ -3829,8 +3741,7 @@ static int mpi_get_controller_config_resp(struct pm8001_hba_info *pm8001_ha,
static int mpi_get_phy_profile_resp(struct pm8001_hba_info *pm8001_ha,
void *piomb)
{
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" pm80xx_addition_functionality\n"));
+ pm8001_dbg(pm8001_ha, MSG, " pm80xx_addition_functionality\n");
return 0;
}
@@ -3842,8 +3753,7 @@ static int mpi_get_phy_profile_resp(struct pm8001_hba_info *pm8001_ha,
*/
static int mpi_flash_op_ext_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" pm80xx_addition_functionality\n"));
+ pm8001_dbg(pm8001_ha, MSG, " pm80xx_addition_functionality\n");
return 0;
}
@@ -3868,15 +3778,14 @@ static int mpi_set_phy_profile_resp(struct pm8001_hba_info *pm8001_ha,
page_code = (u8)((ppc_phyid & 0xFF00) >> 8);
if (status) {
/* status is FAILED */
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("PhyProfile command failed with status "
- "0x%08X \n", status));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "PhyProfile command failed with status 0x%08X\n",
+ status);
rc = -1;
} else {
if (page_code != SAS_PHY_ANALOG_SETTINGS_PAGE) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("Invalid page code 0x%X\n",
- page_code));
+ pm8001_dbg(pm8001_ha, FAIL, "Invalid page code 0x%X\n",
+ page_code);
rc = -1;
}
}
@@ -3898,9 +3807,9 @@ static int mpi_kek_management_resp(struct pm8001_hba_info *pm8001_ha,
u32 kidx_new_curr_ksop = le32_to_cpu(pPayload->kidx_new_curr_ksop);
u32 err_qlfr = le32_to_cpu(pPayload->err_qlfr);
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
- "KEK MGMT RESP. Status 0x%x idx_ksop 0x%x err_qlfr 0x%x\n",
- status, kidx_new_curr_ksop, err_qlfr));
+ pm8001_dbg(pm8001_ha, MSG,
+ "KEK MGMT RESP. Status 0x%x idx_ksop 0x%x err_qlfr 0x%x\n",
+ status, kidx_new_curr_ksop, err_qlfr);
return 0;
}
@@ -3913,8 +3822,7 @@ static int mpi_kek_management_resp(struct pm8001_hba_info *pm8001_ha,
static int mpi_dek_management_resp(struct pm8001_hba_info *pm8001_ha,
void *piomb)
{
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" pm80xx_addition_functionality\n"));
+ pm8001_dbg(pm8001_ha, MSG, " pm80xx_addition_functionality\n");
return 0;
}
@@ -3927,8 +3835,7 @@ static int mpi_dek_management_resp(struct pm8001_hba_info *pm8001_ha,
static int ssp_coalesced_comp_resp(struct pm8001_hba_info *pm8001_ha,
void *piomb)
{
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" pm80xx_addition_functionality\n"));
+ pm8001_dbg(pm8001_ha, MSG, " pm80xx_addition_functionality\n");
return 0;
}
@@ -3945,248 +3852,206 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
switch (opc) {
case OPC_OUB_ECHO:
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_ECHO\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_ECHO\n");
break;
case OPC_OUB_HW_EVENT:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_HW_EVENT\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_HW_EVENT\n");
mpi_hw_event(pm8001_ha, piomb);
break;
case OPC_OUB_THERM_HW_EVENT:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_THERMAL_EVENT\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_THERMAL_EVENT\n");
mpi_thermal_hw_event(pm8001_ha, piomb);
break;
case OPC_OUB_SSP_COMP:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SSP_COMP\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SSP_COMP\n");
mpi_ssp_completion(pm8001_ha, piomb);
break;
case OPC_OUB_SMP_COMP:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SMP_COMP\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SMP_COMP\n");
mpi_smp_completion(pm8001_ha, piomb);
break;
case OPC_OUB_LOCAL_PHY_CNTRL:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_LOCAL_PHY_CNTRL\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_LOCAL_PHY_CNTRL\n");
pm8001_mpi_local_phy_ctl(pm8001_ha, piomb);
break;
case OPC_OUB_DEV_REGIST:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_DEV_REGIST\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_DEV_REGIST\n");
pm8001_mpi_reg_resp(pm8001_ha, piomb);
break;
case OPC_OUB_DEREG_DEV:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("unregister the device\n"));
+ pm8001_dbg(pm8001_ha, MSG, "unregister the device\n");
pm8001_mpi_dereg_resp(pm8001_ha, piomb);
break;
case OPC_OUB_GET_DEV_HANDLE:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_GET_DEV_HANDLE\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GET_DEV_HANDLE\n");
break;
case OPC_OUB_SATA_COMP:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SATA_COMP\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SATA_COMP\n");
mpi_sata_completion(pm8001_ha, piomb);
break;
case OPC_OUB_SATA_EVENT:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SATA_EVENT\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SATA_EVENT\n");
mpi_sata_event(pm8001_ha, piomb);
break;
case OPC_OUB_SSP_EVENT:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SSP_EVENT\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SSP_EVENT\n");
mpi_ssp_event(pm8001_ha, piomb);
break;
case OPC_OUB_DEV_HANDLE_ARRIV:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_DEV_HANDLE_ARRIV\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_DEV_HANDLE_ARRIV\n");
/*This is for target*/
break;
case OPC_OUB_SSP_RECV_EVENT:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SSP_RECV_EVENT\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SSP_RECV_EVENT\n");
/*This is for target*/
break;
case OPC_OUB_FW_FLASH_UPDATE:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_FW_FLASH_UPDATE\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_FW_FLASH_UPDATE\n");
pm8001_mpi_fw_flash_update_resp(pm8001_ha, piomb);
break;
case OPC_OUB_GPIO_RESPONSE:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_GPIO_RESPONSE\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GPIO_RESPONSE\n");
break;
case OPC_OUB_GPIO_EVENT:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_GPIO_EVENT\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GPIO_EVENT\n");
break;
case OPC_OUB_GENERAL_EVENT:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_GENERAL_EVENT\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GENERAL_EVENT\n");
pm8001_mpi_general_event(pm8001_ha, piomb);
break;
case OPC_OUB_SSP_ABORT_RSP:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SSP_ABORT_RSP\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SSP_ABORT_RSP\n");
pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
break;
case OPC_OUB_SATA_ABORT_RSP:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SATA_ABORT_RSP\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SATA_ABORT_RSP\n");
pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
break;
case OPC_OUB_SAS_DIAG_MODE_START_END:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SAS_DIAG_MODE_START_END\n"));
+ pm8001_dbg(pm8001_ha, MSG,
+ "OPC_OUB_SAS_DIAG_MODE_START_END\n");
break;
case OPC_OUB_SAS_DIAG_EXECUTE:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SAS_DIAG_EXECUTE\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SAS_DIAG_EXECUTE\n");
break;
case OPC_OUB_GET_TIME_STAMP:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_GET_TIME_STAMP\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GET_TIME_STAMP\n");
break;
case OPC_OUB_SAS_HW_EVENT_ACK:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SAS_HW_EVENT_ACK\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SAS_HW_EVENT_ACK\n");
break;
case OPC_OUB_PORT_CONTROL:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_PORT_CONTROL\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_PORT_CONTROL\n");
break;
case OPC_OUB_SMP_ABORT_RSP:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SMP_ABORT_RSP\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SMP_ABORT_RSP\n");
pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
break;
case OPC_OUB_GET_NVMD_DATA:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_GET_NVMD_DATA\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GET_NVMD_DATA\n");
pm8001_mpi_get_nvmd_resp(pm8001_ha, piomb);
break;
case OPC_OUB_SET_NVMD_DATA:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SET_NVMD_DATA\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SET_NVMD_DATA\n");
pm8001_mpi_set_nvmd_resp(pm8001_ha, piomb);
break;
case OPC_OUB_DEVICE_HANDLE_REMOVAL:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_DEVICE_HANDLE_REMOVAL\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_DEVICE_HANDLE_REMOVAL\n");
break;
case OPC_OUB_SET_DEVICE_STATE:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SET_DEVICE_STATE\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SET_DEVICE_STATE\n");
pm8001_mpi_set_dev_state_resp(pm8001_ha, piomb);
break;
case OPC_OUB_GET_DEVICE_STATE:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_GET_DEVICE_STATE\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GET_DEVICE_STATE\n");
break;
case OPC_OUB_SET_DEV_INFO:
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("OPC_OUB_SET_DEV_INFO\n"));
+ pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SET_DEV_INFO\n");
break;
/* spcv specific commands */
case OPC_OUB_PHY_START_RESP:
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
- "OPC_OUB_PHY_START_RESP opcode:%x\n", opc));
+ pm8001_dbg(pm8001_ha, MSG,
+ "OPC_OUB_PHY_START_RESP opcode:%x\n", opc);
mpi_phy_start_resp(pm8001_ha, piomb);
break;
case OPC_OUB_PHY_STOP_RESP:
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
- "OPC_OUB_PHY_STOP_RESP opcode:%x\n", opc));
+ pm8001_dbg(pm8001_ha, MSG,
+ "OPC_OUB_PHY_STOP_RESP opcode:%x\n", opc);
mpi_phy_stop_resp(pm8001_ha, piomb);
break;
case OPC_OUB_SET_CONTROLLER_CONFIG:
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
- "OPC_OUB_SET_CONTROLLER_CONFIG opcode:%x\n", opc));
+ pm8001_dbg(pm8001_ha, MSG,
+ "OPC_OUB_SET_CONTROLLER_CONFIG opcode:%x\n", opc);
mpi_set_controller_config_resp(pm8001_ha, piomb);
break;
case OPC_OUB_GET_CONTROLLER_CONFIG:
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
- "OPC_OUB_GET_CONTROLLER_CONFIG opcode:%x\n", opc));
+ pm8001_dbg(pm8001_ha, MSG,
+ "OPC_OUB_GET_CONTROLLER_CONFIG opcode:%x\n", opc);
mpi_get_controller_config_resp(pm8001_ha, piomb);
break;
case OPC_OUB_GET_PHY_PROFILE:
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
- "OPC_OUB_GET_PHY_PROFILE opcode:%x\n", opc));
+ pm8001_dbg(pm8001_ha, MSG,
+ "OPC_OUB_GET_PHY_PROFILE opcode:%x\n", opc);
mpi_get_phy_profile_resp(pm8001_ha, piomb);
break;
case OPC_OUB_FLASH_OP_EXT:
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
- "OPC_OUB_FLASH_OP_EXT opcode:%x\n", opc));
+ pm8001_dbg(pm8001_ha, MSG,
+ "OPC_OUB_FLASH_OP_EXT opcode:%x\n", opc);
mpi_flash_op_ext_resp(pm8001_ha, piomb);
break;
case OPC_OUB_SET_PHY_PROFILE:
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
- "OPC_OUB_SET_PHY_PROFILE opcode:%x\n", opc));
+ pm8001_dbg(pm8001_ha, MSG,
+ "OPC_OUB_SET_PHY_PROFILE opcode:%x\n", opc);
mpi_set_phy_profile_resp(pm8001_ha, piomb);
break;
case OPC_OUB_KEK_MANAGEMENT_RESP:
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
- "OPC_OUB_KEK_MANAGEMENT_RESP opcode:%x\n", opc));
+ pm8001_dbg(pm8001_ha, MSG,
+ "OPC_OUB_KEK_MANAGEMENT_RESP opcode:%x\n", opc);
mpi_kek_management_resp(pm8001_ha, piomb);
break;
case OPC_OUB_DEK_MANAGEMENT_RESP:
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
- "OPC_OUB_DEK_MANAGEMENT_RESP opcode:%x\n", opc));
+ pm8001_dbg(pm8001_ha, MSG,
+ "OPC_OUB_DEK_MANAGEMENT_RESP opcode:%x\n", opc);
mpi_dek_management_resp(pm8001_ha, piomb);
break;
case OPC_OUB_SSP_COALESCED_COMP_RESP:
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
- "OPC_OUB_SSP_COALESCED_COMP_RESP opcode:%x\n", opc));
+ pm8001_dbg(pm8001_ha, MSG,
+ "OPC_OUB_SSP_COALESCED_COMP_RESP opcode:%x\n", opc);
ssp_coalesced_comp_resp(pm8001_ha, piomb);
break;
default:
- PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
- "Unknown outbound Queue IOMB OPC = 0x%x\n", opc));
+ pm8001_dbg(pm8001_ha, DEVIO,
+ "Unknown outbound Queue IOMB OPC = 0x%x\n", opc);
break;
}
}
static void print_scratchpad_registers(struct pm8001_hba_info *pm8001_ha)
{
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("MSGU_SCRATCH_PAD_0: 0x%x\n",
- pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0)));
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("MSGU_SCRATCH_PAD_1:0x%x\n",
- pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1)));
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("MSGU_SCRATCH_PAD_2: 0x%x\n",
- pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2)));
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("MSGU_SCRATCH_PAD_3: 0x%x\n",
- pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3)));
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("MSGU_HOST_SCRATCH_PAD_0: 0x%x\n",
- pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_0)));
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("MSGU_HOST_SCRATCH_PAD_1: 0x%x\n",
- pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_1)));
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("MSGU_HOST_SCRATCH_PAD_2: 0x%x\n",
- pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_2)));
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("MSGU_HOST_SCRATCH_PAD_3: 0x%x\n",
- pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_3)));
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("MSGU_HOST_SCRATCH_PAD_4: 0x%x\n",
- pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_4)));
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("MSGU_HOST_SCRATCH_PAD_5: 0x%x\n",
- pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_5)));
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("MSGU_RSVD_SCRATCH_PAD_0: 0x%x\n",
- pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_6)));
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("MSGU_RSVD_SCRATCH_PAD_1: 0x%x\n",
- pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_7)));
+ pm8001_dbg(pm8001_ha, FAIL, "MSGU_SCRATCH_PAD_0: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0));
+ pm8001_dbg(pm8001_ha, FAIL, "MSGU_SCRATCH_PAD_1:0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1));
+ pm8001_dbg(pm8001_ha, FAIL, "MSGU_SCRATCH_PAD_2: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2));
+ pm8001_dbg(pm8001_ha, FAIL, "MSGU_SCRATCH_PAD_3: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3));
+ pm8001_dbg(pm8001_ha, FAIL, "MSGU_HOST_SCRATCH_PAD_0: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_0));
+ pm8001_dbg(pm8001_ha, FAIL, "MSGU_HOST_SCRATCH_PAD_1: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_1));
+ pm8001_dbg(pm8001_ha, FAIL, "MSGU_HOST_SCRATCH_PAD_2: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_2));
+ pm8001_dbg(pm8001_ha, FAIL, "MSGU_HOST_SCRATCH_PAD_3: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_3));
+ pm8001_dbg(pm8001_ha, FAIL, "MSGU_HOST_SCRATCH_PAD_4: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_4));
+ pm8001_dbg(pm8001_ha, FAIL, "MSGU_HOST_SCRATCH_PAD_5: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_5));
+ pm8001_dbg(pm8001_ha, FAIL, "MSGU_RSVD_SCRATCH_PAD_0: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_6));
+ pm8001_dbg(pm8001_ha, FAIL, "MSGU_RSVD_SCRATCH_PAD_1: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_7));
}
static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
@@ -4203,8 +4068,9 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
if ((regval & SCRATCH_PAD_MIPSALL_READY) !=
SCRATCH_PAD_MIPSALL_READY) {
pm8001_ha->controller_fatal_error = true;
- PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
- "Firmware Fatal error! Regval:0x%x\n", regval));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "Firmware Fatal error! Regval:0x%x\n",
+ regval);
print_scratchpad_registers(pm8001_ha);
return ret;
}
@@ -4281,7 +4147,6 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
char *preq_dma_addr = NULL;
__le64 tmp_addr;
u32 i, length;
- unsigned long flags;
memset(&smp_cmd, 0, sizeof(smp_cmd));
/*
@@ -4311,8 +4176,7 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
smp_cmd.tag = cpu_to_le32(ccb->ccb_tag);
length = sg_req->length;
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("SMP Frame Length %d\n", sg_req->length));
+ pm8001_dbg(pm8001_ha, IO, "SMP Frame Length %d\n", sg_req->length);
if (!(length - 8))
pm8001_ha->smp_exp_mode = SMP_DIRECT;
else
@@ -4324,8 +4188,7 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
/* INDIRECT MODE command settings. Use DMA */
if (pm8001_ha->smp_exp_mode == SMP_INDIRECT) {
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("SMP REQUEST INDIRECT MODE\n"));
+ pm8001_dbg(pm8001_ha, IO, "SMP REQUEST INDIRECT MODE\n");
/* for SPCv indirect mode. Place the top 4 bytes of
* SMP Request header here. */
for (i = 0; i < 4; i++)
@@ -4357,30 +4220,27 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
((u32)sg_dma_len(&task->smp_task.smp_resp)-4);
}
if (pm8001_ha->smp_exp_mode == SMP_DIRECT) {
- PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("SMP REQUEST DIRECT MODE\n"));
+ pm8001_dbg(pm8001_ha, IO, "SMP REQUEST DIRECT MODE\n");
for (i = 0; i < length; i++)
if (i < 16) {
smp_cmd.smp_req16[i] = *(preq_dma_addr+i);
- PM8001_IO_DBG(pm8001_ha, pm8001_printk(
- "Byte[%d]:%x (DMA data:%x)\n",
- i, smp_cmd.smp_req16[i],
- *(preq_dma_addr)));
+ pm8001_dbg(pm8001_ha, IO,
+ "Byte[%d]:%x (DMA data:%x)\n",
+ i, smp_cmd.smp_req16[i],
+ *(preq_dma_addr));
} else {
smp_cmd.smp_req[i] = *(preq_dma_addr+i);
- PM8001_IO_DBG(pm8001_ha, pm8001_printk(
- "Byte[%d]:%x (DMA data:%x)\n",
- i, smp_cmd.smp_req[i],
- *(preq_dma_addr)));
+ pm8001_dbg(pm8001_ha, IO,
+ "Byte[%d]:%x (DMA data:%x)\n",
+ i, smp_cmd.smp_req[i],
+ *(preq_dma_addr));
}
}
build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag,
&smp_cmd, pm8001_ha->smp_exp_mode, length);
- spin_lock_irqsave(&circularQ->iq_lock, flags);
rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &smp_cmd,
sizeof(smp_cmd), 0);
- spin_unlock_irqrestore(&circularQ->iq_lock, flags);
if (rc)
goto err_out_2;
return 0;
@@ -4444,7 +4304,6 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
u64 phys_addr, start_addr, end_addr;
u32 end_addr_high, end_addr_low;
struct inbound_queue_table *circularQ;
- unsigned long flags;
u32 q_index, cpu_id;
u32 opc = OPC_INB_SSPINIIOSTART;
memset(&ssp_cmd, 0, sizeof(ssp_cmd));
@@ -4471,9 +4330,9 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
/* Check if encryption is set */
if (pm8001_ha->chip->encrypt &&
!(pm8001_ha->encrypt_info.status) && check_enc_sas_cmd(task)) {
- PM8001_IO_DBG(pm8001_ha, pm8001_printk(
- "Encryption enabled.Sending Encrypt SAS command 0x%x\n",
- task->ssp_task.cmd->cmnd[0]));
+ pm8001_dbg(pm8001_ha, IO,
+ "Encryption enabled.Sending Encrypt SAS command 0x%x\n",
+ task->ssp_task.cmd->cmnd[0]);
opc = OPC_INB_SSP_INI_DIF_ENC_IO;
/* enable encryption. 0 for SAS 1.1 and SAS 2.0 compatible TLR*/
ssp_cmd.dad_dir_m_tlr = cpu_to_le32
@@ -4503,13 +4362,10 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
end_addr_low = cpu_to_le32(lower_32_bits(end_addr));
end_addr_high = cpu_to_le32(upper_32_bits(end_addr));
if (end_addr_high != ssp_cmd.enc_addr_high) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("The sg list address "
- "start_addr=0x%016llx data_len=0x%x "
- "end_addr_high=0x%08x end_addr_low="
- "0x%08x has crossed 4G boundary\n",
- start_addr, ssp_cmd.enc_len,
- end_addr_high, end_addr_low));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "The sg list address start_addr=0x%016llx data_len=0x%x end_addr_high=0x%08x end_addr_low=0x%08x has crossed 4G boundary\n",
+ start_addr, ssp_cmd.enc_len,
+ end_addr_high, end_addr_low);
pm8001_chip_make_sg(task->scatter, 1,
ccb->buf_prd);
phys_addr = ccb->ccb_dma_handle;
@@ -4533,9 +4389,9 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
(task->ssp_task.cmd->cmnd[4] << 8) |
(task->ssp_task.cmd->cmnd[5]));
} else {
- PM8001_IO_DBG(pm8001_ha, pm8001_printk(
- "Sending Normal SAS command 0x%x inb q %x\n",
- task->ssp_task.cmd->cmnd[0], q_index));
+ pm8001_dbg(pm8001_ha, IO,
+ "Sending Normal SAS command 0x%x inb q %x\n",
+ task->ssp_task.cmd->cmnd[0], q_index);
/* fill in PRD (scatter/gather) table, if any */
if (task->num_scatter > 1) {
pm8001_chip_make_sg(task->scatter, ccb->n_elem,
@@ -4559,13 +4415,10 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
end_addr_low = cpu_to_le32(lower_32_bits(end_addr));
end_addr_high = cpu_to_le32(upper_32_bits(end_addr));
if (end_addr_high != ssp_cmd.addr_high) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("The sg list address "
- "start_addr=0x%016llx data_len=0x%x "
- "end_addr_high=0x%08x end_addr_low="
- "0x%08x has crossed 4G boundary\n",
- start_addr, ssp_cmd.len,
- end_addr_high, end_addr_low));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "The sg list address start_addr=0x%016llx data_len=0x%x end_addr_high=0x%08x end_addr_low=0x%08x has crossed 4G boundary\n",
+ start_addr, ssp_cmd.len,
+ end_addr_high, end_addr_low);
pm8001_chip_make_sg(task->scatter, 1,
ccb->buf_prd);
phys_addr = ccb->ccb_dma_handle;
@@ -4582,10 +4435,8 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
ssp_cmd.esgl = 0;
}
}
- spin_lock_irqsave(&circularQ->iq_lock, flags);
ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
&ssp_cmd, sizeof(ssp_cmd), q_index);
- spin_unlock_irqrestore(&circularQ->iq_lock, flags);
return ret;
}
@@ -4614,19 +4465,19 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
if (task->data_dir == DMA_NONE) {
ATAP = 0x04; /* no data*/
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("no data\n"));
+ pm8001_dbg(pm8001_ha, IO, "no data\n");
} else if (likely(!task->ata_task.device_control_reg_update)) {
if (task->ata_task.dma_xfer) {
ATAP = 0x06; /* DMA */
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("DMA\n"));
+ pm8001_dbg(pm8001_ha, IO, "DMA\n");
} else {
ATAP = 0x05; /* PIO*/
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("PIO\n"));
+ pm8001_dbg(pm8001_ha, IO, "PIO\n");
}
if (task->ata_task.use_ncq &&
dev->sata_dev.class != ATA_DEV_ATAPI) {
ATAP = 0x07; /* FPDMA */
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("FPDMA\n"));
+ pm8001_dbg(pm8001_ha, IO, "FPDMA\n");
}
}
if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) {
@@ -4646,9 +4497,9 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
/* Check if encryption is set */
if (pm8001_ha->chip->encrypt &&
!(pm8001_ha->encrypt_info.status) && check_enc_sat_cmd(task)) {
- PM8001_IO_DBG(pm8001_ha, pm8001_printk(
- "Encryption enabled.Sending Encrypt SATA cmd 0x%x\n",
- sata_cmd.sata_fis.command));
+ pm8001_dbg(pm8001_ha, IO,
+ "Encryption enabled.Sending Encrypt SATA cmd 0x%x\n",
+ sata_cmd.sata_fis.command);
opc = OPC_INB_SATA_DIF_ENC_IO;
/* set encryption bit */
@@ -4676,13 +4527,10 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
end_addr_low = cpu_to_le32(lower_32_bits(end_addr));
end_addr_high = cpu_to_le32(upper_32_bits(end_addr));
if (end_addr_high != sata_cmd.enc_addr_high) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("The sg list address "
- "start_addr=0x%016llx data_len=0x%x "
- "end_addr_high=0x%08x end_addr_low"
- "=0x%08x has crossed 4G boundary\n",
- start_addr, sata_cmd.enc_len,
- end_addr_high, end_addr_low));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "The sg list address start_addr=0x%016llx data_len=0x%x end_addr_high=0x%08x end_addr_low=0x%08x has crossed 4G boundary\n",
+ start_addr, sata_cmd.enc_len,
+ end_addr_high, end_addr_low);
pm8001_chip_make_sg(task->scatter, 1,
ccb->buf_prd);
phys_addr = ccb->ccb_dma_handle;
@@ -4711,9 +4559,9 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
cpu_to_le32((sata_cmd.sata_fis.lbah_exp << 8) |
(sata_cmd.sata_fis.lbam_exp));
} else {
- PM8001_IO_DBG(pm8001_ha, pm8001_printk(
- "Sending Normal SATA command 0x%x inb %x\n",
- sata_cmd.sata_fis.command, q_index));
+ pm8001_dbg(pm8001_ha, IO,
+ "Sending Normal SATA command 0x%x inb %x\n",
+ sata_cmd.sata_fis.command, q_index);
/* dad (bit 0-1) is 0 */
sata_cmd.ncqtag_atap_dir_m_dad =
cpu_to_le32(((ncg_tag & 0xff)<<16) |
@@ -4739,13 +4587,10 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
end_addr_low = cpu_to_le32(lower_32_bits(end_addr));
end_addr_high = cpu_to_le32(upper_32_bits(end_addr));
if (end_addr_high != sata_cmd.addr_high) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("The sg list address "
- "start_addr=0x%016llx data_len=0x%x"
- "end_addr_high=0x%08x end_addr_low="
- "0x%08x has crossed 4G boundary\n",
- start_addr, sata_cmd.len,
- end_addr_high, end_addr_low));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "The sg list address start_addr=0x%016llx data_len=0x%xend_addr_high=0x%08x end_addr_low=0x%08x has crossed 4G boundary\n",
+ start_addr, sata_cmd.len,
+ end_addr_high, end_addr_low);
pm8001_chip_make_sg(task->scatter, 1,
ccb->buf_prd);
phys_addr = ccb->ccb_dma_handle;
@@ -4804,10 +4649,10 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
SAS_TASK_STATE_ABORTED))) {
spin_unlock_irqrestore(&task->task_state_lock,
flags);
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("task 0x%p resp 0x%x "
- " stat 0x%x but aborted by upper layer "
- "\n", task, ts->resp, ts->stat));
+ pm8001_dbg(pm8001_ha, FAIL,
+ "task 0x%p resp 0x%x stat 0x%x but aborted by upper layer\n",
+ task, ts->resp,
+ ts->stat);
pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
return 0;
} else {
@@ -4815,14 +4660,13 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
flags);
pm8001_ccb_task_free_done(pm8001_ha, task,
ccb, tag);
+ atomic_dec(&pm8001_ha_dev->running_req);
return 0;
}
}
}
- spin_lock_irqsave(&circularQ->iq_lock, flags);
ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
&sata_cmd, sizeof(sata_cmd), q_index);
- spin_unlock_irqrestore(&circularQ->iq_lock, flags);
return ret;
}
@@ -4843,8 +4687,7 @@ pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
memset(&payload, 0, sizeof(payload));
payload.tag = cpu_to_le32(tag);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("PHY START REQ for phy_id %d\n", phy_id));
+ pm8001_dbg(pm8001_ha, INIT, "PHY START REQ for phy_id %d\n", phy_id);
payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
LINKMODE_AUTO | pm8001_ha->link_rate | phy_id);
@@ -5008,9 +4851,9 @@ static irqreturn_t
pm80xx_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
pm80xx_chip_interrupt_disable(pm8001_ha, vec);
- PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
- "irq vec %d, ODMR:0x%x\n",
- vec, pm8001_cr32(pm8001_ha, 0, 0x30)));
+ pm8001_dbg(pm8001_ha, DEVIO,
+ "irq vec %d, ODMR:0x%x\n",
+ vec, pm8001_cr32(pm8001_ha, 0, 0x30));
process_oq(pm8001_ha, vec);
pm80xx_chip_interrupt_enable(pm8001_ha, vec);
return IRQ_HANDLED;
@@ -5029,13 +4872,13 @@ static void mpi_set_phy_profile_req(struct pm8001_hba_info *pm8001_ha,
memset(&payload, 0, sizeof(payload));
rc = pm8001_tag_alloc(pm8001_ha, &tag);
if (rc)
- PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("Invalid tag\n"));
+ pm8001_dbg(pm8001_ha, FAIL, "Invalid tag\n");
circularQ = &pm8001_ha->inbnd_q_tbl[0];
payload.tag = cpu_to_le32(tag);
payload.ppc_phyid = (((operation & 0xF) << 8) | (phyid & 0xFF));
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk(" phy profile command for phy %x ,length is %d\n",
- payload.ppc_phyid, length));
+ pm8001_dbg(pm8001_ha, INIT,
+ " phy profile command for phy %x ,length is %d\n",
+ payload.ppc_phyid, length);
for (i = length; i < (length + PHY_DWORD_LENGTH - 1); i++) {
payload.reserved[j] = cpu_to_le32(*((u32 *)buf + i));
j++;
@@ -5056,7 +4899,7 @@ void pm8001_set_phy_profile(struct pm8001_hba_info *pm8001_ha,
SAS_PHY_ANALOG_SETTINGS_PAGE, i, length, (u32 *)buf);
length = length + PHY_DWORD_LENGTH;
}
- PM8001_INIT_DBG(pm8001_ha, pm8001_printk("phy settings completed\n"));
+ pm8001_dbg(pm8001_ha, INIT, "phy settings completed\n");
}
void pm8001_set_phy_profile_single(struct pm8001_hba_info *pm8001_ha,
@@ -5071,7 +4914,7 @@ void pm8001_set_phy_profile_single(struct pm8001_hba_info *pm8001_ha,
rc = pm8001_tag_alloc(pm8001_ha, &tag);
if (rc)
- PM8001_INIT_DBG(pm8001_ha, pm8001_printk("Invalid tag"));
+ pm8001_dbg(pm8001_ha, INIT, "Invalid tag\n");
circularQ = &pm8001_ha->inbnd_q_tbl[0];
opc = OPC_INB_SET_PHY_PROFILE;
@@ -5088,8 +4931,7 @@ void pm8001_set_phy_profile_single(struct pm8001_hba_info *pm8001_ha,
if (rc)
pm8001_tag_free(pm8001_ha, tag);
- PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("PHY %d settings applied", phy));
+ pm8001_dbg(pm8001_ha, INIT, "PHY %d settings applied\n", phy);
}
const struct pm8001_dispatch pm8001_80xx_dispatch = {
.name = "pmc80xx",
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h
index 701951a0f715..ec48bc276de6 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.h
+++ b/drivers/scsi/pm8001/pm80xx_hwi.h
@@ -1639,3 +1639,9 @@ typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t;
#define MEMBASE_II_SHIFT_REGISTER 0x1010
#endif
+
+/*
+ * A sleep in the 1-20 ms range may take noticeably longer than requested
+ * (msleep() granularity is coarse there), hence we choose a 20 ms interval.
+ */
+#define FW_READY_INTERVAL 20
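The comment above justifies polling at a 20 ms cadence instead of busy-waiting; a consumer of this constant would look roughly like the sketch below. This is illustrative only -- the ready mask check mirrors the one in process_oq() above, but the function name and the one-second timeout budget are assumptions, not lifted from the driver:

/* Illustrative firmware-ready poll; the timeout budget is an assumption. */
static int example_wait_fw_ready(struct pm8001_hba_info *pm8001_ha)
{
	u32 waited = 0;

	do {
		u32 regval = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);

		if ((regval & SCRATCH_PAD_MIPSALL_READY) ==
		    SCRATCH_PAD_MIPSALL_READY)
			return 0;
		msleep(FW_READY_INTERVAL);	/* can sleep: process context */
		waited += FW_READY_INTERVAL;
	} while (waited < 1000);

	return -EBUSY;
}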
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index cbe5fab793eb..834556ea21d2 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -5234,54 +5234,37 @@ static void pmcraid_remove(struct pci_dev *pdev)
return;
}
-#ifdef CONFIG_PM
/**
* pmcraid_suspend - driver suspend entry point for power management
- * @pdev: PCI device structure
- * @state: PCI power state to suspend routine
+ * @dev: Device structure
*
* Return Value - 0 always
*/
-static int pmcraid_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused pmcraid_suspend(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct pmcraid_instance *pinstance = pci_get_drvdata(pdev);
pmcraid_shutdown(pdev);
pmcraid_disable_interrupts(pinstance, ~0);
pmcraid_kill_tasklets(pinstance);
- pci_set_drvdata(pinstance->pdev, pinstance);
pmcraid_unregister_interrupt_handler(pinstance);
- pci_save_state(pdev);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
return 0;
}
/**
* pmcraid_resume - driver resume entry point PCI power management
- * @pdev: PCI device structure
+ * @dev: Device structure
*
* Return Value - 0 in case of success. Error code in case of any failure
*/
-static int pmcraid_resume(struct pci_dev *pdev)
+static int __maybe_unused pmcraid_resume(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct pmcraid_instance *pinstance = pci_get_drvdata(pdev);
struct Scsi_Host *host = pinstance->host;
- int rc;
-
- pci_set_power_state(pdev, PCI_D0);
- pci_enable_wake(pdev, PCI_D0, 0);
- pci_restore_state(pdev);
-
- rc = pci_enable_device(pdev);
-
- if (rc) {
- dev_err(&pdev->dev, "resume: Enable device failed\n");
- return rc;
- }
-
- pci_set_master(pdev);
+ int rc = 0;
if (sizeof(dma_addr_t) == 4 ||
dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
@@ -5334,18 +5317,10 @@ release_host:
scsi_host_put(host);
disable_device:
- pci_disable_device(pdev);
return rc;
}
-#else
-
-#define pmcraid_suspend NULL
-#define pmcraid_resume NULL
-
-#endif /* CONFIG_PM */
-
/**
* pmcraid_complete_ioa_reset - Called by either timer or tasklet during
* completion of the ioa reset
@@ -5833,6 +5808,8 @@ out_disable_device:
return -ENODEV;
}
+static SIMPLE_DEV_PM_OPS(pmcraid_pm_ops, pmcraid_suspend, pmcraid_resume);
+
/*
* PCI driver structure of pmcraid driver
*/
@@ -5841,8 +5818,7 @@ static struct pci_driver pmcraid_driver = {
.id_table = pmcraid_pci_table,
.probe = pmcraid_probe,
.remove = pmcraid_remove,
- .suspend = pmcraid_suspend,
- .resume = pmcraid_resume,
+ .driver.pm = &pmcraid_pm_ops,
.shutdown = pmcraid_shutdown
};
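The pmcraid hunk above is the standard legacy-PM-to-dev_pm_ops conversion: with .driver.pm set, the PCI core itself saves and restores config space and handles device enable/disable and power-state changes, which is why those calls are deleted from the driver callbacks. A generic sketch of the resulting pattern, with foo_* as hypothetical driver helpers:

static int __maybe_unused foo_suspend(struct device *dev)
{
	struct foo_priv *priv = dev_get_drvdata(dev);

	foo_quiesce_hw(priv);	/* hypothetical: stop DMA, free IRQs */
	return 0;
}

static int __maybe_unused foo_resume(struct device *dev)
{
	struct foo_priv *priv = dev_get_drvdata(dev);

	return foo_restart_hw(priv);	/* hypothetical: reinit the HBA */
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct pci_driver foo_driver = {
	/* legacy .suspend/.resume hooks are replaced by: */
	.driver.pm = &foo_pm_ops,
};

Using __maybe_unused rather than #ifdef CONFIG_PM keeps the callbacks compiling in all configurations while letting the compiler drop them when power management is disabled.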
diff --git a/drivers/scsi/ps3rom.c b/drivers/scsi/ps3rom.c
index f75c0b5cd587..ccb5771f1cb7 100644
--- a/drivers/scsi/ps3rom.c
+++ b/drivers/scsi/ps3rom.c
@@ -402,7 +402,7 @@ fail_free_bounce:
return error;
}
-static int ps3rom_remove(struct ps3_system_bus_device *_dev)
+static void ps3rom_remove(struct ps3_system_bus_device *_dev)
{
struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core);
struct Scsi_Host *host = ps3_system_bus_get_drvdata(&dev->sbd);
@@ -412,7 +412,6 @@ static int ps3rom_remove(struct ps3_system_bus_device *_dev)
scsi_host_put(host);
ps3_system_bus_set_drvdata(&dev->sbd, NULL);
kfree(dev->bounce_buf);
- return 0;
}
static struct ps3_system_bus_driver ps3rom = {
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index 0e2cbb164eeb..88a592d09433 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -11,7 +11,6 @@
#include <scsi/fc/fc_fip.h>
#include <scsi/fc/fc_fc2.h>
#include <scsi/scsi_tcq.h>
-#include <scsi/fc_encode.h>
#include <linux/version.h>
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 61fab01d2d52..47ad64b06623 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -2245,7 +2245,7 @@ qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type,
chap_name);
break;
case ISCSI_BOOT_TGT_CHAP_SECRET:
- rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
+ rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN,
chap_secret);
break;
case ISCSI_BOOT_TGT_REV_CHAP_NAME:
@@ -2253,7 +2253,7 @@ qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type,
mchap_name);
break;
case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
- rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
+ rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN,
mchap_secret);
break;
case ISCSI_BOOT_TGT_FLAGS:
@@ -2766,7 +2766,7 @@ retry_probe:
QEDI_ERR(&qedi->dbg_ctx,
"Unable to start offload thread!\n");
rc = -ENODEV;
- goto free_cid_que;
+ goto free_tmf_thread;
}
INIT_DELAYED_WORK(&qedi->recovery_work, qedi_recovery_handler);
@@ -2790,6 +2790,8 @@ retry_probe:
return 0;
+free_tmf_thread:
+ destroy_workqueue(qedi->tmf_thread);
free_cid_que:
qedi_release_cid_que(qedi);
free_uio:
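The qedi fix above is the usual goto-unwind correction: tmf_thread is created before the offload thread, so a failure there must jump to a new label that destroys it before falling through to the earlier teardown labels. Generic shape of the pattern, with acquire_*/release_* as hypothetical resources:

static int example_probe(void)
{
	void *a, *b, *c;	/* hypothetical resources */
	int rc;

	a = acquire_a();
	if (!a)
		return -ENOMEM;

	b = acquire_b();
	if (!b) {
		rc = -ENOMEM;
		goto free_a;
	}

	c = acquire_c();	/* fails -> must undo b, then a */
	if (!c) {
		rc = -ENODEV;
		goto free_b;	/* the label this kind of fix adds */
	}

	return 0;

free_b:
	release_b(b);
free_a:
	release_a(a);
	return rc;
}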
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 4f0486fe30dd..30c7e5e63851 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2493,6 +2493,8 @@ typedef struct fc_port {
int generation;
struct se_session *se_sess;
+ struct list_head sess_cmd_list;
+ spinlock_t sess_cmd_lock;
struct kref sess_kref;
struct qla_tgt *tgt;
unsigned long expires;
@@ -3294,8 +3296,10 @@ struct isp_operations {
void (*fw_dump)(struct scsi_qla_host *vha);
void (*mpi_fw_dump)(struct scsi_qla_host *, int);
+ /* Context: task, might sleep */
int (*beacon_on) (struct scsi_qla_host *);
int (*beacon_off) (struct scsi_qla_host *);
+
void (*beacon_blink) (struct scsi_qla_host *);
void *(*read_optrom)(struct scsi_qla_host *, void *,
@@ -3306,7 +3310,10 @@ struct isp_operations {
int (*get_flash_version) (struct scsi_qla_host *, void *);
int (*start_scsi) (srb_t *);
int (*start_scsi_mq) (srb_t *);
+
+ /* Context: task, might sleep */
int (*abort_isp) (struct scsi_qla_host *);
+
int (*iospace_config)(struct qla_hw_data *);
int (*initialize_adapter)(struct scsi_qla_host *);
};
@@ -4968,8 +4975,7 @@ struct secure_flash_update_block_pk {
} while (0)
#define QLA_QPAIR_MARK_NOT_BUSY(__qpair) \
- atomic_dec(&__qpair->ref_count); \
-
+ atomic_dec(&__qpair->ref_count)
#define QLA_ENA_CONF(_ha) {\
int i;\
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index e28c4b7ec55f..391ac75e3de3 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -3558,10 +3558,10 @@ login_logout:
if (fcport->flags & FCF_FCP2_DEVICE)
fcport->logout_on_delete = 0;
- ql_dbg(ql_dbg_disc, vha, 0x20f0,
- "%s %d %8phC post del sess\n",
- __func__, __LINE__,
- fcport->port_name);
+ ql_log(ql_log_warn, vha, 0x20f0,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__,
+ fcport->port_name);
qlt_schedule_sess_for_deletion(fcport);
continue;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 898c70b8ebbf..dcc0f0d823db 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1268,9 +1268,10 @@ qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
lio->u.logio.flags |= SRB_LOGIN_NVME_PRLI;
ql_dbg(ql_dbg_disc, vha, 0x211b,
- "Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d %s.\n",
+ "Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d fc4type %x priority %x %s.\n",
fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b24,
- fcport->login_retry, NVME_TARGET(vha->hw, fcport) ? "nvme" : "fc");
+ fcport->login_retry, fcport->fc4_type, vha->hw->fc4_type_priority,
+ NVME_TARGET(vha->hw, fcport) ? "nvme" : "fcp");
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
@@ -1932,26 +1933,58 @@ qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
break;
}
- /*
- * Retry PRLI with other FC-4 type if failure occurred on dual
- * FCP/NVMe port
- */
- if (NVME_FCP_TARGET(ea->fcport)) {
- ql_dbg(ql_dbg_disc, vha, 0x2118,
- "%s %d %8phC post %s prli\n",
- __func__, __LINE__, ea->fcport->port_name,
- (ea->fcport->fc4_type & FS_FC4TYPE_NVME) ?
- "NVMe" : "FCP");
- if (vha->hw->fc4_type_priority == FC4_PRIORITY_NVME)
+ ql_dbg(ql_dbg_disc, vha, 0x2118,
+ "%s %d %8phC priority %s, fc4type %x\n",
+ __func__, __LINE__, ea->fcport->port_name,
+ vha->hw->fc4_type_priority == FC4_PRIORITY_FCP ?
+ "FCP" : "NVMe", ea->fcport->fc4_type);
+
+ if (N2N_TOPO(vha->hw)) {
+ if (vha->hw->fc4_type_priority == FC4_PRIORITY_NVME) {
ea->fcport->fc4_type &= ~FS_FC4TYPE_NVME;
- else
+ ea->fcport->fc4_type |= FS_FC4TYPE_FCP;
+ } else {
ea->fcport->fc4_type &= ~FS_FC4TYPE_FCP;
- }
+ ea->fcport->fc4_type |= FS_FC4TYPE_NVME;
+ }
- ea->fcport->flags &= ~FCF_ASYNC_SENT;
- ea->fcport->keep_nport_handle = 0;
- ea->fcport->logout_on_delete = 1;
- qlt_schedule_sess_for_deletion(ea->fcport);
+ if (ea->fcport->n2n_link_reset_cnt < 3) {
+ ea->fcport->n2n_link_reset_cnt++;
+ vha->relogin_jif = jiffies + 2 * HZ;
+ /*
+ * PRLI failed. Reset link to kick start
+ * state machine
+ */
+ set_bit(N2N_LINK_RESET, &vha->dpc_flags);
+ } else {
+ ql_log(ql_log_warn, vha, 0x2119,
+ "%s %d %8phC Unable to reconnect\n",
+ __func__, __LINE__,
+ ea->fcport->port_name);
+ }
+ } else {
+ /*
+ * switch connect. login failed. Take connection down
+ * and allow relogin to retrigger
+ */
+ if (NVME_FCP_TARGET(ea->fcport)) {
+ ql_dbg(ql_dbg_disc, vha, 0x2118,
+ "%s %d %8phC post %s prli\n",
+ __func__, __LINE__,
+ ea->fcport->port_name,
+ (ea->fcport->fc4_type & FS_FC4TYPE_NVME)
+ ? "NVMe" : "FCP");
+ if (vha->hw->fc4_type_priority == FC4_PRIORITY_NVME)
+ ea->fcport->fc4_type &= ~FS_FC4TYPE_NVME;
+ else
+ ea->fcport->fc4_type &= ~FS_FC4TYPE_FCP;
+ }
+
+ ea->fcport->flags &= ~FCF_ASYNC_SENT;
+ ea->fcport->keep_nport_handle = 0;
+ ea->fcport->logout_on_delete = 1;
+ qlt_schedule_sess_for_deletion(ea->fcport);
+ }
break;
}
}
@@ -4974,6 +5007,9 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
INIT_LIST_HEAD(&fcport->gnl_entry);
INIT_LIST_HEAD(&fcport->list);
+ INIT_LIST_HEAD(&fcport->sess_cmd_list);
+ spin_lock_init(&fcport->sess_cmd_lock);
+
return fcport;
}
@@ -5946,6 +5982,9 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
break;
}
+ if (fcport->login_retry == 0)
+ fcport->login_retry =
+ vha->hw->login_retry_count;
/*
* If device was not a fabric device before.
*/
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index a24b82de4aab..f9142dbec112 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -2226,11 +2226,13 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
srb_t *sp;
struct srb_iocb *iocb;
struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
+ u16 comp_status;
sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
if (!sp)
return;
+ comp_status = le16_to_cpu(sts->comp_status);
iocb = &sp->u.iocb_cmd;
type = sp->name;
fcport = sp->fcport;
@@ -2244,7 +2246,7 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
} else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
ql_log(ql_log_warn, fcport->vha, 0x5039,
"Async-%s error - hdl=%x completion status(%x).\n",
- type, sp->handle, sts->comp_status);
+ type, sp->handle, comp_status);
iocb->u.tmf.data = QLA_FUNCTION_FAILED;
} else if ((le16_to_cpu(sts->scsi_status) &
SS_RESPONSE_INFO_LEN_VALID)) {
@@ -2260,6 +2262,30 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
}
}
+ switch (comp_status) {
+ case CS_PORT_LOGGED_OUT:
+ case CS_PORT_CONFIG_CHG:
+ case CS_PORT_BUSY:
+ case CS_INCOMPLETE:
+ case CS_PORT_UNAVAILABLE:
+ case CS_TIMEOUT:
+ case CS_RESET:
+ if (atomic_read(&fcport->state) == FCS_ONLINE) {
+ ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
+				    "-Port to be marked lost on fcport=%02x%02x%02x, current port state=%s, comp_status %x.\n",
+ fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa,
+ port_state_str[FCS_ONLINE],
+ comp_status);
+
+ qlt_schedule_sess_for_deletion(fcport);
+ }
+ break;
+
+ default:
+ break;
+ }
+
if (iocb->u.tmf.data != QLA_SUCCESS)
ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, sp->vha, 0x5055,
sts, sizeof(*sts));
@@ -3952,10 +3978,12 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
if (USER_CTRL_IRQ(ha) || !ha->mqiobase) {
/* user wants to control IRQ setting for target mode */
ret = pci_alloc_irq_vectors(ha->pdev, min_vecs,
- ha->msix_count, PCI_IRQ_MSIX);
+ min((u16)ha->msix_count, (u16)num_online_cpus()),
+ PCI_IRQ_MSIX);
} else
ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs,
- ha->msix_count, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
+ min((u16)ha->msix_count, (u16)num_online_cpus()),
+ PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
&desc);
if (ret < 0) {
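Capping the requested maximum at num_online_cpus() avoids asking for more MSI-X vectors than there are CPUs to service them; pci_alloc_irq_vectors() allocates anywhere in [min_vecs, max_vecs] and returns the count it actually obtained. A minimal usage sketch:

static int example_setup_msix(struct pci_dev *pdev, u16 min_vecs, u16 want)
{
	int nvecs;

	nvecs = pci_alloc_irq_vectors(pdev, min_vecs,
				      min(want, (u16)num_online_cpus()),
				      PCI_IRQ_MSIX);
	if (nvecs < 0)
		return nvecs;	/* not even min_vecs could be granted */

	return nvecs;		/* number of vectors actually allocated */
}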
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 07afd0d8a8f3..d7d4ab65009c 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -1129,7 +1129,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
if (ha->flags.scm_supported_a &&
(ha->fw_attributes_ext[0] & FW_ATTR_EXT0_SCM_SUPPORTED)) {
ha->flags.scm_supported_f = 1;
- ha->sf_init_cb->flags |= BIT_13;
+ ha->sf_init_cb->flags |= cpu_to_le16(BIT_13);
}
ql_log(ql_log_info, vha, 0x11a3, "SCM in FW: %s\n",
(ha->flags.scm_supported_f) ? "Supported" :
@@ -1137,9 +1137,9 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
if (vha->flags.nvme2_enabled) {
/* set BIT_15 of special feature control block for SLER */
- ha->sf_init_cb->flags |= BIT_15;
+ ha->sf_init_cb->flags |= cpu_to_le16(BIT_15);
/* set BIT_14 of special feature control block for PI CTRL*/
- ha->sf_init_cb->flags |= BIT_14;
+ ha->sf_init_cb->flags |= cpu_to_le16(BIT_14);
}
}
@@ -3998,9 +3998,6 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
fcport->scan_state = QLA_FCPORT_FOUND;
fcport->n2n_flag = 1;
fcport->keep_nport_handle = 1;
- fcport->fc4_type = FS_FC4TYPE_FCP;
- if (vha->flags.nvme_enabled)
- fcport->fc4_type |= FS_FC4TYPE_NVME;
if (wwn_to_u64(vha->port_name) >
wwn_to_u64(fcport->port_name)) {
@@ -4030,7 +4027,6 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags);
return;
- break;
case TOPO_FL:
ha->current_topology = ISP_CFG_FL;
break;
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index b7a1dc24db38..eab559b3b257 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -227,7 +227,7 @@ static void qla_nvme_abort_work(struct work_struct *work)
"%s called for sp=%p, hndl=%x on fcport=%p deleted=%d\n",
__func__, sp, sp->handle, fcport, fcport->deleted);
- if (!ha->flags.fw_started && fcport->deleted)
+ if (!ha->flags.fw_started || fcport->deleted)
goto out;
if (ha->flags.host_shutting_down) {
@@ -554,19 +554,15 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
fcport = qla_rport->fcport;
- if (!qpair || !fcport)
- return -ENODEV;
-
- if (!qpair->fw_started || fcport->deleted)
+ if (unlikely(!qpair || !fcport || fcport->deleted))
return -EBUSY;
- vha = fcport->vha;
-
if (!(fcport->nvme_flag & NVME_FLAG_REGISTERED))
return -ENODEV;
- if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
- (qpair && !qpair->fw_started) || fcport->deleted)
+ vha = fcport->vha;
+
+ if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
return -EBUSY;
/*
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index b3ba0de5d4fb..0677295957bc 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -489,29 +489,26 @@ qla82xx_rd_32(struct qla_hw_data *ha, ulong off_in)
return data;
}
-#define IDC_LOCK_TIMEOUT 100000000
+/*
+ * Context: task, might sleep
+ */
int qla82xx_idc_lock(struct qla_hw_data *ha)
{
- int i;
- int done = 0, timeout = 0;
+ const int delay_ms = 100, timeout_ms = 2000;
+ int done, total = 0;
- while (!done) {
+ might_sleep();
+
+ while (true) {
/* acquire semaphore5 from PCI HW block */
done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_LOCK));
if (done == 1)
break;
- if (timeout >= IDC_LOCK_TIMEOUT)
+ if (WARN_ON_ONCE(total >= timeout_ms))
return -1;
- timeout++;
-
- /* Yield CPU */
- if (!in_interrupt())
- schedule();
- else {
- for (i = 0; i < 20; i++)
- cpu_relax();
- }
+ total += delay_ms;
+ msleep(delay_ms);
}
return 0;
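The qla82xx_idc_lock() rewrite above replaces context-guessing via in_interrupt() with an explicit contract: the function is documented as task context, asserts it with might_sleep(), and sleeps between polls with a bounded total wait instead of spinning or calling schedule(). The same shape reduced to its essentials, where example_try_lock() is a hypothetical register poll returning 1 on success:

static int example_hw_lock(struct example_hw *hw)
{
	const int delay_ms = 100, timeout_ms = 2000;
	int total = 0;

	might_sleep();	/* warns if ever entered from atomic context */

	while (example_try_lock(hw) != 1) {
		if (total >= timeout_ms)
			return -ETIMEDOUT;
		msleep(delay_ms);
		total += delay_ms;
	}
	return 0;
}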
@@ -965,7 +962,7 @@ qla82xx_read_status_reg(struct qla_hw_data *ha, uint32_t *val)
static int
qla82xx_flash_wait_write_finish(struct qla_hw_data *ha)
{
- uint32_t val;
+ uint32_t val = 0;
int i, ret;
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c
index 01ccd4526707..68a16c95dcb7 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.c
+++ b/drivers/scsi/qla2xxx/qla_nx2.c
@@ -139,7 +139,7 @@ qla8044_poll_wait_for_ready(struct scsi_qla_host *vha, uint32_t addr1,
uint32_t mask)
{
unsigned long timeout;
- uint32_t temp;
+ uint32_t temp = 0;
/* jiffies after 100ms */
timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS);
@@ -2594,7 +2594,7 @@ qla8044_minidump_process_rdmux(struct scsi_qla_host *vha,
struct qla8044_minidump_entry_hdr *entry_hdr,
uint32_t **d_ptr)
{
- uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value;
+ uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value = 0;
struct qla8044_minidump_entry_mux *mux_hdr;
uint32_t *data_ptr = *d_ptr;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index f9c8ae9d669e..0e0fe5b09496 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -42,7 +42,7 @@ MODULE_PARM_DESC(ql2xfulldump_on_mpifail,
int ql2xenforce_iocb_limit = 1;
module_param(ql2xenforce_iocb_limit, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql2xenforce_iocb_limit,
- "Enforce IOCB throttling, to avoid FW congestion. (default: 0)");
+ "Enforce IOCB throttling, to avoid FW congestion. (default: 1)");
/*
* CT6 CTX allocation cache
@@ -884,8 +884,8 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
goto qc24_fail_command;
}
- if (!fcport) {
- cmd->result = DID_NO_CONNECT << 16;
+ if (!fcport || fcport->deleted) {
+ cmd->result = DID_IMM_RETRY << 16;
goto qc24_fail_command;
}
@@ -966,8 +966,8 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
goto qc24_fail_command;
}
- if (!fcport) {
- cmd->result = DID_NO_CONNECT << 16;
+ if (!fcport || fcport->deleted) {
+ cmd->result = DID_IMM_RETRY << 16;
goto qc24_fail_command;
}
@@ -3265,7 +3265,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
"req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out);
- ha->wq = alloc_workqueue("qla2xxx_wq", 0, 0);
+ ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 0);
if (unlikely(!ha->wq)) {
ret = -ENOMEM;
goto probe_failed;
@@ -5619,25 +5619,10 @@ qla83xx_service_idc_aen(struct work_struct *work)
}
}
-static void
-qla83xx_wait_logic(void)
-{
- int i;
-
- /* Yield CPU */
- if (!in_interrupt()) {
- /*
- * Wait about 200ms before retrying again.
- * This controls the number of retries for single
- * lock operation.
- */
- msleep(100);
- schedule();
- } else {
- for (i = 0; i < 20; i++)
- cpu_relax(); /* This a nop instr on i386 */
- }
-}
+/*
+ * Control the frequency of IDC lock retries
+ */
+#define QLA83XX_WAIT_LOGIC_MS 100
static int
qla83xx_force_lock_recovery(scsi_qla_host_t *base_vha)
@@ -5727,7 +5712,7 @@ retry_lockid:
goto exit;
if (o_drv_lockid == n_drv_lockid) {
- qla83xx_wait_logic();
+ msleep(QLA83XX_WAIT_LOGIC_MS);
goto retry_lockid;
} else
return QLA_SUCCESS;
@@ -5736,6 +5721,9 @@ exit:
return rval;
}
+/*
+ * Context: task, can sleep
+ */
void
qla83xx_idc_lock(scsi_qla_host_t *base_vha, uint16_t requester_id)
{
@@ -5743,6 +5731,8 @@ qla83xx_idc_lock(scsi_qla_host_t *base_vha, uint16_t requester_id)
uint32_t lock_owner;
struct qla_hw_data *ha = base_vha->hw;
+ might_sleep();
+
/* IDC-lock implementation using driver-lock/lock-id remote registers */
retry_lock:
if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCK, &data)
@@ -5761,7 +5751,7 @@ retry_lock:
/* Retry/Perform IDC-Lock recovery */
if (qla83xx_idc_lock_recovery(base_vha)
== QLA_SUCCESS) {
- qla83xx_wait_logic();
+ msleep(QLA83XX_WAIT_LOGIC_MS);
goto retry_lock;
} else
ql_log(ql_log_warn, base_vha, 0xb075,
@@ -6259,6 +6249,9 @@ void qla24xx_process_purex_list(struct purex_list *list)
}
}
+/*
+ * Context: task, can sleep
+ */
void
qla83xx_idc_unlock(scsi_qla_host_t *base_vha, uint16_t requester_id)
{
@@ -6269,6 +6262,8 @@ qla83xx_idc_unlock(scsi_qla_host_t *base_vha, uint16_t requester_id)
uint32_t data;
struct qla_hw_data *ha = base_vha->hw;
+ might_sleep();
+
/* IDC-unlock implementation using driver-unlock/lock-id
* remote registers
*/
@@ -6284,7 +6279,7 @@ retry_unlock:
/* SV: XXX: IDC unlock retrying needed here? */
/* Retry for IDC-unlock */
- qla83xx_wait_logic();
+ msleep(QLA83XX_WAIT_LOGIC_MS);
retry++;
ql_dbg(ql_dbg_p3p, base_vha, 0xb064,
"Failed to release IDC lock, retrying=%d\n", retry);
@@ -6292,7 +6287,7 @@ retry_unlock:
}
} else if (retry < 10) {
/* Retry for IDC-unlock */
- qla83xx_wait_logic();
+ msleep(QLA83XX_WAIT_LOGIC_MS);
retry++;
ql_dbg(ql_dbg_p3p, base_vha, 0xb065,
"Failed to read drv-lockid, retrying=%d\n", retry);
@@ -6308,7 +6303,7 @@ retry_unlock2:
if (qla83xx_access_control(base_vha, options, 0, 0, NULL)) {
if (retry < 10) {
/* Retry for IDC-unlock */
- qla83xx_wait_logic();
+ msleep(QLA83XX_WAIT_LOGIC_MS);
retry++;
ql_dbg(ql_dbg_p3p, base_vha, 0xb066,
"Failed to release IDC lock, retrying=%d\n", retry);
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 0f92e9a044dc..f771fabcba59 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -2634,14 +2634,14 @@ qla28xx_extract_sfub_and_verify(struct scsi_qla_host *vha, uint32_t *buf,
sizeof(struct secure_flash_update_block));
for (i = 0; i < (sizeof(struct secure_flash_update_block) >> 2); i++)
- check_sum += p[i];
+ check_sum += le32_to_cpu(p[i]);
check_sum = (~check_sum) + 1;
- if (check_sum != p[i]) {
+ if (check_sum != le32_to_cpu(p[i])) {
ql_log(ql_log_warn, vha, 0x7097,
"SFUB checksum failed, 0x%x, 0x%x\n",
- check_sum, p[i]);
+ check_sum, le32_to_cpu(p[i]));
return QLA_COMMAND_ERROR;
}
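The SFUB check above implements a two's-complement checksum: the word stored after the block equals the negation of the sum of the block's words, with each word converted from little-endian before summing. A worked sketch of the rule, assuming the stored checksum word immediately follows the block:

static bool example_sfub_csum_ok(const __le32 *block, size_t nwords)
{
	u32 sum = 0;
	size_t i;

	for (i = 0; i < nwords; i++)
		sum += le32_to_cpu(block[i]);

	/* (~sum + 1) == -sum in two's complement arithmetic */
	return (~sum + 1) == le32_to_cpu(block[nwords]);
}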
@@ -2721,7 +2721,7 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
if (ha->flags.secure_adapter && region.attribute) {
ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff,
- "Region %x is secure\n", region.code);
+ "Region %x is secure\n", le16_to_cpu(region.code));
switch (le16_to_cpu(region.code)) {
case FLT_REG_FW:
@@ -2775,7 +2775,7 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
default:
ql_log(ql_log_warn + ql_dbg_verbose, vha,
0xffff, "Secure region %x not supported\n",
- region.code);
+ le16_to_cpu(region.code));
rval = QLA_COMMAND_ERROR;
goto done;
}
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index a27a625839e6..0d09480b66cd 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -1273,7 +1273,7 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
qla24xx_chk_fcp_state(sess);
- ql_dbg(ql_dbg_disc, sess->vha, 0xe001,
+	ql_log(ql_log_warn, sess->vha, 0xe001,
"Scheduling sess %p for deletion %8phC\n",
sess, sess->port_name);
@@ -2083,6 +2083,7 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
struct qla_hw_data *ha = vha->hw;
struct qla_tgt_mgmt_cmd *mcmd;
struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0];
+ struct qla_tgt_cmd *abort_cmd;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
"qla_target(%d): task abort (tag=%d)\n",
@@ -2110,17 +2111,17 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
*/
mcmd->se_cmd.cpuid = h->cpuid;
- if (ha->tgt.tgt_ops->find_cmd_by_tag) {
- struct qla_tgt_cmd *abort_cmd;
-
- abort_cmd = ha->tgt.tgt_ops->find_cmd_by_tag(sess,
+ abort_cmd = ha->tgt.tgt_ops->find_cmd_by_tag(sess,
le32_to_cpu(abts->exchange_addr_to_abort));
- if (abort_cmd && abort_cmd->qpair) {
- mcmd->qpair = abort_cmd->qpair;
- mcmd->se_cmd.cpuid = abort_cmd->se_cmd.cpuid;
- mcmd->abort_io_attr = abort_cmd->atio.u.isp24.attr;
- mcmd->flags = QLA24XX_MGMT_ABORT_IO_ATTR_VALID;
- }
+ if (!abort_cmd)
+ return -EIO;
+ mcmd->unpacked_lun = abort_cmd->se_cmd.orig_fe_lun;
+
+ if (abort_cmd->qpair) {
+ mcmd->qpair = abort_cmd->qpair;
+ mcmd->se_cmd.cpuid = abort_cmd->se_cmd.cpuid;
+ mcmd->abort_io_attr = abort_cmd->atio.u.isp24.attr;
+ mcmd->flags = QLA24XX_MGMT_ABORT_IO_ATTR_VALID;
}
INIT_WORK(&mcmd->work, qlt_do_tmr_work);
@@ -4291,6 +4292,7 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
cmd->cmd_type = TYPE_TGT_CMD;
memcpy(&cmd->atio, atio, sizeof(*atio));
+ INIT_LIST_HEAD(&cmd->sess_cmd_list);
cmd->state = QLA_TGT_STATE_NEW;
cmd->tgt = vha->vha_tgt.qla_tgt;
qlt_incr_num_pend_cmds(vha);
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 1cff7c69d448..10e5e6c8087d 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -856,6 +856,7 @@ struct qla_tgt_cmd {
uint8_t cmd_type;
uint8_t pad[7];
struct se_cmd se_cmd;
+ struct list_head sess_cmd_list;
struct fc_port *sess;
struct qla_qpair *qpair;
uint32_t reset_count;
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index bd8623ee156a..26c13a953b97 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -928,7 +928,8 @@ qla27xx_template_checksum(void *p, ulong size)
static inline int
qla27xx_verify_template_checksum(struct qla27xx_fwdt_template *tmp)
{
- return qla27xx_template_checksum(tmp, tmp->template_size) == 0;
+ return qla27xx_template_checksum(tmp,
+ le32_to_cpu(tmp->template_size)) == 0;
}
static inline int
@@ -944,7 +945,7 @@ qla27xx_execute_fwdt_template(struct scsi_qla_host *vha,
ulong len = 0;
if (qla27xx_fwdt_template_valid(tmp)) {
- len = tmp->template_size;
+ len = le32_to_cpu(tmp->template_size);
tmp = memcpy(buf, tmp, len);
ql27xx_edit_template(vha, tmp);
qla27xx_walk_template(vha, tmp, buf, &len);
@@ -960,7 +961,7 @@ qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *vha, void *p)
ulong len = 0;
if (qla27xx_fwdt_template_valid(tmp)) {
- len = tmp->template_size;
+ len = le32_to_cpu(tmp->template_size);
qla27xx_walk_template(vha, tmp, NULL, &len);
}
@@ -972,7 +973,7 @@ qla27xx_fwdt_template_size(void *p)
{
struct qla27xx_fwdt_template *tmp = p;
- return tmp->template_size;
+ return le32_to_cpu(tmp->template_size);
}
int
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.h b/drivers/scsi/qla2xxx/qla_tmpl.h
index c47184db5081..6e0987edfceb 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.h
+++ b/drivers/scsi/qla2xxx/qla_tmpl.h
@@ -12,7 +12,7 @@
struct __packed qla27xx_fwdt_template {
__le32 template_type;
__le32 entry_offset;
- uint32_t template_size;
+ __le32 template_size;
uint32_t count; /* borrow field for running/residual count */
__le32 entry_count;
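Retyping template_size as __le32 is what makes the le32_to_cpu() conversions in qla_tmpl.c above both necessary and checkable: sparse (make C=1) flags any direct arithmetic on a __le32 value. The discipline in miniature, on a hypothetical firmware header:

struct example_fw_hdr {
	__le32 size;		/* kept in firmware (little-endian) order */
	__le16 flags;
};

static u32 example_hdr_size(const struct example_fw_hdr *hdr)
{
	return le32_to_cpu(hdr->size);		/* CPU-order value */
}

static void example_set_flag(struct example_fw_hdr *hdr, u16 bit)
{
	hdr->flags |= cpu_to_le16(bit);		/* convert before OR-ing */
}

On little-endian hosts the conversions compile away; on big-endian hosts they become byte swaps, which is exactly what the sf_init_cb->flags fixes in qla_mbx.c above correct.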
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index c2d4da52f4a9..ccec858875dd 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -6,9 +6,9 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.02.00.103-k"
+#define QLA2XXX_VERSION "10.02.00.104-k"
#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 2
#define QLA_DRIVER_PATCH_VER 0
-#define QLA_DRIVER_BETA_VER 103
+#define QLA_DRIVER_BETA_VER 104
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 61017acd3458..b55fc768a2a7 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -255,6 +255,7 @@ static void tcm_qla2xxx_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
static void tcm_qla2xxx_complete_free(struct work_struct *work)
{
struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+ unsigned long flags;
cmd->cmd_in_wq = 0;
@@ -265,6 +266,10 @@ static void tcm_qla2xxx_complete_free(struct work_struct *work)
cmd->trc_flags |= TRC_CMD_FREE;
cmd->cmd_sent_to_fw = 0;
+ spin_lock_irqsave(&cmd->sess->sess_cmd_lock, flags);
+ list_del_init(&cmd->sess_cmd_list);
+ spin_unlock_irqrestore(&cmd->sess->sess_cmd_lock, flags);
+
transport_generic_free_cmd(&cmd->se_cmd, 0);
}
@@ -363,15 +368,10 @@ static void tcm_qla2xxx_put_sess(struct fc_port *sess)
static void tcm_qla2xxx_close_session(struct se_session *se_sess)
{
struct fc_port *sess = se_sess->fabric_sess_ptr;
- struct scsi_qla_host *vha;
- unsigned long flags;
BUG_ON(!sess);
- vha = sess->vha;
- spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
- target_sess_cmd_list_set_waiting(se_sess);
- spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+ target_stop_session(se_sess);
sess->explicit_logout = 1;
tcm_qla2xxx_put_sess(sess);
@@ -451,13 +451,14 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
struct se_portal_group *se_tpg;
struct tcm_qla2xxx_tpg *tpg;
#endif
- int flags = TARGET_SCF_ACK_KREF;
+ int target_flags = TARGET_SCF_ACK_KREF;
+ unsigned long flags;
if (bidi)
- flags |= TARGET_SCF_BIDI_OP;
+ target_flags |= TARGET_SCF_BIDI_OP;
if (se_cmd->cpuid != WORK_CPU_UNBOUND)
- flags |= TARGET_SCF_USE_CPUID;
+ target_flags |= TARGET_SCF_USE_CPUID;
sess = cmd->sess;
if (!sess) {
@@ -479,11 +480,15 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
return 0;
}
#endif
-
cmd->qpair->tgt_counters.qla_core_sbt_cmd++;
+
+ spin_lock_irqsave(&sess->sess_cmd_lock, flags);
+ list_add_tail(&cmd->sess_cmd_list, &sess->sess_cmd_list);
+ spin_unlock_irqrestore(&sess->sess_cmd_lock, flags);
+
return target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0],
- cmd->unpacked_lun, data_length, fcp_task_attr,
- data_dir, flags);
+ cmd->unpacked_lun, data_length, fcp_task_attr,
+ data_dir, target_flags);
}
static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
@@ -574,13 +579,11 @@ static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, u64 lun,
struct fc_port *sess = mcmd->sess;
struct se_cmd *se_cmd = &mcmd->se_cmd;
int transl_tmr_func = 0;
- int flags = TARGET_SCF_ACK_KREF;
switch (tmr_func) {
case QLA_TGT_ABTS:
pr_debug("%ld: ABTS received\n", sess->vha->host_no);
transl_tmr_func = TMR_ABORT_TASK;
- flags |= TARGET_SCF_LOOKUP_LUN_FROM_TAG;
break;
case QLA_TGT_2G_ABORT_TASK:
pr_debug("%ld: 2G Abort Task received\n", sess->vha->host_no);
@@ -613,31 +616,26 @@ static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, u64 lun,
}
return target_submit_tmr(se_cmd, sess->se_sess, NULL, lun, mcmd,
- transl_tmr_func, GFP_ATOMIC, tag, flags);
+ transl_tmr_func, GFP_ATOMIC, tag, TARGET_SCF_ACK_KREF);
}
static struct qla_tgt_cmd *tcm_qla2xxx_find_cmd_by_tag(struct fc_port *sess,
uint64_t tag)
{
- struct qla_tgt_cmd *cmd = NULL;
- struct se_cmd *secmd;
+ struct qla_tgt_cmd *cmd;
unsigned long flags;
if (!sess->se_sess)
return NULL;
- spin_lock_irqsave(&sess->se_sess->sess_cmd_lock, flags);
- list_for_each_entry(secmd, &sess->se_sess->sess_cmd_list, se_cmd_list) {
- /* skip task management functions, including tmr->task_cmd */
- if (secmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
- continue;
-
- if (secmd->tag == tag) {
- cmd = container_of(secmd, struct qla_tgt_cmd, se_cmd);
- break;
- }
+ spin_lock_irqsave(&sess->sess_cmd_lock, flags);
+ list_for_each_entry(cmd, &sess->sess_cmd_list, sess_cmd_list) {
+ if (cmd->se_cmd.tag == tag)
+ goto done;
}
- spin_unlock_irqrestore(&sess->se_sess->sess_cmd_lock, flags);
+ cmd = NULL;
+done:
+ spin_unlock_irqrestore(&sess->sess_cmd_lock, flags);
return cmd;
}
@@ -767,11 +765,19 @@ static void tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd)
static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)
{
- struct qla_tgt_cmd *cmd = container_of(se_cmd,
- struct qla_tgt_cmd, se_cmd);
+ struct qla_tgt_cmd *cmd;
+ unsigned long flags;
- if (qlt_abort_cmd(cmd))
+ if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
return;
+
+ cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
+
+ spin_lock_irqsave(&cmd->sess->sess_cmd_lock, flags);
+ list_del_init(&cmd->sess_cmd_list);
+ spin_unlock_irqrestore(&cmd->sess->sess_cmd_lock, flags);
+
+ qlt_abort_cmd(cmd);
}
static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
@@ -820,7 +826,7 @@ static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct fc_port *sess)
static void tcm_qla2xxx_shutdown_sess(struct fc_port *sess)
{
- target_sess_cmd_list_set_waiting(sess->se_sess);
+ target_stop_session(sess->se_sess);
}
static int tcm_qla2xxx_init_nodeacl(struct se_node_acl *se_nacl,
@@ -1394,8 +1400,6 @@ static void tcm_qla2xxx_free_session(struct fc_port *sess)
struct se_session *se_sess;
struct tcm_qla2xxx_lport *lport;
- BUG_ON(in_interrupt());
-
se_sess = sess->se_sess;
if (!se_sess) {
pr_err("struct fc_port->se_sess is NULL\n");
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index f5b382ed0a1b..031569c496e5 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -435,9 +435,9 @@ struct isp_operations {
void (*wr_reg_direct) (struct scsi_qla_host *, ulong, uint32_t);
int (*rd_reg_indirect) (struct scsi_qla_host *, uint32_t, uint32_t *);
int (*wr_reg_indirect) (struct scsi_qla_host *, uint32_t, uint32_t);
- int (*idc_lock) (struct scsi_qla_host *);
+ int (*idc_lock) (struct scsi_qla_host *); /* Context: task, can sleep */
void (*idc_unlock) (struct scsi_qla_host *);
- void (*rom_lock_recovery) (struct scsi_qla_host *);
+ void (*rom_lock_recovery) (struct scsi_qla_host *); /* Context: task, can sleep */
void (*queue_mailbox_command) (struct scsi_qla_host *, uint32_t *, int);
void (*process_mailbox_interrupt) (struct scsi_qla_host *, int);
};
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index b8f02210aeb0..ea60057b2e20 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -114,7 +114,6 @@ irqreturn_t qla4_82xx_intr_handler(int irq, void *dev_id);
void qla4_82xx_queue_iocb(struct scsi_qla_host *ha);
void qla4_82xx_complete_iocb(struct scsi_qla_host *ha);
-int qla4_82xx_crb_win_lock(struct scsi_qla_host *);
void qla4_82xx_crb_win_unlock(struct scsi_qla_host *);
int qla4_82xx_pci_get_crb_addr_2M(struct scsi_qla_host *, ulong *);
void qla4_82xx_wr_32(struct scsi_qla_host *, ulong, u32);
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index f1767b21076f..e6e35e6958f6 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -375,6 +375,35 @@ qla4_82xx_pci_set_crbwindow_2M(struct scsi_qla_host *ha, ulong *off)
*off = (*off & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase;
}
+#define CRB_WIN_LOCK_TIMEOUT 100000000
+
+/*
+ * Context: atomic
+ */
+static int qla4_82xx_crb_win_lock(struct scsi_qla_host *ha)
+{
+ int done = 0, timeout = 0;
+
+ while (!done) {
+		/* acquire semaphore7 from PCI HW block */
+ done = qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_LOCK));
+ if (done == 1)
+ break;
+ if (timeout >= CRB_WIN_LOCK_TIMEOUT)
+ return -1;
+
+ timeout++;
+ udelay(10);
+ }
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_WIN_LOCK_ID, ha->func_num);
+ return 0;
+}
+
+void qla4_82xx_crb_win_unlock(struct scsi_qla_host *ha)
+{
+ qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
+}
+
void
qla4_82xx_wr_32(struct scsi_qla_host *ha, ulong off, u32 data)
{
@@ -475,40 +504,6 @@ int qla4_82xx_md_wr_32(struct scsi_qla_host *ha, uint32_t off, uint32_t data)
return rval;
}
-#define CRB_WIN_LOCK_TIMEOUT 100000000
-
-int qla4_82xx_crb_win_lock(struct scsi_qla_host *ha)
-{
- int i;
- int done = 0, timeout = 0;
-
- while (!done) {
- /* acquire semaphore3 from PCI HW block */
- done = qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_LOCK));
- if (done == 1)
- break;
- if (timeout >= CRB_WIN_LOCK_TIMEOUT)
- return -1;
-
- timeout++;
-
- /* Yield CPU */
- if (!in_interrupt())
- schedule();
- else {
- for (i = 0; i < 20; i++)
- cpu_relax(); /*This a nop instr on i386*/
- }
- }
- qla4_82xx_wr_32(ha, QLA82XX_CRB_WIN_LOCK_ID, ha->func_num);
- return 0;
-}
-
-void qla4_82xx_crb_win_unlock(struct scsi_qla_host *ha)
-{
- qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
-}
-
#define IDC_LOCK_TIMEOUT 100000000
/**
@@ -517,12 +512,15 @@ void qla4_82xx_crb_win_unlock(struct scsi_qla_host *ha)
*
* General purpose lock used to synchronize access to
* CRB_DEV_STATE, CRB_DEV_REF_COUNT, etc.
+ *
+ * Context: task, can sleep
**/
int qla4_82xx_idc_lock(struct scsi_qla_host *ha)
{
- int i;
int done = 0, timeout = 0;
+ might_sleep();
+
while (!done) {
/* acquire semaphore5 from PCI HW block */
done = qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_LOCK));
@@ -532,14 +530,7 @@ int qla4_82xx_idc_lock(struct scsi_qla_host *ha)
return -1;
timeout++;
-
- /* Yield CPU */
- if (!in_interrupt())
- schedule();
- else {
- for (i = 0; i < 20; i++)
- cpu_relax(); /*This a nop instr on i386*/
- }
+ msleep(100);
}
return 0;
}
@@ -880,15 +871,18 @@ qla4_82xx_decode_crb_addr(unsigned long addr)
static long rom_max_timeout = 100;
static long qla4_82xx_rom_lock_timeout = 100;
+/*
+ * Context: task, can sleep
+ */
static int
qla4_82xx_rom_lock(struct scsi_qla_host *ha)
{
- int i;
int done = 0, timeout = 0;
+ might_sleep();
+
while (!done) {
/* acquire semaphore2 from PCI HW block */
-
done = qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK));
if (done == 1)
break;
@@ -896,14 +890,7 @@ qla4_82xx_rom_lock(struct scsi_qla_host *ha)
return -1;
timeout++;
-
- /* Yield CPU */
- if (!in_interrupt())
- schedule();
- else {
- for (i = 0; i < 20; i++)
- cpu_relax(); /*This a nop instr on i386*/
- }
+ msleep(20);
}
qla4_82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ROM_LOCK_DRIVER);
return 0;
@@ -1780,7 +1767,7 @@ qla4_82xx_start_firmware(struct scsi_qla_host *ha, uint32_t image_start)
int qla4_82xx_try_start_fw(struct scsi_qla_host *ha)
{
- int rval = QLA_ERROR;
+ int rval;
/*
* FW Load priority:
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 2c23b692e318..a4b014e1cd8c 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -685,7 +685,6 @@ static int qla4xxx_get_chap_by_index(struct scsi_qla_host *ha,
if (!ha->chap_list) {
ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n");
- rval = QLA_ERROR;
goto exit_get_chap;
}
@@ -697,14 +696,12 @@ static int qla4xxx_get_chap_by_index(struct scsi_qla_host *ha,
if (chap_index > max_chap_entries) {
ql4_printk(KERN_ERR, ha, "Invalid Chap index\n");
- rval = QLA_ERROR;
goto exit_get_chap;
}
*chap_entry = (struct ql4_chap_table *)ha->chap_list + chap_index;
if ((*chap_entry)->cookie !=
__constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
- rval = QLA_ERROR;
*chap_entry = NULL;
} else {
rval = QLA_SUCCESS;
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 24c0f7ec0351..4a08c450b756 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -6740,7 +6740,7 @@ static int __init scsi_debug_init(void)
k = sdeb_zbc_model_str(sdeb_zbc_model_s);
if (k < 0) {
ret = k;
- goto free_vm;
+ goto free_q_arr;
}
sdeb_zbc_model = k;
switch (sdeb_zbc_model) {
@@ -6753,7 +6753,8 @@ static int __init scsi_debug_init(void)
break;
default:
pr_err("Invalid ZBC model\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto free_q_arr;
}
}
if (sdeb_zbc_model != BLK_ZONED_NONE) {
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index ba84244c1b4f..d92cec12454c 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -559,7 +559,8 @@ static int scsi_dev_info_list_add_str(char *dev_list)
}
/**
- * get_device_flags - get device specific flags from the dynamic device list.
+ * scsi_get_device_flags - get device specific flags from the dynamic
+ * device list.
* @sdev: &scsi_device to get flags for
* @vendor: vendor name
* @model: model name
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 03c6d0620bfd..b3f14f05340a 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -249,7 +249,8 @@ int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
req = blk_get_request(sdev->request_queue,
data_direction == DMA_TO_DEVICE ?
- REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, BLK_MQ_REQ_PREEMPT);
+ REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN,
+ rq_flags & RQF_PM ? BLK_MQ_REQ_PM : 0);
if (IS_ERR(req))
return ret;
rq = scsi_req(req);
@@ -766,6 +767,9 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
case 0x24: /* depopulation in progress */
action = ACTION_DELAYED_RETRY;
break;
+ case 0x0a: /* ALUA state transition */
+ blk_stat = BLK_STS_AGAIN;
+ fallthrough;
default:
action = ACTION_FAIL;
break;
@@ -1203,6 +1207,8 @@ static blk_status_t
scsi_device_state_check(struct scsi_device *sdev, struct request *req)
{
switch (sdev->sdev_state) {
+ case SDEV_CREATED:
+ return BLK_STS_OK;
case SDEV_OFFLINE:
case SDEV_TRANSPORT_OFFLINE:
/*
@@ -1229,18 +1235,18 @@ scsi_device_state_check(struct scsi_device *sdev, struct request *req)
return BLK_STS_RESOURCE;
case SDEV_QUIESCE:
/*
- * If the devices is blocked we defer normal commands.
+		 * If the device is quiesced we only accept power management
+ * commands.
*/
- if (req && !(req->rq_flags & RQF_PREEMPT))
+ if (req && WARN_ON_ONCE(!(req->rq_flags & RQF_PM)))
return BLK_STS_RESOURCE;
return BLK_STS_OK;
default:
/*
* For any other not fully online state we only allow
- * special commands. In particular any user initiated
- * command is not allowed.
+ * power management commands.
*/
- if (req && !(req->rq_flags & RQF_PREEMPT))
+ if (req && !(req->rq_flags & RQF_PM))
return BLK_STS_IOERR;
return BLK_STS_OK;
}
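The RQF_PREEMPT-to-RQF_PM switchover above narrows what a quiesced device accepts to genuine power-management requests. Callers mark such requests at allocation time with BLK_MQ_REQ_PM, as the __scsi_execute() hunk does; a minimal sketch of allocating one directly:

static int example_alloc_pm_request(struct scsi_device *sdev)
{
	struct request *req;

	/*
	 * BLK_MQ_REQ_PM sets RQF_PM, which scsi_device_state_check()
	 * now tests for under SDEV_QUIESCE.
	 */
	req = blk_get_request(sdev->request_queue, REQ_OP_SCSI_IN,
			      BLK_MQ_REQ_PM);
	if (IS_ERR(req))
		return PTR_ERR(req);

	blk_put_request(req);
	return 0;
}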
@@ -1455,7 +1461,7 @@ static void scsi_softirq_done(struct request *rq)
}
/**
- * scsi_dispatch_command - Dispatch a command to the low-level driver.
+ * scsi_dispatch_cmd - Dispatch a command to the low-level driver.
* @cmd: command block we are dispatching.
*
* Return: nonzero return request was rejected and device's queue needs to be
@@ -1706,6 +1712,11 @@ out_put_budget:
if (scsi_device_blocked(sdev))
ret = BLK_STS_DEV_RESOURCE;
break;
+ case BLK_STS_AGAIN:
+ scsi_req(req)->result = DID_BUS_BUSY << 16;
+ if (req->rq_flags & RQF_DONTPREP)
+ scsi_mq_uninit_cmd(cmd);
+ break;
default:
if (unlikely(!scsi_device_online(sdev)))
scsi_req(req)->result = DID_NO_CONNECT << 16;
@@ -2334,7 +2345,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
EXPORT_SYMBOL(scsi_device_set_state);
/**
- * sdev_evt_emit - emit a single SCSI device uevent
+ * scsi_evt_emit - emit a single SCSI device uevent
* @sdev: associated SCSI device
* @evt: event to emit
*
@@ -2382,7 +2393,7 @@ static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
}
/**
- * sdev_evt_thread - send a uevent for each scsi event
+ * scsi_evt_thread - send a uevent for each scsi event
* @work: work struct for scsi_device
*
* Dispatch queued events to their associated scsi_device kobjects
@@ -2508,15 +2519,13 @@ void sdev_evt_send_simple(struct scsi_device *sdev,
EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
/**
- * scsi_device_quiesce - Block user issued commands.
+ * scsi_device_quiesce - Block all commands except power management.
* @sdev: scsi device to quiesce.
*
* This works by trying to transition to the SDEV_QUIESCE state
* (which must be a legal transition). When the device is in this
- * state, only special requests will be accepted, all others will
- * be deferred. Since special requests may also be requeued requests,
- * a successful return doesn't guarantee the device will be
- * totally quiescent.
+ * state, only power management requests will be accepted; all others will
+ * be deferred.
*
* Must be called with user context, may sleep.
*
@@ -2578,12 +2587,12 @@ void scsi_device_resume(struct scsi_device *sdev)
* device deleted during suspend)
*/
mutex_lock(&sdev->state_mutex);
+ if (sdev->sdev_state == SDEV_QUIESCE)
+ scsi_device_set_state(sdev, SDEV_RUNNING);
if (sdev->quiesced_by) {
sdev->quiesced_by = NULL;
blk_clear_pm_only(sdev->request_queue);
}
- if (sdev->sdev_state == SDEV_QUIESCE)
- scsi_device_set_state(sdev, SDEV_RUNNING);
mutex_unlock(&sdev->state_mutex);
}
EXPORT_SYMBOL(scsi_device_resume);
@@ -2948,6 +2957,78 @@ void sdev_enable_disk_events(struct scsi_device *sdev)
}
EXPORT_SYMBOL(sdev_enable_disk_events);
+static unsigned char designator_prio(const unsigned char *d)
+{
+ if (d[1] & 0x30)
+ /* not associated with LUN */
+ return 0;
+
+ if (d[3] == 0)
+ /* invalid length */
+ return 0;
+
+ /*
+ * Order of preference for lun descriptor:
+ * - SCSI name string
+ * - NAA IEEE Registered Extended
+ * - EUI-64 based 16-byte
+ * - EUI-64 based 12-byte
+ * - NAA IEEE Registered
+ * - NAA IEEE Extended
+ * - EUI-64 based 8-byte
+ * - SCSI name string (truncated)
+ * - T10 Vendor ID
+ * as longer descriptors reduce the likelihood
+ * of identification clashes.
+ */
+
+ switch (d[1] & 0xf) {
+ case 8:
+ /* SCSI name string, variable-length UTF-8 */
+ return 9;
+ case 3:
+ switch (d[4] >> 4) {
+ case 6:
+ /* NAA registered extended */
+ return 8;
+ case 5:
+ /* NAA registered */
+ return 5;
+ case 4:
+ /* NAA extended */
+ return 4;
+ case 3:
+ /* NAA locally assigned */
+ return 1;
+ default:
+ break;
+ }
+ break;
+ case 2:
+ switch (d[3]) {
+ case 16:
+ /* EUI64-based, 16 byte */
+ return 7;
+ case 12:
+ /* EUI64-based, 12 byte */
+ return 6;
+ case 8:
+ /* EUI64-based, 8 byte */
+ return 3;
+ default:
+ break;
+ }
+ break;
+ case 1:
+ /* T10 vendor ID */
+ return 1;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
/**
* scsi_vpd_lun_id - return a unique device identification
* @sdev: SCSI device
@@ -2964,7 +3045,7 @@ EXPORT_SYMBOL(sdev_enable_disk_events);
*/
int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
{
- u8 cur_id_type = 0xff;
+ u8 cur_id_prio = 0;
u8 cur_id_size = 0;
const unsigned char *d, *cur_id_str;
const struct scsi_vpd *vpd_pg83;
@@ -2977,20 +3058,6 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
return -ENXIO;
}
- /*
- * Look for the correct descriptor.
- * Order of preference for lun descriptor:
- * - SCSI name string
- * - NAA IEEE Registered Extended
- * - EUI-64 based 16-byte
- * - EUI-64 based 12-byte
- * - NAA IEEE Registered
- * - NAA IEEE Extended
- * - T10 Vendor ID
- * as longer descriptors reduce the likelyhood
- * of identification clashes.
- */
-
/* The id string must be at least 20 bytes + terminating NULL byte */
if (id_len < 21) {
rcu_read_unlock();
@@ -2998,39 +3065,32 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
}
memset(id, 0, id_len);
- d = vpd_pg83->data + 4;
- while (d < vpd_pg83->data + vpd_pg83->len) {
- /* Skip designators not referring to the LUN */
- if ((d[1] & 0x30) != 0x00)
- goto next_desig;
+ for (d = vpd_pg83->data + 4;
+ d < vpd_pg83->data + vpd_pg83->len;
+ d += d[3] + 4) {
+ u8 prio = designator_prio(d);
+
+ if (prio == 0 || cur_id_prio > prio)
+ continue;
switch (d[1] & 0xf) {
case 0x1:
/* T10 Vendor ID */
if (cur_id_size > d[3])
break;
- /* Prefer anything */
- if (cur_id_type > 0x01 && cur_id_type != 0xff)
- break;
+ cur_id_prio = prio;
cur_id_size = d[3];
if (cur_id_size + 4 > id_len)
cur_id_size = id_len - 4;
cur_id_str = d + 4;
- cur_id_type = d[1] & 0xf;
id_size = snprintf(id, id_len, "t10.%*pE",
cur_id_size, cur_id_str);
break;
case 0x2:
/* EUI-64 */
- if (cur_id_size > d[3])
- break;
- /* Prefer NAA IEEE Registered Extended */
- if (cur_id_type == 0x3 &&
- cur_id_size == d[3])
- break;
+ cur_id_prio = prio;
cur_id_size = d[3];
cur_id_str = d + 4;
- cur_id_type = d[1] & 0xf;
switch (cur_id_size) {
case 8:
id_size = snprintf(id, id_len,
@@ -3048,17 +3108,14 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
cur_id_str);
break;
default:
- cur_id_size = 0;
break;
}
break;
case 0x3:
/* NAA */
- if (cur_id_size > d[3])
- break;
+ cur_id_prio = prio;
cur_id_size = d[3];
cur_id_str = d + 4;
- cur_id_type = d[1] & 0xf;
switch (cur_id_size) {
case 8:
id_size = snprintf(id, id_len,
@@ -3071,32 +3128,29 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
cur_id_str);
break;
default:
- cur_id_size = 0;
break;
}
break;
case 0x8:
/* SCSI name string */
- if (cur_id_size + 4 > d[3])
+ if (cur_id_size > d[3])
break;
/* Prefer others for truncated descriptor */
- if (cur_id_size && d[3] > id_len)
- break;
+ if (d[3] > id_len) {
+ prio = 2;
+ if (cur_id_prio > prio)
+ break;
+ }
+ cur_id_prio = prio;
cur_id_size = id_size = d[3];
cur_id_str = d + 4;
- cur_id_type = d[1] & 0xf;
if (cur_id_size >= id_len)
cur_id_size = id_len - 1;
memcpy(id, cur_id_str, cur_id_size);
- /* Decrease priority for truncated descriptor */
- if (cur_id_size != id_size)
- cur_id_size = 6;
break;
default:
break;
}
-next_desig:
- d += d[3] + 4;
}
rcu_read_unlock();
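designator_prio() turns the old ad-hoc comparisons into a single numeric ranking (SCSI name string 9, NAA Registered Extended 8, 16-byte EUI-64 7, down to T10 vendor ID 1), with scsi_vpd_lun_id() demoting a truncated SCSI name string to priority 2 inline. The selection then reduces to keep-the-best-so-far; a sketch of that core loop, using the helper from the patch:

static const unsigned char *example_best_designator(const unsigned char *page,
						    size_t page_len)
{
	const unsigned char *d, *best = NULL;
	u8 best_prio = 0;

	/* each descriptor is a 4-byte header plus d[3] payload bytes */
	for (d = page + 4; d < page + page_len; d += d[3] + 4) {
		u8 prio = designator_prio(d);

		if (prio > best_prio) {
			best_prio = prio;
			best = d;
		}
	}
	return best;	/* NULL if no usable designator was found */
}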
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index d6e344fa33ad..b6378c8ca783 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -370,7 +370,7 @@ static DEVICE_ATTR(eh_deadline, S_IRUGO | S_IWUSR, show_shost_eh_deadline, store
shost_rd_attr(unique_id, "%u\n");
shost_rd_attr(cmd_per_lun, "%hd\n");
-shost_rd_attr(can_queue, "%hd\n");
+shost_rd_attr(can_queue, "%d\n");
shost_rd_attr(sg_tablesize, "%hu\n");
shost_rd_attr(sg_prot_tablesize, "%hu\n");
shost_rd_attr(unchecked_isa_dma, "%d\n");
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 2ff7f06203da..a926e8f9e56e 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -22,6 +22,7 @@
#include <net/netlink.h>
#include <scsi/scsi_netlink_fc.h>
#include <scsi/scsi_bsg_fc.h>
+#include <uapi/scsi/fc/fc_els.h>
#include "scsi_priv.h"
static int fc_queue_work(struct Scsi_Host *, struct work_struct *);
@@ -33,6 +34,11 @@ static int fc_bsg_hostadd(struct Scsi_Host *, struct fc_host_attrs *);
static int fc_bsg_rportadd(struct Scsi_Host *, struct fc_rport *);
static void fc_bsg_remove(struct request_queue *);
static void fc_bsg_goose_queue(struct fc_rport *);
+static void fc_li_stats_update(struct fc_fn_li_desc *li_desc,
+ struct fc_fpin_stats *stats);
+static void fc_delivery_stats_update(u32 reason_code,
+ struct fc_fpin_stats *stats);
+static void fc_cn_stats_update(u16 event_type, struct fc_fpin_stats *stats);
/*
* Module Parameters
@@ -419,6 +425,7 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev,
fc_host->fabric_name = -1;
memset(fc_host->symbolic_name, 0, sizeof(fc_host->symbolic_name));
memset(fc_host->system_hostname, 0, sizeof(fc_host->system_hostname));
+ memset(&fc_host->fpin_stats, 0, sizeof(fc_host->fpin_stats));
fc_host->tgtid_bind_type = FC_TGTID_BIND_BY_WWPN;
@@ -629,7 +636,263 @@ fc_host_post_vendor_event(struct Scsi_Host *shost, u32 event_number,
EXPORT_SYMBOL(fc_host_post_vendor_event);
/**
- * fc_host_rcv_fpin - routine to process a received FPIN.
+ * fc_find_rport_by_wwpn - find the fc_rport pointer for a given wwpn
+ * @shost: host the fc_rport is associated with
+ * @wwpn: wwpn of the fc_rport device
+ *
+ * Notes:
+ * This routine assumes no locks are held on entry.
+ */
+struct fc_rport *
+fc_find_rport_by_wwpn(struct Scsi_Host *shost, u64 wwpn)
+{
+ struct fc_rport *rport;
+ unsigned long flags;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+
+ list_for_each_entry(rport, &fc_host_rports(shost), peers) {
+ if (rport->port_state != FC_PORTSTATE_ONLINE)
+ continue;
+
+ if (rport->port_name == wwpn) {
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ return rport;
+ }
+ }
+
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ return NULL;
+}
+EXPORT_SYMBOL(fc_find_rport_by_wwpn);
+
+static void
+fc_li_stats_update(struct fc_fn_li_desc *li_desc,
+ struct fc_fpin_stats *stats)
+{
+ stats->li += be32_to_cpu(li_desc->event_count);
+ switch (be16_to_cpu(li_desc->event_type)) {
+ case FPIN_LI_UNKNOWN:
+ stats->li_failure_unknown +=
+ be32_to_cpu(li_desc->event_count);
+ break;
+ case FPIN_LI_LINK_FAILURE:
+ stats->li_link_failure_count +=
+ be32_to_cpu(li_desc->event_count);
+ break;
+ case FPIN_LI_LOSS_OF_SYNC:
+ stats->li_loss_of_sync_count +=
+ be32_to_cpu(li_desc->event_count);
+ break;
+ case FPIN_LI_LOSS_OF_SIG:
+ stats->li_loss_of_signals_count +=
+ be32_to_cpu(li_desc->event_count);
+ break;
+ case FPIN_LI_PRIM_SEQ_ERR:
+ stats->li_prim_seq_err_count +=
+ be32_to_cpu(li_desc->event_count);
+ break;
+ case FPIN_LI_INVALID_TX_WD:
+ stats->li_invalid_tx_word_count +=
+ be32_to_cpu(li_desc->event_count);
+ break;
+ case FPIN_LI_INVALID_CRC:
+ stats->li_invalid_crc_count +=
+ be32_to_cpu(li_desc->event_count);
+ break;
+ case FPIN_LI_DEVICE_SPEC:
+ stats->li_device_specific +=
+ be32_to_cpu(li_desc->event_count);
+ break;
+ }
+}
+
+static void
+fc_delivery_stats_update(u32 reason_code, struct fc_fpin_stats *stats)
+{
+ stats->dn++;
+ switch (reason_code) {
+ case FPIN_DELI_UNKNOWN:
+ stats->dn_unknown++;
+ break;
+ case FPIN_DELI_TIMEOUT:
+ stats->dn_timeout++;
+ break;
+ case FPIN_DELI_UNABLE_TO_ROUTE:
+ stats->dn_unable_to_route++;
+ break;
+ case FPIN_DELI_DEVICE_SPEC:
+ stats->dn_device_specific++;
+ break;
+ }
+}
+
+static void
+fc_cn_stats_update(u16 event_type, struct fc_fpin_stats *stats)
+{
+ stats->cn++;
+ switch (event_type) {
+ case FPIN_CONGN_CLEAR:
+ stats->cn_clear++;
+ break;
+ case FPIN_CONGN_LOST_CREDIT:
+ stats->cn_lost_credit++;
+ break;
+ case FPIN_CONGN_CREDIT_STALL:
+ stats->cn_credit_stall++;
+ break;
+ case FPIN_CONGN_OVERSUBSCRIPTION:
+ stats->cn_oversubscription++;
+ break;
+ case FPIN_CONGN_DEVICE_SPEC:
+ stats->cn_device_specific++;
+ }
+}
+
+/*
+ * fc_fpin_li_stats_update - routine to update Link Integrity
+ * event statistics.
+ * @shost: host the FPIN was received on
+ * @tlv: pointer to link integrity descriptor
+ *
+ */
+static void
+fc_fpin_li_stats_update(struct Scsi_Host *shost, struct fc_tlv_desc *tlv)
+{
+ u8 i;
+ struct fc_rport *rport = NULL;
+ struct fc_rport *attach_rport = NULL;
+ struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
+ struct fc_fn_li_desc *li_desc = (struct fc_fn_li_desc *)tlv;
+ u64 wwpn;
+
+ rport = fc_find_rport_by_wwpn(shost,
+ be64_to_cpu(li_desc->attached_wwpn));
+ if (rport &&
+ (rport->roles & FC_PORT_ROLE_FCP_TARGET ||
+ rport->roles & FC_PORT_ROLE_NVME_TARGET)) {
+ attach_rport = rport;
+ fc_li_stats_update(li_desc, &attach_rport->fpin_stats);
+ }
+
+ if (be32_to_cpu(li_desc->pname_count) > 0) {
+ for (i = 0;
+ i < be32_to_cpu(li_desc->pname_count);
+ i++) {
+ wwpn = be64_to_cpu(li_desc->pname_list[i]);
+ rport = fc_find_rport_by_wwpn(shost, wwpn);
+ if (rport &&
+ (rport->roles & FC_PORT_ROLE_FCP_TARGET ||
+ rport->roles & FC_PORT_ROLE_NVME_TARGET)) {
+ if (rport == attach_rport)
+ continue;
+ fc_li_stats_update(li_desc,
+ &rport->fpin_stats);
+ }
+ }
+ }
+
+ if (fc_host->port_name == be64_to_cpu(li_desc->attached_wwpn))
+ fc_li_stats_update(li_desc, &fc_host->fpin_stats);
+}
+
+/*
+ * fc_fpin_delivery_stats_update - routine to update Delivery Notification
+ * event statistics.
+ * @shost: host the FPIN was received on
+ * @tlv: pointer to delivery descriptor
+ *
+ */
+static void
+fc_fpin_delivery_stats_update(struct Scsi_Host *shost,
+ struct fc_tlv_desc *tlv)
+{
+ struct fc_rport *rport = NULL;
+ struct fc_rport *attach_rport = NULL;
+ struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
+ struct fc_fn_deli_desc *dn_desc = (struct fc_fn_deli_desc *)tlv;
+ u32 reason_code = be32_to_cpu(dn_desc->deli_reason_code);
+
+ rport = fc_find_rport_by_wwpn(shost,
+ be64_to_cpu(dn_desc->attached_wwpn));
+ if (rport &&
+ (rport->roles & FC_PORT_ROLE_FCP_TARGET ||
+ rport->roles & FC_PORT_ROLE_NVME_TARGET)) {
+ attach_rport = rport;
+ fc_delivery_stats_update(reason_code,
+ &attach_rport->fpin_stats);
+ }
+
+ if (fc_host->port_name == be64_to_cpu(dn_desc->attached_wwpn))
+ fc_delivery_stats_update(reason_code, &fc_host->fpin_stats);
+}
+
+/*
+ * fc_fpin_peer_congn_stats_update - routine to update Peer Congestion
+ * event statistics.
+ * @shost: host the FPIN was received on
+ * @tlv: pointer to peer congestion descriptor
+ *
+ */
+static void
+fc_fpin_peer_congn_stats_update(struct Scsi_Host *shost,
+ struct fc_tlv_desc *tlv)
+{
+ u8 i;
+ struct fc_rport *rport = NULL;
+ struct fc_rport *attach_rport = NULL;
+ struct fc_fn_peer_congn_desc *pc_desc =
+ (struct fc_fn_peer_congn_desc *)tlv;
+ u16 event_type = be16_to_cpu(pc_desc->event_type);
+ u64 wwpn;
+
+ rport = fc_find_rport_by_wwpn(shost,
+ be64_to_cpu(pc_desc->attached_wwpn));
+ if (rport &&
+ (rport->roles & FC_PORT_ROLE_FCP_TARGET ||
+ rport->roles & FC_PORT_ROLE_NVME_TARGET)) {
+ attach_rport = rport;
+ fc_cn_stats_update(event_type, &attach_rport->fpin_stats);
+ }
+
+ if (be32_to_cpu(pc_desc->pname_count) > 0) {
+ for (i = 0;
+ i < be32_to_cpu(pc_desc->pname_count);
+ i++) {
+ wwpn = be64_to_cpu(pc_desc->pname_list[i]);
+ rport = fc_find_rport_by_wwpn(shost, wwpn);
+ if (rport &&
+ (rport->roles & FC_PORT_ROLE_FCP_TARGET ||
+ rport->roles & FC_PORT_ROLE_NVME_TARGET)) {
+ if (rport == attach_rport)
+ continue;
+ fc_cn_stats_update(event_type,
+ &rport->fpin_stats);
+ }
+ }
+ }
+}
+
+/*
+ * fc_fpin_congn_stats_update - routine to update Congestion
+ * event statistics.
+ * @shost: host the FPIN was received on
+ * @tlv: pointer to congestion descriptor
+ *
+ */
+static void
+fc_fpin_congn_stats_update(struct Scsi_Host *shost,
+ struct fc_tlv_desc *tlv)
+{
+ struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
+ struct fc_fn_congn_desc *congn = (struct fc_fn_congn_desc *)tlv;
+
+ fc_cn_stats_update(be16_to_cpu(congn->event_type),
+ &fc_host->fpin_stats);
+}
+
+/**
+ * fc_host_fpin_rcv - routine to process a received FPIN.
* @shost: host the FPIN was received on
* @fpin_len: length of FPIN payload, in bytes
* @fpin_buf: pointer to FPIN payload
@@ -640,6 +903,38 @@ EXPORT_SYMBOL(fc_host_post_vendor_event);
void
fc_host_fpin_rcv(struct Scsi_Host *shost, u32 fpin_len, char *fpin_buf)
{
+ struct fc_els_fpin *fpin = (struct fc_els_fpin *)fpin_buf;
+ struct fc_tlv_desc *tlv;
+ u32 desc_cnt = 0, bytes_remain;
+ u32 dtag;
+
+ /* Update Statistics */
+ tlv = (struct fc_tlv_desc *)&fpin->fpin_desc[0];
+ bytes_remain = fpin_len - offsetof(struct fc_els_fpin, fpin_desc);
+ bytes_remain = min_t(u32, bytes_remain, be32_to_cpu(fpin->desc_len));
+
+ while (bytes_remain >= FC_TLV_DESC_HDR_SZ &&
+ bytes_remain >= FC_TLV_DESC_SZ_FROM_LENGTH(tlv)) {
+ dtag = be32_to_cpu(tlv->desc_tag);
+ switch (dtag) {
+ case ELS_DTAG_LNK_INTEGRITY:
+ fc_fpin_li_stats_update(shost, tlv);
+ break;
+ case ELS_DTAG_DELIVERY:
+ fc_fpin_delivery_stats_update(shost, tlv);
+ break;
+ case ELS_DTAG_PEER_CONGEST:
+ fc_fpin_peer_congn_stats_update(shost, tlv);
+ break;
+ case ELS_DTAG_CONGESTION:
+ fc_fpin_congn_stats_update(shost, tlv);
+ }
+
+ desc_cnt++;
+ bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv);
+ tlv = fc_tlv_next_desc(tlv);
+ }
+
fc_host_post_fc_event(shost, fc_get_event_number(),
FCH_EVT_LINK_FPIN, fpin_len, fpin_buf, 0);
}
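
The loop above is the generic FC TLV walk: stop as soon as the remaining bytes cannot hold another descriptor header or the descriptor's self-declared size, otherwise advance by that size. A self-contained sketch of the same pattern, with local stand-ins for the fc_els.h definitions (illustration only, not part of the patch):

#include <stddef.h>
#include <stdint.h>

/* Illustrative stand-in for the kernel's struct fc_tlv_desc;
 * both fields are big-endian on the wire.
 */
struct tlv_desc {
	uint32_t desc_tag;	/* descriptor identifier */
	uint32_t desc_len;	/* payload bytes, excluding this header */
	uint8_t  desc_value[];
};

/* Convert a stored big-endian u32 to host order, portably */
static uint32_t be32_val(uint32_t v)
{
	const uint8_t *p = (const uint8_t *)&v;

	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | p[3];
}

#define TLV_HDR_SZ	offsetof(struct tlv_desc, desc_value)
#define TLV_SZ(d)	(TLV_HDR_SZ + be32_val((d)->desc_len))

/* Walk a descriptor list of 'bytes_remain' bytes, calling cb() per entry */
static void tlv_walk(void *buf, uint32_t bytes_remain,
		     void (*cb)(struct tlv_desc *))
{
	struct tlv_desc *tlv = buf;

	while (bytes_remain >= TLV_HDR_SZ && bytes_remain >= TLV_SZ(tlv)) {
		cb(tlv);
		bytes_remain -= TLV_SZ(tlv);
		tlv = (struct tlv_desc *)((uint8_t *)tlv + TLV_SZ(tlv));
	}
}
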
@@ -991,6 +1286,67 @@ store_fc_rport_fast_io_fail_tmo(struct device *dev,
static FC_DEVICE_ATTR(rport, fast_io_fail_tmo, S_IRUGO | S_IWUSR,
show_fc_rport_fast_io_fail_tmo, store_fc_rport_fast_io_fail_tmo);
+#define fc_rport_fpin_statistic(name) \
+static ssize_t fc_rport_fpinstat_##name(struct device *cd, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct fc_rport *rport = transport_class_to_rport(cd); \
+ \
+ return snprintf(buf, 20, "0x%llx\n", rport->fpin_stats.name); \
+} \
+static FC_DEVICE_ATTR(rport, fpin_##name, 0444, fc_rport_fpinstat_##name, NULL)
+
+fc_rport_fpin_statistic(dn);
+fc_rport_fpin_statistic(dn_unknown);
+fc_rport_fpin_statistic(dn_timeout);
+fc_rport_fpin_statistic(dn_unable_to_route);
+fc_rport_fpin_statistic(dn_device_specific);
+fc_rport_fpin_statistic(cn);
+fc_rport_fpin_statistic(cn_clear);
+fc_rport_fpin_statistic(cn_lost_credit);
+fc_rport_fpin_statistic(cn_credit_stall);
+fc_rport_fpin_statistic(cn_oversubscription);
+fc_rport_fpin_statistic(cn_device_specific);
+fc_rport_fpin_statistic(li);
+fc_rport_fpin_statistic(li_failure_unknown);
+fc_rport_fpin_statistic(li_link_failure_count);
+fc_rport_fpin_statistic(li_loss_of_sync_count);
+fc_rport_fpin_statistic(li_loss_of_signals_count);
+fc_rport_fpin_statistic(li_prim_seq_err_count);
+fc_rport_fpin_statistic(li_invalid_tx_word_count);
+fc_rport_fpin_statistic(li_invalid_crc_count);
+fc_rport_fpin_statistic(li_device_specific);
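
For one counter (dn), the macro above expands to roughly the following read-only attribute — one show function per statistic, no store path (sketch; FC_DEVICE_ATTR is this file's existing wrapper that produces the device_attr_rport_fpin_dn variable used in the array below):

static ssize_t fc_rport_fpinstat_dn(struct device *cd,
				    struct device_attribute *attr,
				    char *buf)
{
	struct fc_rport *rport = transport_class_to_rport(cd);

	return snprintf(buf, 20, "0x%llx\n", rport->fpin_stats.dn);
}
static FC_DEVICE_ATTR(rport, fpin_dn, 0444, fc_rport_fpinstat_dn, NULL);

Each counter then surfaces under the remote port's "statistics" sysfs group registered below, e.g. something like /sys/class/fc_remote_ports/rport-<h>:<c>-<r>/statistics/fpin_dn.
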
+
+static struct attribute *fc_rport_statistics_attrs[] = {
+ &device_attr_rport_fpin_dn.attr,
+ &device_attr_rport_fpin_dn_unknown.attr,
+ &device_attr_rport_fpin_dn_timeout.attr,
+ &device_attr_rport_fpin_dn_unable_to_route.attr,
+ &device_attr_rport_fpin_dn_device_specific.attr,
+ &device_attr_rport_fpin_li.attr,
+ &device_attr_rport_fpin_li_failure_unknown.attr,
+ &device_attr_rport_fpin_li_link_failure_count.attr,
+ &device_attr_rport_fpin_li_loss_of_sync_count.attr,
+ &device_attr_rport_fpin_li_loss_of_signals_count.attr,
+ &device_attr_rport_fpin_li_prim_seq_err_count.attr,
+ &device_attr_rport_fpin_li_invalid_tx_word_count.attr,
+ &device_attr_rport_fpin_li_invalid_crc_count.attr,
+ &device_attr_rport_fpin_li_device_specific.attr,
+ &device_attr_rport_fpin_cn.attr,
+ &device_attr_rport_fpin_cn_clear.attr,
+ &device_attr_rport_fpin_cn_lost_credit.attr,
+ &device_attr_rport_fpin_cn_credit_stall.attr,
+ &device_attr_rport_fpin_cn_oversubscription.attr,
+ &device_attr_rport_fpin_cn_device_specific.attr,
+ NULL
+};
+
+static struct attribute_group fc_rport_statistics_group = {
+ .name = "statistics",
+ .attrs = fc_rport_statistics_attrs,
+};
+
/*
* FC SCSI Target Attribute Management
@@ -1744,6 +2100,42 @@ fc_host_statistic(fc_xid_not_found);
fc_host_statistic(fc_xid_busy);
fc_host_statistic(fc_seq_not_found);
fc_host_statistic(fc_non_bls_resp);
+fc_host_statistic(cn_sig_warn);
+fc_host_statistic(cn_sig_alarm);
+
+#define fc_host_fpin_statistic(name) \
+static ssize_t fc_host_fpinstat_##name(struct device *cd, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct Scsi_Host *shost = transport_class_to_shost(cd); \
+ struct fc_host_attrs *fc_host = shost_to_fc_host(shost); \
+ \
+ return snprintf(buf, 20, "0x%llx\n", fc_host->fpin_stats.name); \
+} \
+static FC_DEVICE_ATTR(host, fpin_##name, 0444, fc_host_fpinstat_##name, NULL)
+
+fc_host_fpin_statistic(dn);
+fc_host_fpin_statistic(dn_unknown);
+fc_host_fpin_statistic(dn_timeout);
+fc_host_fpin_statistic(dn_unable_to_route);
+fc_host_fpin_statistic(dn_device_specific);
+fc_host_fpin_statistic(cn);
+fc_host_fpin_statistic(cn_clear);
+fc_host_fpin_statistic(cn_lost_credit);
+fc_host_fpin_statistic(cn_credit_stall);
+fc_host_fpin_statistic(cn_oversubscription);
+fc_host_fpin_statistic(cn_device_specific);
+fc_host_fpin_statistic(li);
+fc_host_fpin_statistic(li_failure_unknown);
+fc_host_fpin_statistic(li_link_failure_count);
+fc_host_fpin_statistic(li_loss_of_sync_count);
+fc_host_fpin_statistic(li_loss_of_signals_count);
+fc_host_fpin_statistic(li_prim_seq_err_count);
+fc_host_fpin_statistic(li_invalid_tx_word_count);
+fc_host_fpin_statistic(li_invalid_crc_count);
+fc_host_fpin_statistic(li_device_specific);
static ssize_t
fc_reset_statistics(struct device *dev, struct device_attribute *attr,
@@ -1793,7 +2185,29 @@ static struct attribute *fc_statistics_attrs[] = {
&device_attr_host_fc_xid_busy.attr,
&device_attr_host_fc_seq_not_found.attr,
&device_attr_host_fc_non_bls_resp.attr,
+ &device_attr_host_cn_sig_warn.attr,
+ &device_attr_host_cn_sig_alarm.attr,
&device_attr_host_reset_statistics.attr,
+ &device_attr_host_fpin_dn.attr,
+ &device_attr_host_fpin_dn_unknown.attr,
+ &device_attr_host_fpin_dn_timeout.attr,
+ &device_attr_host_fpin_dn_unable_to_route.attr,
+ &device_attr_host_fpin_dn_device_specific.attr,
+ &device_attr_host_fpin_li.attr,
+ &device_attr_host_fpin_li_failure_unknown.attr,
+ &device_attr_host_fpin_li_link_failure_count.attr,
+ &device_attr_host_fpin_li_loss_of_sync_count.attr,
+ &device_attr_host_fpin_li_loss_of_signals_count.attr,
+ &device_attr_host_fpin_li_prim_seq_err_count.attr,
+ &device_attr_host_fpin_li_invalid_tx_word_count.attr,
+ &device_attr_host_fpin_li_invalid_crc_count.attr,
+ &device_attr_host_fpin_li_device_specific.attr,
+ &device_attr_host_fpin_cn.attr,
+ &device_attr_host_fpin_cn_clear.attr,
+ &device_attr_host_fpin_cn_lost_credit.attr,
+ &device_attr_host_fpin_cn_credit_stall.attr,
+ &device_attr_host_fpin_cn_oversubscription.attr,
+ &device_attr_host_fpin_cn_device_specific.attr,
NULL
};
@@ -2177,6 +2591,7 @@ fc_attach_transport(struct fc_function_template *ft)
i->rport_attr_cont.ac.attrs = &i->rport_attrs[0];
i->rport_attr_cont.ac.class = &fc_rport_class.class;
i->rport_attr_cont.ac.match = fc_rport_match;
+ i->rport_attr_cont.statistics = &fc_rport_statistics_group;
transport_container_register(&i->rport_attr_cont);
i->vport_attr_cont.ac.attrs = &i->vport_attrs[0];
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 2eb3e4f9375a..2e68c0a87698 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -2313,7 +2313,9 @@ iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
return conn;
release_conn_ref:
- put_device(&conn->dev);
+ device_unregister(&conn->dev);
+ put_device(&session->dev);
+ return NULL;
release_parent_ref:
put_device(&session->dev);
free_conn:
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index f3d5b1bbd5aa..c37dd15d16d2 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -117,12 +117,16 @@ static int spi_execute(struct scsi_device *sdev, const void *cmd,
sshdr = &sshdr_tmp;
for(i = 0; i < DV_RETRIES; i++) {
+ /*
+ * The purpose of the RQF_PM flag below is to bypass the
+ * SDEV_QUIESCE state.
+ */
result = scsi_execute(sdev, cmd, dir, buffer, bufflen, sense,
sshdr, DV_TIMEOUT, /* retries */ 1,
REQ_FAILFAST_DEV |
REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER,
- 0, NULL);
+ RQF_PM, NULL);
if (driver_byte(result) != DRIVER_SENSE ||
sshdr->sense_key != UNIT_ATTENTION)
break;
@@ -1005,23 +1009,26 @@ spi_dv_device(struct scsi_device *sdev)
*/
lock_system_sleep();
+ if (scsi_autopm_get_device(sdev))
+ goto unlock_system_sleep;
+
if (unlikely(spi_dv_in_progress(starget)))
- goto unlock;
+ goto put_autopm;
if (unlikely(scsi_device_get(sdev)))
- goto unlock;
+ goto put_autopm;
spi_dv_in_progress(starget) = 1;
buffer = kzalloc(len, GFP_KERNEL);
if (unlikely(!buffer))
- goto out_put;
+ goto put_sdev;
/* We need to verify that the actual device will quiesce; the
* later target quiesce is just a nice to have */
if (unlikely(scsi_device_quiesce(sdev)))
- goto out_free;
+ goto free_buffer;
scsi_target_quiesce(starget);
@@ -1041,12 +1048,16 @@ spi_dv_device(struct scsi_device *sdev)
spi_initial_dv(starget) = 1;
- out_free:
+free_buffer:
kfree(buffer);
- out_put:
+
+put_sdev:
spi_dv_in_progress(starget) = 0;
scsi_device_put(sdev);
-unlock:
+put_autopm:
+ scsi_autopm_put_device(sdev);
+
+unlock_system_sleep:
unlock_system_sleep();
}
EXPORT_SYMBOL(spi_dv_device);
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
index cba1cf6a1c12..1e939a2a387f 100644
--- a/drivers/scsi/scsi_transport_srp.c
+++ b/drivers/scsi/scsi_transport_srp.c
@@ -541,7 +541,14 @@ int srp_reconnect_rport(struct srp_rport *rport)
res = mutex_lock_interruptible(&rport->mutex);
if (res)
goto out;
- scsi_target_block(&shost->shost_gendev);
+ if (rport->state != SRP_RPORT_FAIL_FAST)
+ /*
+		 * In the FAIL_FAST state the sdev state must stay
+		 * SDEV_TRANSPORT_OFFLINE; a transition to SDEV_BLOCK is
+		 * illegal. Calling scsi_target_unblock() later is fine,
+		 * though, since scsi_internal_device_unblock_nowait()
+		 * treats SDEV_TRANSPORT_OFFLINE like SDEV_BLOCK.
+ */
+ scsi_target_block(&shost->shost_gendev);
res = rport->state != SRP_RPORT_LOST ? i->f->reconnect(rport) : -ENODEV;
pr_debug("%s (state %d): transport.reconnect() returned %d\n",
dev_name(&shost->shost_gendev), rport->state, res);
diff --git a/drivers/scsi/scsicam.c b/drivers/scsi/scsicam.c
index 682cf08ab041..f1553a453616 100644
--- a/drivers/scsi/scsicam.c
+++ b/drivers/scsi/scsicam.c
@@ -32,7 +32,7 @@
*/
unsigned char *scsi_bios_ptable(struct block_device *dev)
{
- struct address_space *mapping = dev->bd_contains->bd_inode->i_mapping;
+ struct address_space *mapping = bdev_whole(dev)->bd_inode->i_mapping;
unsigned char *res = NULL;
struct page *page;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 656bcf4940d6..a3d2d4bc4a3d 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -630,13 +630,11 @@ static struct scsi_driver sd_template = {
};
/*
- * Dummy kobj_map->probe function.
- * The default ->probe function will call modprobe, which is
- * pointless as this module is already loaded.
+ * Don't request a new module, as that could deadlock in a
+ * multipath environment.
*/
-static struct kobject *sd_default_probe(dev_t devt, int *partno, void *data)
+static void sd_default_probe(dev_t devt)
{
- return NULL;
}
/*
@@ -986,8 +984,10 @@ static blk_status_t sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd)
}
}
- if (sdp->no_write_same)
+ if (sdp->no_write_same) {
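+		/* Expected failure: RQF_QUIET keeps it out of the kernel log */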
+ rq->rq_flags |= RQF_QUIET;
return BLK_STS_TARGET;
+ }
if (sdkp->ws16 || lba > 0xffffffff || nr_blocks > 0xffff)
return sd_setup_write_same16_cmnd(cmd, false);
@@ -1750,10 +1750,8 @@ static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
static void sd_rescan(struct device *dev)
{
struct scsi_disk *sdkp = dev_get_drvdata(dev);
- int ret;
- ret = sd_revalidate_disk(sdkp->disk);
- revalidate_disk_size(sdkp->disk, ret == 0);
+ sd_revalidate_disk(sdkp->disk);
}
static int sd_ioctl(struct block_device *bdev, fmode_t mode,
@@ -3265,8 +3263,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
sdkp->first_scan = 0;
- set_capacity_revalidate_and_notify(disk,
- logical_to_sectors(sdp, sdkp->capacity), false);
+ set_capacity_and_notify(disk, logical_to_sectors(sdp, sdkp->capacity));
sd_config_write_same(sdkp);
kfree(buffer);
@@ -3276,7 +3273,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
* capacity to 0.
*/
if (sd_zbc_revalidate_zones(sdkp))
- set_capacity_revalidate_and_notify(disk, 0, false);
+ set_capacity_and_notify(disk, 0);
out:
return 0;
@@ -3515,10 +3512,8 @@ static int sd_probe(struct device *dev)
static int sd_remove(struct device *dev)
{
struct scsi_disk *sdkp;
- dev_t devt;
sdkp = dev_get_drvdata(dev);
- devt = disk_devt(sdkp->disk);
scsi_autopm_get_device(sdkp->device);
async_synchronize_full_domain(&scsi_sd_pm_domain);
@@ -3528,9 +3523,6 @@ static int sd_remove(struct device *dev)
free_opal_dev(sdkp->opal_dev);
- blk_register_region(devt, SD_MINORS, NULL,
- sd_default_probe, NULL, NULL);
-
mutex_lock(&sd_ref_mutex);
dev_set_drvdata(dev, NULL);
put_device(&sdkp->dev);
@@ -3720,11 +3712,9 @@ static int __init init_sd(void)
SCSI_LOG_HLQUEUE(3, printk("init_sd: sd driver entry point\n"));
for (i = 0; i < SD_MAJORS; i++) {
- if (register_blkdev(sd_major(i), "sd") != 0)
+ if (__register_blkdev(sd_major(i), "sd", sd_default_probe))
continue;
majors++;
- blk_register_region(sd_major(i), SD_MINORS, NULL,
- sd_default_probe, NULL, NULL);
}
if (!majors)
@@ -3797,10 +3787,8 @@ static void __exit exit_sd(void)
class_unregister(&sd_disk_class);
- for (i = 0; i < SD_MAJORS; i++) {
- blk_unregister_region(sd_major(i), SD_MINORS);
+ for (i = 0; i < SD_MAJORS; i++)
unregister_blkdev(sd_major(i), "sd");
- }
}
module_init(init_sd);
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 9d0229656681..c53f456fbd09 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -33,11 +33,11 @@
#define BUILD_TIMESTAMP
#endif
-#define DRIVER_VERSION "1.2.16-010"
+#define DRIVER_VERSION "1.2.16-012"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 2
#define DRIVER_RELEASE 16
-#define DRIVER_REVISION 10
+#define DRIVER_REVISION 12
#define DRIVER_NAME "Microsemi PQI Driver (v" \
DRIVER_VERSION BUILD_TIMESTAMP ")"
@@ -345,10 +345,9 @@ static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
device->in_remove = true;
}
-static inline bool pqi_device_in_remove(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device)
+static inline bool pqi_device_in_remove(struct pqi_scsi_dev *device)
{
- return device->in_remove && !ctrl_info->in_shutdown;
+ return device->in_remove;
}
static inline void pqi_ctrl_shutdown_start(struct pqi_ctrl_info *ctrl_info)
@@ -5347,8 +5346,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost,
atomic_inc(&device->scsi_cmds_outstanding);
- if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(ctrl_info,
- device)) {
+ if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(device)) {
set_host_byte(scmd, DID_NO_CONNECT);
pqi_scsi_done(scmd);
return 0;
@@ -8031,8 +8029,6 @@ static void pqi_pci_remove(struct pci_dev *pci_dev)
if (!ctrl_info)
return;
- ctrl_info->in_shutdown = true;
-
pqi_remove_ctrl(ctrl_info);
}
diff --git a/drivers/scsi/smartpqi/smartpqi_sas_transport.c b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
index 999870eb9ed8..c9b00b3368d7 100644
--- a/drivers/scsi/smartpqi/smartpqi_sas_transport.c
+++ b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
@@ -549,7 +549,6 @@ void pqi_sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
reslen = pqi_build_sas_smp_handler_reply(smp_buf, job, &error_info);
out:
bsg_job_done(job, rc, reslen);
- pqi_ctrl_unbusy(ctrl_info);
}
struct sas_function_template pqi_sas_transport_functions = {
.get_linkerrors = pqi_sas_get_linkerrors,
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index fd4b582110b2..e4633b84c556 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -416,19 +416,7 @@ static blk_status_t sr_init_command(struct scsi_cmnd *SCpnt)
goto out;
}
- /*
- * we do lazy blocksize switching (when reading XA sectors,
- * see CDROMREADMODE2 ioctl)
- */
s_size = cd->device->sector_size;
- if (s_size > 2048) {
- if (!in_interrupt())
- sr_set_blocklength(cd, 2048);
- else
- scmd_printk(KERN_INFO, SCpnt,
- "can't switch blocksize: in interrupt\n");
- }
-
if (s_size != 512 && s_size != 1024 && s_size != 2048) {
scmd_printk(KERN_ERR, SCpnt, "bad sector size %d\n", s_size);
goto out;
@@ -701,11 +689,6 @@ error_out:
static void sr_release(struct cdrom_device_info *cdi)
{
- struct scsi_cd *cd = cdi->handle;
-
- if (cd->device->sector_size > 2048)
- sr_set_blocklength(cd, 2048);
-
}
static int sr_probe(struct device *dev)
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
index ffcf902da390..5703f8400b73 100644
--- a/drivers/scsi/sr_ioctl.c
+++ b/drivers/scsi/sr_ioctl.c
@@ -549,6 +549,8 @@ static int sr_read_sector(Scsi_CD *cd, int lba, int blksize, unsigned char *dest
cgc.timeout = IOCTL_TIMEOUT;
rc = sr_do_ioctl(cd, &cgc);
+ if (blksize != CD_FRAMESIZE)
+ rc |= sr_set_blocklength(cd, CD_FRAMESIZE);
return rc;
}
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index e2e5356a997d..43f7624508a9 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -2846,7 +2846,6 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
case MTNOP:
DEBC_printk(STp, "No op on tape.\n");
return 0; /* Should do something ? */
- break;
case MTRETEN:
cmd[0] = START_STOP;
if (STp->immediate) {
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index d4f10c0d813c..40473e4f850f 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -685,6 +685,7 @@ stex_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
done(cmd);
return 0;
}
+ break;
default:
break;
}
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index ded00a89bfc4..2e4fa77445fd 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -399,6 +399,7 @@ static int storvsc_timeout = 180;
static struct scsi_transport_template *fc_transport_template;
#endif
+static struct scsi_host_template scsi_driver;
static void storvsc_on_channel_callback(void *context);
#define STORVSC_MAX_LUNS_PER_TARGET 255
@@ -698,6 +699,12 @@ static void handle_sc_creation(struct vmbus_channel *new_sc)
memset(&props, 0, sizeof(struct vmstorage_channel_properties));
+ /*
+ * The size of vmbus_requestor is an upper bound on the number of requests
+ * that can be in-progress at any one time across all channels.
+ */
+ new_sc->rqstor_size = scsi_driver.can_queue;
+
ret = vmbus_open(new_sc,
storvsc_ringbuffer_size,
storvsc_ringbuffer_size,
@@ -1242,9 +1249,17 @@ static void storvsc_on_channel_callback(void *context)
foreach_vmbus_pkt(desc, channel) {
void *packet = hv_pkt_data(desc);
struct storvsc_cmd_request *request;
+ u64 cmd_rqst;
+
+ cmd_rqst = vmbus_request_addr(&channel->requestor,
+ desc->trans_id);
+ if (cmd_rqst == VMBUS_RQST_ERROR) {
+ dev_err(&device->device,
+ "Incorrect transaction id\n");
+ continue;
+ }
- request = (struct storvsc_cmd_request *)
- ((unsigned long)desc->trans_id);
+ request = (struct storvsc_cmd_request *)(unsigned long)cmd_rqst;
if (request == &stor_device->init_request ||
request == &stor_device->reset_request) {
@@ -1265,6 +1280,12 @@ static int storvsc_connect_to_vsp(struct hv_device *device, u32 ring_size,
memset(&props, 0, sizeof(struct vmstorage_channel_properties));
+ /*
+ * The size of vmbus_requestor is an upper bound on the number of requests
+ * that can be in-progress at any one time across all channels.
+ */
+ device->channel->rqstor_size = scsi_driver.can_queue;
+
ret = vmbus_open(device->channel,
ring_size,
ring_size,
@@ -1572,7 +1593,6 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
struct vstor_packet *vstor_packet;
int ret, t;
-
stor_device = get_out_stor_device(device);
if (!stor_device)
return FAILED;
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index a9fe092a4906..255a2d48d421 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -4596,7 +4596,6 @@ static void sym_int_sir(struct sym_hcb *np)
scr_to_cpu(np->lastmsg), np->msgout[0]);
}
goto out_clrack;
- break;
default:
goto out_reject;
}
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index dcdb4eb1f90b..b915b38c2b27 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -72,6 +72,7 @@ config SCSI_UFS_DWC_TC_PCI
config SCSI_UFSHCD_PLATFORM
tristate "Platform bus based UFS Controller support"
depends on SCSI_UFSHCD
+ depends on HAS_IOMEM
help
This selects the UFS host controller support. Select this if
you have an UFS controller on Platform bus.
@@ -99,7 +100,7 @@ config SCSI_UFS_DWC_TC_PLATFORM
config SCSI_UFS_QCOM
tristate "QCOM specific hooks to UFS controller platform driver"
depends on SCSI_UFSHCD_PLATFORM && ARCH_QCOM
- select QCOM_SCM
+ select QCOM_SCM if SCSI_UFS_CRYPTO
select RESET_CONTROLLER
help
This selects the QCOM specific additions to UFSHCD platform driver.
diff --git a/drivers/scsi/ufs/cdns-pltfrm.c b/drivers/scsi/ufs/cdns-pltfrm.c
index da065a259f6e..149391faa19c 100644
--- a/drivers/scsi/ufs/cdns-pltfrm.c
+++ b/drivers/scsi/ufs/cdns-pltfrm.c
@@ -221,8 +221,7 @@ static int cdns_ufs_init(struct ufs_hba *hba)
return -ENOMEM;
ufshcd_set_variant(hba, host);
- if (hba->vops && hba->vops->phy_initialization)
- status = hba->vops->phy_initialization(hba);
+ status = ufshcd_vops_phy_initialization(hba);
return status;
}
diff --git a/drivers/scsi/ufs/ufs-exynos.c b/drivers/scsi/ufs/ufs-exynos.c
index 5e6b95dbb578..a8770ff14588 100644
--- a/drivers/scsi/ufs/ufs-exynos.c
+++ b/drivers/scsi/ufs/ufs-exynos.c
@@ -617,20 +617,7 @@ static int exynos_ufs_pre_pwr_mode(struct ufs_hba *hba,
goto out;
}
-
- ufs_exynos_cap.tx_lanes = UFS_EXYNOS_LIMIT_NUM_LANES_TX;
- ufs_exynos_cap.rx_lanes = UFS_EXYNOS_LIMIT_NUM_LANES_RX;
- ufs_exynos_cap.hs_rx_gear = UFS_EXYNOS_LIMIT_HSGEAR_RX;
- ufs_exynos_cap.hs_tx_gear = UFS_EXYNOS_LIMIT_HSGEAR_TX;
- ufs_exynos_cap.pwm_rx_gear = UFS_EXYNOS_LIMIT_PWMGEAR_RX;
- ufs_exynos_cap.pwm_tx_gear = UFS_EXYNOS_LIMIT_PWMGEAR_TX;
- ufs_exynos_cap.rx_pwr_pwm = UFS_EXYNOS_LIMIT_RX_PWR_PWM;
- ufs_exynos_cap.tx_pwr_pwm = UFS_EXYNOS_LIMIT_TX_PWR_PWM;
- ufs_exynos_cap.rx_pwr_hs = UFS_EXYNOS_LIMIT_RX_PWR_HS;
- ufs_exynos_cap.tx_pwr_hs = UFS_EXYNOS_LIMIT_TX_PWR_HS;
- ufs_exynos_cap.hs_rate = UFS_EXYNOS_LIMIT_HS_RATE;
- ufs_exynos_cap.desired_working_mode =
- UFS_EXYNOS_LIMIT_DESIRED_MODE;
+ ufshcd_init_pwr_dev_param(&ufs_exynos_cap);
ret = ufshcd_get_pwr_dev_param(&ufs_exynos_cap,
dev_max_params, dev_req_params);
diff --git a/drivers/scsi/ufs/ufs-exynos.h b/drivers/scsi/ufs/ufs-exynos.h
index 76d6e39efb2f..06ee565f7eb0 100644
--- a/drivers/scsi/ufs/ufs-exynos.h
+++ b/drivers/scsi/ufs/ufs-exynos.h
@@ -90,19 +90,6 @@ struct exynos_ufs;
#define SLOW 1
#define FAST 2
-#define UFS_EXYNOS_LIMIT_NUM_LANES_RX 2
-#define UFS_EXYNOS_LIMIT_NUM_LANES_TX 2
-#define UFS_EXYNOS_LIMIT_HSGEAR_RX UFS_HS_G3
-#define UFS_EXYNOS_LIMIT_HSGEAR_TX UFS_HS_G3
-#define UFS_EXYNOS_LIMIT_PWMGEAR_RX UFS_PWM_G4
-#define UFS_EXYNOS_LIMIT_PWMGEAR_TX UFS_PWM_G4
-#define UFS_EXYNOS_LIMIT_RX_PWR_PWM SLOW_MODE
-#define UFS_EXYNOS_LIMIT_TX_PWR_PWM SLOW_MODE
-#define UFS_EXYNOS_LIMIT_RX_PWR_HS FAST_MODE
-#define UFS_EXYNOS_LIMIT_TX_PWR_HS FAST_MODE
-#define UFS_EXYNOS_LIMIT_HS_RATE PA_HS_MODE_B
-#define UFS_EXYNOS_LIMIT_DESIRED_MODE FAST
-
#define RX_ADV_FINE_GRAN_SUP_EN 0x1
#define RX_ADV_FINE_GRAN_STEP_VAL 0x3
#define RX_ADV_MIN_ACTV_TIME_CAP 0x9
diff --git a/drivers/scsi/ufs/ufs-hisi.c b/drivers/scsi/ufs/ufs-hisi.c
index 074a6a055a4c..0aa58131e791 100644
--- a/drivers/scsi/ufs/ufs-hisi.c
+++ b/drivers/scsi/ufs/ufs-hisi.c
@@ -293,18 +293,7 @@ static int ufs_hisi_link_startup_notify(struct ufs_hba *hba,
static void ufs_hisi_set_dev_cap(struct ufs_dev_params *hisi_param)
{
- hisi_param->rx_lanes = UFS_HISI_LIMIT_NUM_LANES_RX;
- hisi_param->tx_lanes = UFS_HISI_LIMIT_NUM_LANES_TX;
- hisi_param->hs_rx_gear = UFS_HISI_LIMIT_HSGEAR_RX;
- hisi_param->hs_tx_gear = UFS_HISI_LIMIT_HSGEAR_TX;
- hisi_param->pwm_rx_gear = UFS_HISI_LIMIT_PWMGEAR_RX;
- hisi_param->pwm_tx_gear = UFS_HISI_LIMIT_PWMGEAR_TX;
- hisi_param->rx_pwr_pwm = UFS_HISI_LIMIT_RX_PWR_PWM;
- hisi_param->tx_pwr_pwm = UFS_HISI_LIMIT_TX_PWR_PWM;
- hisi_param->rx_pwr_hs = UFS_HISI_LIMIT_RX_PWR_HS;
- hisi_param->tx_pwr_hs = UFS_HISI_LIMIT_TX_PWR_HS;
- hisi_param->hs_rate = UFS_HISI_LIMIT_HS_RATE;
- hisi_param->desired_working_mode = UFS_HISI_LIMIT_DESIRED_MODE;
+ ufshcd_init_pwr_dev_param(hisi_param);
}
static void ufs_hisi_pwr_change_pre_change(struct ufs_hba *hba)
diff --git a/drivers/scsi/ufs/ufs-hisi.h b/drivers/scsi/ufs/ufs-hisi.h
index 3231d3d81c98..5a90c0f4e90c 100644
--- a/drivers/scsi/ufs/ufs-hisi.h
+++ b/drivers/scsi/ufs/ufs-hisi.h
@@ -76,19 +76,6 @@ enum {
#define SLOW 1
#define FAST 2
-#define UFS_HISI_LIMIT_NUM_LANES_RX 2
-#define UFS_HISI_LIMIT_NUM_LANES_TX 2
-#define UFS_HISI_LIMIT_HSGEAR_RX UFS_HS_G3
-#define UFS_HISI_LIMIT_HSGEAR_TX UFS_HS_G3
-#define UFS_HISI_LIMIT_PWMGEAR_RX UFS_PWM_G4
-#define UFS_HISI_LIMIT_PWMGEAR_TX UFS_PWM_G4
-#define UFS_HISI_LIMIT_RX_PWR_PWM SLOW_MODE
-#define UFS_HISI_LIMIT_TX_PWR_PWM SLOW_MODE
-#define UFS_HISI_LIMIT_RX_PWR_HS FAST_MODE
-#define UFS_HISI_LIMIT_TX_PWR_HS FAST_MODE
-#define UFS_HISI_LIMIT_HS_RATE PA_HS_MODE_B
-#define UFS_HISI_LIMIT_DESIRED_MODE FAST
-
#define UFS_HISI_CAP_RESERVED BIT(0)
#define UFS_HISI_CAP_PHY10nm BIT(1)
diff --git a/drivers/scsi/ufs/ufs-mediatek-trace.h b/drivers/scsi/ufs/ufs-mediatek-trace.h
new file mode 100644
index 000000000000..895e82ea6ece
--- /dev/null
+++ b/drivers/scsi/ufs/ufs-mediatek-trace.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 MediaTek Inc.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ufs_mtk
+
+#if !defined(_TRACE_EVENT_UFS_MEDIATEK_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_EVENT_UFS_MEDIATEK_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(ufs_mtk_event,
+ TP_PROTO(unsigned int type, unsigned int data),
+ TP_ARGS(type, data),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, type)
+ __field(unsigned int, data)
+ ),
+
+ TP_fast_assign(
+ __entry->type = type;
+ __entry->data = data;
+ ),
+
+ TP_printk("ufs:event=%u data=%u",
+ __entry->type, __entry->data)
+ );
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH ../../drivers/scsi/ufs/
+#define TRACE_INCLUDE_FILE ufs-mediatek-trace
+#include <trace/define_trace.h>
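
As with any tracepoint header, exactly one translation unit defines CREATE_TRACE_POINTS before including it (ufs-mediatek.c does so below); every other user includes the header plainly and calls the generated helper:

/* In ufs-mediatek.c (one translation unit only): */
#define CREATE_TRACE_POINTS
#include "ufs-mediatek-trace.h"

/* At the event site; enable at runtime via
 * /sys/kernel/debug/tracing/events/ufs_mtk/ufs_mtk_event/enable
 */
trace_ufs_mtk_event(evt, val);
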
diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
index 8df73bc2f8cb..80618af7c872 100644
--- a/drivers/scsi/ufs/ufs-mediatek.c
+++ b/drivers/scsi/ufs/ufs-mediatek.c
@@ -24,10 +24,16 @@
#include "unipro.h"
#include "ufs-mediatek.h"
+#define CREATE_TRACE_POINTS
+#include "ufs-mediatek-trace.h"
+
#define ufs_mtk_smc(cmd, val, res) \
arm_smccc_smc(MTK_SIP_UFS_CONTROL, \
cmd, val, 0, 0, 0, 0, 0, &(res))
+#define ufs_mtk_va09_pwr_ctrl(res, on) \
+ ufs_mtk_smc(UFS_MTK_SIP_VA09_PWR_CTRL, on, res)
+
#define ufs_mtk_crypto_ctrl(res, enable) \
ufs_mtk_smc(UFS_MTK_SIP_CRYPTO_CTRL, enable, res)
@@ -45,18 +51,8 @@ static struct ufs_dev_fix ufs_mtk_dev_fixups[] = {
END_FIX
};
-static const struct ufs_mtk_host_cfg ufs_mtk_mt8192_cfg = {
- .caps = UFS_MTK_CAP_BOOST_CRYPT_ENGINE,
-};
-
static const struct of_device_id ufs_mtk_of_match[] = {
- {
- .compatible = "mediatek,mt8183-ufshci",
- },
- {
- .compatible = "mediatek,mt8192-ufshci",
- .data = &ufs_mtk_mt8192_cfg
- },
+ { .compatible = "mediatek,mt8183-ufshci" },
{},
};
@@ -64,7 +60,21 @@ static bool ufs_mtk_is_boost_crypt_enabled(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- return (host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE);
+ return !!(host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE);
+}
+
+static bool ufs_mtk_is_va09_supported(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+ return !!(host->caps & UFS_MTK_CAP_VA09_PWR_CTRL);
+}
+
+static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+ return !!(host->caps & UFS_MTK_CAP_BROKEN_VCC);
}
static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
@@ -158,6 +168,7 @@ static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ unsigned long flags;
if (status == PRE_CHANGE) {
if (host->unipro_lpm) {
@@ -169,6 +180,17 @@ static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
if (hba->caps & UFSHCD_CAP_CRYPTO)
ufs_mtk_crypto_enable(hba);
+
+ if (host->caps & UFS_MTK_CAP_DISABLE_AH8) {
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_writel(hba, 0,
+ REG_AUTO_HIBERNATE_IDLE_TIMER);
+ spin_unlock_irqrestore(hba->host->host_lock,
+ flags);
+
+ hba->capabilities &= ~MASK_AUTO_HIBERN8_SUPPORT;
+ hba->ahit = 0;
+ }
}
return 0;
@@ -300,21 +322,46 @@ static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
return -ETIMEDOUT;
}
-static void ufs_mtk_mphy_power_on(struct ufs_hba *hba, bool on)
+static int ufs_mtk_mphy_power_on(struct ufs_hba *hba, bool on)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
struct phy *mphy = host->mphy;
+ struct arm_smccc_res res;
+ int ret = 0;
- if (!mphy)
- return;
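+	/* Nothing to do without a PHY, or when already in the requested state */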
+ if (!mphy || !(on ^ host->mphy_powered_on))
+ return 0;
- if (on && !host->mphy_powered_on)
+ if (on) {
+ if (ufs_mtk_is_va09_supported(hba)) {
+ ret = regulator_enable(host->reg_va09);
+ if (ret < 0)
+ goto out;
+			/* wait 200 us for VA09 to stabilize */
+ usleep_range(200, 210);
+ ufs_mtk_va09_pwr_ctrl(res, 1);
+ }
phy_power_on(mphy);
- else if (!on && host->mphy_powered_on)
+ } else {
phy_power_off(mphy);
- else
- return;
- host->mphy_powered_on = on;
+ if (ufs_mtk_is_va09_supported(hba)) {
+ ufs_mtk_va09_pwr_ctrl(res, 0);
+ ret = regulator_disable(host->reg_va09);
+ if (ret < 0)
+ goto out;
+ }
+ }
+out:
+ if (ret) {
+ dev_info(hba->dev,
+ "failed to %s va09: %d\n",
+ on ? "enable" : "disable",
+ ret);
+ } else {
+ host->mphy_powered_on = on;
+ }
+
+ return ret;
}
static int ufs_mtk_get_host_clk(struct device *dev, const char *name,
@@ -402,7 +449,7 @@ static int ufs_mtk_init_host_clk(struct ufs_hba *hba, const char *name,
return ret;
}
-static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
+static void ufs_mtk_init_boost_crypt(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
struct ufs_mtk_crypt_cfg *cfg;
@@ -410,11 +457,6 @@ static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
struct regulator *reg;
u32 volt;
- host->caps = host->cfg->caps;
-
- if (!ufs_mtk_is_boost_crypt_enabled(hba))
- return;
-
host->crypt = devm_kzalloc(dev, sizeof(*(host->crypt)),
GFP_KERNEL);
if (!host->crypt)
@@ -448,11 +490,54 @@ static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
cfg->reg_vcore = reg;
cfg->vcore_volt = volt;
- dev_info(dev, "caps: boost-crypt");
- return;
+ host->caps |= UFS_MTK_CAP_BOOST_CRYPT_ENGINE;
disable_caps:
- host->caps &= ~UFS_MTK_CAP_BOOST_CRYPT_ENGINE;
+ return;
+}
+
+static void ufs_mtk_init_va09_pwr_ctrl(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+ host->reg_va09 = regulator_get(hba->dev, "va09");
+	if (IS_ERR_OR_NULL(host->reg_va09))
+		dev_info(hba->dev, "failed to get va09\n");
+ else
+ host->caps |= UFS_MTK_CAP_VA09_PWR_CTRL;
+}
+
+static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ struct device_node *np = hba->dev->of_node;
+
+ if (of_property_read_bool(np, "mediatek,ufs-boost-crypt"))
+ ufs_mtk_init_boost_crypt(hba);
+
+ if (of_property_read_bool(np, "mediatek,ufs-support-va09"))
+ ufs_mtk_init_va09_pwr_ctrl(hba);
+
+ if (of_property_read_bool(np, "mediatek,ufs-disable-ah8"))
+ host->caps |= UFS_MTK_CAP_DISABLE_AH8;
+
+ if (of_property_read_bool(np, "mediatek,ufs-broken-vcc"))
+ host->caps |= UFS_MTK_CAP_BROKEN_VCC;
+
+ dev_info(hba->dev, "caps: 0x%x", host->caps);
+}
+
+static void ufs_mtk_scale_perf(struct ufs_hba *hba, bool up)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+ ufs_mtk_boost_crypt(hba, up);
+ ufs_mtk_setup_ref_clk(hba, up);
+
+ if (up)
+ phy_power_on(host->mphy);
+ else
+ phy_power_off(host->mphy);
}
/**
@@ -467,8 +552,8 @@ static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
enum ufs_notify_change_status status)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- int ret = 0;
bool clk_pwr_off = false;
+ int ret = 0;
/*
* In case ufs_mtk_init() is not yet done, simply ignore.
@@ -496,20 +581,33 @@ static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
clk_pwr_off = true;
}
- if (clk_pwr_off) {
- ufs_mtk_boost_crypt(hba, on);
- ufs_mtk_setup_ref_clk(hba, on);
- ufs_mtk_mphy_power_on(hba, on);
- }
+ if (clk_pwr_off)
+ ufs_mtk_scale_perf(hba, false);
} else if (on && status == POST_CHANGE) {
- ufs_mtk_mphy_power_on(hba, on);
- ufs_mtk_setup_ref_clk(hba, on);
- ufs_mtk_boost_crypt(hba, on);
+ ufs_mtk_scale_perf(hba, true);
}
return ret;
}
+static void ufs_mtk_get_controller_version(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ int ret, ver = 0;
+
+ if (host->hw_ver.major)
+ return;
+
+ /* Set default (minimum) version anyway */
+ host->hw_ver.major = 2;
+
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &ver);
+ if (!ret) {
+ if (ver >= UFS_UNIPRO_VER_1_8)
+ host->hw_ver.major = 3;
+ }
+}
+
/**
* ufs_mtk_init - find other essential mmio bases
* @hba: host controller instance
@@ -537,17 +635,14 @@ static int ufs_mtk_init(struct ufs_hba *hba)
host->hba = hba;
ufshcd_set_variant(hba, host);
- /* Get host capability and platform data */
id = of_match_device(ufs_mtk_of_match, dev);
if (!id) {
err = -EINVAL;
goto out;
}
- if (id->data) {
- host->cfg = (struct ufs_mtk_host_cfg *)id->data;
- ufs_mtk_init_host_caps(hba);
- }
+ /* Initialize host capability */
+ ufs_mtk_init_host_caps(hba);
err = ufs_mtk_bind_mphy(hba);
if (err)
@@ -568,6 +663,9 @@ static int ufs_mtk_init(struct ufs_hba *hba)
hba->caps |= UFSHCD_CAP_WB_EN;
hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);
+ if (host->caps & UFS_MTK_CAP_DISABLE_AH8)
+ hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
+
/*
* ufshcd_vops_init() is invoked after
* ufshcd_setup_clock(true) in ufshcd_hba_init() thus
@@ -575,6 +673,7 @@ static int ufs_mtk_init(struct ufs_hba *hba)
*
* Enable phy clocks specifically here.
*/
+ ufs_mtk_mphy_power_on(hba, true);
ufs_mtk_setup_clocks(hba, true, POST_CHANGE);
goto out;
@@ -589,22 +688,13 @@ static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
struct ufs_pa_layer_attr *dev_max_params,
struct ufs_pa_layer_attr *dev_req_params)
{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
struct ufs_dev_params host_cap;
int ret;
- host_cap.tx_lanes = UFS_MTK_LIMIT_NUM_LANES_TX;
- host_cap.rx_lanes = UFS_MTK_LIMIT_NUM_LANES_RX;
- host_cap.hs_rx_gear = UFS_MTK_LIMIT_HSGEAR_RX;
- host_cap.hs_tx_gear = UFS_MTK_LIMIT_HSGEAR_TX;
- host_cap.pwm_rx_gear = UFS_MTK_LIMIT_PWMGEAR_RX;
- host_cap.pwm_tx_gear = UFS_MTK_LIMIT_PWMGEAR_TX;
- host_cap.rx_pwr_pwm = UFS_MTK_LIMIT_RX_PWR_PWM;
- host_cap.tx_pwr_pwm = UFS_MTK_LIMIT_TX_PWR_PWM;
- host_cap.rx_pwr_hs = UFS_MTK_LIMIT_RX_PWR_HS;
- host_cap.tx_pwr_hs = UFS_MTK_LIMIT_TX_PWR_HS;
- host_cap.hs_rate = UFS_MTK_LIMIT_HS_RATE;
- host_cap.desired_working_mode =
- UFS_MTK_LIMIT_DESIRED_MODE;
+ ufshcd_init_pwr_dev_param(&host_cap);
+ host_cap.hs_rx_gear = UFS_HS_G4;
+ host_cap.hs_tx_gear = UFS_HS_G4;
ret = ufshcd_get_pwr_dev_param(&host_cap,
dev_max_params,
@@ -614,6 +704,12 @@ static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
__func__);
}
+ if (host->hw_ver.major >= 3) {
+ ret = ufshcd_dme_configure_adapt(hba,
+ dev_req_params->gear_tx,
+ PA_INITIAL_ADAPT);
+ }
+
return ret;
}
@@ -639,14 +735,14 @@ static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
return ret;
}
-static int ufs_mtk_unipro_set_pm(struct ufs_hba *hba, bool lpm)
+static int ufs_mtk_unipro_set_lpm(struct ufs_hba *hba, bool lpm)
{
int ret;
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
ret = ufshcd_dme_set(hba,
UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
- lpm);
+ lpm ? 1 : 0);
if (!ret || !lpm) {
/*
* Forcibly set non-LPM mode if the UIC command failed
@@ -664,7 +760,9 @@ static int ufs_mtk_pre_link(struct ufs_hba *hba)
int ret;
u32 tmp;
- ret = ufs_mtk_unipro_set_pm(hba, false);
+ ufs_mtk_get_controller_version(hba);
+
+ ret = ufs_mtk_unipro_set_lpm(hba, false);
if (ret)
return ret;
@@ -743,7 +841,7 @@ static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
return ret;
}
-static void ufs_mtk_device_reset(struct ufs_hba *hba)
+static int ufs_mtk_device_reset(struct ufs_hba *hba)
{
struct arm_smccc_res res;
@@ -764,6 +862,8 @@ static void ufs_mtk_device_reset(struct ufs_hba *hba)
usleep_range(10000, 15000);
dev_info(hba->dev, "device reset done\n");
+
+ return 0;
}
static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
@@ -774,7 +874,7 @@ static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
if (err)
return err;
- err = ufs_mtk_unipro_set_pm(hba, false);
+ err = ufs_mtk_unipro_set_lpm(hba, false);
if (err)
return err;
@@ -795,10 +895,10 @@ static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
{
int err;
- err = ufs_mtk_unipro_set_pm(hba, true);
+ err = ufs_mtk_unipro_set_lpm(hba, true);
if (err) {
/* Resume UniPro state for following error recovery */
- ufs_mtk_unipro_set_pm(hba, false);
+ ufs_mtk_unipro_set_lpm(hba, false);
return err;
}
@@ -824,40 +924,52 @@ static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
if (ufshcd_is_link_hibern8(hba)) {
err = ufs_mtk_link_set_lpm(hba);
- if (err) {
- /*
- * Set link as off state enforcedly to trigger
- * ufshcd_host_reset_and_restore() in ufshcd_suspend()
- * for completed host reset.
- */
- ufshcd_set_link_off(hba);
- return -EAGAIN;
- }
+ if (err)
+ goto fail;
+ }
+
+ if (!ufshcd_is_link_active(hba)) {
/*
* Make sure no error will be returned to prevent
* ufshcd_suspend() re-enabling regulators while vreg is still
* in low-power mode.
*/
ufs_mtk_vreg_set_lpm(hba, true);
+ err = ufs_mtk_mphy_power_on(hba, false);
+ if (err)
+ goto fail;
}
return 0;
+fail:
+ /*
+ * Set link as off state enforcedly to trigger
+ * ufshcd_host_reset_and_restore() in ufshcd_suspend()
+ * for completed host reset.
+ */
+ ufshcd_set_link_off(hba);
+ return -EAGAIN;
}
static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
int err;
+ err = ufs_mtk_mphy_power_on(hba, true);
+ if (err)
+ goto fail;
+
+ ufs_mtk_vreg_set_lpm(hba, false);
+
if (ufshcd_is_link_hibern8(hba)) {
- ufs_mtk_vreg_set_lpm(hba, false);
err = ufs_mtk_link_set_hpm(hba);
- if (err) {
- err = ufshcd_link_recovery(hba);
- return err;
- }
+ if (err)
+ goto fail;
}
return 0;
+fail:
+ return ufshcd_link_recovery(hba);
}
static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
@@ -901,6 +1013,25 @@ static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
{
ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);
+
+ if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc &&
+ (hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)) {
+ hba->vreg_info.vcc->always_on = true;
+ /*
+ * VCC will be kept always-on thus we don't
+ * need any delay during regulator operations
+ */
+ hba->dev_quirks &= ~(UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
+ UFS_DEVICE_QUIRK_DELAY_AFTER_LPM);
+ }
+}
+
+static void ufs_mtk_event_notify(struct ufs_hba *hba,
+ enum ufs_event_type evt, void *data)
+{
+ unsigned int val = *(u32 *)data;
+
+ trace_ufs_mtk_event(evt, val);
}
/*
@@ -922,6 +1053,7 @@ static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
.resume = ufs_mtk_resume,
.dbg_register_dump = ufs_mtk_dbg_register_dump,
.device_reset = ufs_mtk_device_reset,
+ .event_notify = ufs_mtk_event_notify,
};
/**
diff --git a/drivers/scsi/ufs/ufs-mediatek.h b/drivers/scsi/ufs/ufs-mediatek.h
index 2b6a1312c9bc..3f0d3bb769e8 100644
--- a/drivers/scsi/ufs/ufs-mediatek.h
+++ b/drivers/scsi/ufs/ufs-mediatek.h
@@ -31,22 +31,6 @@
#define REFCLK_REQ_TIMEOUT_US 3000
/*
- * Vendor specific pre-defined parameters
- */
-#define UFS_MTK_LIMIT_NUM_LANES_RX 2
-#define UFS_MTK_LIMIT_NUM_LANES_TX 2
-#define UFS_MTK_LIMIT_HSGEAR_RX UFS_HS_G3
-#define UFS_MTK_LIMIT_HSGEAR_TX UFS_HS_G3
-#define UFS_MTK_LIMIT_PWMGEAR_RX UFS_PWM_G4
-#define UFS_MTK_LIMIT_PWMGEAR_TX UFS_PWM_G4
-#define UFS_MTK_LIMIT_RX_PWR_PWM SLOW_MODE
-#define UFS_MTK_LIMIT_TX_PWR_PWM SLOW_MODE
-#define UFS_MTK_LIMIT_RX_PWR_HS FAST_MODE
-#define UFS_MTK_LIMIT_TX_PWR_HS FAST_MODE
-#define UFS_MTK_LIMIT_HS_RATE PA_HS_MODE_B
-#define UFS_MTK_LIMIT_DESIRED_MODE UFS_HS_MODE
-
-/*
* Other attributes
*/
#define VS_DEBUGCLOCKENABLE 0xD0A1
@@ -69,6 +53,7 @@ enum {
* SiP commands
*/
#define MTK_SIP_UFS_CONTROL MTK_SIP_SMC_CMD(0x276)
+#define UFS_MTK_SIP_VA09_PWR_CTRL BIT(0)
#define UFS_MTK_SIP_DEVICE_RESET BIT(1)
#define UFS_MTK_SIP_CRYPTO_CTRL BIT(2)
#define UFS_MTK_SIP_REF_CLK_NOTIFICATION BIT(3)
@@ -94,6 +79,9 @@ enum {
*/
enum ufs_mtk_host_caps {
UFS_MTK_CAP_BOOST_CRYPT_ENGINE = 1 << 0,
+ UFS_MTK_CAP_VA09_PWR_CTRL = 1 << 1,
+ UFS_MTK_CAP_DISABLE_AH8 = 1 << 2,
+ UFS_MTK_CAP_BROKEN_VCC = 1 << 3,
};
struct ufs_mtk_crypt_cfg {
@@ -104,19 +92,22 @@ struct ufs_mtk_crypt_cfg {
int vcore_volt;
};
-struct ufs_mtk_host_cfg {
- enum ufs_mtk_host_caps caps;
+struct ufs_mtk_hw_ver {
+ u8 step;
+ u8 minor;
+ u8 major;
};
struct ufs_mtk_host {
- struct ufs_hba *hba;
struct phy *mphy;
- struct ufs_mtk_host_cfg *cfg;
- struct ufs_mtk_crypt_cfg *crypt;
- enum ufs_mtk_host_caps caps;
+ struct regulator *reg_va09;
struct reset_control *hci_reset;
struct reset_control *unipro_reset;
struct reset_control *crypto_reset;
+ struct ufs_hba *hba;
+ struct ufs_mtk_crypt_cfg *crypt;
+ struct ufs_mtk_hw_ver hw_ver;
+ enum ufs_mtk_host_caps caps;
bool mphy_powered_on;
bool unipro_lpm;
bool ref_clk_enabled;
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index f9d6ef356540..2206b1e4b774 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -691,19 +691,8 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
switch (status) {
case PRE_CHANGE:
- ufs_qcom_cap.tx_lanes = UFS_QCOM_LIMIT_NUM_LANES_TX;
- ufs_qcom_cap.rx_lanes = UFS_QCOM_LIMIT_NUM_LANES_RX;
- ufs_qcom_cap.hs_rx_gear = UFS_QCOM_LIMIT_HSGEAR_RX;
- ufs_qcom_cap.hs_tx_gear = UFS_QCOM_LIMIT_HSGEAR_TX;
- ufs_qcom_cap.pwm_rx_gear = UFS_QCOM_LIMIT_PWMGEAR_RX;
- ufs_qcom_cap.pwm_tx_gear = UFS_QCOM_LIMIT_PWMGEAR_TX;
- ufs_qcom_cap.rx_pwr_pwm = UFS_QCOM_LIMIT_RX_PWR_PWM;
- ufs_qcom_cap.tx_pwr_pwm = UFS_QCOM_LIMIT_TX_PWR_PWM;
- ufs_qcom_cap.rx_pwr_hs = UFS_QCOM_LIMIT_RX_PWR_HS;
- ufs_qcom_cap.tx_pwr_hs = UFS_QCOM_LIMIT_TX_PWR_HS;
+ ufshcd_init_pwr_dev_param(&ufs_qcom_cap);
ufs_qcom_cap.hs_rate = UFS_QCOM_LIMIT_HS_RATE;
- ufs_qcom_cap.desired_working_mode =
- UFS_QCOM_LIMIT_DESIRED_MODE;
if (host->hw_ver.major == 0x1) {
/*
@@ -734,17 +723,9 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
ufs_qcom_dev_ref_clk_ctrl(host, true);
if (host->hw_ver.major >= 0x4) {
- if (dev_req_params->gear_tx == UFS_HS_G4) {
- /* INITIAL ADAPT */
- ufshcd_dme_set(hba,
- UIC_ARG_MIB(PA_TXHSADAPTTYPE),
- PA_INITIAL_ADAPT);
- } else {
- /* NO ADAPT */
- ufshcd_dme_set(hba,
- UIC_ARG_MIB(PA_TXHSADAPTTYPE),
- PA_NO_ADAPT);
- }
+ ufshcd_dme_configure_adapt(hba,
+ dev_req_params->gear_tx,
+ PA_INITIAL_ADAPT);
}
break;
case POST_CHANGE:
@@ -863,6 +844,7 @@ static void ufs_qcom_set_caps(struct ufs_hba *hba)
hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
hba->caps |= UFSHCD_CAP_WB_EN;
hba->caps |= UFSHCD_CAP_CRYPTO;
+ hba->caps |= UFSHCD_CAP_AGGR_POWER_COLLAPSE;
if (host->hw_ver.major >= 0x2) {
host->caps = UFS_QCOM_CAP_QUNIPRO |
@@ -977,6 +959,7 @@ static int ufs_qcom_init(struct ufs_hba *hba)
struct platform_device *pdev = to_platform_device(dev);
struct ufs_qcom_host *host;
struct resource *res;
+ struct ufs_clk_info *clki;
if (strlen(android_boot_dev) && strcmp(android_boot_dev, dev_name(dev)))
return -ENODEV;
@@ -1075,6 +1058,11 @@ static int ufs_qcom_init(struct ufs_hba *hba)
}
}
+ list_for_each_entry(clki, &hba->clk_list_head, list) {
+ if (!strcmp(clki->name, "core_clk_unipro"))
+ clki->keep_link_active = true;
+ }
+
err = ufs_qcom_init_lane_clks(host);
if (err)
goto out_variant_clear;
@@ -1421,13 +1409,13 @@ static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
*
* Toggles the (optional) reset line to reset the attached device.
*/
-static void ufs_qcom_device_reset(struct ufs_hba *hba)
+static int ufs_qcom_device_reset(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
/* reset gpio is optional */
if (!host->device_reset)
- return;
+ return -EOPNOTSUPP;
/*
* The UFS device shall detect reset pulses of 1us, sleep for 10us to
@@ -1438,6 +1426,8 @@ static void ufs_qcom_device_reset(struct ufs_hba *hba)
gpiod_set_value_cansleep(host->device_reset, 0);
usleep_range(10, 15);
+
+ return 0;
}
#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h
index 3f4922743b3e..8208e3a3ef59 100644
--- a/drivers/scsi/ufs/ufs-qcom.h
+++ b/drivers/scsi/ufs/ufs-qcom.h
@@ -27,18 +27,7 @@
#define SLOW 1
#define FAST 2
-#define UFS_QCOM_LIMIT_NUM_LANES_RX 2
-#define UFS_QCOM_LIMIT_NUM_LANES_TX 2
-#define UFS_QCOM_LIMIT_HSGEAR_RX UFS_HS_G3
-#define UFS_QCOM_LIMIT_HSGEAR_TX UFS_HS_G3
-#define UFS_QCOM_LIMIT_PWMGEAR_RX UFS_PWM_G4
-#define UFS_QCOM_LIMIT_PWMGEAR_TX UFS_PWM_G4
-#define UFS_QCOM_LIMIT_RX_PWR_PWM SLOW_MODE
-#define UFS_QCOM_LIMIT_TX_PWR_PWM SLOW_MODE
-#define UFS_QCOM_LIMIT_RX_PWR_HS FAST_MODE
-#define UFS_QCOM_LIMIT_TX_PWR_HS FAST_MODE
#define UFS_QCOM_LIMIT_HS_RATE PA_HS_MODE_B
-#define UFS_QCOM_LIMIT_DESIRED_MODE FAST
/* QCOM UFS host controller vendor specific registers */
enum {
diff --git a/drivers/scsi/ufs/ufs-sysfs.c b/drivers/scsi/ufs/ufs-sysfs.c
index bdcd27faa054..08e72b7eef6a 100644
--- a/drivers/scsi/ufs/ufs-sysfs.c
+++ b/drivers/scsi/ufs/ufs-sysfs.c
@@ -28,6 +28,7 @@ static const char *ufschd_ufs_dev_pwr_mode_to_string(
case UFS_ACTIVE_PWR_MODE: return "ACTIVE";
case UFS_SLEEP_PWR_MODE: return "SLEEP";
case UFS_POWERDOWN_PWR_MODE: return "POWERDOWN";
+ case UFS_DEEPSLEEP_PWR_MODE: return "DEEPSLEEP";
default: return "UNKNOWN";
}
}
@@ -38,6 +39,7 @@ static inline ssize_t ufs_sysfs_pm_lvl_store(struct device *dev,
bool rpm)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
+ struct ufs_dev_info *dev_info = &hba->dev_info;
unsigned long flags, value;
if (kstrtoul(buf, 0, &value))
@@ -46,6 +48,11 @@ static inline ssize_t ufs_sysfs_pm_lvl_store(struct device *dev,
if (value >= UFS_PM_LVL_MAX)
return -EINVAL;
+ if (ufs_pm_lvl_states[value].dev_state == UFS_DEEPSLEEP_PWR_MODE &&
+ (!(hba->caps & UFSHCD_CAP_DEEPSLEEP) ||
+	     dev_info->wspecversion < 0x310))
+ return -EINVAL;
+
spin_lock_irqsave(hba->host->host_lock, flags);
if (rpm)
hba->rpm_lvl = value;
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index f8ab16f30fdc..14dfda735adf 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -330,7 +330,6 @@ enum {
UFS_DEV_WRITE_BOOSTER_SUP = BIT(8),
};
-#define POWER_DESC_MAX_SIZE 0x62
#define POWER_DESC_MAX_ACTV_ICC_LVLS 16
/* Attribute bActiveICCLevel parameter bit masks definitions */
@@ -442,6 +441,7 @@ enum ufs_dev_pwr_mode {
UFS_ACTIVE_PWR_MODE = 1,
UFS_SLEEP_PWR_MODE = 2,
UFS_POWERDOWN_PWR_MODE = 3,
+ UFS_DEEPSLEEP_PWR_MODE = 4,
};
#define UFS_WB_BUF_REMAIN_PERCENT(val) ((val) / 10)
@@ -512,6 +512,7 @@ struct ufs_query_res {
struct ufs_vreg {
struct regulator *reg;
const char *name;
+ bool always_on;
bool enabled;
int min_uV;
int max_uV;
diff --git a/drivers/scsi/ufs/ufshcd-dwc.c b/drivers/scsi/ufs/ufshcd-dwc.c
index 6a901da2d15a..5bb9d3a88795 100644
--- a/drivers/scsi/ufs/ufshcd-dwc.c
+++ b/drivers/scsi/ufs/ufshcd-dwc.c
@@ -120,13 +120,10 @@ int ufshcd_dwc_link_startup_notify(struct ufs_hba *hba,
if (status == PRE_CHANGE) {
ufshcd_dwc_program_clk_div(hba, DWC_UFS_REG_HCLKDIV_DIV_125);
- if (hba->vops->phy_initialization) {
- err = hba->vops->phy_initialization(hba);
- if (err) {
- dev_err(hba->dev, "Phy setup failed (%d)\n",
- err);
- goto out;
- }
+ err = ufshcd_vops_phy_initialization(hba);
+ if (err) {
+ dev_err(hba->dev, "Phy setup failed (%d)\n", err);
+ goto out;
}
} else { /* POST_CHANGE */
err = ufshcd_dwc_link_is_up(hba);
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
index df3a564c3e33..fadd566025b8 100644
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -148,6 +148,8 @@ static int ufs_intel_common_init(struct ufs_hba *hba)
{
struct intel_host *host;
+ hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
+
host = devm_kzalloc(hba->dev, sizeof(*host), GFP_KERNEL);
if (!host)
return -ENOMEM;
@@ -163,6 +165,41 @@ static void ufs_intel_common_exit(struct ufs_hba *hba)
intel_ltr_hide(hba->dev);
}
+static int ufs_intel_resume(struct ufs_hba *hba, enum ufs_pm_op op)
+{
+ /*
+ * To support S4 (suspend-to-disk) with spm_lvl other than 5, the base
+ * address registers must be restored because the restore kernel may
+ * have used different addresses.
+ */
+ ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
+ REG_UTP_TRANSFER_REQ_LIST_BASE_L);
+ ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
+ REG_UTP_TRANSFER_REQ_LIST_BASE_H);
+ ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
+ REG_UTP_TASK_REQ_LIST_BASE_L);
+ ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
+ REG_UTP_TASK_REQ_LIST_BASE_H);
+
+ if (ufshcd_is_link_hibern8(hba)) {
+ int ret = ufshcd_uic_hibern8_exit(hba);
+
+ if (!ret) {
+ ufshcd_set_link_active(hba);
+ } else {
+ dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
+ __func__, ret);
+ /*
+ * Force reset and restore. Any other actions can lead
+ * to an unrecoverable state.
+ */
+ ufshcd_set_link_off(hba);
+ }
+ }
+
+ return 0;
+}
+
static int ufs_intel_ehl_init(struct ufs_hba *hba)
{
hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
@@ -174,6 +211,7 @@ static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = {
.init = ufs_intel_common_init,
.exit = ufs_intel_common_exit,
.link_startup_notify = ufs_intel_link_startup_notify,
+ .resume = ufs_intel_resume,
};
static struct ufs_hba_variant_ops ufs_intel_ehl_hba_vops = {
@@ -181,6 +219,7 @@ static struct ufs_hba_variant_ops ufs_intel_ehl_hba_vops = {
.init = ufs_intel_ehl_init,
.exit = ufs_intel_common_exit,
.link_startup_notify = ufs_intel_link_startup_notify,
+ .resume = ufs_intel_resume,
};
#ifdef CONFIG_PM_SLEEP
@@ -207,6 +246,30 @@ static int ufshcd_pci_resume(struct device *dev)
{
return ufshcd_system_resume(dev_get_drvdata(dev));
}
+
+/**
+ * ufshcd_pci_poweroff - suspend-to-disk poweroff function
+ * @dev: pointer to PCI device handle
+ *
+ * Returns 0 if successful
+ * Returns non-zero otherwise
+ */
+static int ufshcd_pci_poweroff(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ int spm_lvl = hba->spm_lvl;
+ int ret;
+
+ /*
+ * For poweroff we need to set the UFS device to PowerDown mode.
+ * Force spm_lvl to ensure that.
+ */
+ hba->spm_lvl = 5;
+ ret = ufshcd_system_suspend(hba);
+ hba->spm_lvl = spm_lvl;
+ return ret;
+}
+
#endif /* !CONFIG_PM_SLEEP */
#ifdef CONFIG_PM
@@ -302,8 +365,14 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
static const struct dev_pm_ops ufshcd_pci_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(ufshcd_pci_suspend,
- ufshcd_pci_resume)
+#ifdef CONFIG_PM_SLEEP
+ .suspend = ufshcd_pci_suspend,
+ .resume = ufshcd_pci_resume,
+ .freeze = ufshcd_pci_suspend,
+ .thaw = ufshcd_pci_resume,
+ .poweroff = ufshcd_pci_poweroff,
+ .restore = ufshcd_pci_resume,
+#endif
SET_RUNTIME_PM_OPS(ufshcd_pci_runtime_suspend,
ufshcd_pci_runtime_resume,
ufshcd_pci_runtime_idle)
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index 3db0af66c71c..1a69949a4ea1 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -92,6 +92,8 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba)
clki->min_freq = clkfreq[i];
clki->max_freq = clkfreq[i+1];
clki->name = kstrdup(name, GFP_KERNEL);
+ if (!strcmp(name, "ref_clk"))
+ clki->keep_link_active = true;
dev_dbg(dev, "%s: min %u max %u name %s\n", "freq-table-hz",
clki->min_freq, clki->max_freq, clki->name);
list_add_tail(&clki->list, &hba->clk_list_head);
@@ -132,25 +134,6 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name,
dev_info(dev, "%s: unable to find %s\n", __func__, prop_name);
vreg->max_uA = 0;
}
-
- if (!strcmp(name, "vcc")) {
- if (of_property_read_bool(np, "vcc-supply-1p8")) {
- vreg->min_uV = UFS_VREG_VCC_1P8_MIN_UV;
- vreg->max_uV = UFS_VREG_VCC_1P8_MAX_UV;
- } else {
- vreg->min_uV = UFS_VREG_VCC_MIN_UV;
- vreg->max_uV = UFS_VREG_VCC_MAX_UV;
- }
- } else if (!strcmp(name, "vccq")) {
- vreg->min_uV = UFS_VREG_VCCQ_MIN_UV;
- vreg->max_uV = UFS_VREG_VCCQ_MAX_UV;
- } else if (!strcmp(name, "vccq2")) {
- vreg->min_uV = UFS_VREG_VCCQ2_MIN_UV;
- vreg->max_uV = UFS_VREG_VCCQ2_MAX_UV;
- }
-
- goto out;
-
out:
if (!ret)
*out_vreg = vreg;
@@ -354,6 +337,23 @@ int ufshcd_get_pwr_dev_param(struct ufs_dev_params *pltfrm_param,
}
EXPORT_SYMBOL_GPL(ufshcd_get_pwr_dev_param);
+void ufshcd_init_pwr_dev_param(struct ufs_dev_params *dev_param)
+{
+ dev_param->tx_lanes = 2;
+ dev_param->rx_lanes = 2;
+ dev_param->hs_rx_gear = UFS_HS_G3;
+ dev_param->hs_tx_gear = UFS_HS_G3;
+ dev_param->pwm_rx_gear = UFS_PWM_G4;
+ dev_param->pwm_tx_gear = UFS_PWM_G4;
+ dev_param->rx_pwr_pwm = SLOW_MODE;
+ dev_param->tx_pwr_pwm = SLOW_MODE;
+ dev_param->rx_pwr_hs = FAST_MODE;
+ dev_param->tx_pwr_hs = FAST_MODE;
+ dev_param->hs_rate = PA_HS_MODE_B;
+ dev_param->desired_working_mode = UFS_HS_MODE;
+}
+EXPORT_SYMBOL_GPL(ufshcd_init_pwr_dev_param);
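
Typical use, mirroring the vendor-driver conversions elsewhere in this patch: take the defaults, then override only the vendor-specific limits before negotiating. A sketch (example_pre_pwr_change is a hypothetical name; the override values mirror the MediaTek hunk above):

static int example_pre_pwr_change(struct ufs_hba *hba,
				  struct ufs_pa_layer_attr *dev_max_params,
				  struct ufs_pa_layer_attr *dev_req_params)
{
	struct ufs_dev_params host_cap;

	ufshcd_init_pwr_dev_param(&host_cap);	/* 2 lanes, HS-G3, rate B */

	/* vendor-specific overrides */
	host_cap.hs_rx_gear = UFS_HS_G4;
	host_cap.hs_tx_gear = UFS_HS_G4;

	return ufshcd_get_pwr_dev_param(&host_cap,
					dev_max_params, dev_req_params);
}
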
+
/**
* ufshcd_pltfrm_init - probe routine of the driver
* @pdev: pointer to Platform device handle
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.h b/drivers/scsi/ufs/ufshcd-pltfrm.h
index b79cdf9129a0..772a8e848098 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.h
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.h
@@ -28,6 +28,7 @@ struct ufs_dev_params {
int ufshcd_get_pwr_dev_param(struct ufs_dev_params *dev_param,
struct ufs_pa_layer_attr *dev_max,
struct ufs_pa_layer_attr *agreed_pwr);
+void ufshcd_init_pwr_dev_param(struct ufs_dev_params *dev_param);
int ufshcd_pltfrm_init(struct platform_device *pdev,
const struct ufs_hba_variant_ops *vops);
void ufshcd_pltfrm_shutdown(struct platform_device *pdev);
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 0c148fcd24de..fb32d122f2e3 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -163,6 +163,11 @@ struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
{UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
{UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
{UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
+ /*
+ * For DeepSleep, the link is first put in hibern8 and then off.
+ * Leaving the link in hibern8 is not supported.
+ */
+ {UFS_DEEPSLEEP_PWR_MODE, UIC_LINK_OFF_STATE},
};
static inline enum ufs_dev_pwr_mode
@@ -220,9 +225,8 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
static void ufshcd_hba_exit(struct ufs_hba *hba);
+static int ufshcd_clear_ua_wluns(struct ufs_hba *hba);
static int ufshcd_probe_hba(struct ufs_hba *hba, bool async);
-static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
- bool skip_ref_clk);
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
@@ -245,6 +249,8 @@ static int ufshcd_wb_buf_flush_disable(struct ufs_hba *hba);
static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable);
static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set);
static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable);
+static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
+static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
{
@@ -283,7 +289,8 @@ static inline void ufshcd_wb_config(struct ufs_hba *hba)
if (ret)
dev_err(hba->dev, "%s: En WB flush during H8: failed: %d\n",
__func__, ret);
- ufshcd_wb_toggle_flush(hba, true);
+ if (!(hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL))
+ ufshcd_wb_toggle_flush(hba, true);
}
static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
@@ -348,7 +355,7 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba,
unsigned int tag, const char *str)
{
sector_t lba = -1;
- u8 opcode = 0;
+ u8 opcode = 0, group_id = 0;
u32 intr, doorbell;
struct ufshcd_lrb *lrbp = &hba->lrb[tag];
struct scsi_cmnd *cmd = lrbp->cmd;
@@ -374,13 +381,20 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba,
lba = cmd->request->bio->bi_iter.bi_sector;
transfer_len = be32_to_cpu(
lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
+ if (opcode == WRITE_10)
+ group_id = lrbp->cmd->cmnd[6];
+ } else if (opcode == UNMAP) {
+ if (cmd->request) {
+ lba = scsi_get_lba(cmd);
+ transfer_len = blk_rq_bytes(cmd->request);
+ }
}
}
intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
trace_ufshcd_command(dev_name(hba->dev), str, tag,
- doorbell, transfer_len, intr, lba, opcode);
+ doorbell, transfer_len, intr, lba, opcode, group_id);
}
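
Background for the new group_id field: in the SCSI Block Commands spec, the GROUP NUMBER of a WRITE(10) occupies bits 5:0 of CDB byte 6, with the top two bits reserved. The patch traces the raw byte; a stricter extraction would mask it, as in this illustrative fragment (helper name invented):

	/* Hypothetical helper, not in the patch: isolate the GROUP NUMBER
	 * field of a WRITE(10) CDB; bits 7:6 of byte 6 are reserved. */
	static inline u8 write10_group_number(const u8 *cdb)
	{
		return cdb[6] & 0x3f;
	}
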
static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
@@ -399,20 +413,25 @@ static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
}
}
-static void ufshcd_print_err_hist(struct ufs_hba *hba,
- struct ufs_err_reg_hist *err_hist,
- char *err_name)
+static void ufshcd_print_evt(struct ufs_hba *hba, u32 id,
+ char *err_name)
{
int i;
bool found = false;
+ struct ufs_event_hist *e;
+
+ if (id >= UFS_EVT_CNT)
+ return;
+
+ e = &hba->ufs_stats.event[id];
- for (i = 0; i < UFS_ERR_REG_HIST_LENGTH; i++) {
- int p = (i + err_hist->pos) % UFS_ERR_REG_HIST_LENGTH;
+ for (i = 0; i < UFS_EVENT_HIST_LENGTH; i++) {
+ int p = (i + e->pos) % UFS_EVENT_HIST_LENGTH;
- if (err_hist->tstamp[p] == 0)
+ if (e->tstamp[p] == 0)
continue;
dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
- err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
+ e->val[p], ktime_to_us(e->tstamp[p]));
found = true;
}
@@ -420,26 +439,26 @@ static void ufshcd_print_err_hist(struct ufs_hba *hba,
dev_err(hba->dev, "No record of %s\n", err_name);
}
-static void ufshcd_print_host_regs(struct ufs_hba *hba)
+static void ufshcd_print_evt_hist(struct ufs_hba *hba)
{
ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
- ufshcd_print_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err");
- ufshcd_print_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err");
- ufshcd_print_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err");
- ufshcd_print_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err");
- ufshcd_print_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err");
- ufshcd_print_err_hist(hba, &hba->ufs_stats.auto_hibern8_err,
- "auto_hibern8_err");
- ufshcd_print_err_hist(hba, &hba->ufs_stats.fatal_err, "fatal_err");
- ufshcd_print_err_hist(hba, &hba->ufs_stats.link_startup_err,
- "link_startup_fail");
- ufshcd_print_err_hist(hba, &hba->ufs_stats.resume_err, "resume_fail");
- ufshcd_print_err_hist(hba, &hba->ufs_stats.suspend_err,
- "suspend_fail");
- ufshcd_print_err_hist(hba, &hba->ufs_stats.dev_reset, "dev_reset");
- ufshcd_print_err_hist(hba, &hba->ufs_stats.host_reset, "host_reset");
- ufshcd_print_err_hist(hba, &hba->ufs_stats.task_abort, "task_abort");
+ ufshcd_print_evt(hba, UFS_EVT_PA_ERR, "pa_err");
+ ufshcd_print_evt(hba, UFS_EVT_DL_ERR, "dl_err");
+ ufshcd_print_evt(hba, UFS_EVT_NL_ERR, "nl_err");
+ ufshcd_print_evt(hba, UFS_EVT_TL_ERR, "tl_err");
+ ufshcd_print_evt(hba, UFS_EVT_DME_ERR, "dme_err");
+ ufshcd_print_evt(hba, UFS_EVT_AUTO_HIBERN8_ERR,
+ "auto_hibern8_err");
+ ufshcd_print_evt(hba, UFS_EVT_FATAL_ERR, "fatal_err");
+ ufshcd_print_evt(hba, UFS_EVT_LINK_STARTUP_FAIL,
+ "link_startup_fail");
+ ufshcd_print_evt(hba, UFS_EVT_RESUME_ERR, "resume_fail");
+ ufshcd_print_evt(hba, UFS_EVT_SUSPEND_ERR,
+ "suspend_fail");
+ ufshcd_print_evt(hba, UFS_EVT_DEV_RESET, "dev_reset");
+ ufshcd_print_evt(hba, UFS_EVT_HOST_RESET, "host_reset");
+ ufshcd_print_evt(hba, UFS_EVT_ABORT, "task_abort");
ufshcd_vops_dbg_register_dump(hba);
}
@@ -563,6 +582,23 @@ static void ufshcd_print_pwr_info(struct ufs_hba *hba)
hba->pwr_info.hs_rate);
}
+static void ufshcd_device_reset(struct ufs_hba *hba)
+{
+ int err;
+
+ err = ufshcd_vops_device_reset(hba);
+
+ if (!err) {
+ ufshcd_set_ufs_dev_active(hba);
+ if (ufshcd_is_wb_allowed(hba)) {
+ hba->wb_enabled = false;
+ hba->wb_buf_flush_enabled = false;
+ }
+ }
+ if (err != -EOPNOTSUPP)
+ ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, err);
+}
+
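
ufshcd_device_reset() now owns the bookkeeping that used to live in the vops wrapper: on success it marks the device active and drops the cached WriteBooster state (a hardware reset clears WB configuration on the device), and it logs the event unless the variant reports -EOPNOTSUPP. A hypothetical variant callback, for illustration only; the host struct and GPIO field are assumptions, modeled on a reset line wired to the device's RST_n pin:

	struct my_ufs_host {			/* hypothetical variant data */
		struct gpio_desc *device_reset_gpio;
	};

	static int my_ufs_device_reset(struct ufs_hba *hba)
	{
		struct my_ufs_host *host = ufshcd_get_variant(hba);

		if (!host->device_reset_gpio)
			return -EOPNOTSUPP;	/* core skips event logging */

		/* Assert, then deassert, the reset pulse */
		gpiod_set_value_cansleep(host->device_reset_gpio, 1);
		usleep_range(10, 15);
		gpiod_set_value_cansleep(host->device_reset_gpio, 0);
		usleep_range(10, 15);
		return 0;			/* core marks device active */
	}
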
void ufshcd_delay_us(unsigned long us, unsigned long tolerance)
{
if (!us)
@@ -1102,7 +1138,6 @@ out:
*/
static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
{
- #define UFS_MIN_GEAR_TO_SCALE_DOWN UFS_HS_G1
int ret = 0;
struct ufs_pa_layer_attr new_pwr_info;
@@ -1113,16 +1148,16 @@ static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
memcpy(&new_pwr_info, &hba->pwr_info,
sizeof(struct ufs_pa_layer_attr));
- if (hba->pwr_info.gear_tx > UFS_MIN_GEAR_TO_SCALE_DOWN
- || hba->pwr_info.gear_rx > UFS_MIN_GEAR_TO_SCALE_DOWN) {
+ if (hba->pwr_info.gear_tx > hba->clk_scaling.min_gear ||
+ hba->pwr_info.gear_rx > hba->clk_scaling.min_gear) {
/* save the current power mode */
memcpy(&hba->clk_scaling.saved_pwr_info.info,
&hba->pwr_info,
sizeof(struct ufs_pa_layer_attr));
/* scale down gear */
- new_pwr_info.gear_tx = UFS_MIN_GEAR_TO_SCALE_DOWN;
- new_pwr_info.gear_rx = UFS_MIN_GEAR_TO_SCALE_DOWN;
+ new_pwr_info.gear_tx = hba->clk_scaling.min_gear;
+ new_pwr_info.gear_rx = hba->clk_scaling.min_gear;
}
}
@@ -1555,6 +1590,7 @@ static void ufshcd_ungate_work(struct work_struct *work)
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
+ ufshcd_hba_vreg_set_hpm(hba);
ufshcd_setup_clocks(hba, true);
ufshcd_enable_irq(hba);
@@ -1714,12 +1750,10 @@ static void ufshcd_gate_work(struct work_struct *work)
ufshcd_disable_irq(hba);
- if (!ufshcd_is_link_active(hba))
- ufshcd_setup_clocks(hba, false);
- else
- /* If link is active, device ref_clk can't be switched off */
- __ufshcd_setup_clocks(hba, false, true);
+ ufshcd_setup_clocks(hba, false);
+ /* Put the host controller in low power mode if possible */
+ ufshcd_hba_vreg_set_lpm(hba);
/*
* In case you are here to cancel this work the gating state
* would be marked as REQ_CLKS_ON. In this case keep the state
@@ -1751,8 +1785,9 @@ static void __ufshcd_release(struct ufs_hba *hba)
if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended ||
hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
- ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks ||
- hba->active_uic_cmd || hba->uic_async_done)
+ hba->outstanding_tasks ||
+ hba->active_uic_cmd || hba->uic_async_done ||
+ hba->clk_gating.state == CLKS_OFF)
return;
hba->clk_gating.state = REQ_CLKS_OFF;
@@ -1814,19 +1849,19 @@ static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
return -EINVAL;
value = !!value;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
if (value == hba->clk_gating.is_enabled)
goto out;
- if (value) {
- ufshcd_release(hba);
- } else {
- spin_lock_irqsave(hba->host->host_lock, flags);
+ if (value)
+ __ufshcd_release(hba);
+ else
hba->clk_gating.active_reqs++;
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- }
hba->clk_gating.is_enabled = value;
out:
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
return count;
}
@@ -1837,6 +1872,9 @@ static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
if (!ufshcd_is_clkscaling_supported(hba))
return;
+ if (!hba->clk_scaling.min_gear)
+ hba->clk_scaling.min_gear = UFS_HS_G1;
+
INIT_WORK(&hba->clk_scaling.suspend_work,
ufshcd_clk_scaling_suspend_work);
INIT_WORK(&hba->clk_scaling.resume_work,
@@ -1874,7 +1912,7 @@ static void ufshcd_init_clk_gating(struct ufs_hba *hba)
snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
hba->host->host_no);
hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
- WQ_MEM_RECLAIM);
+ WQ_MEM_RECLAIM | WQ_HIGHPRI);
hba->clk_gating.is_enabled = true;
@@ -2557,6 +2595,14 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
(hba->clk_gating.state != CLKS_ON));
lrbp = &hba->lrb[tag];
+ if (unlikely(lrbp->in_use)) {
+ if (hba->pm_op_in_progress)
+ set_host_byte(cmd, DID_BAD_TARGET);
+ else
+ err = SCSI_MLQUEUE_HOST_BUSY;
+ ufshcd_release(hba);
+ goto out;
+ }
WARN_ON(lrbp->cmd);
lrbp->cmd = cmd;
@@ -2799,6 +2845,11 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
init_completion(&wait);
lrbp = &hba->lrb[tag];
+ if (unlikely(lrbp->in_use)) {
+ err = -EBUSY;
+ goto out;
+ }
+
WARN_ON(lrbp->cmd);
err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
if (unlikely(err))
@@ -2815,6 +2866,7 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
+out:
ufshcd_add_query_upiu_trace(hba, tag,
err ? "query_complete_err" : "query_complete");
@@ -2960,14 +3012,14 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
BUG_ON(!hba);
- ufshcd_hold(hba, false);
if (!attr_val) {
dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
__func__, opcode);
- err = -EINVAL;
- goto out;
+ return -EINVAL;
}
+ ufshcd_hold(hba, false);
+
mutex_lock(&hba->dev_cmd.lock);
ufshcd_init_query(hba, &request, &response, opcode, idn, index,
selector);
@@ -2999,7 +3051,6 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
out_unlock:
mutex_unlock(&hba->dev_cmd.lock);
-out:
ufshcd_release(hba);
return err;
}
@@ -3051,21 +3102,20 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
BUG_ON(!hba);
- ufshcd_hold(hba, false);
if (!desc_buf) {
dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
__func__, opcode);
- err = -EINVAL;
- goto out;
+ return -EINVAL;
}
if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
__func__, *buf_len);
- err = -EINVAL;
- goto out;
+ return -EINVAL;
}
+ ufshcd_hold(hba, false);
+
mutex_lock(&hba->dev_cmd.lock);
ufshcd_init_query(hba, &request, &response, opcode, idn, index,
selector);
@@ -3100,7 +3150,6 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
out_unlock:
hba->dev_cmd.query.descriptor = NULL;
mutex_unlock(&hba->dev_cmd.lock);
-out:
ufshcd_release(hba);
return err;
}
@@ -3601,6 +3650,22 @@ static int ufshcd_dme_reset(struct ufs_hba *hba)
return ret;
}
+int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
+ int agreed_gear,
+ int adapt_val)
+{
+ int ret;
+
+ if (agreed_gear != UFS_HS_G4)
+ adapt_val = PA_NO_ADAPT;
+
+ ret = ufshcd_dme_set(hba,
+ UIC_ARG_MIB(PA_TXHSADAPTTYPE),
+ adapt_val);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ufshcd_dme_configure_adapt);
+
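
A hedged usage sketch for the new export: a variant would typically call this from its power-mode-change notifier so the ADAPT sequence is configured before switching into HS-G4; for any other gear the helper falls back to PA_NO_ADAPT on its own. The notifier name is invented here, and PA_INITIAL_ADAPT is assumed to be defined in unipro.h:

	static int my_ufs_pre_pwr_change(struct ufs_hba *hba,
					 struct ufs_pa_layer_attr *dev_req_params)
	{
		/* Request initial ADAPT; the helper ignores it below HS-G4 */
		return ufshcd_dme_configure_adapt(hba, dev_req_params->gear_tx,
						  PA_INITIAL_ADAPT);
	}
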
/**
* ufshcd_dme_enable - UIC command for DME_ENABLE
* @hba: per adapter instance
@@ -3619,7 +3684,7 @@ static int ufshcd_dme_enable(struct ufs_hba *hba)
ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
if (ret)
dev_err(hba->dev,
- "dme-reset: error code %d\n", ret);
+ "dme-enable: error code %d\n", ret);
return ret;
}
@@ -3854,7 +3919,7 @@ out:
if (ret) {
ufshcd_print_host_state(hba);
ufshcd_print_pwr_info(hba);
- ufshcd_print_host_regs(hba);
+ ufshcd_print_evt_hist(hba);
}
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -3918,7 +3983,7 @@ int ufshcd_link_recovery(struct ufs_hba *hba)
spin_unlock_irqrestore(hba->host->host_lock, flags);
/* Reset the attached device */
- ufshcd_vops_device_reset(hba);
+ ufshcd_device_reset(hba);
ret = ufshcd_host_reset_and_restore(hba);
@@ -3931,6 +3996,8 @@ int ufshcd_link_recovery(struct ufs_hba *hba)
if (ret)
dev_err(hba->dev, "%s: link recovery failed, err %d",
__func__, ret);
+ else
+ ufshcd_clear_ua_wluns(hba);
return ret;
}
@@ -4350,8 +4417,10 @@ static inline void ufshcd_hba_stop(struct ufs_hba *hba)
*/
static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
{
- int retry;
+ int retry_outer = 3;
+ int retry_inner;
+start:
if (!ufshcd_is_hba_active(hba))
/* change controller state to "reset state" */
ufshcd_hba_stop(hba);
@@ -4377,13 +4446,17 @@ static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100);
/* wait for the host controller to complete initialization */
- retry = 50;
+ retry_inner = 50;
while (ufshcd_is_hba_active(hba)) {
- if (retry) {
- retry--;
+ if (retry_inner) {
+ retry_inner--;
} else {
dev_err(hba->dev,
"Controller enable failed\n");
+ if (retry_outer) {
+ retry_outer--;
+ goto start;
+ }
return -EIO;
}
usleep_range(1000, 1100);
@@ -4460,14 +4533,21 @@ static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
return ufshcd_disable_tx_lcc(hba, true);
}
-void ufshcd_update_reg_hist(struct ufs_err_reg_hist *reg_hist,
- u32 reg)
+void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val)
{
- reg_hist->reg[reg_hist->pos] = reg;
- reg_hist->tstamp[reg_hist->pos] = ktime_get();
- reg_hist->pos = (reg_hist->pos + 1) % UFS_ERR_REG_HIST_LENGTH;
+ struct ufs_event_hist *e;
+
+ if (id >= UFS_EVT_CNT)
+ return;
+
+ e = &hba->ufs_stats.event[id];
+ e->val[e->pos] = val;
+ e->tstamp[e->pos] = ktime_get();
+ e->pos = (e->pos + 1) % UFS_EVENT_HIST_LENGTH;
+
+ ufshcd_vops_event_notify(hba, id, &val);
}
-EXPORT_SYMBOL_GPL(ufshcd_update_reg_hist);
+EXPORT_SYMBOL_GPL(ufshcd_update_evt_hist);
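
The renamed helper keeps the same ring-buffer discipline as the old per-error structs: write at pos, advance modulo UFS_EVENT_HIST_LENGTH, and replay oldest-first starting from pos, which is exactly how ufshcd_print_evt() above walks it. A standalone sketch of that discipline (plain userspace C, for illustration only; the kernel version additionally skips slots whose timestamp is zero):

	#include <stdio.h>

	#define HIST_LEN 8

	struct hist { int pos; unsigned int val[HIST_LEN]; };

	static void hist_add(struct hist *h, unsigned int val)
	{
		h->val[h->pos] = val;			/* overwrite oldest */
		h->pos = (h->pos + 1) % HIST_LEN;	/* next free slot */
	}

	int main(void)
	{
		struct hist h = { 0 };

		for (unsigned int i = 1; i <= 10; i++)	/* wrap on purpose */
			hist_add(&h, i);
		for (int i = 0; i < HIST_LEN; i++)	/* oldest entry first */
			printf("%u ", h.val[(i + h.pos) % HIST_LEN]);
		printf("\n");				/* 3 4 5 6 7 8 9 10 */
		return 0;
	}
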
/**
* ufshcd_link_startup - Initialize unipro link startup
@@ -4496,7 +4576,8 @@ link_startup:
/* check if device is detected by inter-connect layer */
if (!ret && !ufshcd_is_device_present(hba)) {
- ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err,
+ ufshcd_update_evt_hist(hba,
+ UFS_EVT_LINK_STARTUP_FAIL,
0);
dev_err(hba->dev, "%s: Device not present\n", __func__);
ret = -ENXIO;
@@ -4509,7 +4590,8 @@ link_startup:
* succeeds. So reset the local Uni-Pro and try again.
*/
if (ret && ufshcd_hba_enable(hba)) {
- ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err,
+ ufshcd_update_evt_hist(hba,
+ UFS_EVT_LINK_STARTUP_FAIL,
(u32)ret);
goto out;
}
@@ -4517,7 +4599,8 @@ link_startup:
if (ret) {
/* failed to get the link up... retire */
- ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err,
+ ufshcd_update_evt_hist(hba,
+ UFS_EVT_LINK_STARTUP_FAIL,
(u32)ret);
goto out;
}
@@ -4551,7 +4634,7 @@ out:
dev_err(hba->dev, "link startup failed %d\n", ret);
ufshcd_print_host_state(hba);
ufshcd_print_pwr_info(hba);
- ufshcd_print_host_regs(hba);
+ ufshcd_print_evt_hist(hba);
}
return ret;
}
@@ -4906,12 +4989,13 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
dev_err(hba->dev,
"OCS error from controller = %x for tag %d\n",
ocs, lrbp->task_tag);
- ufshcd_print_host_regs(hba);
+ ufshcd_print_evt_hist(hba);
ufshcd_print_host_state(hba);
break;
} /* end of switch */
- if ((host_byte(result) != DID_OK) && !hba->silence_err_logs)
+ if ((host_byte(result) != DID_OK) &&
+ (host_byte(result) != DID_REQUEUE) && !hba->silence_err_logs)
ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
return result;
}
@@ -4964,9 +5048,11 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
struct scsi_cmnd *cmd;
int result;
int index;
+ bool update_scaling = false;
for_each_set_bit(index, &completed_reqs, hba->nutrs) {
lrbp = &hba->lrb[index];
+ lrbp->in_use = false;
lrbp->compl_time_stamp = ktime_get();
cmd = lrbp->cmd;
if (cmd) {
@@ -4979,15 +5065,17 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
/* Do not touch lrbp after scsi done */
cmd->scsi_done(cmd);
__ufshcd_release(hba);
+ update_scaling = true;
} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
if (hba->dev_cmd.complete) {
ufshcd_add_command_trace(hba, index,
"dev_complete");
complete(hba->dev_cmd.complete);
+ update_scaling = true;
}
}
- if (ufshcd_is_clkscaling_supported(hba))
+ if (ufshcd_is_clkscaling_supported(hba) && update_scaling)
hba->clk_scaling.active_reqs--;
}
@@ -5352,9 +5440,6 @@ static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set)
static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable)
{
- if (hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL)
- return;
-
if (enable)
ufshcd_wb_buf_flush_enable(hba);
else
@@ -5632,7 +5717,9 @@ static inline void ufshcd_schedule_eh_work(struct ufs_hba *hba)
static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
{
pm_runtime_get_sync(hba->dev);
- if (pm_runtime_suspended(hba->dev)) {
+ if (pm_runtime_status_suspended(hba->dev) || hba->is_sys_suspended) {
+ enum ufs_pm_op pm_op;
+
/*
* Don't assume anything of pm_runtime_get_sync(), if
* resume fails, irq and clocks can be OFF, and powers
@@ -5647,7 +5734,8 @@ static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
if (!ufshcd_is_clkgating_allowed(hba))
ufshcd_setup_clocks(hba, true);
ufshcd_release(hba);
- ufshcd_vops_resume(hba, UFS_RUNTIME_PM);
+ pm_op = hba->is_sys_suspended ? UFS_SYSTEM_PM : UFS_RUNTIME_PM;
+ ufshcd_vops_resume(hba, pm_op);
} else {
ufshcd_hold(hba, false);
if (hba->clk_scaling.is_allowed) {
@@ -5668,7 +5756,7 @@ static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba)
{
- return (hba->ufshcd_state == UFSHCD_STATE_ERROR ||
+ return (!hba->is_powered || hba->ufshcd_state == UFSHCD_STATE_ERROR ||
(!(hba->saved_err || hba->saved_uic_err || hba->force_reset ||
ufshcd_is_link_broken(hba))));
}
@@ -5681,6 +5769,7 @@ static void ufshcd_recover_pm_error(struct ufs_hba *hba)
struct request_queue *q;
int ret;
+ hba->is_sys_suspended = false;
/*
* Set RPM status of hba device to RPM_ACTIVE,
* this also clears its runtime error.
@@ -5739,11 +5828,13 @@ static void ufshcd_err_handler(struct work_struct *work)
hba = container_of(work, struct ufs_hba, eh_work);
+ down(&hba->eh_sem);
spin_lock_irqsave(hba->host->host_lock, flags);
if (ufshcd_err_handling_should_stop(hba)) {
if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
spin_unlock_irqrestore(hba->host->host_lock, flags);
+ up(&hba->eh_sem);
return;
}
ufshcd_set_eh_in_progress(hba);
@@ -5751,20 +5842,18 @@ static void ufshcd_err_handler(struct work_struct *work)
ufshcd_err_handling_prepare(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
ufshcd_scsi_block_requests(hba);
- /*
- * A full reset and restore might have happened after preparation
- * is finished, double check whether we should stop.
- */
- if (ufshcd_err_handling_should_stop(hba)) {
- if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
- hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
- goto out;
- }
hba->ufshcd_state = UFSHCD_STATE_RESET;
/* Complete requests that have door-bell cleared by h/w */
ufshcd_complete_requests(hba);
+ /*
+ * A full reset and restore might have happened after preparation
+ * has finished, so double-check whether we should stop.
+ */
+ if (ufshcd_err_handling_should_stop(hba))
+ goto skip_err_handling;
+
if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
bool ret;
@@ -5772,17 +5861,10 @@ static void ufshcd_err_handler(struct work_struct *work)
/* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
ret = ufshcd_quirk_dl_nac_errors(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
- if (!ret && !hba->force_reset && ufshcd_is_link_active(hba))
+ if (!ret && ufshcd_err_handling_should_stop(hba))
goto skip_err_handling;
}
- if (hba->force_reset || ufshcd_is_link_broken(hba) ||
- ufshcd_is_saved_err_fatal(hba) ||
- ((hba->saved_err & UIC_ERROR) &&
- (hba->saved_uic_err & (UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
- UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
- needs_reset = true;
-
if ((hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
(hba->saved_uic_err &&
(hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
@@ -5791,7 +5873,7 @@ static void ufshcd_err_handler(struct work_struct *work)
spin_unlock_irqrestore(hba->host->host_lock, flags);
ufshcd_print_host_state(hba);
ufshcd_print_pwr_info(hba);
- ufshcd_print_host_regs(hba);
+ ufshcd_print_evt_hist(hba);
ufshcd_print_tmrs(hba, hba->outstanding_tasks);
ufshcd_print_trs(hba, hba->outstanding_reqs, pr_prdt);
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -5802,8 +5884,14 @@ static void ufshcd_err_handler(struct work_struct *work)
* transfers forcefully because they will get cleared during
* host reset and restore
*/
- if (needs_reset)
+ if (hba->force_reset || ufshcd_is_link_broken(hba) ||
+ ufshcd_is_saved_err_fatal(hba) ||
+ ((hba->saved_err & UIC_ERROR) &&
+ (hba->saved_uic_err & (UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
+ UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))) {
+ needs_reset = true;
goto do_reset;
+ }
/*
* If LINERESET was caught, UFS might have been put to PWM mode,
@@ -5911,12 +5999,14 @@ skip_err_handling:
dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
__func__, hba->saved_err, hba->saved_uic_err);
}
-
-out:
ufshcd_clear_eh_in_progress(hba);
spin_unlock_irqrestore(hba->host->host_lock, flags);
ufshcd_scsi_unblock_requests(hba);
ufshcd_err_handling_unprepare(hba);
+ up(&hba->eh_sem);
+
+ if (!err && needs_reset)
+ ufshcd_clear_ua_wluns(hba);
}
/**
@@ -5936,7 +6026,7 @@ static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
(reg & UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK)) {
- ufshcd_update_reg_hist(&hba->ufs_stats.pa_err, reg);
+ ufshcd_update_evt_hist(hba, UFS_EVT_PA_ERR, reg);
/*
* To know whether this error is fatal or not, DB timeout
* must be checked but this error is handled separately.
@@ -5966,7 +6056,7 @@ static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
(reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
- ufshcd_update_reg_hist(&hba->ufs_stats.dl_err, reg);
+ ufshcd_update_evt_hist(hba, UFS_EVT_DL_ERR, reg);
if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
@@ -5985,7 +6075,7 @@ static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
if ((reg & UIC_NETWORK_LAYER_ERROR) &&
(reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
- ufshcd_update_reg_hist(&hba->ufs_stats.nl_err, reg);
+ ufshcd_update_evt_hist(hba, UFS_EVT_NL_ERR, reg);
hba->uic_error |= UFSHCD_UIC_NL_ERROR;
retval |= IRQ_HANDLED;
}
@@ -5993,7 +6083,7 @@ static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
(reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
- ufshcd_update_reg_hist(&hba->ufs_stats.tl_err, reg);
+ ufshcd_update_evt_hist(hba, UFS_EVT_TL_ERR, reg);
hba->uic_error |= UFSHCD_UIC_TL_ERROR;
retval |= IRQ_HANDLED;
}
@@ -6001,7 +6091,7 @@ static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
if ((reg & UIC_DME_ERROR) &&
(reg & UIC_DME_ERROR_CODE_MASK)) {
- ufshcd_update_reg_hist(&hba->ufs_stats.dme_err, reg);
+ ufshcd_update_evt_hist(hba, UFS_EVT_DME_ERR, reg);
hba->uic_error |= UFSHCD_UIC_DME_ERROR;
retval |= IRQ_HANDLED;
}
@@ -6043,7 +6133,8 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
irqreturn_t retval = IRQ_NONE;
if (hba->errors & INT_FATAL_ERRORS) {
- ufshcd_update_reg_hist(&hba->ufs_stats.fatal_err, hba->errors);
+ ufshcd_update_evt_hist(hba, UFS_EVT_FATAL_ERR,
+ hba->errors);
queue_eh_work = true;
}
@@ -6060,7 +6151,7 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
__func__, (hba->errors & UIC_HIBERNATE_ENTER) ?
"Enter" : "Exit",
hba->errors, ufshcd_get_upmcrs(hba));
- ufshcd_update_reg_hist(&hba->ufs_stats.auto_hibern8_err,
+ ufshcd_update_evt_hist(hba, UFS_EVT_AUTO_HIBERN8_ERR,
hba->errors);
ufshcd_set_link_broken(hba);
queue_eh_work = true;
@@ -6075,7 +6166,8 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
hba->saved_uic_err |= hba->uic_error;
/* dump controller state before resetting */
- if ((hba->saved_err & (INT_FATAL_ERRORS)) ||
+ if ((hba->saved_err &
+ (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
(hba->saved_uic_err &&
(hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
@@ -6209,9 +6301,13 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
}
- if (enabled_intr_status && retval == IRQ_NONE) {
- dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n",
- __func__, intr_status);
+ if (enabled_intr_status && retval == IRQ_NONE &&
+ !ufshcd_eh_in_progress(hba)) {
+ dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n",
+ __func__,
+ intr_status,
+ hba->ufs_stats.last_intr_status,
+ enabled_intr_status);
ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
}
@@ -6255,7 +6351,10 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
* Even though we use wait_event() which sleeps indefinitely,
* the maximum wait time is bounded by %TM_CMD_TIMEOUT.
*/
- req = blk_get_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_RESERVED);
+ req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
req->end_io_data = &wait;
free_slot = req->tag;
WARN_ON_ONCE(free_slot < 0 || free_slot >= hba->nutmrs);
@@ -6407,8 +6506,12 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
init_completion(&wait);
lrbp = &hba->lrb[tag];
- WARN_ON(lrbp->cmd);
+ if (unlikely(lrbp->in_use)) {
+ err = -EBUSY;
+ goto out;
+ }
+ WARN_ON(lrbp->cmd);
lrbp->cmd = NULL;
lrbp->sense_bufflen = 0;
lrbp->sense_buffer = NULL;
@@ -6480,6 +6583,7 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
}
}
+out:
blk_put_request(req);
out_unlock:
up_read(&hba->clk_scaling_lock);
@@ -6568,19 +6672,16 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
struct Scsi_Host *host;
struct ufs_hba *hba;
- unsigned int tag;
u32 pos;
int err;
- u8 resp = 0xF;
- struct ufshcd_lrb *lrbp;
+ u8 resp = 0xF, lun;
unsigned long flags;
host = cmd->device->host;
hba = shost_priv(host);
- tag = cmd->request->tag;
- lrbp = &hba->lrb[tag];
- err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
+ lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
+ err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
if (!err)
err = resp;
@@ -6589,7 +6690,7 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
/* clear the commands that were pending for corresponding LUN */
for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
- if (hba->lrb[pos].lun == lrbp->lun) {
+ if (hba->lrb[pos].lun == lun) {
err = ufshcd_clear_cmd(hba, pos);
if (err)
break;
@@ -6601,7 +6702,7 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
out:
hba->req_abort_count = 0;
- ufshcd_update_reg_hist(&hba->ufs_stats.dev_reset, (u32)err);
+ ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, (u32)err);
if (!err) {
err = SUCCESS;
} else {
@@ -6624,7 +6725,8 @@ static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
/**
* ufshcd_try_to_abort_task - abort a specific task
- * @cmd: SCSI command pointer
+ * @hba: Pointer to adapter instance
+ * @tag: Task tag/index to be aborted
*
* Abort the pending command in device by sending UFS_ABORT_TASK task management
* command, and in host controller by clearing the door-bell register. There can
@@ -6729,16 +6831,6 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
BUG();
}
- /*
- * Task abort to the device W-LUN is illegal. When this command
- * will fail, due to spec violation, scsi err handling next step
- * will be to send LU reset which, again, is a spec violation.
- * To avoid these unnecessary/illegal step we skip to the last error
- * handling stage: reset and restore.
- */
- if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN)
- return ufshcd_eh_host_reset_handler(cmd);
-
ufshcd_hold(hba, false);
reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
/* If command is already aborted/completed, return SUCCESS */
@@ -6759,10 +6851,10 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
* to reduce repeated printouts. For other aborted requests only print
* basic details.
*/
- scsi_print_command(hba->lrb[tag].cmd);
+ scsi_print_command(cmd);
if (!hba->req_abort_count) {
- ufshcd_update_reg_hist(&hba->ufs_stats.task_abort, 0);
- ufshcd_print_host_regs(hba);
+ ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, tag);
+ ufshcd_print_evt_hist(hba);
ufshcd_print_host_state(hba);
ufshcd_print_pwr_info(hba);
ufshcd_print_trs(hba, 1 << tag, true);
@@ -6778,6 +6870,29 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
goto cleanup;
}
+ /*
+ * Task abort to the device W-LUN is illegal. If this command
+ * fails due to a spec violation, the next step of SCSI error
+ * handling would be a LU reset which, again, is a spec violation.
+ * To avoid these unnecessary/illegal steps, first we clean up
+ * the lrb taken by this cmd and mark the lrb as in_use, then
+ * queue the eh_work and bail.
+ */
+ if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) {
+ ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun);
+ spin_lock_irqsave(host->host_lock, flags);
+ if (lrbp->cmd) {
+ __ufshcd_transfer_req_compl(hba, (1UL << tag));
+ __set_bit(tag, &hba->outstanding_reqs);
+ lrbp->in_use = true;
+ hba->force_reset = true;
+ ufshcd_schedule_eh_work(hba);
+ }
+
+ spin_unlock_irqrestore(host->host_lock, flags);
+ goto out;
+ }
+
/* Skip task abort in case previous aborts failed and report failure */
if (lrbp->req_abort_skip)
err = -EIO;
@@ -6836,16 +6951,14 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
ufshcd_set_clk_freq(hba, true);
err = ufshcd_hba_enable(hba);
- if (err)
- goto out;
/* Establish the link again and restore the device */
- err = ufshcd_probe_hba(hba, false);
+ if (!err)
+ err = ufshcd_probe_hba(hba, false);
-out:
if (err)
dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
- ufshcd_update_reg_hist(&hba->ufs_stats.host_reset, (u32)err);
+ ufshcd_update_evt_hist(hba, UFS_EVT_HOST_RESET, (u32)err);
return err;
}
@@ -6879,7 +6992,7 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba)
do {
/* Reset the attached device */
- ufshcd_vops_device_reset(hba);
+ ufshcd_device_reset(hba);
err = ufshcd_host_reset_and_restore(hba);
} while (err && --retries);
@@ -6891,6 +7004,7 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba)
*/
scsi_report_bus_reset(hba->host, 0);
if (err) {
+ hba->ufshcd_state = UFSHCD_STATE_ERROR;
hba->saved_err |= saved_err;
hba->saved_uic_err |= saved_uic_err;
}
@@ -7092,7 +7206,6 @@ static inline void ufshcd_blk_pm_runtime_init(struct scsi_device *sdev)
static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
{
int ret = 0;
- struct scsi_device *sdev_rpmb;
struct scsi_device *sdev_boot;
hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
@@ -7105,14 +7218,14 @@ static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
ufshcd_blk_pm_runtime_init(hba->sdev_ufs_device);
scsi_device_put(hba->sdev_ufs_device);
- sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
+ hba->sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
- if (IS_ERR(sdev_rpmb)) {
- ret = PTR_ERR(sdev_rpmb);
+ if (IS_ERR(hba->sdev_rpmb)) {
+ ret = PTR_ERR(hba->sdev_rpmb);
goto remove_sdev_ufs_device;
}
- ufshcd_blk_pm_runtime_init(sdev_rpmb);
- scsi_device_put(sdev_rpmb);
+ ufshcd_blk_pm_runtime_init(hba->sdev_rpmb);
+ scsi_device_put(hba->sdev_rpmb);
sdev_boot = __scsi_add_device(hba->host, 0, 0,
ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
@@ -7613,6 +7726,8 @@ static int ufshcd_add_lus(struct ufs_hba *hba)
if (ret)
goto out;
+ ufshcd_clear_ua_wluns(hba);
+
/* Initialize devfreq after UFS device is detected */
if (ufshcd_is_clkscaling_supported(hba)) {
memcpy(&hba->clk_scaling.saved_pwr_info.info,
@@ -7636,6 +7751,63 @@ out:
return ret;
}
+static int
+ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp);
+
+static int ufshcd_clear_ua_wlun(struct ufs_hba *hba, u8 wlun)
+{
+ struct scsi_device *sdp;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (wlun == UFS_UPIU_UFS_DEVICE_WLUN)
+ sdp = hba->sdev_ufs_device;
+ else if (wlun == UFS_UPIU_RPMB_WLUN)
+ sdp = hba->sdev_rpmb;
+ else
+ BUG();
+ if (sdp) {
+ ret = scsi_device_get(sdp);
+ if (!ret && !scsi_device_online(sdp)) {
+ ret = -ENODEV;
+ scsi_device_put(sdp);
+ }
+ } else {
+ ret = -ENODEV;
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ if (ret)
+ goto out_err;
+
+ ret = ufshcd_send_request_sense(hba, sdp);
+ scsi_device_put(sdp);
+out_err:
+ if (ret)
+ dev_err(hba->dev, "%s: UAC clear LU=%x ret = %d\n",
+ __func__, wlun, ret);
+ return ret;
+}
+
+static int ufshcd_clear_ua_wluns(struct ufs_hba *hba)
+{
+ int ret = 0;
+
+ if (!hba->wlun_dev_clr_ua)
+ goto out;
+
+ ret = ufshcd_clear_ua_wlun(hba, UFS_UPIU_UFS_DEVICE_WLUN);
+ if (!ret)
+ ret = ufshcd_clear_ua_wlun(hba, UFS_UPIU_RPMB_WLUN);
+ if (!ret)
+ hba->wlun_dev_clr_ua = false;
+out:
+ if (ret)
+ dev_err(hba->dev, "%s: Failed to clear UAC WLUNS ret = %d\n",
+ __func__, ret);
+ return ret;
+}
+
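
Background for the two helpers above: after a reset or a power mode transition the device raises a Unit Attention condition on its W-LUs, and issuing a REQUEST SENSE retires it. The forward-declared ufshcd_send_request_sense() lives outside this hunk; a hedged sketch of the idea, with the buffer size and flags as assumptions rather than quoted driver code:

	static int request_sense_clears_ua(struct scsi_device *sdp)
	{
		static const unsigned char cmd[6] = {
			REQUEST_SENSE, 0, 0, 0, UFS_SENSE_SIZE, 0
		};
		unsigned char *buf = kzalloc(UFS_SENSE_SIZE, GFP_KERNEL);
		int ret;

		if (!buf)
			return -ENOMEM;

		/* RQF_PM lets the command through while quiesced for PM */
		ret = scsi_execute(sdp, cmd, DMA_FROM_DEVICE, buf,
				   UFS_SENSE_SIZE, NULL, NULL,
				   msecs_to_jiffies(1000), 3, 0, RQF_PM, NULL);
		kfree(buf);
		return ret;
	}
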
/**
* ufshcd_probe_hba - probe hba to detect device and initialize
* @hba: per-adapter instance
@@ -7739,8 +7911,10 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie)
struct ufs_hba *hba = (struct ufs_hba *)data;
int ret;
+ down(&hba->eh_sem);
/* Initialize hba, detect and initialize UFS device */
ret = ufshcd_probe_hba(hba, true);
+ up(&hba->eh_sem);
if (ret)
goto out;
@@ -7895,7 +8069,7 @@ static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
{
int ret = 0;
- if (!vreg || !vreg->enabled)
+ if (!vreg || !vreg->enabled || vreg->always_on)
goto out;
ret = regulator_disable(vreg->reg);
@@ -7988,8 +8162,7 @@ static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
return 0;
}
-static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
- bool skip_ref_clk)
+static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
{
int ret = 0;
struct ufs_clk_info *clki;
@@ -8007,7 +8180,12 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
list_for_each_entry(clki, head, list) {
if (!IS_ERR_OR_NULL(clki->clk)) {
- if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
+ /*
+ * Don't disable clocks which are needed
+ * to keep the link active.
+ */
+ if (ufshcd_is_link_active(hba) &&
+ clki->keep_link_active)
continue;
clk_state_changed = on ^ clki->enabled;
@@ -8052,11 +8230,6 @@ out:
return ret;
}
-static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
-{
- return __ufshcd_setup_clocks(hba, on, false);
-}
-
static int ufshcd_init_clocks(struct ufs_hba *hba)
{
int ret = 0;
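
The skip_ref_clk special case is gone: each clock now carries a keep_link_active flag that ufshcd_setup_clocks() checks against the live link state. Where that flag gets set is outside this diff; a hypothetical sketch of platform glue marking the device reference clock, on the assumption that it appears as "ref_clk" in the clock list:

	static void my_ufs_mark_ref_clk(struct ufs_hba *hba)
	{
		struct ufs_clk_info *clki;

		/* Keep the device reference clock running while the link
		 * is active, mirroring the old skip_ref_clk behaviour. */
		list_for_each_entry(clki, &hba->clk_list_head, list)
			if (!strcmp(clki->name, "ref_clk"))
				clki->keep_link_active = true;
	}
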
@@ -8113,15 +8286,9 @@ static int ufshcd_variant_hba_init(struct ufs_hba *hba)
err = ufshcd_vops_init(hba);
if (err)
- goto out;
-
- err = ufshcd_vops_setup_regulators(hba, true);
- if (err)
- ufshcd_vops_exit(hba);
-out:
- if (err)
dev_err(hba->dev, "%s: variant %s init failed err %d\n",
__func__, ufshcd_get_var_name(hba), err);
+out:
return err;
}
@@ -8130,8 +8297,6 @@ static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
if (!hba->vops)
return;
- ufshcd_vops_setup_regulators(hba, false);
-
ufshcd_vops_exit(hba);
}
@@ -8273,13 +8438,7 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
* handling context.
*/
hba->host->eh_noresume = 1;
- if (hba->wlun_dev_clr_ua) {
- ret = ufshcd_send_request_sense(hba, sdp);
- if (ret)
- goto out;
- /* Unit attention condition is cleared now */
- hba->wlun_dev_clr_ua = false;
- }
+ ufshcd_clear_ua_wluns(hba);
cmd[4] = pwr_mode << 4;
@@ -8300,7 +8459,7 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
if (!ret)
hba->curr_dev_pwr_mode = pwr_mode;
-out:
+
scsi_device_put(sdp);
hba->host->eh_noresume = 0;
return ret;
@@ -8327,7 +8486,8 @@ static int ufshcd_link_state_transition(struct ufs_hba *hba,
}
/*
* If autobkops is enabled, link can't be turned off because
- * turning off the link would also turn off the device.
+ * turning off the link would also turn off the device, except in the
+ * case of DeepSleep where the device is expected to remain powered.
*/
else if ((req_link_state == UIC_LINK_OFF_STATE) &&
(!check_for_bkops || !hba->auto_bkops_enabled)) {
@@ -8337,6 +8497,9 @@ static int ufshcd_link_state_transition(struct ufs_hba *hba,
* put the link in low power mode is to send the DME end point
* to device and then send the DME reset command to local
* unipro. But putting the link in hibern8 is much faster.
+ *
+ * Note also that putting the link in Hibern8 is a requirement
+ * for entering DeepSleep.
*/
ret = ufshcd_uic_hibern8_enter(hba);
if (ret) {
@@ -8440,13 +8603,13 @@ out:
static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
{
- if (ufshcd_is_link_off(hba))
+ if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
ufshcd_setup_hba_vreg(hba, false);
}
static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
{
- if (ufshcd_is_link_off(hba))
+ if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
ufshcd_setup_hba_vreg(hba, true);
}
@@ -8469,6 +8632,7 @@ static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
int ret = 0;
+ int check_for_bkops;
enum ufs_pm_level pm_lvl;
enum ufs_dev_pwr_mode req_dev_pwr_mode;
enum uic_link_state req_link_state;
@@ -8539,12 +8703,12 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
ufshcd_wb_need_flush(hba));
}
+ flush_work(&hba->eeh_work);
+
if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) {
- if ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
- !ufshcd_is_runtime_pm(pm_op)) {
+ if (!ufshcd_is_runtime_pm(pm_op))
/* ensure that bkops is disabled */
ufshcd_disable_auto_bkops(hba);
- }
if (!hba->dev_info.b_rpm_dev_flush_capable) {
ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
@@ -8553,8 +8717,12 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
}
}
- flush_work(&hba->eeh_work);
- ret = ufshcd_link_state_transition(hba, req_link_state, 1);
+ /*
+ * In the case of DeepSleep, the device is expected to remain powered
+ * with the link off, so do not check for bkops.
+ */
+ check_for_bkops = !ufshcd_is_ufs_dev_deepsleep(hba);
+ ret = ufshcd_link_state_transition(hba, req_link_state, check_for_bkops);
if (ret)
goto set_dev_active;
@@ -8575,11 +8743,7 @@ disable_clks:
*/
ufshcd_disable_irq(hba);
- if (!ufshcd_is_link_active(hba))
- ufshcd_setup_clocks(hba, false);
- else
- /* If link is active, device ref_clk can't be switched off */
- __ufshcd_setup_clocks(hba, false, true);
+ ufshcd_setup_clocks(hba, false);
if (ufshcd_is_clkgating_allowed(hba)) {
hba->clk_gating.state = CLKS_OFF;
@@ -8595,11 +8759,25 @@ set_link_active:
if (hba->clk_scaling.is_allowed)
ufshcd_resume_clkscaling(hba);
ufshcd_vreg_set_hpm(hba);
+ /*
+ * Device hardware reset is required to exit DeepSleep. Also, for
+ * DeepSleep, the link is off so host reset and restore will be done
+ * further below.
+ */
+ if (ufshcd_is_ufs_dev_deepsleep(hba)) {
+ ufshcd_device_reset(hba);
+ WARN_ON(!ufshcd_is_link_off(hba));
+ }
if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
ufshcd_set_link_active(hba);
else if (ufshcd_is_link_off(hba))
ufshcd_host_reset_and_restore(hba);
set_dev_active:
+ /* Can also get here needing to exit DeepSleep */
+ if (ufshcd_is_ufs_dev_deepsleep(hba)) {
+ ufshcd_device_reset(hba);
+ ufshcd_host_reset_and_restore(hba);
+ }
if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
ufshcd_disable_auto_bkops(hba);
enable_gating:
@@ -8607,6 +8785,7 @@ enable_gating:
ufshcd_resume_clkscaling(hba);
hba->clk_gating.is_suspended = false;
hba->dev_info.b_rpm_dev_flush_capable = false;
+ ufshcd_clear_ua_wluns(hba);
ufshcd_release(hba);
out:
if (hba->dev_info.b_rpm_dev_flush_capable) {
@@ -8617,7 +8796,7 @@ out:
hba->pm_op_in_progress = 0;
if (ret)
- ufshcd_update_reg_hist(&hba->ufs_stats.suspend_err, (u32)ret);
+ ufshcd_update_evt_hist(hba, UFS_EVT_SUSPEND_ERR, (u32)ret);
return ret;
}
@@ -8661,6 +8840,9 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
if (ret)
goto disable_vreg;
+ /* For DeepSleep, the only supported option is to have the link off */
+ WARN_ON(ufshcd_is_ufs_dev_deepsleep(hba) && !ufshcd_is_link_off(hba));
+
if (ufshcd_is_link_hibern8(hba)) {
ret = ufshcd_uic_hibern8_exit(hba);
if (!ret) {
@@ -8674,6 +8856,8 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
/*
* A full initialization of the host and the device is
* required since the link was put to off during suspend.
+ * Note, in the case of DeepSleep, the device will exit
+ * DeepSleep due to device reset.
*/
ret = ufshcd_reset_and_restore(hba);
/*
@@ -8712,6 +8896,8 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);
}
+ ufshcd_clear_ua_wluns(hba);
+
/* Schedule clock gating in case of no access to UFS device yet */
ufshcd_release(hba);
@@ -8736,7 +8922,7 @@ disable_irq_and_vops_clks:
out:
hba->pm_op_in_progress = 0;
if (ret)
- ufshcd_update_reg_hist(&hba->ufs_stats.resume_err, (u32)ret);
+ ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)ret);
return ret;
}
@@ -8753,13 +8939,15 @@ int ufshcd_system_suspend(struct ufs_hba *hba)
int ret = 0;
ktime_t start = ktime_get();
+ down(&hba->eh_sem);
if (!hba || !hba->is_powered)
return 0;
if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
hba->curr_dev_pwr_mode) &&
(ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
- hba->uic_link_state))
+ hba->uic_link_state) &&
+ !hba->dev_info.b_rpm_dev_flush_capable)
goto out;
if (pm_runtime_suspended(hba->dev)) {
@@ -8783,6 +8971,8 @@ out:
hba->curr_dev_pwr_mode, hba->uic_link_state);
if (!ret)
hba->is_sys_suspended = true;
+ else
+ up(&hba->eh_sem);
return ret;
}
EXPORT_SYMBOL(ufshcd_system_suspend);
@@ -8799,8 +8989,10 @@ int ufshcd_system_resume(struct ufs_hba *hba)
int ret = 0;
ktime_t start = ktime_get();
- if (!hba)
+ if (!hba) {
+ up(&hba->eh_sem);
return -EINVAL;
+ }
if (!hba->is_powered || pm_runtime_suspended(hba->dev))
/*
@@ -8816,6 +9008,7 @@ out:
hba->curr_dev_pwr_mode, hba->uic_link_state);
if (!ret)
hba->is_sys_suspended = false;
+ up(&hba->eh_sem);
return ret;
}
EXPORT_SYMBOL(ufshcd_system_resume);
@@ -8907,6 +9100,7 @@ int ufshcd_shutdown(struct ufs_hba *hba)
{
int ret = 0;
+ down(&hba->eh_sem);
if (!hba->is_powered)
goto out;
@@ -8919,6 +9113,8 @@ int ufshcd_shutdown(struct ufs_hba *hba)
out:
if (ret)
dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
+ hba->is_powered = false;
+ up(&hba->eh_sem);
/* allow force shutdown even in case of errors */
return 0;
}
@@ -9114,6 +9310,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
INIT_WORK(&hba->eh_work, ufshcd_err_handler);
INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
+ sema_init(&hba->eh_sem, 1);
+
/* Initialize UIC command mutex */
mutex_init(&hba->uic_cmd_mutex);
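
Taken together, the eh_sem hunks implement a simple exclusion protocol between error handling and system-wide PM; condensed here as a reading of the hunks above, not code from the patch:

	/*
	 * eh_sem pairing introduced by this patch:
	 *
	 *   ufshcd_err_handler()     down() on entry, up() on every exit
	 *   ufshcd_async_scan()      down(); probe; up()
	 *   ufshcd_system_suspend()  down(); held across a successful
	 *                            suspend, up() only on failure
	 *   ufshcd_system_resume()   up() on exit, releasing the
	 *                            suspend-side down()
	 *   ufshcd_shutdown()        down(); never released (final stop)
	 *
	 * Net effect: the error handler cannot interleave with a system
	 * suspend/resume cycle or with shutdown. Note that both PM entry
	 * points touch hba->eh_sem before their !hba checks, so those
	 * NULL checks are effectively dead.
	 */
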
@@ -9177,7 +9375,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
}
/* Reset the attached device */
- ufshcd_vops_device_reset(hba);
+ ufshcd_device_reset(hba);
ufshcd_init_crypto(hba);
@@ -9185,7 +9383,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
err = ufshcd_hba_enable(hba);
if (err) {
dev_err(hba->dev, "Host controller enable failed\n");
- ufshcd_print_host_regs(hba);
+ ufshcd_print_evt_hist(hba);
ufshcd_print_host_state(hba);
goto free_tmf_queue;
}
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index e0f00a42371c..aa9ea3552323 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -58,6 +58,29 @@ enum dev_cmd_type {
DEV_CMD_TYPE_QUERY = 0x1,
};
+enum ufs_event_type {
+ /* uic specific errors */
+ UFS_EVT_PA_ERR = 0,
+ UFS_EVT_DL_ERR,
+ UFS_EVT_NL_ERR,
+ UFS_EVT_TL_ERR,
+ UFS_EVT_DME_ERR,
+
+ /* fatal errors */
+ UFS_EVT_AUTO_HIBERN8_ERR,
+ UFS_EVT_FATAL_ERR,
+ UFS_EVT_LINK_STARTUP_FAIL,
+ UFS_EVT_RESUME_ERR,
+ UFS_EVT_SUSPEND_ERR,
+
+ /* abnormal events */
+ UFS_EVT_DEV_RESET,
+ UFS_EVT_HOST_RESET,
+ UFS_EVT_ABORT,
+
+ UFS_EVT_CNT,
+};
+
/**
* struct uic_command - UIC command structure
* @command: UIC command
@@ -116,16 +139,22 @@ enum uic_link_state {
((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
#define ufshcd_set_ufs_dev_poweroff(h) \
((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
+#define ufshcd_set_ufs_dev_deepsleep(h) \
+ ((h)->curr_dev_pwr_mode = UFS_DEEPSLEEP_PWR_MODE)
#define ufshcd_is_ufs_dev_active(h) \
((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
#define ufshcd_is_ufs_dev_sleep(h) \
((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
#define ufshcd_is_ufs_dev_poweroff(h) \
((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)
+#define ufshcd_is_ufs_dev_deepsleep(h) \
+ ((h)->curr_dev_pwr_mode == UFS_DEEPSLEEP_PWR_MODE)
/*
* UFS Power management levels.
- * Each level is in increasing order of power savings.
+ * Each level is in increasing order of power savings, except DeepSleep,
+ * which draws less power than PowerDown with device power left on, but
+ * more than PowerDown with device power switched off.
*/
enum ufs_pm_level {
UFS_PM_LVL_0, /* UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE */
@@ -134,6 +163,7 @@ enum ufs_pm_level {
UFS_PM_LVL_3, /* UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE */
UFS_PM_LVL_4, /* UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE */
UFS_PM_LVL_5, /* UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE */
+ UFS_PM_LVL_6, /* UFS_DEEPSLEEP_PWR_MODE, UIC_LINK_OFF_STATE */
UFS_PM_LVL_MAX
};
@@ -165,6 +195,7 @@ struct ufs_pm_lvl_states {
* @crypto_key_slot: the key slot to use for inline crypto (-1 if none)
* @data_unit_num: the data unit number for the first block for inline crypto
* @req_abort_skip: skip request abort task flag
+ * @in_use: indicates that this lrb is still in use
*/
struct ufshcd_lrb {
struct utp_transfer_req_desc *utr_descriptor_ptr;
@@ -194,6 +225,7 @@ struct ufshcd_lrb {
#endif
bool req_abort_skip;
+ bool in_use;
};
/**
@@ -229,6 +261,8 @@ struct ufs_dev_cmd {
* @max_freq: maximum frequency supported by the clock
* @min_freq: min frequency that can be used for clock scaling
* @curr_freq: indicates the current frequency that it is set to
+ * @keep_link_active: indicates that the clk should not be disabled if
+ * link is active
* @enabled: variable to check against multiple enable/disable
*/
struct ufs_clk_info {
@@ -238,6 +272,7 @@ struct ufs_clk_info {
u32 max_freq;
u32 min_freq;
u32 curr_freq;
+ bool keep_link_active;
bool enabled;
};
@@ -269,7 +304,6 @@ struct ufs_pwr_mode_info {
* @get_ufs_hci_version: called to get UFS HCI version
* @clk_scale_notify: notifies that clks are scaled up/down
* @setup_clocks: called before touching any of the controller registers
- * @setup_regulators: called before accessing the host controller
* @hce_enable_notify: called before and after HCE enable bit is set to allow
* variant specific Uni-Pro initialization.
* @link_startup_notify: called before and after Link startup is carried out
@@ -289,6 +323,7 @@ struct ufs_pwr_mode_info {
* @phy_initialization: used to initialize phys
* @device_reset: called to issue a reset pulse on the UFS device
* @program_key: program or evict an inline encryption key
+ * @event_notify: called to notify important events
*/
struct ufs_hba_variant_ops {
const char *name;
@@ -299,7 +334,6 @@ struct ufs_hba_variant_ops {
enum ufs_notify_change_status);
int (*setup_clocks)(struct ufs_hba *, bool,
enum ufs_notify_change_status);
- int (*setup_regulators)(struct ufs_hba *, bool);
int (*hce_enable_notify)(struct ufs_hba *,
enum ufs_notify_change_status);
int (*link_startup_notify)(struct ufs_hba *,
@@ -318,12 +352,14 @@ struct ufs_hba_variant_ops {
int (*resume)(struct ufs_hba *, enum ufs_pm_op);
void (*dbg_register_dump)(struct ufs_hba *hba);
int (*phy_initialization)(struct ufs_hba *);
- void (*device_reset)(struct ufs_hba *hba);
+ int (*device_reset)(struct ufs_hba *hba);
void (*config_scaling_param)(struct ufs_hba *hba,
struct devfreq_dev_profile *profile,
void *data);
int (*program_key)(struct ufs_hba *hba,
const union ufs_crypto_cfg_entry *cfg, int slot);
+ void (*event_notify)(struct ufs_hba *hba,
+ enum ufs_event_type evt, void *data);
};
/* clock gating state */
@@ -382,6 +418,7 @@ struct ufs_saved_pwr_info {
* @workq: workqueue to schedule devfreq suspend/resume work
* @suspend_work: worker to suspend devfreq
* @resume_work: worker to resume devfreq
+ * @min_gear: lowest HS gear to scale down to
* @is_allowed: tracks if scaling is currently allowed or not
* @is_busy_started: tracks if busy period has started or not
* @is_suspended: tracks if devfreq is suspended or not
@@ -396,22 +433,23 @@ struct ufs_clk_scaling {
struct workqueue_struct *workq;
struct work_struct suspend_work;
struct work_struct resume_work;
+ u32 min_gear;
bool is_allowed;
bool is_busy_started;
bool is_suspended;
};
-#define UFS_ERR_REG_HIST_LENGTH 8
+#define UFS_EVENT_HIST_LENGTH 8
/**
- * struct ufs_err_reg_hist - keeps history of errors
+ * struct ufs_event_hist - keeps history of errors
* @pos: index to indicate cyclic buffer position
- * @reg: cyclic buffer for registers value
+ * @val: cyclic buffer for event values
* @tstamp: cyclic buffer for time stamp
*/
-struct ufs_err_reg_hist {
+struct ufs_event_hist {
int pos;
- u32 reg[UFS_ERR_REG_HIST_LENGTH];
- ktime_t tstamp[UFS_ERR_REG_HIST_LENGTH];
+ u32 val[UFS_EVENT_HIST_LENGTH];
+ ktime_t tstamp[UFS_EVENT_HIST_LENGTH];
};
/**
@@ -422,19 +460,6 @@ struct ufs_err_reg_hist {
* reset this after link-startup.
* @last_hibern8_exit_tstamp: Set time after the hibern8 exit.
* Clear after the first successful command completion.
- * @pa_err: tracks pa-uic errors
- * @dl_err: tracks dl-uic errors
- * @nl_err: tracks nl-uic errors
- * @tl_err: tracks tl-uic errors
- * @dme_err: tracks dme errors
- * @auto_hibern8_err: tracks auto-hibernate errors
- * @fatal_err: tracks fatal errors
- * @linkup_err: tracks link-startup errors
- * @resume_err: tracks resume errors
- * @suspend_err: tracks suspend errors
- * @dev_reset: tracks device reset events
- * @host_reset: tracks host reset events
- * @tsk_abort: tracks task abort events
*/
struct ufs_stats {
u32 last_intr_status;
@@ -442,25 +467,7 @@ struct ufs_stats {
u32 hibern8_exit_cnt;
ktime_t last_hibern8_exit_tstamp;
-
- /* uic specific errors */
- struct ufs_err_reg_hist pa_err;
- struct ufs_err_reg_hist dl_err;
- struct ufs_err_reg_hist nl_err;
- struct ufs_err_reg_hist tl_err;
- struct ufs_err_reg_hist dme_err;
-
- /* fatal errors */
- struct ufs_err_reg_hist auto_hibern8_err;
- struct ufs_err_reg_hist fatal_err;
- struct ufs_err_reg_hist link_startup_err;
- struct ufs_err_reg_hist resume_err;
- struct ufs_err_reg_hist suspend_err;
-
- /* abnormal events */
- struct ufs_err_reg_hist dev_reset;
- struct ufs_err_reg_hist host_reset;
- struct ufs_err_reg_hist task_abort;
+ struct ufs_event_hist event[UFS_EVT_CNT];
};
enum ufshcd_quirks {
@@ -594,6 +601,21 @@ enum ufshcd_caps {
* inline crypto engine, if it is present
*/
UFSHCD_CAP_CRYPTO = 1 << 8,
+
+ /*
+ * This capability allows the controller regulators to be put into
+ * low power mode (LPM) aggressively during clock gating, which
+ * increases power savings.
+ */
+ UFSHCD_CAP_AGGR_POWER_COLLAPSE = 1 << 9,
+
+ /*
+ * This capability allows the host controller driver to use DeepSleep,
+ * if it is supported by the UFS device. The host controller driver must
+ * support device hardware reset via the hba->vops->device_reset() callback,
+ * in order to exit DeepSleep state.
+ */
+ UFSHCD_CAP_DEEPSLEEP = 1 << 10,
};
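
Both capabilities are opt-in from the variant side. A hypothetical variant init, for illustration (the board-query helper is invented); DeepSleep additionally requires a working device_reset vop, since a hardware reset is the only exit from DeepSleep:

	static int my_ufs_init(struct ufs_hba *hba)
	{
		/* Allow regulators to drop to LPM during clock gating */
		hba->caps |= UFSHCD_CAP_AGGR_POWER_COLLAPSE;

		/* Only advertise DeepSleep if we can hard-reset the device */
		if (my_board_has_device_reset_line())
			hba->caps |= UFSHCD_CAP_DEEPSLEEP;
		return 0;
	}
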
struct ufs_hba_variant_params {
@@ -683,6 +705,7 @@ struct ufs_hba {
* "UFS device" W-LU.
*/
struct scsi_device *sdev_ufs_device;
+ struct scsi_device *sdev_rpmb;
enum ufs_dev_pwr_mode curr_dev_pwr_mode;
enum uic_link_state uic_link_state;
@@ -730,6 +753,7 @@ struct ufs_hba {
u32 intr_mask;
u16 ee_ctrl_mask;
bool is_powered;
+ struct semaphore eh_sem;
/* Work Queues */
struct workqueue_struct *eh_wq;
@@ -831,6 +855,12 @@ return true;
#endif
}
+static inline bool ufshcd_can_aggressive_pc(struct ufs_hba *hba)
+{
+ return !!(ufshcd_is_link_hibern8(hba) &&
+ (hba->caps & UFSHCD_CAP_AGGR_POWER_COLLAPSE));
+}
+
static inline bool ufshcd_is_auto_hibern8_supported(struct ufs_hba *hba)
{
return (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT) &&
@@ -882,8 +912,7 @@ int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
u32 val, unsigned long interval_us,
unsigned long timeout_ms);
void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk);
-void ufshcd_update_reg_hist(struct ufs_err_reg_hist *reg_hist,
- u32 reg);
+void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val);
static inline void check_upiu_size(void)
{
@@ -930,6 +959,9 @@ extern int ufshcd_runtime_idle(struct ufs_hba *hba);
extern int ufshcd_system_suspend(struct ufs_hba *hba);
extern int ufshcd_system_resume(struct ufs_hba *hba);
extern int ufshcd_shutdown(struct ufs_hba *hba);
+extern int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
+ int agreed_gear,
+ int adapt_val);
extern int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
u8 attr_set, u32 mib_val, u8 peer);
extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
@@ -1076,6 +1108,14 @@ static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba,
return 0;
}
+static inline void ufshcd_vops_event_notify(struct ufs_hba *hba,
+ enum ufs_event_type evt,
+ void *data)
+{
+ if (hba->vops && hba->vops->event_notify)
+ hba->vops->event_notify(hba, evt, data);
+}
+
static inline int ufshcd_vops_setup_clocks(struct ufs_hba *hba, bool on,
enum ufs_notify_change_status status)
{
@@ -1084,14 +1124,6 @@ static inline int ufshcd_vops_setup_clocks(struct ufs_hba *hba, bool on,
return 0;
}
-static inline int ufshcd_vops_setup_regulators(struct ufs_hba *hba, bool status)
-{
- if (hba->vops && hba->vops->setup_regulators)
- return hba->vops->setup_regulators(hba, status);
-
- return 0;
-}
-
static inline int ufshcd_vops_hce_enable_notify(struct ufs_hba *hba,
bool status)
{
@@ -1109,6 +1141,14 @@ static inline int ufshcd_vops_link_startup_notify(struct ufs_hba *hba,
return 0;
}
+static inline int ufshcd_vops_phy_initialization(struct ufs_hba *hba)
+{
+ if (hba->vops && hba->vops->phy_initialization)
+ return hba->vops->phy_initialization(hba);
+
+ return 0;
+}
+
static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba,
bool status,
struct ufs_pa_layer_attr *dev_max_params,
@@ -1178,13 +1218,12 @@ static inline void ufshcd_vops_dbg_register_dump(struct ufs_hba *hba)
hba->vops->dbg_register_dump(hba);
}
-static inline void ufshcd_vops_device_reset(struct ufs_hba *hba)
+static inline int ufshcd_vops_device_reset(struct ufs_hba *hba)
{
- if (hba->vops && hba->vops->device_reset) {
- hba->vops->device_reset(hba);
- ufshcd_set_ufs_dev_active(hba);
- ufshcd_update_reg_hist(&hba->ufs_stats.dev_reset, 0);
- }
+ if (hba->vops && hba->vops->device_reset)
+ return hba->vops->device_reset(hba);
+
+ return -EOPNOTSUPP;
}
static inline void ufshcd_vops_config_scaling_param(struct ufs_hba *hba,
diff --git a/drivers/scsi/ufs/unipro.h b/drivers/scsi/ufs/unipro.h
index f6b52ce36de6..8e9e486a4f7b 100644
--- a/drivers/scsi/ufs/unipro.h
+++ b/drivers/scsi/ufs/unipro.h
@@ -237,8 +237,10 @@ enum ufs_unipro_ver {
UFS_UNIPRO_VER_RESERVED = 0,
UFS_UNIPRO_VER_1_40 = 1, /* UniPro version 1.40 */
UFS_UNIPRO_VER_1_41 = 2, /* UniPro version 1.41 */
- UFS_UNIPRO_VER_1_6 = 3, /* UniPro version 1.6 */
- UFS_UNIPRO_VER_MAX = 4, /* UniPro unsupported version */
+ UFS_UNIPRO_VER_1_6 = 3, /* UniPro version 1.6 */
+ UFS_UNIPRO_VER_1_61 = 4, /* UniPro version 1.61 */
+ UFS_UNIPRO_VER_1_8 = 5, /* UniPro version 1.8 */
+ UFS_UNIPRO_VER_MAX = 6, /* UniPro unsupported version */
/* UniPro version field mask in PA_LOCALVERINFO */
UFS_UNIPRO_VER_MASK = 0xF,
};
diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c
index f8e070d67fa3..a14684ffe4c1 100644
--- a/drivers/sh/intc/core.c
+++ b/drivers/sh/intc/core.c
@@ -214,7 +214,7 @@ int __init register_intc_controller(struct intc_desc *desc)
d->window[k].phys = res->start;
d->window[k].size = resource_size(res);
d->window[k].virt = ioremap(res->start,
- resource_size(res));
+ resource_size(res));
if (!d->window[k].virt)
goto err2;
}
diff --git a/drivers/sh/intc/virq-debugfs.c b/drivers/sh/intc/virq-debugfs.c
index 9e62ba9311f0..939915a07d99 100644
--- a/drivers/sh/intc/virq-debugfs.c
+++ b/drivers/sh/intc/virq-debugfs.c
@@ -16,7 +16,7 @@
#include <linux/debugfs.h>
#include "internals.h"
-static int intc_irq_xlate_debug(struct seq_file *m, void *priv)
+static int intc_irq_xlate_show(struct seq_file *m, void *priv)
{
int i;
@@ -37,17 +37,7 @@ static int intc_irq_xlate_debug(struct seq_file *m, void *priv)
return 0;
}
-static int intc_irq_xlate_open(struct inode *inode, struct file *file)
-{
- return single_open(file, intc_irq_xlate_debug, inode->i_private);
-}
-
-static const struct file_operations intc_irq_xlate_fops = {
- .open = intc_irq_xlate_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(intc_irq_xlate);
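
For reference, DEFINE_SHOW_ATTRIBUTE(intc_irq_xlate) expands to approximately the boilerplate deleted above (per <linux/seq_file.h>), which is why the show function had to gain the _show suffix:

	static int intc_irq_xlate_open(struct inode *inode, struct file *file)
	{
		return single_open(file, intc_irq_xlate_show, inode->i_private);
	}

	static const struct file_operations intc_irq_xlate_fops = {
		.owner   = THIS_MODULE,
		.open    = intc_irq_xlate_open,
		.read    = seq_read,
		.llseek  = seq_lseek,
		.release = single_release,
	};

The expansion also picks up .owner = THIS_MODULE, which the hand-rolled version lacked.
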
static int __init intc_irq_xlate_init(void)
{
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
index 425ab6f7e375..d097d070f579 100644
--- a/drivers/soc/Kconfig
+++ b/drivers/soc/Kconfig
@@ -9,6 +9,7 @@ source "drivers/soc/bcm/Kconfig"
source "drivers/soc/fsl/Kconfig"
source "drivers/soc/imx/Kconfig"
source "drivers/soc/ixp4xx/Kconfig"
+source "drivers/soc/litex/Kconfig"
source "drivers/soc/mediatek/Kconfig"
source "drivers/soc/qcom/Kconfig"
source "drivers/soc/renesas/Kconfig"
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index 36452bed86ef..699b758d28e4 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -4,7 +4,7 @@
#
obj-$(CONFIG_ARCH_ACTIONS) += actions/
-obj-$(CONFIG_SOC_ASPEED) += aspeed/
+obj-y += aspeed/
obj-$(CONFIG_ARCH_AT91) += atmel/
obj-y += bcm/
obj-$(CONFIG_ARCH_DOVE) += dove/
@@ -14,6 +14,7 @@ obj-$(CONFIG_ARCH_GEMINI) += gemini/
obj-y += imx/
obj-$(CONFIG_ARCH_IXP4XX) += ixp4xx/
obj-$(CONFIG_SOC_XWAY) += lantiq/
+obj-$(CONFIG_LITEX_SOC_CONTROLLER) += litex/
obj-y += mediatek/
obj-y += amlogic/
obj-y += qcom/
diff --git a/drivers/soc/amlogic/Kconfig b/drivers/soc/amlogic/Kconfig
index 321c5e26a268..174a9b011461 100644
--- a/drivers/soc/amlogic/Kconfig
+++ b/drivers/soc/amlogic/Kconfig
@@ -9,7 +9,7 @@ config MESON_CANVAS
Say yes to support the canvas IP for Amlogic SoCs.
config MESON_CLK_MEASURE
- bool "Amlogic Meson SoC Clock Measure driver"
+ tristate "Amlogic Meson SoC Clock Measure driver"
depends on ARCH_MESON || COMPILE_TEST
default ARCH_MESON
select REGMAP_MMIO
@@ -19,7 +19,7 @@ config MESON_CLK_MEASURE
config MESON_GX_SOCINFO
bool "Amlogic Meson GX SoC Information driver"
- depends on ARCH_MESON || COMPILE_TEST
+ depends on (ARM64 && ARCH_MESON) || COMPILE_TEST
default ARCH_MESON
select SOC_BUS
help
@@ -27,7 +27,7 @@ config MESON_GX_SOCINFO
information about the type, package and version.
config MESON_GX_PM_DOMAINS
- bool "Amlogic Meson GX Power Domains driver"
+ tristate "Amlogic Meson GX Power Domains driver"
depends on ARCH_MESON || COMPILE_TEST
depends on PM && OF
default ARCH_MESON
@@ -38,7 +38,7 @@ config MESON_GX_PM_DOMAINS
Generic Power Domains.
config MESON_EE_PM_DOMAINS
- bool "Amlogic Meson Everything-Else Power Domains driver"
+ tristate "Amlogic Meson Everything-Else Power Domains driver"
depends on ARCH_MESON || COMPILE_TEST
depends on PM && OF
default ARCH_MESON
@@ -49,7 +49,7 @@ config MESON_EE_PM_DOMAINS
Generic Power Domains.
config MESON_SECURE_PM_DOMAINS
- bool "Amlogic Meson Secure Power Domains driver"
+ tristate "Amlogic Meson Secure Power Domains driver"
depends on (ARCH_MESON || COMPILE_TEST) && MESON_SM
depends on PM && OF
depends on HAVE_ARM_SMCCC
@@ -63,7 +63,7 @@ config MESON_SECURE_PM_DOMAINS
config MESON_MX_SOCINFO
bool "Amlogic Meson MX SoC Information driver"
- depends on ARCH_MESON || COMPILE_TEST
+ depends on (ARM && ARCH_MESON) || COMPILE_TEST
default ARCH_MESON
select SOC_BUS
help
diff --git a/drivers/soc/amlogic/meson-canvas.c b/drivers/soc/amlogic/meson-canvas.c
index c655f5f92b12..d0329ad170d1 100644
--- a/drivers/soc/amlogic/meson-canvas.c
+++ b/drivers/soc/amlogic/meson-canvas.c
@@ -72,8 +72,10 @@ struct meson_canvas *meson_canvas_get(struct device *dev)
* current state, this driver probe cannot return -EPROBE_DEFER
*/
canvas = dev_get_drvdata(&canvas_pdev->dev);
- if (!canvas)
+ if (!canvas) {
+ put_device(&canvas_pdev->dev);
return ERR_PTR(-EINVAL);
+ }
return canvas;
}
diff --git a/drivers/soc/amlogic/meson-clk-measure.c b/drivers/soc/amlogic/meson-clk-measure.c
index 0fa47d77577d..e1957476a006 100644
--- a/drivers/soc/amlogic/meson-clk-measure.c
+++ b/drivers/soc/amlogic/meson-clk-measure.c
@@ -10,6 +10,7 @@
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/regmap.h>
+#include <linux/module.h>
static DEFINE_MUTEX(measure_lock);
@@ -681,6 +682,7 @@ static const struct of_device_id meson_msr_match_table[] = {
},
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, meson_msr_match_table);
static struct platform_driver meson_msr_driver = {
.probe = meson_msr_probe,
@@ -689,4 +691,5 @@ static struct platform_driver meson_msr_driver = {
.of_match_table = meson_msr_match_table,
},
};
-builtin_platform_driver(meson_msr_driver);
+module_platform_driver(meson_msr_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/amlogic/meson-ee-pwrc.c b/drivers/soc/amlogic/meson-ee-pwrc.c
index 5164a4dc2352..50bf5d2b828b 100644
--- a/drivers/soc/amlogic/meson-ee-pwrc.c
+++ b/drivers/soc/amlogic/meson-ee-pwrc.c
@@ -14,6 +14,7 @@
#include <linux/reset-controller.h>
#include <linux/reset.h>
#include <linux/clk.h>
+#include <linux/module.h>
#include <dt-bindings/power/meson8-power.h>
#include <dt-bindings/power/meson-axg-power.h>
#include <dt-bindings/power/meson-g12a-power.h>
@@ -412,8 +413,7 @@ static int meson_ee_pwrc_init_domain(struct platform_device *pdev,
dev_warn(&pdev->dev, "Invalid resets count %d for domain %s\n",
count, dom->desc.name);
- dom->rstc = devm_reset_control_array_get(&pdev->dev, false,
- false);
+ dom->rstc = devm_reset_control_array_get_exclusive(&pdev->dev);
if (IS_ERR(dom->rstc))
return PTR_ERR(dom->rstc);
}
@@ -602,6 +602,7 @@ static const struct of_device_id meson_ee_pwrc_match_table[] = {
},
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, meson_ee_pwrc_match_table);
static struct platform_driver meson_ee_pwrc_driver = {
.probe = meson_ee_pwrc_probe,
@@ -611,4 +612,5 @@ static struct platform_driver meson_ee_pwrc_driver = {
.of_match_table = meson_ee_pwrc_match_table,
},
};
-builtin_platform_driver(meson_ee_pwrc_driver);
+module_platform_driver(meson_ee_pwrc_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/amlogic/meson-gx-pwrc-vpu.c b/drivers/soc/amlogic/meson-gx-pwrc-vpu.c
index 21b4bc811c00..b4615b288625 100644
--- a/drivers/soc/amlogic/meson-gx-pwrc-vpu.c
+++ b/drivers/soc/amlogic/meson-gx-pwrc-vpu.c
@@ -14,6 +14,7 @@
#include <linux/of_device.h>
#include <linux/reset.h>
#include <linux/clk.h>
+#include <linux/module.h>
/* AO Offsets */
@@ -303,7 +304,7 @@ static int meson_gx_pwrc_vpu_probe(struct platform_device *pdev)
return PTR_ERR(regmap_hhi);
}
- rstc = devm_reset_control_array_get(&pdev->dev, false, false);
+ rstc = devm_reset_control_array_get_exclusive(&pdev->dev);
if (IS_ERR(rstc)) {
if (PTR_ERR(rstc) != -EPROBE_DEFER)
dev_err(&pdev->dev, "failed to get reset lines\n");
@@ -364,6 +365,7 @@ static const struct of_device_id meson_gx_pwrc_vpu_match_table[] = {
},
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, meson_gx_pwrc_vpu_match_table);
static struct platform_driver meson_gx_pwrc_vpu_driver = {
.probe = meson_gx_pwrc_vpu_probe,
@@ -373,4 +375,5 @@ static struct platform_driver meson_gx_pwrc_vpu_driver = {
.of_match_table = meson_gx_pwrc_vpu_match_table,
},
};
-builtin_platform_driver(meson_gx_pwrc_vpu_driver);
+module_platform_driver(meson_gx_pwrc_vpu_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/amlogic/meson-secure-pwrc.c b/drivers/soc/amlogic/meson-secure-pwrc.c
index 5fb29a475879..59bd195fa9c9 100644
--- a/drivers/soc/amlogic/meson-secure-pwrc.c
+++ b/drivers/soc/amlogic/meson-secure-pwrc.c
@@ -13,6 +13,7 @@
#include <dt-bindings/power/meson-a1-power.h>
#include <linux/arm-smccc.h>
#include <linux/firmware/meson/meson_sm.h>
+#include <linux/module.h>
#define PWRC_ON 1
#define PWRC_OFF 0
@@ -193,6 +194,7 @@ static const struct of_device_id meson_secure_pwrc_match_table[] = {
},
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, meson_secure_pwrc_match_table);
static struct platform_driver meson_secure_pwrc_driver = {
.probe = meson_secure_pwrc_probe,
@@ -201,4 +203,5 @@ static struct platform_driver meson_secure_pwrc_driver = {
.of_match_table = meson_secure_pwrc_match_table,
},
};
-builtin_platform_driver(meson_secure_pwrc_driver);
+module_platform_driver(meson_secure_pwrc_driver);
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/soc/aspeed/Kconfig b/drivers/soc/aspeed/Kconfig
index c95fa30f1a76..243ca196e6ad 100644
--- a/drivers/soc/aspeed/Kconfig
+++ b/drivers/soc/aspeed/Kconfig
@@ -1,32 +1,47 @@
# SPDX-License-Identifier: GPL-2.0-only
-menu "Aspeed SoC drivers"
-config SOC_ASPEED
- def_bool y
- depends on ARCH_ASPEED || COMPILE_TEST
+if ARCH_ASPEED || COMPILE_TEST
+
+menu "ASPEED SoC drivers"
config ASPEED_LPC_CTRL
- depends on SOC_ASPEED && REGMAP && MFD_SYSCON
- tristate "Aspeed ast2400/2500 HOST LPC to BMC bridge control"
+ tristate "ASPEED LPC firmware cycle control"
+ select REGMAP
+ select MFD_SYSCON
+ default ARCH_ASPEED
help
- Control Aspeed ast2400/2500 HOST LPC to BMC mappings through
- ioctl()s, the driver also provides a read/write interface to a BMC ram
- region where the host LPC read/write region can be buffered.
+ Control LPC firmware cycle mappings through ioctl()s. The driver
+ also provides a read/write interface to a BMC ram region where the
+ host LPC read/write region can be buffered.
config ASPEED_LPC_SNOOP
- tristate "Aspeed ast2500 HOST LPC snoop support"
- depends on SOC_ASPEED && REGMAP && MFD_SYSCON
+ tristate "ASPEED LPC snoop support"
+ select REGMAP
+ select MFD_SYSCON
+ default ARCH_ASPEED
help
Provides a driver to control the LPC snoop interface which
allows the BMC to listen on and save the data written by
the host to an arbitrary LPC I/O port.
config ASPEED_P2A_CTRL
- depends on SOC_ASPEED && REGMAP && MFD_SYSCON
- tristate "Aspeed ast2400/2500 HOST P2A VGA MMIO to BMC bridge control"
+ tristate "ASPEED P2A (VGA MMIO to BMC) bridge control"
+ select REGMAP
+ select MFD_SYSCON
+ default ARCH_ASPEED
+ help
+ Control ASPEED P2A VGA MMIO to BMC mappings through ioctl()s. The
+ driver also provides an interface for userspace mappings to a
+ pre-defined region.
+
+config ASPEED_SOCINFO
+ bool "ASPEED SoC Information driver"
+ default ARCH_ASPEED
+ select SOC_BUS
help
- Control Aspeed ast2400/2500 HOST P2A VGA MMIO to BMC mappings through
- ioctl()s, the driver also provides an interface for userspace mappings to
- a pre-defined region.
+ Say yes to support decoding of ASPEED BMC information.
endmenu
+
+endif
diff --git a/drivers/soc/aspeed/Makefile b/drivers/soc/aspeed/Makefile
index b64be47f2b1f..fcab7192e1a4 100644
--- a/drivers/soc/aspeed/Makefile
+++ b/drivers/soc/aspeed/Makefile
@@ -2,3 +2,4 @@
obj-$(CONFIG_ASPEED_LPC_CTRL) += aspeed-lpc-ctrl.o
obj-$(CONFIG_ASPEED_LPC_SNOOP) += aspeed-lpc-snoop.o
obj-$(CONFIG_ASPEED_P2A_CTRL) += aspeed-p2a-ctrl.o
+obj-$(CONFIG_ASPEED_SOCINFO) += aspeed-socinfo.o
diff --git a/drivers/soc/aspeed/aspeed-lpc-ctrl.c b/drivers/soc/aspeed/aspeed-lpc-ctrl.c
index 01ed21e8bfee..439bcd6b8c4a 100644
--- a/drivers/soc/aspeed/aspeed-lpc-ctrl.c
+++ b/drivers/soc/aspeed/aspeed-lpc-ctrl.c
@@ -4,6 +4,7 @@
*/
#include <linux/clk.h>
+#include <linux/log2.h>
#include <linux/mfd/syscon.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
@@ -21,6 +22,9 @@
#define HICR5_ENL2H BIT(8)
#define HICR5_ENFWH BIT(10)
+#define HICR6 0x4
+#define SW_FWH2AHB BIT(17)
+
#define HICR7 0x8
#define HICR8 0xc
@@ -30,8 +34,9 @@ struct aspeed_lpc_ctrl {
struct clk *clk;
phys_addr_t mem_base;
resource_size_t mem_size;
- u32 pnor_size;
- u32 pnor_base;
+ u32 pnor_size;
+ u32 pnor_base;
+ bool fwh2ahb;
};
static struct aspeed_lpc_ctrl *file_aspeed_lpc_ctrl(struct file *file)
@@ -177,6 +182,16 @@ static long aspeed_lpc_ctrl_ioctl(struct file *file, unsigned int cmd,
return rc;
/*
+ * Switch to FWH2AHB mode, AST2600 only.
+ *
+ * The other bits in this register are interrupt status bits
+ * that are cleared by writing 1. As we don't want to clear
+ * them, set only the bit of interest.
+ */
+ if (lpc_ctrl->fwh2ahb)
+ regmap_write(lpc_ctrl->regmap, HICR6, SW_FWH2AHB);
+
+ /*
	 * Enable LPC FWH cycles. This is required for the host to
* access the regions specified.
*/
@@ -241,6 +256,18 @@ static int aspeed_lpc_ctrl_probe(struct platform_device *pdev)
lpc_ctrl->mem_size = resource_size(&resm);
lpc_ctrl->mem_base = resm.start;
+
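+	/*
+	 * The checks below assume (as the HICR7/HICR8 window registers
+	 * used elsewhere in this driver suggest) that the bridge encodes
+	 * the window size as an address mask, which is only expressible
+	 * for a naturally aligned, power-of-two sized reserved region.
+	 */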
+ if (!is_power_of_2(lpc_ctrl->mem_size)) {
+ dev_err(dev, "Reserved memory size must be a power of 2, got %u\n",
+ (unsigned int)lpc_ctrl->mem_size);
+ return -EINVAL;
+ }
+
+ if (!IS_ALIGNED(lpc_ctrl->mem_base, lpc_ctrl->mem_size)) {
+ dev_err(dev, "Reserved memory must be naturally aligned for size %u\n",
+ (unsigned int)lpc_ctrl->mem_size);
+ return -EINVAL;
+ }
}
lpc_ctrl->regmap = syscon_node_to_regmap(
@@ -261,6 +288,9 @@ static int aspeed_lpc_ctrl_probe(struct platform_device *pdev)
return rc;
}
+ if (of_device_is_compatible(dev->of_node, "aspeed,ast2600-lpc-ctrl"))
+ lpc_ctrl->fwh2ahb = true;
+
lpc_ctrl->miscdev.minor = MISC_DYNAMIC_MINOR;
lpc_ctrl->miscdev.name = DEVICE_NAME;
lpc_ctrl->miscdev.fops = &aspeed_lpc_ctrl_fops;
@@ -291,6 +321,7 @@ static int aspeed_lpc_ctrl_remove(struct platform_device *pdev)
static const struct of_device_id aspeed_lpc_ctrl_match[] = {
{ .compatible = "aspeed,ast2400-lpc-ctrl" },
{ .compatible = "aspeed,ast2500-lpc-ctrl" },
+ { .compatible = "aspeed,ast2600-lpc-ctrl" },
{ },
};
@@ -308,4 +339,4 @@ module_platform_driver(aspeed_lpc_ctrl_driver);
MODULE_DEVICE_TABLE(of, aspeed_lpc_ctrl_match);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cyril Bur <cyrilbur@gmail.com>");
-MODULE_DESCRIPTION("Control for aspeed 2400/2500 LPC HOST to BMC mappings");
+MODULE_DESCRIPTION("Control for ASPEED LPC HOST to BMC mappings");
diff --git a/drivers/soc/aspeed/aspeed-lpc-snoop.c b/drivers/soc/aspeed/aspeed-lpc-snoop.c
index f3d8d53ab84d..682ba0eb4eba 100644
--- a/drivers/soc/aspeed/aspeed-lpc-snoop.c
+++ b/drivers/soc/aspeed/aspeed-lpc-snoop.c
@@ -325,6 +325,8 @@ static const struct of_device_id aspeed_lpc_snoop_match[] = {
.data = &ast2400_model_data },
{ .compatible = "aspeed,ast2500-lpc-snoop",
.data = &ast2500_model_data },
+ { .compatible = "aspeed,ast2600-lpc-snoop",
+ .data = &ast2500_model_data },
{ },
};
diff --git a/drivers/soc/aspeed/aspeed-socinfo.c b/drivers/soc/aspeed/aspeed-socinfo.c
new file mode 100644
index 000000000000..773930e0cb10
--- /dev/null
+++ b/drivers/soc/aspeed/aspeed-socinfo.c
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Copyright 2019 IBM Corp. */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/sys_soc.h>
+
+static struct {
+ const char *name;
+ const u32 id;
+} const rev_table[] = {
+ /* AST2400 */
+ { "AST2400", 0x02000303 },
+ { "AST1400", 0x02010103 },
+ { "AST1250", 0x02010303 },
+ /* AST2500 */
+ { "AST2500", 0x04000303 },
+ { "AST2510", 0x04000103 },
+ { "AST2520", 0x04000203 },
+ { "AST2530", 0x04000403 },
+ /* AST2600 */
+ { "AST2600", 0x05000303 },
+ { "AST2620", 0x05010203 },
+};
+
+static const char *siliconid_to_name(u32 siliconid)
+{
+ unsigned int id = siliconid & 0xff00ffff;
+ unsigned int i;
+
+ for (i = 0 ; i < ARRAY_SIZE(rev_table) ; ++i) {
+ if (rev_table[i].id == id)
+ return rev_table[i].name;
+ }
+
+ return "Unknown";
+}
+
+static const char *siliconid_to_rev(u32 siliconid)
+{
+ unsigned int rev = (siliconid >> 16) & 0xff;
+
+ switch (rev) {
+ case 0:
+ return "A0";
+ case 1:
+ return "A1";
+ case 3:
+ return "A2";
+ }
+
+ return "??";
+}
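+
+/*
+ * Worked example (illustrative only, using a hypothetical id): a silicon
+ * id of 0x04010303 is masked with 0xff00ffff to 0x04000303, which
+ * siliconid_to_name() maps to "AST2500", while its bits [23:16] (0x01)
+ * make siliconid_to_rev() return "A1".
+ */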
+
+static int __init aspeed_socinfo_init(void)
+{
+ struct soc_device_attribute *attrs;
+ struct soc_device *soc_dev;
+ struct device_node *np;
+ void __iomem *reg;
+ bool has_chipid = false;
+ u32 siliconid;
+ u32 chipid[2];
+ const char *machine = NULL;
+
+ np = of_find_compatible_node(NULL, NULL, "aspeed,silicon-id");
+ if (!of_device_is_available(np)) {
+ of_node_put(np);
+ return -ENODEV;
+ }
+
+ reg = of_iomap(np, 0);
+ if (!reg) {
+ of_node_put(np);
+ return -ENODEV;
+ }
+ siliconid = readl(reg);
+ iounmap(reg);
+
+ /* This is optional, the ast2400 does not have it */
+ reg = of_iomap(np, 1);
+ if (reg) {
+ has_chipid = true;
+ chipid[0] = readl(reg);
+ chipid[1] = readl(reg + 4);
+ iounmap(reg);
+ }
+ of_node_put(np);
+
+ attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
+ if (!attrs)
+ return -ENODEV;
+
+ /*
+ * Machine: Romulus BMC
+ * Family: AST2500
+ * Revision: A1
+ * SoC ID: raw silicon revision id
+ * Serial Number: 64-bit chipid
+ */
+
+ np = of_find_node_by_path("/");
+ of_property_read_string(np, "model", &machine);
+ if (machine)
+ attrs->machine = kstrdup(machine, GFP_KERNEL);
+ of_node_put(np);
+
+ attrs->family = siliconid_to_name(siliconid);
+ attrs->revision = siliconid_to_rev(siliconid);
+ attrs->soc_id = kasprintf(GFP_KERNEL, "%08x", siliconid);
+
+ if (has_chipid)
+ attrs->serial_number = kasprintf(GFP_KERNEL, "%08x%08x",
+ chipid[1], chipid[0]);
+
+ soc_dev = soc_device_register(attrs);
+ if (IS_ERR(soc_dev)) {
+ kfree(attrs->soc_id);
+ kfree(attrs->serial_number);
+ kfree(attrs);
+ return PTR_ERR(soc_dev);
+ }
+
+ pr_info("ASPEED %s rev %s (%s)\n",
+ attrs->family,
+ attrs->revision,
+ attrs->soc_id);
+
+ return 0;
+}
+early_initcall(aspeed_socinfo_init);
diff --git a/drivers/soc/atmel/soc.c b/drivers/soc/atmel/soc.c
index 55a1f57a4d8c..698d21f50516 100644
--- a/drivers/soc/atmel/soc.c
+++ b/drivers/soc/atmel/soc.c
@@ -69,6 +69,12 @@ static const struct at91_soc __initconst socs[] = {
#endif
#ifdef CONFIG_SOC_SAM9X60
AT91_SOC(SAM9X60_CIDR_MATCH, SAM9X60_EXID_MATCH, "sam9x60", "sam9x60"),
+ AT91_SOC(SAM9X60_CIDR_MATCH, SAM9X60_D5M_EXID_MATCH,
+ "sam9x60 64MiB DDR2 SiP", "sam9x60"),
+ AT91_SOC(SAM9X60_CIDR_MATCH, SAM9X60_D1G_EXID_MATCH,
+ "sam9x60 128MiB DDR2 SiP", "sam9x60"),
+ AT91_SOC(SAM9X60_CIDR_MATCH, SAM9X60_D6K_EXID_MATCH,
+ "sam9x60 8MiB SDRAM SiP", "sam9x60"),
#endif
#ifdef CONFIG_SOC_SAMA5
AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D21CU_EXID_MATCH,
@@ -265,8 +271,21 @@ struct soc_device * __init at91_soc_init(const struct at91_soc *socs)
return soc_dev;
}
+static const struct of_device_id at91_soc_allowed_list[] __initconst = {
+ { .compatible = "atmel,at91rm9200", },
+ { .compatible = "atmel,at91sam9", },
+ { .compatible = "atmel,sama5", },
+ { .compatible = "atmel,samv7", },
+ { }
+};
+
static int __init atmel_soc_device_init(void)
{
+ struct device_node *np = of_find_node_by_path("/");
+
+ if (!of_match_node(at91_soc_allowed_list, np))
+ return 0;
+
at91_soc_init(socs);
return 0;
diff --git a/drivers/soc/atmel/soc.h b/drivers/soc/atmel/soc.h
index ee652e4841a5..5849846a69d6 100644
--- a/drivers/soc/atmel/soc.h
+++ b/drivers/soc/atmel/soc.h
@@ -60,6 +60,9 @@ at91_soc_init(const struct at91_soc *socs);
#define AT91SAM9CN11_EXID_MATCH 0x00000009
#define SAM9X60_EXID_MATCH 0x00000000
+#define SAM9X60_D5M_EXID_MATCH 0x00000001
+#define SAM9X60_D1G_EXID_MATCH 0x00000010
+#define SAM9X60_D6K_EXID_MATCH 0x00000011
#define AT91SAM9XE128_CIDR_MATCH 0x329973a0
#define AT91SAM9XE256_CIDR_MATCH 0x329a93a0
diff --git a/drivers/soc/bcm/brcmstb/pm/pm-arm.c b/drivers/soc/bcm/brcmstb/pm/pm-arm.c
index b1062334e608..a673fdffe216 100644
--- a/drivers/soc/bcm/brcmstb/pm/pm-arm.c
+++ b/drivers/soc/bcm/brcmstb/pm/pm-arm.c
@@ -111,6 +111,8 @@ enum bsp_initiate_command {
static struct brcmstb_pm_control ctrl;
+noinline int brcmstb_pm_s3_finish(void);
+
static int (*brcmstb_pm_do_s2_sram)(void __iomem *aon_ctrl_base,
void __iomem *ddr_phy_pll_status);
diff --git a/drivers/soc/fsl/dpio/qbman-portal.c b/drivers/soc/fsl/dpio/qbman-portal.c
index 659b4a570d5b..f13da4d7d1c5 100644
--- a/drivers/soc/fsl/dpio/qbman-portal.c
+++ b/drivers/soc/fsl/dpio/qbman-portal.c
@@ -424,7 +424,7 @@ int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
/**
* qbman_swp_interrupt_set_inhibit() - write interrupt mask register
* @p: the given software portal object
- * @mask: The mask to set in SWP_IIR register
+ * @inhibit: whether to inhibit the IRQs
*/
void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
{
@@ -510,7 +510,7 @@ enum qb_enqueue_commands {
#define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT 4
#define QB_ENQUEUE_CMD_DCA_EN_SHIFT 7
-/**
+/*
* qbman_eq_desc_clear() - Clear the contents of a descriptor to
* default/starting state.
*/
@@ -522,7 +522,7 @@ void qbman_eq_desc_clear(struct qbman_eq_desc *d)
/**
* qbman_eq_desc_set_no_orp() - Set enqueue descriptor without orp
* @d: the enqueue descriptor.
- * @response_success: 1 = enqueue with response always; 0 = enqueue with
+ * @respond_success: 1 = enqueue with response always; 0 = enqueue with
* rejections returned on a FQ.
*/
void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
@@ -932,7 +932,7 @@ int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
/**
* qbman_swp_push_get() - Get the push dequeue setup
- * @p: the software portal object
+ * @s: the software portal object
* @channel_idx: the channel index to query
* @enabled: returned boolean to show whether the push dequeue is enabled
* for the given channel
@@ -947,7 +947,7 @@ void qbman_swp_push_get(struct qbman_swp *s, u8 channel_idx, int *enabled)
/**
* qbman_swp_push_set() - Enable or disable push dequeue
- * @p: the software portal object
+ * @s: the software portal object
* @channel_idx: the channel index (0 to 15)
* @enable: enable or disable push dequeue
*/
@@ -1046,6 +1046,7 @@ void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes)
/**
* qbman_pull_desc_set_fq() - Set fqid from which the dequeue command dequeues
+ * @d: the pull dequeue descriptor to be set
* @fqid: the frame queue index of the given FQ
*/
void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid)
@@ -1057,6 +1058,7 @@ void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid)
/**
* qbman_pull_desc_set_wq() - Set wqid from which the dequeue command dequeues
+ * @d: the pull dequeue descriptor to be set
* @wqid: composed of channel id and wqid within the channel
* @dct: the dequeue command type
*/
@@ -1071,6 +1073,7 @@ void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
/**
* qbman_pull_desc_set_channel() - Set channelid from which the dequeue command
* dequeues
+ * @d: the pull dequeue descriptor to be set
* @chid: the channel id to be dequeued
* @dct: the dequeue command type
*/
@@ -1398,6 +1401,7 @@ int qbman_result_has_new_result(struct qbman_swp *s, const struct dpaa2_dq *dq)
/**
* qbman_release_desc_clear() - Clear the contents of a descriptor to
* default/starting state.
+ * @d: the release descriptor to be cleared
*/
void qbman_release_desc_clear(struct qbman_release_desc *d)
{
@@ -1407,6 +1411,8 @@ void qbman_release_desc_clear(struct qbman_release_desc *d)
/**
* qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to
+ * @d: the release descriptor to be set
+ * @bpid: the bpid value to be set
*/
void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid)
{
@@ -1416,6 +1422,8 @@ void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid)
/**
* qbman_release_desc_set_rcdi() - Determines whether or not the portal's RCDI
* interrupt source should be asserted after the release command is completed.
+ * @d: the release descriptor to be set
+ * @enable: enable (1) or disable (0) value
*/
void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
{
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index 101def7dc73d..a1b9be1d105a 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -2622,7 +2622,7 @@ int qman_shutdown_fq(u32 fqid)
union qm_mc_command *mcc;
union qm_mc_result *mcr;
int orl_empty, drain = 0, ret = 0;
- u32 channel, wq, res;
+ u32 channel, res;
u8 state;
p = get_affine_portal();
@@ -2655,7 +2655,7 @@ int qman_shutdown_fq(u32 fqid)
DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
/* Need to store these since the MCR gets reused */
channel = qm_fqd_get_chan(&mcr->queryfq.fqd);
- wq = qm_fqd_get_wq(&mcr->queryfq.fqd);
+ qm_fqd_get_wq(&mcr->queryfq.fqd);
if (channel < qm_channel_pool1) {
channel_portal = get_portal_for_channel(channel);
@@ -2697,7 +2697,6 @@ int qman_shutdown_fq(u32 fqid)
* to dequeue from the channel the FQ is scheduled on
*/
int found_fqrn = 0;
- u16 dequeue_wq = 0;
/* Flag that we need to drain FQ */
drain = 1;
@@ -2705,11 +2704,8 @@ int qman_shutdown_fq(u32 fqid)
if (channel >= qm_channel_pool1 &&
channel < qm_channel_pool1 + 15) {
/* Pool channel, enable the bit in the portal */
- dequeue_wq = (channel -
- qm_channel_pool1 + 1)<<4 | wq;
} else if (channel < qm_channel_pool1) {
/* Dedicated channel */
- dequeue_wq = wq;
} else {
dev_err(dev, "Can't recover FQ 0x%x, ch: 0x%x",
fqid, channel);
diff --git a/drivers/soc/fsl/qe/qe_common.c b/drivers/soc/fsl/qe/qe_common.c
index 75075591f630..497a7e0fd027 100644
--- a/drivers/soc/fsl/qe/qe_common.c
+++ b/drivers/soc/fsl/qe/qe_common.c
@@ -231,7 +231,7 @@ EXPORT_SYMBOL(cpm_muram_offset);
/**
* cpm_muram_dma - turn a muram virtual address into a DMA address
- * @offset: virtual address from cpm_muram_addr() to convert
+ * @addr: virtual address from cpm_muram_addr() to convert
*/
dma_addr_t cpm_muram_dma(void __iomem *addr)
{
diff --git a/drivers/soc/fsl/rcpm.c b/drivers/soc/fsl/rcpm.c
index a093dbe6d2cb..4ace28cab314 100644
--- a/drivers/soc/fsl/rcpm.c
+++ b/drivers/soc/fsl/rcpm.c
@@ -2,7 +2,7 @@
//
// rcpm.c - Freescale QorIQ RCPM driver
//
-// Copyright 2019 NXP
+// Copyright 2019-2020 NXP
//
// Author: Ran Wang <ran.wang_1@nxp.com>
@@ -22,6 +22,28 @@ struct rcpm {
bool little_endian;
};
+#define SCFG_SPARECR8 0x051c
+
+static void copy_ippdexpcr1_setting(u32 val)
+{
+ struct device_node *np;
+ void __iomem *regs;
+ u32 reg_val;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,ls1021a-scfg");
+ if (!np)
+ return;
+
+ regs = of_iomap(np, 0);
+ if (!regs)
+ return;
+
+ reg_val = ioread32be(regs + SCFG_SPARECR8);
+ iowrite32be(val | reg_val, regs + SCFG_SPARECR8);
+
+ iounmap(regs);
+}
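+
+/* Helper for the LS1021A erratum A-008646 workaround; see rcpm_pm_prepare(). */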
+
/**
* rcpm_pm_prepare - performs device-level tasks associated with power
* management, such as programming related to the wakeup source control.
@@ -90,6 +112,17 @@ static int rcpm_pm_prepare(struct device *dev)
tmp |= ioread32be(address);
iowrite32be(tmp, address);
}
+ /*
+	 * Workaround for erratum A-008646 on SoC LS1021A:
+	 * the register ippdexpcr1 is buggy; reading the
+	 * configuration register RCPM_IPPDEXPCR1 always
+	 * returns zero. So save ippdexpcr1's value to the
+	 * register SCFG_SPARECR8, and the value of ippdexpcr1
+	 * will be read back from SCFG_SPARECR8.
+ */
+ if (dev_of_node(dev) && (i == 1))
+ if (of_device_is_compatible(np, "fsl,ls1021a-rcpm"))
+ copy_ippdexpcr1_setting(tmp);
}
return 0;
diff --git a/drivers/soc/imx/Kconfig b/drivers/soc/imx/Kconfig
index a9370f4aacca..05812f8ae734 100644
--- a/drivers/soc/imx/Kconfig
+++ b/drivers/soc/imx/Kconfig
@@ -13,7 +13,7 @@ config SOC_IMX8M
depends on ARCH_MXC || COMPILE_TEST
default ARCH_MXC && ARM64
select SOC_BUS
- select ARM_GIC_V3 if ARCH_MXC
+ select ARM_GIC_V3 if ARCH_MXC && ARCH_MULTI_V7
help
	  If you say yes here you get support for the NXP i.MX8M family.
	  It will provide the SoC info like SoC family,
diff --git a/drivers/soc/litex/Kconfig b/drivers/soc/litex/Kconfig
new file mode 100644
index 000000000000..7a7c38282e11
--- /dev/null
+++ b/drivers/soc/litex/Kconfig
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0
+
+menu "Enable LiteX SoC Builder specific drivers"
+
+config LITEX
+ bool
+
+config LITEX_SOC_CONTROLLER
+ tristate "Enable LiteX SoC Controller driver"
+ depends on OF || COMPILE_TEST
+ depends on HAS_IOMEM
+ select LITEX
+ help
+ This option enables the SoC Controller Driver which verifies
+ LiteX CSR access and provides common litex_get_reg/litex_set_reg
+ accessors.
+ All drivers that use functions from litex.h must depend on
+ LITEX.
+
+endmenu
diff --git a/drivers/soc/litex/Makefile b/drivers/soc/litex/Makefile
new file mode 100644
index 000000000000..98ff7325b1c0
--- /dev/null
+++ b/drivers/soc/litex/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_LITEX_SOC_CONTROLLER) += litex_soc_ctrl.o
diff --git a/drivers/soc/litex/litex_soc_ctrl.c b/drivers/soc/litex/litex_soc_ctrl.c
new file mode 100644
index 000000000000..9b0766384570
--- /dev/null
+++ b/drivers/soc/litex/litex_soc_ctrl.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * LiteX SoC Controller Driver
+ *
+ * Copyright (C) 2020 Antmicro <www.antmicro.com>
+ *
+ */
+
+#include <linux/litex.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+
+/*
+ * LiteX SoC Generator, depending on the configuration, can split a single
+ * logical CSR (Control&Status Register) into a series of consecutive physical
+ * registers.
+ *
+ * For example, in the configuration with 8-bit CSR Bus, 32-bit aligned (the
+ * default one for 32-bit CPUs) a 32-bit logical CSR will be generated as four
+ * 32-bit physical registers, each one containing one byte of meaningful data.
+ *
+ * For details see: https://github.com/enjoy-digital/litex/wiki/CSR-Bus
+ *
+ * The purpose of `litex_set_reg`/`litex_get_reg` is to implement the logic
+ * of writing to/reading from the LiteX CSR in a single place that can be
+ * then reused by all LiteX drivers.
+ */
+
+/**
+ * litex_set_reg() - Writes the value to the LiteX CSR (Control&Status Register)
+ * @reg: Address of the CSR
+ * @reg_size: The width of the CSR expressed in the number of bytes
+ * @val: Value to be written to the CSR
+ *
+ * In the currently supported LiteX configuration (8-bit CSR Bus, 32-bit aligned),
+ * a 32-bit LiteX CSR is generated as 4 consecutive 32-bit physical registers,
+ * each one containing one byte of meaningful data.
+ *
+ * This function splits a single possibly multi-byte write into a series of
+ * single-byte writes with a proper offset.
+ */
+void litex_set_reg(void __iomem *reg, unsigned long reg_size,
+ unsigned long val)
+{
+ unsigned long shifted_data, shift, i;
+
+ for (i = 0; i < reg_size; ++i) {
+ shift = ((reg_size - i - 1) * LITEX_SUBREG_SIZE_BIT);
+ shifted_data = val >> shift;
+
+ WRITE_LITEX_SUBREGISTER(shifted_data, reg, i);
+ }
+}
+EXPORT_SYMBOL_GPL(litex_set_reg);
+
+/**
+ * litex_get_reg() - Reads the value of the LiteX CSR (Control&Status Register)
+ * @reg: Address of the CSR
+ * @reg_size: The width of the CSR expressed in the number of bytes
+ *
+ * Return: Value read from the CSR
+ *
+ * In the currently supported LiteX configuration (8-bit CSR Bus, 32-bit aligned),
+ * a 32-bit LiteX CSR is generated as 4 consecutive 32-bit physical registers,
+ * each one containing one byte of meaningful data.
+ *
+ * This function generates a series of single-byte reads with a proper offset
+ * and joins their results into a single multi-byte value.
+ */
+unsigned long litex_get_reg(void __iomem *reg, unsigned long reg_size)
+{
+ unsigned long shifted_data, shift, i;
+ unsigned long result = 0;
+
+ for (i = 0; i < reg_size; ++i) {
+ shifted_data = READ_LITEX_SUBREGISTER(reg, i);
+
+ shift = ((reg_size - i - 1) * LITEX_SUBREG_SIZE_BIT);
+ result |= (shifted_data << shift);
+ }
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(litex_get_reg);
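+
+/*
+ * Illustrative sketch (not part of the driver; assumes the default 8-bit
+ * CSR bus, 32-bit aligned configuration, i.e. LITEX_SUBREG_SIZE_BIT == 8):
+ * litex_set_reg(reg, 4, 0x12345678) expands, in effect, to four byte-wide
+ * sub-register writes, most significant byte first:
+ *
+ *   WRITE_LITEX_SUBREGISTER(0x12, reg, 0);
+ *   WRITE_LITEX_SUBREGISTER(0x34, reg, 1);
+ *   WRITE_LITEX_SUBREGISTER(0x56, reg, 2);
+ *   WRITE_LITEX_SUBREGISTER(0x78, reg, 3);
+ *
+ * litex_get_reg(reg, 4) performs the mirror-image reads, shifting and
+ * OR-ing the four bytes back into one 32-bit value.
+ */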
+
+#define SCRATCH_REG_OFF 0x04
+#define SCRATCH_REG_VALUE 0x12345678
+#define SCRATCH_TEST_VALUE 0xdeadbeef
+
+/*
+ * Check LiteX CSR read/write access
+ *
+ * This function reads and writes a scratch register in order to verify if CSR
+ * access works.
+ *
+ * In case any problems are detected, the driver should panic.
+ *
+ * Access to the LiteX CSR is, by design, done in CPU native endianness.
+ * The driver should not dynamically configure access functions when
+ * an endianness mismatch is detected. Such a situation indicates problems
+ * in the soft SoC design and should be solved at the LiteX generator level,
+ * not in the software.
+ */
+static int litex_check_csr_access(void __iomem *reg_addr)
+{
+ unsigned long reg;
+
+ reg = litex_read32(reg_addr + SCRATCH_REG_OFF);
+
+ if (reg != SCRATCH_REG_VALUE) {
+ panic("Scratch register read error - the system is probably broken! Expected: 0x%x but got: 0x%lx",
+ SCRATCH_REG_VALUE, reg);
+ return -EINVAL;
+ }
+
+ litex_write32(reg_addr + SCRATCH_REG_OFF, SCRATCH_TEST_VALUE);
+ reg = litex_read32(reg_addr + SCRATCH_REG_OFF);
+
+ if (reg != SCRATCH_TEST_VALUE) {
+ panic("Scratch register write error - the system is probably broken! Expected: 0x%x but got: 0x%lx",
+ SCRATCH_TEST_VALUE, reg);
+ return -EINVAL;
+ }
+
+ /* restore original value of the SCRATCH register */
+ litex_write32(reg_addr + SCRATCH_REG_OFF, SCRATCH_REG_VALUE);
+
+	pr_info("LiteX SoC Controller driver initialized\n");
+
+ return 0;
+}
+
+struct litex_soc_ctrl_device {
+ void __iomem *base;
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id litex_soc_ctrl_of_match[] = {
+ {.compatible = "litex,soc-controller"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, litex_soc_ctrl_of_match);
+#endif /* CONFIG_OF */
+
+static int litex_soc_ctrl_probe(struct platform_device *pdev)
+{
+ struct litex_soc_ctrl_device *soc_ctrl_dev;
+
+ soc_ctrl_dev = devm_kzalloc(&pdev->dev, sizeof(*soc_ctrl_dev), GFP_KERNEL);
+ if (!soc_ctrl_dev)
+ return -ENOMEM;
+
+ soc_ctrl_dev->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(soc_ctrl_dev->base))
+ return PTR_ERR(soc_ctrl_dev->base);
+
+ return litex_check_csr_access(soc_ctrl_dev->base);
+}
+
+static struct platform_driver litex_soc_ctrl_driver = {
+ .driver = {
+ .name = "litex-soc-controller",
+ .of_match_table = of_match_ptr(litex_soc_ctrl_of_match)
+ },
+ .probe = litex_soc_ctrl_probe,
+};
+
+module_platform_driver(litex_soc_ctrl_driver);
+MODULE_DESCRIPTION("LiteX SoC Controller driver");
+MODULE_AUTHOR("Antmicro <www.antmicro.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig
index 59a56cd790ec..fdd8bc08569e 100644
--- a/drivers/soc/mediatek/Kconfig
+++ b/drivers/soc/mediatek/Kconfig
@@ -17,6 +17,15 @@ config MTK_CMDQ
time limitation, such as updating display configuration during the
vblank.
+config MTK_DEVAPC
+ tristate "Mediatek Device APC Support"
+ help
+	  Say yes here to enable support for the Mediatek Device APC driver.
+	  This driver is mainly used to handle violations raised when the
+	  Device APC catches an unexpected transaction.
+ The violation information is logged for further analysis or
+ countermeasures.
+
config MTK_INFRACFG
bool "MediaTek INFRACFG Support"
select REGMAP
@@ -44,9 +53,22 @@ config MTK_SCPSYS
Say yes here to add support for the MediaTek SCPSYS power domain
driver.
+config MTK_SCPSYS_PM_DOMAINS
+ bool "MediaTek SCPSYS generic power domain"
+ default ARCH_MEDIATEK
+ depends on PM
+ select PM_GENERIC_DOMAINS
+ select REGMAP
+ help
+ Say y here to enable power domain support.
+	  In order to meet high performance and low power requirements, the
+	  System Control Processor System (SCPSYS) handles several power
+	  management related tasks in the system.
+
config MTK_MMSYS
bool "MediaTek MMSYS Support"
default ARCH_MEDIATEK
+ depends on HAS_IOMEM
help
Say yes here to add support for the MediaTek Multimedia
Subsystem (MMSYS).
diff --git a/drivers/soc/mediatek/Makefile b/drivers/soc/mediatek/Makefile
index 01f9f873634a..b6908db534c2 100644
--- a/drivers/soc/mediatek/Makefile
+++ b/drivers/soc/mediatek/Makefile
@@ -1,6 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_MTK_CMDQ) += mtk-cmdq-helper.o
+obj-$(CONFIG_MTK_DEVAPC) += mtk-devapc.o
obj-$(CONFIG_MTK_INFRACFG) += mtk-infracfg.o
obj-$(CONFIG_MTK_PMIC_WRAP) += mtk-pmic-wrap.o
obj-$(CONFIG_MTK_SCPSYS) += mtk-scpsys.o
+obj-$(CONFIG_MTK_SCPSYS_PM_DOMAINS) += mtk-pm-domains.o
obj-$(CONFIG_MTK_MMSYS) += mtk-mmsys.o
diff --git a/drivers/soc/mediatek/mt8173-pm-domains.h b/drivers/soc/mediatek/mt8173-pm-domains.h
new file mode 100644
index 000000000000..3e8ee5dabb43
--- /dev/null
+++ b/drivers/soc/mediatek/mt8173-pm-domains.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __SOC_MEDIATEK_MT8173_PM_DOMAINS_H
+#define __SOC_MEDIATEK_MT8173_PM_DOMAINS_H
+
+#include "mtk-pm-domains.h"
+#include <dt-bindings/power/mt8173-power.h>
+
+/*
+ * MT8173 power domain support
+ */
+
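+/*
+ * Note (illustrative): the sram_pdn_bits/sram_pdn_ack_bits fields are plain
+ * bitmasks, e.g. GENMASK(11, 8) == 0x00000f00, marking which control
+ * register bits request SRAM power-down and which acknowledge it.
+ */
+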
+static const struct scpsys_domain_data scpsys_domain_data_mt8173[] = {
+ [MT8173_POWER_DOMAIN_VDEC] = {
+ .sta_mask = PWR_STATUS_VDEC,
+ .ctl_offs = SPM_VDE_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ },
+ [MT8173_POWER_DOMAIN_VENC] = {
+ .sta_mask = PWR_STATUS_VENC,
+ .ctl_offs = SPM_VEN_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ },
+ [MT8173_POWER_DOMAIN_ISP] = {
+ .sta_mask = PWR_STATUS_ISP,
+ .ctl_offs = SPM_ISP_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(13, 12),
+ },
+ [MT8173_POWER_DOMAIN_MM] = {
+ .sta_mask = PWR_STATUS_DISP,
+ .ctl_offs = SPM_DIS_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_UPDATE_TOPAXI(MT8173_TOP_AXI_PROT_EN_MM_M0 |
+ MT8173_TOP_AXI_PROT_EN_MM_M1),
+ },
+ },
+ [MT8173_POWER_DOMAIN_VENC_LT] = {
+ .sta_mask = PWR_STATUS_VENC_LT,
+ .ctl_offs = SPM_VEN2_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ },
+ [MT8173_POWER_DOMAIN_AUDIO] = {
+ .sta_mask = PWR_STATUS_AUDIO,
+ .ctl_offs = SPM_AUDIO_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ },
+ [MT8173_POWER_DOMAIN_USB] = {
+ .sta_mask = PWR_STATUS_USB,
+ .ctl_offs = SPM_USB_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ .caps = MTK_SCPD_ACTIVE_WAKEUP,
+ },
+ [MT8173_POWER_DOMAIN_MFG_ASYNC] = {
+ .sta_mask = PWR_STATUS_MFG_ASYNC,
+ .ctl_offs = SPM_MFG_ASYNC_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = 0,
+ },
+ [MT8173_POWER_DOMAIN_MFG_2D] = {
+ .sta_mask = PWR_STATUS_MFG_2D,
+ .ctl_offs = SPM_MFG_2D_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(13, 12),
+ },
+ [MT8173_POWER_DOMAIN_MFG] = {
+ .sta_mask = PWR_STATUS_MFG,
+ .ctl_offs = SPM_MFG_PWR_CON,
+ .sram_pdn_bits = GENMASK(13, 8),
+ .sram_pdn_ack_bits = GENMASK(21, 16),
+ .bp_infracfg = {
+ BUS_PROT_UPDATE_TOPAXI(MT8173_TOP_AXI_PROT_EN_MFG_S |
+ MT8173_TOP_AXI_PROT_EN_MFG_M0 |
+ MT8173_TOP_AXI_PROT_EN_MFG_M1 |
+ MT8173_TOP_AXI_PROT_EN_MFG_SNOOP_OUT),
+ },
+ },
+};
+
+static const struct scpsys_soc_data mt8173_scpsys_data = {
+ .domains_data = scpsys_domain_data_mt8173,
+ .num_domains = ARRAY_SIZE(scpsys_domain_data_mt8173),
+ .pwr_sta_offs = SPM_PWR_STATUS,
+ .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND,
+};
+
+#endif /* __SOC_MEDIATEK_MT8173_PM_DOMAINS_H */
diff --git a/drivers/soc/mediatek/mt8183-pm-domains.h b/drivers/soc/mediatek/mt8183-pm-domains.h
new file mode 100644
index 000000000000..8d996c5d2682
--- /dev/null
+++ b/drivers/soc/mediatek/mt8183-pm-domains.h
@@ -0,0 +1,221 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __SOC_MEDIATEK_MT8183_PM_DOMAINS_H
+#define __SOC_MEDIATEK_MT8183_PM_DOMAINS_H
+
+#include "mtk-pm-domains.h"
+#include <dt-bindings/power/mt8183-power.h>
+
+/*
+ * MT8183 power domain support
+ */
+
+static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
+ [MT8183_POWER_DOMAIN_AUDIO] = {
+ .sta_mask = PWR_STATUS_AUDIO,
+ .ctl_offs = 0x0314,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ },
+ [MT8183_POWER_DOMAIN_CONN] = {
+ .sta_mask = PWR_STATUS_CONN,
+ .ctl_offs = 0x032c,
+ .sram_pdn_bits = 0,
+ .sram_pdn_ack_bits = 0,
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8183_TOP_AXI_PROT_EN_CONN, MT8183_TOP_AXI_PROT_EN_SET,
+ MT8183_TOP_AXI_PROT_EN_CLR, MT8183_TOP_AXI_PROT_EN_STA1),
+ },
+ },
+ [MT8183_POWER_DOMAIN_MFG_ASYNC] = {
+ .sta_mask = PWR_STATUS_MFG_ASYNC,
+ .ctl_offs = 0x0334,
+ .sram_pdn_bits = 0,
+ .sram_pdn_ack_bits = 0,
+ },
+ [MT8183_POWER_DOMAIN_MFG] = {
+ .sta_mask = PWR_STATUS_MFG,
+ .ctl_offs = 0x0338,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ },
+ [MT8183_POWER_DOMAIN_MFG_CORE0] = {
+ .sta_mask = BIT(7),
+ .ctl_offs = 0x034c,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ },
+ [MT8183_POWER_DOMAIN_MFG_CORE1] = {
+ .sta_mask = BIT(20),
+ .ctl_offs = 0x0310,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ },
+ [MT8183_POWER_DOMAIN_MFG_2D] = {
+ .sta_mask = PWR_STATUS_MFG_2D,
+ .ctl_offs = 0x0348,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8183_TOP_AXI_PROT_EN_1_MFG, MT8183_TOP_AXI_PROT_EN_1_SET,
+ MT8183_TOP_AXI_PROT_EN_1_CLR, MT8183_TOP_AXI_PROT_EN_STA1_1),
+ BUS_PROT_WR(MT8183_TOP_AXI_PROT_EN_MFG, MT8183_TOP_AXI_PROT_EN_SET,
+ MT8183_TOP_AXI_PROT_EN_CLR, MT8183_TOP_AXI_PROT_EN_STA1),
+ },
+ },
+ [MT8183_POWER_DOMAIN_DISP] = {
+ .sta_mask = PWR_STATUS_DISP,
+ .ctl_offs = 0x030c,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8183_TOP_AXI_PROT_EN_1_DISP, MT8183_TOP_AXI_PROT_EN_1_SET,
+ MT8183_TOP_AXI_PROT_EN_1_CLR, MT8183_TOP_AXI_PROT_EN_STA1_1),
+ BUS_PROT_WR(MT8183_TOP_AXI_PROT_EN_DISP, MT8183_TOP_AXI_PROT_EN_SET,
+ MT8183_TOP_AXI_PROT_EN_CLR, MT8183_TOP_AXI_PROT_EN_STA1),
+ },
+ .bp_smi = {
+ BUS_PROT_WR(MT8183_SMI_COMMON_SMI_CLAMP_DISP,
+ MT8183_SMI_COMMON_CLAMP_EN_SET,
+ MT8183_SMI_COMMON_CLAMP_EN_CLR,
+ MT8183_SMI_COMMON_CLAMP_EN),
+ },
+ },
+ [MT8183_POWER_DOMAIN_CAM] = {
+ .sta_mask = BIT(25),
+ .ctl_offs = 0x0344,
+ .sram_pdn_bits = GENMASK(9, 8),
+ .sram_pdn_ack_bits = GENMASK(13, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8183_TOP_AXI_PROT_EN_MM_CAM, MT8183_TOP_AXI_PROT_EN_MM_SET,
+ MT8183_TOP_AXI_PROT_EN_MM_CLR, MT8183_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(MT8183_TOP_AXI_PROT_EN_CAM, MT8183_TOP_AXI_PROT_EN_SET,
+ MT8183_TOP_AXI_PROT_EN_CLR, MT8183_TOP_AXI_PROT_EN_STA1),
+ BUS_PROT_WR_IGN(MT8183_TOP_AXI_PROT_EN_MM_CAM_2ND,
+ MT8183_TOP_AXI_PROT_EN_MM_SET,
+ MT8183_TOP_AXI_PROT_EN_MM_CLR,
+ MT8183_TOP_AXI_PROT_EN_MM_STA1),
+ },
+ .bp_smi = {
+ BUS_PROT_WR(MT8183_SMI_COMMON_SMI_CLAMP_CAM,
+ MT8183_SMI_COMMON_CLAMP_EN_SET,
+ MT8183_SMI_COMMON_CLAMP_EN_CLR,
+ MT8183_SMI_COMMON_CLAMP_EN),
+ },
+ },
+ [MT8183_POWER_DOMAIN_ISP] = {
+ .sta_mask = PWR_STATUS_ISP,
+ .ctl_offs = 0x0308,
+ .sram_pdn_bits = GENMASK(9, 8),
+ .sram_pdn_ack_bits = GENMASK(13, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8183_TOP_AXI_PROT_EN_MM_ISP,
+ MT8183_TOP_AXI_PROT_EN_MM_SET,
+ MT8183_TOP_AXI_PROT_EN_MM_CLR,
+ MT8183_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR_IGN(MT8183_TOP_AXI_PROT_EN_MM_ISP_2ND,
+ MT8183_TOP_AXI_PROT_EN_MM_SET,
+ MT8183_TOP_AXI_PROT_EN_MM_CLR,
+ MT8183_TOP_AXI_PROT_EN_MM_STA1),
+ },
+ .bp_smi = {
+ BUS_PROT_WR(MT8183_SMI_COMMON_SMI_CLAMP_ISP,
+ MT8183_SMI_COMMON_CLAMP_EN_SET,
+ MT8183_SMI_COMMON_CLAMP_EN_CLR,
+ MT8183_SMI_COMMON_CLAMP_EN),
+ },
+ },
+ [MT8183_POWER_DOMAIN_VDEC] = {
+ .sta_mask = BIT(31),
+ .ctl_offs = 0x0300,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_smi = {
+ BUS_PROT_WR(MT8183_SMI_COMMON_SMI_CLAMP_VDEC,
+ MT8183_SMI_COMMON_CLAMP_EN_SET,
+ MT8183_SMI_COMMON_CLAMP_EN_CLR,
+ MT8183_SMI_COMMON_CLAMP_EN),
+ },
+ },
+ [MT8183_POWER_DOMAIN_VENC] = {
+ .sta_mask = PWR_STATUS_VENC,
+ .ctl_offs = 0x0304,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ .bp_smi = {
+ BUS_PROT_WR(MT8183_SMI_COMMON_SMI_CLAMP_VENC,
+ MT8183_SMI_COMMON_CLAMP_EN_SET,
+ MT8183_SMI_COMMON_CLAMP_EN_CLR,
+ MT8183_SMI_COMMON_CLAMP_EN),
+ },
+ },
+ [MT8183_POWER_DOMAIN_VPU_TOP] = {
+ .sta_mask = BIT(26),
+ .ctl_offs = 0x0324,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8183_TOP_AXI_PROT_EN_MM_VPU_TOP,
+ MT8183_TOP_AXI_PROT_EN_MM_SET,
+ MT8183_TOP_AXI_PROT_EN_MM_CLR,
+ MT8183_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(MT8183_TOP_AXI_PROT_EN_VPU_TOP,
+ MT8183_TOP_AXI_PROT_EN_SET,
+ MT8183_TOP_AXI_PROT_EN_CLR,
+ MT8183_TOP_AXI_PROT_EN_STA1),
+ BUS_PROT_WR(MT8183_TOP_AXI_PROT_EN_MM_VPU_TOP_2ND,
+ MT8183_TOP_AXI_PROT_EN_MM_SET,
+ MT8183_TOP_AXI_PROT_EN_MM_CLR,
+ MT8183_TOP_AXI_PROT_EN_MM_STA1),
+ },
+ .bp_smi = {
+ BUS_PROT_WR(MT8183_SMI_COMMON_SMI_CLAMP_VPU_TOP,
+ MT8183_SMI_COMMON_CLAMP_EN_SET,
+ MT8183_SMI_COMMON_CLAMP_EN_CLR,
+ MT8183_SMI_COMMON_CLAMP_EN),
+ },
+ },
+ [MT8183_POWER_DOMAIN_VPU_CORE0] = {
+ .sta_mask = BIT(27),
+ .ctl_offs = 0x33c,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(13, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8183_TOP_AXI_PROT_EN_MCU_VPU_CORE0,
+ MT8183_TOP_AXI_PROT_EN_MCU_SET,
+ MT8183_TOP_AXI_PROT_EN_MCU_CLR,
+ MT8183_TOP_AXI_PROT_EN_MCU_STA1),
+ BUS_PROT_WR(MT8183_TOP_AXI_PROT_EN_MCU_VPU_CORE0_2ND,
+ MT8183_TOP_AXI_PROT_EN_MCU_SET,
+ MT8183_TOP_AXI_PROT_EN_MCU_CLR,
+ MT8183_TOP_AXI_PROT_EN_MCU_STA1),
+ },
+ .caps = MTK_SCPD_SRAM_ISO,
+ },
+ [MT8183_POWER_DOMAIN_VPU_CORE1] = {
+ .sta_mask = BIT(28),
+ .ctl_offs = 0x0340,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(13, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8183_TOP_AXI_PROT_EN_MCU_VPU_CORE1,
+ MT8183_TOP_AXI_PROT_EN_MCU_SET,
+ MT8183_TOP_AXI_PROT_EN_MCU_CLR,
+ MT8183_TOP_AXI_PROT_EN_MCU_STA1),
+ BUS_PROT_WR(MT8183_TOP_AXI_PROT_EN_MCU_VPU_CORE1_2ND,
+ MT8183_TOP_AXI_PROT_EN_MCU_SET,
+ MT8183_TOP_AXI_PROT_EN_MCU_CLR,
+ MT8183_TOP_AXI_PROT_EN_MCU_STA1),
+ },
+ .caps = MTK_SCPD_SRAM_ISO,
+ },
+};
+
+static const struct scpsys_soc_data mt8183_scpsys_data = {
+ .domains_data = scpsys_domain_data_mt8183,
+ .num_domains = ARRAY_SIZE(scpsys_domain_data_mt8183),
+ .pwr_sta_offs = 0x0180,
+ .pwr_sta2nd_offs = 0x0184
+};
+
+#endif /* __SOC_MEDIATEK_MT8183_PM_DOMAINS_H */
diff --git a/drivers/soc/mediatek/mt8192-pm-domains.h b/drivers/soc/mediatek/mt8192-pm-domains.h
new file mode 100644
index 000000000000..0fdf6dc6231f
--- /dev/null
+++ b/drivers/soc/mediatek/mt8192-pm-domains.h
@@ -0,0 +1,292 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __SOC_MEDIATEK_MT8192_PM_DOMAINS_H
+#define __SOC_MEDIATEK_MT8192_PM_DOMAINS_H
+
+#include "mtk-pm-domains.h"
+#include <dt-bindings/power/mt8192-power.h>
+
+/*
+ * MT8192 power domain support
+ */
+
+static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
+ [MT8192_POWER_DOMAIN_AUDIO] = {
+ .sta_mask = BIT(21),
+ .ctl_offs = 0x0354,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_2_AUDIO,
+ MT8192_TOP_AXI_PROT_EN_2_SET,
+ MT8192_TOP_AXI_PROT_EN_2_CLR,
+ MT8192_TOP_AXI_PROT_EN_2_STA1),
+ },
+ },
+ [MT8192_POWER_DOMAIN_CONN] = {
+ .sta_mask = PWR_STATUS_CONN,
+ .ctl_offs = 0x0304,
+ .sram_pdn_bits = 0,
+ .sram_pdn_ack_bits = 0,
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_CONN,
+ MT8192_TOP_AXI_PROT_EN_SET,
+ MT8192_TOP_AXI_PROT_EN_CLR,
+ MT8192_TOP_AXI_PROT_EN_STA1),
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_CONN_2ND,
+ MT8192_TOP_AXI_PROT_EN_SET,
+ MT8192_TOP_AXI_PROT_EN_CLR,
+ MT8192_TOP_AXI_PROT_EN_STA1),
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_1_CONN,
+ MT8192_TOP_AXI_PROT_EN_1_SET,
+ MT8192_TOP_AXI_PROT_EN_1_CLR,
+ MT8192_TOP_AXI_PROT_EN_1_STA1),
+ },
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8192_POWER_DOMAIN_MFG0] = {
+ .sta_mask = BIT(2),
+ .ctl_offs = 0x0308,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ },
+ [MT8192_POWER_DOMAIN_MFG1] = {
+ .sta_mask = BIT(3),
+ .ctl_offs = 0x030c,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_1_MFG1,
+ MT8192_TOP_AXI_PROT_EN_1_SET,
+ MT8192_TOP_AXI_PROT_EN_1_CLR,
+ MT8192_TOP_AXI_PROT_EN_1_STA1),
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_2_MFG1,
+ MT8192_TOP_AXI_PROT_EN_2_SET,
+ MT8192_TOP_AXI_PROT_EN_2_CLR,
+ MT8192_TOP_AXI_PROT_EN_2_STA1),
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_MFG1,
+ MT8192_TOP_AXI_PROT_EN_SET,
+ MT8192_TOP_AXI_PROT_EN_CLR,
+ MT8192_TOP_AXI_PROT_EN_STA1),
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_2_MFG1_2ND,
+ MT8192_TOP_AXI_PROT_EN_2_SET,
+ MT8192_TOP_AXI_PROT_EN_2_CLR,
+ MT8192_TOP_AXI_PROT_EN_2_STA1),
+ },
+ },
+ [MT8192_POWER_DOMAIN_MFG2] = {
+ .sta_mask = BIT(4),
+ .ctl_offs = 0x0310,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ },
+ [MT8192_POWER_DOMAIN_MFG3] = {
+ .sta_mask = BIT(5),
+ .ctl_offs = 0x0314,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ },
+ [MT8192_POWER_DOMAIN_MFG4] = {
+ .sta_mask = BIT(6),
+ .ctl_offs = 0x0318,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ },
+ [MT8192_POWER_DOMAIN_MFG5] = {
+ .sta_mask = BIT(7),
+ .ctl_offs = 0x031c,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ },
+ [MT8192_POWER_DOMAIN_MFG6] = {
+ .sta_mask = BIT(8),
+ .ctl_offs = 0x0320,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ },
+ [MT8192_POWER_DOMAIN_DISP] = {
+ .sta_mask = BIT(20),
+ .ctl_offs = 0x0350,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR_IGN(MT8192_TOP_AXI_PROT_EN_MM_DISP,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR_IGN(MT8192_TOP_AXI_PROT_EN_MM_2_DISP,
+ MT8192_TOP_AXI_PROT_EN_MM_2_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_2_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_2_STA1),
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_DISP,
+ MT8192_TOP_AXI_PROT_EN_SET,
+ MT8192_TOP_AXI_PROT_EN_CLR,
+ MT8192_TOP_AXI_PROT_EN_STA1),
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_MM_DISP_2ND,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_MM_2_DISP_2ND,
+ MT8192_TOP_AXI_PROT_EN_MM_2_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_2_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_2_STA1),
+ },
+ },
+ [MT8192_POWER_DOMAIN_IPE] = {
+ .sta_mask = BIT(14),
+ .ctl_offs = 0x0338,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_MM_IPE,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_MM_IPE_2ND,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ },
+ },
+ [MT8192_POWER_DOMAIN_ISP] = {
+ .sta_mask = BIT(12),
+ .ctl_offs = 0x0330,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_MM_2_ISP,
+ MT8192_TOP_AXI_PROT_EN_MM_2_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_2_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_2_STA1),
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_MM_2_ISP_2ND,
+ MT8192_TOP_AXI_PROT_EN_MM_2_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_2_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_2_STA1),
+ },
+ },
+ [MT8192_POWER_DOMAIN_ISP2] = {
+ .sta_mask = BIT(13),
+ .ctl_offs = 0x0334,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_MM_ISP2,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_MM_ISP2_2ND,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ },
+ },
+ [MT8192_POWER_DOMAIN_MDP] = {
+ .sta_mask = BIT(19),
+ .ctl_offs = 0x034c,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_MM_2_MDP,
+ MT8192_TOP_AXI_PROT_EN_MM_2_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_2_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_2_STA1),
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_MM_2_MDP_2ND,
+ MT8192_TOP_AXI_PROT_EN_MM_2_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_2_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_2_STA1),
+ },
+ },
+ [MT8192_POWER_DOMAIN_VENC] = {
+ .sta_mask = BIT(17),
+ .ctl_offs = 0x0344,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_MM_VENC,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_MM_VENC_2ND,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ },
+ },
+ [MT8192_POWER_DOMAIN_VDEC] = {
+ .sta_mask = BIT(15),
+ .ctl_offs = 0x033c,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_MM_VDEC,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_MM_VDEC_2ND,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ },
+ },
+ [MT8192_POWER_DOMAIN_VDEC2] = {
+ .sta_mask = BIT(16),
+ .ctl_offs = 0x0340,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ },
+ [MT8192_POWER_DOMAIN_CAM] = {
+ .sta_mask = BIT(23),
+ .ctl_offs = 0x035c,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_2_CAM,
+ MT8192_TOP_AXI_PROT_EN_2_SET,
+ MT8192_TOP_AXI_PROT_EN_2_CLR,
+ MT8192_TOP_AXI_PROT_EN_2_STA1),
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_MM_CAM,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_1_CAM,
+ MT8192_TOP_AXI_PROT_EN_1_SET,
+ MT8192_TOP_AXI_PROT_EN_1_CLR,
+ MT8192_TOP_AXI_PROT_EN_1_STA1),
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_MM_CAM_2ND,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_VDNR_CAM,
+ MT8192_TOP_AXI_PROT_EN_VDNR_SET,
+ MT8192_TOP_AXI_PROT_EN_VDNR_CLR,
+ MT8192_TOP_AXI_PROT_EN_VDNR_STA1),
+ },
+ },
+ [MT8192_POWER_DOMAIN_CAM_RAWA] = {
+ .sta_mask = BIT(24),
+ .ctl_offs = 0x0360,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ },
+ [MT8192_POWER_DOMAIN_CAM_RAWB] = {
+ .sta_mask = BIT(25),
+ .ctl_offs = 0x0364,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ },
+ [MT8192_POWER_DOMAIN_CAM_RAWC] = {
+ .sta_mask = BIT(26),
+ .ctl_offs = 0x0368,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ },
+};
+
+static const struct scpsys_soc_data mt8192_scpsys_data = {
+ .domains_data = scpsys_domain_data_mt8192,
+ .num_domains = ARRAY_SIZE(scpsys_domain_data_mt8192),
+ .pwr_sta_offs = 0x016c,
+ .pwr_sta2nd_offs = 0x0170,
+};
+
+#endif /* __SOC_MEDIATEK_MT8192_PM_DOMAINS_H */
diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c
index 505651b0d715..280d3bd9f675 100644
--- a/drivers/soc/mediatek/mtk-cmdq-helper.c
+++ b/drivers/soc/mediatek/mtk-cmdq-helper.c
@@ -70,14 +70,7 @@ int cmdq_dev_get_client_reg(struct device *dev,
}
EXPORT_SYMBOL(cmdq_dev_get_client_reg);
-static void cmdq_client_timeout(struct timer_list *t)
-{
- struct cmdq_client *client = from_timer(client, t, timer);
-
- dev_err(client->client.dev, "cmdq timeout!\n");
-}
-
-struct cmdq_client *cmdq_mbox_create(struct device *dev, int index, u32 timeout)
+struct cmdq_client *cmdq_mbox_create(struct device *dev, int index)
{
struct cmdq_client *client;
@@ -85,12 +78,6 @@ struct cmdq_client *cmdq_mbox_create(struct device *dev, int index, u32 timeout)
if (!client)
return (struct cmdq_client *)-ENOMEM;
- client->timeout_ms = timeout;
- if (timeout != CMDQ_NO_TIMEOUT) {
- spin_lock_init(&client->lock);
- timer_setup(&client->timer, cmdq_client_timeout, 0);
- }
- client->pkt_cnt = 0;
client->client.dev = dev;
client->client.tx_block = false;
client->client.knows_txdone = true;
@@ -112,11 +99,6 @@ EXPORT_SYMBOL(cmdq_mbox_create);
void cmdq_mbox_destroy(struct cmdq_client *client)
{
- if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
- spin_lock(&client->lock);
- del_timer_sync(&client->timer);
- spin_unlock(&client->lock);
- }
mbox_free_channel(client->chan);
kfree(client);
}
@@ -449,18 +431,6 @@ static void cmdq_pkt_flush_async_cb(struct cmdq_cb_data data)
struct cmdq_task_cb *cb = &pkt->cb;
struct cmdq_client *client = (struct cmdq_client *)pkt->cl;
- if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
- unsigned long flags = 0;
-
- spin_lock_irqsave(&client->lock, flags);
- if (--client->pkt_cnt == 0)
- del_timer(&client->timer);
- else
- mod_timer(&client->timer, jiffies +
- msecs_to_jiffies(client->timeout_ms));
- spin_unlock_irqrestore(&client->lock, flags);
- }
-
dma_sync_single_for_cpu(client->chan->mbox->dev, pkt->pa_base,
pkt->cmd_buf_size, DMA_TO_DEVICE);
if (cb->cb) {
@@ -473,7 +443,6 @@ int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb,
void *data)
{
int err;
- unsigned long flags = 0;
struct cmdq_client *client = (struct cmdq_client *)pkt->cl;
pkt->cb.cb = cb;
@@ -484,14 +453,6 @@ int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb,
dma_sync_single_for_device(client->chan->mbox->dev, pkt->pa_base,
pkt->cmd_buf_size, DMA_TO_DEVICE);
- if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
- spin_lock_irqsave(&client->lock, flags);
- if (client->pkt_cnt++ == 0)
- mod_timer(&client->timer, jiffies +
- msecs_to_jiffies(client->timeout_ms));
- spin_unlock_irqrestore(&client->lock, flags);
- }
-
err = mbox_send_message(client->chan, pkt);
if (err < 0)
return err;
diff --git a/drivers/soc/mediatek/mtk-devapc.c b/drivers/soc/mediatek/mtk-devapc.c
new file mode 100644
index 000000000000..f1cea041dc5a
--- /dev/null
+++ b/drivers/soc/mediatek/mtk-devapc.c
@@ -0,0 +1,308 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 MediaTek Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+
+#define VIO_MOD_TO_REG_IND(m) ((m) / 32)
+#define VIO_MOD_TO_REG_OFF(m) ((m) % 32)
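+
+/*
+ * For example (illustrative): violation module index 40 is tracked in
+ * 32-bit register word VIO_MOD_TO_REG_IND(40) = 1 at bit position
+ * VIO_MOD_TO_REG_OFF(40) = 8, since each register covers 32 indexes.
+ */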
+
+struct mtk_devapc_vio_dbgs {
+ union {
+ u32 vio_dbg0;
+ struct {
+ u32 mstid:16;
+ u32 dmnid:6;
+ u32 vio_w:1;
+ u32 vio_r:1;
+ u32 addr_h:4;
+ u32 resv:4;
+ } dbg0_bits;
+ };
+
+ u32 vio_dbg1;
+};
+
+struct mtk_devapc_data {
+ /* numbers of violation index */
+ u32 vio_idx_num;
+
+ /* reg offset */
+ u32 vio_mask_offset;
+ u32 vio_sta_offset;
+ u32 vio_dbg0_offset;
+ u32 vio_dbg1_offset;
+ u32 apc_con_offset;
+ u32 vio_shift_sta_offset;
+ u32 vio_shift_sel_offset;
+ u32 vio_shift_con_offset;
+};
+
+struct mtk_devapc_context {
+ struct device *dev;
+ void __iomem *infra_base;
+ struct clk *infra_clk;
+ const struct mtk_devapc_data *data;
+};
+
+static void clear_vio_status(struct mtk_devapc_context *ctx)
+{
+ void __iomem *reg;
+ int i;
+
+ reg = ctx->infra_base + ctx->data->vio_sta_offset;
+
+ for (i = 0; i < VIO_MOD_TO_REG_IND(ctx->data->vio_idx_num) - 1; i++)
+ writel(GENMASK(31, 0), reg + 4 * i);
+
+ writel(GENMASK(VIO_MOD_TO_REG_OFF(ctx->data->vio_idx_num) - 1, 0),
+ reg + 4 * i);
+}
+
+static void mask_module_irq(struct mtk_devapc_context *ctx, bool mask)
+{
+ void __iomem *reg;
+ u32 val;
+ int i;
+
+ reg = ctx->infra_base + ctx->data->vio_mask_offset;
+
+ if (mask)
+ val = GENMASK(31, 0);
+ else
+ val = 0;
+
+ for (i = 0; i < VIO_MOD_TO_REG_IND(ctx->data->vio_idx_num) - 1; i++)
+ writel(val, reg + 4 * i);
+
+ val = readl(reg + 4 * i);
+ if (mask)
+ val |= GENMASK(VIO_MOD_TO_REG_OFF(ctx->data->vio_idx_num) - 1,
+ 0);
+ else
+ val &= ~GENMASK(VIO_MOD_TO_REG_OFF(ctx->data->vio_idx_num) - 1,
+ 0);
+
+ writel(val, reg + 4 * i);
+}
+
+#define PHY_DEVAPC_TIMEOUT 0x10000
+
+/*
+ * devapc_sync_vio_dbg - use the "shift" mechanism to get the full violation
+ *                       information.
+ *                       The shift mechanism depends on the devapc hardware
+ *                       design: MediaTek devapc groups multiple slaves
+ *                       together, and when a violation triggers, the
+ *                       violation info is latched inside the devapc hardware.
+ *                       The driver must perform the shift to sync the full
+ *                       violation info into the VIO_DBG registers.
+ */
+static int devapc_sync_vio_dbg(struct mtk_devapc_context *ctx)
+{
+ void __iomem *pd_vio_shift_sta_reg;
+ void __iomem *pd_vio_shift_sel_reg;
+ void __iomem *pd_vio_shift_con_reg;
+ int min_shift_group;
+ int ret;
+ u32 val;
+
+ pd_vio_shift_sta_reg = ctx->infra_base +
+ ctx->data->vio_shift_sta_offset;
+ pd_vio_shift_sel_reg = ctx->infra_base +
+ ctx->data->vio_shift_sel_offset;
+ pd_vio_shift_con_reg = ctx->infra_base +
+ ctx->data->vio_shift_con_offset;
+
+ /* Find the minimum shift group which has violation */
+ val = readl(pd_vio_shift_sta_reg);
+ if (!val)
+ return false;
+
+ min_shift_group = __ffs(val);
+
+ /* Assign the group to sync */
+ writel(0x1 << min_shift_group, pd_vio_shift_sel_reg);
+
+ /* Start syncing */
+ writel(0x1, pd_vio_shift_con_reg);
+
+ ret = readl_poll_timeout(pd_vio_shift_con_reg, val, val == 0x3, 0,
+ PHY_DEVAPC_TIMEOUT);
+ if (ret) {
+ dev_err(ctx->dev, "%s: Shift violation info failed\n", __func__);
+ return false;
+ }
+
+ /* Stop syncing */
+ writel(0x0, pd_vio_shift_con_reg);
+
+ /* Write clear */
+ writel(0x1 << min_shift_group, pd_vio_shift_sta_reg);
+
+ return true;
+}
+
+/*
+ * devapc_extract_vio_dbg - extract full violation information after doing
+ * shift mechanism.
+ */
+static void devapc_extract_vio_dbg(struct mtk_devapc_context *ctx)
+{
+ struct mtk_devapc_vio_dbgs vio_dbgs;
+ void __iomem *vio_dbg0_reg;
+ void __iomem *vio_dbg1_reg;
+
+ vio_dbg0_reg = ctx->infra_base + ctx->data->vio_dbg0_offset;
+ vio_dbg1_reg = ctx->infra_base + ctx->data->vio_dbg1_offset;
+
+ vio_dbgs.vio_dbg0 = readl(vio_dbg0_reg);
+ vio_dbgs.vio_dbg1 = readl(vio_dbg1_reg);
+
+ /* Print violation information */
+ if (vio_dbgs.dbg0_bits.vio_w)
+ dev_info(ctx->dev, "Write Violation\n");
+ else if (vio_dbgs.dbg0_bits.vio_r)
+ dev_info(ctx->dev, "Read Violation\n");
+
+ dev_info(ctx->dev, "Bus ID:0x%x, Dom ID:0x%x, Vio Addr:0x%x\n",
+ vio_dbgs.dbg0_bits.mstid, vio_dbgs.dbg0_bits.dmnid,
+ vio_dbgs.vio_dbg1);
+}
+
+/*
+ * devapc_violation_irq - the devapc Interrupt Service Routine (ISR) dumps
+ *                        violation information, including which master
+ *                        performed the violating access to which slave.
+ */
+static irqreturn_t devapc_violation_irq(int irq_number, void *data)
+{
+ struct mtk_devapc_context *ctx = data;
+
+ while (devapc_sync_vio_dbg(ctx))
+ devapc_extract_vio_dbg(ctx);
+
+ clear_vio_status(ctx);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * start_devapc - unmask the slave's irq to start receiving devapc violations.
+ */
+static void start_devapc(struct mtk_devapc_context *ctx)
+{
+ writel(BIT(31), ctx->infra_base + ctx->data->apc_con_offset);
+
+ mask_module_irq(ctx, false);
+}
+
+/*
+ * stop_devapc - mask slave's irq to stop service.
+ */
+static void stop_devapc(struct mtk_devapc_context *ctx)
+{
+ mask_module_irq(ctx, true);
+
+ writel(BIT(2), ctx->infra_base + ctx->data->apc_con_offset);
+}
+
+static const struct mtk_devapc_data devapc_mt6779 = {
+ .vio_idx_num = 511,
+ .vio_mask_offset = 0x0,
+ .vio_sta_offset = 0x400,
+ .vio_dbg0_offset = 0x900,
+ .vio_dbg1_offset = 0x904,
+ .apc_con_offset = 0xF00,
+ .vio_shift_sta_offset = 0xF10,
+ .vio_shift_sel_offset = 0xF14,
+ .vio_shift_con_offset = 0xF20,
+};
+
+static const struct of_device_id mtk_devapc_dt_match[] = {
+ {
+ .compatible = "mediatek,mt6779-devapc",
+ .data = &devapc_mt6779,
+ }, {
+ },
+};
+
+static int mtk_devapc_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct mtk_devapc_context *ctx;
+ u32 devapc_irq;
+ int ret;
+
+	if (!node)
+ return -ENODEV;
+
+ ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->data = of_device_get_match_data(&pdev->dev);
+ ctx->dev = &pdev->dev;
+
+ ctx->infra_base = of_iomap(node, 0);
+ if (!ctx->infra_base)
+ return -EINVAL;
+
+ devapc_irq = irq_of_parse_and_map(node, 0);
+ if (!devapc_irq)
+ return -EINVAL;
+
+ ctx->infra_clk = devm_clk_get(&pdev->dev, "devapc-infra-clock");
+ if (IS_ERR(ctx->infra_clk))
+ return -EINVAL;
+
+ if (clk_prepare_enable(ctx->infra_clk))
+ return -EINVAL;
+
+ ret = devm_request_irq(&pdev->dev, devapc_irq, devapc_violation_irq,
+ IRQF_TRIGGER_NONE, "devapc", ctx);
+ if (ret) {
+ clk_disable_unprepare(ctx->infra_clk);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, ctx);
+
+ start_devapc(ctx);
+
+ return 0;
+}
+
+static int mtk_devapc_remove(struct platform_device *pdev)
+{
+ struct mtk_devapc_context *ctx = platform_get_drvdata(pdev);
+
+ stop_devapc(ctx);
+
+ clk_disable_unprepare(ctx->infra_clk);
+
+ return 0;
+}
+
+static struct platform_driver mtk_devapc_driver = {
+ .probe = mtk_devapc_probe,
+ .remove = mtk_devapc_remove,
+ .driver = {
+ .name = "mtk-devapc",
+ .of_match_table = mtk_devapc_dt_match,
+ },
+};
+
+module_platform_driver(mtk_devapc_driver);
+
+MODULE_DESCRIPTION("Mediatek Device APC Driver");
+MODULE_AUTHOR("Neal Liu <neal.liu@mediatek.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/mediatek/mtk-infracfg.c b/drivers/soc/mediatek/mtk-infracfg.c
index 4a123796aad3..0590b68e0d78 100644
--- a/drivers/soc/mediatek/mtk-infracfg.c
+++ b/drivers/soc/mediatek/mtk-infracfg.c
@@ -12,11 +12,6 @@
#define MTK_POLL_DELAY_US 10
#define MTK_POLL_TIMEOUT (jiffies_to_usecs(HZ))
-#define INFRA_TOPAXI_PROTECTEN 0x0220
-#define INFRA_TOPAXI_PROTECTSTA1 0x0228
-#define INFRA_TOPAXI_PROTECTEN_SET 0x0260
-#define INFRA_TOPAXI_PROTECTEN_CLR 0x0264
-
/**
* mtk_infracfg_set_bus_protection - enable bus protection
* @infracfg: The infracfg regmap
diff --git a/drivers/soc/mediatek/mtk-mmsys.c b/drivers/soc/mediatek/mtk-mmsys.c
index a55f25511173..18f93979e14a 100644
--- a/drivers/soc/mediatek/mtk-mmsys.c
+++ b/drivers/soc/mediatek/mtk-mmsys.c
@@ -5,13 +5,11 @@
*/
#include <linux/device.h>
+#include <linux/io.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/soc/mediatek/mtk-mmsys.h>
-#include "../../gpu/drm/mediatek/mtk_drm_ddp.h"
-#include "../../gpu/drm/mediatek/mtk_drm_ddp_comp.h"
-
#define DISP_REG_CONFIG_DISP_OVL0_MOUT_EN 0x040
#define DISP_REG_CONFIG_DISP_OVL1_MOUT_EN 0x044
#define DISP_REG_CONFIG_DISP_OD_MOUT_EN 0x048
@@ -308,15 +306,12 @@ static int mtk_mmsys_probe(struct platform_device *pdev)
struct platform_device *clks;
struct platform_device *drm;
void __iomem *config_regs;
- struct resource *mem;
int ret;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- config_regs = devm_ioremap_resource(dev, mem);
+ config_regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(config_regs)) {
ret = PTR_ERR(config_regs);
- dev_err(dev, "Failed to ioremap mmsys-config resource: %d\n",
- ret);
+ dev_err(dev, "Failed to ioremap mmsys registers: %d\n", ret);
return ret;
}
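
The mmsys conversion above is purely mechanical: devm_platform_ioremap_resource() folds the two-step lookup-and-map into one call. Roughly, simplified from the core helper:

    void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
                                                 unsigned int index)
    {
            struct resource *res;

            res = platform_get_resource(pdev, IORESOURCE_MEM, index);
            return devm_ioremap_resource(&pdev->dev, res);
    }
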
diff --git a/drivers/soc/mediatek/mtk-pm-domains.c b/drivers/soc/mediatek/mtk-pm-domains.c
new file mode 100644
index 000000000000..fb70cb3b07b3
--- /dev/null
+++ b/drivers/soc/mediatek/mtk-pm-domains.c
@@ -0,0 +1,614 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020 Collabora Ltd.
+ */
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_clk.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/regmap.h>
+#include <linux/soc/mediatek/infracfg.h>
+
+#include "mt8173-pm-domains.h"
+#include "mt8183-pm-domains.h"
+#include "mt8192-pm-domains.h"
+
+#define MTK_POLL_DELAY_US 10
+#define MTK_POLL_TIMEOUT USEC_PER_SEC
+
+#define PWR_RST_B_BIT BIT(0)
+#define PWR_ISO_BIT BIT(1)
+#define PWR_ON_BIT BIT(2)
+#define PWR_ON_2ND_BIT BIT(3)
+#define PWR_CLK_DIS_BIT BIT(4)
+#define PWR_SRAM_CLKISO_BIT BIT(5)
+#define PWR_SRAM_ISOINT_B_BIT BIT(6)
+
+struct scpsys_domain {
+ struct generic_pm_domain genpd;
+ const struct scpsys_domain_data *data;
+ struct scpsys *scpsys;
+ int num_clks;
+ struct clk_bulk_data *clks;
+ int num_subsys_clks;
+ struct clk_bulk_data *subsys_clks;
+ struct regmap *infracfg;
+ struct regmap *smi;
+};
+
+struct scpsys {
+ struct device *dev;
+ struct regmap *base;
+ const struct scpsys_soc_data *soc_data;
+ struct genpd_onecell_data pd_data;
+ struct generic_pm_domain *domains[];
+};
+
+#define to_scpsys_domain(gpd) container_of(gpd, struct scpsys_domain, genpd)
+
+static bool scpsys_domain_is_on(struct scpsys_domain *pd)
+{
+ struct scpsys *scpsys = pd->scpsys;
+ u32 status, status2;
+
+ regmap_read(scpsys->base, scpsys->soc_data->pwr_sta_offs, &status);
+ status &= pd->data->sta_mask;
+
+ regmap_read(scpsys->base, scpsys->soc_data->pwr_sta2nd_offs, &status2);
+ status2 &= pd->data->sta_mask;
+
+ /* A domain is on when both status bits are set. */
+ return status && status2;
+}
+
+static int scpsys_sram_enable(struct scpsys_domain *pd)
+{
+ u32 pdn_ack = pd->data->sram_pdn_ack_bits;
+ struct scpsys *scpsys = pd->scpsys;
+ unsigned int tmp;
+ int ret;
+
+ regmap_clear_bits(scpsys->base, pd->data->ctl_offs, pd->data->sram_pdn_bits);
+
+	/* Wait until the SRAM_PDN_ACK bits are all cleared */
+ ret = regmap_read_poll_timeout(scpsys->base, pd->data->ctl_offs, tmp,
+ (tmp & pdn_ack) == 0, MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
+ if (ret < 0)
+ return ret;
+
+ if (MTK_SCPD_CAPS(pd, MTK_SCPD_SRAM_ISO)) {
+ regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_SRAM_ISOINT_B_BIT);
+ udelay(1);
+ regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_SRAM_CLKISO_BIT);
+ }
+
+ return 0;
+}
+
+static int scpsys_sram_disable(struct scpsys_domain *pd)
+{
+ u32 pdn_ack = pd->data->sram_pdn_ack_bits;
+ struct scpsys *scpsys = pd->scpsys;
+ unsigned int tmp;
+
+ if (MTK_SCPD_CAPS(pd, MTK_SCPD_SRAM_ISO)) {
+ regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_SRAM_CLKISO_BIT);
+ udelay(1);
+ regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_SRAM_ISOINT_B_BIT);
+ }
+
+ regmap_set_bits(scpsys->base, pd->data->ctl_offs, pd->data->sram_pdn_bits);
+
+	/* Wait until the SRAM_PDN_ACK bits are all set */
+ return regmap_read_poll_timeout(scpsys->base, pd->data->ctl_offs, tmp,
+ (tmp & pdn_ack) == pdn_ack, MTK_POLL_DELAY_US,
+ MTK_POLL_TIMEOUT);
+}
+
+static int _scpsys_bus_protect_enable(const struct scpsys_bus_prot_data *bpd, struct regmap *regmap)
+{
+ int i, ret;
+
+ for (i = 0; i < SPM_MAX_BUS_PROT_DATA; i++) {
+ u32 val, mask = bpd[i].bus_prot_mask;
+
+ if (!mask)
+ break;
+
+ if (bpd[i].bus_prot_reg_update)
+ regmap_set_bits(regmap, bpd[i].bus_prot_set, mask);
+ else
+ regmap_write(regmap, bpd[i].bus_prot_set, mask);
+
+ ret = regmap_read_poll_timeout(regmap, bpd[i].bus_prot_sta,
+ val, (val & mask) == mask,
+ MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int scpsys_bus_protect_enable(struct scpsys_domain *pd)
+{
+ int ret;
+
+ ret = _scpsys_bus_protect_enable(pd->data->bp_infracfg, pd->infracfg);
+ if (ret)
+ return ret;
+
+ return _scpsys_bus_protect_enable(pd->data->bp_smi, pd->smi);
+}
+
+static int _scpsys_bus_protect_disable(const struct scpsys_bus_prot_data *bpd,
+ struct regmap *regmap)
+{
+ int i, ret;
+
+ for (i = SPM_MAX_BUS_PROT_DATA - 1; i >= 0; i--) {
+ u32 val, mask = bpd[i].bus_prot_mask;
+
+ if (!mask)
+ continue;
+
+ if (bpd[i].bus_prot_reg_update)
+ regmap_clear_bits(regmap, bpd[i].bus_prot_clr, mask);
+ else
+ regmap_write(regmap, bpd[i].bus_prot_clr, mask);
+
+ if (bpd[i].ignore_clr_ack)
+ continue;
+
+ ret = regmap_read_poll_timeout(regmap, bpd[i].bus_prot_sta,
+ val, !(val & mask),
+ MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int scpsys_bus_protect_disable(struct scpsys_domain *pd)
+{
+ int ret;
+
+ ret = _scpsys_bus_protect_disable(pd->data->bp_smi, pd->smi);
+ if (ret)
+ return ret;
+
+ return _scpsys_bus_protect_disable(pd->data->bp_infracfg, pd->infracfg);
+}
+
+static int scpsys_power_on(struct generic_pm_domain *genpd)
+{
+ struct scpsys_domain *pd = container_of(genpd, struct scpsys_domain, genpd);
+ struct scpsys *scpsys = pd->scpsys;
+ bool tmp;
+ int ret;
+
+ ret = clk_bulk_enable(pd->num_clks, pd->clks);
+ if (ret)
+ return ret;
+
+ /* subsys power on */
+ regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_BIT);
+ regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_2ND_BIT);
+
+ /* wait until PWR_ACK = 1 */
+ ret = readx_poll_timeout(scpsys_domain_is_on, pd, tmp, tmp, MTK_POLL_DELAY_US,
+ MTK_POLL_TIMEOUT);
+ if (ret < 0)
+ goto err_pwr_ack;
+
+ regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_CLK_DIS_BIT);
+ regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_ISO_BIT);
+ regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RST_B_BIT);
+
+ ret = clk_bulk_enable(pd->num_subsys_clks, pd->subsys_clks);
+ if (ret)
+ goto err_pwr_ack;
+
+ ret = scpsys_sram_enable(pd);
+ if (ret < 0)
+ goto err_disable_subsys_clks;
+
+ ret = scpsys_bus_protect_disable(pd);
+ if (ret < 0)
+ goto err_disable_sram;
+
+ return 0;
+
+err_disable_sram:
+ scpsys_sram_disable(pd);
+err_disable_subsys_clks:
+ clk_bulk_disable(pd->num_subsys_clks, pd->subsys_clks);
+err_pwr_ack:
+ clk_bulk_disable(pd->num_clks, pd->clks);
+ return ret;
+}
+
+static int scpsys_power_off(struct generic_pm_domain *genpd)
+{
+ struct scpsys_domain *pd = container_of(genpd, struct scpsys_domain, genpd);
+ struct scpsys *scpsys = pd->scpsys;
+ bool tmp;
+ int ret;
+
+ ret = scpsys_bus_protect_enable(pd);
+ if (ret < 0)
+ return ret;
+
+ ret = scpsys_sram_disable(pd);
+ if (ret < 0)
+ return ret;
+
+ clk_bulk_disable(pd->num_subsys_clks, pd->subsys_clks);
+
+ /* subsys power off */
+ regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RST_B_BIT);
+ regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_ISO_BIT);
+ regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_CLK_DIS_BIT);
+ regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_2ND_BIT);
+ regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_BIT);
+
+ /* wait until PWR_ACK = 0 */
+ ret = readx_poll_timeout(scpsys_domain_is_on, pd, tmp, !tmp, MTK_POLL_DELAY_US,
+ MTK_POLL_TIMEOUT);
+ if (ret < 0)
+ return ret;
+
+ clk_bulk_disable(pd->num_clks, pd->clks);
+
+ return 0;
+}
+
+static struct generic_pm_domain *
+scpsys_add_one_domain(struct scpsys *scpsys, struct device_node *node)
+{
+ const struct scpsys_domain_data *domain_data;
+ struct scpsys_domain *pd;
+ struct property *prop;
+ const char *clk_name;
+ int i, ret, num_clks;
+ struct clk *clk;
+ int clk_ind = 0;
+ u32 id;
+
+ ret = of_property_read_u32(node, "reg", &id);
+ if (ret) {
+ dev_err(scpsys->dev, "%pOF: failed to retrieve domain id from reg: %d\n",
+ node, ret);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (id >= scpsys->soc_data->num_domains) {
+ dev_err(scpsys->dev, "%pOF: invalid domain id %d\n", node, id);
+ return ERR_PTR(-EINVAL);
+ }
+
+ domain_data = &scpsys->soc_data->domains_data[id];
+ if (domain_data->sta_mask == 0) {
+ dev_err(scpsys->dev, "%pOF: undefined domain id %d\n", node, id);
+ return ERR_PTR(-EINVAL);
+ }
+
+ pd = devm_kzalloc(scpsys->dev, sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return ERR_PTR(-ENOMEM);
+
+ pd->data = domain_data;
+ pd->scpsys = scpsys;
+
+ pd->infracfg = syscon_regmap_lookup_by_phandle_optional(node, "mediatek,infracfg");
+ if (IS_ERR(pd->infracfg))
+ return ERR_CAST(pd->infracfg);
+
+ pd->smi = syscon_regmap_lookup_by_phandle_optional(node, "mediatek,smi");
+ if (IS_ERR(pd->smi))
+ return ERR_CAST(pd->smi);
+
+ num_clks = of_clk_get_parent_count(node);
+ if (num_clks > 0) {
+ /* Calculate number of subsys_clks */
+ of_property_for_each_string(node, "clock-names", prop, clk_name) {
+ char *subsys;
+
+ subsys = strchr(clk_name, '-');
+ if (subsys)
+ pd->num_subsys_clks++;
+ else
+ pd->num_clks++;
+ }
+
+ pd->clks = devm_kcalloc(scpsys->dev, pd->num_clks, sizeof(*pd->clks), GFP_KERNEL);
+ if (!pd->clks)
+ return ERR_PTR(-ENOMEM);
+
+ pd->subsys_clks = devm_kcalloc(scpsys->dev, pd->num_subsys_clks,
+ sizeof(*pd->subsys_clks), GFP_KERNEL);
+ if (!pd->subsys_clks)
+ return ERR_PTR(-ENOMEM);
+	}
+
+ for (i = 0; i < pd->num_clks; i++) {
+ clk = of_clk_get(node, i);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ dev_err_probe(scpsys->dev, ret,
+ "%pOF: failed to get clk at index %d: %d\n", node, i, ret);
+ goto err_put_clocks;
+ }
+
+ pd->clks[clk_ind++].clk = clk;
+ }
+
+ for (i = 0; i < pd->num_subsys_clks; i++) {
+ clk = of_clk_get(node, i + clk_ind);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ dev_err_probe(scpsys->dev, ret,
+ "%pOF: failed to get clk at index %d: %d\n", node,
+ i + clk_ind, ret);
+ goto err_put_subsys_clocks;
+ }
+
+ pd->subsys_clks[i].clk = clk;
+ }
+
+ ret = clk_bulk_prepare(pd->num_clks, pd->clks);
+ if (ret)
+ goto err_put_subsys_clocks;
+
+ ret = clk_bulk_prepare(pd->num_subsys_clks, pd->subsys_clks);
+ if (ret)
+ goto err_unprepare_clocks;
+
+ /*
+ * Initially turn on all domains to make the domains usable
+ * with !CONFIG_PM and to get the hardware in sync with the
+ * software. The unused domains will be switched off during
+ * late_init time.
+ */
+ if (MTK_SCPD_CAPS(pd, MTK_SCPD_KEEP_DEFAULT_OFF)) {
+ if (scpsys_domain_is_on(pd))
+ dev_warn(scpsys->dev,
+ "%pOF: A default off power domain has been ON\n", node);
+ } else {
+ ret = scpsys_power_on(&pd->genpd);
+ if (ret < 0) {
+ dev_err(scpsys->dev, "%pOF: failed to power on domain: %d\n", node, ret);
+ goto err_unprepare_clocks;
+ }
+ }
+
+ if (scpsys->domains[id]) {
+ ret = -EINVAL;
+ dev_err(scpsys->dev,
+ "power domain with id %d already exists, check your device-tree\n", id);
+ goto err_unprepare_subsys_clocks;
+ }
+
+ pd->genpd.name = node->name;
+ pd->genpd.power_off = scpsys_power_off;
+ pd->genpd.power_on = scpsys_power_on;
+
+ if (MTK_SCPD_CAPS(pd, MTK_SCPD_KEEP_DEFAULT_OFF))
+ pm_genpd_init(&pd->genpd, NULL, true);
+ else
+ pm_genpd_init(&pd->genpd, NULL, false);
+
+ scpsys->domains[id] = &pd->genpd;
+
+ return scpsys->pd_data.domains[id];
+
+err_unprepare_subsys_clocks:
+ clk_bulk_unprepare(pd->num_subsys_clks, pd->subsys_clks);
+err_unprepare_clocks:
+ clk_bulk_unprepare(pd->num_clks, pd->clks);
+err_put_subsys_clocks:
+ clk_bulk_put(pd->num_subsys_clks, pd->subsys_clks);
+err_put_clocks:
+ clk_bulk_put(pd->num_clks, pd->clks);
+ return ERR_PTR(ret);
+}
+
+static int scpsys_add_subdomain(struct scpsys *scpsys, struct device_node *parent)
+{
+ struct generic_pm_domain *child_pd, *parent_pd;
+ struct device_node *child;
+ int ret;
+
+ for_each_child_of_node(parent, child) {
+ u32 id;
+
+ ret = of_property_read_u32(parent, "reg", &id);
+ if (ret) {
+ dev_err(scpsys->dev, "%pOF: failed to get parent domain id\n", child);
+ goto err_put_node;
+ }
+
+ if (!scpsys->pd_data.domains[id]) {
+ ret = -EINVAL;
+ dev_err(scpsys->dev, "power domain with id %d does not exist\n", id);
+ goto err_put_node;
+ }
+
+ parent_pd = scpsys->pd_data.domains[id];
+
+ child_pd = scpsys_add_one_domain(scpsys, child);
+ if (IS_ERR(child_pd)) {
+ ret = PTR_ERR(child_pd);
+ dev_err(scpsys->dev, "%pOF: failed to get child domain id\n", child);
+ goto err_put_node;
+ }
+
+ ret = pm_genpd_add_subdomain(parent_pd, child_pd);
+ if (ret) {
+ dev_err(scpsys->dev, "failed to add %s subdomain to parent %s\n",
+ child_pd->name, parent_pd->name);
+ goto err_put_node;
+ } else {
+ dev_dbg(scpsys->dev, "%s add subdomain: %s\n", parent_pd->name,
+ child_pd->name);
+ }
+
+ /* recursive call to add all subdomains */
+ ret = scpsys_add_subdomain(scpsys, child);
+ if (ret)
+ goto err_put_node;
+ }
+
+ return 0;
+
+err_put_node:
+ of_node_put(child);
+ return ret;
+}
+
+static void scpsys_remove_one_domain(struct scpsys_domain *pd)
+{
+ int ret;
+
+ if (scpsys_domain_is_on(pd))
+ scpsys_power_off(&pd->genpd);
+
+ /*
+ * We're in the error cleanup already, so we only complain,
+ * but won't emit another error on top of the original one.
+ */
+ ret = pm_genpd_remove(&pd->genpd);
+ if (ret < 0)
+ dev_err(pd->scpsys->dev,
+ "failed to remove domain '%s' : %d - state may be inconsistent\n",
+ pd->genpd.name, ret);
+
+ clk_bulk_unprepare(pd->num_clks, pd->clks);
+ clk_bulk_put(pd->num_clks, pd->clks);
+
+ clk_bulk_unprepare(pd->num_subsys_clks, pd->subsys_clks);
+ clk_bulk_put(pd->num_subsys_clks, pd->subsys_clks);
+}
+
+static void scpsys_domain_cleanup(struct scpsys *scpsys)
+{
+ struct generic_pm_domain *genpd;
+ struct scpsys_domain *pd;
+ int i;
+
+ for (i = scpsys->pd_data.num_domains - 1; i >= 0; i--) {
+ genpd = scpsys->pd_data.domains[i];
+ if (genpd) {
+ pd = to_scpsys_domain(genpd);
+ scpsys_remove_one_domain(pd);
+ }
+ }
+}
+
+static const struct of_device_id scpsys_of_match[] = {
+ {
+ .compatible = "mediatek,mt8173-power-controller",
+ .data = &mt8173_scpsys_data,
+ },
+ {
+ .compatible = "mediatek,mt8183-power-controller",
+ .data = &mt8183_scpsys_data,
+ },
+ {
+ .compatible = "mediatek,mt8192-power-controller",
+ .data = &mt8192_scpsys_data,
+ },
+ { }
+};
+
+static int scpsys_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ const struct scpsys_soc_data *soc;
+ struct device_node *node;
+ struct device *parent;
+ struct scpsys *scpsys;
+ int ret;
+
+ soc = of_device_get_match_data(&pdev->dev);
+ if (!soc) {
+ dev_err(&pdev->dev, "no power controller data\n");
+ return -EINVAL;
+ }
+
+ scpsys = devm_kzalloc(dev, struct_size(scpsys, domains, soc->num_domains), GFP_KERNEL);
+ if (!scpsys)
+ return -ENOMEM;
+
+ scpsys->dev = dev;
+ scpsys->soc_data = soc;
+
+ scpsys->pd_data.domains = scpsys->domains;
+ scpsys->pd_data.num_domains = soc->num_domains;
+
+ parent = dev->parent;
+ if (!parent) {
+ dev_err(dev, "no parent for syscon devices\n");
+ return -ENODEV;
+ }
+
+ scpsys->base = syscon_node_to_regmap(parent->of_node);
+ if (IS_ERR(scpsys->base)) {
+ dev_err(dev, "no regmap available\n");
+ return PTR_ERR(scpsys->base);
+ }
+
+ ret = -ENODEV;
+ for_each_available_child_of_node(np, node) {
+ struct generic_pm_domain *domain;
+
+ domain = scpsys_add_one_domain(scpsys, node);
+ if (IS_ERR(domain)) {
+ ret = PTR_ERR(domain);
+ of_node_put(node);
+ goto err_cleanup_domains;
+ }
+
+ ret = scpsys_add_subdomain(scpsys, node);
+ if (ret) {
+ of_node_put(node);
+ goto err_cleanup_domains;
+ }
+ }
+
+ if (ret) {
+ dev_dbg(dev, "no power domains present\n");
+ return ret;
+ }
+
+ ret = of_genpd_add_provider_onecell(np, &scpsys->pd_data);
+ if (ret) {
+ dev_err(dev, "failed to add provider: %d\n", ret);
+ goto err_cleanup_domains;
+ }
+
+ return 0;
+
+err_cleanup_domains:
+ scpsys_domain_cleanup(scpsys);
+ return ret;
+}
+
+static struct platform_driver scpsys_pm_domain_driver = {
+ .probe = scpsys_probe,
+ .driver = {
+ .name = "mtk-power-controller",
+ .suppress_bind_attrs = true,
+ .of_match_table = scpsys_of_match,
+ },
+};
+builtin_platform_driver(scpsys_pm_domain_driver);
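
Worth noting how scpsys_add_one_domain() splits the clocks: a "clock-names" entry containing a '-' counts as a subsystem clock, anything else as a plain domain clock. A minimal sketch of the rule (the names are illustrative, not from a real device tree):

    /* "mm"       -> domain clock     (pd->clks)
     * "isp-larb" -> subsystem clock  (pd->subsys_clks)
     */
    static bool is_subsys_clk(const char *clk_name)
    {
            return strchr(clk_name, '-') != NULL;
    }
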
diff --git a/drivers/soc/mediatek/mtk-pm-domains.h b/drivers/soc/mediatek/mtk-pm-domains.h
new file mode 100644
index 000000000000..a2f4d8f97e05
--- /dev/null
+++ b/drivers/soc/mediatek/mtk-pm-domains.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __SOC_MEDIATEK_MTK_PM_DOMAINS_H
+#define __SOC_MEDIATEK_MTK_PM_DOMAINS_H
+
+#define MTK_SCPD_ACTIVE_WAKEUP BIT(0)
+#define MTK_SCPD_FWAIT_SRAM BIT(1)
+#define MTK_SCPD_SRAM_ISO BIT(2)
+#define MTK_SCPD_KEEP_DEFAULT_OFF BIT(3)
+#define MTK_SCPD_CAPS(_scpd, _x) ((_scpd)->data->caps & (_x))
+
+#define SPM_VDE_PWR_CON 0x0210
+#define SPM_MFG_PWR_CON 0x0214
+#define SPM_VEN_PWR_CON 0x0230
+#define SPM_ISP_PWR_CON 0x0238
+#define SPM_DIS_PWR_CON 0x023c
+#define SPM_VEN2_PWR_CON 0x0298
+#define SPM_AUDIO_PWR_CON 0x029c
+#define SPM_MFG_2D_PWR_CON 0x02c0
+#define SPM_MFG_ASYNC_PWR_CON 0x02c4
+#define SPM_USB_PWR_CON 0x02cc
+
+#define SPM_PWR_STATUS 0x060c
+#define SPM_PWR_STATUS_2ND 0x0610
+
+#define PWR_STATUS_CONN BIT(1)
+#define PWR_STATUS_DISP BIT(3)
+#define PWR_STATUS_MFG BIT(4)
+#define PWR_STATUS_ISP BIT(5)
+#define PWR_STATUS_VDEC BIT(7)
+#define PWR_STATUS_VENC_LT BIT(20)
+#define PWR_STATUS_VENC BIT(21)
+#define PWR_STATUS_MFG_2D BIT(22)
+#define PWR_STATUS_MFG_ASYNC BIT(23)
+#define PWR_STATUS_AUDIO BIT(24)
+#define PWR_STATUS_USB BIT(25)
+
+#define SPM_MAX_BUS_PROT_DATA 5
+
+#define _BUS_PROT(_mask, _set, _clr, _sta, _update, _ignore) { \
+ .bus_prot_mask = (_mask), \
+ .bus_prot_set = _set, \
+ .bus_prot_clr = _clr, \
+ .bus_prot_sta = _sta, \
+ .bus_prot_reg_update = _update, \
+ .ignore_clr_ack = _ignore, \
+ }
+
+#define BUS_PROT_WR(_mask, _set, _clr, _sta) \
+ _BUS_PROT(_mask, _set, _clr, _sta, false, false)
+
+#define BUS_PROT_WR_IGN(_mask, _set, _clr, _sta) \
+ _BUS_PROT(_mask, _set, _clr, _sta, false, true)
+
+#define BUS_PROT_UPDATE(_mask, _set, _clr, _sta) \
+ _BUS_PROT(_mask, _set, _clr, _sta, true, false)
+
+#define BUS_PROT_UPDATE_TOPAXI(_mask) \
+ BUS_PROT_UPDATE(_mask, \
+ INFRA_TOPAXI_PROTECTEN, \
+ INFRA_TOPAXI_PROTECTEN_CLR, \
+ INFRA_TOPAXI_PROTECTSTA1)
+
+struct scpsys_bus_prot_data {
+ u32 bus_prot_mask;
+ u32 bus_prot_set;
+ u32 bus_prot_clr;
+ u32 bus_prot_sta;
+ bool bus_prot_reg_update;
+ bool ignore_clr_ack;
+};
+
+#define MAX_SUBSYS_CLKS 10
+
+/**
+ * struct scpsys_domain_data - scp domain data for power on/off flow
+ * @sta_mask: The mask for power on/off status bit.
+ * @ctl_offs: The offset for main power control register.
+ * @sram_pdn_bits: The mask for sram power control bits.
+ * @sram_pdn_ack_bits: The mask for sram power control acked bits.
+ * @caps: The MTK_SCPD_* capability flags for this domain.
+ * @bp_infracfg: bus protection for infracfg subsystem
+ * @bp_smi: bus protection for smi subsystem
+ */
+struct scpsys_domain_data {
+ u32 sta_mask;
+ int ctl_offs;
+ u32 sram_pdn_bits;
+ u32 sram_pdn_ack_bits;
+ u8 caps;
+ const struct scpsys_bus_prot_data bp_infracfg[SPM_MAX_BUS_PROT_DATA];
+ const struct scpsys_bus_prot_data bp_smi[SPM_MAX_BUS_PROT_DATA];
+};
+
+struct scpsys_soc_data {
+ const struct scpsys_domain_data *domains_data;
+ int num_domains;
+ int pwr_sta_offs;
+ int pwr_sta2nd_offs;
+};
+
+#endif /* __SOC_MEDIATEK_MTK_PM_DOMAINS_H */
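
For illustration, a hypothetical domain description built from the macros above — every offset and mask here is made up, not taken from a real SoC table:

    static const struct scpsys_domain_data demo_domain = {
            .sta_mask = BIT(3),
            .ctl_offs = 0x023c,
            .sram_pdn_bits = GENMASK(11, 8),
            .sram_pdn_ack_bits = GENMASK(15, 12),
            .caps = MTK_SCPD_ACTIVE_WAKEUP,
            .bp_infracfg = {
                    BUS_PROT_UPDATE_TOPAXI(BIT(1) | BIT(2)),
            },
    };
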
diff --git a/drivers/soc/mediatek/mtk-scpsys.c b/drivers/soc/mediatek/mtk-scpsys.c
index f669d3754627..ca75b14931ec 100644
--- a/drivers/soc/mediatek/mtk-scpsys.c
+++ b/drivers/soc/mediatek/mtk-scpsys.c
@@ -524,6 +524,7 @@ static void mtk_register_power_domains(struct platform_device *pdev,
for (i = 0; i < num; i++) {
struct scp_domain *scpd = &scp->domains[i];
struct generic_pm_domain *genpd = &scpd->genpd;
+ bool on;
/*
* Initially turn on all domains to make the domains usable
@@ -531,9 +532,9 @@ static void mtk_register_power_domains(struct platform_device *pdev,
* software. The unused domains will be switched off during
* late_init time.
*/
- genpd->power_on(genpd);
+ on = !WARN_ON(genpd->power_on(genpd) < 0);
- pm_genpd_init(genpd, NULL, false);
+ pm_genpd_init(genpd, NULL, !on);
}
/*
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 6a3b69b43ad5..79b568f82a1c 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -17,7 +17,7 @@ config QCOM_AOSS_QMP
Subsystem (AOSS) using Qualcomm Messaging Protocol (QMP).
config QCOM_COMMAND_DB
- bool "Qualcomm Command DB"
+ tristate "Qualcomm Command DB"
depends on ARCH_QCOM || COMPILE_TEST
depends on OF_RESERVED_MEM
help
@@ -108,8 +108,9 @@ config QCOM_RMTFS_MEM
Say y here if you intend to boot the modem remoteproc.
config QCOM_RPMH
- bool "Qualcomm RPM-Hardened (RPMH) Communication"
+ tristate "Qualcomm RPM-Hardened (RPMH) Communication"
depends on ARCH_QCOM || COMPILE_TEST
+ depends on (QCOM_COMMAND_DB || !QCOM_COMMAND_DB)
help
Support for communication with the hardened-RPM blocks in
Qualcomm Technologies Inc (QTI) SoCs. RPMH communication uses an
diff --git a/drivers/soc/qcom/cmd-db.c b/drivers/soc/qcom/cmd-db.c
index fc5610603b17..dd872017f345 100644
--- a/drivers/soc/qcom/cmd-db.c
+++ b/drivers/soc/qcom/cmd-db.c
@@ -1,8 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2016-2018, 2020, The Linux Foundation. All rights reserved. */
#include <linux/debugfs.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_reserved_mem.h>
@@ -340,12 +341,14 @@ static const struct of_device_id cmd_db_match_table[] = {
{ .compatible = "qcom,cmd-db" },
{ }
};
+MODULE_DEVICE_TABLE(of, cmd_db_match_table);
static struct platform_driver cmd_db_dev_driver = {
.probe = cmd_db_dev_probe,
.driver = {
.name = "cmd-db",
.of_match_table = cmd_db_match_table,
+ .suppress_bind_attrs = true,
},
};
@@ -354,3 +357,6 @@ static int __init cmd_db_device_init(void)
return platform_driver_register(&cmd_db_dev_driver);
}
arch_initcall(cmd_db_device_init);
+
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Command DB Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/kryo-l2-accessors.c b/drivers/soc/qcom/kryo-l2-accessors.c
index c20cb92077c0..7886af4fd726 100644
--- a/drivers/soc/qcom/kryo-l2-accessors.c
+++ b/drivers/soc/qcom/kryo-l2-accessors.c
@@ -16,7 +16,7 @@ static DEFINE_RAW_SPINLOCK(l2_access_lock);
/**
* kryo_l2_set_indirect_reg() - write value to an L2 register
* @reg: Address of L2 register.
- * @value: Value to be written to register.
+ * @val: Value to be written to register.
*
* Use architecturally required barriers for ordering between system register
* accesses, and system registers with respect to device memory
diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
index 70fbe70c6213..16b421608e9c 100644
--- a/drivers/soc/qcom/llcc-qcom.c
+++ b/drivers/soc/qcom/llcc-qcom.c
@@ -45,10 +45,13 @@
#define LLCC_TRP_ATTR0_CFGn(n) (0x21000 + SZ_8 * n)
#define LLCC_TRP_ATTR1_CFGn(n) (0x21004 + SZ_8 * n)
+#define LLCC_TRP_SCID_DIS_CAP_ALLOC 0x21f00
+#define LLCC_TRP_PCB_ACT 0x21f04
+
#define BANK_OFFSET_STRIDE 0x80000
/**
- * llcc_slice_config - Data associated with the llcc slice
+ * struct llcc_slice_config - Data associated with the llcc slice
* @usecase_id: Unique id for the client's use case
* @slice_id: llcc slice id for each client
* @max_cap: The maximum capacity of the cache slice provided in KB
@@ -89,6 +92,7 @@ struct llcc_slice_config {
struct qcom_llcc_config {
const struct llcc_slice_config *sct_data;
int size;
+ bool need_llcc_cfg;
};
static const struct llcc_slice_config sc7180_data[] = {
@@ -119,14 +123,45 @@ static const struct llcc_slice_config sdm845_data[] = {
{ LLCC_AUDHW, 22, 1024, 1, 1, 0xffc, 0x2, 0, 0, 1, 1, 0 },
};
+static const struct llcc_slice_config sm8150_data[] = {
+ { LLCC_CPUSS, 1, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 1 },
+ { LLCC_VIDSC0, 2, 512, 2, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_VIDSC1, 3, 512, 2, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_AUDIO, 6, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_MDMHPGRW, 7, 3072, 1, 0, 0xFF, 0xF00, 0, 0, 0, 1, 0 },
+ { LLCC_MDM, 8, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_MODHW, 9, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_CMPT, 10, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+	{ LLCC_GPUHTW, 11, 512, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_GPU, 12, 2560, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_MMUHWT, 13, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1 },
+ { LLCC_CMPTDMA, 15, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_DISP, 16, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_MDMHPFX, 20, 1024, 2, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_MDMHPFX, 21, 1024, 0, 1, 0xF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_AUDHW, 22, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_NPU, 23, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_WLHW, 24, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_MODPE, 29, 256, 1, 1, 0xF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_APTCM, 30, 256, 3, 1, 0x0, 0x1, 1, 0, 0, 1, 0 },
+ { LLCC_WRCACHE, 31, 128, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0 },
+};
+
static const struct qcom_llcc_config sc7180_cfg = {
.sct_data = sc7180_data,
.size = ARRAY_SIZE(sc7180_data),
+ .need_llcc_cfg = true,
};
static const struct qcom_llcc_config sdm845_cfg = {
.sct_data = sdm845_data,
.size = ARRAY_SIZE(sdm845_data),
+ .need_llcc_cfg = false,
+};
+
+static const struct qcom_llcc_config sm8150_cfg = {
+ .sct_data = sm8150_data,
+ .size = ARRAY_SIZE(sm8150_data),
};
static struct llcc_drv_data *drv_data = (void *) -EPROBE_DEFER;
@@ -318,62 +353,91 @@ size_t llcc_get_slice_size(struct llcc_slice_desc *desc)
}
EXPORT_SYMBOL_GPL(llcc_get_slice_size);
-static int qcom_llcc_cfg_program(struct platform_device *pdev)
+static int _qcom_llcc_cfg_program(const struct llcc_slice_config *config,
+ const struct qcom_llcc_config *cfg)
{
- int i;
+ int ret;
u32 attr1_cfg;
u32 attr0_cfg;
u32 attr1_val;
u32 attr0_val;
u32 max_cap_cacheline;
+ struct llcc_slice_desc desc;
+
+ attr1_val = config->cache_mode;
+ attr1_val |= config->probe_target_ways << ATTR1_PROBE_TARGET_WAYS_SHIFT;
+ attr1_val |= config->fixed_size << ATTR1_FIXED_SIZE_SHIFT;
+ attr1_val |= config->priority << ATTR1_PRIORITY_SHIFT;
+
+ max_cap_cacheline = MAX_CAP_TO_BYTES(config->max_cap);
+
+ /*
+ * LLCC instances can vary for each target.
+ * The SW writes to broadcast register which gets propagated
+	 * to each llcc instance (llcc0 .. llccN).
+ * Since the size of the memory is divided equally amongst the
+ * llcc instances, we need to configure the max cap accordingly.
+ */
+ max_cap_cacheline = max_cap_cacheline / drv_data->num_banks;
+ max_cap_cacheline >>= CACHE_LINE_SIZE_SHIFT;
+ attr1_val |= max_cap_cacheline << ATTR1_MAX_CAP_SHIFT;
+
+ attr1_cfg = LLCC_TRP_ATTR1_CFGn(config->slice_id);
+
+ ret = regmap_write(drv_data->bcast_regmap, attr1_cfg, attr1_val);
+ if (ret)
+ return ret;
+
+ attr0_val = config->res_ways & ATTR0_RES_WAYS_MASK;
+ attr0_val |= config->bonus_ways << ATTR0_BONUS_WAYS_SHIFT;
+
+ attr0_cfg = LLCC_TRP_ATTR0_CFGn(config->slice_id);
+
+ ret = regmap_write(drv_data->bcast_regmap, attr0_cfg, attr0_val);
+ if (ret)
+ return ret;
+
+ if (cfg->need_llcc_cfg) {
+ u32 disable_cap_alloc, retain_pc;
+
+ disable_cap_alloc = config->dis_cap_alloc << config->slice_id;
+ ret = regmap_write(drv_data->bcast_regmap,
+ LLCC_TRP_SCID_DIS_CAP_ALLOC, disable_cap_alloc);
+ if (ret)
+ return ret;
+
+ retain_pc = config->retain_on_pc << config->slice_id;
+ ret = regmap_write(drv_data->bcast_regmap,
+ LLCC_TRP_PCB_ACT, retain_pc);
+ if (ret)
+ return ret;
+ }
+
+ if (config->activate_on_init) {
+ desc.slice_id = config->slice_id;
+ ret = llcc_slice_activate(&desc);
+ }
+
+ return ret;
+}
+
+static int qcom_llcc_cfg_program(struct platform_device *pdev,
+ const struct qcom_llcc_config *cfg)
+{
+ int i;
u32 sz;
int ret = 0;
const struct llcc_slice_config *llcc_table;
- struct llcc_slice_desc desc;
sz = drv_data->cfg_size;
llcc_table = drv_data->cfg;
for (i = 0; i < sz; i++) {
- attr1_cfg = LLCC_TRP_ATTR1_CFGn(llcc_table[i].slice_id);
- attr0_cfg = LLCC_TRP_ATTR0_CFGn(llcc_table[i].slice_id);
-
- attr1_val = llcc_table[i].cache_mode;
- attr1_val |= llcc_table[i].probe_target_ways <<
- ATTR1_PROBE_TARGET_WAYS_SHIFT;
- attr1_val |= llcc_table[i].fixed_size <<
- ATTR1_FIXED_SIZE_SHIFT;
- attr1_val |= llcc_table[i].priority <<
- ATTR1_PRIORITY_SHIFT;
-
- max_cap_cacheline = MAX_CAP_TO_BYTES(llcc_table[i].max_cap);
-
- /* LLCC instances can vary for each target.
- * The SW writes to broadcast register which gets propagated
- * to each llcc instace (llcc0,.. llccN).
- * Since the size of the memory is divided equally amongst the
- * llcc instances, we need to configure the max cap accordingly.
- */
- max_cap_cacheline = max_cap_cacheline / drv_data->num_banks;
- max_cap_cacheline >>= CACHE_LINE_SIZE_SHIFT;
- attr1_val |= max_cap_cacheline << ATTR1_MAX_CAP_SHIFT;
-
- attr0_val = llcc_table[i].res_ways & ATTR0_RES_WAYS_MASK;
- attr0_val |= llcc_table[i].bonus_ways << ATTR0_BONUS_WAYS_SHIFT;
-
- ret = regmap_write(drv_data->bcast_regmap, attr1_cfg,
- attr1_val);
+ ret = _qcom_llcc_cfg_program(&llcc_table[i], cfg);
if (ret)
return ret;
- ret = regmap_write(drv_data->bcast_regmap, attr0_cfg,
- attr0_val);
- if (ret)
- return ret;
- if (llcc_table[i].activate_on_init) {
- desc.slice_id = llcc_table[i].slice_id;
- ret = llcc_slice_activate(&desc);
- }
}
+
return ret;
}
@@ -472,7 +536,7 @@ static int qcom_llcc_probe(struct platform_device *pdev)
mutex_init(&drv_data->lock);
platform_set_drvdata(pdev, drv_data);
- ret = qcom_llcc_cfg_program(pdev);
+ ret = qcom_llcc_cfg_program(pdev, cfg);
if (ret)
goto err;
@@ -494,6 +558,7 @@ err:
static const struct of_device_id qcom_llcc_of_match[] = {
{ .compatible = "qcom,sc7180-llcc", .data = &sc7180_cfg },
{ .compatible = "qcom,sdm845-llcc", .data = &sdm845_cfg },
+ { .compatible = "qcom,sm8150-llcc", .data = &sm8150_cfg },
{ }
};
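
As a worked example of the max-cap arithmetic in _qcom_llcc_cfg_program(): assume a 3072 KB slice, 4 LLCC banks, and 64-byte cache lines (CACHE_LINE_SIZE_SHIFT == 6 is an assumption here; the real value comes from the driver):

    static u32 llcc_max_cap_field(u32 max_cap_kb, u32 num_banks)
    {
            u32 bytes = max_cap_kb * SZ_1K;   /* MAX_CAP_TO_BYTES() */

            bytes /= num_banks;               /* capacity per bank */
            return bytes >> 6;                /* bytes -> cache lines */
    }

    /* llcc_max_cap_field(3072, 4) == 12288 lines programmed per bank */
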
diff --git a/drivers/soc/qcom/pdr_interface.c b/drivers/soc/qcom/pdr_interface.c
index 088dc99f77f3..209dcdca923f 100644
--- a/drivers/soc/qcom/pdr_interface.c
+++ b/drivers/soc/qcom/pdr_interface.c
@@ -110,7 +110,7 @@ static void pdr_locator_del_server(struct qmi_handle *qmi,
pdr->locator_addr.sq_port = 0;
}
-static struct qmi_ops pdr_locator_ops = {
+static const struct qmi_ops pdr_locator_ops = {
.new_server = pdr_locator_new_server,
.del_server = pdr_locator_del_server,
};
@@ -238,7 +238,7 @@ static void pdr_notifier_del_server(struct qmi_handle *qmi,
mutex_unlock(&pdr->list_lock);
}
-static struct qmi_ops pdr_notifier_ops = {
+static const struct qmi_ops pdr_notifier_ops = {
.new_server = pdr_notifier_new_server,
.del_server = pdr_notifier_del_server,
};
@@ -343,7 +343,7 @@ static void pdr_indication_cb(struct qmi_handle *qmi,
queue_work(pdr->indack_wq, &pdr->indack_work);
}
-static struct qmi_msg_handler qmi_indication_handler[] = {
+static const struct qmi_msg_handler qmi_indication_handler[] = {
{
.type = QMI_INDICATION,
.msg_id = SERVREG_STATE_UPDATED_IND_ID,
@@ -569,7 +569,7 @@ EXPORT_SYMBOL(pdr_add_lookup);
int pdr_restart_pd(struct pdr_handle *pdr, struct pdr_service *pds)
{
struct servreg_restart_pd_resp resp;
- struct servreg_restart_pd_req req;
+ struct servreg_restart_pd_req req = { 0 };
struct sockaddr_qrtr addr;
struct pdr_service *tmp;
struct qmi_txn txn;
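
The `req = { 0 }` change above matters because the request is encoded field-by-field into a QMI message; a designated initializer guarantees every member the caller does not set is zero rather than stack garbage. A generic sketch of the pattern (the struct is hypothetical):

    struct demo_req {
            u32 enable_valid;
            u32 enable;
            char name[64];
    };

    static void demo(void)
    {
            struct demo_req req = { 0 };    /* all members zeroed */

            strscpy(req.name, "example", sizeof(req.name));
            /* req.enable_valid and req.enable are reliably 0, not stack junk */
    }
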
diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c
index 7649b2057b9a..f42954e2c98e 100644
--- a/drivers/soc/qcom/qcom-geni-se.c
+++ b/drivers/soc/qcom/qcom-geni-se.c
@@ -82,10 +82,11 @@
#define NUM_AHB_CLKS 2
/**
- * @struct geni_wrapper - Data structure to represent the QUP Wrapper Core
+ * struct geni_wrapper - Data structure to represent the QUP Wrapper Core
* @dev: Device pointer of the QUP wrapper core
* @base: Base address of this instance of QUP wrapper core
* @ahb_clks: Handle to the primary & secondary AHB clocks
+ * @to_core: Core ICC path
*/
struct geni_wrapper {
struct device *dev;
@@ -237,7 +238,7 @@ static void geni_se_irq_clear(struct geni_se *se)
* geni_se_init() - Initialize the GENI serial engine
* @se: Pointer to the concerned serial engine.
* @rx_wm: Receive watermark, in units of FIFO words.
- * @rx_rfr_wm: Ready-for-receive watermark, in units of FIFO words.
+ * @rx_rfr: Ready-for-receive watermark, in units of FIFO words.
*
* This function is used to initialize the GENI serial engine, configure
* receive watermark and ready-for-receive watermarks.
@@ -732,7 +733,7 @@ void geni_se_tx_dma_unprep(struct geni_se *se, dma_addr_t iova, size_t len)
{
struct geni_wrapper *wrapper = se->wrapper;
- if (iova && !dma_mapping_error(wrapper->dev, iova))
+ if (!dma_mapping_error(wrapper->dev, iova))
dma_unmap_single(wrapper->dev, iova, len, DMA_TO_DEVICE);
}
EXPORT_SYMBOL(geni_se_tx_dma_unprep);
@@ -749,7 +750,7 @@ void geni_se_rx_dma_unprep(struct geni_se *se, dma_addr_t iova, size_t len)
{
struct geni_wrapper *wrapper = se->wrapper;
- if (iova && !dma_mapping_error(wrapper->dev, iova))
+ if (!dma_mapping_error(wrapper->dev, iova))
dma_unmap_single(wrapper->dev, iova, len, DMA_FROM_DEVICE);
}
EXPORT_SYMBOL(geni_se_rx_dma_unprep);
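
One reading of the geni unprep change: a bus address of 0 can be a perfectly valid mapping, so testing `iova` against zero is not a reliable "was this mapped?" check — dma_mapping_error() is the portable test, and it is what remains:

    if (!dma_mapping_error(dev, iova))
            dma_unmap_single(dev, iova, len, DMA_TO_DEVICE);
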
diff --git a/drivers/soc/qcom/qcom_aoss.c b/drivers/soc/qcom/qcom_aoss.c
index ed2c687c16b3..b5840d624bc6 100644
--- a/drivers/soc/qcom/qcom_aoss.c
+++ b/drivers/soc/qcom/qcom_aoss.c
@@ -65,6 +65,7 @@ struct qmp_cooling_device {
* @tx_lock: provides synchronization between multiple callers of qmp_send()
* @qdss_clk: QDSS clock hw struct
* @pd_data: genpd data
+ * @cooling_devs: thermal cooling devices
*/
struct qmp {
void __iomem *msgram;
@@ -225,7 +226,6 @@ static bool qmp_message_empty(struct qmp *qmp)
static int qmp_send(struct qmp *qmp, const void *data, size_t len)
{
long time_left;
- size_t tlen;
int ret;
if (WARN_ON(len + sizeof(u32) > qmp->size))
@@ -242,7 +242,7 @@ static int qmp_send(struct qmp *qmp, const void *data, size_t len)
writel(len, qmp->msgram + qmp->offset);
/* Read back len to confirm data written in message RAM */
- tlen = readl(qmp->msgram + qmp->offset);
+ readl(qmp->msgram + qmp->offset);
qmp_kick(qmp);
time_left = wait_event_interruptible_timeout(qmp->event,
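
qmp_send() keeps the read-back of the length word even though the value is now discarded: a dummy readl() after writel() forces the posted MMIO write out to message RAM before the doorbell fires. The generic shape of the pattern (ring_doorbell() is a placeholder for qmp_kick()):

    writel(val, reg);
    (void)readl(reg);       /* flush the posted write */
    ring_doorbell();        /* remote now sees a consistent message */
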
diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c
index a297911afe57..37969dcbaf14 100644
--- a/drivers/soc/qcom/rpmh-rsc.c
+++ b/drivers/soc/qcom/rpmh-rsc.c
@@ -13,6 +13,7 @@
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
@@ -497,7 +498,7 @@ static void __tcs_buffer_write(struct rsc_drv *drv, int tcs_id, int cmd_id,
write_tcs_cmd(drv, RSC_DRV_CMD_MSGID, tcs_id, j, msgid);
write_tcs_cmd(drv, RSC_DRV_CMD_ADDR, tcs_id, j, cmd->addr);
write_tcs_cmd(drv, RSC_DRV_CMD_DATA, tcs_id, j, cmd->data);
- trace_rpmh_send_msg_rcuidle(drv, tcs_id, j, msgid, cmd);
+ trace_rpmh_send_msg(drv, tcs_id, j, msgid, cmd);
}
write_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, cmd_complete);
@@ -1018,6 +1019,7 @@ static const struct of_device_id rpmh_drv_match[] = {
{ .compatible = "qcom,rpmh-rsc", },
{ }
};
+MODULE_DEVICE_TABLE(of, rpmh_drv_match);
static struct platform_driver rpmh_driver = {
.probe = rpmh_rsc_probe,
@@ -1033,3 +1035,6 @@ static int __init rpmh_driver_init(void)
return platform_driver_register(&rpmh_driver);
}
arch_initcall(rpmh_driver_init);
+
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. RPMh Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
index b61e183ede69..01765ee9cdfb 100644
--- a/drivers/soc/qcom/rpmh.c
+++ b/drivers/soc/qcom/rpmh.c
@@ -181,8 +181,6 @@ static int __rpmh_write(const struct device *dev, enum rpmh_state state,
struct cache_req *req;
int i;
- rpm_msg->msg.state = state;
-
/* Cache the request in our store and link the payload */
for (i = 0; i < rpm_msg->msg.num_cmds; i++) {
req = cache_rpm_request(ctrlr, state, &rpm_msg->msg.cmds[i]);
@@ -190,8 +188,6 @@ static int __rpmh_write(const struct device *dev, enum rpmh_state state,
return PTR_ERR(req);
}
- rpm_msg->msg.state = state;
-
if (state == RPMH_ACTIVE_ONLY_STATE) {
WARN_ON(irqs_disabled());
ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msg->msg);
@@ -254,7 +250,7 @@ EXPORT_SYMBOL(rpmh_write_async);
/**
* rpmh_write: Write a set of RPMH commands and block until response
*
- * @rc: The RPMH handle got from rpmh_get_client
+ * @dev: The device making the request
* @state: Active/sleep set
* @cmd: The payload data
* @n: The number of elements in @cmd
@@ -268,11 +264,9 @@ int rpmh_write(const struct device *dev, enum rpmh_state state,
DEFINE_RPMH_MSG_ONSTACK(dev, state, &compl, rpm_msg);
int ret;
- if (!cmd || !n || n > MAX_RPMH_PAYLOAD)
- return -EINVAL;
-
- memcpy(rpm_msg.cmd, cmd, n * sizeof(*cmd));
- rpm_msg.msg.num_cmds = n;
+ ret = __fill_rpmh_msg(&rpm_msg, state, cmd, n);
+ if (ret)
+ return ret;
ret = __rpmh_write(dev, state, &rpm_msg);
if (ret)
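
The rpmh_write() simplification relies on __fill_rpmh_msg(), which is not shown in this hunk. Reconstructed from the lines it replaces, it presumably validates the payload and copies it in one place, something like:

    static int __fill_rpmh_msg(struct rpmh_request *req, enum rpmh_state state,
                               const struct tcs_cmd *cmd, u32 n)
    {
            if (!cmd || !n || n > MAX_RPMH_PAYLOAD)
                    return -EINVAL;

            memcpy(req->cmd, cmd, n * sizeof(*cmd));

            req->msg.state = state;
            req->msg.cmds = req->cmd;
            req->msg.num_cmds = n;

            return 0;
    }

This also explains the two deleted `rpm_msg->msg.state = state` assignments in __rpmh_write(): the state is set once when the message is filled.
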
diff --git a/drivers/soc/qcom/rpmhpd.c b/drivers/soc/qcom/rpmhpd.c
index e72426221a69..7ce06356d24c 100644
--- a/drivers/soc/qcom/rpmhpd.c
+++ b/drivers/soc/qcom/rpmhpd.c
@@ -24,9 +24,12 @@
* struct rpmhpd - top level RPMh power domain resource data structure
* @dev: rpmh power domain controller device
 * @pd: generic_pm_domain corresponding to the power domain
+ * @parent: generic_pm_domain corresponding to the parent's power domain
* @peer: A peer power domain in case Active only Voting is
* supported
* @active_only: True if it represents an Active only peer
+ * @corner: current corner
+ * @active_corner: current active corner
* @level: An array of level (vlvl) to corner (hlvl) mappings
* derived from cmd-db
* @level_count: Number of levels supported by the power domain. max
@@ -132,6 +135,18 @@ static const struct rpmhpd_desc sdm845_desc = {
.num_pds = ARRAY_SIZE(sdm845_rpmhpds),
};
+/* SDX55 RPMH powerdomains */
+static struct rpmhpd *sdx55_rpmhpds[] = {
+ [SDX55_MSS] = &sdm845_mss,
+ [SDX55_MX] = &sdm845_mx,
+ [SDX55_CX] = &sdm845_cx,
+};
+
+static const struct rpmhpd_desc sdx55_desc = {
+ .rpmhpds = sdx55_rpmhpds,
+ .num_pds = ARRAY_SIZE(sdx55_rpmhpds),
+};
+
/* SM8150 RPMH powerdomains */
static struct rpmhpd sm8150_mmcx_ao;
@@ -205,6 +220,7 @@ static const struct rpmhpd_desc sc7180_desc = {
static const struct of_device_id rpmhpd_match_table[] = {
{ .compatible = "qcom,sc7180-rpmhpd", .data = &sc7180_desc },
{ .compatible = "qcom,sdm845-rpmhpd", .data = &sdm845_desc },
+ { .compatible = "qcom,sdx55-rpmhpd", .data = &sdx55_desc},
{ .compatible = "qcom,sm8150-rpmhpd", .data = &sm8150_desc },
{ .compatible = "qcom,sm8250-rpmhpd", .data = &sm8250_desc },
{ }
diff --git a/drivers/soc/qcom/rpmpd.c b/drivers/soc/qcom/rpmpd.c
index f2168e4259b2..85d1207b72d7 100644
--- a/drivers/soc/qcom/rpmpd.c
+++ b/drivers/soc/qcom/rpmpd.c
@@ -35,7 +35,7 @@
#define KEY_FLOOR_LEVEL 0x6c6676 /* vfl */
#define KEY_LEVEL 0x6c766c76 /* vlvl */
-#define MAX_8996_RPMPD_STATE 6
+#define MAX_CORNER_RPMPD_STATE 6
#define DEFINE_RPMPD_PAIR(_platform, _name, _active, r_type, r_key, \
r_id) \
@@ -116,6 +116,52 @@ struct rpmpd_desc {
static DEFINE_MUTEX(rpmpd_lock);
+/* msm8939 RPM Power Domains */
+DEFINE_RPMPD_PAIR(msm8939, vddmd, vddmd_ao, SMPA, CORNER, 1);
+DEFINE_RPMPD_VFC(msm8939, vddmd_vfc, SMPA, 1);
+
+DEFINE_RPMPD_PAIR(msm8939, vddcx, vddcx_ao, SMPA, CORNER, 2);
+DEFINE_RPMPD_VFC(msm8939, vddcx_vfc, SMPA, 2);
+
+DEFINE_RPMPD_PAIR(msm8939, vddmx, vddmx_ao, LDOA, CORNER, 3);
+
+static struct rpmpd *msm8939_rpmpds[] = {
+ [MSM8939_VDDMDCX] = &msm8939_vddmd,
+ [MSM8939_VDDMDCX_AO] = &msm8939_vddmd_ao,
+ [MSM8939_VDDMDCX_VFC] = &msm8939_vddmd_vfc,
+ [MSM8939_VDDCX] = &msm8939_vddcx,
+ [MSM8939_VDDCX_AO] = &msm8939_vddcx_ao,
+ [MSM8939_VDDCX_VFC] = &msm8939_vddcx_vfc,
+ [MSM8939_VDDMX] = &msm8939_vddmx,
+ [MSM8939_VDDMX_AO] = &msm8939_vddmx_ao,
+};
+
+static const struct rpmpd_desc msm8939_desc = {
+ .rpmpds = msm8939_rpmpds,
+ .num_pds = ARRAY_SIZE(msm8939_rpmpds),
+ .max_state = MAX_CORNER_RPMPD_STATE,
+};
+
+/* msm8916 RPM Power Domains */
+DEFINE_RPMPD_PAIR(msm8916, vddcx, vddcx_ao, SMPA, CORNER, 1);
+DEFINE_RPMPD_PAIR(msm8916, vddmx, vddmx_ao, LDOA, CORNER, 3);
+
+DEFINE_RPMPD_VFC(msm8916, vddcx_vfc, SMPA, 1);
+
+static struct rpmpd *msm8916_rpmpds[] = {
+ [MSM8916_VDDCX] = &msm8916_vddcx,
+ [MSM8916_VDDCX_AO] = &msm8916_vddcx_ao,
+ [MSM8916_VDDCX_VFC] = &msm8916_vddcx_vfc,
+ [MSM8916_VDDMX] = &msm8916_vddmx,
+ [MSM8916_VDDMX_AO] = &msm8916_vddmx_ao,
+};
+
+static const struct rpmpd_desc msm8916_desc = {
+ .rpmpds = msm8916_rpmpds,
+ .num_pds = ARRAY_SIZE(msm8916_rpmpds),
+ .max_state = MAX_CORNER_RPMPD_STATE,
+};
+
/* msm8976 RPM Power Domains */
DEFINE_RPMPD_PAIR(msm8976, vddcx, vddcx_ao, SMPA, LEVEL, 2);
DEFINE_RPMPD_PAIR(msm8976, vddmx, vddmx_ao, SMPA, LEVEL, 6);
@@ -159,7 +205,7 @@ static struct rpmpd *msm8996_rpmpds[] = {
static const struct rpmpd_desc msm8996_desc = {
.rpmpds = msm8996_rpmpds,
.num_pds = ARRAY_SIZE(msm8996_rpmpds),
- .max_state = MAX_8996_RPMPD_STATE,
+ .max_state = MAX_CORNER_RPMPD_STATE,
};
/* msm8998 RPM Power domains */
@@ -220,11 +266,46 @@ static const struct rpmpd_desc qcs404_desc = {
.max_state = RPM_SMD_LEVEL_BINNING,
};
+/* sdm660 RPM Power domains */
+DEFINE_RPMPD_PAIR(sdm660, vddcx, vddcx_ao, RWCX, LEVEL, 0);
+DEFINE_RPMPD_VFL(sdm660, vddcx_vfl, RWCX, 0);
+
+DEFINE_RPMPD_PAIR(sdm660, vddmx, vddmx_ao, RWMX, LEVEL, 0);
+DEFINE_RPMPD_VFL(sdm660, vddmx_vfl, RWMX, 0);
+
+DEFINE_RPMPD_LEVEL(sdm660, vdd_ssccx, RWLC, 0);
+DEFINE_RPMPD_VFL(sdm660, vdd_ssccx_vfl, RWLC, 0);
+
+DEFINE_RPMPD_LEVEL(sdm660, vdd_sscmx, RWLM, 0);
+DEFINE_RPMPD_VFL(sdm660, vdd_sscmx_vfl, RWLM, 0);
+
+static struct rpmpd *sdm660_rpmpds[] = {
+ [SDM660_VDDCX] = &sdm660_vddcx,
+ [SDM660_VDDCX_AO] = &sdm660_vddcx_ao,
+ [SDM660_VDDCX_VFL] = &sdm660_vddcx_vfl,
+ [SDM660_VDDMX] = &sdm660_vddmx,
+ [SDM660_VDDMX_AO] = &sdm660_vddmx_ao,
+ [SDM660_VDDMX_VFL] = &sdm660_vddmx_vfl,
+ [SDM660_SSCCX] = &sdm660_vdd_ssccx,
+ [SDM660_SSCCX_VFL] = &sdm660_vdd_ssccx_vfl,
+ [SDM660_SSCMX] = &sdm660_vdd_sscmx,
+ [SDM660_SSCMX_VFL] = &sdm660_vdd_sscmx_vfl,
+};
+
+static const struct rpmpd_desc sdm660_desc = {
+ .rpmpds = sdm660_rpmpds,
+ .num_pds = ARRAY_SIZE(sdm660_rpmpds),
+ .max_state = RPM_SMD_LEVEL_TURBO,
+};
+
static const struct of_device_id rpmpd_match_table[] = {
+ { .compatible = "qcom,msm8916-rpmpd", .data = &msm8916_desc },
+ { .compatible = "qcom,msm8939-rpmpd", .data = &msm8939_desc },
{ .compatible = "qcom,msm8976-rpmpd", .data = &msm8976_desc },
{ .compatible = "qcom,msm8996-rpmpd", .data = &msm8996_desc },
{ .compatible = "qcom,msm8998-rpmpd", .data = &msm8998_desc },
{ .compatible = "qcom,qcs404-rpmpd", .data = &qcs404_desc },
+ { .compatible = "qcom,sdm660-rpmpd", .data = &sdm660_desc },
{ }
};
MODULE_DEVICE_TABLE(of, rpmpd_match_table);
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
index 28c19bcb2f20..7251827bac88 100644
--- a/drivers/soc/qcom/smem.c
+++ b/drivers/soc/qcom/smem.c
@@ -122,7 +122,7 @@ struct smem_global_entry {
* @free_offset: index of the first unallocated byte in smem
* @available: number of bytes available for allocation
* @reserved: reserved field, must be 0
- * toc: array of references to items
+ * @toc: array of references to items
*/
struct smem_header {
struct smem_proc_comm proc_comm[4];
@@ -255,6 +255,7 @@ struct smem_region {
* processor/host
* @cacheline: list of cacheline sizes for each host
* @item_count: max accepted item number
+ * @socinfo: platform device pointer
* @num_regions: number of @regions
* @regions: list of the memory regions defining the shared memory
*/
diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c
index 07183d731d74..2df488333be9 100644
--- a/drivers/soc/qcom/smp2p.c
+++ b/drivers/soc/qcom/smp2p.c
@@ -112,6 +112,7 @@ struct smp2p_entry {
* struct qcom_smp2p - device driver context
* @dev: device driver handle
* @in: pointer to the inbound smem item
+ * @out: pointer to the outbound smem item
* @smem_items: ids of the two smem items
* @valid_entries: already scanned inbound entries
* @local_pid: processor id of the inbound edge
@@ -318,15 +319,16 @@ static int qcom_smp2p_inbound_entry(struct qcom_smp2p *smp2p,
static int smp2p_update_bits(void *data, u32 mask, u32 value)
{
struct smp2p_entry *entry = data;
+ unsigned long flags;
u32 orig;
u32 val;
- spin_lock(&entry->lock);
+ spin_lock_irqsave(&entry->lock, flags);
val = orig = readl(entry->value);
val &= ~mask;
val |= value;
writel(val, entry->value);
- spin_unlock(&entry->lock);
+ spin_unlock_irqrestore(&entry->lock, flags);
if (val != orig)
qcom_smp2p_kick(entry->smp2p);
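
The smp2p locking fix switches to the IRQ-safe lock variants. The rule it applies: if a spinlock can ever be taken in interrupt context, every acquisition must disable local interrupts, otherwise an IRQ landing while the lock is held deadlocks that CPU. A minimal illustration:

    static DEFINE_SPINLOCK(demo_lock);

    static void demo_update(u32 *reg, u32 mask, u32 value)
    {
            unsigned long flags;

            spin_lock_irqsave(&demo_lock, flags);
            *reg = (*reg & ~mask) | value;  /* safe against local IRQs */
            spin_unlock_irqrestore(&demo_lock, flags);
    }
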
diff --git a/drivers/soc/qcom/smsm.c b/drivers/soc/qcom/smsm.c
index 70c3c90b997c..1d3d5e3ec2b0 100644
--- a/drivers/soc/qcom/smsm.c
+++ b/drivers/soc/qcom/smsm.c
@@ -130,7 +130,7 @@ struct smsm_host {
/**
* smsm_update_bits() - change bit in outgoing entry and inform subscribers
* @data: smsm context pointer
- * @offset: bit in the entry
+ * @mask: value mask
* @value: new value
*
* Used to set and clear the bits in the outgoing/local entry and inform
@@ -254,10 +254,8 @@ static void smsm_mask_irq(struct irq_data *irqd)
* smsm_unmask_irq() - subscribe to cascades of IRQs of a certain status bit
* @irqd: IRQ handle to be unmasked
*
-
* This subscribes the local CPU to interrupts upon changes to the defined
* status bit. The bit is also marked for cascading.
-
*/
static void smsm_unmask_irq(struct irq_data *irqd)
{
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index b44ede48decc..d21530d24253 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -218,13 +218,19 @@ static const struct soc_id soc_id[] = {
{ 251, "MSM8992" },
{ 253, "APQ8094" },
{ 291, "APQ8096" },
+ { 293, "MSM8953" },
+ { 304, "APQ8053" },
{ 305, "MSM8996SG" },
{ 310, "MSM8996AU" },
{ 311, "APQ8096AU" },
{ 312, "APQ8096SG" },
{ 318, "SDM630" },
{ 321, "SDM845" },
+ { 338, "SDM450" },
{ 341, "SDA845" },
+ { 349, "SDM632" },
+ { 350, "SDA632" },
+ { 351, "SDA450" },
{ 356, "SM8250" },
{ 402, "IPQ6018" },
{ 425, "SC7180" },
diff --git a/drivers/soc/qcom/wcnss_ctrl.c b/drivers/soc/qcom/wcnss_ctrl.c
index e5c68051fb17..32bed249f90e 100644
--- a/drivers/soc/qcom/wcnss_ctrl.c
+++ b/drivers/soc/qcom/wcnss_ctrl.c
@@ -68,9 +68,8 @@ struct wcnss_msg_hdr {
u32 len;
} __packed;
-/**
+/*
* struct wcnss_version_resp - version request response
- * @hdr: common packet wcnss_msg_hdr header
*/
struct wcnss_version_resp {
struct wcnss_msg_hdr hdr;
@@ -108,9 +107,11 @@ struct wcnss_download_nv_resp {
/**
* wcnss_ctrl_smd_callback() - handler from SMD responses
- * @channel: smd channel handle
+ * @rpdev: remote processor message device pointer
* @data: pointer to the incoming data packet
* @count: size of the incoming data packet
+ * @priv: unused
+ * @addr: unused
*
* Handles any incoming packets from the remote WCNSS_CTRL service.
*/
@@ -267,6 +268,7 @@ free_req:
* @wcnss: wcnss handle, retrieved from drvdata
* @name: SMD channel name
* @cb: callback to handle incoming data on the channel
+ * @priv: private data for use in the call-back
*/
struct rpmsg_endpoint *qcom_wcnss_open_channel(void *wcnss, const char *name, rpmsg_rx_cb_t cb, void *priv)
{
diff --git a/drivers/soc/renesas/rmobile-sysc.c b/drivers/soc/renesas/rmobile-sysc.c
index 54b616ad4a62..9046b8c933cb 100644
--- a/drivers/soc/renesas/rmobile-sysc.c
+++ b/drivers/soc/renesas/rmobile-sysc.c
@@ -57,19 +57,19 @@ static int rmobile_pd_power_down(struct generic_pm_domain *genpd)
return ret;
}
- if (__raw_readl(rmobile_pd->base + PSTR) & mask) {
+ if (readl(rmobile_pd->base + PSTR) & mask) {
unsigned int retry_count;
- __raw_writel(mask, rmobile_pd->base + SPDCR);
+ writel(mask, rmobile_pd->base + SPDCR);
for (retry_count = PSTR_RETRIES; retry_count; retry_count--) {
- if (!(__raw_readl(rmobile_pd->base + SPDCR) & mask))
+ if (!(readl(rmobile_pd->base + SPDCR) & mask))
break;
cpu_relax();
}
}
pr_debug("%s: Power off, 0x%08x -> PSTR = 0x%08x\n", genpd->name, mask,
- __raw_readl(rmobile_pd->base + PSTR));
+ readl(rmobile_pd->base + PSTR));
return 0;
}
@@ -80,13 +80,13 @@ static int __rmobile_pd_power_up(struct rmobile_pm_domain *rmobile_pd)
unsigned int retry_count;
int ret = 0;
- if (__raw_readl(rmobile_pd->base + PSTR) & mask)
+ if (readl(rmobile_pd->base + PSTR) & mask)
return ret;
- __raw_writel(mask, rmobile_pd->base + SWUCR);
+ writel(mask, rmobile_pd->base + SWUCR);
for (retry_count = 2 * PSTR_RETRIES; retry_count; retry_count--) {
- if (!(__raw_readl(rmobile_pd->base + SWUCR) & mask))
+ if (!(readl(rmobile_pd->base + SWUCR) & mask))
break;
if (retry_count > PSTR_RETRIES)
udelay(PSTR_DELAY_US);
@@ -98,7 +98,7 @@ static int __rmobile_pd_power_up(struct rmobile_pm_domain *rmobile_pd)
pr_debug("%s: Power on, 0x%08x -> PSTR = 0x%08x\n",
rmobile_pd->genpd.name, mask,
- __raw_readl(rmobile_pd->base + PSTR));
+ readl(rmobile_pd->base + PSTR));
return ret;
}
@@ -327,6 +327,7 @@ static int __init rmobile_init_pm_domains(void)
pmd = of_get_child_by_name(np, "pm-domains");
if (!pmd) {
+ iounmap(base);
pr_warn("%pOF lacks pm-domains node\n", np);
continue;
}
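
The rmobile conversion swaps the __raw_* accessors for readl()/writel(), which add the I/O memory barriers and fixed little-endian byte order the raw variants lack. After the change, a status poll reads like this sketch (PSTR is the power status register offset already used by this driver):

    static bool pd_is_off(void __iomem *base, u32 mask)
    {
            return !(readl(base + PSTR) & mask);    /* ordered MMIO read */
    }
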
diff --git a/drivers/soc/rockchip/io-domain.c b/drivers/soc/rockchip/io-domain.c
index eece97f97ef8..cf8182fc3642 100644
--- a/drivers/soc/rockchip/io-domain.c
+++ b/drivers/soc/rockchip/io-domain.c
@@ -53,9 +53,6 @@
struct rockchip_iodomain;
-/**
- * @supplies: voltage settings matching the register bits.
- */
struct rockchip_iodomain_soc_data {
int grf_offset;
const char *supply_names[MAX_SUPPLIES];
@@ -547,6 +544,7 @@ static int rockchip_iodomain_probe(struct platform_device *pdev)
if (uV < 0) {
dev_err(iod->dev, "Can't determine voltage: %s\n",
supply_name);
+ ret = uV;
goto unreg_notify;
}
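
The one-line fix above matters because the shared unreg_notify unwind path returns "ret", which otherwise still holds the value left over from an earlier, successful call. A sketch of the general pattern, with hypothetical variable names:

int ret, uV;

uV = regulator_get_voltage(reg);	/* negative errno on failure */
if (uV < 0) {
	ret = uV;	/* load the real error code before jumping */
	goto unreg_notify;
}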
diff --git a/drivers/soc/samsung/exynos-chipid.c b/drivers/soc/samsung/exynos-chipid.c
index 8d4d05086906..1a76eade2ed6 100644
--- a/drivers/soc/samsung/exynos-chipid.c
+++ b/drivers/soc/samsung/exynos-chipid.c
@@ -20,6 +20,7 @@ static const struct exynos_soc_id {
const char *name;
unsigned int id;
} soc_ids[] = {
+ /* List ordered by SoC name */
{ "EXYNOS3250", 0xE3472000 },
{ "EXYNOS4210", 0x43200000 }, /* EVT0 revision */
{ "EXYNOS4210", 0x43210000 },
@@ -29,10 +30,10 @@ static const struct exynos_soc_id {
{ "EXYNOS5260", 0xE5260000 },
{ "EXYNOS5410", 0xE5410000 },
{ "EXYNOS5420", 0xE5420000 },
+ { "EXYNOS5433", 0xE5433000 },
{ "EXYNOS5440", 0xE5440000 },
{ "EXYNOS5800", 0xE5422000 },
{ "EXYNOS7420", 0xE7420000 },
- { "EXYNOS5433", 0xE5433000 },
};
static const char * __init product_id_to_soc_id(unsigned int product_id)
@@ -98,9 +99,9 @@ static int __init exynos_chipid_early_init(void)
goto err;
}
- /* it is too early to use dev_info() here (soc_dev is NULL) */
- pr_info("soc soc0: Exynos: CPU[%s] PRO_ID[0x%x] REV[0x%x] Detected\n",
- soc_dev_attr->soc_id, product_id, revision);
+ dev_info(soc_device_to_device(soc_dev),
+ "Exynos: CPU[%s] PRO_ID[0x%x] REV[0x%x] Detected\n",
+ soc_dev_attr->soc_id, product_id, revision);
return 0;
@@ -110,4 +111,4 @@ err:
return ret;
}
-early_initcall(exynos_chipid_early_init);
+arch_initcall(exynos_chipid_early_init);
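
Moving from early_initcall() to arch_initcall() delays the probe until soc_device_register() can hand back a usable struct device, which is what lets the pr_info() above become a dev_info(). For reference, the initcall buckets run in this order:

/*
 * early_initcall()     - run before SMP bring-up, ahead of the levels below
 * pure_initcall()      - level 0
 * core_initcall()      - level 1
 * postcore_initcall()  - level 2
 * arch_initcall()      - level 3
 * subsys_initcall()    - level 4
 * fs_initcall()        - level 5
 * device_initcall()    - level 6
 * late_initcall()      - level 7
 */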
diff --git a/drivers/soc/samsung/exynos-pmu.c b/drivers/soc/samsung/exynos-pmu.c
index 17304fa18429..a18c93a4646c 100644
--- a/drivers/soc/samsung/exynos-pmu.c
+++ b/drivers/soc/samsung/exynos-pmu.c
@@ -8,6 +8,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/mfd/core.h>
#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
@@ -97,6 +98,10 @@ static const struct of_device_id exynos_pmu_of_device_ids[] = {
{ /*sentinel*/ },
};
+static const struct mfd_cell exynos_pmu_devs[] = {
+ { .name = "exynos-clkout", },
+};
+
struct regmap *exynos_get_pmu_regmap(void)
{
struct device_node *np = of_find_matching_node(NULL,
@@ -110,6 +115,7 @@ EXPORT_SYMBOL_GPL(exynos_get_pmu_regmap);
static int exynos_pmu_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ int ret;
pmu_base_addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pmu_base_addr))
@@ -128,6 +134,11 @@ static int exynos_pmu_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pmu_context);
+ ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE, exynos_pmu_devs,
+ ARRAY_SIZE(exynos_pmu_devs), NULL, 0, NULL);
+ if (ret)
+ return ret;
+
if (devm_of_platform_populate(dev))
dev_err(dev, "Error populating children, reboot and poweroff might not work properly\n");
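
devm_mfd_add_devices() registers each mfd_cell as a child platform device and removes it automatically on driver detach. A minimal sketch of the call, assuming a hypothetical cell list:

static const struct mfd_cell example_cells[] = {
	{ .name = "example-clkout", },
};

ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE, example_cells,
			   ARRAY_SIZE(example_cells),
			   NULL /* mem_base */, 0 /* irq_base */,
			   NULL /* irq domain */);
if (ret)
	return ret;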
diff --git a/drivers/soc/samsung/exynos5422-asv.c b/drivers/soc/samsung/exynos5422-asv.c
index 01bb3050d678..ca409a976e34 100644
--- a/drivers/soc/samsung/exynos5422-asv.c
+++ b/drivers/soc/samsung/exynos5422-asv.c
@@ -383,7 +383,7 @@ static int __asv_offset_voltage(unsigned int index)
return 25000;
default:
return 0;
- };
+ }
}
static void exynos5422_asv_offset_voltage_setup(struct exynos_asv *asv)
diff --git a/drivers/soc/samsung/s3c-pm-check.c b/drivers/soc/samsung/s3c-pm-check.c
index ff3e099fc208..439d5c372512 100644
--- a/drivers/soc/samsung/s3c-pm-check.c
+++ b/drivers/soc/samsung/s3c-pm-check.c
@@ -151,7 +151,7 @@ static inline int in_region(void *ptr, int size, void *what, size_t whatsz)
/**
* s3c_pm_runcheck() - helper to check a resource on restore.
* @res: The resource to check
- * @vak: Pointer to list of CRC32 values to check.
+ * @val: Pointer to list of CRC32 values to check.
*
* Called from the s3c_pm_check_restore() via s3c_pm_run_sysram(), this
* function runs the given memory resource checking it against the stored
diff --git a/drivers/soc/sunxi/Kconfig b/drivers/soc/sunxi/Kconfig
index f10fd6cae13e..1fef0e711056 100644
--- a/drivers/soc/sunxi/Kconfig
+++ b/drivers/soc/sunxi/Kconfig
@@ -2,6 +2,14 @@
#
# Allwinner sunXi SoC drivers
#
+
+config SUNXI_MBUS
+ bool
+ default ARCH_SUNXI
+ help
+ Say y to enable the fixups needed to support the Allwinner
+ MBUS DMA quirks.
+
config SUNXI_SRAM
bool
default ARCH_SUNXI
diff --git a/drivers/soc/sunxi/Makefile b/drivers/soc/sunxi/Makefile
index 7816fbbec387..549159571d4f 100644
--- a/drivers/soc/sunxi/Makefile
+++ b/drivers/soc/sunxi/Makefile
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_SUNXI_MBUS) += sunxi_mbus.o
obj-$(CONFIG_SUNXI_SRAM) += sunxi_sram.o
diff --git a/drivers/soc/sunxi/sunxi_mbus.c b/drivers/soc/sunxi/sunxi_mbus.c
new file mode 100644
index 000000000000..d90e4a264b6f
--- /dev/null
+++ b/drivers/soc/sunxi/sunxi_mbus.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2020 Maxime Ripard <maxime@cerno.tech> */
+
+#include <linux/device.h>
+#include <linux/dma-map-ops.h>
+#include <linux/init.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+static const char * const sunxi_mbus_devices[] = {
+ /*
+ * The display engine virtual devices are not strictly speaking
+ * connected to the MBUS, but since DRM will perform all the
+ * memory allocations and DMA operations through that device, we
+ * need to have the quirk on those devices too.
+ */
+ "allwinner,sun4i-a10-display-engine",
+ "allwinner,sun5i-a10s-display-engine",
+ "allwinner,sun5i-a13-display-engine",
+ "allwinner,sun6i-a31-display-engine",
+ "allwinner,sun6i-a31s-display-engine",
+ "allwinner,sun7i-a20-display-engine",
+ "allwinner,sun8i-a23-display-engine",
+ "allwinner,sun8i-a33-display-engine",
+ "allwinner,sun9i-a80-display-engine",
+
+ /*
+ * And now we have the regular devices connected to the MBUS
+ * (that we know of).
+ */
+ "allwinner,sun4i-a10-csi1",
+ "allwinner,sun4i-a10-display-backend",
+ "allwinner,sun4i-a10-display-frontend",
+ "allwinner,sun4i-a10-video-engine",
+ "allwinner,sun5i-a13-display-backend",
+ "allwinner,sun5i-a13-video-engine",
+ "allwinner,sun6i-a31-csi",
+ "allwinner,sun6i-a31-display-backend",
+ "allwinner,sun7i-a20-csi0",
+ "allwinner,sun7i-a20-display-backend",
+ "allwinner,sun7i-a20-display-frontend",
+ "allwinner,sun7i-a20-video-engine",
+ "allwinner,sun8i-a23-display-backend",
+ "allwinner,sun8i-a23-display-frontend",
+ "allwinner,sun8i-a33-display-backend",
+ "allwinner,sun8i-a33-display-frontend",
+ "allwinner,sun8i-a33-video-engine",
+ "allwinner,sun8i-a83t-csi",
+ "allwinner,sun8i-h3-csi",
+ "allwinner,sun8i-h3-video-engine",
+ "allwinner,sun8i-v3s-csi",
+ "allwinner,sun9i-a80-display-backend",
+ "allwinner,sun50i-a64-csi",
+ "allwinner,sun50i-a64-video-engine",
+ "allwinner,sun50i-h5-video-engine",
+ NULL,
+};
+
+static int sunxi_mbus_notifier(struct notifier_block *nb,
+ unsigned long event, void *__dev)
+{
+ struct device *dev = __dev;
+ int ret;
+
+ if (event != BUS_NOTIFY_ADD_DEVICE)
+ return NOTIFY_DONE;
+
+ /*
+ * Only the devices that need a large memory bandwidth do DMA
+ * directly over the memory bus (called MBUS), instead of going
+ * through the regular system bus.
+ */
+ if (!of_device_compatible_match(dev->of_node, sunxi_mbus_devices))
+ return NOTIFY_DONE;
+
+ /*
+ * Devices with an interconnects property have the MBUS
+ * relationship described in their DT and dealt with by
+ * of_dma_configure, so we can just skip them.
+ *
+	 * Older DTs, or SoCs that are not yet fully understood, still
+	 * need that DMA offset set, though.
+ */
+ if (of_find_property(dev->of_node, "interconnects", NULL))
+ return NOTIFY_DONE;
+
+ ret = dma_direct_set_offset(dev, PHYS_OFFSET, 0, SZ_4G);
+ if (ret)
+ dev_err(dev, "Couldn't setup our DMA offset: %d\n", ret);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block sunxi_mbus_nb = {
+ .notifier_call = sunxi_mbus_notifier,
+};
+
+static const char * const sunxi_mbus_platforms[] __initconst = {
+ "allwinner,sun4i-a10",
+ "allwinner,sun5i-a10s",
+ "allwinner,sun5i-a13",
+ "allwinner,sun6i-a31",
+ "allwinner,sun7i-a20",
+ "allwinner,sun8i-a23",
+ "allwinner,sun8i-a33",
+ "allwinner,sun8i-a83t",
+ "allwinner,sun8i-h3",
+ "allwinner,sun8i-r40",
+ "allwinner,sun8i-v3",
+ "allwinner,sun8i-v3s",
+ "allwinner,sun9i-a80",
+ "allwinner,sun50i-a64",
+ "allwinner,sun50i-h5",
+ "nextthing,gr8",
+ NULL,
+};
+
+static int __init sunxi_mbus_init(void)
+{
+ if (!of_device_compatible_match(of_root, sunxi_mbus_platforms))
+ return 0;
+
+ bus_register_notifier(&platform_bus_type, &sunxi_mbus_nb);
+ return 0;
+}
+arch_initcall(sunxi_mbus_init);
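
The new file installs a bus notifier so every matching platform device gets a fixed DMA offset before it can allocate buffers. The effect of the dma_direct_set_offset() call, in short:

/*
 * dma_direct_set_offset(dev, PHYS_OFFSET, 0, SZ_4G) installs a single
 * bus offset for this device:
 *
 *     dma_addr = phys_addr - PHYS_OFFSET
 *
 * so with, say, PHYS_OFFSET = 0x40000000, a buffer at physical address
 * 0x41000000 is handed to the MBUS master as 0x01000000. (That
 * PHYS_OFFSET value is illustrative; it is SoC-specific.)
 */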
diff --git a/drivers/soc/tegra/fuse/speedo-tegra124.c b/drivers/soc/tegra/fuse/speedo-tegra124.c
index bdbf76bb184f..5b1ee28e4272 100644
--- a/drivers/soc/tegra/fuse/speedo-tegra124.c
+++ b/drivers/soc/tegra/fuse/speedo-tegra124.c
@@ -101,8 +101,7 @@ static void __init rev_sku_to_speedo_ids(struct tegra_sku_info *sku_info,
void __init tegra124_init_speedo_data(struct tegra_sku_info *sku_info)
{
- int i, threshold, cpu_speedo_0_value, soc_speedo_0_value;
- int cpu_iddq_value, gpu_iddq_value, soc_iddq_value;
+ int i, threshold, soc_speedo_0_value;
BUILD_BUG_ON(ARRAY_SIZE(cpu_process_speedos) !=
THRESHOLD_INDEX_COUNT);
@@ -111,25 +110,17 @@ void __init tegra124_init_speedo_data(struct tegra_sku_info *sku_info)
BUILD_BUG_ON(ARRAY_SIZE(soc_process_speedos) !=
THRESHOLD_INDEX_COUNT);
- cpu_speedo_0_value = tegra_fuse_read_early(FUSE_CPU_SPEEDO_0);
-
- /* GPU Speedo is stored in CPU_SPEEDO_2 */
- sku_info->gpu_speedo_value = tegra_fuse_read_early(FUSE_CPU_SPEEDO_2);
-
- soc_speedo_0_value = tegra_fuse_read_early(FUSE_SOC_SPEEDO_0);
-
- cpu_iddq_value = tegra_fuse_read_early(FUSE_CPU_IDDQ);
- soc_iddq_value = tegra_fuse_read_early(FUSE_SOC_IDDQ);
- gpu_iddq_value = tegra_fuse_read_early(FUSE_GPU_IDDQ);
-
- sku_info->cpu_speedo_value = cpu_speedo_0_value;
-
+ sku_info->cpu_speedo_value = tegra_fuse_read_early(FUSE_CPU_SPEEDO_0);
if (sku_info->cpu_speedo_value == 0) {
pr_warn("Tegra Warning: Speedo value not fused.\n");
WARN_ON(1);
return;
}
+ /* GPU Speedo is stored in CPU_SPEEDO_2 */
+ sku_info->gpu_speedo_value = tegra_fuse_read_early(FUSE_CPU_SPEEDO_2);
+ soc_speedo_0_value = tegra_fuse_read_early(FUSE_SOC_SPEEDO_0);
+
rev_sku_to_speedo_ids(sku_info, &threshold);
sku_info->cpu_iddq_value = tegra_fuse_read_early(FUSE_CPU_IDDQ);
diff --git a/drivers/soc/tegra/fuse/speedo-tegra210.c b/drivers/soc/tegra/fuse/speedo-tegra210.c
index 70d3f6e1aa33..695d0b7f9a8a 100644
--- a/drivers/soc/tegra/fuse/speedo-tegra210.c
+++ b/drivers/soc/tegra/fuse/speedo-tegra210.c
@@ -94,7 +94,7 @@ static int get_process_id(int value, const u32 *speedos, unsigned int num)
unsigned int i;
for (i = 0; i < num; i++)
- if (value < speedos[num])
+ if (value < speedos[i])
return i;
return -EINVAL;
@@ -102,7 +102,7 @@ static int get_process_id(int value, const u32 *speedos, unsigned int num)
void __init tegra210_init_speedo_data(struct tegra_sku_info *sku_info)
{
- int cpu_speedo[3], soc_speedo[3], cpu_iddq, gpu_iddq, soc_iddq;
+ int cpu_speedo[3], soc_speedo[3];
unsigned int index;
u8 speedo_revision;
@@ -122,10 +122,6 @@ void __init tegra210_init_speedo_data(struct tegra_sku_info *sku_info)
soc_speedo[1] = tegra_fuse_read_early(FUSE_SOC_SPEEDO_1);
soc_speedo[2] = tegra_fuse_read_early(FUSE_SOC_SPEEDO_2);
- cpu_iddq = tegra_fuse_read_early(FUSE_CPU_IDDQ) * 4;
- soc_iddq = tegra_fuse_read_early(FUSE_SOC_IDDQ) * 4;
- gpu_iddq = tegra_fuse_read_early(FUSE_GPU_IDDQ) * 5;
-
/*
* Determine CPU, GPU and SoC speedo values depending on speedo fusing
* revision. Note that GPU speedo value is fused in CPU_SPEEDO_2.
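
The one-character fix in get_process_id() above is the whole bug: indexing with "num" instead of "i" compared every value against one element past the end of the array. A sketch of the bucket lookup the corrected loop implements, assuming ascending thresholds:

static int example_bucket(int value, const u32 *thresholds,
			  unsigned int num)
{
	unsigned int i;

	/* first bucket whose (ascending) threshold the value is below */
	for (i = 0; i < num; i++)
		if (value < thresholds[i])
			return i;

	return -EINVAL;
}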
diff --git a/drivers/soc/ti/Kconfig b/drivers/soc/ti/Kconfig
index f5b82ffa637b..7e2fb1c16af1 100644
--- a/drivers/soc/ti/Kconfig
+++ b/drivers/soc/ti/Kconfig
@@ -1,22 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-# 64-bit ARM SoCs from TI
-if ARM64
-
-if ARCH_K3
-
-config ARCH_K3_AM6_SOC
- bool "K3 AM6 SoC"
- help
- Enable support for TI's AM6 SoC Family support
-
-config ARCH_K3_J721E_SOC
- bool "K3 J721E SoC"
- help
- Enable support for TI's J721E SoC Family support
-
-endif
-
-endif
#
# TI SOC drivers
diff --git a/drivers/soc/ti/k3-ringacc.c b/drivers/soc/ti/k3-ringacc.c
index 1147dc4c1d59..b495b0d5d0fa 100644
--- a/drivers/soc/ti/k3-ringacc.c
+++ b/drivers/soc/ti/k3-ringacc.c
@@ -11,6 +11,7 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/sys_soc.h>
+#include <linux/dma/ti-cppi5.h>
#include <linux/soc/ti/k3-ringacc.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <linux/soc/ti/ti_sci_inta_msi.h>
@@ -21,6 +22,7 @@ static LIST_HEAD(k3_ringacc_list);
static DEFINE_MUTEX(k3_ringacc_list_lock);
#define K3_RINGACC_CFG_RING_SIZE_ELCNT_MASK GENMASK(19, 0)
+#define K3_DMARING_CFG_RING_SIZE_ELCNT_MASK GENMASK(15, 0)
/**
* struct k3_ring_rt_regs - The RA realtime Control/Status Registers region
@@ -43,7 +45,13 @@ struct k3_ring_rt_regs {
u32 hwindx;
};
-#define K3_RINGACC_RT_REGS_STEP 0x1000
+#define K3_RINGACC_RT_REGS_STEP 0x1000
+#define K3_DMARING_RT_REGS_STEP 0x2000
+#define K3_DMARING_RT_REGS_REVERSE_OFS 0x1000
+#define K3_RINGACC_RT_OCC_MASK GENMASK(20, 0)
+#define K3_DMARING_RT_OCC_TDOWN_COMPLETE BIT(31)
+#define K3_DMARING_RT_DB_ENTRY_MASK GENMASK(7, 0)
+#define K3_DMARING_RT_DB_TDOWN_ACK BIT(31)
/**
* struct k3_ring_fifo_regs - The Ring Accelerator Queues Registers region
@@ -122,6 +130,7 @@ struct k3_ring_state {
u32 occ;
u32 windex;
u32 rindex;
+ u32 tdown_complete:1;
};
/**
@@ -137,10 +146,13 @@ struct k3_ring_state {
* @elm_size: Size of the ring element
* @mode: Ring mode
* @flags: flags
+ * @state: Ring state
* @ring_id: Ring Id
* @parent: Pointer on struct @k3_ringacc
* @use_count: Use count for shared rings
* @proxy_id: RA Ring Proxy Id (only if @K3_RINGACC_RING_USE_PROXY)
+ * @dma_dev: device to be used for DMA API (allocation, mapping)
+ * @asel: Address Space Select value for physical addresses
*/
struct k3_ring {
struct k3_ring_rt_regs __iomem *rt;
@@ -155,11 +167,15 @@ struct k3_ring {
u32 flags;
#define K3_RING_FLAG_BUSY BIT(1)
#define K3_RING_FLAG_SHARED BIT(2)
+#define K3_RING_FLAG_REVERSE BIT(3)
struct k3_ring_state state;
u32 ring_id;
struct k3_ringacc *parent;
u32 use_count;
int proxy_id;
+ struct device *dma_dev;
+ u32 asel;
+#define K3_ADDRESS_ASEL_SHIFT 48
};
struct k3_ringacc_ops {
@@ -185,6 +201,7 @@ struct k3_ringacc_ops {
* @tisci_ring_ops: ti-sci rings ops
* @tisci_dev_id: ti-sci device id
* @ops: SoC specific ringacc operation
 + * @dma_rings: indicates DMA rings (dual rings within BCDMA/PKTDMA)
*/
struct k3_ringacc {
struct device *dev;
@@ -207,6 +224,7 @@ struct k3_ringacc {
u32 tisci_dev_id;
const struct k3_ringacc_ops *ops;
+ bool dma_rings;
};
/**
@@ -218,6 +236,21 @@ struct k3_ringacc_soc_data {
unsigned dma_ring_reset_quirk:1;
};
+static int k3_ringacc_ring_read_occ(struct k3_ring *ring)
+{
+ return readl(&ring->rt->occ) & K3_RINGACC_RT_OCC_MASK;
+}
+
+static void k3_ringacc_ring_update_occ(struct k3_ring *ring)
+{
+ u32 val;
+
+ val = readl(&ring->rt->occ);
+
+ ring->state.occ = val & K3_RINGACC_RT_OCC_MASK;
+ ring->state.tdown_complete = !!(val & K3_DMARING_RT_OCC_TDOWN_COMPLETE);
+}
+
static long k3_ringacc_ring_get_fifo_pos(struct k3_ring *ring)
{
return K3_RINGACC_FIFO_WINDOW_SIZE_BYTES -
@@ -231,12 +264,24 @@ static void *k3_ringacc_get_elm_addr(struct k3_ring *ring, u32 idx)
static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_pop_mem(struct k3_ring *ring, void *elem);
+static int k3_dmaring_fwd_pop(struct k3_ring *ring, void *elem);
+static int k3_dmaring_reverse_pop(struct k3_ring *ring, void *elem);
static struct k3_ring_ops k3_ring_mode_ring_ops = {
.push_tail = k3_ringacc_ring_push_mem,
.pop_head = k3_ringacc_ring_pop_mem,
};
+static struct k3_ring_ops k3_dmaring_fwd_ops = {
+ .push_tail = k3_ringacc_ring_push_mem,
+ .pop_head = k3_dmaring_fwd_pop,
+};
+
+static struct k3_ring_ops k3_dmaring_reverse_ops = {
+ /* Reverse side of the DMA ring can only be popped by SW */
+ .pop_head = k3_dmaring_reverse_pop,
+};
+
static int k3_ringacc_ring_push_io(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_pop_io(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_push_head_io(struct k3_ring *ring, void *elem);
@@ -339,6 +384,40 @@ error:
}
EXPORT_SYMBOL_GPL(k3_ringacc_request_ring);
+static int k3_dmaring_request_dual_ring(struct k3_ringacc *ringacc, int fwd_id,
+ struct k3_ring **fwd_ring,
+ struct k3_ring **compl_ring)
+{
+ int ret = 0;
+
+ /*
+	 * DMA rings must be requested by ID; the completion ring is the
+	 * reverse side of the forward ring
+ */
+ if (fwd_id < 0)
+ return -EINVAL;
+
+ mutex_lock(&ringacc->req_lock);
+
+ if (test_bit(fwd_id, ringacc->rings_inuse)) {
+ ret = -EBUSY;
+ goto error;
+ }
+
+ *fwd_ring = &ringacc->rings[fwd_id];
+ *compl_ring = &ringacc->rings[fwd_id + ringacc->num_rings];
+ set_bit(fwd_id, ringacc->rings_inuse);
+ ringacc->rings[fwd_id].use_count++;
+ dev_dbg(ringacc->dev, "Giving ring#%d\n", fwd_id);
+
+ mutex_unlock(&ringacc->req_lock);
+ return 0;
+
+error:
+ mutex_unlock(&ringacc->req_lock);
+ return ret;
+}
+
int k3_ringacc_request_rings_pair(struct k3_ringacc *ringacc,
int fwd_id, int compl_id,
struct k3_ring **fwd_ring,
@@ -349,6 +428,10 @@ int k3_ringacc_request_rings_pair(struct k3_ringacc *ringacc,
if (!fwd_ring || !compl_ring)
return -EINVAL;
+ if (ringacc->dma_rings)
+ return k3_dmaring_request_dual_ring(ringacc, fwd_id,
+ fwd_ring, compl_ring);
+
*fwd_ring = k3_ringacc_request_ring(ringacc, fwd_id, 0);
if (!(*fwd_ring))
return -ENODEV;
@@ -365,20 +448,16 @@ EXPORT_SYMBOL_GPL(k3_ringacc_request_rings_pair);
static void k3_ringacc_ring_reset_sci(struct k3_ring *ring)
{
+ struct ti_sci_msg_rm_ring_cfg ring_cfg = { 0 };
struct k3_ringacc *ringacc = ring->parent;
int ret;
- ret = ringacc->tisci_ring_ops->config(
- ringacc->tisci,
- TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID,
- ringacc->tisci_dev_id,
- ring->ring_id,
- 0,
- 0,
- ring->size,
- 0,
- 0,
- 0);
+ ring_cfg.nav_id = ringacc->tisci_dev_id;
+ ring_cfg.index = ring->ring_id;
+ ring_cfg.valid_params = TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID;
+ ring_cfg.count = ring->size;
+
+ ret = ringacc->tisci_ring_ops->set_cfg(ringacc->tisci, &ring_cfg);
if (ret)
dev_err(ringacc->dev, "TISCI reset ring fail (%d) ring_idx %d\n",
ret, ring->ring_id);
@@ -398,20 +477,16 @@ EXPORT_SYMBOL_GPL(k3_ringacc_ring_reset);
static void k3_ringacc_ring_reconfig_qmode_sci(struct k3_ring *ring,
enum k3_ring_mode mode)
{
+ struct ti_sci_msg_rm_ring_cfg ring_cfg = { 0 };
struct k3_ringacc *ringacc = ring->parent;
int ret;
- ret = ringacc->tisci_ring_ops->config(
- ringacc->tisci,
- TI_SCI_MSG_VALUE_RM_RING_MODE_VALID,
- ringacc->tisci_dev_id,
- ring->ring_id,
- 0,
- 0,
- 0,
- mode,
- 0,
- 0);
+ ring_cfg.nav_id = ringacc->tisci_dev_id;
+ ring_cfg.index = ring->ring_id;
+ ring_cfg.valid_params = TI_SCI_MSG_VALUE_RM_RING_MODE_VALID;
+ ring_cfg.mode = mode;
+
+ ret = ringacc->tisci_ring_ops->set_cfg(ringacc->tisci, &ring_cfg);
if (ret)
dev_err(ringacc->dev, "TISCI reconf qmode fail (%d) ring_idx %d\n",
ret, ring->ring_id);
@@ -426,7 +501,7 @@ void k3_ringacc_ring_reset_dma(struct k3_ring *ring, u32 occ)
goto reset;
if (!occ)
- occ = readl(&ring->rt->occ);
+ occ = k3_ringacc_ring_read_occ(ring);
if (occ) {
u32 db_ring_cnt, db_ring_cnt_cur;
@@ -478,20 +553,15 @@ EXPORT_SYMBOL_GPL(k3_ringacc_ring_reset_dma);
static void k3_ringacc_ring_free_sci(struct k3_ring *ring)
{
+ struct ti_sci_msg_rm_ring_cfg ring_cfg = { 0 };
struct k3_ringacc *ringacc = ring->parent;
int ret;
- ret = ringacc->tisci_ring_ops->config(
- ringacc->tisci,
- TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER,
- ringacc->tisci_dev_id,
- ring->ring_id,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0);
+ ring_cfg.nav_id = ringacc->tisci_dev_id;
+ ring_cfg.index = ring->ring_id;
+ ring_cfg.valid_params = TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER;
+
+ ret = ringacc->tisci_ring_ops->set_cfg(ringacc->tisci, &ring_cfg);
if (ret)
dev_err(ringacc->dev, "TISCI ring free fail (%d) ring_idx %d\n",
ret, ring->ring_id);
@@ -506,6 +576,13 @@ int k3_ringacc_ring_free(struct k3_ring *ring)
ringacc = ring->parent;
+ /*
+	 * DMA rings share memory and configuration; only the forward ring
+	 * is configured, and the reverse ring is treated as its slave.
+ */
+ if (ringacc->dma_rings && (ring->flags & K3_RING_FLAG_REVERSE))
+ return 0;
+
dev_dbg(ring->parent->dev, "flags: 0x%08x\n", ring->flags);
if (!test_bit(ring->ring_id, ringacc->rings_inuse))
@@ -521,11 +598,14 @@ int k3_ringacc_ring_free(struct k3_ring *ring)
k3_ringacc_ring_free_sci(ring);
- dma_free_coherent(ringacc->dev,
+ dma_free_coherent(ring->dma_dev,
ring->size * (4 << ring->elm_size),
ring->ring_mem_virt, ring->ring_mem_dma);
ring->flags = 0;
ring->ops = NULL;
+ ring->dma_dev = NULL;
+ ring->asel = 0;
+
if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED) {
clear_bit(ring->proxy_id, ringacc->proxy_inuse);
ring->proxy = NULL;
@@ -575,32 +655,115 @@ EXPORT_SYMBOL_GPL(k3_ringacc_get_ring_irq_num);
static int k3_ringacc_ring_cfg_sci(struct k3_ring *ring)
{
+ struct ti_sci_msg_rm_ring_cfg ring_cfg = { 0 };
struct k3_ringacc *ringacc = ring->parent;
- u32 ring_idx;
int ret;
if (!ringacc->tisci)
return -EINVAL;
- ring_idx = ring->ring_id;
- ret = ringacc->tisci_ring_ops->config(
- ringacc->tisci,
- TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER,
- ringacc->tisci_dev_id,
- ring_idx,
- lower_32_bits(ring->ring_mem_dma),
- upper_32_bits(ring->ring_mem_dma),
- ring->size,
- ring->mode,
- ring->elm_size,
- 0);
+ ring_cfg.nav_id = ringacc->tisci_dev_id;
+ ring_cfg.index = ring->ring_id;
+ ring_cfg.valid_params = TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER;
+ ring_cfg.addr_lo = lower_32_bits(ring->ring_mem_dma);
+ ring_cfg.addr_hi = upper_32_bits(ring->ring_mem_dma);
+ ring_cfg.count = ring->size;
+ ring_cfg.mode = ring->mode;
+ ring_cfg.size = ring->elm_size;
+ ring_cfg.asel = ring->asel;
+
+ ret = ringacc->tisci_ring_ops->set_cfg(ringacc->tisci, &ring_cfg);
if (ret)
dev_err(ringacc->dev, "TISCI config ring fail (%d) ring_idx %d\n",
- ret, ring_idx);
+ ret, ring->ring_id);
return ret;
}
+static int k3_dmaring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg)
+{
+ struct k3_ringacc *ringacc;
+ struct k3_ring *reverse_ring;
+ int ret = 0;
+
+ if (cfg->elm_size != K3_RINGACC_RING_ELSIZE_8 ||
+ cfg->mode != K3_RINGACC_RING_MODE_RING ||
+ cfg->size & ~K3_DMARING_CFG_RING_SIZE_ELCNT_MASK)
+ return -EINVAL;
+
+ ringacc = ring->parent;
+
+ /*
+	 * DMA rings share memory and configuration; only the forward ring
+	 * is configured, and the reverse ring is treated as its slave.
+ */
+ if (ringacc->dma_rings && (ring->flags & K3_RING_FLAG_REVERSE))
+ return 0;
+
+ if (!test_bit(ring->ring_id, ringacc->rings_inuse))
+ return -EINVAL;
+
+ ring->size = cfg->size;
+ ring->elm_size = cfg->elm_size;
+ ring->mode = cfg->mode;
+ ring->asel = cfg->asel;
+ ring->dma_dev = cfg->dma_dev;
+ if (!ring->dma_dev) {
+ dev_warn(ringacc->dev, "dma_dev is not provided for ring%d\n",
+ ring->ring_id);
+ ring->dma_dev = ringacc->dev;
+ }
+
+ memset(&ring->state, 0, sizeof(ring->state));
+
+ ring->ops = &k3_dmaring_fwd_ops;
+
+ ring->ring_mem_virt = dma_alloc_coherent(ring->dma_dev,
+ ring->size * (4 << ring->elm_size),
+ &ring->ring_mem_dma, GFP_KERNEL);
+ if (!ring->ring_mem_virt) {
+ dev_err(ringacc->dev, "Failed to alloc ring mem\n");
+ ret = -ENOMEM;
+ goto err_free_ops;
+ }
+
+ ret = k3_ringacc_ring_cfg_sci(ring);
+ if (ret)
+ goto err_free_mem;
+
+ ring->flags |= K3_RING_FLAG_BUSY;
+
+ k3_ringacc_ring_dump(ring);
+
+ /* DMA rings: configure reverse ring */
+ reverse_ring = &ringacc->rings[ring->ring_id + ringacc->num_rings];
+ reverse_ring->size = cfg->size;
+ reverse_ring->elm_size = cfg->elm_size;
+ reverse_ring->mode = cfg->mode;
+ reverse_ring->asel = cfg->asel;
+ memset(&reverse_ring->state, 0, sizeof(reverse_ring->state));
+ reverse_ring->ops = &k3_dmaring_reverse_ops;
+
+ reverse_ring->ring_mem_virt = ring->ring_mem_virt;
+ reverse_ring->ring_mem_dma = ring->ring_mem_dma;
+ reverse_ring->flags |= K3_RING_FLAG_BUSY;
+ k3_ringacc_ring_dump(reverse_ring);
+
+ return 0;
+
+err_free_mem:
+ dma_free_coherent(ring->dma_dev,
+ ring->size * (4 << ring->elm_size),
+ ring->ring_mem_virt,
+ ring->ring_mem_dma);
+err_free_ops:
+ ring->ops = NULL;
+ ring->proxy = NULL;
+ ring->dma_dev = NULL;
+ ring->asel = 0;
+ return ret;
+}
+
int k3_ringacc_ring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg)
{
struct k3_ringacc *ringacc;
@@ -608,8 +771,12 @@ int k3_ringacc_ring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg)
if (!ring || !cfg)
return -EINVAL;
+
ringacc = ring->parent;
+ if (ringacc->dma_rings)
+ return k3_dmaring_cfg(ring, cfg);
+
if (cfg->elm_size > K3_RINGACC_RING_ELSIZE_256 ||
cfg->mode >= K3_RINGACC_RING_MODE_INVALID ||
cfg->size & ~K3_RINGACC_CFG_RING_SIZE_ELCNT_MASK ||
@@ -648,8 +815,12 @@ int k3_ringacc_ring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg)
switch (ring->mode) {
case K3_RINGACC_RING_MODE_RING:
ring->ops = &k3_ring_mode_ring_ops;
+ ring->dma_dev = cfg->dma_dev;
+ if (!ring->dma_dev)
+ ring->dma_dev = ringacc->dev;
break;
case K3_RINGACC_RING_MODE_MESSAGE:
+ ring->dma_dev = ringacc->dev;
if (ring->proxy)
ring->ops = &k3_ring_mode_proxy_ops;
else
@@ -661,9 +832,9 @@ int k3_ringacc_ring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg)
goto err_free_proxy;
}
- ring->ring_mem_virt = dma_alloc_coherent(ringacc->dev,
- ring->size * (4 << ring->elm_size),
- &ring->ring_mem_dma, GFP_KERNEL);
+ ring->ring_mem_virt = dma_alloc_coherent(ring->dma_dev,
+ ring->size * (4 << ring->elm_size),
+ &ring->ring_mem_dma, GFP_KERNEL);
if (!ring->ring_mem_virt) {
dev_err(ringacc->dev, "Failed to alloc ring mem\n");
ret = -ENOMEM;
@@ -684,12 +855,13 @@ int k3_ringacc_ring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg)
return 0;
err_free_mem:
- dma_free_coherent(ringacc->dev,
+ dma_free_coherent(ring->dma_dev,
ring->size * (4 << ring->elm_size),
ring->ring_mem_virt,
ring->ring_mem_dma);
err_free_ops:
ring->ops = NULL;
+ ring->dma_dev = NULL;
err_free_proxy:
ring->proxy = NULL;
return ret;
@@ -711,7 +883,7 @@ u32 k3_ringacc_ring_get_free(struct k3_ring *ring)
return -EINVAL;
if (!ring->state.free)
- ring->state.free = ring->size - readl(&ring->rt->occ);
+ ring->state.free = ring->size - k3_ringacc_ring_read_occ(ring);
return ring->state.free;
}
@@ -722,7 +894,7 @@ u32 k3_ringacc_ring_get_occ(struct k3_ring *ring)
if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
return -EINVAL;
- return readl(&ring->rt->occ);
+ return k3_ringacc_ring_read_occ(ring);
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_occ);
@@ -898,6 +1070,72 @@ static int k3_ringacc_ring_pop_tail_io(struct k3_ring *ring, void *elem)
K3_RINGACC_ACCESS_MODE_POP_HEAD);
}
+/*
+ * The element is 48 bits of address + ASEL bits in the ring.
+ * ASEL is used by the DMAs and should be removed for the kernel as it is not
+ * part of the physical memory address.
+ */
+static void k3_dmaring_remove_asel_from_elem(u64 *elem)
+{
+ *elem &= GENMASK_ULL(K3_ADDRESS_ASEL_SHIFT - 1, 0);
+}
+
+static int k3_dmaring_fwd_pop(struct k3_ring *ring, void *elem)
+{
+ void *elem_ptr;
+ u32 elem_idx;
+
+ /*
+	 * DMA rings: the forward ring is always tied to a DMA channel, and
+	 * the HW does not maintain the state data required for a POP
+	 * operation, so it is unknown how many elements the HW has
+	 * consumed. To actually do a POP, the read pointer has to be
+	 * recalculated every time.
+ */
+ ring->state.occ = k3_ringacc_ring_read_occ(ring);
+ if (ring->state.windex >= ring->state.occ)
+ elem_idx = ring->state.windex - ring->state.occ;
+ else
+ elem_idx = ring->size - (ring->state.occ - ring->state.windex);
+
+ elem_ptr = k3_ringacc_get_elm_addr(ring, elem_idx);
+ memcpy(elem, elem_ptr, (4 << ring->elm_size));
+ k3_dmaring_remove_asel_from_elem(elem);
+
+ ring->state.occ--;
+ writel(-1, &ring->rt->db);
+
+ dev_dbg(ring->parent->dev, "%s: occ%d Windex%d Rindex%d pos_ptr%px\n",
+ __func__, ring->state.occ, ring->state.windex, elem_idx,
+ elem_ptr);
+ return 0;
+}
+
+static int k3_dmaring_reverse_pop(struct k3_ring *ring, void *elem)
+{
+ void *elem_ptr;
+
+ elem_ptr = k3_ringacc_get_elm_addr(ring, ring->state.rindex);
+
+ if (ring->state.occ) {
+ memcpy(elem, elem_ptr, (4 << ring->elm_size));
+ k3_dmaring_remove_asel_from_elem(elem);
+
+ ring->state.rindex = (ring->state.rindex + 1) % ring->size;
+ ring->state.occ--;
+ writel(-1 & K3_DMARING_RT_DB_ENTRY_MASK, &ring->rt->db);
+ } else if (ring->state.tdown_complete) {
+ dma_addr_t *value = elem;
+
+ *value = CPPI5_TDCM_MARKER;
+ writel(K3_DMARING_RT_DB_TDOWN_ACK, &ring->rt->db);
+ ring->state.tdown_complete = false;
+ }
+
+ dev_dbg(ring->parent->dev, "%s: occ%d index%d pos_ptr%px\n",
+ __func__, ring->state.occ, ring->state.rindex, elem_ptr);
+ return 0;
+}
+
static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem)
{
void *elem_ptr;
@@ -905,6 +1143,11 @@ static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem)
elem_ptr = k3_ringacc_get_elm_addr(ring, ring->state.windex);
memcpy(elem_ptr, elem, (4 << ring->elm_size));
+ if (ring->parent->dma_rings) {
+ u64 *addr = elem_ptr;
+
+ *addr |= ((u64)ring->asel << K3_ADDRESS_ASEL_SHIFT);
+ }
ring->state.windex = (ring->state.windex + 1) % ring->size;
ring->state.free--;
@@ -981,12 +1224,12 @@ int k3_ringacc_ring_pop(struct k3_ring *ring, void *elem)
return -EINVAL;
if (!ring->state.occ)
- ring->state.occ = k3_ringacc_ring_get_occ(ring);
+ k3_ringacc_ring_update_occ(ring);
dev_dbg(ring->parent->dev, "ring_pop: occ%d index%d\n", ring->state.occ,
ring->state.rindex);
- if (!ring->state.occ)
+ if (!ring->state.occ && !ring->state.tdown_complete)
return -ENODATA;
if (ring->ops && ring->ops->pop_head)
@@ -1004,7 +1247,7 @@ int k3_ringacc_ring_pop_tail(struct k3_ring *ring, void *elem)
return -EINVAL;
if (!ring->state.occ)
- ring->state.occ = k3_ringacc_ring_get_occ(ring);
+ k3_ringacc_ring_update_occ(ring);
dev_dbg(ring->parent->dev, "ring_pop_tail: occ%d index%d\n",
ring->state.occ, ring->state.rindex);
@@ -1209,6 +1452,68 @@ static const struct of_device_id k3_ringacc_of_match[] = {
{},
};
+struct k3_ringacc *k3_ringacc_dmarings_init(struct platform_device *pdev,
+ struct k3_ringacc_init_data *data)
+{
+ struct device *dev = &pdev->dev;
+ struct k3_ringacc *ringacc;
+ void __iomem *base_rt;
+ struct resource *res;
+ int i;
+
+ ringacc = devm_kzalloc(dev, sizeof(*ringacc), GFP_KERNEL);
+ if (!ringacc)
+ return ERR_PTR(-ENOMEM);
+
+ ringacc->dev = dev;
+ ringacc->dma_rings = true;
+ ringacc->num_rings = data->num_rings;
+ ringacc->tisci = data->tisci;
+ ringacc->tisci_dev_id = data->tisci_dev_id;
+
+ mutex_init(&ringacc->req_lock);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ringrt");
+ base_rt = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base_rt))
+ return ERR_CAST(base_rt);
+
+ ringacc->rings = devm_kzalloc(dev,
+ sizeof(*ringacc->rings) *
+ ringacc->num_rings * 2,
+ GFP_KERNEL);
+ ringacc->rings_inuse = devm_kcalloc(dev,
+ BITS_TO_LONGS(ringacc->num_rings),
+ sizeof(unsigned long), GFP_KERNEL);
+
+ if (!ringacc->rings || !ringacc->rings_inuse)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < ringacc->num_rings; i++) {
+ struct k3_ring *ring = &ringacc->rings[i];
+
+ ring->rt = base_rt + K3_DMARING_RT_REGS_STEP * i;
+ ring->parent = ringacc;
+ ring->ring_id = i;
+ ring->proxy_id = K3_RINGACC_PROXY_NOT_USED;
+
+ ring = &ringacc->rings[ringacc->num_rings + i];
+ ring->rt = base_rt + K3_DMARING_RT_REGS_STEP * i +
+ K3_DMARING_RT_REGS_REVERSE_OFS;
+ ring->parent = ringacc;
+ ring->ring_id = i;
+ ring->proxy_id = K3_RINGACC_PROXY_NOT_USED;
+ ring->flags = K3_RING_FLAG_REVERSE;
+ }
+
+ ringacc->tisci_ring_ops = &ringacc->tisci->ops.rm_ring_ops;
+
+ dev_info(dev, "Number of rings: %u\n", ringacc->num_rings);
+
+ return ringacc;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_dmarings_init);
+
static int k3_ringacc_probe(struct platform_device *pdev)
{
const struct ringacc_match_data *match_data;
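
The ASEL handling above packs an Address Space Select field into the top bits of each 64-bit ring element on push and strips it again on pop, since only the DMA masters interpret it. A minimal sketch of that packing, reusing K3_ADDRESS_ASEL_SHIFT (48) from this patch:

static u64 example_pack_elem(dma_addr_t addr, u32 asel)
{
	return (addr & GENMASK_ULL(K3_ADDRESS_ASEL_SHIFT - 1, 0)) |
	       ((u64)asel << K3_ADDRESS_ASEL_SHIFT);
}

static dma_addr_t example_unpack_elem(u64 elem)
{
	return elem & GENMASK_ULL(K3_ADDRESS_ASEL_SHIFT - 1, 0);
}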
diff --git a/drivers/soc/ti/k3-socinfo.c b/drivers/soc/ti/k3-socinfo.c
index bbbc2d2b7091..fd91129de6e5 100644
--- a/drivers/soc/ti/k3-socinfo.c
+++ b/drivers/soc/ti/k3-socinfo.c
@@ -40,6 +40,7 @@ static const struct k3_soc_id {
{ 0xBB5A, "AM65X" },
{ 0xBB64, "J721E" },
{ 0xBB6D, "J7200" },
+ { 0xBB38, "AM64X" }
};
static int
diff --git a/drivers/soc/ti/knav_dma.c b/drivers/soc/ti/knav_dma.c
index 8c863ecb1c60..7b5cb5d48f7d 100644
--- a/drivers/soc/ti/knav_dma.c
+++ b/drivers/soc/ti/knav_dma.c
@@ -500,7 +500,7 @@ EXPORT_SYMBOL_GPL(knav_dma_open_channel);
/**
* knav_dma_close_channel() - Destroy a dma channel
*
- * channel: dma channel handle
+ * @channel: dma channel handle
*
*/
void knav_dma_close_channel(void *channel)
@@ -749,8 +749,9 @@ static int knav_dma_probe(struct platform_device *pdev)
pm_runtime_enable(kdev->dev);
ret = pm_runtime_get_sync(kdev->dev);
if (ret < 0) {
+ pm_runtime_put_noidle(kdev->dev);
dev_err(kdev->dev, "unable to enable pktdma, err %d\n", ret);
- return ret;
+ goto err_pm_disable;
}
/* Initialise all packet dmas */
@@ -764,7 +765,8 @@ static int knav_dma_probe(struct platform_device *pdev)
if (list_empty(&kdev->list)) {
dev_err(dev, "no valid dma instance\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_put_sync;
}
debugfs_create_file("knav_dma", S_IFREG | S_IRUGO, NULL, NULL,
@@ -772,6 +774,13 @@ static int knav_dma_probe(struct platform_device *pdev)
device_ready = true;
return ret;
+
+err_put_sync:
+ pm_runtime_put_sync(kdev->dev);
+err_pm_disable:
+ pm_runtime_disable(kdev->dev);
+
+ return ret;
}
static int knav_dma_remove(struct platform_device *pdev)
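
The error-path rework above follows the standard pm_runtime probe idiom: pm_runtime_get_sync() increments the usage count even when it fails, so the failure path must drop that count with pm_runtime_put_noidle(), and later failures must unwind in reverse order. A condensed sketch:

pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
	pm_runtime_put_noidle(dev);	/* balance the usage count */
	goto err_pm_disable;
}

/* ... device setup; on failure: goto err_put_sync ... */

return 0;

err_put_sync:
	pm_runtime_put_sync(dev);
err_pm_disable:
	pm_runtime_disable(dev);
	return ret;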
diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c
index a460f201bf8e..2e521f1eda96 100644
--- a/drivers/soc/ti/knav_qmss_queue.c
+++ b/drivers/soc/ti/knav_qmss_queue.c
@@ -79,7 +79,7 @@ EXPORT_SYMBOL_GPL(knav_qmss_device_ready);
/**
 * knav_queue_notify: qmss queue notifier call
*
- * @inst: qmss queue instance like accumulator
+ * @inst: - qmss queue instance like accumulator
*/
void knav_queue_notify(struct knav_queue_inst *inst)
{
@@ -511,10 +511,10 @@ static int knav_queue_flush(struct knav_queue *qh)
/**
* knav_queue_open() - open a hardware queue
- * @name - name to give the queue handle
- * @id - desired queue number if any or specifes the type
+ * @name: - name to give the queue handle
+ * @id: - desired queue number if any or specifies the type
* of queue
- * @flags - the following flags are applicable to queues:
+ * @flags: - the following flags are applicable to queues:
* KNAV_QUEUE_SHARED - allow the queue to be shared. Queues are
* exclusive by default.
* Subsequent attempts to open a shared queue should
@@ -545,7 +545,7 @@ EXPORT_SYMBOL_GPL(knav_queue_open);
/**
* knav_queue_close() - close a hardware queue handle
- * @qh - handle to close
+ * @qhandle: - handle to close
*/
void knav_queue_close(void *qhandle)
{
@@ -572,9 +572,9 @@ EXPORT_SYMBOL_GPL(knav_queue_close);
/**
* knav_queue_device_control() - Perform control operations on a queue
- * @qh - queue handle
- * @cmd - control commands
- * @arg - command argument
+ * @qhandle: - queue handle
+ * @cmd: - control commands
+ * @arg: - command argument
*
* Returns 0 on success, errno otherwise.
*/
@@ -623,10 +623,10 @@ EXPORT_SYMBOL_GPL(knav_queue_device_control);
/**
* knav_queue_push() - push data (or descriptor) to the tail of a queue
- * @qh - hardware queue handle
- * @data - data to push
- * @size - size of data to push
- * @flags - can be used to pass additional information
+ * @qhandle: - hardware queue handle
+ * @dma: - DMA data to push
+ * @size: - size of data to push
+ * @flags: - can be used to pass additional information
*
* Returns 0 on success, errno otherwise.
*/
@@ -646,8 +646,8 @@ EXPORT_SYMBOL_GPL(knav_queue_push);
/**
* knav_queue_pop() - pop data (or descriptor) from the head of a queue
- * @qh - hardware queue handle
- * @size - (optional) size of the data pop'ed.
+ * @qhandle: - hardware queue handle
+ * @size: - (optional) size of the data pop'ed.
*
* Returns a DMA address on success, 0 on failure.
*/
@@ -746,9 +746,9 @@ EXPORT_SYMBOL_GPL(knav_pool_desc_dma_to_virt);
/**
* knav_pool_create() - Create a pool of descriptors
- * @name - name to give the pool handle
- * @num_desc - numbers of descriptors in the pool
- * @region_id - QMSS region id from which the descriptors are to be
+ * @name: - name to give the pool handle
+ * @num_desc: - numbers of descriptors in the pool
+ * @region_id: - QMSS region id from which the descriptors are to be
* allocated.
*
* Returns a pool handle on success.
@@ -856,7 +856,7 @@ EXPORT_SYMBOL_GPL(knav_pool_create);
/**
* knav_pool_destroy() - Free a pool of descriptors
- * @pool - pool handle
+ * @ph: - pool handle
*/
void knav_pool_destroy(void *ph)
{
@@ -884,7 +884,7 @@ EXPORT_SYMBOL_GPL(knav_pool_destroy);
/**
* knav_pool_desc_get() - Get a descriptor from the pool
- * @pool - pool handle
+ * @ph: - pool handle
*
* Returns descriptor from the pool.
*/
@@ -905,7 +905,8 @@ EXPORT_SYMBOL_GPL(knav_pool_desc_get);
/**
* knav_pool_desc_put() - return a descriptor to the pool
- * @pool - pool handle
+ * @ph: - pool handle
+ * @desc: - virtual address
*/
void knav_pool_desc_put(void *ph, void *desc)
{
@@ -918,11 +919,11 @@ EXPORT_SYMBOL_GPL(knav_pool_desc_put);
/**
* knav_pool_desc_map() - Map descriptor for DMA transfer
- * @pool - pool handle
- * @desc - address of descriptor to map
- * @size - size of descriptor to map
- * @dma - DMA address return pointer
- * @dma_sz - adjusted return pointer
+ * @ph: - pool handle
+ * @desc: - address of descriptor to map
+ * @size: - size of descriptor to map
+ * @dma: - DMA address return pointer
+ * @dma_sz: - adjusted return pointer
*
* Returns 0 on success, errno otherwise.
*/
@@ -945,9 +946,9 @@ EXPORT_SYMBOL_GPL(knav_pool_desc_map);
/**
* knav_pool_desc_unmap() - Unmap descriptor after DMA transfer
- * @pool - pool handle
- * @dma - DMA address of descriptor to unmap
- * @dma_sz - size of descriptor to unmap
+ * @ph: - pool handle
+ * @dma: - DMA address of descriptor to unmap
+ * @dma_sz: - size of descriptor to unmap
*
 * Returns descriptor address on success; use IS_ERR_OR_NULL() to identify
* error values on return.
@@ -968,7 +969,7 @@ EXPORT_SYMBOL_GPL(knav_pool_desc_unmap);
/**
* knav_pool_count() - Get the number of descriptors in pool.
- * @pool - pool handle
+ * @ph: - pool handle
* Returns number of elements in the pool.
*/
int knav_pool_count(void *ph)
@@ -1307,12 +1308,11 @@ static int knav_setup_queue_pools(struct knav_device *kdev,
struct device_node *queue_pools)
{
struct device_node *type, *range;
- int ret;
for_each_child_of_node(queue_pools, type) {
for_each_child_of_node(type, range) {
- ret = knav_setup_queue_range(kdev, range);
/* return value ignored, we init the rest... */
+ knav_setup_queue_range(kdev, range);
}
}
@@ -1784,6 +1784,7 @@ static int knav_queue_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0) {
+ pm_runtime_put_noidle(&pdev->dev);
dev_err(dev, "Failed to enable QMSS\n");
return ret;
}
@@ -1851,9 +1852,10 @@ static int knav_queue_probe(struct platform_device *pdev)
if (ret)
goto err;
- regions = of_get_child_by_name(node, "descriptor-regions");
+ regions = of_get_child_by_name(node, "descriptor-regions");
if (!regions) {
dev_err(dev, "descriptor-regions not specified\n");
+ ret = -ENODEV;
goto err;
}
ret = knav_queue_setup_regions(kdev, regions);
diff --git a/drivers/soc/ti/omap_prm.c b/drivers/soc/ti/omap_prm.c
index 980b04c38fd9..bf1468e5bccb 100644
--- a/drivers/soc/ti/omap_prm.c
+++ b/drivers/soc/ti/omap_prm.c
@@ -7,6 +7,7 @@
*/
#include <linux/kernel.h>
+#include <linux/clk.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/iopoll.h>
@@ -14,6 +15,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
#include <linux/reset-controller.h>
#include <linux/delay.h>
@@ -41,6 +43,7 @@ struct omap_prm_domain {
u16 pwrstst;
const struct omap_prm_domain_map *cap;
u32 pwrstctrl_saved;
+ unsigned int uses_pm_clk:1;
};
struct omap_rst_map {
@@ -121,6 +124,16 @@ static const struct omap_prm_domain_map omap_prm_onoff_noauto = {
.statechange = 1,
};
+static const struct omap_prm_domain_map omap_prm_alwon = {
+ .usable_modes = BIT(OMAP_PRMD_ON_ACTIVE),
+};
+
+static const struct omap_prm_domain_map omap_prm_reton = {
+ .usable_modes = BIT(OMAP_PRMD_ON_ACTIVE) | BIT(OMAP_PRMD_RETENTION),
+ .statechange = 1,
+ .logicretstate = 1,
+};
+
static const struct omap_rst_map rst_map_0[] = {
{ .rst = 0, .st = 0 },
{ .rst = -1 },
@@ -140,39 +153,237 @@ static const struct omap_rst_map rst_map_012[] = {
};
static const struct omap_prm_data omap4_prm_data[] = {
- { .name = "tesla", .base = 0x4a306400, .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01 },
+ {
+ .name = "mpu", .base = 0x4a306300,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_reton,
+ },
+ {
+ .name = "tesla", .base = 0x4a306400,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_noinact,
+ .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01
+ },
{
.name = "abe", .base = 0x4a306500,
.pwrstctrl = 0, .pwrstst = 0x4, .dmap = &omap_prm_all,
},
- { .name = "core", .base = 0x4a306700, .rstctrl = 0x210, .rstst = 0x214, .clkdm_name = "ducati", .rstmap = rst_map_012 },
- { .name = "ivahd", .base = 0x4a306f00, .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_012 },
- { .name = "device", .base = 0x4a307b00, .rstctrl = 0x0, .rstst = 0x4, .rstmap = rst_map_01, .flags = OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_NO_CLKDM },
+ {
+ .name = "always_on_core", .base = 0x4a306600,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon,
+ },
+ {
+ .name = "core", .base = 0x4a306700,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_reton,
+ .rstctrl = 0x210, .rstst = 0x214, .clkdm_name = "ducati",
+ .rstmap = rst_map_012
+ },
+ {
+ .name = "ivahd", .base = 0x4a306f00,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_noinact,
+ .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_012
+ },
+ {
+ .name = "cam", .base = 0x4a307000,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ },
+ {
+ .name = "dss", .base = 0x4a307100,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_noinact
+ },
+ {
+ .name = "gfx", .base = 0x4a307200,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto
+ },
+ {
+ .name = "l3init", .base = 0x4a307300,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_reton
+ },
+ {
+ .name = "l4per", .base = 0x4a307400,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_reton
+ },
+ {
+ .name = "cefuse", .base = 0x4a307600,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto
+ },
+ {
+ .name = "wkup", .base = 0x4a307700,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon
+ },
+ {
+ .name = "emu", .base = 0x4a307900,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto
+ },
+ {
+ .name = "device", .base = 0x4a307b00,
+ .rstctrl = 0x0, .rstst = 0x4, .rstmap = rst_map_01,
+ .flags = OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_NO_CLKDM
+ },
{ },
};
static const struct omap_prm_data omap5_prm_data[] = {
- { .name = "dsp", .base = 0x4ae06400, .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01 },
+ {
+ .name = "mpu", .base = 0x4ae06300,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_reton,
+ },
+ {
+ .name = "dsp", .base = 0x4ae06400,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_noinact,
+ .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01
+ },
{
.name = "abe", .base = 0x4ae06500,
.pwrstctrl = 0, .pwrstst = 0x4, .dmap = &omap_prm_nooff,
},
- { .name = "core", .base = 0x4ae06700, .rstctrl = 0x210, .rstst = 0x214, .clkdm_name = "ipu", .rstmap = rst_map_012 },
- { .name = "iva", .base = 0x4ae07200, .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_012 },
- { .name = "device", .base = 0x4ae07c00, .rstctrl = 0x0, .rstst = 0x4, .rstmap = rst_map_01, .flags = OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_NO_CLKDM },
+ {
+ .name = "coreaon", .base = 0x4ae06600,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon
+ },
+ {
+ .name = "core", .base = 0x4ae06700,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_reton,
+ .rstctrl = 0x210, .rstst = 0x214, .clkdm_name = "ipu",
+ .rstmap = rst_map_012
+ },
+ {
+ .name = "iva", .base = 0x4ae07200,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_noinact,
+ .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_012
+ },
+ {
+ .name = "cam", .base = 0x4ae07300,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto
+ },
+ {
+ .name = "dss", .base = 0x4ae07400,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_noinact
+ },
+ {
+ .name = "gpu", .base = 0x4ae07500,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto
+ },
+ {
+ .name = "l3init", .base = 0x4ae07600,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_reton
+ },
+ {
+ .name = "custefuse", .base = 0x4ae07700,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto
+ },
+ {
+ .name = "wkupaon", .base = 0x4ae07800,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon
+ },
+ {
+ .name = "emu", .base = 0x4ae07a00,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto
+ },
+ {
+ .name = "device", .base = 0x4ae07c00,
+ .rstctrl = 0x0, .rstst = 0x4, .rstmap = rst_map_01,
+ .flags = OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_NO_CLKDM
+ },
{ },
};
static const struct omap_prm_data dra7_prm_data[] = {
- { .name = "dsp1", .base = 0x4ae06400, .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01 },
- { .name = "ipu", .base = 0x4ae06500, .rstctrl = 0x10, .rstst = 0x14, .clkdm_name = "ipu1", .rstmap = rst_map_012 },
- { .name = "core", .base = 0x4ae06700, .rstctrl = 0x210, .rstst = 0x214, .clkdm_name = "ipu2", .rstmap = rst_map_012 },
- { .name = "iva", .base = 0x4ae06f00, .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_012 },
- { .name = "dsp2", .base = 0x4ae07b00, .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01 },
- { .name = "eve1", .base = 0x4ae07b40, .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01 },
- { .name = "eve2", .base = 0x4ae07b80, .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01 },
- { .name = "eve3", .base = 0x4ae07bc0, .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01 },
- { .name = "eve4", .base = 0x4ae07c00, .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01 },
+ {
+ .name = "mpu", .base = 0x4ae06300,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_reton,
+ },
+ {
+ .name = "dsp1", .base = 0x4ae06400,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01,
+ },
+ {
+ .name = "ipu", .base = 0x4ae06500,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_012,
+ .clkdm_name = "ipu1"
+ },
+ {
+ .name = "coreaon", .base = 0x4ae06628,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon,
+ },
+ {
+ .name = "core", .base = 0x4ae06700,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon,
+ .rstctrl = 0x210, .rstst = 0x214, .rstmap = rst_map_012,
+ .clkdm_name = "ipu2"
+ },
+ {
+ .name = "iva", .base = 0x4ae06f00,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_012,
+ },
+ {
+ .name = "cam", .base = 0x4ae07000,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ },
+ {
+ .name = "dss", .base = 0x4ae07100,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ },
+ {
+ .name = "gpu", .base = 0x4ae07200,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ },
+ {
+ .name = "l3init", .base = 0x4ae07300,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon,
+ .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_012,
+ .clkdm_name = "pcie"
+ },
+ {
+ .name = "l4per", .base = 0x4ae07400,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon,
+ },
+ {
+ .name = "custefuse", .base = 0x4ae07600,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ },
+ {
+ .name = "wkupaon", .base = 0x4ae07724,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon,
+ },
+ {
+ .name = "emu", .base = 0x4ae07900,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ },
+ {
+ .name = "dsp2", .base = 0x4ae07b00,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01
+ },
+ {
+ .name = "eve1", .base = 0x4ae07b40,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01
+ },
+ {
+ .name = "eve2", .base = 0x4ae07b80,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01
+ },
+ {
+ .name = "eve3", .base = 0x4ae07bc0,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01
+ },
+ {
+ .name = "eve4", .base = 0x4ae07c00,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01
+ },
+ {
+ .name = "rtc", .base = 0x4ae07c60,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon,
+ },
+ {
+ .name = "vpe", .base = 0x4ae07c80,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ },
{ },
};
@@ -187,14 +398,40 @@ static const struct omap_rst_map am3_wkup_rst_map[] = {
};
static const struct omap_prm_data am3_prm_data[] = {
- { .name = "per", .base = 0x44e00c00, .rstctrl = 0x0, .rstmap = am3_per_rst_map, .flags = OMAP_PRM_HAS_RSTCTRL, .clkdm_name = "pruss_ocp" },
- { .name = "wkup", .base = 0x44e00d00, .rstctrl = 0x0, .rstst = 0xc, .rstmap = am3_wkup_rst_map, .flags = OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_NO_CLKDM },
- { .name = "device", .base = 0x44e00f00, .rstctrl = 0x0, .rstst = 0x8, .rstmap = rst_map_01, .flags = OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_NO_CLKDM },
+ {
+ .name = "per", .base = 0x44e00c00,
+ .pwrstctrl = 0xc, .pwrstst = 0x8, .dmap = &omap_prm_noinact,
+ .rstctrl = 0x0, .rstmap = am3_per_rst_map,
+ .flags = OMAP_PRM_HAS_RSTCTRL, .clkdm_name = "pruss_ocp"
+ },
+ {
+ .name = "wkup", .base = 0x44e00d00,
+ .pwrstctrl = 0x4, .pwrstst = 0x4, .dmap = &omap_prm_alwon,
+ .rstctrl = 0x0, .rstst = 0xc, .rstmap = am3_wkup_rst_map,
+ .flags = OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_NO_CLKDM
+ },
+ {
+ .name = "mpu", .base = 0x44e00e00,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_noinact,
+ },
+ {
+ .name = "device", .base = 0x44e00f00,
+ .rstctrl = 0x0, .rstst = 0x8, .rstmap = rst_map_01,
+ .flags = OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_NO_CLKDM
+ },
+ {
+ .name = "rtc", .base = 0x44e01000,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon,
+ },
{
.name = "gfx", .base = 0x44e01100,
.pwrstctrl = 0, .pwrstst = 0x10, .dmap = &omap_prm_noinact,
.rstctrl = 0x4, .rstst = 0x14, .rstmap = rst_map_0, .clkdm_name = "gfx_l3",
},
+ {
+ .name = "cefuse", .base = 0x44e01200,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ },
{ },
};
@@ -211,13 +448,43 @@ static const struct omap_rst_map am4_device_rst_map[] = {
static const struct omap_prm_data am4_prm_data[] = {
{
+ .name = "mpu", .base = 0x44df0300,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_noinact,
+ },
+ {
.name = "gfx", .base = 0x44df0400,
.pwrstctrl = 0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
.rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_0, .clkdm_name = "gfx_l3",
},
- { .name = "per", .base = 0x44df0800, .rstctrl = 0x10, .rstst = 0x14, .rstmap = am4_per_rst_map, .clkdm_name = "pruss_ocp" },
- { .name = "wkup", .base = 0x44df2000, .rstctrl = 0x10, .rstst = 0x14, .rstmap = am3_wkup_rst_map, .flags = OMAP_PRM_HAS_NO_CLKDM },
- { .name = "device", .base = 0x44df4000, .rstctrl = 0x0, .rstst = 0x4, .rstmap = am4_device_rst_map, .flags = OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_NO_CLKDM },
+ {
+ .name = "rtc", .base = 0x44df0500,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon,
+ },
+ {
+ .name = "tamper", .base = 0x44df0600,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon,
+ },
+ {
+ .name = "cefuse", .base = 0x44df0700,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ },
+ {
+ .name = "per", .base = 0x44df0800,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_noinact,
+ .rstctrl = 0x10, .rstst = 0x14, .rstmap = am4_per_rst_map,
+ .clkdm_name = "pruss_ocp"
+ },
+ {
+ .name = "wkup", .base = 0x44df2000,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon,
+ .rstctrl = 0x10, .rstst = 0x14, .rstmap = am3_wkup_rst_map,
+ .flags = OMAP_PRM_HAS_NO_CLKDM
+ },
+ {
+ .name = "device", .base = 0x44df4000,
+ .rstctrl = 0x0, .rstst = 0x4, .rstmap = am4_device_rst_map,
+ .flags = OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_NO_CLKDM
+ },
{ },
};
@@ -325,6 +592,38 @@ static int omap_prm_domain_power_off(struct generic_pm_domain *domain)
return 0;
}
+/*
+ * Note that ti-sysc already manages the module clocks separately so
+ * no need to manage those. Interconnect instances need clocks managed
+ * for simple-pm-bus.
+ */
+static int omap_prm_domain_attach_clock(struct device *dev,
+ struct omap_prm_domain *prmd)
+{
+ struct device_node *np = dev->of_node;
+ int error;
+
+ if (!of_device_is_compatible(np, "simple-pm-bus"))
+ return 0;
+
+ if (!of_property_read_bool(np, "clocks"))
+ return 0;
+
+ error = pm_clk_create(dev);
+ if (error)
+ return error;
+
+ error = of_pm_clk_add_clks(dev);
+ if (error < 0) {
+ pm_clk_destroy(dev);
+ return error;
+ }
+
+ prmd->uses_pm_clk = 1;
+
+ return 0;
+}
+
static int omap_prm_domain_attach_dev(struct generic_pm_domain *domain,
struct device *dev)
{
@@ -349,6 +648,10 @@ static int omap_prm_domain_attach_dev(struct generic_pm_domain *domain,
genpd_data = dev_gpd_data(dev);
genpd_data->data = NULL;
+ ret = omap_prm_domain_attach_clock(dev, prmd);
+ if (ret)
+ return ret;
+
return 0;
}
@@ -356,7 +659,11 @@ static void omap_prm_domain_detach_dev(struct generic_pm_domain *domain,
struct device *dev)
{
struct generic_pm_domain_data *genpd_data;
+ struct omap_prm_domain *prmd;
+ prmd = genpd_to_prm_domain(domain);
+ if (prmd->uses_pm_clk)
+ pm_clk_destroy(dev);
genpd_data = dev_gpd_data(dev);
genpd_data->data = NULL;
}
@@ -393,6 +700,7 @@ static int omap_prm_domain_init(struct device *dev, struct omap_prm *prm)
prmd->pd.power_off = omap_prm_domain_power_off;
prmd->pd.attach_dev = omap_prm_domain_attach_dev;
prmd->pd.detach_dev = omap_prm_domain_detach_dev;
+ prmd->pd.flags = GENPD_FLAG_PM_CLK;
pm_genpd_init(&prmd->pd, NULL, true);
error = of_genpd_add_provider_simple(np, &prmd->pd);
@@ -484,6 +792,10 @@ static int omap_reset_deassert(struct reset_controller_dev *rcdev,
struct ti_prm_platform_data *pdata = dev_get_platdata(reset->dev);
int ret = 0;
+ /* Nothing to do if the reset is already deasserted */
+ if (!omap_reset_status(rcdev, id))
+ return 0;
+
has_rstst = reset->prm->data->rstst ||
(reset->prm->data->flags & OMAP_PRM_HAS_RSTST);
@@ -548,6 +860,7 @@ static int omap_prm_reset_init(struct platform_device *pdev,
const struct omap_rst_map *map;
struct ti_prm_platform_data *pdata = dev_get_platdata(&pdev->dev);
char buf[32];
+ u32 v;
/*
* Check if we have controllable resets. If either rstctrl is non-zero
@@ -595,6 +908,16 @@ static int omap_prm_reset_init(struct platform_device *pdev,
map++;
}
+ /* Quirk handling to assert rst_map_012 bits on reset and avoid errors */
+ if (prm->data->rstmap == rst_map_012) {
+ v = readl_relaxed(reset->prm->base + reset->prm->data->rstctrl);
+ if ((v & reset->mask) != reset->mask) {
+ dev_dbg(&pdev->dev, "Asserting all resets: %08x\n", v);
+ writel_relaxed(reset->mask, reset->prm->base +
+ reset->prm->data->rstctrl);
+ }
+ }
+
return devm_reset_controller_register(&pdev->dev, &reset->rcdev);
}
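
GENPD_FLAG_PM_CLK makes the genpd core run pm_clk_suspend()/pm_clk_resume() around runtime PM transitions, so once attach has populated the per-device clock list, the interconnect clocks are gated together with the domain. The attach-side pattern used above, condensed:

error = pm_clk_create(dev);
if (error)
	return error;

/*
 * of_pm_clk_add_clks() returns the number of clocks added, or a
 * negative errno, hence the '< 0' check.
 */
error = of_pm_clk_add_clks(dev);
if (error < 0) {
	pm_clk_destroy(dev);
	return error;
}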
diff --git a/drivers/soc/ti/pm33xx.c b/drivers/soc/ti/pm33xx.c
index d2f5e7001a93..64f3e3105540 100644
--- a/drivers/soc/ti/pm33xx.c
+++ b/drivers/soc/ti/pm33xx.c
@@ -19,6 +19,7 @@
#include <linux/of_address.h>
#include <linux/platform_data/pm33xx.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/rtc.h>
#include <linux/rtc/rtc-omap.h>
#include <linux/sizes.h>
@@ -135,13 +136,11 @@ static int am33xx_push_sram_idle(void)
static int am33xx_do_sram_idle(u32 wfi_flags)
{
- int ret = 0;
-
if (!m3_ipc || !pm_ops)
return 0;
if (wfi_flags & WFI_FLAG_WAKE_M3)
- ret = m3_ipc->ops->prepare_low_power(m3_ipc, WKUP_M3_IDLE);
+ m3_ipc->ops->prepare_low_power(m3_ipc, WKUP_M3_IDLE);
return pm_ops->cpu_suspend(am33xx_do_wfi_sram, wfi_flags);
}
@@ -555,16 +554,26 @@ static int am33xx_pm_probe(struct platform_device *pdev)
suspend_wfi_flags |= WFI_FLAG_WAKE_M3;
#endif /* CONFIG_SUSPEND */
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(dev);
+ goto err_pm_runtime_disable;
+ }
+
ret = pm_ops->init(am33xx_do_sram_idle);
if (ret) {
dev_err(dev, "Unable to call core pm init!\n");
ret = -ENODEV;
- goto err_put_wkup_m3_ipc;
+ goto err_pm_runtime_put;
}
return 0;
-err_put_wkup_m3_ipc:
+err_pm_runtime_put:
+ pm_runtime_put_sync(dev);
+err_pm_runtime_disable:
+ pm_runtime_disable(dev);
wkup_m3_ipc_put(m3_ipc);
err_free_sram:
am33xx_pm_free_sram();
@@ -574,6 +583,8 @@ err_free_sram:
static int am33xx_pm_remove(struct platform_device *pdev)
{
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
if (pm_ops->deinit)
pm_ops->deinit();
suspend_set_ops(NULL);
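
Distilled from the two pm33xx hunks above, a minimal sketch of the runtime-PM pairing being introduced (standard <linux/pm_runtime.h> calls; the probe/remove names are placeholders). Note that a failed pm_runtime_get_sync() still raises the usage count, hence the put_noidle on the error path.

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int example_probe(struct platform_device *pdev)
{
	int ret;

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		/* get_sync bumped the usage count even on failure */
		pm_runtime_put_noidle(&pdev->dev);
		pm_runtime_disable(&pdev->dev);
		return ret;
	}
	return 0;
}

static int example_remove(struct platform_device *pdev)
{
	/* balance the get and the enable from probe, in that order */
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}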
diff --git a/drivers/soc/ti/pruss.c b/drivers/soc/ti/pruss.c
index cc0b4ad7a3d3..5d6e7132a5c4 100644
--- a/drivers/soc/ti/pruss.c
+++ b/drivers/soc/ti/pruss.c
@@ -126,8 +126,6 @@ static int pruss_clk_init(struct pruss *pruss, struct device_node *cfg_node)
int ret = 0;
data = of_device_get_match_data(dev);
- if (IS_ERR(data))
- return -ENODEV;
clks_np = of_get_child_by_name(cfg_node, "clocks");
if (!clks_np) {
@@ -175,10 +173,6 @@ static int pruss_probe(struct platform_device *pdev)
const char *mem_names[PRUSS_MEM_MAX] = { "dram0", "dram1", "shrdram2" };
data = of_device_get_match_data(&pdev->dev);
- if (IS_ERR(data)) {
- dev_err(dev, "missing private data\n");
- return -ENODEV;
- }
ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
if (ret) {
diff --git a/drivers/soc/ti/ti_sci_inta_msi.c b/drivers/soc/ti/ti_sci_inta_msi.c
index 0eb9462f609e..a1d9c027022a 100644
--- a/drivers/soc/ti/ti_sci_inta_msi.c
+++ b/drivers/soc/ti/ti_sci_inta_msi.c
@@ -89,6 +89,18 @@ static int ti_sci_inta_msi_alloc_descs(struct device *dev,
list_add_tail(&msi_desc->list, dev_to_msi_list(dev));
count++;
}
+ for (i = 0; i < res->desc[set].num_sec; i++) {
+ msi_desc = alloc_msi_entry(dev, 1, NULL);
+ if (!msi_desc) {
+ ti_sci_inta_msi_free_descs(dev);
+ return -ENOMEM;
+ }
+
+ msi_desc->inta.dev_index = res->desc[set].start_sec + i;
+ INIT_LIST_HEAD(&msi_desc->list);
+ list_add_tail(&msi_desc->list, dev_to_msi_list(dev));
+ count++;
+ }
}
return count;
diff --git a/drivers/soc/ti/wkup_m3_ipc.c b/drivers/soc/ti/wkup_m3_ipc.c
index e9ece45d7a33..c3e2161df732 100644
--- a/drivers/soc/ti/wkup_m3_ipc.c
+++ b/drivers/soc/ti/wkup_m3_ipc.c
@@ -218,6 +218,7 @@ static int wkup_m3_is_available(struct wkup_m3_ipc *m3_ipc)
/* Public functions */
/**
* wkup_m3_set_mem_type - Pass wkup_m3 which type of memory is in use
+ * @m3_ipc: Pointer to wkup_m3_ipc context
* @mem_type: memory type value read directly from emif
*
* wkup_m3 must know what memory type is in use to properly suspend
@@ -230,6 +231,7 @@ static void wkup_m3_set_mem_type(struct wkup_m3_ipc *m3_ipc, int mem_type)
/**
* wkup_m3_set_resume_address - Pass wkup_m3 resume address
+ * @m3_ipc: Pointer to wkup_m3_ipc context
* @addr: Physical address from which resume code should execute
*/
static void wkup_m3_set_resume_address(struct wkup_m3_ipc *m3_ipc, void *addr)
@@ -239,6 +241,7 @@ static void wkup_m3_set_resume_address(struct wkup_m3_ipc *m3_ipc, void *addr)
/**
* wkup_m3_request_pm_status - Retrieve wkup_m3 status code after suspend
+ * @m3_ipc: Pointer to wkup_m3_ipc context
*
* Returns code representing the status of a low power mode transition.
* 0 - Successful transition
@@ -260,6 +263,7 @@ static int wkup_m3_request_pm_status(struct wkup_m3_ipc *m3_ipc)
/**
* wkup_m3_prepare_low_power - Request preparation for transition to
* low power state
+ * @m3_ipc: Pointer to wkup_m3_ipc context
* @state: A kernel suspend state to enter, either MEM or STANDBY
*
* Returns 0 if preparation was successful, otherwise returns error code
@@ -315,6 +319,7 @@ static int wkup_m3_prepare_low_power(struct wkup_m3_ipc *m3_ipc, int state)
/**
* wkup_m3_finish_low_power - Return m3 to reset state
+ * @m3_ipc: Pointer to wkup_m3_ipc context
*
* Returns 0 if reset was successful, otherwise returns error code
*/
@@ -362,8 +367,7 @@ static const char *wkup_m3_request_wake_src(struct wkup_m3_ipc *m3_ipc)
/**
* wkup_m3_set_rtc_only - Set the rtc_only flag
- * @wkup_m3_wakeup: struct wkup_m3_wakeup_src * gets assigned the
- * wakeup src value
+ * @m3_ipc: Pointer to wkup_m3_ipc context
*/
static void wkup_m3_set_rtc_only(struct wkup_m3_ipc *m3_ipc)
{
diff --git a/drivers/soc/xilinx/Kconfig b/drivers/soc/xilinx/Kconfig
index 646512d7276f..0b1708dae361 100644
--- a/drivers/soc/xilinx/Kconfig
+++ b/drivers/soc/xilinx/Kconfig
@@ -4,6 +4,7 @@ menu "Xilinx SoC drivers"
config XILINX_VCU
tristate "Xilinx VCU logicoreIP Init"
depends on HAS_IOMEM
+ select REGMAP_MMIO
help
Provides the driver to enable and disable the isolation between the
processing system and programmable logic part by using the logicoreIP
diff --git a/drivers/soc/xilinx/xlnx_vcu.c b/drivers/soc/xilinx/xlnx_vcu.c
index a3aa40996f13..14daad4efc58 100644
--- a/drivers/soc/xilinx/xlnx_vcu.c
+++ b/drivers/soc/xilinx/xlnx_vcu.c
@@ -10,39 +10,12 @@
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/xlnx-vcu.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
-
-/* Address map for different registers implemented in the VCU LogiCORE IP. */
-#define VCU_ECODER_ENABLE 0x00
-#define VCU_DECODER_ENABLE 0x04
-#define VCU_MEMORY_DEPTH 0x08
-#define VCU_ENC_COLOR_DEPTH 0x0c
-#define VCU_ENC_VERTICAL_RANGE 0x10
-#define VCU_ENC_FRAME_SIZE_X 0x14
-#define VCU_ENC_FRAME_SIZE_Y 0x18
-#define VCU_ENC_COLOR_FORMAT 0x1c
-#define VCU_ENC_FPS 0x20
-#define VCU_MCU_CLK 0x24
-#define VCU_CORE_CLK 0x28
-#define VCU_PLL_BYPASS 0x2c
-#define VCU_ENC_CLK 0x30
-#define VCU_PLL_CLK 0x34
-#define VCU_ENC_VIDEO_STANDARD 0x38
-#define VCU_STATUS 0x3c
-#define VCU_AXI_ENC_CLK 0x40
-#define VCU_AXI_DEC_CLK 0x44
-#define VCU_AXI_MCU_CLK 0x48
-#define VCU_DEC_VIDEO_STANDARD 0x4c
-#define VCU_DEC_FRAME_SIZE_X 0x50
-#define VCU_DEC_FRAME_SIZE_Y 0x54
-#define VCU_DEC_FPS 0x58
-#define VCU_BUFFER_B_FRAME 0x5c
-#define VCU_WPP_EN 0x60
-#define VCU_PLL_CLK_DEC 0x64
-#define VCU_GASKET_INIT 0x74
-#define VCU_GASKET_VALUE 0x03
+#include <linux/regmap.h>
/* vcu slcr registers, bitmask and shift */
#define VCU_PLL_CTRL 0x24
@@ -106,11 +79,20 @@ struct xvcu_device {
struct device *dev;
struct clk *pll_ref;
struct clk *aclk;
- void __iomem *logicore_reg_ba;
+ struct regmap *logicore_reg_ba;
void __iomem *vcu_slcr_ba;
u32 coreclk;
};
+static struct regmap_config vcu_settings_regmap_config = {
+ .name = "regmap",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0xfff,
+ .cache_type = REGCACHE_NONE,
+};
+
/**
* struct xvcu_pll_cfg - Helper data
* @fbdiv: The integer portion of the feedback divider to the PLL
@@ -300,10 +282,12 @@ static int xvcu_set_vcu_pll_info(struct xvcu_device *xvcu)
int ret, i;
const struct xvcu_pll_cfg *found = NULL;
- inte = xvcu_read(xvcu->logicore_reg_ba, VCU_PLL_CLK);
- deci = xvcu_read(xvcu->logicore_reg_ba, VCU_PLL_CLK_DEC);
- coreclk = xvcu_read(xvcu->logicore_reg_ba, VCU_CORE_CLK) * MHZ;
- mcuclk = xvcu_read(xvcu->logicore_reg_ba, VCU_MCU_CLK) * MHZ;
+ regmap_read(xvcu->logicore_reg_ba, VCU_PLL_CLK, &inte);
+ regmap_read(xvcu->logicore_reg_ba, VCU_PLL_CLK_DEC, &deci);
+ regmap_read(xvcu->logicore_reg_ba, VCU_CORE_CLK, &coreclk);
+ coreclk *= MHZ;
+ regmap_read(xvcu->logicore_reg_ba, VCU_MCU_CLK, &mcuclk);
+ mcuclk *= MHZ;
if (!mcuclk || !coreclk) {
dev_err(xvcu->dev, "Invalid mcu and core clock data\n");
return -EINVAL;
@@ -498,6 +482,7 @@ static int xvcu_probe(struct platform_device *pdev)
{
struct resource *res;
struct xvcu_device *xvcu;
+ void __iomem *regs;
int ret;
xvcu = devm_kzalloc(&pdev->dev, sizeof(*xvcu), GFP_KERNEL);
@@ -518,17 +503,32 @@ static int xvcu_probe(struct platform_device *pdev)
return -ENOMEM;
}
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "logicore");
- if (!res) {
- dev_err(&pdev->dev, "get logicore memory resource failed.\n");
- return -ENODEV;
- }
+ xvcu->logicore_reg_ba =
+ syscon_regmap_lookup_by_compatible("xlnx,vcu-settings");
+ if (IS_ERR(xvcu->logicore_reg_ba)) {
+ dev_info(&pdev->dev,
+ "could not find xlnx,vcu-settings: trying direct register access\n");
+
+ res = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "logicore");
+ if (!res) {
+ dev_err(&pdev->dev, "get logicore memory resource failed.\n");
+ return -ENODEV;
+ }
- xvcu->logicore_reg_ba = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!xvcu->logicore_reg_ba) {
- dev_err(&pdev->dev, "logicore register mapping failed.\n");
- return -ENOMEM;
+ regs = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!regs) {
+ dev_err(&pdev->dev, "logicore register mapping failed.\n");
+ return -ENOMEM;
+ }
+
+ xvcu->logicore_reg_ba =
+ devm_regmap_init_mmio(&pdev->dev, regs,
+ &vcu_settings_regmap_config);
+ if (IS_ERR(xvcu->logicore_reg_ba)) {
+ dev_err(&pdev->dev, "failed to init regmap\n");
+ return PTR_ERR(xvcu->logicore_reg_ba);
+ }
}
xvcu->aclk = devm_clk_get(&pdev->dev, "aclk");
@@ -560,7 +560,7 @@ static int xvcu_probe(struct platform_device *pdev)
* Bit 0 : Gasket isolation
* Bit 1 : put VCU out of reset
*/
- xvcu_write(xvcu->logicore_reg_ba, VCU_GASKET_INIT, VCU_GASKET_VALUE);
+ regmap_write(xvcu->logicore_reg_ba, VCU_GASKET_INIT, VCU_GASKET_VALUE);
/* Do the PLL Settings based on the ref clk,core and mcu clk freq */
ret = xvcu_set_pll(xvcu);
@@ -571,8 +571,6 @@ static int xvcu_probe(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, xvcu);
- dev_info(&pdev->dev, "%s: Probed successfully\n", __func__);
-
return 0;
error_pll_ref:
@@ -599,7 +597,7 @@ static int xvcu_remove(struct platform_device *pdev)
return -ENODEV;
/* Add the Gasket isolation and put the VCU in reset. */
- xvcu_write(xvcu->logicore_reg_ba, VCU_GASKET_INIT, 0);
+ regmap_write(xvcu->logicore_reg_ba, VCU_GASKET_INIT, 0);
clk_disable_unprepare(xvcu->pll_ref);
clk_disable_unprepare(xvcu->aclk);
diff --git a/drivers/spi/spi-altera.c b/drivers/spi/spi-altera.c
index 809bfff3690a..62ea0c9e321b 100644
--- a/drivers/spi/spi-altera.c
+++ b/drivers/spi/spi-altera.c
@@ -189,24 +189,26 @@ static int altera_spi_txrx(struct spi_master *master,
/* send the first byte */
altera_spi_tx_word(hw);
- } else {
- while (hw->count < hw->len) {
- altera_spi_tx_word(hw);
- for (;;) {
- altr_spi_readl(hw, ALTERA_SPI_STATUS, &val);
- if (val & ALTERA_SPI_STATUS_RRDY_MSK)
- break;
+ return 1;
+ }
+
+ while (hw->count < hw->len) {
+ altera_spi_tx_word(hw);
- cpu_relax();
- }
+ for (;;) {
+ altr_spi_readl(hw, ALTERA_SPI_STATUS, &val);
+ if (val & ALTERA_SPI_STATUS_RRDY_MSK)
+ break;
- altera_spi_rx_word(hw);
+ cpu_relax();
}
- spi_finalize_current_transfer(master);
+
+ altera_spi_rx_word(hw);
}
+ spi_finalize_current_transfer(master);
- return t->len;
+ return 0;
}
static irqreturn_t altera_spi_irq(int irq, void *dev)
@@ -252,7 +254,8 @@ static int altera_spi_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"Invalid number of chipselect: %hu\n",
pdata->num_chipselect);
- return -EINVAL;
+ err = -EINVAL;
+ goto exit;
}
master->num_chipselect = pdata->num_chipselect;
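
The return-value change above brings the driver in line with the transfer_one() contract documented for struct spi_controller: negative errno on failure, 0 when the transfer completed synchronously, and a positive value when it is still in flight and spi_finalize_current_transfer() will be called later (here, from the IRQ handler). A sketch, with hypothetical helpers standing in for driver-specific logic:

static bool example_uses_irq(struct spi_master *master)
{
	return true;	/* placeholder: real drivers check their setup */
}

static void example_do_polled_xfer(struct spi_master *master,
				   struct spi_transfer *t)
{
	/* placeholder for a busy-wait transfer loop */
}

static int example_transfer_one(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *t)
{
	if (example_uses_irq(master))
		return 1;	/* in flight; the ISR finalizes it later */

	example_do_polled_xfer(master, t);
	spi_finalize_current_transfer(master);
	return 0;		/* completed synchronously */
}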
diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c
index 70467b9d61ba..a3afd1b9ac56 100644
--- a/drivers/spi/spi-cadence.c
+++ b/drivers/spi/spi-cadence.c
@@ -115,6 +115,7 @@ struct cdns_spi {
void __iomem *regs;
struct clk *ref_clk;
struct clk *pclk;
+ unsigned int clk_rate;
u32 speed_hz;
const u8 *txbuf;
u8 *rxbuf;
@@ -250,7 +251,7 @@ static void cdns_spi_config_clock_freq(struct spi_device *spi,
u32 ctrl_reg, baud_rate_val;
unsigned long frequency;
- frequency = clk_get_rate(xspi->ref_clk);
+ frequency = xspi->clk_rate;
ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR);
@@ -558,8 +559,9 @@ static int cdns_spi_probe(struct platform_device *pdev)
master->auto_runtime_pm = true;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ xspi->clk_rate = clk_get_rate(xspi->ref_clk);
/* Set to default valid value */
- master->max_speed_hz = clk_get_rate(xspi->ref_clk) / 4;
+ master->max_speed_hz = xspi->clk_rate / 4;
xspi->speed_hz = master->max_speed_hz;
master->bits_per_word_mask = SPI_BPW_MASK(8);
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index 9494257e1c33..6d8e0a05a535 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -115,14 +115,13 @@ static void fsl_spi_chipselect(struct spi_device *spi, int value)
{
struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
struct fsl_spi_platform_data *pdata;
- bool pol = spi->mode & SPI_CS_HIGH;
struct spi_mpc8xxx_cs *cs = spi->controller_state;
pdata = spi->dev.parent->parent->platform_data;
if (value == BITBANG_CS_INACTIVE) {
if (pdata->cs_control)
- pdata->cs_control(spi, !pol);
+ pdata->cs_control(spi, false);
}
if (value == BITBANG_CS_ACTIVE) {
@@ -134,7 +133,7 @@ static void fsl_spi_chipselect(struct spi_device *spi, int value)
fsl_spi_change_mode(spi);
if (pdata->cs_control)
- pdata->cs_control(spi, pol);
+ pdata->cs_control(spi, true);
}
}
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index 512e925d5ea4..881f645661cc 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -83,6 +83,7 @@ struct spi_geni_master {
spinlock_t lock;
int irq;
bool cs_flag;
+ bool abort_failed;
};
static int get_spi_clk_cfg(unsigned int speed_hz,
@@ -141,8 +142,49 @@ static void handle_fifo_timeout(struct spi_master *spi,
spin_unlock_irq(&mas->lock);
time_left = wait_for_completion_timeout(&mas->abort_done, HZ);
- if (!time_left)
+ if (!time_left) {
dev_err(mas->dev, "Failed to cancel/abort m_cmd\n");
+
+ /*
+ * No need for a lock since SPI core has a lock and we never
+ * access this from an interrupt.
+ */
+ mas->abort_failed = true;
+ }
+}
+
+static bool spi_geni_is_abort_still_pending(struct spi_geni_master *mas)
+{
+ struct geni_se *se = &mas->se;
+ u32 m_irq, m_irq_en;
+
+ if (!mas->abort_failed)
+ return false;
+
+ /*
+ * The only known case where a transfer times out and then a cancel
+ * times out then an abort times out is if something is blocking our
+ * interrupt handler from running. Avoid starting any new transfers
+ * until that sorts itself out.
+ */
+ spin_lock_irq(&mas->lock);
+ m_irq = readl(se->base + SE_GENI_M_IRQ_STATUS);
+ m_irq_en = readl(se->base + SE_GENI_M_IRQ_EN);
+ spin_unlock_irq(&mas->lock);
+
+ if (m_irq & m_irq_en) {
+ dev_err(mas->dev, "Interrupts pending after abort: %#010x\n",
+ m_irq & m_irq_en);
+ return true;
+ }
+
+ /*
+ * If we're here the problem resolved itself so no need to check more
+ * on future transfers.
+ */
+ mas->abort_failed = false;
+
+ return false;
}
static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
@@ -158,10 +200,21 @@ static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
if (set_flag == mas->cs_flag)
return;
- mas->cs_flag = set_flag;
-
pm_runtime_get_sync(mas->dev);
+
+ if (spi_geni_is_abort_still_pending(mas)) {
+ dev_err(mas->dev, "Can't set chip select\n");
+ goto exit;
+ }
+
spin_lock_irq(&mas->lock);
+ if (mas->cur_xfer) {
+ dev_err(mas->dev, "Can't set CS when prev xfer running\n");
+ spin_unlock_irq(&mas->lock);
+ goto exit;
+ }
+
+ mas->cs_flag = set_flag;
reinit_completion(&mas->cs_done);
if (set_flag)
geni_se_setup_m_cmd(se, SPI_CS_ASSERT, 0);
@@ -170,9 +223,12 @@ static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
spin_unlock_irq(&mas->lock);
time_left = wait_for_completion_timeout(&mas->cs_done, HZ);
- if (!time_left)
+ if (!time_left) {
+ dev_warn(mas->dev, "Timeout setting chip select\n");
handle_fifo_timeout(spi, NULL);
+ }
+exit:
pm_runtime_put(mas->dev);
}
@@ -280,6 +336,9 @@ static int spi_geni_prepare_message(struct spi_master *spi,
int ret;
struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ if (spi_geni_is_abort_still_pending(mas))
+ return -EBUSY;
+
ret = setup_fifo_params(spi_msg->spi, spi);
if (ret)
dev_err(mas->dev, "Couldn't select mode %d\n", ret);
@@ -354,6 +413,12 @@ static bool geni_spi_handle_tx(struct spi_geni_master *mas)
unsigned int bytes_per_fifo_word = geni_byte_per_fifo_word(mas);
unsigned int i = 0;
+ /* Stop the watermark IRQ if nothing to send */
+ if (!mas->cur_xfer) {
+ writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
+ return false;
+ }
+
max_bytes = (mas->tx_fifo_depth - mas->tx_wm) * bytes_per_fifo_word;
if (mas->tx_rem_bytes < max_bytes)
max_bytes = mas->tx_rem_bytes;
@@ -396,6 +461,14 @@ static void geni_spi_handle_rx(struct spi_geni_master *mas)
if (rx_last_byte_valid && rx_last_byte_valid < 4)
rx_bytes -= bytes_per_fifo_word - rx_last_byte_valid;
}
+
+ /* Clear out the FIFO and bail if nowhere to put it */
+ if (!mas->cur_xfer) {
+ for (i = 0; i < DIV_ROUND_UP(rx_bytes, bytes_per_fifo_word); i++)
+ readl(se->base + SE_GENI_RX_FIFOn);
+ return;
+ }
+
if (mas->rx_rem_bytes < rx_bytes)
rx_bytes = mas->rx_rem_bytes;
@@ -495,6 +568,9 @@ static int spi_geni_transfer_one(struct spi_master *spi,
{
struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ if (spi_geni_is_abort_still_pending(mas))
+ return -EBUSY;
+
/* Terminate and return success for 0 byte length transfer */
if (!xfer->len)
return 0;
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
index 471dedf3d339..6017209c6d2f 100644
--- a/drivers/spi/spi-stm32.c
+++ b/drivers/spi/spi-stm32.c
@@ -493,9 +493,9 @@ static u32 stm32h7_spi_prepare_fthlv(struct stm32_spi *spi, u32 xfer_len)
/* align packet size with data registers access */
if (spi->cur_bpw > 8)
- fthlv -= (fthlv % 2); /* multiple of 2 */
+ fthlv += (fthlv % 2) ? 1 : 0;
else
- fthlv -= (fthlv % 4); /* multiple of 4 */
+ fthlv += (fthlv % 4) ? (4 - (fthlv % 4)) : 0;
if (!fthlv)
fthlv = 1;
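
A worked example of the fix above: the FIFO threshold is now rounded up to the data-register access width instead of down, since rounding down could leave it smaller than a packet. Extracted as a standalone helper (name and shape are illustrative):

static u32 align_fthlv(u32 fthlv, u32 bpw)
{
	if (bpw > 8)
		fthlv += (fthlv % 2) ? 1 : 0;		/* up to multiple of 2 */
	else
		fthlv += (fthlv % 4) ? (4 - (fthlv % 4)) : 0; /* of 4 */

	return fthlv ? fthlv : 1;
}

/*
 * align_fthlv(5, 16) == 6 where the old code returned 4;
 * align_fthlv(7,  8) == 8 where the old code returned 4.
 */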
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 51d7c004fbab..720ab34784c1 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1108,6 +1108,7 @@ static int spi_transfer_wait(struct spi_controller *ctlr,
{
struct spi_statistics *statm = &ctlr->statistics;
struct spi_statistics *stats = &msg->spi->statistics;
+ u32 speed_hz = xfer->speed_hz;
unsigned long long ms;
if (spi_controller_is_slave(ctlr)) {
@@ -1116,8 +1117,11 @@ static int spi_transfer_wait(struct spi_controller *ctlr,
return -EINTR;
}
} else {
+ if (!speed_hz)
+ speed_hz = 100000;
+
ms = 8LL * 1000LL * xfer->len;
- do_div(ms, xfer->speed_hz);
+ do_div(ms, speed_hz);
ms += ms + 200; /* some tolerance */
if (ms > UINT_MAX)
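
The new fallback above keeps the wait bounded when a transfer's speed_hz is unset. A sketch of the resulting arithmetic (the helper is hypothetical; the kernel code inlines this and additionally clamps the result to UINT_MAX):

#include <asm/div64.h>
#include <linux/types.h>

static unsigned long long xfer_timeout_ms(unsigned long long len_bytes,
					  u32 speed_hz)
{
	unsigned long long ms;

	if (!speed_hz)
		speed_hz = 100000;	/* assume 100 kHz, avoid div by 0 */

	ms = 8LL * 1000LL * len_bytes;	/* bits x (ms per second) */
	do_div(ms, speed_hz);		/* -> transfer time in ms */
	ms += ms + 200;			/* double it, plus some tolerance */
	return ms;
}

/*
 * 4096 bytes at 1 MHz: 32 ms of wire time -> 264 ms wait;
 * at speed_hz == 0 the 100 kHz fallback gives 327 ms -> 854 ms wait.
 */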
@@ -3378,8 +3382,9 @@ int spi_setup(struct spi_device *spi)
if (status)
return status;
- if (!spi->max_speed_hz ||
- spi->max_speed_hz > spi->controller->max_speed_hz)
+ if (spi->controller->max_speed_hz &&
+ (!spi->max_speed_hz ||
+ spi->max_speed_hz > spi->controller->max_speed_hz))
spi->max_speed_hz = spi->controller->max_speed_hz;
mutex_lock(&spi->controller->io_mutex);
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 859910ec8d9f..8cb4d923aeaa 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -682,6 +682,7 @@ static const struct of_device_id spidev_dt_ids[] = {
{ .compatible = "lwn,bk4" },
{ .compatible = "dh,dhcom-board" },
{ .compatible = "menlo,m53cpld" },
+ { .compatible = "cisco,spi-petra" },
{},
};
MODULE_DEVICE_TABLE(of, spidev_dt_ids);
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 10b4be1f3e78..4789d36ddfd3 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -450,9 +450,9 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
vma_set_anonymous(vma);
}
- if (vma->vm_file)
- fput(vma->vm_file);
- vma->vm_file = asma->file;
+ vma_set_file(vma, asma->file);
+ /* XXX: merge this with the get_file() above if possible */
+ fput(asma->file);
out:
mutex_unlock(&ashmem_mutex);
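
For reference, vma_set_file() — the helper this hunk converts to — is, per its introduction elsewhere in this release, approximately the snippet below; the explicit fput() in the hunk then balances the get_file() taken earlier in ashmem_mmap(), as the XXX comment notes.

void vma_set_file(struct vm_area_struct *vma, struct file *file)
{
	/* Changing an anonymous vma with this is illegal */
	get_file(file);			/* ref held on behalf of the vma */
	swap(vma->vm_file, file);	/* install the new file */
	fput(file);			/* drop the vma's old reference */
}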
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index d99231c737fb..80d74cce2a01 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -2987,7 +2987,9 @@ static int put_compat_cmd(struct comedi32_cmd_struct __user *cmd32,
v32.chanlist_len = cmd->chanlist_len;
v32.data = ptr_to_compat(cmd->data);
v32.data_len = cmd->data_len;
- return copy_to_user(cmd32, &v32, sizeof(v32));
+ if (copy_to_user(cmd32, &v32, sizeof(v32)))
+ return -EFAULT;
+ return 0;
}
/* Handle 32-bit COMEDI_CMD ioctl. */
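
The hunk above fixes a classic idiom error: copy_to_user() returns the number of bytes it failed to copy, not an errno, so returning its result directly hands a positive count to callers that expect 0 or -EFAULT. A minimal sketch of the correct pattern:

static int example_put_user_struct(void __user *dst, const void *src,
				   size_t len)
{
	if (copy_to_user(dst, src, len))
		return -EFAULT;	/* partial copy: report a real errno */
	return 0;
}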
diff --git a/drivers/staging/hikey9xx/hisi-spmi-controller.c b/drivers/staging/hikey9xx/hisi-spmi-controller.c
index 861aedd5de48..0d42bc65f39b 100644
--- a/drivers/staging/hikey9xx/hisi-spmi-controller.c
+++ b/drivers/staging/hikey9xx/hisi-spmi-controller.c
@@ -278,21 +278,24 @@ static int spmi_controller_probe(struct platform_device *pdev)
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!iores) {
dev_err(&pdev->dev, "can not get resource!\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_put_controller;
}
spmi_controller->base = devm_ioremap(&pdev->dev, iores->start,
resource_size(iores));
if (!spmi_controller->base) {
dev_err(&pdev->dev, "can not remap base addr!\n");
- return -EADDRNOTAVAIL;
+ ret = -EADDRNOTAVAIL;
+ goto err_put_controller;
}
ret = of_property_read_u32(pdev->dev.of_node, "spmi-channel",
&spmi_controller->channel);
if (ret) {
dev_err(&pdev->dev, "can not get channel\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_put_controller;
}
platform_set_drvdata(pdev, spmi_controller);
@@ -309,9 +312,15 @@ static int spmi_controller_probe(struct platform_device *pdev)
ctrl->write_cmd = spmi_write_cmd;
ret = spmi_controller_add(ctrl);
- if (ret)
- dev_err(&pdev->dev, "spmi_add_controller failed with error %d!\n", ret);
+ if (ret) {
+ dev_err(&pdev->dev, "spmi_controller_add failed with error %d!\n", ret);
+ goto err_put_controller;
+ }
+
+ return 0;
+err_put_controller:
+ spmi_controller_put(ctrl);
return ret;
}
@@ -320,7 +329,7 @@ static int spmi_del_controller(struct platform_device *pdev)
struct spmi_controller *ctrl = platform_get_drvdata(pdev);
spmi_controller_remove(ctrl);
- kfree(ctrl);
+ spmi_controller_put(ctrl);
return 0;
}
diff --git a/drivers/staging/media/atomisp/pci/atomisp_subdev.c b/drivers/staging/media/atomisp/pci/atomisp_subdev.c
index 52b9fb18c87f..b666cb23e5ca 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_subdev.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_subdev.c
@@ -1062,26 +1062,6 @@ static const struct v4l2_ctrl_config ctrl_select_isp_version = {
.def = 0,
};
-#if 0 /* #ifdef CONFIG_ION */
-/*
- * Control for ISP ion device fd
- *
- * userspace will open ion device and pass the fd to kernel.
- * this fd will be used to map shared fd to buffer.
- */
-/* V4L2_CID_ATOMISP_ION_DEVICE_FD is not defined */
-static const struct v4l2_ctrl_config ctrl_ion_dev_fd = {
- .ops = &ctrl_ops,
- .id = V4L2_CID_ATOMISP_ION_DEVICE_FD,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Ion Device Fd",
- .min = -1,
- .max = 1024,
- .step = 1,
- .def = ION_FD_UNSET
-};
-#endif
-
static void atomisp_init_subdev_pipe(struct atomisp_sub_device *asd,
struct atomisp_video_pipe *pipe, enum v4l2_buf_type buf_type)
{
diff --git a/drivers/staging/media/hantro/hantro_v4l2.c b/drivers/staging/media/hantro/hantro_v4l2.c
index b668a82d40ad..f5fbdbc4ffdb 100644
--- a/drivers/staging/media/hantro/hantro_v4l2.c
+++ b/drivers/staging/media/hantro/hantro_v4l2.c
@@ -367,7 +367,7 @@ hantro_reset_raw_fmt(struct hantro_ctx *ctx)
hantro_reset_fmt(raw_fmt, raw_vpu_fmt);
raw_fmt->width = encoded_fmt->width;
- raw_fmt->width = encoded_fmt->width;
+ raw_fmt->height = encoded_fmt->height;
if (ctx->is_encoder)
hantro_set_fmt_out(ctx, raw_fmt);
else
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.c b/drivers/staging/media/sunxi/cedrus/cedrus.c
index 18d54f9fd715..ddad5d274ee8 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.c
@@ -588,7 +588,6 @@ static const struct cedrus_variant sun50i_h6_cedrus_variant = {
CEDRUS_CAPABILITY_H264_DEC |
CEDRUS_CAPABILITY_H265_DEC |
CEDRUS_CAPABILITY_VP8_DEC,
- .quirks = CEDRUS_QUIRK_NO_DMA_OFFSET,
.mod_rate = 600000000,
};
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.h b/drivers/staging/media/sunxi/cedrus/cedrus.h
index e61c41853ba2..c96077aaef49 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus.h
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.h
@@ -33,8 +33,6 @@
#define CEDRUS_CAPABILITY_MPEG2_DEC BIT(3)
#define CEDRUS_CAPABILITY_VP8_DEC BIT(4)
-#define CEDRUS_QUIRK_NO_DMA_OFFSET BIT(0)
-
enum cedrus_codec {
CEDRUS_CODEC_MPEG2,
CEDRUS_CODEC_H264,
@@ -168,7 +166,6 @@ struct cedrus_dec_ops {
struct cedrus_variant {
unsigned int capabilities;
- unsigned int quirks;
unsigned int mod_rate;
};
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
index 781c84a9b1b7..de7442d4834d 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
@@ -203,7 +203,7 @@ static void _cedrus_write_ref_list(struct cedrus_ctx *ctx,
position = cedrus_buf->codec.h264.position;
sram_array[i] |= position << 1;
- if (ref_list[i].fields & V4L2_H264_BOTTOM_FIELD_REF)
+ if (ref_list[i].fields == V4L2_H264_BOTTOM_FIELD_REF)
sram_array[i] |= BIT(0);
}
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
index 111cb91f8fc2..e2f2ff609c7e 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
@@ -224,24 +224,6 @@ int cedrus_hw_probe(struct cedrus_dev *dev)
return ret;
}
- /*
- * The VPU is only able to handle bus addresses so we have to subtract
- * the RAM offset to the physcal addresses.
- *
- * This information will eventually be obtained from device-tree.
- *
- * XXX(hch): this has no business in a driver and needs to move
- * to the device tree.
- */
-
-#ifdef PHYS_PFN_OFFSET
- if (!(variant->quirks & CEDRUS_QUIRK_NO_DMA_OFFSET)) {
- ret = dma_direct_set_offset(dev->dev, PHYS_OFFSET, 0, SZ_4G);
- if (ret)
- return ret;
- }
-#endif
-
ret = of_reserved_mem_device_init(dev->dev);
if (ret && ret != -ENODEV) {
dev_err(dev->dev, "Failed to reserve memory\n");
diff --git a/drivers/staging/mt7621-dma/mtk-hsdma.c b/drivers/staging/mt7621-dma/mtk-hsdma.c
index d241349214e7..bc4bb4374313 100644
--- a/drivers/staging/mt7621-dma/mtk-hsdma.c
+++ b/drivers/staging/mt7621-dma/mtk-hsdma.c
@@ -712,7 +712,7 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
ret = dma_async_device_register(dd);
if (ret) {
dev_err(&pdev->dev, "failed to register dma device\n");
- return ret;
+ goto err_uninit_hsdma;
}
ret = of_dma_controller_register(pdev->dev.of_node,
@@ -728,6 +728,8 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
err_unregister:
dma_async_device_unregister(dd);
+err_uninit_hsdma:
+ mtk_hsdma_uninit(hsdma);
return ret;
}
diff --git a/drivers/staging/rtl8723bs/include/rtw_wifi_regd.h b/drivers/staging/rtl8723bs/include/rtw_wifi_regd.h
index ab5a8627d371..f798b0c744a4 100644
--- a/drivers/staging/rtl8723bs/include/rtw_wifi_regd.h
+++ b/drivers/staging/rtl8723bs/include/rtw_wifi_regd.h
@@ -20,9 +20,9 @@ enum country_code_type_t {
COUNTRY_CODE_MAX
};
-int rtw_regd_init(struct adapter *padapter,
- void (*reg_notifier)(struct wiphy *wiphy,
- struct regulatory_request *request));
+void rtw_regd_init(struct wiphy *wiphy,
+ void (*reg_notifier)(struct wiphy *wiphy,
+ struct regulatory_request *request));
void rtw_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request);
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
index bf1417236161..11032316c53d 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
@@ -3211,9 +3211,6 @@ void rtw_cfg80211_init_wiphy(struct adapter *padapter)
rtw_cfg80211_init_ht_capab(&bands->ht_cap, NL80211_BAND_2GHZ, rf_type);
}
- /* init regulary domain */
- rtw_regd_init(padapter, rtw_reg_notifier);
-
/* copy mac_addr to wiphy */
memcpy(wiphy->perm_addr, padapter->eeprompriv.mac_addr, ETH_ALEN);
@@ -3328,6 +3325,9 @@ int rtw_wdev_alloc(struct adapter *padapter, struct device *dev)
*((struct adapter **)wiphy_priv(wiphy)) = padapter;
rtw_cfg80211_preinit_wiphy(padapter, wiphy);
+ /* init regulatory domain */
+ rtw_regd_init(wiphy, rtw_reg_notifier);
+
ret = wiphy_register(wiphy);
if (ret < 0) {
DBG_8192C("Couldn't register wiphy device\n");
diff --git a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
index b2208e5f190a..301ffff12e82 100644
--- a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
+++ b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
@@ -339,8 +339,6 @@ static struct adapter *rtw_sdio_if1_init(struct dvobj_priv *dvobj, const struct
padapter = rtw_netdev_priv(pnetdev);
- rtw_wdev_alloc(padapter, dvobj_to_dev(dvobj));
-
/* 3 3. init driver special setting, interface, OS and hardware relative */
/* 4 3.1 set hardware operation functions */
@@ -378,6 +376,8 @@ static struct adapter *rtw_sdio_if1_init(struct dvobj_priv *dvobj, const struct
goto free_hal_data;
}
+ rtw_wdev_alloc(padapter, dvobj_to_dev(dvobj));
+
/* 3 8. get WLan MAC address */
/* set mac addr */
rtw_macaddr_cfg(&psdio->func->dev, padapter->eeprompriv.mac_addr);
diff --git a/drivers/staging/rtl8723bs/os_dep/wifi_regd.c b/drivers/staging/rtl8723bs/os_dep/wifi_regd.c
index 578b9f734231..2833fc6901e6 100644
--- a/drivers/staging/rtl8723bs/os_dep/wifi_regd.c
+++ b/drivers/staging/rtl8723bs/os_dep/wifi_regd.c
@@ -139,15 +139,11 @@ static void _rtw_regd_init_wiphy(struct rtw_regulatory *reg,
_rtw_reg_apply_flags(wiphy);
}
-int rtw_regd_init(struct adapter *padapter,
- void (*reg_notifier)(struct wiphy *wiphy,
- struct regulatory_request *request))
+void rtw_regd_init(struct wiphy *wiphy,
+ void (*reg_notifier)(struct wiphy *wiphy,
+ struct regulatory_request *request))
{
- struct wiphy *wiphy = padapter->rtw_wdev->wiphy;
-
_rtw_regd_init_wiphy(NULL, wiphy, reg_notifier);
-
- return 0;
}
void rtw_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 893d1b406c29..1a9c50401bdb 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -896,7 +896,7 @@ int iscsit_setup_np(
else
len = sizeof(struct sockaddr_in);
/*
- * Set SO_REUSEADDR, and disable Nagel Algorithm with TCP_NODELAY.
+ * Set SO_REUSEADDR, and disable Nagle Algorithm with TCP_NODELAY.
*/
if (np->np_network_transport == ISCSI_TCP)
tcp_sock_set_nodelay(sock->sk);
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 16d5a4e117a2..badba437e5f9 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -46,6 +46,15 @@ static int tcm_loop_hba_no_cnt;
static int tcm_loop_queue_status(struct se_cmd *se_cmd);
+static unsigned int tcm_loop_nr_hw_queues = 1;
+module_param_named(nr_hw_queues, tcm_loop_nr_hw_queues, uint, 0644);
+
+static unsigned int tcm_loop_can_queue = 1024;
+module_param_named(can_queue, tcm_loop_can_queue, uint, 0644);
+
+static unsigned int tcm_loop_cmd_per_lun = 1024;
+module_param_named(cmd_per_lun, tcm_loop_cmd_per_lun, uint, 0644);
+
/*
* Called from struct target_core_fabric_ops->check_stop_free()
*/
@@ -305,10 +314,8 @@ static struct scsi_host_template tcm_loop_driver_template = {
.eh_abort_handler = tcm_loop_abort_task,
.eh_device_reset_handler = tcm_loop_device_reset,
.eh_target_reset_handler = tcm_loop_target_reset,
- .can_queue = 1024,
.this_id = -1,
.sg_tablesize = 256,
- .cmd_per_lun = 1024,
.max_sectors = 0xFFFF,
.dma_boundary = PAGE_SIZE - 1,
.module = THIS_MODULE,
@@ -342,6 +349,9 @@ static int tcm_loop_driver_probe(struct device *dev)
sh->max_lun = 0;
sh->max_channel = 0;
sh->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;
+ sh->nr_hw_queues = tcm_loop_nr_hw_queues;
+ sh->can_queue = tcm_loop_can_queue;
+ sh->cmd_per_lun = tcm_loop_cmd_per_lun;
host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 405d82d44717..7787c527aad3 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -65,6 +65,16 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd)
atomic_long_add(se_cmd->data_length,
&deve->read_bytes);
+ if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
+ deve->lun_access_ro) {
+ pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
+ " Access for 0x%08llx\n",
+ se_cmd->se_tfo->fabric_name,
+ se_cmd->orig_fe_lun);
+ rcu_read_unlock();
+ return TCM_WRITE_PROTECTED;
+ }
+
se_lun = rcu_dereference(deve->se_lun);
if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
@@ -76,17 +86,6 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd)
se_cmd->pr_res_key = deve->pr_res_key;
se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
se_cmd->lun_ref_active = true;
-
- if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
- deve->lun_access_ro) {
- pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
- " Access for 0x%08llx\n",
- se_cmd->se_tfo->fabric_name,
- se_cmd->orig_fe_lun);
- rcu_read_unlock();
- ret = TCM_WRITE_PROTECTED;
- goto ref_dev;
- }
}
out_unlock:
rcu_read_unlock();
@@ -106,21 +105,20 @@ out_unlock:
return TCM_NON_EXISTENT_LUN;
}
- se_lun = se_sess->se_tpg->tpg_virt_lun0;
- se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0;
- se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
-
- percpu_ref_get(&se_lun->lun_ref);
- se_cmd->lun_ref_active = true;
-
/*
* Force WRITE PROTECT for virtual LUN 0
*/
if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
- (se_cmd->data_direction != DMA_NONE)) {
- ret = TCM_WRITE_PROTECTED;
- goto ref_dev;
- }
+ (se_cmd->data_direction != DMA_NONE))
+ return TCM_WRITE_PROTECTED;
+
+ se_lun = se_sess->se_tpg->tpg_virt_lun0;
+ if (!percpu_ref_tryget_live(&se_lun->lun_ref))
+ return TCM_NON_EXISTENT_LUN;
+
+ se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0;
+ se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
+ se_cmd->lun_ref_active = true;
}
/*
* RCU reference protected by percpu se_lun->lun_ref taken above that
@@ -128,7 +126,6 @@ out_unlock:
* pointer can be kfree_rcu() by the final se_lun->lun_group put via
* target_core_fabric_configfs.c:target_fabric_port_release
*/
-ref_dev:
se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
atomic_long_inc(&se_cmd->se_dev->num_cmds);
@@ -724,11 +721,24 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
struct se_device *dev;
struct se_lun *xcopy_lun;
+ int i;
dev = hba->backend->ops->alloc_device(hba, name);
if (!dev)
return NULL;
+ dev->queues = kcalloc(nr_cpu_ids, sizeof(*dev->queues), GFP_KERNEL);
+ if (!dev->queues) {
+ dev->transport->free_device(dev);
+ return NULL;
+ }
+
+ dev->queue_cnt = nr_cpu_ids;
+ for (i = 0; i < dev->queue_cnt; i++) {
+ INIT_LIST_HEAD(&dev->queues[i].state_list);
+ spin_lock_init(&dev->queues[i].lock);
+ }
+
dev->se_hba = hba;
dev->transport = hba->backend->ops;
dev->transport_flags = dev->transport->transport_flags_default;
@@ -738,9 +748,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
INIT_LIST_HEAD(&dev->dev_sep_list);
INIT_LIST_HEAD(&dev->dev_tmr_list);
INIT_LIST_HEAD(&dev->delayed_cmd_list);
- INIT_LIST_HEAD(&dev->state_list);
INIT_LIST_HEAD(&dev->qf_cmd_list);
- spin_lock_init(&dev->execute_task_lock);
spin_lock_init(&dev->delayed_cmd_lock);
spin_lock_init(&dev->dev_reservation_lock);
spin_lock_init(&dev->se_port_lock);
@@ -1013,6 +1021,7 @@ void target_free_device(struct se_device *dev)
if (dev->transport->free_prot)
dev->transport->free_prot(dev);
+ kfree(dev->queues);
dev->transport->free_device(dev);
}
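
The kcalloc above sets up the per-CPU command queues used by the rest of this series. A condensed sketch of the idiom (the struct abbreviates the fields this series adds to target_core_base.h): one lock and state list per queue, indexed by the submitting CPU, so submission paths on different CPUs stop contending on a single device-wide lock.

struct example_dev_queue {
	struct list_head state_list;
	spinlock_t lock;
};

static void example_add_to_state_list(struct example_dev_queue *queues,
				      struct se_cmd *cmd)
{
	struct example_dev_queue *q = &queues[cmd->cpuid];
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	list_add_tail(&cmd->state_list, &q->state_list);
	spin_unlock_irqrestore(&q->lock, flags);
}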
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 7143d03f0e02..b0cb5b95e892 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -133,10 +133,10 @@ static int fd_configure_device(struct se_device *dev)
*/
inode = file->f_mapping->host;
if (S_ISBLK(inode->i_mode)) {
- struct request_queue *q = bdev_get_queue(inode->i_bdev);
+ struct request_queue *q = bdev_get_queue(I_BDEV(inode));
unsigned long long dev_size;
- fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
+ fd_dev->fd_block_size = bdev_logical_block_size(I_BDEV(inode));
/*
* Determine the number of bytes from i_size_read() minus
* one (1) logical sector from underlying struct block_device
@@ -559,7 +559,7 @@ fd_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
if (S_ISBLK(inode->i_mode)) {
/* The backend is block device, use discard */
- struct block_device *bdev = inode->i_bdev;
+ struct block_device *bdev = I_BDEV(inode);
struct se_device *dev = cmd->se_dev;
ret = blkdev_issue_discard(bdev,
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index f2bd2e207e0b..8ed93fd205c7 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -211,6 +211,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
break;
case 512:
blocks_long <<= 3;
+ break;
default:
break;
}
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 5f79ea05f9b8..14db5e568f22 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -337,6 +337,7 @@ static int core_scsi3_pr_seq_non_holder(struct se_cmd *cmd, u32 pr_reg_type,
switch (pr_reg_type) {
case PR_TYPE_WRITE_EXCLUSIVE:
we = 1;
+ fallthrough;
case PR_TYPE_EXCLUSIVE_ACCESS:
/*
* Some commands are only allowed for the persistent reservation
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 4e37fa9b409d..7994f27e4527 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -1029,9 +1029,8 @@ static sector_t pscsi_get_blocks(struct se_device *dev)
{
struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
- if (pdv->pdv_bd && pdv->pdv_bd->bd_part)
- return pdv->pdv_bd->bd_part->nr_sects;
-
+ if (pdv->pdv_bd)
+ return bdev_nr_sectors(pdv->pdv_bd);
return 0;
}
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 6e8b8d30938f..f7c527a826fd 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -434,20 +434,81 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
return ret;
}
+/*
+ * Compare @cmp_len bytes of @read_sgl with @cmp_sgl. On miscompare, fill
+ * @miscmp_off and return TCM_MISCOMPARE_VERIFY.
+ */
+static sense_reason_t
+compare_and_write_do_cmp(struct scatterlist *read_sgl, unsigned int read_nents,
+ struct scatterlist *cmp_sgl, unsigned int cmp_nents,
+ unsigned int cmp_len, unsigned int *miscmp_off)
+{
+ unsigned char *buf = NULL;
+ struct scatterlist *sg;
+ sense_reason_t ret;
+ unsigned int offset;
+ size_t rc;
+ int i;
+
+ buf = kzalloc(cmp_len, GFP_KERNEL);
+ if (!buf) {
+ ret = TCM_OUT_OF_RESOURCES;
+ goto out;
+ }
+
+ rc = sg_copy_to_buffer(cmp_sgl, cmp_nents, buf, cmp_len);
+ if (!rc) {
+ pr_err("sg_copy_to_buffer() failed for compare_and_write\n");
+ ret = TCM_OUT_OF_RESOURCES;
+ goto out;
+ }
+ /*
+ * Compare SCSI READ payload against verify payload
+ */
+ offset = 0;
+ ret = TCM_NO_SENSE;
+ for_each_sg(read_sgl, sg, read_nents, i) {
+ unsigned int len = min(sg->length, cmp_len);
+ unsigned char *addr = kmap_atomic(sg_page(sg));
+
+ if (memcmp(addr, buf + offset, len)) {
+ unsigned int i;
+
+ for (i = 0; i < len && addr[i] == buf[offset + i]; i++)
+ ;
+ *miscmp_off = offset + i;
+ pr_warn("Detected MISCOMPARE at offset %u\n",
+ *miscmp_off);
+ ret = TCM_MISCOMPARE_VERIFY;
+ }
+ kunmap_atomic(addr);
+ if (ret != TCM_NO_SENSE)
+ goto out;
+
+ offset += len;
+ cmp_len -= len;
+ if (!cmp_len)
+ break;
+ }
+ pr_debug("COMPARE AND WRITE read data matches compare data\n");
+out:
+ kfree(buf);
+ return ret;
+}
+
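
A flat-buffer illustration of the scan above (the real code walks the read scatterlist segment by segment): the index of the first unequal byte is what lands in *miscmp_off and, per the SBC-4 wording quoted below, in the INFORMATION field of the MISCOMPARE sense data.

static unsigned int first_mismatch(const u8 *read_buf, const u8 *cmp_buf,
				   unsigned int len)
{
	unsigned int i;

	for (i = 0; i < len && read_buf[i] == cmp_buf[i]; i++)
		;
	return i;	/* == len when the buffers are identical */
}

/* e.g. read {'A','B','C','D'} vs cmp {'A','B','X','D'} -> returns 2 */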
static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success,
int *post_ret)
{
struct se_device *dev = cmd->se_dev;
struct sg_table write_tbl = { };
- struct scatterlist *write_sg, *sg;
- unsigned char *buf = NULL, *addr;
+ struct scatterlist *write_sg;
struct sg_mapping_iter m;
- unsigned int offset = 0, len;
- unsigned int nlbas = cmd->t_task_nolb;
+ unsigned int len;
unsigned int block_size = dev->dev_attrib.block_size;
- unsigned int compare_len = (nlbas * block_size);
+ unsigned int compare_len = (cmd->t_task_nolb * block_size);
+ unsigned int miscmp_off = 0;
sense_reason_t ret = TCM_NO_SENSE;
- int rc, i;
+ int i;
/*
* Handle early failure in transport_generic_request_failure(),
@@ -473,12 +534,23 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes
goto out;
}
- buf = kzalloc(cmd->data_length, GFP_KERNEL);
- if (!buf) {
- pr_err("Unable to allocate compare_and_write buf\n");
- ret = TCM_OUT_OF_RESOURCES;
+ ret = compare_and_write_do_cmp(cmd->t_bidi_data_sg,
+ cmd->t_bidi_data_nents,
+ cmd->t_data_sg,
+ cmd->t_data_nents,
+ compare_len,
+ &miscmp_off);
+ if (ret == TCM_MISCOMPARE_VERIFY) {
+ /*
+ * SBC-4 r15: 5.3 COMPARE AND WRITE command
+ * In the sense data (see 4.18 and SPC-5) the offset from the
+ * start of the Data-Out Buffer to the first byte of data that
+ * was not equal shall be reported in the INFORMATION field.
+ */
+ cmd->sense_info = miscmp_off;
+ goto out;
+ } else if (ret)
goto out;
- }
if (sg_alloc_table(&write_tbl, cmd->t_data_nents, GFP_KERNEL) < 0) {
pr_err("Unable to allocate compare_and_write sg\n");
@@ -486,44 +558,9 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes
goto out;
}
write_sg = write_tbl.sgl;
- /*
- * Setup verify and write data payloads from total NumberLBAs.
- */
- rc = sg_copy_to_buffer(cmd->t_data_sg, cmd->t_data_nents, buf,
- cmd->data_length);
- if (!rc) {
- pr_err("sg_copy_to_buffer() failed for compare_and_write\n");
- ret = TCM_OUT_OF_RESOURCES;
- goto out;
- }
- /*
- * Compare against SCSI READ payload against verify payload
- */
- for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, i) {
- addr = (unsigned char *)kmap_atomic(sg_page(sg));
- if (!addr) {
- ret = TCM_OUT_OF_RESOURCES;
- goto out;
- }
-
- len = min(sg->length, compare_len);
-
- if (memcmp(addr, buf + offset, len)) {
- pr_warn("Detected MISCOMPARE for addr: %p buf: %p\n",
- addr, buf + offset);
- kunmap_atomic(addr);
- goto miscompare;
- }
- kunmap_atomic(addr);
-
- offset += len;
- compare_len -= len;
- if (!compare_len)
- break;
- }
i = 0;
- len = cmd->t_task_nolb * block_size;
+ len = compare_len;
sg_miter_start(&m, cmd->t_data_sg, cmd->t_data_nents, SG_MITER_TO_SG);
/*
* Currently assumes NoLB=1 and SGLs are PAGE_SIZE..
@@ -568,13 +605,8 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes
__target_execute_cmd(cmd, false);
- kfree(buf);
return ret;
-miscompare:
- pr_warn("Target/%s: Send MISCOMPARE check condition and sense\n",
- dev->transport->name);
- ret = TCM_MISCOMPARE_VERIFY;
out:
/*
* In the MISCOMPARE or failure case, unlock ->caw_sem obtained in
@@ -582,7 +614,6 @@ out:
*/
up(&dev->caw_sem);
sg_free_table(&write_tbl);
- kfree(buf);
return ret;
}
@@ -1439,7 +1470,7 @@ sbc_dif_verify(struct se_cmd *cmd, sector_t start, unsigned int sectors,
if (rc) {
kunmap_atomic(daddr - dsg->offset);
kunmap_atomic(paddr - psg->offset);
- cmd->bad_sector = sector;
+ cmd->sense_info = sector;
return rc;
}
next:
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index e4513ef09159..7347285471fa 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -121,57 +121,61 @@ void core_tmr_abort_task(
unsigned long flags;
bool rc;
u64 ref_tag;
-
- spin_lock_irqsave(&dev->execute_task_lock, flags);
- list_for_each_entry_safe(se_cmd, next, &dev->state_list, state_list) {
-
- if (se_sess != se_cmd->se_sess)
- continue;
-
- /* skip task management functions, including tmr->task_cmd */
- if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
- continue;
-
- ref_tag = se_cmd->tag;
- if (tmr->ref_task_tag != ref_tag)
- continue;
-
- printk("ABORT_TASK: Found referenced %s task_tag: %llu\n",
- se_cmd->se_tfo->fabric_name, ref_tag);
-
- spin_lock(&se_sess->sess_cmd_lock);
- rc = __target_check_io_state(se_cmd, se_sess, 0);
- spin_unlock(&se_sess->sess_cmd_lock);
- if (!rc)
- continue;
-
- list_move_tail(&se_cmd->state_list, &aborted_list);
- se_cmd->state_active = false;
-
- spin_unlock_irqrestore(&dev->execute_task_lock, flags);
-
- /*
- * Ensure that this ABORT request is visible to the LU RESET
- * code.
- */
- if (!tmr->tmr_dev)
- WARN_ON_ONCE(transport_lookup_tmr_lun(tmr->task_cmd) <
- 0);
-
- if (dev->transport->tmr_notify)
- dev->transport->tmr_notify(dev, TMR_ABORT_TASK,
- &aborted_list);
-
- list_del_init(&se_cmd->state_list);
- target_put_cmd_and_wait(se_cmd);
-
- printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
- " ref_tag: %llu\n", ref_tag);
- tmr->response = TMR_FUNCTION_COMPLETE;
- atomic_long_inc(&dev->aborts_complete);
- return;
+ int i;
+
+ for (i = 0; i < dev->queue_cnt; i++) {
+ spin_lock_irqsave(&dev->queues[i].lock, flags);
+ list_for_each_entry_safe(se_cmd, next, &dev->queues[i].state_list,
+ state_list) {
+ if (se_sess != se_cmd->se_sess)
+ continue;
+
+ /*
+ * skip task management functions, including
+ * tmr->task_cmd
+ */
+ if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
+ continue;
+
+ ref_tag = se_cmd->tag;
+ if (tmr->ref_task_tag != ref_tag)
+ continue;
+
+ pr_err("ABORT_TASK: Found referenced %s task_tag: %llu\n",
+ se_cmd->se_tfo->fabric_name, ref_tag);
+
+ spin_lock(&se_sess->sess_cmd_lock);
+ rc = __target_check_io_state(se_cmd, se_sess, 0);
+ spin_unlock(&se_sess->sess_cmd_lock);
+ if (!rc)
+ continue;
+
+ list_move_tail(&se_cmd->state_list, &aborted_list);
+ se_cmd->state_active = false;
+ spin_unlock_irqrestore(&dev->queues[i].lock, flags);
+
+ /*
+ * Ensure that this ABORT request is visible to the LU
+ * RESET code.
+ */
+ if (!tmr->tmr_dev)
+ WARN_ON_ONCE(transport_lookup_tmr_lun(tmr->task_cmd) < 0);
+
+ if (dev->transport->tmr_notify)
+ dev->transport->tmr_notify(dev, TMR_ABORT_TASK,
+ &aborted_list);
+
+ list_del_init(&se_cmd->state_list);
+ target_put_cmd_and_wait(se_cmd);
+
+ pr_err("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for ref_tag: %llu\n",
+ ref_tag);
+ tmr->response = TMR_FUNCTION_COMPLETE;
+ atomic_long_inc(&dev->aborts_complete);
+ return;
+ }
+ spin_unlock_irqrestore(&dev->queues[i].lock, flags);
}
- spin_unlock_irqrestore(&dev->execute_task_lock, flags);
if (dev->transport->tmr_notify)
dev->transport->tmr_notify(dev, TMR_ABORT_TASK, &aborted_list);
@@ -273,7 +277,7 @@ static void core_tmr_drain_state_list(
struct se_session *sess;
struct se_cmd *cmd, *next;
unsigned long flags;
- int rc;
+ int rc, i;
/*
* Complete outstanding commands with TASK_ABORTED SAM status.
@@ -297,35 +301,39 @@ static void core_tmr_drain_state_list(
* Note that this seems to be independent of TAS (Task Aborted Status)
* in the Control Mode Page.
*/
- spin_lock_irqsave(&dev->execute_task_lock, flags);
- list_for_each_entry_safe(cmd, next, &dev->state_list, state_list) {
- /*
- * For PREEMPT_AND_ABORT usage, only process commands
- * with a matching reservation key.
- */
- if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
- continue;
-
- /*
- * Not aborting PROUT PREEMPT_AND_ABORT CDB..
- */
- if (prout_cmd == cmd)
- continue;
-
- sess = cmd->se_sess;
- if (WARN_ON_ONCE(!sess))
- continue;
-
- spin_lock(&sess->sess_cmd_lock);
- rc = __target_check_io_state(cmd, tmr_sess, tas);
- spin_unlock(&sess->sess_cmd_lock);
- if (!rc)
- continue;
-
- list_move_tail(&cmd->state_list, &drain_task_list);
- cmd->state_active = false;
+ for (i = 0; i < dev->queue_cnt; i++) {
+ spin_lock_irqsave(&dev->queues[i].lock, flags);
+ list_for_each_entry_safe(cmd, next, &dev->queues[i].state_list,
+ state_list) {
+ /*
+ * For PREEMPT_AND_ABORT usage, only process commands
+ * with a matching reservation key.
+ */
+ if (target_check_cdb_and_preempt(preempt_and_abort_list,
+ cmd))
+ continue;
+
+ /*
+ * Not aborting PROUT PREEMPT_AND_ABORT CDB..
+ */
+ if (prout_cmd == cmd)
+ continue;
+
+ sess = cmd->se_sess;
+ if (WARN_ON_ONCE(!sess))
+ continue;
+
+ spin_lock(&sess->sess_cmd_lock);
+ rc = __target_check_io_state(cmd, tmr_sess, tas);
+ spin_unlock(&sess->sess_cmd_lock);
+ if (!rc)
+ continue;
+
+ list_move_tail(&cmd->state_list, &drain_task_list);
+ cmd->state_active = false;
+ }
+ spin_unlock_irqrestore(&dev->queues[i].lock, flags);
}
- spin_unlock_irqrestore(&dev->execute_task_lock, flags);
if (dev->transport->tmr_notify)
dev->transport->tmr_notify(dev, preempt_and_abort_list ?
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 62aa5fa63ac0..736847c933e5 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -328,7 +328,7 @@ static void target_shutdown_sessions(struct se_node_acl *acl)
restart:
spin_lock_irqsave(&acl->nacl_sess_lock, flags);
list_for_each_entry(sess, &acl->acl_sess_list, sess_acl_list) {
- if (sess->sess_tearing_down)
+ if (atomic_read(&sess->stopped))
continue;
list_del_init(&sess->sess_acl_list);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index ff26ab0a5f60..fca4bd079d02 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -215,7 +215,7 @@ static void target_release_sess_cmd_refcnt(struct percpu_ref *ref)
{
struct se_session *sess = container_of(ref, typeof(*sess), cmd_count);
- wake_up(&sess->cmd_list_wq);
+ wake_up(&sess->cmd_count_wq);
}
/**
@@ -228,9 +228,10 @@ int transport_init_session(struct se_session *se_sess)
{
INIT_LIST_HEAD(&se_sess->sess_list);
INIT_LIST_HEAD(&se_sess->sess_acl_list);
- INIT_LIST_HEAD(&se_sess->sess_cmd_list);
spin_lock_init(&se_sess->sess_cmd_lock);
- init_waitqueue_head(&se_sess->cmd_list_wq);
+ init_waitqueue_head(&se_sess->cmd_count_wq);
+ init_completion(&se_sess->stop_done);
+ atomic_set(&se_sess->stopped, 0);
return percpu_ref_init(&se_sess->cmd_count,
target_release_sess_cmd_refcnt, 0, GFP_KERNEL);
}
@@ -238,6 +239,14 @@ EXPORT_SYMBOL(transport_init_session);
void transport_uninit_session(struct se_session *se_sess)
{
+ /*
+ * Drivers like iscsi and loop do not call target_stop_session
+ * during session shutdown, so we have to drop the ref taken at init
+ * time here.
+ */
+ if (!atomic_read(&se_sess->stopped))
+ percpu_ref_put(&se_sess->cmd_count);
+
percpu_ref_exit(&se_sess->cmd_count);
}
@@ -650,12 +659,12 @@ static void target_remove_from_state_list(struct se_cmd *cmd)
if (!dev)
return;
- spin_lock_irqsave(&dev->execute_task_lock, flags);
+ spin_lock_irqsave(&dev->queues[cmd->cpuid].lock, flags);
if (cmd->state_active) {
list_del(&cmd->state_list);
cmd->state_active = false;
}
- spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+ spin_unlock_irqrestore(&dev->queues[cmd->cpuid].lock, flags);
}
/*
@@ -866,10 +875,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
INIT_WORK(&cmd->work, success ? target_complete_ok_work :
target_complete_failure_work);
- if (cmd->se_cmd_flags & SCF_USE_CPUID)
- queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work);
- else
- queue_work(target_completion_wq, &cmd->work);
+ queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work);
}
EXPORT_SYMBOL(target_complete_cmd);
@@ -897,12 +903,13 @@ static void target_add_to_state_list(struct se_cmd *cmd)
struct se_device *dev = cmd->se_dev;
unsigned long flags;
- spin_lock_irqsave(&dev->execute_task_lock, flags);
+ spin_lock_irqsave(&dev->queues[cmd->cpuid].lock, flags);
if (!cmd->state_active) {
- list_add_tail(&cmd->state_list, &dev->state_list);
+ list_add_tail(&cmd->state_list,
+ &dev->queues[cmd->cpuid].state_list);
cmd->state_active = true;
}
- spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+ spin_unlock_irqrestore(&dev->queues[cmd->cpuid].lock, flags);
}
/*
@@ -1390,6 +1397,9 @@ void transport_init_se_cmd(
cmd->sense_buffer = sense_buffer;
cmd->orig_fe_lun = unpacked_lun;
+ if (!(cmd->se_cmd_flags & SCF_USE_CPUID))
+ cmd->cpuid = smp_processor_id();
+
cmd->state_active = false;
}
EXPORT_SYMBOL(transport_init_se_cmd);
@@ -1607,6 +1617,9 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
BUG_ON(!se_tpg);
BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
BUG_ON(in_interrupt());
+
+ if (flags & TARGET_SCF_USE_CPUID)
+ se_cmd->se_cmd_flags |= SCF_USE_CPUID;
/*
* Initialize se_cmd for target operation. From this point
* exceptions are handled by sending exception status via
@@ -1616,17 +1629,11 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
data_length, data_dir, task_attr, sense,
unpacked_lun);
- if (flags & TARGET_SCF_USE_CPUID)
- se_cmd->se_cmd_flags |= SCF_USE_CPUID;
- else
- se_cmd->cpuid = WORK_CPU_UNBOUND;
-
if (flags & TARGET_SCF_UNKNOWN_SIZE)
se_cmd->unknown_data_length = 1;
/*
- * Obtain struct se_cmd->cmd_kref reference and add new cmd to
- * se_sess->sess_cmd_list. A second kref_get here is necessary
- * for fabrics using TARGET_SCF_ACK_KREF that expect a second
+ * Obtain struct se_cmd->cmd_kref reference. A second kref_get here is
+ * necessary for fabrics using TARGET_SCF_ACK_KREF that expect a second
* kref_put() to happen during fabric packet acknowledgement.
*/
ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
@@ -1764,29 +1771,6 @@ static void target_complete_tmr_failure(struct work_struct *work)
transport_cmd_check_stop_to_fabric(se_cmd);
}
-static bool target_lookup_lun_from_tag(struct se_session *se_sess, u64 tag,
- u64 *unpacked_lun)
-{
- struct se_cmd *se_cmd;
- unsigned long flags;
- bool ret = false;
-
- spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
- list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
- if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
- continue;
-
- if (se_cmd->tag == tag) {
- *unpacked_lun = se_cmd->orig_fe_lun;
- ret = true;
- break;
- }
- }
- spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
-
- return ret;
-}
-
/**
* target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd
* for TMR CDBs
@@ -1834,16 +1818,6 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
core_tmr_release_req(se_cmd->se_tmr_req);
return ret;
}
- /*
- * If this is ABORT_TASK with no explicit fabric provided LUN,
- * go ahead and search active session tags for a match to figure
- * out unpacked_lun for the original se_cmd.
- */
- if (tm_type == TMR_ABORT_TASK && (flags & TARGET_SCF_LOOKUP_LUN_FROM_TAG)) {
- if (!target_lookup_lun_from_tag(se_sess, tag,
- &se_cmd->orig_fe_lun))
- goto failure;
- }
ret = transport_lookup_tmr_lun(se_cmd);
if (ret)
@@ -2788,14 +2762,13 @@ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
EXPORT_SYMBOL(transport_generic_free_cmd);
/**
- * target_get_sess_cmd - Add command to active ->sess_cmd_list
+ * target_get_sess_cmd - Verify the session is accepting cmds and take ref
* @se_cmd: command descriptor to add
* @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd()
*/
int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
{
struct se_session *se_sess = se_cmd->se_sess;
- unsigned long flags;
int ret = 0;
/*
@@ -2810,15 +2783,8 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
se_cmd->se_cmd_flags |= SCF_ACK_KREF;
}
- spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
- if (se_sess->sess_tearing_down) {
+ if (!percpu_ref_tryget_live(&se_sess->cmd_count))
ret = -ESHUTDOWN;
- goto out;
- }
- list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
- percpu_ref_get(&se_sess->cmd_count);
-out:
- spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
if (ret && ack_kref)
target_put_sess_cmd(se_cmd);
@@ -2843,13 +2809,6 @@ static void target_release_cmd_kref(struct kref *kref)
struct se_session *se_sess = se_cmd->se_sess;
struct completion *free_compl = se_cmd->free_compl;
struct completion *abrt_compl = se_cmd->abrt_compl;
- unsigned long flags;
-
- if (se_sess) {
- spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
- list_del_init(&se_cmd->se_cmd_list);
- spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
- }
target_free_cmd_mem(se_cmd);
se_cmd->se_tfo->release_cmd(se_cmd);
@@ -2977,21 +2936,25 @@ void target_show_cmd(const char *pfx, struct se_cmd *cmd)
}
EXPORT_SYMBOL(target_show_cmd);
+static void target_stop_session_confirm(struct percpu_ref *ref)
+{
+ struct se_session *se_sess = container_of(ref, struct se_session,
+ cmd_count);
+ complete_all(&se_sess->stop_done);
+}
+
/**
- * target_sess_cmd_list_set_waiting - Set sess_tearing_down so no new commands are queued.
- * @se_sess: session to flag
+ * target_stop_session - Stop new IO from being queued on the session.
+ * @se_sess: session to stop
*/
-void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
+void target_stop_session(struct se_session *se_sess)
{
- unsigned long flags;
-
- spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
- se_sess->sess_tearing_down = 1;
- spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
-
- percpu_ref_kill(&se_sess->cmd_count);
+ pr_debug("Stopping session queue.\n");
+ if (atomic_cmpxchg(&se_sess->stopped, 0, 1) == 0)
+ percpu_ref_kill_and_confirm(&se_sess->cmd_count,
+ target_stop_session_confirm);
}
-EXPORT_SYMBOL(target_sess_cmd_list_set_waiting);
+EXPORT_SYMBOL(target_stop_session);
/**
* target_wait_for_sess_cmds - Wait for outstanding commands
@@ -2999,19 +2962,19 @@ EXPORT_SYMBOL(target_sess_cmd_list_set_waiting);
*/
void target_wait_for_sess_cmds(struct se_session *se_sess)
{
- struct se_cmd *cmd;
int ret;
- WARN_ON_ONCE(!se_sess->sess_tearing_down);
+ WARN_ON_ONCE(!atomic_read(&se_sess->stopped));
do {
- ret = wait_event_timeout(se_sess->cmd_list_wq,
+ pr_debug("Waiting for running cmds to complete.\n");
+ ret = wait_event_timeout(se_sess->cmd_count_wq,
percpu_ref_is_zero(&se_sess->cmd_count),
180 * HZ);
- list_for_each_entry(cmd, &se_sess->sess_cmd_list, se_cmd_list)
- target_show_cmd("session shutdown: still waiting for ",
- cmd);
} while (ret <= 0);
+
+ wait_for_completion(&se_sess->stop_done);
+ pr_debug("Waiting for cmds done.\n");
}
EXPORT_SYMBOL(target_wait_for_sess_cmds);
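
The hunks above replace the per-session command list and its spinlock with a
bare percpu_ref: submission takes a live reference, command release drops it,
and shutdown kills the ref and waits for it to drain. A minimal sketch of that
lifecycle, using hypothetical demo_* names rather than the real se_session
fields:

#include <linux/percpu-refcount.h>
#include <linux/completion.h>
#include <linux/wait.h>
#include <linux/gfp.h>
#include <linux/errno.h>

struct demo_session {
	struct percpu_ref	cmd_count;
	struct completion	stop_done;
	wait_queue_head_t	cmd_count_wq;
};

/* Release callback: runs when the killed ref finally drops to zero. */
static void demo_cmd_count_release(struct percpu_ref *ref)
{
	struct demo_session *s = container_of(ref, struct demo_session, cmd_count);

	wake_up(&s->cmd_count_wq);
}

/* Confirm callback: runs once the kill is visible on all CPUs. */
static void demo_stop_confirm(struct percpu_ref *ref)
{
	struct demo_session *s = container_of(ref, struct demo_session, cmd_count);

	complete_all(&s->stop_done);
}

static int demo_session_init(struct demo_session *s)
{
	init_completion(&s->stop_done);
	init_waitqueue_head(&s->cmd_count_wq);
	return percpu_ref_init(&s->cmd_count, demo_cmd_count_release, 0,
			       GFP_KERNEL);
}

static int demo_submit(struct demo_session *s)
{
	if (!percpu_ref_tryget_live(&s->cmd_count))
		return -ESHUTDOWN;	/* session already stopping */
	/* ... queue the command; percpu_ref_put(&s->cmd_count) on release ... */
	return 0;
}

static void demo_stop_and_wait(struct demo_session *s)
{
	percpu_ref_kill_and_confirm(&s->cmd_count, demo_stop_confirm);
	wait_event(s->cmd_count_wq, percpu_ref_is_zero(&s->cmd_count));
	wait_for_completion(&s->stop_done);
}

Note that the real target_stop_session() additionally guards the kill with an
atomic cmpxchg on se_sess->stopped, since percpu_ref_kill_and_confirm() must
not be invoked twice on the same ref.
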
@@ -3094,14 +3057,14 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
}
EXPORT_SYMBOL(transport_wait_for_tasks);
-struct sense_info {
+struct sense_detail {
u8 key;
u8 asc;
u8 ascq;
- bool add_sector_info;
+ bool add_sense_info;
};
-static const struct sense_info sense_info_table[] = {
+static const struct sense_detail sense_detail_table[] = {
[TCM_NO_SENSE] = {
.key = NOT_READY
},
@@ -3196,24 +3159,25 @@ static const struct sense_info sense_info_table[] = {
.key = MISCOMPARE,
.asc = 0x1d, /* MISCOMPARE DURING VERIFY OPERATION */
.ascq = 0x00,
+ .add_sense_info = true,
},
[TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED] = {
.key = ABORTED_COMMAND,
.asc = 0x10,
.ascq = 0x01, /* LOGICAL BLOCK GUARD CHECK FAILED */
- .add_sector_info = true,
+ .add_sense_info = true,
},
[TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED] = {
.key = ABORTED_COMMAND,
.asc = 0x10,
.ascq = 0x02, /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */
- .add_sector_info = true,
+ .add_sense_info = true,
},
[TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED] = {
.key = ABORTED_COMMAND,
.asc = 0x10,
.ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
- .add_sector_info = true,
+ .add_sense_info = true,
},
[TCM_COPY_TARGET_DEVICE_NOT_REACHABLE] = {
.key = COPY_ABORTED,
@@ -3261,42 +3225,42 @@ static const struct sense_info sense_info_table[] = {
*/
static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
{
- const struct sense_info *si;
+ const struct sense_detail *sd;
u8 *buffer = cmd->sense_buffer;
int r = (__force int)reason;
u8 key, asc, ascq;
bool desc_format = target_sense_desc_format(cmd->se_dev);
- if (r < ARRAY_SIZE(sense_info_table) && sense_info_table[r].key)
- si = &sense_info_table[r];
+ if (r < ARRAY_SIZE(sense_detail_table) && sense_detail_table[r].key)
+ sd = &sense_detail_table[r];
else
- si = &sense_info_table[(__force int)
+ sd = &sense_detail_table[(__force int)
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE];
- key = si->key;
+ key = sd->key;
if (reason == TCM_CHECK_CONDITION_UNIT_ATTENTION) {
if (!core_scsi3_ua_for_check_condition(cmd, &key, &asc,
&ascq)) {
cmd->scsi_status = SAM_STAT_BUSY;
return;
}
- } else if (si->asc == 0) {
+ } else if (sd->asc == 0) {
WARN_ON_ONCE(cmd->scsi_asc == 0);
asc = cmd->scsi_asc;
ascq = cmd->scsi_ascq;
} else {
- asc = si->asc;
- ascq = si->ascq;
+ asc = sd->asc;
+ ascq = sd->ascq;
}
cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
scsi_build_sense_buffer(desc_format, buffer, key, asc, ascq);
- if (si->add_sector_info)
+ if (sd->add_sense_info)
WARN_ON_ONCE(scsi_set_sense_information(buffer,
cmd->scsi_sense_length,
- cmd->bad_sector) < 0);
+ cmd->sense_info) < 0);
}
int
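
As a concrete reading of the renamed table: the entry for
TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED yields sense key ABORTED COMMAND (0x0b)
with ASC/ASCQ 0x10/0x01, and add_sense_info now appends cmd->sense_info
(previously cmd->bad_sector) as the sense information field. A standalone
sketch of building such a buffer with the same SCSI helpers, assuming
fixed-format sense and an illustrative LBA:

#include <linux/types.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>

static void demo_build_guard_failure_sense(u8 *sense, int len, u64 bad_lba)
{
	/* Fixed-format (desc = 0) sense: ABORTED COMMAND, guard check failed. */
	scsi_build_sense_buffer(0, sense, ABORTED_COMMAND, 0x10, 0x01);
	/* Stash the offending LBA in the sense information field. */
	scsi_set_sense_information(sense, len, bad_lba);
}
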
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 590e6d072228..a5991df23581 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -562,8 +562,6 @@ tcmu_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd)
{
- if (tcmu_cmd->se_cmd)
- tcmu_cmd->se_cmd->priv = NULL;
kfree(tcmu_cmd->dbi);
kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
}
@@ -586,14 +584,15 @@ static inline void tcmu_cmd_set_block_cnts(struct tcmu_cmd *cmd)
}
static int new_block_to_iov(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
- struct iovec **iov, int prev_dbi, int *remain)
+ struct iovec **iov, int prev_dbi, int len)
{
/* Get the next dbi */
int dbi = tcmu_cmd_get_dbi(cmd);
+
/* Do not add more than DATA_BLOCK_SIZE to iov */
- int len = min_t(int, DATA_BLOCK_SIZE, *remain);
+ if (len > DATA_BLOCK_SIZE)
+ len = DATA_BLOCK_SIZE;
- *remain -= len;
/*
* The following code will gather and map the blocks to the same iovec
* when the blocks are all next to each other.
@@ -618,8 +617,8 @@ static void tcmu_setup_iovs(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
int dbi = -2;
/* We prepare the IOVs for DMA_FROM_DEVICE transfer direction */
- while (data_length > 0)
- dbi = new_block_to_iov(udev, cmd, iov, dbi, &data_length);
+ for (; data_length > 0; data_length -= DATA_BLOCK_SIZE)
+ dbi = new_block_to_iov(udev, cmd, iov, dbi, data_length);
}
static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
@@ -688,67 +687,83 @@ static inline size_t head_to_end(size_t head, size_t size)
#define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size)
-static void scatter_data_area(struct tcmu_dev *udev, struct tcmu_cmd *tcmu_cmd,
- struct iovec **iov)
+#define TCMU_SG_TO_DATA_AREA 1
+#define TCMU_DATA_AREA_TO_SG 2
+
+static inline void tcmu_copy_data(struct tcmu_dev *udev,
+ struct tcmu_cmd *tcmu_cmd, uint32_t direction,
+ struct scatterlist *sg, unsigned int sg_nents,
+ struct iovec **iov, size_t data_len)
{
- struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
/* start value of dbi + 1 must not be a valid dbi */
- int i, dbi = -2;
- int block_remaining = 0;
- int data_len = se_cmd->data_length;
- void *from, *to = NULL;
- size_t copy_bytes, offset;
- struct scatterlist *sg;
- struct page *page = NULL;
-
- for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, i) {
- int sg_remaining = sg->length;
- from = kmap_atomic(sg_page(sg)) + sg->offset;
- while (sg_remaining > 0) {
- if (block_remaining == 0) {
- if (to) {
- flush_dcache_page(page);
- kunmap_atomic(to);
- }
-
- /* get next dbi and add to IOVs */
- dbi = new_block_to_iov(udev, tcmu_cmd, iov, dbi,
- &data_len);
- page = tcmu_get_block_page(udev, dbi);
- to = kmap_atomic(page);
- block_remaining = DATA_BLOCK_SIZE;
- }
+ int dbi = -2;
+ size_t block_remaining, cp_len;
+ struct sg_mapping_iter sg_iter;
+ unsigned int sg_flags;
+ struct page *page;
+ void *data_page_start, *data_addr;
- copy_bytes = min_t(size_t, sg_remaining,
- block_remaining);
- offset = DATA_BLOCK_SIZE - block_remaining;
- memcpy(to + offset, from + sg->length - sg_remaining,
- copy_bytes);
+ if (direction == TCMU_SG_TO_DATA_AREA)
+ sg_flags = SG_MITER_ATOMIC | SG_MITER_FROM_SG;
+ else
+ sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;
+ sg_miter_start(&sg_iter, sg, sg_nents, sg_flags);
- sg_remaining -= copy_bytes;
- block_remaining -= copy_bytes;
+ while (data_len) {
+ if (direction == TCMU_SG_TO_DATA_AREA)
+ dbi = new_block_to_iov(udev, tcmu_cmd, iov, dbi,
+ data_len);
+ else
+ dbi = tcmu_cmd_get_dbi(tcmu_cmd);
+ page = tcmu_get_block_page(udev, dbi);
+ if (direction == TCMU_DATA_AREA_TO_SG)
+ flush_dcache_page(page);
+ data_page_start = kmap_atomic(page);
+ block_remaining = DATA_BLOCK_SIZE;
+
+ while (block_remaining && data_len) {
+ if (!sg_miter_next(&sg_iter)) {
+ /* set length to 0 to abort outer loop */
+ data_len = 0;
+ pr_debug("tcmu_move_data: aborting data copy due to exhausted sg_list\n");
+ break;
+ }
+ cp_len = min3(sg_iter.length, block_remaining, data_len);
+
+ data_addr = data_page_start +
+ DATA_BLOCK_SIZE - block_remaining;
+ if (direction == TCMU_SG_TO_DATA_AREA)
+ memcpy(data_addr, sg_iter.addr, cp_len);
+ else
+ memcpy(sg_iter.addr, data_addr, cp_len);
+
+ data_len -= cp_len;
+ block_remaining -= cp_len;
+ sg_iter.consumed = cp_len;
}
- kunmap_atomic(from - sg->offset);
- }
+ sg_miter_stop(&sg_iter);
- if (to) {
- flush_dcache_page(page);
- kunmap_atomic(to);
+ kunmap_atomic(data_page_start);
+ if (direction == TCMU_SG_TO_DATA_AREA)
+ flush_dcache_page(page);
}
}
-static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
+static void scatter_data_area(struct tcmu_dev *udev, struct tcmu_cmd *tcmu_cmd,
+ struct iovec **iov)
+{
+ struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
+
+ tcmu_copy_data(udev, tcmu_cmd, TCMU_SG_TO_DATA_AREA, se_cmd->t_data_sg,
+ se_cmd->t_data_nents, iov, se_cmd->data_length);
+}
+
+static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *tcmu_cmd,
bool bidi, uint32_t read_len)
{
- struct se_cmd *se_cmd = cmd->se_cmd;
- int i, dbi;
- int block_remaining = 0;
- void *from = NULL, *to;
- size_t copy_bytes, offset;
- struct scatterlist *sg, *data_sg;
- struct page *page;
+ struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
+ struct scatterlist *data_sg;
unsigned int data_nents;
- uint32_t count = 0;
if (!bidi) {
data_sg = se_cmd->t_data_sg;
@@ -759,46 +774,15 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
* buffer blocks, and before gathering the Data-In buffer
* the Data-Out buffer blocks should be skipped.
*/
- count = cmd->dbi_cnt - cmd->dbi_bidi_cnt;
+ tcmu_cmd_set_dbi_cur(tcmu_cmd,
+ tcmu_cmd->dbi_cnt - tcmu_cmd->dbi_bidi_cnt);
data_sg = se_cmd->t_bidi_data_sg;
data_nents = se_cmd->t_bidi_data_nents;
}
- tcmu_cmd_set_dbi_cur(cmd, count);
-
- for_each_sg(data_sg, sg, data_nents, i) {
- int sg_remaining = sg->length;
- to = kmap_atomic(sg_page(sg)) + sg->offset;
- while (sg_remaining > 0 && read_len > 0) {
- if (block_remaining == 0) {
- if (from)
- kunmap_atomic(from);
-
- block_remaining = DATA_BLOCK_SIZE;
- dbi = tcmu_cmd_get_dbi(cmd);
- page = tcmu_get_block_page(udev, dbi);
- from = kmap_atomic(page);
- flush_dcache_page(page);
- }
- copy_bytes = min_t(size_t, sg_remaining,
- block_remaining);
- if (read_len < copy_bytes)
- copy_bytes = read_len;
- offset = DATA_BLOCK_SIZE - block_remaining;
- memcpy(to + sg->length - sg_remaining, from + offset,
- copy_bytes);
-
- sg_remaining -= copy_bytes;
- block_remaining -= copy_bytes;
- read_len -= copy_bytes;
- }
- kunmap_atomic(to - sg->offset);
- if (read_len == 0)
- break;
- }
- if (from)
- kunmap_atomic(from);
+ tcmu_copy_data(udev, tcmu_cmd, TCMU_DATA_AREA_TO_SG, data_sg,
+ data_nents, NULL, read_len);
}
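
Both copy paths above now funnel through tcmu_copy_data(), which walks the
scatterlist with an sg_mapping_iter instead of the old open-coded
kmap_atomic()/kunmap_atomic() bookkeeping. A self-contained sketch of that
iterator pattern (a hypothetical helper, not part of the driver), copying
from an sg list into a flat buffer:

#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/minmax.h>

static size_t demo_sg_to_buf(struct scatterlist *sgl, unsigned int nents,
			     void *buf, size_t data_len)
{
	struct sg_mapping_iter miter;
	size_t copied = 0;

	sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	while (copied < data_len && sg_miter_next(&miter)) {
		size_t len = min(miter.length, data_len - copied);

		memcpy(buf + copied, miter.addr, len);
		copied += len;
		miter.consumed = len;	/* tell the iterator how much we used */
	}
	sg_miter_stop(&miter);

	return copied;
}
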
static inline size_t spc_bitmap_free(unsigned long *bitmap, uint32_t thresh)
@@ -1188,11 +1172,12 @@ tcmu_queue_cmd(struct se_cmd *se_cmd)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
mutex_lock(&udev->cmdr_lock);
- se_cmd->priv = tcmu_cmd;
if (!(se_cmd->transport_state & CMD_T_ABORTED))
ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
if (ret < 0)
tcmu_free_cmd(tcmu_cmd);
+ else
+ se_cmd->priv = tcmu_cmd;
mutex_unlock(&udev->cmdr_lock);
return scsi_ret;
}
@@ -1255,6 +1240,7 @@ tcmu_tmr_notify(struct se_device *se_dev, enum tcm_tmreq_table tmf,
list_del_init(&cmd->queue_entry);
tcmu_free_cmd(cmd);
+ se_cmd->priv = NULL;
target_complete_cmd(se_cmd, SAM_STAT_TASK_ABORTED);
unqueued = true;
}
@@ -1346,6 +1332,7 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
}
done:
+ se_cmd->priv = NULL;
if (read_len_valid) {
pr_debug("read_len = %d\n", read_len);
target_complete_cmd_with_length(cmd->se_cmd,
@@ -1492,6 +1479,7 @@ static void tcmu_check_expired_queue_cmd(struct tcmu_cmd *cmd)
se_cmd = cmd->se_cmd;
tcmu_free_cmd(cmd);
+ se_cmd->priv = NULL;
target_complete_cmd(se_cmd, SAM_STAT_TASK_SET_FULL);
}
@@ -1606,6 +1594,7 @@ static void run_qfull_queue(struct tcmu_dev *udev, bool fail)
* removed then LIO core will do the right thing and
* fail the retry.
*/
+ tcmu_cmd->se_cmd->priv = NULL;
target_complete_cmd(tcmu_cmd->se_cmd, SAM_STAT_BUSY);
tcmu_free_cmd(tcmu_cmd);
continue;
@@ -1619,6 +1608,7 @@ static void run_qfull_queue(struct tcmu_dev *udev, bool fail)
* Ignore scsi_ret for now. target_complete_cmd
* drops it.
*/
+ tcmu_cmd->se_cmd->priv = NULL;
target_complete_cmd(tcmu_cmd->se_cmd,
SAM_STAT_CHECK_CONDITION);
tcmu_free_cmd(tcmu_cmd);
@@ -2226,6 +2216,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
WARN_ON(!cmd->se_cmd);
list_del_init(&cmd->queue_entry);
+ cmd->se_cmd->priv = NULL;
if (err_level == 1) {
/*
* Userspace was not able to start the
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 44e15d7fb2f0..66d6f1d06f21 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -46,60 +46,83 @@ static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf)
return 0;
}
-struct xcopy_dev_search_info {
- const unsigned char *dev_wwn;
- struct se_device *found_dev;
-};
-
+/**
+ * target_xcopy_locate_se_dev_e4_iter - compare XCOPY NAA device identifiers
+ *
+ * @se_dev: device being considered for match
+ * @dev_wwn: XCOPY requested NAA dev_wwn
+ * Return: 1 on match, 0 on no match
+ */
static int target_xcopy_locate_se_dev_e4_iter(struct se_device *se_dev,
- void *data)
+ const unsigned char *dev_wwn)
{
- struct xcopy_dev_search_info *info = data;
unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
int rc;
- if (!se_dev->dev_attrib.emulate_3pc)
+ if (!se_dev->dev_attrib.emulate_3pc) {
+ pr_debug("XCOPY: emulate_3pc disabled on se_dev %p\n", se_dev);
return 0;
+ }
memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]);
- rc = memcmp(&tmp_dev_wwn[0], info->dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN);
- if (rc != 0)
- return 0;
-
- info->found_dev = se_dev;
- pr_debug("XCOPY 0xe4: located se_dev: %p\n", se_dev);
-
- rc = target_depend_item(&se_dev->dev_group.cg_item);
+ rc = memcmp(&tmp_dev_wwn[0], dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN);
if (rc != 0) {
- pr_err("configfs_depend_item attempt failed: %d for se_dev: %p\n",
- rc, se_dev);
- return rc;
+ pr_debug("XCOPY: skip non-matching: %*ph\n",
+ XCOPY_NAA_IEEE_REGEX_LEN, tmp_dev_wwn);
+ return 0;
}
+ pr_debug("XCOPY 0xe4: located se_dev: %p\n", se_dev);
- pr_debug("Called configfs_depend_item for se_dev: %p se_dev->se_dev_group: %p\n",
- se_dev, &se_dev->dev_group);
return 1;
}
-static int target_xcopy_locate_se_dev_e4(const unsigned char *dev_wwn,
- struct se_device **found_dev)
+static int target_xcopy_locate_se_dev_e4(struct se_session *sess,
+ const unsigned char *dev_wwn,
+ struct se_device **_found_dev,
+ struct percpu_ref **_found_lun_ref)
{
- struct xcopy_dev_search_info info;
- int ret;
-
- memset(&info, 0, sizeof(info));
- info.dev_wwn = dev_wwn;
-
- ret = target_for_each_device(target_xcopy_locate_se_dev_e4_iter, &info);
- if (ret == 1) {
- *found_dev = info.found_dev;
- return 0;
- } else {
- pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
- return -EINVAL;
+ struct se_dev_entry *deve;
+ struct se_node_acl *nacl;
+ struct se_lun *this_lun = NULL;
+ struct se_device *found_dev = NULL;
+
+ /* cmd with NULL sess indicates no associated $FABRIC_MOD */
+ if (!sess)
+ goto err_out;
+
+ pr_debug("XCOPY 0xe4: searching for: %*ph\n",
+ XCOPY_NAA_IEEE_REGEX_LEN, dev_wwn);
+
+ nacl = sess->se_node_acl;
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
+ struct se_device *this_dev;
+ int rc;
+
+ this_lun = rcu_dereference(deve->se_lun);
+ this_dev = rcu_dereference_raw(this_lun->lun_se_dev);
+
+ rc = target_xcopy_locate_se_dev_e4_iter(this_dev, dev_wwn);
+ if (rc) {
+ if (percpu_ref_tryget_live(&this_lun->lun_ref))
+ found_dev = this_dev;
+ break;
+ }
}
+ rcu_read_unlock();
+ if (found_dev == NULL)
+ goto err_out;
+
+ pr_debug("lun_ref held for se_dev: %p se_dev->se_dev_group: %p\n",
+ found_dev, &found_dev->dev_group);
+ *_found_dev = found_dev;
+ *_found_lun_ref = &this_lun->lun_ref;
+ return 0;
+err_out:
+ pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
+ return -EINVAL;
}
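
Instead of pinning the backing device via configfs_depend_item(), the lookup
above walks the session's RCU-protected LUN table and takes a reference on
the LUN's percpu lun_ref, mirroring how the core pins a LUN for normal I/O.
Reduced to a standalone sketch with hypothetical types, the lookup-then-pin
pattern is:

#include <linux/rculist.h>
#include <linux/percpu-refcount.h>
#include <linux/types.h>

struct demo_entry {
	struct hlist_node	link;
	struct percpu_ref	ref;
	u64			wwn;
};

static struct demo_entry *demo_find_and_pin(struct hlist_head *head, u64 wwn)
{
	struct demo_entry *e, *found = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(e, head, link) {
		if (e->wwn != wwn)
			continue;
		/* The entry may be dying; only a live tryget pins it. */
		if (percpu_ref_tryget_live(&e->ref))
			found = e;
		break;
	}
	rcu_read_unlock();

	return found;	/* caller drops it with percpu_ref_put(&found->ref) */
}
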
static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
@@ -246,12 +269,16 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
switch (xop->op_origin) {
case XCOL_SOURCE_RECV_OP:
- rc = target_xcopy_locate_se_dev_e4(xop->dst_tid_wwn,
- &xop->dst_dev);
+ rc = target_xcopy_locate_se_dev_e4(se_cmd->se_sess,
+ xop->dst_tid_wwn,
+ &xop->dst_dev,
+ &xop->remote_lun_ref);
break;
case XCOL_DEST_RECV_OP:
- rc = target_xcopy_locate_se_dev_e4(xop->src_tid_wwn,
- &xop->src_dev);
+ rc = target_xcopy_locate_se_dev_e4(se_cmd->se_sess,
+ xop->src_tid_wwn,
+ &xop->src_dev,
+ &xop->remote_lun_ref);
break;
default:
pr_err("XCOPY CSCD descriptor IDs not found in CSCD list - "
@@ -391,18 +418,12 @@ static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd)
static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop)
{
- struct se_device *remote_dev;
-
if (xop->op_origin == XCOL_SOURCE_RECV_OP)
- remote_dev = xop->dst_dev;
+ pr_debug("putting dst lun_ref for %p\n", xop->dst_dev);
else
- remote_dev = xop->src_dev;
-
- pr_debug("Calling configfs_undepend_item for"
- " remote_dev: %p remote_dev->dev_group: %p\n",
- remote_dev, &remote_dev->dev_group.cg_item);
+ pr_debug("putting src lun_ref for %p\n", xop->src_dev);
- target_undepend_item(&remote_dev->dev_group.cg_item);
+ percpu_ref_put(xop->remote_lun_ref);
}
static void xcopy_pt_release_cmd(struct se_cmd *se_cmd)
diff --git a/drivers/target/target_core_xcopy.h b/drivers/target/target_core_xcopy.h
index c56a1bde9417..e5f20005179a 100644
--- a/drivers/target/target_core_xcopy.h
+++ b/drivers/target/target_core_xcopy.h
@@ -27,6 +27,7 @@ struct xcopy_op {
struct se_device *dst_dev;
unsigned char dst_tid_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
unsigned char local_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
+ struct percpu_ref *remote_lun_ref;
sector_t src_lba;
sector_t dst_lba;
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index a7ed56602c6c..768f250680d9 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -19,7 +19,6 @@
#include <asm/unaligned.h>
#include <scsi/scsi_tcq.h>
#include <scsi/libfc.h>
-#include <scsi/fc_encode.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
@@ -551,7 +550,7 @@ static void ft_send_work(struct work_struct *work)
if (target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, fcp->fc_cdb,
&cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun),
ntohl(fcp->fc_dl), task_attr, data_dir,
- TARGET_SCF_ACK_KREF | TARGET_SCF_USE_CPUID))
+ TARGET_SCF_ACK_KREF))
goto err;
pr_debug("r_ctl %x target_submit_cmd %p\n", fh->fh_r_ctl, cmd);
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index 6a38ff936389..bbe2e29612fa 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -28,7 +28,6 @@
#include <linux/ratelimit.h>
#include <asm/unaligned.h>
#include <scsi/libfc.h>
-#include <scsi/fc_encode.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index 4fd6a1de947c..23ce506d5402 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -275,7 +275,7 @@ static struct ft_sess *ft_sess_delete(struct ft_tport *tport, u32 port_id)
static void ft_close_sess(struct ft_sess *sess)
{
- target_sess_cmd_list_set_waiting(sess->se_sess);
+ target_stop_session(sess->se_sess);
target_wait_for_sess_cmds(sess->se_sess);
ft_sess_put(sess);
}
diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c
index c981757ba0d4..780d7c4fd756 100644
--- a/drivers/tee/optee/call.c
+++ b/drivers/tee/optee/call.c
@@ -7,6 +7,7 @@
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/mm.h>
+#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include <linux/types.h>
@@ -148,7 +149,8 @@ u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg)
*/
optee_cq_wait_for_completion(&optee->call_queue, &w);
} else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) {
- might_sleep();
+ if (need_resched())
+ cond_resched();
param.a0 = res.a0;
param.a1 = res.a1;
param.a2 = res.a2;
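
The optee change swaps a might_sleep() annotation for an actual scheduling
point: a long run of secure-world RPC round-trips otherwise never yields on a
non-preemptible kernel and can trip the soft-lockup detector. cond_resched()
already returns immediately when no reschedule is pending, so the explicit
need_resched() test only keeps the common path cheap. The idiom in a minimal
(hypothetical) polling loop:

#include <linux/sched.h>
#include <linux/types.h>

static void demo_poll_until(bool (*done)(void *), void *arg)
{
	while (!done(arg)) {
		/* Give other runnable tasks a chance during a long wait. */
		if (need_resched())
			cond_resched();
	}
}
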
diff --git a/drivers/tee/optee/device.c b/drivers/tee/optee/device.c
index 7a897d51969f..ec1d24693eba 100644
--- a/drivers/tee/optee/device.c
+++ b/drivers/tee/optee/device.c
@@ -98,7 +98,7 @@ static int __optee_enumerate_devices(u32 func)
return -ENODEV;
/* Open session with device enumeration pseudo TA */
- memcpy(sess_arg.uuid, pta_uuid.b, TEE_IOCTL_UUID_LEN);
+ export_uuid(sess_arg.uuid, &pta_uuid);
sess_arg.clnt_login = TEE_IOCTL_LOGIN_PUBLIC;
sess_arg.num_params = 0;
diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
index 0966551cbaaa..823354a1a91a 100644
--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
@@ -584,6 +584,7 @@ static int int3400_thermal_remove(struct platform_device *pdev)
static const struct acpi_device_id int3400_thermal_match[] = {
{"INT3400", 0},
{"INTC1040", 0},
+ {"INTC1041", 0},
{}
};
diff --git a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
index ec1d58c4ceaa..c3c4c4d34542 100644
--- a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
@@ -284,6 +284,7 @@ static int int3403_remove(struct platform_device *pdev)
static const struct acpi_device_id int3403_device_ids[] = {
{"INT3403", 0},
{"INTC1043", 0},
+ {"INTC1046", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, int3403_device_ids);
diff --git a/drivers/thunderbolt/acpi.c b/drivers/thunderbolt/acpi.c
index a5f988a9f948..b5442f979b4d 100644
--- a/drivers/thunderbolt/acpi.c
+++ b/drivers/thunderbolt/acpi.c
@@ -56,7 +56,7 @@ static acpi_status tb_acpi_add_link(acpi_handle handle, u32 level, void *data,
* managed with the xHCI and the SuperSpeed hub so we create the
* link from xHCI instead.
*/
- while (!dev_is_pci(dev))
+ while (dev && !dev_is_pci(dev))
dev = dev->parent;
if (!dev)
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index 8b7f941a9bb7..b8c4159bc32d 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -2316,7 +2316,7 @@ static int icm_usb4_switch_nvm_authenticate_status(struct tb_switch *sw,
if (auth && auth->reply.route_hi == sw->config.route_hi &&
auth->reply.route_lo == sw->config.route_lo) {
- tb_dbg(tb, "NVM_AUTH found for %llx flags 0x%#x status %#x\n",
+ tb_dbg(tb, "NVM_AUTH found for %llx flags %#x status %#x\n",
tb_route(sw), auth->reply.hdr.flags, auth->reply.status);
if (auth->reply.hdr.flags & ICM_FLAGS_ERROR)
ret = -EIO;
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 319d68c8a5df..219e85756171 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -2081,9 +2081,6 @@ static int canon_copy_from_read_buf(struct tty_struct *tty,
return 0;
}
-extern ssize_t redirected_tty_write(struct file *, const char __user *,
- size_t, loff_t *);
-
/**
* job_control - check job control
* @tty: tty
@@ -2105,7 +2102,7 @@ static int job_control(struct tty_struct *tty, struct file *file)
/* NOTE: not yet done after every sleep pending a thorough
check of the logic of this change. -- jlc */
/* don't stop on /dev/console */
- if (file->f_op->write == redirected_tty_write)
+ if (file->f_op->write_iter == redirected_tty_write)
return 0;
return __tty_check_change(tty, SIGTTIN);
@@ -2309,7 +2306,7 @@ static ssize_t n_tty_write(struct tty_struct *tty, struct file *file,
ssize_t retval = 0;
/* Job control check -- must be done at start (POSIX.1 7.1.1.4). */
- if (L_TOSTOP(tty) && file->f_op->write != redirected_tty_write) {
+ if (L_TOSTOP(tty) && file->f_op->write_iter != redirected_tty_write) {
retval = tty_check_change(tty);
if (retval)
return retval;
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 12d71d5eb6ca..34a2899e69c0 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1567,6 +1567,38 @@ config SERIAL_MILBEAUT_USIO_CONSOLE
receives all kernel messages and warnings and which allows logins in
single user mode).
+config SERIAL_LITEUART
+ tristate "LiteUART serial port support"
+ depends on HAS_IOMEM
+ depends on OF || COMPILE_TEST
+ depends on LITEX
+ select SERIAL_CORE
+ help
+	  This driver is for the FPGA-based LiteUART serial controller from the
+	  LiteX SoC builder.
+
+ Say 'Y' or 'M' here if you wish to use the LiteUART serial controller.
+ Otherwise, say 'N'.
+
+config SERIAL_LITEUART_MAX_PORTS
+ int "Maximum number of LiteUART ports"
+ depends on SERIAL_LITEUART
+ default "1"
+ help
+ Set this to the maximum number of serial ports you want the driver
+ to support.
+
+config SERIAL_LITEUART_CONSOLE
+ bool "LiteUART serial port console support"
+ depends on SERIAL_LITEUART=y
+ select SERIAL_CORE_CONSOLE
+ help
+	  Say 'Y' here if you wish to use the FPGA-based LiteUART serial
+	  controller from the LiteX SoC builder as the system console
+ (the system console is the device which receives all kernel messages
+ and warnings and which allows logins in single user mode).
+ Otherwise, say 'N'.
+
endmenu
config SERIAL_MCTRL_GPIO
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index af44b231123c..b85d53f9e9ff 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -89,6 +89,7 @@ obj-$(CONFIG_SERIAL_OWL) += owl-uart.o
obj-$(CONFIG_SERIAL_RDA) += rda-uart.o
obj-$(CONFIG_SERIAL_MILBEAUT_USIO) += milbeaut_usio.o
obj-$(CONFIG_SERIAL_SIFIVE) += sifive.o
+obj-$(CONFIG_SERIAL_LITEUART) += liteuart.o
# GPIOLIB helpers for modem control lines
obj-$(CONFIG_SERIAL_MCTRL_GPIO) += serial_mctrl_gpio.o
diff --git a/drivers/tty/serial/liteuart.c b/drivers/tty/serial/liteuart.c
new file mode 100644
index 000000000000..64842f3539e1
--- /dev/null
+++ b/drivers/tty/serial/liteuart.c
@@ -0,0 +1,404 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * LiteUART serial controller (LiteX) Driver
+ *
+ * Copyright (C) 2019-2020 Antmicro <www.antmicro.com>
+ */
+
+#include <linux/console.h>
+#include <linux/litex.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <linux/tty_flip.h>
+#include <linux/xarray.h>
+
+/*
+ * CSRs definitions (base address offsets + width)
+ *
+ * The definitions below are true for LiteX SoC configured for 8-bit CSR Bus,
+ * 32-bit aligned.
+ *
+ * Supporting other configurations might require new definitions or a more
+ * generic way of indexing the LiteX CSRs.
+ *
+ * For more details on how CSRs are defined and handled in LiteX, see comments
+ * in the LiteX SoC Driver: drivers/soc/litex/litex_soc_ctrl.c
+ */
+#define OFF_RXTX 0x00
+#define OFF_TXFULL 0x04
+#define OFF_RXEMPTY 0x08
+#define OFF_EV_STATUS 0x0c
+#define OFF_EV_PENDING 0x10
+#define OFF_EV_ENABLE 0x14
+
+/* events */
+#define EV_TX 0x1
+#define EV_RX 0x2
+
+struct liteuart_port {
+ struct uart_port port;
+ struct timer_list timer;
+ u32 id;
+};
+
+#define to_liteuart_port(port) container_of(port, struct liteuart_port, port)
+
+static DEFINE_XARRAY_FLAGS(liteuart_array, XA_FLAGS_ALLOC);
+
+#ifdef CONFIG_SERIAL_LITEUART_CONSOLE
+static struct console liteuart_console;
+#endif
+
+static struct uart_driver liteuart_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = "liteuart",
+ .dev_name = "ttyLXU",
+ .major = 0,
+ .minor = 0,
+ .nr = CONFIG_SERIAL_LITEUART_MAX_PORTS,
+#ifdef CONFIG_SERIAL_LITEUART_CONSOLE
+ .cons = &liteuart_console,
+#endif
+};
+
+static void liteuart_timer(struct timer_list *t)
+{
+ struct liteuart_port *uart = from_timer(uart, t, timer);
+ struct uart_port *port = &uart->port;
+ unsigned char __iomem *membase = port->membase;
+ unsigned int flg = TTY_NORMAL;
+ int ch;
+ unsigned long status;
+
+ while ((status = !litex_read8(membase + OFF_RXEMPTY)) == 1) {
+ ch = litex_read8(membase + OFF_RXTX);
+ port->icount.rx++;
+
+ /* necessary for RXEMPTY to refresh its value */
+ litex_write8(membase + OFF_EV_PENDING, EV_TX | EV_RX);
+
+ /* no overflow bits in status */
+ if (!(uart_handle_sysrq_char(port, ch)))
+ uart_insert_char(port, status, 0, ch, flg);
+
+ tty_flip_buffer_push(&port->state->port);
+ }
+
+ mod_timer(&uart->timer, jiffies + uart_poll_timeout(port));
+}
+
+static void liteuart_putchar(struct uart_port *port, int ch)
+{
+ while (litex_read8(port->membase + OFF_TXFULL))
+ cpu_relax();
+
+ litex_write8(port->membase + OFF_RXTX, ch);
+}
+
+static unsigned int liteuart_tx_empty(struct uart_port *port)
+{
+ /* not really tx empty, just checking if tx is not full */
+ if (!litex_read8(port->membase + OFF_TXFULL))
+ return TIOCSER_TEMT;
+
+ return 0;
+}
+
+static void liteuart_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ /* modem control register is not present in LiteUART */
+}
+
+static unsigned int liteuart_get_mctrl(struct uart_port *port)
+{
+ return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
+}
+
+static void liteuart_stop_tx(struct uart_port *port)
+{
+}
+
+static void liteuart_start_tx(struct uart_port *port)
+{
+ struct circ_buf *xmit = &port->state->xmit;
+ unsigned char ch;
+
+ if (unlikely(port->x_char)) {
+ litex_write8(port->membase + OFF_RXTX, port->x_char);
+ port->icount.tx++;
+ port->x_char = 0;
+ } else if (!uart_circ_empty(xmit)) {
+ while (xmit->head != xmit->tail) {
+ ch = xmit->buf[xmit->tail];
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ liteuart_putchar(port, ch);
+ }
+ }
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+}
+
+static void liteuart_stop_rx(struct uart_port *port)
+{
+ struct liteuart_port *uart = to_liteuart_port(port);
+
+ /* just delete timer */
+ del_timer(&uart->timer);
+}
+
+static void liteuart_break_ctl(struct uart_port *port, int break_state)
+{
+ /* LiteUART doesn't support sending break signal */
+}
+
+static int liteuart_startup(struct uart_port *port)
+{
+ struct liteuart_port *uart = to_liteuart_port(port);
+
+ /* disable events */
+ litex_write8(port->membase + OFF_EV_ENABLE, 0);
+
+ /* prepare timer for polling */
+ timer_setup(&uart->timer, liteuart_timer, 0);
+ mod_timer(&uart->timer, jiffies + uart_poll_timeout(port));
+
+ return 0;
+}
+
+static void liteuart_shutdown(struct uart_port *port)
+{
+}
+
+static void liteuart_set_termios(struct uart_port *port, struct ktermios *new,
+ struct ktermios *old)
+{
+ unsigned int baud;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* update baudrate */
+ baud = uart_get_baud_rate(port, new, old, 0, 460800);
+ uart_update_timeout(port, new->c_cflag, baud);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static const char *liteuart_type(struct uart_port *port)
+{
+ return "liteuart";
+}
+
+static void liteuart_release_port(struct uart_port *port)
+{
+}
+
+static int liteuart_request_port(struct uart_port *port)
+{
+ return 0;
+}
+
+static void liteuart_config_port(struct uart_port *port, int flags)
+{
+ /*
+ * Driver core for serial ports forces a non-zero value for port type.
+ * Write an arbitrary value here to accommodate the serial core driver,
+	 * as the port-type ID in the UAPI is redundant here.
+ */
+ port->type = 1;
+}
+
+static int liteuart_verify_port(struct uart_port *port,
+ struct serial_struct *ser)
+{
+ if (port->type != PORT_UNKNOWN && ser->type != 1)
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct uart_ops liteuart_ops = {
+ .tx_empty = liteuart_tx_empty,
+ .set_mctrl = liteuart_set_mctrl,
+ .get_mctrl = liteuart_get_mctrl,
+ .stop_tx = liteuart_stop_tx,
+ .start_tx = liteuart_start_tx,
+ .stop_rx = liteuart_stop_rx,
+ .break_ctl = liteuart_break_ctl,
+ .startup = liteuart_startup,
+ .shutdown = liteuart_shutdown,
+ .set_termios = liteuart_set_termios,
+ .type = liteuart_type,
+ .release_port = liteuart_release_port,
+ .request_port = liteuart_request_port,
+ .config_port = liteuart_config_port,
+ .verify_port = liteuart_verify_port,
+};
+
+static int liteuart_probe(struct platform_device *pdev)
+{
+ struct liteuart_port *uart;
+ struct uart_port *port;
+ struct xa_limit limit;
+ int dev_id, ret;
+
+ /* look for aliases; auto-enumerate for free index if not found */
+ dev_id = of_alias_get_id(pdev->dev.of_node, "serial");
+ if (dev_id < 0)
+ limit = XA_LIMIT(0, CONFIG_SERIAL_LITEUART_MAX_PORTS);
+ else
+ limit = XA_LIMIT(dev_id, dev_id);
+
+ uart = devm_kzalloc(&pdev->dev, sizeof(struct liteuart_port), GFP_KERNEL);
+ if (!uart)
+ return -ENOMEM;
+
+ ret = xa_alloc(&liteuart_array, &dev_id, uart, limit, GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ uart->id = dev_id;
+ port = &uart->port;
+
+ /* get membase */
+ port->membase = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
+	if (IS_ERR(port->membase))
+		return PTR_ERR(port->membase);
+
+ /* values not from device tree */
+ port->dev = &pdev->dev;
+ port->iotype = UPIO_MEM;
+ port->flags = UPF_BOOT_AUTOCONF;
+ port->ops = &liteuart_ops;
+ port->regshift = 2;
+ port->fifosize = 16;
+ port->iobase = 1;
+ port->type = PORT_UNKNOWN;
+ port->line = dev_id;
+ spin_lock_init(&port->lock);
+
+ return uart_add_one_port(&liteuart_driver, &uart->port);
+}
+
+static int liteuart_remove(struct platform_device *pdev)
+{
+ struct uart_port *port = platform_get_drvdata(pdev);
+ struct liteuart_port *uart = to_liteuart_port(port);
+
+ xa_erase(&liteuart_array, uart->id);
+
+ return 0;
+}
+
+static const struct of_device_id liteuart_of_match[] = {
+ { .compatible = "litex,liteuart" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, liteuart_of_match);
+
+static struct platform_driver liteuart_platform_driver = {
+ .probe = liteuart_probe,
+ .remove = liteuart_remove,
+ .driver = {
+ .name = "liteuart",
+ .of_match_table = liteuart_of_match,
+ },
+};
+
+#ifdef CONFIG_SERIAL_LITEUART_CONSOLE
+
+static void liteuart_console_write(struct console *co, const char *s,
+ unsigned int count)
+{
+ struct liteuart_port *uart;
+ struct uart_port *port;
+ unsigned long flags;
+
+ uart = (struct liteuart_port *)xa_load(&liteuart_array, co->index);
+ port = &uart->port;
+
+ spin_lock_irqsave(&port->lock, flags);
+ uart_console_write(port, s, count, liteuart_putchar);
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static int liteuart_console_setup(struct console *co, char *options)
+{
+ struct liteuart_port *uart;
+ struct uart_port *port;
+ int baud = 115200;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ uart = (struct liteuart_port *)xa_load(&liteuart_array, co->index);
+ if (!uart)
+ return -ENODEV;
+
+ port = &uart->port;
+ if (!port->membase)
+ return -ENODEV;
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ return uart_set_options(port, co, baud, parity, bits, flow);
+}
+
+static struct console liteuart_console = {
+ .name = "liteuart",
+ .write = liteuart_console_write,
+ .device = uart_console_device,
+ .setup = liteuart_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &liteuart_driver,
+};
+
+static int __init liteuart_console_init(void)
+{
+ register_console(&liteuart_console);
+
+ return 0;
+}
+console_initcall(liteuart_console_init);
+#endif /* CONFIG_SERIAL_LITEUART_CONSOLE */
+
+static int __init liteuart_init(void)
+{
+ int res;
+
+ res = uart_register_driver(&liteuart_driver);
+ if (res)
+ return res;
+
+ res = platform_driver_register(&liteuart_platform_driver);
+ if (res) {
+ uart_unregister_driver(&liteuart_driver);
+ return res;
+ }
+
+ return 0;
+}
+
+static void __exit liteuart_exit(void)
+{
+ platform_driver_unregister(&liteuart_platform_driver);
+ uart_unregister_driver(&liteuart_driver);
+}
+
+module_init(liteuart_init);
+module_exit(liteuart_exit);
+
+MODULE_AUTHOR("Antmicro <www.antmicro.com>");
+MODULE_DESCRIPTION("LiteUART serial driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:liteuart");
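
One probe-path detail worth spelling out: LiteUART numbers its ports through
an XArray allocator, so a devicetree "serial" alias pins its exact index
while alias-less ports fall back to any free slot below the configured
maximum. A reduced sketch of that allocation scheme (demo names, not the
driver's):

#include <linux/xarray.h>
#include <linux/gfp.h>

static DEFINE_XARRAY_FLAGS(demo_ports, XA_FLAGS_ALLOC);

static int demo_alloc_port_id(void *port, int alias_id, u32 max_ports)
{
	struct xa_limit limit;
	u32 id;
	int ret;

	if (alias_id < 0)		/* no alias: any free index */
		limit = XA_LIMIT(0, max_ports);
	else				/* alias: exactly that index */
		limit = XA_LIMIT(alias_id, alias_id);

	ret = xa_alloc(&demo_ports, &id, port, limit, GFP_KERNEL);
	return ret ? ret : (int)id;
}
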
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
index 118b29912289..e0c00a1b0763 100644
--- a/drivers/tty/serial/mvebu-uart.c
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -648,6 +648,14 @@ static void wait_for_xmitr(struct uart_port *port)
(val & STAT_TX_RDY(port)), 1, 10000);
}
+static void wait_for_xmite(struct uart_port *port)
+{
+ u32 val;
+
+ readl_poll_timeout_atomic(port->membase + UART_STAT, val,
+ (val & STAT_TX_EMP), 1, 10000);
+}
+
static void mvebu_uart_console_putchar(struct uart_port *port, int ch)
{
wait_for_xmitr(port);
@@ -675,7 +683,7 @@ static void mvebu_uart_console_write(struct console *co, const char *s,
uart_console_write(port, s, count, mvebu_uart_console_putchar);
- wait_for_xmitr(port);
+ wait_for_xmite(port);
if (ier)
writel(ier, port->membase + UART_CTRL(port));
diff --git a/drivers/tty/serial/sifive.c b/drivers/tty/serial/sifive.c
index 1066eebe3b28..328d5a78792f 100644
--- a/drivers/tty/serial/sifive.c
+++ b/drivers/tty/serial/sifive.c
@@ -1000,6 +1000,7 @@ static int sifive_serial_probe(struct platform_device *pdev)
/* Set up clock divider */
ssp->clkin_rate = clk_get_rate(ssp->clk);
ssp->baud_rate = SIFIVE_DEFAULT_BAUD_RATE;
+ ssp->port.uartclk = ssp->baud_rate * 16;
__ssp_update_div(ssp);
platform_set_drvdata(pdev, ssp);
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 8034489337d7..816e709afa56 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -143,12 +143,9 @@ LIST_HEAD(tty_drivers); /* linked list of tty drivers */
DEFINE_MUTEX(tty_mutex);
static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
-static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
-ssize_t redirected_tty_write(struct file *, const char __user *,
- size_t, loff_t *);
+static ssize_t tty_write(struct kiocb *, struct iov_iter *);
static __poll_t tty_poll(struct file *, poll_table *);
static int tty_open(struct inode *, struct file *);
-long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
#ifdef CONFIG_COMPAT
static long tty_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg);
@@ -438,8 +435,7 @@ static ssize_t hung_up_tty_read(struct file *file, char __user *buf,
return 0;
}
-static ssize_t hung_up_tty_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
+static ssize_t hung_up_tty_write(struct kiocb *iocb, struct iov_iter *from)
{
return -EIO;
}
@@ -478,7 +474,8 @@ static void tty_show_fdinfo(struct seq_file *m, struct file *file)
static const struct file_operations tty_fops = {
.llseek = no_llseek,
.read = tty_read,
- .write = tty_write,
+ .write_iter = tty_write,
+ .splice_write = iter_file_splice_write,
.poll = tty_poll,
.unlocked_ioctl = tty_ioctl,
.compat_ioctl = tty_compat_ioctl,
@@ -491,7 +488,8 @@ static const struct file_operations tty_fops = {
static const struct file_operations console_fops = {
.llseek = no_llseek,
.read = tty_read,
- .write = redirected_tty_write,
+ .write_iter = redirected_tty_write,
+ .splice_write = iter_file_splice_write,
.poll = tty_poll,
.unlocked_ioctl = tty_ioctl,
.compat_ioctl = tty_compat_ioctl,
@@ -503,7 +501,7 @@ static const struct file_operations console_fops = {
static const struct file_operations hung_up_tty_fops = {
.llseek = no_llseek,
.read = hung_up_tty_read,
- .write = hung_up_tty_write,
+ .write_iter = hung_up_tty_write,
.poll = hung_up_tty_poll,
.unlocked_ioctl = hung_up_tty_ioctl,
.compat_ioctl = hung_up_tty_compat_ioctl,
@@ -606,9 +604,9 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session)
/* This breaks for file handles being sent over AF_UNIX sockets ? */
list_for_each_entry(priv, &tty->tty_files, list) {
filp = priv->file;
- if (filp->f_op->write == redirected_tty_write)
+ if (filp->f_op->write_iter == redirected_tty_write)
cons_filp = filp;
- if (filp->f_op->write != tty_write)
+ if (filp->f_op->write_iter != tty_write)
continue;
closecount++;
__tty_fasync(-1, filp, 0); /* can't block */
@@ -901,9 +899,9 @@ static inline ssize_t do_tty_write(
ssize_t (*write)(struct tty_struct *, struct file *, const unsigned char *, size_t),
struct tty_struct *tty,
struct file *file,
- const char __user *buf,
- size_t count)
+ struct iov_iter *from)
{
+ size_t count = iov_iter_count(from);
ssize_t ret, written = 0;
unsigned int chunk;
@@ -955,14 +953,20 @@ static inline ssize_t do_tty_write(
size_t size = count;
if (size > chunk)
size = chunk;
+
ret = -EFAULT;
- if (copy_from_user(tty->write_buf, buf, size))
+ if (copy_from_iter(tty->write_buf, size, from) != size)
break;
+
ret = write(tty, file, tty->write_buf, size);
if (ret <= 0)
break;
+
+ /* FIXME! Have Al check this! */
+ if (ret != size)
+ iov_iter_revert(from, size-ret);
+
written += ret;
- buf += ret;
count -= ret;
if (!count)
break;
@@ -1022,8 +1026,7 @@ void tty_write_message(struct tty_struct *tty, char *msg)
* write method will not be invoked in parallel for each device.
*/
-static ssize_t tty_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
+static ssize_t file_tty_write(struct file *file, struct kiocb *iocb, struct iov_iter *from)
{
struct tty_struct *tty = file_tty(file);
struct tty_ldisc *ld;
@@ -1038,17 +1041,21 @@ static ssize_t tty_write(struct file *file, const char __user *buf,
tty_err(tty, "missing write_room method\n");
ld = tty_ldisc_ref_wait(tty);
if (!ld)
- return hung_up_tty_write(file, buf, count, ppos);
+ return hung_up_tty_write(iocb, from);
if (!ld->ops->write)
ret = -EIO;
else
- ret = do_tty_write(ld->ops->write, tty, file, buf, count);
+ ret = do_tty_write(ld->ops->write, tty, file, from);
tty_ldisc_deref(ld);
return ret;
}
-ssize_t redirected_tty_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
+static ssize_t tty_write(struct kiocb *iocb, struct iov_iter *from)
+{
+ return file_tty_write(iocb->ki_filp, iocb, from);
+}
+
+ssize_t redirected_tty_write(struct kiocb *iocb, struct iov_iter *iter)
{
struct file *p = NULL;
@@ -1057,13 +1064,17 @@ ssize_t redirected_tty_write(struct file *file, const char __user *buf,
p = get_file(redirect);
spin_unlock(&redirect_lock);
+ /*
+	 * We know the redirected tty is just another tty, so we can
+ * call file_tty_write() directly with that file pointer.
+ */
if (p) {
ssize_t res;
- res = vfs_write(p, buf, count, &p->f_pos);
+ res = file_tty_write(p, iocb, iter);
fput(p);
return res;
}
- return tty_write(file, buf, count, ppos);
+ return tty_write(iocb, iter);
}
/*
@@ -2295,7 +2306,7 @@ static int tioccons(struct file *file)
{
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (file->f_op->write == redirected_tty_write) {
+ if (file->f_op->write_iter == redirected_tty_write) {
struct file *f;
spin_lock(&redirect_lock);
f = redirect;
@@ -2305,6 +2316,12 @@ static int tioccons(struct file *file)
fput(f);
return 0;
}
+ if (file->f_op->write_iter != tty_write)
+ return -ENOTTY;
+ if (!(file->f_mode & FMODE_WRITE))
+ return -EBADF;
+ if (!(file->f_mode & FMODE_CAN_WRITE))
+ return -EINVAL;
spin_lock(&redirect_lock);
if (redirect) {
spin_unlock(&redirect_lock);
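
The tty changes here are part of the wider ->write to ->write_iter
conversion: the handler now receives a kiocb plus an iov_iter and pulls user
data with copy_from_iter(), which is also what lets redirected_tty_write()
call file_tty_write() directly instead of bouncing through vfs_write(). The
minimal shape of such a handler, assuming a hypothetical fixed-size sink:

#include <linux/fs.h>
#include <linux/uio.h>
#include <linux/minmax.h>

static ssize_t demo_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	char kbuf[64];
	size_t len = min_t(size_t, iov_iter_count(from), sizeof(kbuf));

	if (copy_from_iter(kbuf, len, from) != len)
		return -EFAULT;

	/* ... hand kbuf to the device; report how many bytes were consumed ... */
	return len;
}
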
diff --git a/drivers/usb/cdns3/cdns3-imx.c b/drivers/usb/cdns3/cdns3-imx.c
index 22a56c4dce67..7990fee03fe4 100644
--- a/drivers/usb/cdns3/cdns3-imx.c
+++ b/drivers/usb/cdns3/cdns3-imx.c
@@ -185,7 +185,11 @@ static int cdns_imx_probe(struct platform_device *pdev)
}
data->num_clks = ARRAY_SIZE(imx_cdns3_core_clks);
- data->clks = (struct clk_bulk_data *)imx_cdns3_core_clks;
+ data->clks = devm_kmemdup(dev, imx_cdns3_core_clks,
+ sizeof(imx_cdns3_core_clks), GFP_KERNEL);
+ if (!data->clks)
+ return -ENOMEM;
+
ret = devm_clk_bulk_get(dev, data->num_clks, data->clks);
if (ret)
return ret;
@@ -214,20 +218,16 @@ err:
return ret;
}
-static int cdns_imx_remove_core(struct device *dev, void *data)
-{
- struct platform_device *pdev = to_platform_device(dev);
-
- platform_device_unregister(pdev);
-
- return 0;
-}
-
static int cdns_imx_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct cdns_imx *data = dev_get_drvdata(dev);
- device_for_each_child(dev, NULL, cdns_imx_remove_core);
+ pm_runtime_get_sync(dev);
+ of_platform_depopulate(dev);
+ clk_bulk_disable_unprepare(data->num_clks, data->clks);
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
platform_set_drvdata(pdev, NULL);
return 0;
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index 9e12152ea46b..8b7bc10b6e8b 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -139,9 +139,13 @@ static struct imx_usbmisc_data *usbmisc_get_init_data(struct device *dev)
misc_pdev = of_find_device_by_node(args.np);
of_node_put(args.np);
- if (!misc_pdev || !platform_get_drvdata(misc_pdev))
+ if (!misc_pdev)
return ERR_PTR(-EPROBE_DEFER);
+ if (!platform_get_drvdata(misc_pdev)) {
+ put_device(&misc_pdev->dev);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
data->dev = &misc_pdev->dev;
/*
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index f52f1bc0559f..781905745812 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1895,6 +1895,10 @@ static const struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0x04d8, 0xfd08),
.driver_info = IGNORE_DEVICE,
},
+
+ { USB_DEVICE(0x04d8, 0xf58b),
+ .driver_info = IGNORE_DEVICE,
+ },
#endif
/*Samsung phone in firmware update mode */
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 02d0cfd23bb2..508b1c3f8b73 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -465,13 +465,23 @@ static int service_outstanding_interrupt(struct wdm_device *desc)
if (!desc->resp_count || !--desc->resp_count)
goto out;
+ if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
+ rv = -ENODEV;
+ goto out;
+ }
+ if (test_bit(WDM_RESETTING, &desc->flags)) {
+ rv = -EIO;
+ goto out;
+ }
+
set_bit(WDM_RESPONDING, &desc->flags);
spin_unlock_irq(&desc->iuspin);
rv = usb_submit_urb(desc->response, GFP_KERNEL);
spin_lock_irq(&desc->iuspin);
if (rv) {
- dev_err(&desc->intf->dev,
- "usb_submit_urb failed with result %d\n", rv);
+ if (!test_bit(WDM_DISCONNECTING, &desc->flags))
+ dev_err(&desc->intf->dev,
+ "usb_submit_urb failed with result %d\n", rv);
/* make sure the next notification triggers a submit */
clear_bit(WDM_RESPONDING, &desc->flags);
@@ -1027,9 +1037,9 @@ static void wdm_disconnect(struct usb_interface *intf)
wake_up_all(&desc->wait);
mutex_lock(&desc->rlock);
mutex_lock(&desc->wlock);
- kill_urbs(desc);
cancel_work_sync(&desc->rxwork);
cancel_work_sync(&desc->service_outs_intr);
+ kill_urbs(desc);
mutex_unlock(&desc->wlock);
mutex_unlock(&desc->rlock);
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index 67cbd42421be..c9f6e9758288 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -274,8 +274,25 @@ static int usblp_ctrl_msg(struct usblp *usblp, int request, int type, int dir, i
#define usblp_reset(usblp)\
usblp_ctrl_msg(usblp, USBLP_REQ_RESET, USB_TYPE_CLASS, USB_DIR_OUT, USB_RECIP_OTHER, 0, NULL, 0)
-#define usblp_hp_channel_change_request(usblp, channel, buffer) \
- usblp_ctrl_msg(usblp, USBLP_REQ_HP_CHANNEL_CHANGE_REQUEST, USB_TYPE_VENDOR, USB_DIR_IN, USB_RECIP_INTERFACE, channel, buffer, 1)
+static int usblp_hp_channel_change_request(struct usblp *usblp, int channel, u8 *new_channel)
+{
+ u8 *buf;
+ int ret;
+
+ buf = kzalloc(1, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = usblp_ctrl_msg(usblp, USBLP_REQ_HP_CHANNEL_CHANGE_REQUEST,
+ USB_TYPE_VENDOR, USB_DIR_IN, USB_RECIP_INTERFACE,
+ channel, buf, 1);
+ if (ret == 0)
+ *new_channel = buf[0];
+
+ kfree(buf);
+
+ return ret;
+}
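
The macro-to-function conversion above exists because USB control transfers
may be DMA-mapped: the one-byte payload must live in kmalloc'd memory, never
on the stack or in static data, and the result is copied out to the caller
afterwards. The same shape in isolation (with a hypothetical vendor request,
not usblp's):

#include <linux/slab.h>
#include <linux/usb.h>

static int demo_vendor_read_byte(struct usb_device *dev, u16 index, u8 *out)
{
	u8 *buf = kmalloc(1, GFP_KERNEL);	/* DMA-able, unlike the stack */
	int ret;

	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
			      0x01 /* hypothetical bRequest */,
			      USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_INTERFACE,
			      0, index, buf, 1, USB_CTRL_GET_TIMEOUT);
	if (ret == 1)
		*out = buf[0];

	kfree(buf);
	return ret == 1 ? 0 : (ret < 0 ? ret : -EIO);
}
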
/*
* See the description for usblp_select_alts() below for the usage
@@ -1312,14 +1329,17 @@ static int usblp_set_protocol(struct usblp *usblp, int protocol)
if (protocol < USBLP_FIRST_PROTOCOL || protocol > USBLP_LAST_PROTOCOL)
return -EINVAL;
- alts = usblp->protocol[protocol].alt_setting;
- if (alts < 0)
- return -EINVAL;
- r = usb_set_interface(usblp->dev, usblp->ifnum, alts);
- if (r < 0) {
- printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n",
- alts, usblp->ifnum);
- return r;
+ /* Don't unnecessarily set the interface if there's a single alt. */
+ if (usblp->intf->num_altsetting > 1) {
+ alts = usblp->protocol[protocol].alt_setting;
+ if (alts < 0)
+ return -EINVAL;
+ r = usb_set_interface(usblp->dev, usblp->ifnum, alts);
+ if (r < 0) {
+ printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n",
+ alts, usblp->ifnum);
+ return r;
+ }
}
usblp->bidir = (usblp->protocol[protocol].epread != NULL);
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 60886a7464c3..ad5a0f405a75 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1649,14 +1649,12 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
urb->status = status;
/*
* This function can be called in task context inside another remote
- * coverage collection section, but KCOV doesn't support that kind of
+ * coverage collection section, but kcov doesn't support that kind of
* recursion yet. Only collect coverage in softirq context for now.
*/
- if (in_serving_softirq())
- kcov_remote_start_usb((u64)urb->dev->bus->busnum);
+ kcov_remote_start_usb_softirq((u64)urb->dev->bus->busnum);
urb->complete(urb);
- if (in_serving_softirq())
- kcov_remote_stop();
+ kcov_remote_stop_softirq();
usb_anchor_resume_wakeups(anchor);
atomic_dec(&urb->use_count);
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 0a0d11151cfb..ad4c94366dad 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -1543,7 +1543,6 @@ static void dwc2_hsotg_complete_oursetup(struct usb_ep *ep,
static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg,
u32 windex)
{
- struct dwc2_hsotg_ep *ep;
int dir = (windex & USB_DIR_IN) ? 1 : 0;
int idx = windex & 0x7F;
@@ -1553,12 +1552,7 @@ static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg,
if (idx > hsotg->num_of_eps)
return NULL;
- ep = index_to_ep(hsotg, idx, dir);
-
- if (idx && ep->dir_in != dir)
- return NULL;
-
- return ep;
+ return index_to_ep(hsotg, idx, dir);
}
/**
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 841daec70b6e..3101f0dcf6ae 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1758,7 +1758,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
if (PMSG_IS_AUTO(msg))
break;
- ret = dwc3_core_init(dwc);
+ ret = dwc3_core_init_for_resume(dwc);
if (ret)
return ret;
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 2f95f08ca511..1b241f937d8f 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -285,6 +285,7 @@
/* Global USB2 PHY Vendor Control Register */
#define DWC3_GUSB2PHYACC_NEWREGREQ BIT(25)
+#define DWC3_GUSB2PHYACC_DONE BIT(24)
#define DWC3_GUSB2PHYACC_BUSY BIT(23)
#define DWC3_GUSB2PHYACC_WRITE BIT(22)
#define DWC3_GUSB2PHYACC_ADDR(n) (n << 16)
diff --git a/drivers/usb/dwc3/dwc3-meson-g12a.c b/drivers/usb/dwc3/dwc3-meson-g12a.c
index 417e05381b5d..bdf1f98dfad8 100644
--- a/drivers/usb/dwc3/dwc3-meson-g12a.c
+++ b/drivers/usb/dwc3/dwc3-meson-g12a.c
@@ -754,7 +754,7 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev)
ret = priv->drvdata->setup_regmaps(priv, base);
if (ret)
- return ret;
+ goto err_disable_clks;
if (priv->vbus) {
ret = regulator_enable(priv->vbus);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 78cb4db8a6e4..ee44321fee38 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1763,6 +1763,8 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
list_for_each_entry_safe(r, t, &dep->started_list, list)
dwc3_gadget_move_cancelled_request(r);
+ dep->flags &= ~DWC3_EP_WAIT_TRANSFER_COMPLETE;
+
goto out;
}
}
@@ -2083,6 +2085,7 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
static void dwc3_gadget_disable_irq(struct dwc3 *dwc);
static void __dwc3_gadget_stop(struct dwc3 *dwc);
+static int __dwc3_gadget_start(struct dwc3 *dwc);
static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
{
@@ -2145,6 +2148,8 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
dwc->ev_buf->lpos = (dwc->ev_buf->lpos + count) %
dwc->ev_buf->length;
}
+ } else {
+ __dwc3_gadget_start(dwc);
}
ret = dwc3_gadget_run_stop(dwc, is_on, false);
@@ -2319,10 +2324,6 @@ static int dwc3_gadget_start(struct usb_gadget *g,
}
dwc->gadget_driver = driver;
-
- if (pm_runtime_active(dwc->dev))
- __dwc3_gadget_start(dwc);
-
spin_unlock_irqrestore(&dwc->lock, flags);
return 0;
@@ -2348,13 +2349,6 @@ static int dwc3_gadget_stop(struct usb_gadget *g)
unsigned long flags;
spin_lock_irqsave(&dwc->lock, flags);
-
- if (pm_runtime_suspended(dwc->dev))
- goto out;
-
- __dwc3_gadget_stop(dwc);
-
-out:
dwc->gadget_driver = NULL;
spin_unlock_irqrestore(&dwc->lock, flags);
diff --git a/drivers/usb/dwc3/ulpi.c b/drivers/usb/dwc3/ulpi.c
index aa213c9815f6..f23f4c9a557e 100644
--- a/drivers/usb/dwc3/ulpi.c
+++ b/drivers/usb/dwc3/ulpi.c
@@ -7,6 +7,8 @@
* Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
*/
+#include <linux/delay.h>
+#include <linux/time64.h>
#include <linux/ulpi/regs.h>
#include "core.h"
@@ -17,14 +19,28 @@
DWC3_GUSB2PHYACC_ADDR(ULPI_ACCESS_EXTENDED) | \
DWC3_GUSB2PHYACC_EXTEND_ADDR(a) : DWC3_GUSB2PHYACC_ADDR(a))
-static int dwc3_ulpi_busyloop(struct dwc3 *dwc)
+#define DWC3_ULPI_BASE_DELAY DIV_ROUND_UP(NSEC_PER_SEC, 60000000L)
+
+static int dwc3_ulpi_busyloop(struct dwc3 *dwc, u8 addr, bool read)
{
- unsigned int count = 1000;
+ unsigned long ns = 5L * DWC3_ULPI_BASE_DELAY;
+ unsigned int count = 10000;
u32 reg;
+ if (addr >= ULPI_EXT_VENDOR_SPECIFIC)
+ ns += DWC3_ULPI_BASE_DELAY;
+
+ if (read)
+ ns += DWC3_ULPI_BASE_DELAY;
+
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+ if (reg & DWC3_GUSB2PHYCFG_SUSPHY)
+ usleep_range(1000, 1200);
+
while (count--) {
+ ndelay(ns);
reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYACC(0));
- if (!(reg & DWC3_GUSB2PHYACC_BUSY))
+ if (reg & DWC3_GUSB2PHYACC_DONE)
return 0;
cpu_relax();
}
@@ -38,16 +54,10 @@ static int dwc3_ulpi_read(struct device *dev, u8 addr)
u32 reg;
int ret;
- reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
- if (reg & DWC3_GUSB2PHYCFG_SUSPHY) {
- reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
- dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
- }
-
reg = DWC3_GUSB2PHYACC_NEWREGREQ | DWC3_ULPI_ADDR(addr);
dwc3_writel(dwc->regs, DWC3_GUSB2PHYACC(0), reg);
- ret = dwc3_ulpi_busyloop(dwc);
+ ret = dwc3_ulpi_busyloop(dwc, addr, true);
if (ret)
return ret;
@@ -61,17 +71,11 @@ static int dwc3_ulpi_write(struct device *dev, u8 addr, u8 val)
struct dwc3 *dwc = dev_get_drvdata(dev);
u32 reg;
- reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
- if (reg & DWC3_GUSB2PHYCFG_SUSPHY) {
- reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
- dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
- }
-
reg = DWC3_GUSB2PHYACC_NEWREGREQ | DWC3_ULPI_ADDR(addr);
reg |= DWC3_GUSB2PHYACC_WRITE | val;
dwc3_writel(dwc->regs, DWC3_GUSB2PHYACC(0), reg);
- return dwc3_ulpi_busyloop(dwc);
+ return dwc3_ulpi_busyloop(dwc, addr, false);
}
static const struct ulpi_ops dwc3_ulpi_ops = {
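
The ULPI rework above stops force-clearing the PHY suspend bit around every
access; instead it sleeps briefly if the PHY is suspended and then paces the
polling loop from the ULPI clock, checking the new DONE bit rather than
!BUSY. As a back-of-the-envelope check of the delay constant (assuming the
60 MHz ULPI clock the divisor implies):

#include <linux/kernel.h>
#include <linux/time64.h>

/* One ULPI clock cycle at 60 MHz: DIV_ROUND_UP(10^9 ns, 60 * 10^6 Hz) = 17 ns. */
#define DEMO_ULPI_CYCLE_NS	DIV_ROUND_UP(NSEC_PER_SEC, 60000000L)

/*
 * A basic register access takes about 5 cycles, i.e. 5 * 17 = 85 ns; an
 * extended-address access adds one cycle and a read adds another, so a
 * worst-case extended read delays roughly 7 * 17 = 119 ns per poll.
 */
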
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 7e47e6223089..2d152571a7de 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -265,6 +265,7 @@ config USB_CONFIGFS_NCM
depends on NET
select USB_U_ETHER
select USB_F_NCM
+ select CRC32
help
NCM is an advanced protocol for Ethernet encapsulation, allows
grouping of several ethernet frames into one USB transfer and
@@ -314,6 +315,7 @@ config USB_CONFIGFS_EEM
depends on NET
select USB_U_ETHER
select USB_F_EEM
+ select CRC32
help
CDC EEM is a newer USB standard that is somewhat simpler than CDC ECM
and therefore can be supported by more hardware. Technically ECM and
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index c6d455f2bb92..1a556a628971 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -392,8 +392,11 @@ int usb_function_deactivate(struct usb_function *function)
spin_lock_irqsave(&cdev->lock, flags);
- if (cdev->deactivations == 0)
+ if (cdev->deactivations == 0) {
+ spin_unlock_irqrestore(&cdev->lock, flags);
status = usb_gadget_deactivate(cdev->gadget);
+ spin_lock_irqsave(&cdev->lock, flags);
+ }
if (status == 0)
cdev->deactivations++;
@@ -424,8 +427,11 @@ int usb_function_activate(struct usb_function *function)
status = -EINVAL;
else {
cdev->deactivations--;
- if (cdev->deactivations == 0)
+ if (cdev->deactivations == 0) {
+ spin_unlock_irqrestore(&cdev->lock, flags);
status = usb_gadget_activate(cdev->gadget);
+ spin_lock_irqsave(&cdev->lock, flags);
+ }
}
spin_unlock_irqrestore(&cdev->lock, flags);
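Both hunks apply the same discipline: usb_gadget_deactivate() and usb_gadget_activate() may sleep, so they must not run under the cdev->lock spinlock. A minimal sketch of the drop-call-retake pattern, with illustrative names rather than the driver's own:

/* Sketch: invoking a sleeping function from a spinlocked section by
 * dropping the lock around the call and re-taking it afterwards. */
spin_lock_irqsave(&dev->lock, flags);
if (dev->refcount == 0) {
	spin_unlock_irqrestore(&dev->lock, flags);
	status = do_sleeping_call(dev);		/* must run unlocked */
	spin_lock_irqsave(&dev->lock, flags);
}
if (status == 0)
	dev->refcount++;
spin_unlock_irqrestore(&dev->lock, flags);

The cost of the pattern is that other CPUs can observe the intermediate state while the lock is dropped, which callers of these two functions must tolerate.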
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 56051bb97349..36ffb43f9c1a 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -221,9 +221,16 @@ static ssize_t gadget_dev_desc_bcdUSB_store(struct config_item *item,
static ssize_t gadget_dev_desc_UDC_show(struct config_item *item, char *page)
{
- char *udc_name = to_gadget_info(item)->composite.gadget_driver.udc_name;
+ struct gadget_info *gi = to_gadget_info(item);
+ char *udc_name;
+ int ret;
+
+ mutex_lock(&gi->lock);
+ udc_name = gi->composite.gadget_driver.udc_name;
+ ret = sprintf(page, "%s\n", udc_name ?: "");
+ mutex_unlock(&gi->lock);
- return sprintf(page, "%s\n", udc_name ?: "");
+ return ret;
}
static int unregister_gadget(struct gadget_info *gi)
@@ -1248,9 +1255,9 @@ static void purge_configs_funcs(struct gadget_info *gi)
cfg = container_of(c, struct config_usb_cfg, c);
- list_for_each_entry_safe(f, tmp, &c->functions, list) {
+ list_for_each_entry_safe_reverse(f, tmp, &c->functions, list) {
- list_move_tail(&f->list, &cfg->func_list);
+ list_move(&f->list, &cfg->func_list);
if (f->unbind) {
dev_dbg(&gi->cdev.gadget->dev,
"unbind function '%s'/%p\n",
@@ -1536,7 +1543,7 @@ static const struct usb_gadget_driver configfs_driver_template = {
.suspend = configfs_composite_suspend,
.resume = configfs_composite_resume,
- .max_speed = USB_SPEED_SUPER,
+ .max_speed = USB_SPEED_SUPER_PLUS,
.driver = {
.owner = THIS_MODULE,
.name = "configfs-gadget",
@@ -1576,7 +1583,7 @@ static struct config_group *gadgets_make(
gi->composite.unbind = configfs_do_nothing;
gi->composite.suspend = NULL;
gi->composite.resume = NULL;
- gi->composite.max_speed = USB_SPEED_SUPER;
+ gi->composite.max_speed = USB_SPEED_SUPER_PLUS;
spin_lock_init(&gi->spinlock);
mutex_init(&gi->lock);
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
index 64a4112068fc..2f1eb2e81d30 100644
--- a/drivers/usb/gadget/function/f_printer.c
+++ b/drivers/usb/gadget/function/f_printer.c
@@ -1162,6 +1162,7 @@ fail_tx_reqs:
printer_req_free(dev->in_ep, req);
}
+ usb_free_all_descriptors(f);
return ret;
}
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 3633df6d7610..5d960b6603b6 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -271,7 +271,7 @@ static struct usb_endpoint_descriptor fs_epout_desc = {
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
- .wMaxPacketSize = cpu_to_le16(1023),
+ /* .wMaxPacketSize = DYNAMIC */
.bInterval = 1,
};
@@ -280,7 +280,7 @@ static struct usb_endpoint_descriptor hs_epout_desc = {
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
- .wMaxPacketSize = cpu_to_le16(1024),
+ /* .wMaxPacketSize = DYNAMIC */
.bInterval = 4,
};
@@ -348,7 +348,7 @@ static struct usb_endpoint_descriptor fs_epin_desc = {
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
- .wMaxPacketSize = cpu_to_le16(1023),
+ /* .wMaxPacketSize = DYNAMIC */
.bInterval = 1,
};
@@ -357,7 +357,7 @@ static struct usb_endpoint_descriptor hs_epin_desc = {
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
- .wMaxPacketSize = cpu_to_le16(1024),
+ /* .wMaxPacketSize = DYNAMIC */
.bInterval = 4,
};
@@ -444,12 +444,28 @@ struct cntrl_range_lay3 {
__le32 dRES;
} __packed;
-static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
+static int set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
struct usb_endpoint_descriptor *ep_desc,
- unsigned int factor, bool is_playback)
+ enum usb_device_speed speed, bool is_playback)
{
int chmask, srate, ssize;
- u16 max_packet_size;
+ u16 max_size_bw, max_size_ep;
+ unsigned int factor;
+
+ switch (speed) {
+ case USB_SPEED_FULL:
+ max_size_ep = 1023;
+ factor = 1000;
+ break;
+
+ case USB_SPEED_HIGH:
+ max_size_ep = 1024;
+ factor = 8000;
+ break;
+
+ default:
+ return -EINVAL;
+ }
if (is_playback) {
chmask = uac2_opts->p_chmask;
@@ -461,10 +477,12 @@ static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
ssize = uac2_opts->c_ssize;
}
- max_packet_size = num_channels(chmask) * ssize *
+ max_size_bw = num_channels(chmask) * ssize *
DIV_ROUND_UP(srate, factor / (1 << (ep_desc->bInterval - 1)));
- ep_desc->wMaxPacketSize = cpu_to_le16(min_t(u16, max_packet_size,
- le16_to_cpu(ep_desc->wMaxPacketSize)));
+ ep_desc->wMaxPacketSize = cpu_to_le16(min_t(u16, max_size_bw,
+ max_size_ep));
+
+ return 0;
}
/* Use macro to overcome line length limitation */
@@ -670,10 +688,33 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
}
/* Calculate wMaxPacketSize according to audio bandwidth */
- set_ep_max_packet_size(uac2_opts, &fs_epin_desc, 1000, true);
- set_ep_max_packet_size(uac2_opts, &fs_epout_desc, 1000, false);
- set_ep_max_packet_size(uac2_opts, &hs_epin_desc, 8000, true);
- set_ep_max_packet_size(uac2_opts, &hs_epout_desc, 8000, false);
+ ret = set_ep_max_packet_size(uac2_opts, &fs_epin_desc, USB_SPEED_FULL,
+ true);
+ if (ret < 0) {
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ return ret;
+ }
+
+ ret = set_ep_max_packet_size(uac2_opts, &fs_epout_desc, USB_SPEED_FULL,
+ false);
+ if (ret < 0) {
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ return ret;
+ }
+
+ ret = set_ep_max_packet_size(uac2_opts, &hs_epin_desc, USB_SPEED_HIGH,
+ true);
+ if (ret < 0) {
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ return ret;
+ }
+
+ ret = set_ep_max_packet_size(uac2_opts, &hs_epout_desc, USB_SPEED_HIGH,
+ false);
+ if (ret < 0) {
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ return ret;
+ }
if (EPOUT_EN(uac2_opts)) {
agdev->out_ep = usb_ep_autoconfig(gadget, &fs_epout_desc);
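To make the formula concrete: for a hypothetical 48 kHz, stereo, 16-bit high-speed stream with bInterval = 4, the endpoint is serviced 8000 / (1 << 3) = 1000 times per second, so max_size_bw = 2 * 2 * DIV_ROUND_UP(48000, 1000) = 192 bytes, well inside the 1024-byte high-speed isochronous limit. A standalone sketch with those assumed parameters:

/* Sketch of the wMaxPacketSize computation above; the stream
 * parameters are assumptions for illustration only. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int channels = 2, ssize = 2, srate = 48000;
	unsigned int factor = 8000;	/* high speed: uframes per second */
	unsigned int binterval = 4;	/* service every 2^(4-1) uframes */
	unsigned int per_sec = factor / (1 << (binterval - 1));
	unsigned int max_size_bw =
		channels * ssize * DIV_ROUND_UP(srate, per_sec);

	printf("max_size_bw = %u bytes\n", max_size_bw);	/* 192 */
	return 0;
}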
diff --git a/drivers/usb/gadget/function/storage_common.c b/drivers/usb/gadget/function/storage_common.c
index f7e6c42558eb..b859a158a414 100644
--- a/drivers/usb/gadget/function/storage_common.c
+++ b/drivers/usb/gadget/function/storage_common.c
@@ -204,7 +204,7 @@ int fsg_lun_open(struct fsg_lun *curlun, const char *filename)
if (!(filp->f_mode & FMODE_WRITE))
ro = 1;
- inode = file_inode(filp);
+ inode = filp->f_mapping->host;
if ((!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))) {
LINFO(curlun, "invalid file type: %s\n", filename);
goto out;
@@ -221,7 +221,7 @@ int fsg_lun_open(struct fsg_lun *curlun, const char *filename)
if (!(filp->f_mode & FMODE_CAN_WRITE))
ro = 1;
- size = i_size_read(inode->i_mapping->host);
+ size = i_size_read(inode);
if (size < 0) {
LINFO(curlun, "unable to find file size: %s\n", filename);
rc = (int) size;
@@ -231,8 +231,8 @@ int fsg_lun_open(struct fsg_lun *curlun, const char *filename)
if (curlun->cdrom) {
blksize = 2048;
blkbits = 11;
- } else if (inode->i_bdev) {
- blksize = bdev_logical_block_size(inode->i_bdev);
+ } else if (S_ISBLK(inode->i_mode)) {
+ blksize = bdev_logical_block_size(I_BDEV(inode));
blkbits = blksize_bits(blksize);
} else {
blksize = 512;
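The switch from file_inode() to filp->f_mapping->host matters when the LUN is backed by a block device: there the mapping's host is the bdev inode, so the size and logical-block-size queries act on the device itself and the removed inode->i_bdev dereference is no longer needed. A compressed sketch of the resulting lookup, assuming the kernel's I_BDEV() helper:

/* Sketch: size and block size via the mapping's host inode, which
 * for a block device is the bdev inode itself. */
struct inode *inode = filp->f_mapping->host;
loff_t size = i_size_read(inode);
unsigned int blksize;

if (S_ISBLK(inode->i_mode))
	blksize = bdev_logical_block_size(I_BDEV(inode));
else
	blksize = 512;	/* regular file: default sector size */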
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 31ea76adcc0d..c019f2b0c0af 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -45,9 +45,10 @@
#define UETH__VERSION "29-May-2008"
/* Experiments show that both Linux and Windows hosts allow up to 16k
- * frame sizes. Set the max size to 15k+52 to prevent allocating 32k
+ * frame sizes. Set the max MTU size to 15k+52 to prevent allocating 32k
* blocks and still have efficient handling. */
-#define GETHER_MAX_ETH_FRAME_LEN 15412
+#define GETHER_MAX_MTU_SIZE 15412
+#define GETHER_MAX_ETH_FRAME_LEN (GETHER_MAX_MTU_SIZE + ETH_HLEN)
struct eth_dev {
/* lock is held while accessing port_usb
@@ -786,7 +787,7 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g,
/* MTU range: 14 - 15412 */
net->min_mtu = ETH_HLEN;
- net->max_mtu = GETHER_MAX_ETH_FRAME_LEN;
+ net->max_mtu = GETHER_MAX_MTU_SIZE;
dev->gadget = g;
SET_NETDEV_DEV(net, &g->dev);
@@ -848,7 +849,7 @@ struct net_device *gether_setup_name_default(const char *netname)
/* MTU range: 14 - 15412 */
net->min_mtu = ETH_HLEN;
- net->max_mtu = GETHER_MAX_ETH_FRAME_LEN;
+ net->max_mtu = GETHER_MAX_MTU_SIZE;
return net;
}
diff --git a/drivers/usb/gadget/legacy/acm_ms.c b/drivers/usb/gadget/legacy/acm_ms.c
index 59be2d8417c9..e8033e5f0c18 100644
--- a/drivers/usb/gadget/legacy/acm_ms.c
+++ b/drivers/usb/gadget/legacy/acm_ms.c
@@ -200,8 +200,10 @@ static int acm_ms_bind(struct usb_composite_dev *cdev)
struct usb_descriptor_header *usb_desc;
usb_desc = usb_otg_descriptor_alloc(gadget);
- if (!usb_desc)
+ if (!usb_desc) {
+ status = -ENOMEM;
goto fail_string_ids;
+ }
usb_otg_descriptor_init(gadget, usb_desc);
otg_desc[0] = usb_desc;
otg_desc[1] = NULL;
diff --git a/drivers/usb/gadget/legacy/ether.c b/drivers/usb/gadget/legacy/ether.c
index 30313b233680..99c7fc0d1d59 100644
--- a/drivers/usb/gadget/legacy/ether.c
+++ b/drivers/usb/gadget/legacy/ether.c
@@ -403,8 +403,10 @@ static int eth_bind(struct usb_composite_dev *cdev)
struct usb_descriptor_header *usb_desc;
usb_desc = usb_otg_descriptor_alloc(gadget);
- if (!usb_desc)
+ if (!usb_desc) {
+ status = -ENOMEM;
goto fail1;
+ }
usb_otg_descriptor_init(gadget, usb_desc);
otg_desc[0] = usb_desc;
otg_desc[1] = NULL;
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig
index 1a12aab208b4..8c614bb86c66 100644
--- a/drivers/usb/gadget/udc/Kconfig
+++ b/drivers/usb/gadget/udc/Kconfig
@@ -90,7 +90,7 @@ config USB_BCM63XX_UDC
config USB_FSL_USB2
tristate "Freescale Highspeed USB DR Peripheral Controller"
- depends on FSL_SOC || ARCH_MXC
+ depends on FSL_SOC
help
Some of Freescale PowerPC and i.MX processors have a High Speed
Dual-Role(DR) USB controller, which supports device mode.
diff --git a/drivers/usb/gadget/udc/Makefile b/drivers/usb/gadget/udc/Makefile
index f5a7ce28aecd..a21f2224e7eb 100644
--- a/drivers/usb/gadget/udc/Makefile
+++ b/drivers/usb/gadget/udc/Makefile
@@ -23,7 +23,6 @@ obj-$(CONFIG_USB_ATMEL_USBA) += atmel_usba_udc.o
obj-$(CONFIG_USB_BCM63XX_UDC) += bcm63xx_udc.o
obj-$(CONFIG_USB_FSL_USB2) += fsl_usb2_udc.o
fsl_usb2_udc-y := fsl_udc_core.o
-fsl_usb2_udc-$(CONFIG_ARCH_MXC) += fsl_mxc_udc.o
obj-$(CONFIG_USB_TEGRA_XUDC) += tegra-xudc.o
obj-$(CONFIG_USB_M66592) += m66592-udc.o
obj-$(CONFIG_USB_R8A66597) += r8a66597-udc.o
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/epn.c b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
index 0bd6b20435b8..02d8bfae58fb 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/epn.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
@@ -420,7 +420,10 @@ static void ast_vhub_stop_active_req(struct ast_vhub_ep *ep,
u32 state, reg, loops;
/* Stop DMA activity */
- writel(0, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
+ if (ep->epn.desc_mode)
+ writel(VHUB_EP_DMA_CTRL_RESET, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
+ else
+ writel(0, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
/* Wait for it to complete */
for (loops = 0; loops < 1000; loops++) {
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/hub.c b/drivers/usb/gadget/udc/aspeed-vhub/hub.c
index 6497185ec4e7..bfd8e77788e2 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/hub.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/hub.c
@@ -999,8 +999,10 @@ static int ast_vhub_of_parse_str_desc(struct ast_vhub *vhub,
str_array[offset].s = NULL;
ret = ast_vhub_str_alloc_add(vhub, &lang_str);
- if (ret)
+ if (ret) {
+ of_node_put(child);
break;
+ }
}
return ret;
diff --git a/drivers/usb/gadget/udc/bdc/Kconfig b/drivers/usb/gadget/udc/bdc/Kconfig
index 3e88c7670b2e..fb01ff47b64c 100644
--- a/drivers/usb/gadget/udc/bdc/Kconfig
+++ b/drivers/usb/gadget/udc/bdc/Kconfig
@@ -17,7 +17,7 @@ if USB_BDC_UDC
comment "Platform Support"
config USB_BDC_PCI
tristate "BDC support for PCIe based platforms"
- depends on USB_PCI
+ depends on USB_PCI && BROKEN
default USB_BDC_UDC
help
Enable support for platforms which have BDC connected through PCIe, such as Lego3 FPGA platform.
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 5b5cfeb6c14a..ea114f922ccf 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -659,8 +659,7 @@ EXPORT_SYMBOL_GPL(usb_gadget_vbus_disconnect);
*
* Enables the D+ (or potentially D-) pullup. The host will start
* enumerating this gadget when the pullup is active and a VBUS session
- * is active (the link is powered). This pullup is always enabled unless
- * usb_gadget_disconnect() has been used to disable it.
+ * is active (the link is powered).
*
* Returns zero on success, else negative errno.
*/
@@ -1530,10 +1529,13 @@ static ssize_t soft_connect_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t n)
{
struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
+ ssize_t ret;
+ mutex_lock(&udc_lock);
if (!udc->driver) {
dev_err(dev, "soft-connect without a gadget driver\n");
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto out;
}
if (sysfs_streq(buf, "connect")) {
@@ -1544,10 +1546,14 @@ static ssize_t soft_connect_store(struct device *dev,
usb_gadget_udc_stop(udc);
} else {
dev_err(dev, "unsupported command '%s'\n", buf);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
- return n;
+ ret = n;
+out:
+ mutex_unlock(&udc_lock);
+ return ret;
}
static DEVICE_ATTR_WO(soft_connect);
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index ab5e978b5052..57067763b100 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -2118,9 +2118,21 @@ static int dummy_hub_control(
dum_hcd->port_status &= ~USB_PORT_STAT_POWER;
set_link_state(dum_hcd);
break;
- default:
+ case USB_PORT_FEAT_ENABLE:
+ case USB_PORT_FEAT_C_ENABLE:
+ case USB_PORT_FEAT_C_SUSPEND:
+ /* Not allowed for USB-3 */
+ if (hcd->speed == HCD_USB3)
+ goto error;
+ fallthrough;
+ case USB_PORT_FEAT_C_CONNECTION:
+ case USB_PORT_FEAT_C_RESET:
dum_hcd->port_status &= ~(1 << wValue);
set_link_state(dum_hcd);
+ break;
+ default:
+ /* Disallow INDICATOR and C_OVER_CURRENT */
+ goto error;
}
break;
case GetHubDescriptor:
@@ -2258,17 +2270,20 @@ static int dummy_hub_control(
}
fallthrough;
case USB_PORT_FEAT_RESET:
+ if (!(dum_hcd->port_status & USB_PORT_STAT_CONNECTION))
+ break;
/* if it's already enabled, disable */
if (hcd->speed == HCD_USB3) {
- dum_hcd->port_status = 0;
dum_hcd->port_status =
(USB_SS_PORT_STAT_POWER |
USB_PORT_STAT_CONNECTION |
USB_PORT_STAT_RESET);
- } else
+ } else {
dum_hcd->port_status &= ~(USB_PORT_STAT_ENABLE
| USB_PORT_STAT_LOW_SPEED
| USB_PORT_STAT_HIGH_SPEED);
+ dum_hcd->port_status |= USB_PORT_STAT_RESET;
+ }
/*
* We want to reset device status. All but the
* Self powered feature
@@ -2280,19 +2295,19 @@ static int dummy_hub_control(
* interval? Is it still 50msec as for HS?
*/
dum_hcd->re_timeout = jiffies + msecs_to_jiffies(50);
- fallthrough;
- default:
- if (hcd->speed == HCD_USB3) {
- if ((dum_hcd->port_status &
- USB_SS_PORT_STAT_POWER) != 0) {
- dum_hcd->port_status |= (1 << wValue);
- }
- } else
- if ((dum_hcd->port_status &
- USB_PORT_STAT_POWER) != 0) {
- dum_hcd->port_status |= (1 << wValue);
- }
set_link_state(dum_hcd);
+ break;
+ case USB_PORT_FEAT_C_CONNECTION:
+ case USB_PORT_FEAT_C_RESET:
+ case USB_PORT_FEAT_C_ENABLE:
+ case USB_PORT_FEAT_C_SUSPEND:
+ /* Not allowed for USB-3, and ignored for USB-2 */
+ if (hcd->speed == HCD_USB3)
+ goto error;
+ break;
+ default:
+ /* Disallow TEST, INDICATOR, and C_OVER_CURRENT */
+ goto error;
}
break;
case GetPortErrorCount:
diff --git a/drivers/usb/gadget/udc/fsl_mxc_udc.c b/drivers/usb/gadget/udc/fsl_mxc_udc.c
deleted file mode 100644
index 5a321992decc..000000000000
--- a/drivers/usb/gadget/udc/fsl_mxc_udc.c
+++ /dev/null
@@ -1,122 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Copyright (C) 2009
- * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
- *
- * Description:
- * Helper routines for i.MX3x SoCs from Freescale, needed by the fsl_usb2_udc.c
- * driver to function correctly on these systems.
- */
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/fsl_devices.h>
-#include <linux/mod_devicetable.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-
-#include "fsl_usb2_udc.h"
-
-static struct clk *mxc_ahb_clk;
-static struct clk *mxc_per_clk;
-static struct clk *mxc_ipg_clk;
-
-/* workaround ENGcm09152 for i.MX35 */
-#define MX35_USBPHYCTRL_OFFSET 0x600
-#define USBPHYCTRL_OTGBASE_OFFSET 0x8
-#define USBPHYCTRL_EVDO (1 << 23)
-
-int fsl_udc_clk_init(struct platform_device *pdev)
-{
- struct fsl_usb2_platform_data *pdata;
- unsigned long freq;
- int ret;
-
- pdata = dev_get_platdata(&pdev->dev);
-
- mxc_ipg_clk = devm_clk_get(&pdev->dev, "ipg");
- if (IS_ERR(mxc_ipg_clk)) {
- dev_err(&pdev->dev, "clk_get(\"ipg\") failed\n");
- return PTR_ERR(mxc_ipg_clk);
- }
-
- mxc_ahb_clk = devm_clk_get(&pdev->dev, "ahb");
- if (IS_ERR(mxc_ahb_clk)) {
- dev_err(&pdev->dev, "clk_get(\"ahb\") failed\n");
- return PTR_ERR(mxc_ahb_clk);
- }
-
- mxc_per_clk = devm_clk_get(&pdev->dev, "per");
- if (IS_ERR(mxc_per_clk)) {
- dev_err(&pdev->dev, "clk_get(\"per\") failed\n");
- return PTR_ERR(mxc_per_clk);
- }
-
- clk_prepare_enable(mxc_ipg_clk);
- clk_prepare_enable(mxc_ahb_clk);
- clk_prepare_enable(mxc_per_clk);
-
- /* make sure USB_CLK is running at 60 MHz +/- 1000 Hz */
- if (!strcmp(pdev->id_entry->name, "imx-udc-mx27")) {
- freq = clk_get_rate(mxc_per_clk);
- if (pdata->phy_mode != FSL_USB2_PHY_ULPI &&
- (freq < 59999000 || freq > 60001000)) {
- dev_err(&pdev->dev, "USB_CLK=%lu, should be 60MHz\n", freq);
- ret = -EINVAL;
- goto eclkrate;
- }
- }
-
- return 0;
-
-eclkrate:
- clk_disable_unprepare(mxc_ipg_clk);
- clk_disable_unprepare(mxc_ahb_clk);
- clk_disable_unprepare(mxc_per_clk);
- mxc_per_clk = NULL;
- return ret;
-}
-
-int fsl_udc_clk_finalize(struct platform_device *pdev)
-{
- struct fsl_usb2_platform_data *pdata = dev_get_platdata(&pdev->dev);
- int ret = 0;
-
- /* workaround ENGcm09152 for i.MX35 */
- if (pdata->workaround & FLS_USB2_WORKAROUND_ENGCM09152) {
- unsigned int v;
- struct resource *res = platform_get_resource
- (pdev, IORESOURCE_MEM, 0);
- void __iomem *phy_regs = ioremap(res->start +
- MX35_USBPHYCTRL_OFFSET, 512);
- if (!phy_regs) {
- dev_err(&pdev->dev, "ioremap for phy address fails\n");
- ret = -EINVAL;
- goto ioremap_err;
- }
-
- v = readl(phy_regs + USBPHYCTRL_OTGBASE_OFFSET);
- writel(v | USBPHYCTRL_EVDO,
- phy_regs + USBPHYCTRL_OTGBASE_OFFSET);
-
- iounmap(phy_regs);
- }
-
-
-ioremap_err:
- /* ULPI transceivers don't need usbpll */
- if (pdata->phy_mode == FSL_USB2_PHY_ULPI) {
- clk_disable_unprepare(mxc_per_clk);
- mxc_per_clk = NULL;
- }
-
- return ret;
-}
-
-void fsl_udc_clk_release(void)
-{
- if (mxc_per_clk)
- clk_disable_unprepare(mxc_per_clk);
- clk_disable_unprepare(mxc_ahb_clk);
- clk_disable_unprepare(mxc_ipg_clk);
-}
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index e358ae17d51e..1926b328b6aa 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -574,6 +574,7 @@ static int ehci_run (struct usb_hcd *hcd)
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
u32 temp;
u32 hcc_params;
+ int rc;
hcd->uses_new_polling = 1;
@@ -629,9 +630,20 @@ static int ehci_run (struct usb_hcd *hcd)
down_write(&ehci_cf_port_reset_rwsem);
ehci->rh_state = EHCI_RH_RUNNING;
ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
+
+ /* Wait until the HC becomes operational */
ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
msleep(5);
+ rc = ehci_handshake(ehci, &ehci->regs->status, STS_HALT, 0, 100 * 1000);
+
up_write(&ehci_cf_port_reset_rwsem);
+
+ if (rc) {
+ ehci_err(ehci, "USB %x.%x, controller refused to start: %d\n",
+ ((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f), rc);
+ return rc;
+ }
+
ehci->last_periodic_enable = ktime_get_real();
temp = HC_VERSION(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 087402aec5cb..9f9ab5ccea88 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -345,6 +345,9 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
unlink_empty_async_suspended(ehci);
+ /* Some Synopsys controllers mistakenly leave IAA turned on */
+ ehci_writel(ehci, STS_IAA, &ehci->regs->status);
+
/* Any IAA cycle that started before the suspend is now invalid */
end_iaa_cycle(ehci);
ehci_handle_start_intr_unlinks(ehci);
diff --git a/drivers/usb/host/ehci-ps3.c b/drivers/usb/host/ehci-ps3.c
index fb52133c3557..98568b046a1a 100644
--- a/drivers/usb/host/ehci-ps3.c
+++ b/drivers/usb/host/ehci-ps3.c
@@ -200,7 +200,7 @@ fail_start:
return result;
}
-static int ps3_ehci_remove(struct ps3_system_bus_device *dev)
+static void ps3_ehci_remove(struct ps3_system_bus_device *dev)
{
unsigned int tmp;
struct usb_hcd *hcd = ps3_system_bus_get_drvdata(dev);
@@ -227,8 +227,6 @@ static int ps3_ehci_remove(struct ps3_system_bus_device *dev)
ps3_dma_region_free(dev->d_region);
ps3_close_hv_device(dev);
-
- return 0;
}
static int __init ps3_ehci_driver_register(struct ps3_system_bus_driver *drv)
diff --git a/drivers/usb/host/ohci-ps3.c b/drivers/usb/host/ohci-ps3.c
index f77cd6af0ccf..4f5af929c3e4 100644
--- a/drivers/usb/host/ohci-ps3.c
+++ b/drivers/usb/host/ohci-ps3.c
@@ -184,7 +184,7 @@ fail_start:
return result;
}
-static int ps3_ohci_remove(struct ps3_system_bus_device *dev)
+static void ps3_ohci_remove(struct ps3_system_bus_device *dev)
{
unsigned int tmp;
struct usb_hcd *hcd = ps3_system_bus_get_drvdata(dev);
@@ -212,8 +212,6 @@ static int ps3_ohci_remove(struct ps3_system_bus_device *dev)
ps3_dma_region_free(dev->d_region);
ps3_close_hv_device(dev);
-
- return 0;
}
static int __init ps3_ohci_driver_register(struct ps3_system_bus_driver *drv)
diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c
index 45c54d56ecbd..b45e5bf08997 100644
--- a/drivers/usb/host/xhci-mtk-sch.c
+++ b/drivers/usb/host/xhci-mtk-sch.c
@@ -200,6 +200,8 @@ static struct mu3h_sch_ep_info *create_sch_ep(struct usb_device *udev,
sch_ep->sch_tt = tt;
sch_ep->ep = ep;
+ INIT_LIST_HEAD(&sch_ep->endpoint);
+ INIT_LIST_HEAD(&sch_ep->tt_endpoint);
return sch_ep;
}
@@ -373,6 +375,7 @@ static void update_bus_bw(struct mu3h_sch_bw_info *sch_bw,
sch_ep->bw_budget_table[j];
}
}
+ sch_ep->allocated = used;
}
static int check_sch_tt(struct usb_device *udev,
@@ -541,6 +544,22 @@ static int check_sch_bw(struct usb_device *udev,
return 0;
}
+static void destroy_sch_ep(struct usb_device *udev,
+ struct mu3h_sch_bw_info *sch_bw, struct mu3h_sch_ep_info *sch_ep)
+{
+ /* only release ep bandwidth that passed check_sch_bw() */
+ if (sch_ep->allocated)
+ update_bus_bw(sch_bw, sch_ep, 0);
+
+ list_del(&sch_ep->endpoint);
+
+ if (sch_ep->sch_tt) {
+ list_del(&sch_ep->tt_endpoint);
+ drop_tt(udev);
+ }
+ kfree(sch_ep);
+}
+
static bool need_bw_sch(struct usb_host_endpoint *ep,
enum usb_device_speed speed, int has_tt)
{
@@ -583,6 +602,8 @@ int xhci_mtk_sch_init(struct xhci_hcd_mtk *mtk)
mtk->sch_array = sch_array;
+ INIT_LIST_HEAD(&mtk->bw_ep_chk_list);
+
return 0;
}
EXPORT_SYMBOL_GPL(xhci_mtk_sch_init);
@@ -601,19 +622,14 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
struct xhci_ep_ctx *ep_ctx;
struct xhci_slot_ctx *slot_ctx;
struct xhci_virt_device *virt_dev;
- struct mu3h_sch_bw_info *sch_bw;
struct mu3h_sch_ep_info *sch_ep;
- struct mu3h_sch_bw_info *sch_array;
unsigned int ep_index;
- int bw_index;
- int ret = 0;
xhci = hcd_to_xhci(hcd);
virt_dev = xhci->devs[udev->slot_id];
ep_index = xhci_get_endpoint_index(&ep->desc);
slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
- sch_array = mtk->sch_array;
xhci_dbg(xhci, "%s() type:%d, speed:%d, mpkt:%d, dir:%d, ep:%p\n",
__func__, usb_endpoint_type(&ep->desc), udev->speed,
@@ -632,35 +648,13 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
return 0;
}
- bw_index = get_bw_index(xhci, udev, ep);
- sch_bw = &sch_array[bw_index];
-
sch_ep = create_sch_ep(udev, ep, ep_ctx);
if (IS_ERR_OR_NULL(sch_ep))
return -ENOMEM;
setup_sch_info(udev, ep_ctx, sch_ep);
- ret = check_sch_bw(udev, sch_bw, sch_ep);
- if (ret) {
- xhci_err(xhci, "Not enough bandwidth!\n");
- if (is_fs_or_ls(udev->speed))
- drop_tt(udev);
-
- kfree(sch_ep);
- return -ENOSPC;
- }
-
- list_add_tail(&sch_ep->endpoint, &sch_bw->bw_ep_list);
-
- ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(sch_ep->pkts)
- | EP_BCSCOUNT(sch_ep->cs_count) | EP_BBM(sch_ep->burst_mode));
- ep_ctx->reserved[1] |= cpu_to_le32(EP_BOFFSET(sch_ep->offset)
- | EP_BREPEAT(sch_ep->repeat));
-
- xhci_dbg(xhci, " PKTS:%x, CSCOUNT:%x, BM:%x, OFFSET:%x, REPEAT:%x\n",
- sch_ep->pkts, sch_ep->cs_count, sch_ep->burst_mode,
- sch_ep->offset, sch_ep->repeat);
+ list_add_tail(&sch_ep->endpoint, &mtk->bw_ep_chk_list);
return 0;
}
@@ -675,7 +669,7 @@ void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
struct xhci_virt_device *virt_dev;
struct mu3h_sch_bw_info *sch_array;
struct mu3h_sch_bw_info *sch_bw;
- struct mu3h_sch_ep_info *sch_ep;
+ struct mu3h_sch_ep_info *sch_ep, *tmp;
int bw_index;
xhci = hcd_to_xhci(hcd);
@@ -694,17 +688,79 @@ void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
bw_index = get_bw_index(xhci, udev, ep);
sch_bw = &sch_array[bw_index];
- list_for_each_entry(sch_ep, &sch_bw->bw_ep_list, endpoint) {
+ list_for_each_entry_safe(sch_ep, tmp, &sch_bw->bw_ep_list, endpoint) {
if (sch_ep->ep == ep) {
- update_bus_bw(sch_bw, sch_ep, 0);
- list_del(&sch_ep->endpoint);
- if (is_fs_or_ls(udev->speed)) {
- list_del(&sch_ep->tt_endpoint);
- drop_tt(udev);
- }
- kfree(sch_ep);
+ destroy_sch_ep(udev, sch_bw, sch_ep);
break;
}
}
}
EXPORT_SYMBOL_GPL(xhci_mtk_drop_ep_quirk);
+
+int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+{
+ struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd);
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];
+ struct mu3h_sch_bw_info *sch_bw;
+ struct mu3h_sch_ep_info *sch_ep, *tmp;
+ int bw_index, ret;
+
+ xhci_dbg(xhci, "%s() udev %s\n", __func__, dev_name(&udev->dev));
+
+ list_for_each_entry(sch_ep, &mtk->bw_ep_chk_list, endpoint) {
+ bw_index = get_bw_index(xhci, udev, sch_ep->ep);
+ sch_bw = &mtk->sch_array[bw_index];
+
+ ret = check_sch_bw(udev, sch_bw, sch_ep);
+ if (ret) {
+ xhci_err(xhci, "Not enough bandwidth!\n");
+ return -ENOSPC;
+ }
+ }
+
+ list_for_each_entry_safe(sch_ep, tmp, &mtk->bw_ep_chk_list, endpoint) {
+ struct xhci_ep_ctx *ep_ctx;
+ struct usb_host_endpoint *ep = sch_ep->ep;
+ unsigned int ep_index = xhci_get_endpoint_index(&ep->desc);
+
+ bw_index = get_bw_index(xhci, udev, ep);
+ sch_bw = &mtk->sch_array[bw_index];
+
+ list_move_tail(&sch_ep->endpoint, &sch_bw->bw_ep_list);
+
+ ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
+ ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(sch_ep->pkts)
+ | EP_BCSCOUNT(sch_ep->cs_count)
+ | EP_BBM(sch_ep->burst_mode));
+ ep_ctx->reserved[1] |= cpu_to_le32(EP_BOFFSET(sch_ep->offset)
+ | EP_BREPEAT(sch_ep->repeat));
+
+ xhci_dbg(xhci, " PKTS:%x, CSCOUNT:%x, BM:%x, OFFSET:%x, REPEAT:%x\n",
+ sch_ep->pkts, sch_ep->cs_count, sch_ep->burst_mode,
+ sch_ep->offset, sch_ep->repeat);
+ }
+
+ return xhci_check_bandwidth(hcd, udev);
+}
+EXPORT_SYMBOL_GPL(xhci_mtk_check_bandwidth);
+
+void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+{
+ struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd);
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct mu3h_sch_bw_info *sch_bw;
+ struct mu3h_sch_ep_info *sch_ep, *tmp;
+ int bw_index;
+
+ xhci_dbg(xhci, "%s() udev %s\n", __func__, dev_name(&udev->dev));
+
+ list_for_each_entry_safe(sch_ep, tmp, &mtk->bw_ep_chk_list, endpoint) {
+ bw_index = get_bw_index(xhci, udev, sch_ep->ep);
+ sch_bw = &mtk->sch_array[bw_index];
+ destroy_sch_ep(udev, sch_bw, sch_ep);
+ }
+
+ xhci_reset_bandwidth(hcd, udev);
+}
+EXPORT_SYMBOL_GPL(xhci_mtk_reset_bandwidth);
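The reshuffle turns bandwidth allocation into a two-phase commit: xhci_mtk_add_ep_quirk() now only parks endpoints on bw_ep_chk_list, and xhci_mtk_check_bandwidth() validates every pending endpoint before any of them is moved onto a bandwidth domain, with xhci_mtk_reset_bandwidth() unwinding the whole pending set on failure. A minimal sketch of the shape, with illustrative names:

/* Sketch: stage requests, validate them all, then commit together,
 * so a mid-set failure leaves nothing half-allocated. */
static int commit_pending(struct list_head *pending,
			  struct list_head *committed)
{
	struct item *it, *tmp;

	list_for_each_entry(it, pending, node)	/* phase 1: validate */
		if (!fits(it))
			return -ENOSPC;		/* nothing committed */

	list_for_each_entry_safe(it, tmp, pending, node)
		list_move_tail(&it->node, committed);	/* phase 2 */
	return 0;
}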
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
index 8f321f39ab96..fe010cc61f19 100644
--- a/drivers/usb/host/xhci-mtk.c
+++ b/drivers/usb/host/xhci-mtk.c
@@ -347,6 +347,8 @@ static void usb_wakeup_set(struct xhci_hcd_mtk *mtk, bool enable)
static int xhci_mtk_setup(struct usb_hcd *hcd);
static const struct xhci_driver_overrides xhci_mtk_overrides __initconst = {
.reset = xhci_mtk_setup,
+ .check_bandwidth = xhci_mtk_check_bandwidth,
+ .reset_bandwidth = xhci_mtk_reset_bandwidth,
};
static struct hc_driver __read_mostly xhci_mtk_hc_driver;
diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h
index a93cfe817904..cbb09dfea62e 100644
--- a/drivers/usb/host/xhci-mtk.h
+++ b/drivers/usb/host/xhci-mtk.h
@@ -59,6 +59,7 @@ struct mu3h_sch_bw_info {
* @ep_type: endpoint type
* @maxpkt: max packet size of endpoint
* @ep: address of usb_host_endpoint struct
+ * @allocated: the bandwidth is already allocated from bus_bw
* @offset: which uframe of the interval that transfer should be
* scheduled first time within the interval
* @repeat: the time gap between two uframes that transfers are
@@ -86,6 +87,7 @@ struct mu3h_sch_ep_info {
u32 ep_type;
u32 maxpkt;
void *ep;
+ bool allocated;
/*
* mtk xHCI scheduling information put into reserved DWs
* in ep context
@@ -131,6 +133,7 @@ struct xhci_hcd_mtk {
struct device *dev;
struct usb_hcd *hcd;
struct mu3h_sch_bw_info *sch_array;
+ struct list_head bw_ep_chk_list;
struct mu3c_ippc_regs __iomem *ippc_regs;
bool has_ippc;
int num_u2_ports;
@@ -166,6 +169,8 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
struct usb_host_endpoint *ep);
void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
struct usb_host_endpoint *ep);
+int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
+void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
#else
static inline int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd,
@@ -179,6 +184,16 @@ static inline void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd,
{
}
+static inline int xhci_mtk_check_bandwidth(struct usb_hcd *hcd,
+ struct usb_device *udev)
+{
+ return 0;
+}
+
+static inline void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd,
+ struct usb_device *udev)
+{
+}
#endif
#endif /* _XHCI_MTK_H_ */
diff --git a/drivers/usb/host/xhci-mvebu.c b/drivers/usb/host/xhci-mvebu.c
index 60651a50770f..8ca1a235d164 100644
--- a/drivers/usb/host/xhci-mvebu.c
+++ b/drivers/usb/host/xhci-mvebu.c
@@ -8,6 +8,7 @@
#include <linux/mbus.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
@@ -74,6 +75,47 @@ int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd)
return 0;
}
+int xhci_mvebu_a3700_plat_setup(struct usb_hcd *hcd)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct device *dev = hcd->self.controller;
+ struct phy *phy;
+ int ret;
+
+ /* Old bindings miss the PHY handle */
+ phy = of_phy_get(dev->of_node, "usb3-phy");
+ if (IS_ERR(phy) && PTR_ERR(phy) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ else if (IS_ERR(phy))
+ goto phy_out;
+
+ ret = phy_init(phy);
+ if (ret)
+ goto phy_put;
+
+ ret = phy_set_mode(phy, PHY_MODE_USB_HOST_SS);
+ if (ret)
+ goto phy_exit;
+
+ ret = phy_power_on(phy);
+ if (ret == -EOPNOTSUPP) {
+ /* Skip initialization of XHCI PHY when it is unsupported by firmware */
+ dev_warn(dev, "PHY unsupported by firmware\n");
+ xhci->quirks |= XHCI_SKIP_PHY_INIT;
+ }
+ if (ret)
+ goto phy_exit;
+
+ phy_power_off(phy);
+phy_exit:
+ phy_exit(phy);
+phy_put:
+ of_phy_put(phy);
+phy_out:
+
+ return 0;
+}
+
int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
diff --git a/drivers/usb/host/xhci-mvebu.h b/drivers/usb/host/xhci-mvebu.h
index 3be021793cc8..01bf3fcb3eca 100644
--- a/drivers/usb/host/xhci-mvebu.h
+++ b/drivers/usb/host/xhci-mvebu.h
@@ -12,6 +12,7 @@ struct usb_hcd;
#if IS_ENABLED(CONFIG_USB_XHCI_MVEBU)
int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd);
+int xhci_mvebu_a3700_plat_setup(struct usb_hcd *hcd);
int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd);
#else
static inline int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd)
@@ -19,6 +20,11 @@ static inline int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd)
return 0;
}
+static inline int xhci_mvebu_a3700_plat_setup(struct usb_hcd *hcd)
+{
+ return 0;
+}
+
static inline int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd)
{
return 0;
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 4d34f6005381..c1edcc9b13ce 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -44,6 +44,16 @@ static void xhci_priv_plat_start(struct usb_hcd *hcd)
priv->plat_start(hcd);
}
+static int xhci_priv_plat_setup(struct usb_hcd *hcd)
+{
+ struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd);
+
+ if (!priv->plat_setup)
+ return 0;
+
+ return priv->plat_setup(hcd);
+}
+
static int xhci_priv_init_quirk(struct usb_hcd *hcd)
{
struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd);
@@ -111,6 +121,7 @@ static const struct xhci_plat_priv xhci_plat_marvell_armada = {
};
static const struct xhci_plat_priv xhci_plat_marvell_armada3700 = {
+ .plat_setup = xhci_mvebu_a3700_plat_setup,
.init_quirk = xhci_mvebu_a3700_init_quirk,
};
@@ -330,7 +341,14 @@ static int xhci_plat_probe(struct platform_device *pdev)
hcd->tpl_support = of_usb_host_tpl_support(sysdev->of_node);
xhci->shared_hcd->tpl_support = hcd->tpl_support;
- if (priv && (priv->quirks & XHCI_SKIP_PHY_INIT))
+
+ if (priv) {
+ ret = xhci_priv_plat_setup(hcd);
+ if (ret)
+ goto disable_usb_phy;
+ }
+
+ if ((xhci->quirks & XHCI_SKIP_PHY_INIT) || (priv && (priv->quirks & XHCI_SKIP_PHY_INIT)))
hcd->skip_phy_initialization = 1;
if (priv && (priv->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK))
diff --git a/drivers/usb/host/xhci-plat.h b/drivers/usb/host/xhci-plat.h
index 1fb149d1fbce..561d0b7bce09 100644
--- a/drivers/usb/host/xhci-plat.h
+++ b/drivers/usb/host/xhci-plat.h
@@ -13,6 +13,7 @@
struct xhci_plat_priv {
const char *firmware_name;
unsigned long long quirks;
+ int (*plat_setup)(struct usb_hcd *);
void (*plat_start)(struct usb_hcd *);
int (*init_quirk)(struct usb_hcd *);
int (*suspend_quirk)(struct usb_hcd *);
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 5677b81c0915..89c3be9917f6 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -699,11 +699,16 @@ static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
DMA_FROM_DEVICE);
/* for in transfers we need to copy the data from bounce to sg */
- len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf,
- seg->bounce_len, seg->bounce_offs);
- if (len != seg->bounce_len)
- xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n",
- len, seg->bounce_len);
+ if (urb->num_sgs) {
+ len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf,
+ seg->bounce_len, seg->bounce_offs);
+ if (len != seg->bounce_len)
+ xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n",
+ len, seg->bounce_len);
+ } else {
+ memcpy(urb->transfer_buffer + seg->bounce_offs, seg->bounce_buf,
+ seg->bounce_len);
+ }
seg->bounce_len = 0;
seg->bounce_offs = 0;
}
@@ -2931,6 +2936,8 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
trb->field[0] = cpu_to_le32(field1);
trb->field[1] = cpu_to_le32(field2);
trb->field[2] = cpu_to_le32(field3);
+ /* make sure TRB is fully written before giving it to the controller */
+ wmb();
trb->field[3] = cpu_to_le32(field4);
trace_xhci_queue_trb(ring, trb);
@@ -3275,12 +3282,16 @@ static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
/* create a max max_pkt sized bounce buffer pointed to by last trb */
if (usb_urb_dir_out(urb)) {
- len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
- seg->bounce_buf, new_buff_len, enqd_len);
- if (len != new_buff_len)
- xhci_warn(xhci,
- "WARN Wrong bounce buffer write length: %zu != %d\n",
- len, new_buff_len);
+ if (urb->num_sgs) {
+ len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
+ seg->bounce_buf, new_buff_len, enqd_len);
+ if (len != new_buff_len)
+ xhci_warn(xhci, "WARN Wrong bounce buffer write length: %zu != %d\n",
+ len, new_buff_len);
+ } else {
+ memcpy(seg->bounce_buf, urb->transfer_buffer + enqd_len, new_buff_len);
+ }
+
seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
max_pkt, DMA_TO_DEVICE);
} else {
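Two independent fixes land here: the bounce-buffer paths fall back to plain memcpy() against urb->transfer_buffer when the URB carries no scatterlist, and queue_trb() gains a write barrier so the first three TRB words are globally visible before the fourth, whose cycle bit hands the TRB to the controller. A minimal sketch of that publish ordering, with illustrative names:

/* Sketch: producer-side ordering for a ring entry handed to hardware.
 * The payload must be visible before the word the consumer polls. */
entry->field[0] = cpu_to_le32(f1);
entry->field[1] = cpu_to_le32(f2);
entry->field[2] = cpu_to_le32(f3);
wmb();		/* publish payload before the ownership word */
entry->field[3] = cpu_to_le32(f4 | cycle_bit);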
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index 934be1686352..50bb91b6a4b8 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -623,6 +623,13 @@ static void tegra_xusb_mbox_handle(struct tegra_xusb *tegra,
enable);
if (err < 0)
break;
+
+ /*
+ * wait 500us for LFPS detector to be disabled before
+ * sending ACK
+ */
+ if (!enable)
+ usleep_range(500, 1000);
}
if (err < 0) {
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 91ab81c3fc79..345a221028c6 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -2985,7 +2985,7 @@ static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci,
* else should be touching the xhci->devs[slot_id] structure, so we
* don't need to take the xhci->lock for manipulating that.
*/
-static int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
int i;
int ret = 0;
@@ -3083,7 +3083,7 @@ command_cleanup:
return ret;
}
-static void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
struct xhci_hcd *xhci;
struct xhci_virt_device *virt_dev;
@@ -4770,19 +4770,19 @@ static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
{
unsigned long long timeout_ns;
+ if (xhci->quirks & XHCI_INTEL_HOST)
+ timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
+ else
+ timeout_ns = udev->u1_params.sel;
+
/* Prevent U1 if service interval is shorter than U1 exit latency */
if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
- if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) {
+ if (xhci_service_interval_to_ns(desc) <= timeout_ns) {
dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n");
return USB3_LPM_DISABLED;
}
}
- if (xhci->quirks & XHCI_INTEL_HOST)
- timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
- else
- timeout_ns = udev->u1_params.sel;
-
/* The U1 timeout is encoded in 1us intervals.
* Don't return a timeout of zero, because that's USB3_LPM_DISABLED.
*/
@@ -4834,19 +4834,19 @@ static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
{
unsigned long long timeout_ns;
+ if (xhci->quirks & XHCI_INTEL_HOST)
+ timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
+ else
+ timeout_ns = udev->u2_params.sel;
+
/* Prevent U2 if service interval is shorter than U2 exit latency */
if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
- if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) {
+ if (xhci_service_interval_to_ns(desc) <= timeout_ns) {
dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n");
return USB3_LPM_DISABLED;
}
}
- if (xhci->quirks & XHCI_INTEL_HOST)
- timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
- else
- timeout_ns = udev->u2_params.sel;
-
/* The U2 timeout is encoded in 256us intervals */
timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
/* If the necessary timeout value is bigger than what we can set in the
@@ -5510,6 +5510,10 @@ void xhci_init_driver(struct hc_driver *drv,
drv->reset = over->reset;
if (over->start)
drv->start = over->start;
+ if (over->check_bandwidth)
+ drv->check_bandwidth = over->check_bandwidth;
+ if (over->reset_bandwidth)
+ drv->reset_bandwidth = over->reset_bandwidth;
}
}
EXPORT_SYMBOL_GPL(xhci_init_driver);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 25e57bc9c3cc..07ff95016f11 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1920,6 +1920,8 @@ struct xhci_driver_overrides {
size_t extra_priv_size;
int (*reset)(struct usb_hcd *hcd);
int (*start)(struct usb_hcd *hcd);
+ int (*check_bandwidth)(struct usb_hcd *, struct usb_device *);
+ void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *);
};
#define XHCI_CFC_DELAY 10
@@ -2074,6 +2076,8 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks);
void xhci_shutdown(struct usb_hcd *hcd);
void xhci_init_driver(struct hc_driver *drv,
const struct xhci_driver_overrides *over);
+int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
+void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id);
int xhci_ext_cap_init(struct xhci_hcd *xhci);
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index 73ebfa6e9715..c640f98d20c5 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -496,6 +496,9 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
timeout = schedule_timeout(YUREX_WRITE_TIMEOUT);
finish_wait(&dev->waitq, &wait);
+ /* make sure URB is idle after timeout or (spurious) CMD_ACK */
+ usb_kill_urb(dev->cntl_urb);
+
mutex_unlock(&dev->io_mutex);
if (retval < 0) {
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index ac9a81ae8216..e6fa13701808 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -126,6 +126,7 @@ struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt)
}
usbhs_pipe_clear_without_sequence(pipe, 0, 0);
+ usbhs_pipe_running(pipe, 0);
__usbhsf_pkt_del(pkt);
}
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index fbb10dfc56e3..7bec1e730b20 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -61,6 +61,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
{ USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
{ USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */
+ { USB_DEVICE(0x0988, 0x0578) }, /* Teraoka AD2000 */
{ USB_DEVICE(0x0B00, 0x3070) }, /* Ingenico 3070 */
{ USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */
{ USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */
@@ -201,6 +202,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */
{ USB_DEVICE(0x1901, 0x0195) }, /* GE B850/B650/B450 CP2104 DP UART interface */
{ USB_DEVICE(0x1901, 0x0196) }, /* GE B850 CP2105 DP UART interface */
+ { USB_DEVICE(0x199B, 0xBA30) }, /* LORD WSDA-200-USB */
{ USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
{ USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
{ USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index f1201d4de297..e8f06b41a503 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -532,23 +532,29 @@ static int iuu_uart_flush(struct usb_serial_port *port)
struct device *dev = &port->dev;
int i;
int status;
- u8 rxcmd = IUU_UART_RX;
+ u8 *rxcmd;
struct iuu_private *priv = usb_get_serial_port_data(port);
if (iuu_led(port, 0xF000, 0, 0, 0xFF) < 0)
return -EIO;
+ rxcmd = kmalloc(1, GFP_KERNEL);
+ if (!rxcmd)
+ return -ENOMEM;
+
+ rxcmd[0] = IUU_UART_RX;
+
for (i = 0; i < 2; i++) {
- status = bulk_immediate(port, &rxcmd, 1);
+ status = bulk_immediate(port, rxcmd, 1);
if (status != IUU_OPERATION_OK) {
dev_dbg(dev, "%s - uart_flush_write error\n", __func__);
- return status;
+ goto out_free;
}
status = read_immediate(port, &priv->len, 1);
if (status != IUU_OPERATION_OK) {
dev_dbg(dev, "%s - uart_flush_read error\n", __func__);
- return status;
+ goto out_free;
}
if (priv->len > 0) {
@@ -556,12 +562,16 @@ static int iuu_uart_flush(struct usb_serial_port *port)
status = read_immediate(port, priv->buf, priv->len);
if (status != IUU_OPERATION_OK) {
dev_dbg(dev, "%s - uart_flush_read error\n", __func__);
- return status;
+ goto out_free;
}
}
}
dev_dbg(dev, "%s - uart_flush_read OK!\n", __func__);
iuu_led(port, 0, 0xF000, 0, 0xFF);
+
+out_free:
+ kfree(rxcmd);
+
return status;
}
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 2c21e34235bb..2049e66f34a3 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -425,6 +425,8 @@ static void option_instat_callback(struct urb *urb);
#define CINTERION_PRODUCT_AHXX_2RMNET 0x0084
#define CINTERION_PRODUCT_AHXX_AUDIO 0x0085
#define CINTERION_PRODUCT_CLS8 0x00b0
+#define CINTERION_PRODUCT_MV31_MBIM 0x00b3
+#define CINTERION_PRODUCT_MV31_RMNET 0x00b7
/* Olivetti products */
#define OLIVETTI_VENDOR_ID 0x0b3c
@@ -1117,6 +1119,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0620, 0xff, 0xff, 0x30) }, /* EM160R-GL */
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0620, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10),
@@ -1912,6 +1916,10 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) },
{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */
{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
+ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_MBIM, 0xff),
+ .driver_info = RSVD(3)},
+ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_RMNET, 0xff),
+ .driver_info = RSVD(0)},
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100),
.driver_info = RSVD(4) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120),
@@ -2057,6 +2065,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */
.driver_info = RSVD(6) },
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
+ { USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index 870e9cf3d5dc..f9677a5ec31b 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -91,6 +91,13 @@ UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999,
US_FL_BROKEN_FUA),
/* Reported-by: Thinh Nguyen <thinhn@synopsys.com> */
+UNUSUAL_DEV(0x154b, 0xf00b, 0x0000, 0x9999,
+ "PNY",
+ "Pro Elite SSD",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_ATA_1X),
+
+/* Reported-by: Thinh Nguyen <thinhn@synopsys.com> */
UNUSUAL_DEV(0x154b, 0xf00d, 0x0000, 0x9999,
"PNY",
"Pro Elite SSD",
diff --git a/drivers/usb/typec/altmodes/Kconfig b/drivers/usb/typec/altmodes/Kconfig
index 187690fd1a5b..60d375e9c3c7 100644
--- a/drivers/usb/typec/altmodes/Kconfig
+++ b/drivers/usb/typec/altmodes/Kconfig
@@ -20,6 +20,6 @@ config TYPEC_NVIDIA_ALTMODE
to enable support for VirtualLink devices with NVIDIA GPUs.
To compile this driver as a module, choose M here: the
- module will be called typec_displayport.
+ module will be called typec_nvidia.
endmenu
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index ebfd3113a9a8..8f77669f9cf4 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -766,6 +766,7 @@ int typec_partner_set_num_altmodes(struct typec_partner *partner, int num_altmod
return ret;
sysfs_notify(&partner->dev.kobj, NULL, "number_of_alternate_modes");
+ kobject_uevent(&partner->dev.kobj, KOBJ_CHANGE);
return 0;
}
@@ -923,6 +924,7 @@ int typec_plug_set_num_altmodes(struct typec_plug *plug, int num_altmodes)
return ret;
sysfs_notify(&plug->dev.kobj, NULL, "number_of_alternate_modes");
+ kobject_uevent(&plug->dev.kobj, KOBJ_CHANGE);
return 0;
}
diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c
index cf37a59ce130..46a25b8db72e 100644
--- a/drivers/usb/typec/mux/intel_pmc_mux.c
+++ b/drivers/usb/typec/mux/intel_pmc_mux.c
@@ -207,10 +207,21 @@ static int
pmc_usb_mux_dp_hpd(struct pmc_usb_port *port, struct typec_displayport_data *dp)
{
u8 msg[2] = { };
+ int ret;
msg[0] = PMC_USB_DP_HPD;
msg[0] |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT;
+ /* Configure HPD first if HPD and IRQ arrive together */
+ if (!IOM_PORT_HPD_ASSERTED(port->iom_status) &&
+ dp->status & DP_STATUS_IRQ_HPD &&
+ dp->status & DP_STATUS_HPD_STATE) {
+ msg[1] = PMC_USB_DP_HPD_LVL;
+ ret = pmc_usb_command(port, msg, sizeof(msg));
+ if (ret)
+ return ret;
+ }
+
if (dp->status & DP_STATUS_IRQ_HPD)
msg[1] = PMC_USB_DP_HPD_IRQ;
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index 66cde5e5f796..3209b5ddd30c 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -396,6 +396,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
default:
usbip_dbg_vhci_rh(" ClearPortFeature: default %x\n",
wValue);
+ if (wValue >= 32)
+ goto error;
vhci_hcd->port_status[rhport] &= ~(1 << wValue);
break;
}
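The added bound check closes an undefined-behaviour hole: wValue names a bit in the 32-bit port_status word, and a shift by 32 or more is undefined in C, so out-of-range feature values are rejected before the shift. A standalone sketch of the guard:

/* Sketch: reject bit indices that would shift past a 32-bit word. */
#include <errno.h>
#include <stdint.h>

static int clear_port_bit(uint32_t *status, unsigned int value)
{
	if (value >= 32)	/* 1 << 32 is undefined behaviour */
		return -EINVAL;
	*status &= ~(UINT32_C(1) << value);
	return 0;
}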
diff --git a/drivers/vdpa/Kconfig b/drivers/vdpa/Kconfig
index 6caf539091e5..92a6396f8a73 100644
--- a/drivers/vdpa/Kconfig
+++ b/drivers/vdpa/Kconfig
@@ -9,21 +9,24 @@ menuconfig VDPA
if VDPA
config VDPA_SIM
- tristate "vDPA device simulator"
+ tristate "vDPA device simulator core"
depends on RUNTIME_TESTING_MENU && HAS_DMA
select DMA_OPS
select VHOST_RING
+ help
+ Enable this module to support vDPA device simulators. These devices
+ are used for testing, prototyping and development of vDPA.
+
+config VDPA_SIM_NET
+ tristate "vDPA simulator for networking device"
+ depends on VDPA_SIM
select GENERIC_NET_UTILS
- default n
help
- vDPA networking device simulator which loop TX traffic back
- to RX. This device is used for testing, prototyping and
- development of vDPA.
+ vDPA networking device simulator which loops TX traffic back to RX.
config IFCVF
tristate "Intel IFC VF vDPA driver"
depends on PCI_MSI
- default n
help
This kernel module can drive Intel IFC VF NIC to offload
virtio dataplane traffic to hardware.
@@ -42,7 +45,6 @@ config MLX5_VDPA_NET
tristate "vDPA driver for ConnectX devices"
select MLX5_VDPA
depends on MLX5_CORE
- default n
help
VDPA network driver for ConnectX6 and newer. Provides offloading
of virtio net datapath such that descriptors put on the ring will
diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
index 8b4028556cb6..fa1af301cf55 100644
--- a/drivers/vdpa/ifcvf/ifcvf_main.c
+++ b/drivers/vdpa/ifcvf/ifcvf_main.c
@@ -417,16 +417,9 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return ret;
}
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (ret) {
- IFCVF_ERR(pdev, "No usable DMA confiugration\n");
- return ret;
- }
-
- ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- if (ret) {
- IFCVF_ERR(pdev,
- "No usable coherent DMA confiugration\n");
+ IFCVF_ERR(pdev, "No usable DMA configuration\n");
return ret;
}
diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
index 5c92a576edae..08f742fd2409 100644
--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
@@ -15,6 +15,7 @@ struct mlx5_vdpa_direct_mr {
struct sg_table sg_head;
int log_size;
int nsg;
+ int nent;
struct list_head list;
u64 offset;
};
diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
index 4b6195666c58..d300f799efcd 100644
--- a/drivers/vdpa/mlx5/core/mr.c
+++ b/drivers/vdpa/mlx5/core/mr.c
@@ -25,17 +25,6 @@ static int get_octo_len(u64 len, int page_shift)
return (npages + 1) / 2;
}
-static void fill_sg(struct mlx5_vdpa_direct_mr *mr, void *in)
-{
- struct scatterlist *sg;
- __be64 *pas;
- int i;
-
- pas = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
- for_each_sg(mr->sg_head.sgl, sg, mr->nsg, i)
- (*pas) = cpu_to_be64(sg_dma_address(sg));
-}
-
static void mlx5_set_access_mode(void *mkc, int mode)
{
MLX5_SET(mkc, mkc, access_mode_1_0, mode & 0x3);
@@ -45,10 +34,18 @@ static void mlx5_set_access_mode(void *mkc, int mode)
static void populate_mtts(struct mlx5_vdpa_direct_mr *mr, __be64 *mtt)
{
struct scatterlist *sg;
+ int nsg = mr->nsg;
+ u64 dma_addr;
+ u64 dma_len;
+ int j = 0;
int i;
- for_each_sg(mr->sg_head.sgl, sg, mr->nsg, i)
- mtt[i] = cpu_to_be64(sg_dma_address(sg));
+ for_each_sg(mr->sg_head.sgl, sg, mr->nent, i) {
+ for (dma_addr = sg_dma_address(sg), dma_len = sg_dma_len(sg);
+ nsg && dma_len;
+ nsg--, dma_addr += BIT(mr->log_size), dma_len -= BIT(mr->log_size))
+ mtt[j++] = cpu_to_be64(dma_addr);
+ }
}
static int create_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
@@ -64,7 +61,6 @@ static int create_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct
return -ENOMEM;
MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid);
- fill_sg(mr, in);
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
MLX5_SET(mkc, mkc, lw, !!(mr->perm & VHOST_MAP_WO));
MLX5_SET(mkc, mkc, lr, !!(mr->perm & VHOST_MAP_RO));
@@ -276,8 +272,8 @@ static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr
done:
mr->log_size = log_entity_size;
mr->nsg = nsg;
- err = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
- if (!err)
+ mr->nent = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
+ if (!mr->nent)
goto err_map;
err = create_direct_mr(mvdev, mr);
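populate_mtts() used to assume one MTT per scatterlist entry, but after DMA mapping the IOMMU may have merged pages into fewer, longer segments, which is what the new nent field records. The rewrite therefore walks the mapped segments and slices each into 1 << log_size pages. A standalone sketch with assumed inputs (two merged segments, 4 KiB pages):

/* Sketch of the MTT fill: slice each DMA segment into page-sized
 * translation entries.  Inputs are made up for illustration. */
#include <stdio.h>

int main(void)
{
	unsigned long long seg_addr[] = { 0x100000, 0x400000 };
	unsigned long long seg_len[]  = { 0x3000, 0x1000 }; /* 3+1 pages */
	int log_size = 12, nsg = 4, j = 0;

	for (int i = 0; i < 2; i++) {
		unsigned long long addr = seg_addr[i];
		unsigned long long len = seg_len[i];

		for (; nsg && len; nsg--, addr += 1ULL << log_size,
				   len -= 1ULL << log_size)
			printf("mtt[%d] = 0x%llx\n", j++, addr);
	}
	return 0;
}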
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index f1d54814db97..b5fe6d2ad22f 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -87,6 +87,7 @@ struct mlx5_vq_restore_info {
u64 device_addr;
u64 driver_addr;
u16 avail_index;
+ u16 used_index;
bool ready;
struct vdpa_callback cb;
bool restore;
@@ -121,6 +122,7 @@ struct mlx5_vdpa_virtqueue {
u32 virtq_id;
struct mlx5_vdpa_net *ndev;
u16 avail_idx;
+ u16 used_idx;
int fw_state;
/* keep last in the struct */
@@ -479,6 +481,11 @@ static int mlx5_vdpa_poll_one(struct mlx5_vdpa_cq *vcq)
static void mlx5_vdpa_handle_completions(struct mlx5_vdpa_virtqueue *mvq, int num)
{
mlx5_cq_set_ci(&mvq->cq.mcq);
+
+ /* make sure CQ consumer update is visible to the hardware before updating
+ * RX doorbell record.
+ */
+ dma_wmb();
rx_post(&mvq->vqqp, num);
if (mvq->event_cb.callback)
mvq->event_cb.callback(mvq->event_cb.private);
@@ -799,6 +806,7 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
obj_context = MLX5_ADDR_OF(create_virtio_net_q_in, in, obj_context);
MLX5_SET(virtio_net_q_object, obj_context, hw_available_index, mvq->avail_idx);
+ MLX5_SET(virtio_net_q_object, obj_context, hw_used_index, mvq->used_idx);
MLX5_SET(virtio_net_q_object, obj_context, queue_feature_bit_mask_12_3,
get_features_12_3(ndev->mvdev.actual_features));
vq_ctx = MLX5_ADDR_OF(virtio_net_q_object, obj_context, virtio_q_context);
@@ -1017,6 +1025,7 @@ static int connect_qps(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *m
struct mlx5_virtq_attr {
u8 state;
u16 available_index;
+ u16 used_index;
};
static int query_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq,
@@ -1047,6 +1056,7 @@ static int query_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueu
memset(attr, 0, sizeof(*attr));
attr->state = MLX5_GET(virtio_net_q_object, obj_context, state);
attr->available_index = MLX5_GET(virtio_net_q_object, obj_context, hw_available_index);
+ attr->used_index = MLX5_GET(virtio_net_q_object, obj_context, hw_used_index);
kfree(out);
return 0;
@@ -1530,6 +1540,16 @@ static void teardown_virtqueues(struct mlx5_vdpa_net *ndev)
}
}
+static void clear_virtqueues(struct mlx5_vdpa_net *ndev)
+{
+ int i;
+
+ for (i = ndev->mvdev.max_vqs - 1; i >= 0; i--) {
+ ndev->vqs[i].avail_idx = 0;
+ ndev->vqs[i].used_idx = 0;
+ }
+}
+
/* TODO: cross-endian support */
static inline bool mlx5_vdpa_is_little_endian(struct mlx5_vdpa_dev *mvdev)
{
@@ -1605,6 +1625,7 @@ static int save_channel_info(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqu
return err;
ri->avail_index = attr.available_index;
+ ri->used_index = attr.used_index;
ri->ready = mvq->ready;
ri->num_ent = mvq->num_ent;
ri->desc_addr = mvq->desc_addr;
@@ -1649,6 +1670,7 @@ static void restore_channels_info(struct mlx5_vdpa_net *ndev)
continue;
mvq->avail_idx = ri->avail_index;
+ mvq->used_idx = ri->used_index;
mvq->ready = ri->ready;
mvq->num_ent = ri->num_ent;
mvq->desc_addr = ri->desc_addr;
@@ -1763,6 +1785,7 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
if (!status) {
mlx5_vdpa_info(mvdev, "performing device reset\n");
teardown_driver(ndev);
+ clear_virtqueues(ndev);
mlx5_vdpa_destroy_mr(&ndev->mvdev);
ndev->mvdev.status = 0;
ndev->mvdev.mlx_features = 0;
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
index a69ffc991e13..c0825650c055 100644
--- a/drivers/vdpa/vdpa.c
+++ b/drivers/vdpa/vdpa.c
@@ -89,7 +89,7 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent,
if (!vdev)
goto err;
- err = ida_simple_get(&vdpa_index_ida, 0, 0, GFP_KERNEL);
+ err = ida_alloc(&vdpa_index_ida, GFP_KERNEL);
if (err < 0)
goto err_ida;
diff --git a/drivers/vdpa/vdpa_sim/Makefile b/drivers/vdpa/vdpa_sim/Makefile
index b40278f65e04..79d4536d347e 100644
--- a/drivers/vdpa/vdpa_sim/Makefile
+++ b/drivers/vdpa/vdpa_sim/Makefile
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_VDPA_SIM) += vdpa_sim.o
+obj-$(CONFIG_VDPA_SIM_NET) += vdpa_sim_net.o
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index 6a90fdb9cbfc..b3fcc67bfdf0 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * VDPA networking device simulator.
+ * VDPA device simulator core.
*
* Copyright (c) 2020, Red Hat Inc. All rights reserved.
* Author: Jason Wang <jasowang@redhat.com>
@@ -11,97 +11,32 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/sched.h>
-#include <linux/wait.h>
-#include <linux/uuid.h>
-#include <linux/iommu.h>
#include <linux/dma-map-ops.h>
-#include <linux/sysfs.h>
-#include <linux/file.h>
-#include <linux/etherdevice.h>
#include <linux/vringh.h>
#include <linux/vdpa.h>
-#include <linux/virtio_byteorder.h>
#include <linux/vhost_iotlb.h>
-#include <uapi/linux/virtio_config.h>
-#include <uapi/linux/virtio_net.h>
+
+#include "vdpa_sim.h"
#define DRV_VERSION "0.1"
#define DRV_AUTHOR "Jason Wang <jasowang@redhat.com>"
-#define DRV_DESC "vDPA Device Simulator"
+#define DRV_DESC "vDPA Device Simulator core"
#define DRV_LICENSE "GPL v2"
static int batch_mapping = 1;
module_param(batch_mapping, int, 0444);
MODULE_PARM_DESC(batch_mapping, "Batched mapping 1 -Enable; 0 - Disable");
-static char *macaddr;
-module_param(macaddr, charp, 0);
-MODULE_PARM_DESC(macaddr, "Ethernet MAC address");
-
-struct vdpasim_virtqueue {
- struct vringh vring;
- struct vringh_kiov iov;
- unsigned short head;
- bool ready;
- u64 desc_addr;
- u64 device_addr;
- u64 driver_addr;
- u32 num;
- void *private;
- irqreturn_t (*cb)(void *data);
-};
+static int max_iotlb_entries = 2048;
+module_param(max_iotlb_entries, int, 0444);
+MODULE_PARM_DESC(max_iotlb_entries,
+ "Maximum number of iotlb entries. 0 means unlimited. (default: 2048)");
#define VDPASIM_QUEUE_ALIGN PAGE_SIZE
#define VDPASIM_QUEUE_MAX 256
-#define VDPASIM_DEVICE_ID 0x1
#define VDPASIM_VENDOR_ID 0
-#define VDPASIM_VQ_NUM 0x2
-#define VDPASIM_NAME "vdpasim-netdev"
-
-static u64 vdpasim_features = (1ULL << VIRTIO_F_ANY_LAYOUT) |
- (1ULL << VIRTIO_F_VERSION_1) |
- (1ULL << VIRTIO_F_ACCESS_PLATFORM) |
- (1ULL << VIRTIO_NET_F_MAC);
-
-/* State of each vdpasim device */
-struct vdpasim {
- struct vdpa_device vdpa;
- struct vdpasim_virtqueue vqs[VDPASIM_VQ_NUM];
- struct work_struct work;
- /* spinlock to synchronize virtqueue state */
- spinlock_t lock;
- struct virtio_net_config config;
- struct vhost_iotlb *iommu;
- void *buffer;
- u32 status;
- u32 generation;
- u64 features;
- /* spinlock to synchronize iommu table */
- spinlock_t iommu_lock;
-};
-
-/* TODO: cross-endian support */
-static inline bool vdpasim_is_little_endian(struct vdpasim *vdpasim)
-{
- return virtio_legacy_is_little_endian() ||
- (vdpasim->features & (1ULL << VIRTIO_F_VERSION_1));
-}
-
-static inline u16 vdpasim16_to_cpu(struct vdpasim *vdpasim, __virtio16 val)
-{
- return __virtio16_to_cpu(vdpasim_is_little_endian(vdpasim), val);
-}
-
-static inline __virtio16 cpu_to_vdpasim16(struct vdpasim *vdpasim, u16 val)
-{
- return __cpu_to_virtio16(vdpasim_is_little_endian(vdpasim), val);
-}
-
-static struct vdpasim *vdpasim_dev;
static struct vdpasim *vdpa_to_sim(struct vdpa_device *vdpa)
{
@@ -115,20 +50,34 @@ static struct vdpasim *dev_to_sim(struct device *dev)
return vdpa_to_sim(vdpa);
}
+static void vdpasim_vq_notify(struct vringh *vring)
+{
+ struct vdpasim_virtqueue *vq =
+ container_of(vring, struct vdpasim_virtqueue, vring);
+
+ if (!vq->cb)
+ return;
+
+ vq->cb(vq->private);
+}
+
static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
{
struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
- vringh_init_iotlb(&vq->vring, vdpasim_features,
+ vringh_init_iotlb(&vq->vring, vdpasim->dev_attr.supported_features,
VDPASIM_QUEUE_MAX, false,
(struct vring_desc *)(uintptr_t)vq->desc_addr,
(struct vring_avail *)
(uintptr_t)vq->driver_addr,
(struct vring_used *)
(uintptr_t)vq->device_addr);
+
+ vq->vring.notify = vdpasim_vq_notify;
}
-static void vdpasim_vq_reset(struct vdpasim_virtqueue *vq)
+static void vdpasim_vq_reset(struct vdpasim *vdpasim,
+ struct vdpasim_virtqueue *vq)
{
vq->ready = false;
vq->desc_addr = 0;
@@ -136,16 +85,18 @@ static void vdpasim_vq_reset(struct vdpasim_virtqueue *vq)
vq->device_addr = 0;
vq->cb = NULL;
vq->private = NULL;
- vringh_init_iotlb(&vq->vring, vdpasim_features, VDPASIM_QUEUE_MAX,
- false, NULL, NULL, NULL);
+ vringh_init_iotlb(&vq->vring, vdpasim->dev_attr.supported_features,
+ VDPASIM_QUEUE_MAX, false, NULL, NULL, NULL);
+
+ vq->vring.notify = NULL;
}
static void vdpasim_reset(struct vdpasim *vdpasim)
{
int i;
- for (i = 0; i < VDPASIM_VQ_NUM; i++)
- vdpasim_vq_reset(&vdpasim->vqs[i]);
+ for (i = 0; i < vdpasim->dev_attr.nvqs; i++)
+ vdpasim_vq_reset(vdpasim, &vdpasim->vqs[i]);
spin_lock(&vdpasim->iommu_lock);
vhost_iotlb_reset(vdpasim->iommu);
@@ -156,80 +107,6 @@ static void vdpasim_reset(struct vdpasim *vdpasim)
++vdpasim->generation;
}
-static void vdpasim_work(struct work_struct *work)
-{
- struct vdpasim *vdpasim = container_of(work, struct
- vdpasim, work);
- struct vdpasim_virtqueue *txq = &vdpasim->vqs[1];
- struct vdpasim_virtqueue *rxq = &vdpasim->vqs[0];
- ssize_t read, write;
- size_t total_write;
- int pkts = 0;
- int err;
-
- spin_lock(&vdpasim->lock);
-
- if (!(vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK))
- goto out;
-
- if (!txq->ready || !rxq->ready)
- goto out;
-
- while (true) {
- total_write = 0;
- err = vringh_getdesc_iotlb(&txq->vring, &txq->iov, NULL,
- &txq->head, GFP_ATOMIC);
- if (err <= 0)
- break;
-
- err = vringh_getdesc_iotlb(&rxq->vring, NULL, &rxq->iov,
- &rxq->head, GFP_ATOMIC);
- if (err <= 0) {
- vringh_complete_iotlb(&txq->vring, txq->head, 0);
- break;
- }
-
- while (true) {
- read = vringh_iov_pull_iotlb(&txq->vring, &txq->iov,
- vdpasim->buffer,
- PAGE_SIZE);
- if (read <= 0)
- break;
-
- write = vringh_iov_push_iotlb(&rxq->vring, &rxq->iov,
- vdpasim->buffer, read);
- if (write <= 0)
- break;
-
- total_write += write;
- }
-
- /* Make sure data is wrote before advancing index */
- smp_wmb();
-
- vringh_complete_iotlb(&txq->vring, txq->head, 0);
- vringh_complete_iotlb(&rxq->vring, rxq->head, total_write);
-
- /* Make sure used is visible before rasing the interrupt. */
- smp_wmb();
-
- local_bh_disable();
- if (txq->cb)
- txq->cb(txq->private);
- if (rxq->cb)
- rxq->cb(rxq->private);
- local_bh_enable();
-
- if (++pkts > 4) {
- schedule_work(&vdpasim->work);
- goto out;
- }
- }
-
-out:
- spin_unlock(&vdpasim->lock);
-}
-
static int dir_to_perm(enum dma_data_direction dir)
{
int perm = -EFAULT;
@@ -342,26 +219,28 @@ static const struct dma_map_ops vdpasim_dma_ops = {
.free = vdpasim_free_coherent,
};
-static const struct vdpa_config_ops vdpasim_net_config_ops;
-static const struct vdpa_config_ops vdpasim_net_batch_config_ops;
+static const struct vdpa_config_ops vdpasim_config_ops;
+static const struct vdpa_config_ops vdpasim_batch_config_ops;
-static struct vdpasim *vdpasim_create(void)
+struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr)
{
const struct vdpa_config_ops *ops;
struct vdpasim *vdpasim;
struct device *dev;
- int ret = -ENOMEM;
+ int i, ret = -ENOMEM;
if (batch_mapping)
- ops = &vdpasim_net_batch_config_ops;
+ ops = &vdpasim_batch_config_ops;
else
- ops = &vdpasim_net_config_ops;
+ ops = &vdpasim_config_ops;
- vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops, VDPASIM_VQ_NUM);
+ vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops,
+ dev_attr->nvqs);
if (!vdpasim)
goto err_alloc;
- INIT_WORK(&vdpasim->work, vdpasim_work);
+ vdpasim->dev_attr = *dev_attr;
+ INIT_WORK(&vdpasim->work, dev_attr->work_fn);
spin_lock_init(&vdpasim->lock);
spin_lock_init(&vdpasim->iommu_lock);
@@ -371,31 +250,27 @@ static struct vdpasim *vdpasim_create(void)
goto err_iommu;
set_dma_ops(dev, &vdpasim_dma_ops);
- vdpasim->iommu = vhost_iotlb_alloc(2048, 0);
+ vdpasim->config = kzalloc(dev_attr->config_size, GFP_KERNEL);
+ if (!vdpasim->config)
+ goto err_iommu;
+
+ vdpasim->vqs = kcalloc(dev_attr->nvqs, sizeof(struct vdpasim_virtqueue),
+ GFP_KERNEL);
+ if (!vdpasim->vqs)
+ goto err_iommu;
+
+ vdpasim->iommu = vhost_iotlb_alloc(max_iotlb_entries, 0);
if (!vdpasim->iommu)
goto err_iommu;
- vdpasim->buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ vdpasim->buffer = kvmalloc(dev_attr->buffer_size, GFP_KERNEL);
if (!vdpasim->buffer)
goto err_iommu;
- if (macaddr) {
- mac_pton(macaddr, vdpasim->config.mac);
- if (!is_valid_ether_addr(vdpasim->config.mac)) {
- ret = -EADDRNOTAVAIL;
- goto err_iommu;
- }
- } else {
- eth_random_addr(vdpasim->config.mac);
- }
-
- vringh_set_iotlb(&vdpasim->vqs[0].vring, vdpasim->iommu);
- vringh_set_iotlb(&vdpasim->vqs[1].vring, vdpasim->iommu);
+ for (i = 0; i < dev_attr->nvqs; i++)
+ vringh_set_iotlb(&vdpasim->vqs[i].vring, vdpasim->iommu);
vdpasim->vdpa.dma_dev = dev;
- ret = vdpa_register_device(&vdpasim->vdpa);
- if (ret)
- goto err_iommu;
return vdpasim;
@@ -404,6 +279,7 @@ err_iommu:
err_alloc:
return ERR_PTR(ret);
}
+EXPORT_SYMBOL_GPL(vdpasim_create);
static int vdpasim_set_vq_address(struct vdpa_device *vdpa, u16 idx,
u64 desc_area, u64 driver_area,
@@ -498,28 +374,21 @@ static u32 vdpasim_get_vq_align(struct vdpa_device *vdpa)
static u64 vdpasim_get_features(struct vdpa_device *vdpa)
{
- return vdpasim_features;
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+
+ return vdpasim->dev_attr.supported_features;
}
static int vdpasim_set_features(struct vdpa_device *vdpa, u64 features)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
- struct virtio_net_config *config = &vdpasim->config;
/* DMA mapping must be done by driver */
if (!(features & (1ULL << VIRTIO_F_ACCESS_PLATFORM)))
return -EINVAL;
- vdpasim->features = features & vdpasim_features;
-
- /* We generally only know whether guest is using the legacy interface
- * here, so generally that's the earliest we can set config fields.
- * Note: We actually require VIRTIO_F_ACCESS_PLATFORM above which
- * implies VIRTIO_F_VERSION_1, but let's not try to be clever here.
- */
+ vdpasim->features = features & vdpasim->dev_attr.supported_features;
- config->mtu = cpu_to_vdpasim16(vdpasim, 1500);
- config->status = cpu_to_vdpasim16(vdpasim, VIRTIO_NET_S_LINK_UP);
return 0;
}
@@ -536,7 +405,9 @@ static u16 vdpasim_get_vq_num_max(struct vdpa_device *vdpa)
static u32 vdpasim_get_device_id(struct vdpa_device *vdpa)
{
- return VDPASIM_DEVICE_ID;
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+
+ return vdpasim->dev_attr.id;
}
static u32 vdpasim_get_vendor_id(struct vdpa_device *vdpa)
@@ -572,14 +443,27 @@ static void vdpasim_get_config(struct vdpa_device *vdpa, unsigned int offset,
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
- if (offset + len < sizeof(struct virtio_net_config))
- memcpy(buf, (u8 *)&vdpasim->config + offset, len);
+ if (offset + len > vdpasim->dev_attr.config_size)
+ return;
+
+ if (vdpasim->dev_attr.get_config)
+ vdpasim->dev_attr.get_config(vdpasim, vdpasim->config);
+
+ memcpy(buf, vdpasim->config + offset, len);
}
static void vdpasim_set_config(struct vdpa_device *vdpa, unsigned int offset,
const void *buf, unsigned int len)
{
- /* No writable config supportted by vdpasim */
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+
+ if (offset + len > vdpasim->dev_attr.config_size)
+ return;
+
+ memcpy(vdpasim->config + offset, buf, len);
+
+ if (vdpasim->dev_attr.set_config)
+ vdpasim->dev_attr.set_config(vdpasim, vdpasim->config);
}
static u32 vdpasim_get_generation(struct vdpa_device *vdpa)
@@ -656,12 +540,14 @@ static void vdpasim_free(struct vdpa_device *vdpa)
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
cancel_work_sync(&vdpasim->work);
- kfree(vdpasim->buffer);
+ kvfree(vdpasim->buffer);
if (vdpasim->iommu)
vhost_iotlb_free(vdpasim->iommu);
+ kfree(vdpasim->vqs);
+ kfree(vdpasim->config);
}
-static const struct vdpa_config_ops vdpasim_net_config_ops = {
+static const struct vdpa_config_ops vdpasim_config_ops = {
.set_vq_address = vdpasim_set_vq_address,
.set_vq_num = vdpasim_set_vq_num,
.kick_vq = vdpasim_kick_vq,
@@ -688,7 +574,7 @@ static const struct vdpa_config_ops vdpasim_net_config_ops = {
.free = vdpasim_free,
};
-static const struct vdpa_config_ops vdpasim_net_batch_config_ops = {
+static const struct vdpa_config_ops vdpasim_batch_config_ops = {
.set_vq_address = vdpasim_set_vq_address,
.set_vq_num = vdpasim_set_vq_num,
.kick_vq = vdpasim_kick_vq,
@@ -714,26 +600,6 @@ static const struct vdpa_config_ops vdpasim_net_batch_config_ops = {
.free = vdpasim_free,
};
-static int __init vdpasim_dev_init(void)
-{
- vdpasim_dev = vdpasim_create();
-
- if (!IS_ERR(vdpasim_dev))
- return 0;
-
- return PTR_ERR(vdpasim_dev);
-}
-
-static void __exit vdpasim_dev_exit(void)
-{
- struct vdpa_device *vdpa = &vdpasim_dev->vdpa;
-
- vdpa_unregister_device(vdpa);
-}
-
-module_init(vdpasim_dev_init)
-module_exit(vdpasim_dev_exit)
-
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE(DRV_LICENSE);
MODULE_AUTHOR(DRV_AUTHOR);
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.h b/drivers/vdpa/vdpa_sim/vdpa_sim.h
new file mode 100644
index 000000000000..b02142293d5b
--- /dev/null
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020, Red Hat Inc. All rights reserved.
+ */
+
+#ifndef _VDPA_SIM_H
+#define _VDPA_SIM_H
+
+#include <linux/vringh.h>
+#include <linux/vdpa.h>
+#include <linux/virtio_byteorder.h>
+#include <linux/vhost_iotlb.h>
+#include <uapi/linux/virtio_config.h>
+
+#define VDPASIM_FEATURES ((1ULL << VIRTIO_F_ANY_LAYOUT) | \
+ (1ULL << VIRTIO_F_VERSION_1) | \
+ (1ULL << VIRTIO_F_ACCESS_PLATFORM))
+
+struct vdpasim;
+
+struct vdpasim_virtqueue {
+ struct vringh vring;
+ struct vringh_kiov in_iov;
+ struct vringh_kiov out_iov;
+ unsigned short head;
+ bool ready;
+ u64 desc_addr;
+ u64 device_addr;
+ u64 driver_addr;
+ u32 num;
+ void *private;
+ irqreturn_t (*cb)(void *data);
+};
+
+struct vdpasim_dev_attr {
+ u64 supported_features;
+ size_t config_size;
+ size_t buffer_size;
+ int nvqs;
+ u32 id;
+
+ work_func_t work_fn;
+ void (*get_config)(struct vdpasim *vdpasim, void *config);
+ void (*set_config)(struct vdpasim *vdpasim, const void *config);
+};
+
+/* State of each vdpasim device */
+struct vdpasim {
+ struct vdpa_device vdpa;
+ struct vdpasim_virtqueue *vqs;
+ struct work_struct work;
+ struct vdpasim_dev_attr dev_attr;
+ /* spinlock to synchronize virtqueue state */
+ spinlock_t lock;
+ /* virtio config according to device type */
+ void *config;
+ struct vhost_iotlb *iommu;
+ void *buffer;
+ u32 status;
+ u32 generation;
+ u64 features;
+ /* spinlock to synchronize iommu table */
+ spinlock_t iommu_lock;
+};
+
+struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *attr);
+
+/* TODO: cross-endian support */
+static inline bool vdpasim_is_little_endian(struct vdpasim *vdpasim)
+{
+ return virtio_legacy_is_little_endian() ||
+ (vdpasim->features & (1ULL << VIRTIO_F_VERSION_1));
+}
+
+static inline u16 vdpasim16_to_cpu(struct vdpasim *vdpasim, __virtio16 val)
+{
+ return __virtio16_to_cpu(vdpasim_is_little_endian(vdpasim), val);
+}
+
+static inline __virtio16 cpu_to_vdpasim16(struct vdpasim *vdpasim, u16 val)
+{
+ return __cpu_to_virtio16(vdpasim_is_little_endian(vdpasim), val);
+}
+
+static inline u32 vdpasim32_to_cpu(struct vdpasim *vdpasim, __virtio32 val)
+{
+ return __virtio32_to_cpu(vdpasim_is_little_endian(vdpasim), val);
+}
+
+static inline __virtio32 cpu_to_vdpasim32(struct vdpasim *vdpasim, u32 val)
+{
+ return __cpu_to_virtio32(vdpasim_is_little_endian(vdpasim), val);
+}
+
+static inline u64 vdpasim64_to_cpu(struct vdpasim *vdpasim, __virtio64 val)
+{
+ return __virtio64_to_cpu(vdpasim_is_little_endian(vdpasim), val);
+}
+
+static inline __virtio64 cpu_to_vdpasim64(struct vdpasim *vdpasim, u64 val)
+{
+ return __cpu_to_virtio64(vdpasim_is_little_endian(vdpasim), val);
+}
+
+#endif
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
new file mode 100644
index 000000000000..c10b6981fdab
--- /dev/null
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * VDPA simulator for a networking device.
+ *
+ * Copyright (c) 2020, Red Hat Inc. All rights reserved.
+ * Author: Jason Wang <jasowang@redhat.com>
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/etherdevice.h>
+#include <linux/vringh.h>
+#include <linux/vdpa.h>
+#include <uapi/linux/virtio_net.h>
+
+#include "vdpa_sim.h"
+
+#define DRV_VERSION "0.1"
+#define DRV_AUTHOR "Jason Wang <jasowang@redhat.com>"
+#define DRV_DESC "vDPA Device Simulator for networking device"
+#define DRV_LICENSE "GPL v2"
+
+#define VDPASIM_NET_FEATURES (VDPASIM_FEATURES | \
+ (1ULL << VIRTIO_NET_F_MAC))
+
+#define VDPASIM_NET_VQ_NUM 2
+
+static char *macaddr;
+module_param(macaddr, charp, 0);
+MODULE_PARM_DESC(macaddr, "Ethernet MAC address");
+
+static u8 macaddr_buf[ETH_ALEN];
+
+static struct vdpasim *vdpasim_net_dev;
+
+static void vdpasim_net_work(struct work_struct *work)
+{
+ struct vdpasim *vdpasim = container_of(work, struct vdpasim, work);
+ struct vdpasim_virtqueue *txq = &vdpasim->vqs[1];
+ struct vdpasim_virtqueue *rxq = &vdpasim->vqs[0];
+ ssize_t read, write;
+ size_t total_write;
+ int pkts = 0;
+ int err;
+
+ spin_lock(&vdpasim->lock);
+
+ if (!(vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK))
+ goto out;
+
+ if (!txq->ready || !rxq->ready)
+ goto out;
+
+ while (true) {
+ total_write = 0;
+ err = vringh_getdesc_iotlb(&txq->vring, &txq->out_iov, NULL,
+ &txq->head, GFP_ATOMIC);
+ if (err <= 0)
+ break;
+
+ err = vringh_getdesc_iotlb(&rxq->vring, NULL, &rxq->in_iov,
+ &rxq->head, GFP_ATOMIC);
+ if (err <= 0) {
+ vringh_complete_iotlb(&txq->vring, txq->head, 0);
+ break;
+ }
+
+ while (true) {
+ read = vringh_iov_pull_iotlb(&txq->vring, &txq->out_iov,
+ vdpasim->buffer,
+ PAGE_SIZE);
+ if (read <= 0)
+ break;
+
+ write = vringh_iov_push_iotlb(&rxq->vring, &rxq->in_iov,
+ vdpasim->buffer, read);
+ if (write <= 0)
+ break;
+
+ total_write += write;
+ }
+
+ /* Make sure data is written before advancing index */
+ smp_wmb();
+
+ vringh_complete_iotlb(&txq->vring, txq->head, 0);
+ vringh_complete_iotlb(&rxq->vring, rxq->head, total_write);
+
+ /* Make sure used is visible before raising the interrupt. */
+ smp_wmb();
+
+ local_bh_disable();
+ if (vringh_need_notify_iotlb(&txq->vring) > 0)
+ vringh_notify(&txq->vring);
+ if (vringh_need_notify_iotlb(&rxq->vring) > 0)
+ vringh_notify(&rxq->vring);
+ local_bh_enable();
+
+ if (++pkts > 4) {
+ schedule_work(&vdpasim->work);
+ goto out;
+ }
+ }
+
+out:
+ spin_unlock(&vdpasim->lock);
+}
+
+static void vdpasim_net_get_config(struct vdpasim *vdpasim, void *config)
+{
+ struct virtio_net_config *net_config =
+ (struct virtio_net_config *)config;
+
+ net_config->mtu = cpu_to_vdpasim16(vdpasim, 1500);
+ net_config->status = cpu_to_vdpasim16(vdpasim, VIRTIO_NET_S_LINK_UP);
+ memcpy(net_config->mac, macaddr_buf, ETH_ALEN);
+}
+
+static int __init vdpasim_net_init(void)
+{
+ struct vdpasim_dev_attr dev_attr = {};
+ int ret;
+
+ if (macaddr) {
+ mac_pton(macaddr, macaddr_buf);
+ if (!is_valid_ether_addr(macaddr_buf)) {
+ ret = -EADDRNOTAVAIL;
+ goto out;
+ }
+ } else {
+ eth_random_addr(macaddr_buf);
+ }
+
+ dev_attr.id = VIRTIO_ID_NET;
+ dev_attr.supported_features = VDPASIM_NET_FEATURES;
+ dev_attr.nvqs = VDPASIM_NET_VQ_NUM;
+ dev_attr.config_size = sizeof(struct virtio_net_config);
+ dev_attr.get_config = vdpasim_net_get_config;
+ dev_attr.work_fn = vdpasim_net_work;
+ dev_attr.buffer_size = PAGE_SIZE;
+
+ vdpasim_net_dev = vdpasim_create(&dev_attr);
+ if (IS_ERR(vdpasim_net_dev)) {
+ ret = PTR_ERR(vdpasim_net_dev);
+ goto out;
+ }
+
+ ret = vdpa_register_device(&vdpasim_net_dev->vdpa);
+ if (ret)
+ goto put_dev;
+
+ return 0;
+
+put_dev:
+ put_device(&vdpasim_net_dev->vdpa.dev);
+out:
+ return ret;
+}
+
+static void __exit vdpasim_net_exit(void)
+{
+ struct vdpa_device *vdpa = &vdpasim_net_dev->vdpa;
+
+ vdpa_unregister_device(vdpa);
+}
+
+module_init(vdpasim_net_init);
+module_exit(vdpasim_net_exit);
+
+MODULE_VERSION(DRV_VERSION);
+MODULE_LICENSE(DRV_LICENSE);
+MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_DESCRIPTION(DRV_DESC);
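With the simulator split into a core and per-device modules, loading the networking variant is simply "modprobe vdpa_sim_net", optionally passing the macaddr parameter declared above (for example macaddr=52:54:00:12:34:56); when the parameter is omitted, vdpasim_net_init() falls back to eth_random_addr() as shown.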
diff --git a/drivers/vfio/mdev/mdev_core.c b/drivers/vfio/mdev/mdev_core.c
index b558d4cfd082..6de97d25a3f8 100644
--- a/drivers/vfio/mdev/mdev_core.c
+++ b/drivers/vfio/mdev/mdev_core.c
@@ -154,6 +154,10 @@ int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops)
if (!dev)
return -EINVAL;
+ /* Not mandatory, but its absence could be a problem */
+ if (!ops->request)
+ dev_info(dev, "Driver cannot be asked to release device\n");
+
mutex_lock(&parent_list_lock);
/* Check for duplicate */
diff --git a/drivers/vfio/mdev/vfio_mdev.c b/drivers/vfio/mdev/vfio_mdev.c
index 30964a4e0a28..b52eea128549 100644
--- a/drivers/vfio/mdev/vfio_mdev.c
+++ b/drivers/vfio/mdev/vfio_mdev.c
@@ -98,6 +98,18 @@ static int vfio_mdev_mmap(void *device_data, struct vm_area_struct *vma)
return parent->ops->mmap(mdev, vma);
}
+static void vfio_mdev_request(void *device_data, unsigned int count)
+{
+ struct mdev_device *mdev = device_data;
+ struct mdev_parent *parent = mdev->parent;
+
+ if (parent->ops->request)
+ parent->ops->request(mdev, count);
+ else if (count == 0)
+ dev_notice(mdev_dev(mdev),
+ "No mdev vendor driver request callback support, blocked until released by user\n");
+}
+
static const struct vfio_device_ops vfio_mdev_dev_ops = {
.name = "vfio-mdev",
.open = vfio_mdev_open,
@@ -106,6 +118,7 @@ static const struct vfio_device_ops vfio_mdev_dev_ops = {
.read = vfio_mdev_read,
.write = vfio_mdev_write,
.mmap = vfio_mdev_mmap,
+ .request = vfio_mdev_request,
};
static int vfio_mdev_probe(struct device *dev)
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index e6190173482c..706de3ef94bb 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -161,8 +161,6 @@ static void vfio_pci_probe_mmaps(struct vfio_pci_device *vdev)
int i;
struct vfio_pci_dummy_resource *dummy_res;
- INIT_LIST_HEAD(&vdev->dummy_resources_list);
-
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
int bar = i + PCI_STD_RESOURCES;
@@ -1635,8 +1633,8 @@ static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
mutex_unlock(&vdev->vma_lock);
- if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
- vma->vm_end - vma->vm_start, vma->vm_page_prot))
+ if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+ vma->vm_end - vma->vm_start, vma->vm_page_prot))
ret = VM_FAULT_SIGBUS;
up_out:
@@ -1966,6 +1964,7 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mutex_init(&vdev->igate);
spin_lock_init(&vdev->irqlock);
mutex_init(&vdev->ioeventfds_lock);
+ INIT_LIST_HEAD(&vdev->dummy_resources_list);
INIT_LIST_HEAD(&vdev->ioeventfds_list);
mutex_init(&vdev->vma_lock);
INIT_LIST_HEAD(&vdev->vma_list);
diff --git a/drivers/vfio/pci/vfio_pci_nvlink2.c b/drivers/vfio/pci/vfio_pci_nvlink2.c
index 65c61710c0e9..9adcf6a8f888 100644
--- a/drivers/vfio/pci/vfio_pci_nvlink2.c
+++ b/drivers/vfio/pci/vfio_pci_nvlink2.c
@@ -231,7 +231,7 @@ int vfio_pci_nvdia_v100_nvlink2_init(struct vfio_pci_device *vdev)
return -EINVAL;
if (of_property_read_u32(npu_node, "memory-region", &mem_phandle))
- return -EINVAL;
+ return -ENODEV;
mem_node = of_find_node_by_phandle(mem_phandle);
if (!mem_node)
@@ -393,7 +393,7 @@ int vfio_pci_ibm_npu2_init(struct vfio_pci_device *vdev)
int ret;
struct vfio_pci_npu2_data *data;
struct device_node *nvlink_dn;
- u32 nvlink_index = 0;
+ u32 nvlink_index = 0, mem_phandle = 0;
struct pci_dev *npdev = vdev->pdev;
struct device_node *npu_node = pci_device_to_OF_node(npdev);
struct pci_controller *hose = pci_bus_to_host(npdev->bus);
@@ -408,6 +408,9 @@ int vfio_pci_ibm_npu2_init(struct vfio_pci_device *vdev)
if (!pnv_pci_get_gpu_dev(vdev->pdev))
return -ENODEV;
+ if (of_property_read_u32(npu_node, "memory-region", &mem_phandle))
+ return -ENODEV;
+
/*
* NPU2 normally has 8 ATSD registers (for concurrency) and 6 links
* so we can allocate one register per link, using nvlink index as
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 2151bc7f87ab..4ad8a35667a7 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -2331,6 +2331,24 @@ int vfio_unregister_notifier(struct device *dev, enum vfio_notify_type type,
}
EXPORT_SYMBOL(vfio_unregister_notifier);
+struct iommu_domain *vfio_group_iommu_domain(struct vfio_group *group)
+{
+ struct vfio_container *container;
+ struct vfio_iommu_driver *driver;
+
+ if (!group)
+ return ERR_PTR(-EINVAL);
+
+ container = group->container;
+ driver = container->iommu_driver;
+ if (likely(driver && driver->ops->group_iommu_domain))
+ return driver->ops->group_iommu_domain(container->iommu_data,
+ group->iommu_group);
+
+ return ERR_PTR(-ENOTTY);
+}
+EXPORT_SYMBOL_GPL(vfio_group_iommu_domain);
+
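A minimal sketch of how an external consumer could use the new export, assuming it already holds a valid struct vfio_group reference (the group lookup and error handling are elided and hypothetical; only vfio_group_iommu_domain() and iommu_iova_to_phys() are real APIs):

#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/vfio.h>

/* Translate an IOVA through the group's backing domain (sketch). */
static phys_addr_t sketch_iova_to_phys(struct vfio_group *group,
				       dma_addr_t iova)
{
	struct iommu_domain *domain = vfio_group_iommu_domain(group);

	if (IS_ERR(domain))
		return 0;

	return iommu_iova_to_phys(domain, iova);
}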
/**
* Module/class support
*/
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 67e827638995..0b4dedaa9128 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -2980,6 +2980,29 @@ static int vfio_iommu_type1_dma_rw(void *iommu_data, dma_addr_t user_iova,
return ret;
}
+static struct iommu_domain *
+vfio_iommu_type1_group_iommu_domain(void *iommu_data,
+ struct iommu_group *iommu_group)
+{
+ struct iommu_domain *domain = ERR_PTR(-ENODEV);
+ struct vfio_iommu *iommu = iommu_data;
+ struct vfio_domain *d;
+
+ if (!iommu || !iommu_group)
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&iommu->lock);
+ list_for_each_entry(d, &iommu->domain_list, next) {
+ if (find_iommu_group(d, iommu_group)) {
+ domain = d->domain;
+ break;
+ }
+ }
+ mutex_unlock(&iommu->lock);
+
+ return domain;
+}
+
static const struct vfio_iommu_driver_ops vfio_iommu_driver_ops_type1 = {
.name = "vfio-iommu-type1",
.owner = THIS_MODULE,
@@ -2993,6 +3016,7 @@ static const struct vfio_iommu_driver_ops vfio_iommu_driver_ops_type1 = {
.register_notifier = vfio_iommu_type1_register_notifier,
.unregister_notifier = vfio_iommu_type1_unregister_notifier,
.dma_rw = vfio_iommu_type1_dma_rw,
+ .group_iommu_domain = vfio_iommu_type1_group_iommu_domain,
};
static int __init vfio_iommu_type1_init(void)
diff --git a/drivers/vfio/virqfd.c b/drivers/vfio/virqfd.c
index 997cb5d0a657..414e98d82b02 100644
--- a/drivers/vfio/virqfd.c
+++ b/drivers/vfio/virqfd.c
@@ -46,6 +46,9 @@ static int virqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void
__poll_t flags = key_to_poll(key);
if (flags & EPOLLIN) {
+ u64 cnt;
+ eventfd_ctx_do_read(virqfd->eventfd, &cnt);
+
/* An event has been signaled, call function */
if ((!virqfd->handler ||
virqfd->handler(virqfd->opaque, virqfd->data)) &&
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 531a00d703cd..c8784dfafdd7 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -863,6 +863,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
size_t len, total_len = 0;
int err;
struct vhost_net_ubuf_ref *ubufs;
+ struct ubuf_info *ubuf;
bool zcopy_used;
int sent_pkts = 0;
@@ -895,9 +896,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
/* use msg_control to pass vhost zerocopy ubuf info to skb */
if (zcopy_used) {
- struct ubuf_info *ubuf;
ubuf = nvq->ubuf_info + nvq->upend_idx;
-
vq->heads[nvq->upend_idx].id = cpu_to_vhost32(vq, head);
vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS;
ubuf->callback = vhost_zerocopy_callback;
@@ -927,7 +926,8 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
err = sock->ops->sendmsg(sock, &msg, len);
if (unlikely(err < 0)) {
if (zcopy_used) {
- vhost_net_ubuf_put(ubufs);
+ if (vq->heads[ubuf->desc].len == VHOST_DMA_IN_PROGRESS)
+ vhost_net_ubuf_put(ubufs);
nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
% UIO_MAXIOV;
}
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 6ff8a5096691..4ce9f00ae10e 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1643,7 +1643,8 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
if (!vhost_vq_is_setup(vq))
continue;
- if (vhost_scsi_setup_vq_cmds(vq, vq->num))
+ ret = vhost_scsi_setup_vq_cmds(vq, vq->num);
+ if (ret)
goto destroy_vq_cmds;
}
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index 29ed4173f04e..ef688c8c0e0e 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -245,14 +245,10 @@ static long vhost_vdpa_set_config(struct vhost_vdpa *v,
return -EFAULT;
if (vhost_vdpa_config_validate(v, &config))
return -EINVAL;
- buf = kvzalloc(config.len, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- if (copy_from_user(buf, c->buf, config.len)) {
- kvfree(buf);
- return -EFAULT;
- }
+ buf = vmemdup_user(c->buf, config.len);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
ops->set_config(vdpa, config.off, buf, config.len);
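vmemdup_user() folds the kvzalloc() + copy_from_user() pair removed above into a single call; the general shape of the pattern is (uptr/len are placeholders, and the release after use must be kvfree() because the helper allocates with kvmalloc()):

	buf = vmemdup_user(uptr, len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);
	/* ... consume buf ... */
	kvfree(buf);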
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index a483cec31d5c..5e78fb719602 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -30,7 +30,12 @@
#define VHOST_VSOCK_PKT_WEIGHT 256
enum {
- VHOST_VSOCK_FEATURES = VHOST_FEATURES,
+ VHOST_VSOCK_FEATURES = VHOST_FEATURES |
+ (1ULL << VIRTIO_F_ACCESS_PLATFORM)
+};
+
+enum {
+ VHOST_VSOCK_BACKEND_FEATURES = (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)
};
/* Used to track all the vhost_vsock instances on the system. */
@@ -94,6 +99,9 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
if (!vhost_vq_get_backend(vq))
goto out;
+ if (!vq_meta_prefetch(vq))
+ goto out;
+
/* Avoid further vmexits, we're already processing the virtqueue */
vhost_disable_notify(&vsock->dev, vq);
@@ -449,6 +457,9 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
if (!vhost_vq_get_backend(vq))
goto out;
+ if (!vq_meta_prefetch(vq))
+ goto out;
+
vhost_disable_notify(&vsock->dev, vq);
do {
u32 len;
@@ -766,8 +777,12 @@ static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
mutex_lock(&vsock->dev.mutex);
if ((features & (1 << VHOST_F_LOG_ALL)) &&
!vhost_log_access_ok(&vsock->dev)) {
- mutex_unlock(&vsock->dev.mutex);
- return -EFAULT;
+ goto err;
+ }
+
+ if ((features & (1ULL << VIRTIO_F_ACCESS_PLATFORM))) {
+ if (vhost_init_device_iotlb(&vsock->dev, true))
+ goto err;
}
for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
@@ -778,6 +793,10 @@ static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
}
mutex_unlock(&vsock->dev.mutex);
return 0;
+
+err:
+ mutex_unlock(&vsock->dev.mutex);
+ return -EFAULT;
}
static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
@@ -811,6 +830,18 @@ static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
if (copy_from_user(&features, argp, sizeof(features)))
return -EFAULT;
return vhost_vsock_set_features(vsock, features);
+ case VHOST_GET_BACKEND_FEATURES:
+ features = VHOST_VSOCK_BACKEND_FEATURES;
+ if (copy_to_user(argp, &features, sizeof(features)))
+ return -EFAULT;
+ return 0;
+ case VHOST_SET_BACKEND_FEATURES:
+ if (copy_from_user(&features, argp, sizeof(features)))
+ return -EFAULT;
+ if (features & ~VHOST_VSOCK_BACKEND_FEATURES)
+ return -EOPNOTSUPP;
+ vhost_set_backend_features(&vsock->dev, features);
+ return 0;
default:
mutex_lock(&vsock->dev.mutex);
r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
@@ -823,6 +854,34 @@ static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
}
}
+static ssize_t vhost_vsock_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+ struct file *file = iocb->ki_filp;
+ struct vhost_vsock *vsock = file->private_data;
+ struct vhost_dev *dev = &vsock->dev;
+ int noblock = file->f_flags & O_NONBLOCK;
+
+ return vhost_chr_read_iter(dev, to, noblock);
+}
+
+static ssize_t vhost_vsock_chr_write_iter(struct kiocb *iocb,
+ struct iov_iter *from)
+{
+ struct file *file = iocb->ki_filp;
+ struct vhost_vsock *vsock = file->private_data;
+ struct vhost_dev *dev = &vsock->dev;
+
+ return vhost_chr_write_iter(dev, from);
+}
+
+static __poll_t vhost_vsock_chr_poll(struct file *file, poll_table *wait)
+{
+ struct vhost_vsock *vsock = file->private_data;
+ struct vhost_dev *dev = &vsock->dev;
+
+ return vhost_chr_poll(file, dev, wait);
+}
+
static const struct file_operations vhost_vsock_fops = {
.owner = THIS_MODULE,
.open = vhost_vsock_dev_open,
@@ -830,6 +889,9 @@ static const struct file_operations vhost_vsock_fops = {
.llseek = noop_llseek,
.unlocked_ioctl = vhost_vsock_dev_ioctl,
.compat_ioctl = compat_ptr_ioctl,
+ .read_iter = vhost_vsock_chr_read_iter,
+ .write_iter = vhost_vsock_chr_write_iter,
+ .poll = vhost_vsock_chr_poll,
};
static struct miscdevice vhost_vsock_misc = {
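A hedged user-space sketch of driving the new backend-feature ioctls on an open /dev/vhost-vsock descriptor (fd setup and error reporting omitted; the ioctl names and the VHOST_BACKEND_F_IOTLB_MSG_V2 bit are the existing UAPI from <linux/vhost.h>):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

/* Negotiate IOTLB message support on an already-opened vhost-vsock fd. */
static int negotiate_iotlb(int vhost_fd)
{
	uint64_t features;

	if (ioctl(vhost_fd, VHOST_GET_BACKEND_FEATURES, &features) < 0)
		return -1;

	features &= 1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2;
	return ioctl(vhost_fd, VHOST_SET_BACKEND_FEATURES, &features);
}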
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index dfc760830eb9..e48fded3e414 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -230,8 +230,7 @@ static int pwm_backlight_parse_dt(struct device *dev,
struct platform_pwm_backlight_data *data)
{
struct device_node *node = dev->of_node;
- unsigned int num_levels = 0;
- unsigned int levels_count;
+ unsigned int num_levels;
unsigned int num_steps = 0;
struct property *prop;
unsigned int *table;
@@ -260,12 +259,11 @@ static int pwm_backlight_parse_dt(struct device *dev,
if (!prop)
return 0;
- data->max_brightness = length / sizeof(u32);
+ num_levels = length / sizeof(u32);
/* read brightness levels from DT property */
- if (data->max_brightness > 0) {
- size_t size = sizeof(*data->levels) * data->max_brightness;
- unsigned int i, j, n = 0;
+ if (num_levels > 0) {
+ size_t size = sizeof(*data->levels) * num_levels;
data->levels = devm_kzalloc(dev, size, GFP_KERNEL);
if (!data->levels)
@@ -273,7 +271,7 @@ static int pwm_backlight_parse_dt(struct device *dev,
ret = of_property_read_u32_array(node, "brightness-levels",
data->levels,
- data->max_brightness);
+ num_levels);
if (ret < 0)
return ret;
@@ -298,7 +296,13 @@ static int pwm_backlight_parse_dt(struct device *dev,
* between two points.
*/
if (num_steps) {
- if (data->max_brightness < 2) {
+ unsigned int num_input_levels = num_levels;
+ unsigned int i;
+ u32 x1, x2, x, dx;
+ u32 y1, y2;
+ s64 dy;
+
+ if (num_input_levels < 2) {
dev_err(dev, "can't interpolate\n");
return -EINVAL;
}
@@ -308,14 +312,7 @@ static int pwm_backlight_parse_dt(struct device *dev,
* taking in consideration the number of interpolated
* steps between two levels.
*/
- for (i = 0; i < data->max_brightness - 1; i++) {
- if ((data->levels[i + 1] - data->levels[i]) /
- num_steps)
- num_levels += num_steps;
- else
- num_levels++;
- }
- num_levels++;
+ num_levels = (num_input_levels - 1) * num_steps + 1;
dev_dbg(dev, "new number of brightness levels: %d\n",
num_levels);
@@ -327,24 +324,25 @@ static int pwm_backlight_parse_dt(struct device *dev,
table = devm_kzalloc(dev, size, GFP_KERNEL);
if (!table)
return -ENOMEM;
-
- /* Fill the interpolated table. */
- levels_count = 0;
- for (i = 0; i < data->max_brightness - 1; i++) {
- value = data->levels[i];
- n = (data->levels[i + 1] - value) / num_steps;
- if (n > 0) {
- for (j = 0; j < num_steps; j++) {
- table[levels_count] = value;
- value += n;
- levels_count++;
- }
- } else {
- table[levels_count] = data->levels[i];
- levels_count++;
+ /*
+ * Fill the interpolated table[x] = y by drawing lines
+ * between each pair of points (x1, y1) and (x2, y2).
+ */
+ dx = num_steps;
+ for (i = 0; i < num_input_levels - 1; i++) {
+ x1 = i * dx;
+ x2 = x1 + dx;
+ y1 = data->levels[i];
+ y2 = data->levels[i + 1];
+ dy = (s64)y2 - y1;
+
+ for (x = x1; x < x2; x++) {
+ table[x] = y1 +
+ div_s64(dy * (x - x1), dx);
}
}
- table[levels_count] = data->levels[i];
+ /* Fill in the last point, since no line starts here. */
+ table[x2] = y2;
/*
* As we use interpolation lets remove current
@@ -353,15 +351,9 @@ static int pwm_backlight_parse_dt(struct device *dev,
*/
devm_kfree(dev, data->levels);
data->levels = table;
-
- /*
- * Reassign max_brightness value to the new total number
- * of brightness levels.
- */
- data->max_brightness = num_levels;
}
- data->max_brightness--;
+ data->max_brightness = num_levels - 1;
}
return 0;
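As a worked example of the rewritten interpolation (inputs assumed, not taken from the patch): with brightness-levels = <0 255> and num-interpolated-steps = 4, num_levels = (2 - 1) * 4 + 1 = 5 and dx = 4, dy = 255; the loop produces table[0..3] = {0, 63, 127, 191} under the truncating div_s64(), the explicit final assignment sets table[4] = 255, and max_brightness ends up as 5 - 1 = 4.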
diff --git a/drivers/video/fbdev/geode/lxfb_ops.c b/drivers/video/fbdev/geode/lxfb_ops.c
index b3a041fce570..32baaf59fcf7 100644
--- a/drivers/video/fbdev/geode/lxfb_ops.c
+++ b/drivers/video/fbdev/geode/lxfb_ops.c
@@ -682,6 +682,7 @@ static void lx_restore_display_ctlr(struct lxfb_par *par)
case DC_DV_CTL:
/* set all ram to dirty */
write_dc(par, i, par->dc[i] | DC_DV_CTL_CLEAR_DV_RAM);
+ break;
case DC_RSVD_1:
case DC_RSVD_2:
diff --git a/drivers/video/fbdev/pm2fb.c b/drivers/video/fbdev/pm2fb.c
index 0642555289e0..27893fa139b0 100644
--- a/drivers/video/fbdev/pm2fb.c
+++ b/drivers/video/fbdev/pm2fb.c
@@ -239,6 +239,7 @@ static u32 to3264(u32 timing, int bpp, int is64)
fallthrough;
case 16:
timing >>= 1;
+ fallthrough;
case 32:
break;
}
diff --git a/drivers/video/fbdev/ps3fb.c b/drivers/video/fbdev/ps3fb.c
index 203c254f8f6c..2fe08b67eda7 100644
--- a/drivers/video/fbdev/ps3fb.c
+++ b/drivers/video/fbdev/ps3fb.c
@@ -1208,7 +1208,7 @@ err:
return retval;
}
-static int ps3fb_shutdown(struct ps3_system_bus_device *dev)
+static void ps3fb_shutdown(struct ps3_system_bus_device *dev)
{
struct fb_info *info = ps3_system_bus_get_drvdata(dev);
u64 xdr_lpar = ps3_mm_phys_to_lpar(__pa(ps3fb_videomemory.address));
@@ -1241,8 +1241,6 @@ static int ps3fb_shutdown(struct ps3_system_bus_device *dev)
lv1_gpu_memory_free(ps3fb.memory_handle);
ps3_close_hv_device(dev);
dev_dbg(&dev->core, " <- %s:%d\n", __func__, __LINE__);
-
- return 0;
}
static struct ps3_system_bus_driver ps3fb_driver = {
diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c
index 181e2f18beae..9fc9ec4a25f5 100644
--- a/drivers/virtio/virtio_mem.c
+++ b/drivers/virtio/virtio_mem.c
@@ -27,20 +27,74 @@ static bool unplug_online = true;
module_param(unplug_online, bool, 0644);
MODULE_PARM_DESC(unplug_online, "Try to unplug online memory");
-enum virtio_mem_mb_state {
+static bool force_bbm;
+module_param(force_bbm, bool, 0444);
+MODULE_PARM_DESC(force_bbm,
+ "Force Big Block Mode. Default is 0 (auto-selection)");
+
+static unsigned long bbm_block_size;
+module_param(bbm_block_size, ulong, 0444);
+MODULE_PARM_DESC(bbm_block_size,
+ "Big Block size in bytes. Default is 0 (auto-detection).");
+
+static bool bbm_safe_unplug = true;
+module_param(bbm_safe_unplug, bool, 0444);
+MODULE_PARM_DESC(bbm_safe_unplug,
+ "Use a safe unplug mechanism in BBM, avoiding long/endless loops");
+
+/*
+ * virtio-mem currently supports the following modes of operation:
+ *
+ * * Sub Block Mode (SBM): A Linux memory block spans 2..X subblocks (SB). The
+ * size of a Sub Block (SB) is determined based on the device block size, the
+ * pageblock size, and the maximum allocation granularity of the buddy.
+ * Subblocks within a Linux memory block might either be plugged or unplugged.
+ * Memory is added/removed to Linux MM in Linux memory block granularity.
+ *
+ * * Big Block Mode (BBM): A Big Block (BB) spans 1..X Linux memory blocks.
+ * Memory is added/removed to Linux MM in Big Block granularity.
+ *
+ * The mode is determined automatically based on the Linux memory block size
+ * and the device block size.
+ *
+ * User space / core MM (auto onlining) is responsible for onlining added
+ * Linux memory blocks - and for selecting a zone. Linux memory blocks are
+ * always onlined separately, and all memory within a Linux memory block is
+ * onlined to the same zone - virtio-mem relies on this behavior.
+ */
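A rough sketch of the auto-selection rule just described (illustrative only; the real derivation happens during device initialization, and max3() is a stand-in helper):

#include <stdint.h>
#include <stdbool.h>

static uint64_t max3(uint64_t a, uint64_t b, uint64_t c)
{
	uint64_t m = a > b ? a : b;

	return m > c ? m : c;
}

/*
 * SBM needs a Linux memory block to span at least two subblocks, where
 * the subblock size is bounded below by the device block size, the
 * pageblock size and the buddy's maximum allocation granularity;
 * otherwise (or when forced) the driver runs in BBM.
 */
static bool selects_sbm(uint64_t device_block_size, uint64_t pageblock_size,
			uint64_t max_buddy_alloc, uint64_t memory_block_size,
			bool force_bbm)
{
	uint64_t sb_size = max3(device_block_size, pageblock_size,
				max_buddy_alloc);

	return !force_bbm && memory_block_size % sb_size == 0 &&
	       memory_block_size / sb_size >= 2;
}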
+
+/*
+ * State of a Linux memory block in SBM.
+ */
+enum virtio_mem_sbm_mb_state {
/* Unplugged, not added to Linux. Can be reused later. */
- VIRTIO_MEM_MB_STATE_UNUSED = 0,
+ VIRTIO_MEM_SBM_MB_UNUSED = 0,
/* (Partially) plugged, not added to Linux. Error on add_memory(). */
- VIRTIO_MEM_MB_STATE_PLUGGED,
+ VIRTIO_MEM_SBM_MB_PLUGGED,
/* Fully plugged, fully added to Linux, offline. */
- VIRTIO_MEM_MB_STATE_OFFLINE,
+ VIRTIO_MEM_SBM_MB_OFFLINE,
/* Partially plugged, fully added to Linux, offline. */
- VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL,
+ VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL,
/* Fully plugged, fully added to Linux, online. */
- VIRTIO_MEM_MB_STATE_ONLINE,
+ VIRTIO_MEM_SBM_MB_ONLINE,
/* Partially plugged, fully added to Linux, online. */
- VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL,
- VIRTIO_MEM_MB_STATE_COUNT
+ VIRTIO_MEM_SBM_MB_ONLINE_PARTIAL,
+ VIRTIO_MEM_SBM_MB_COUNT
+};
+
+/*
+ * State of a Big Block (BB) in BBM, covering 1..X Linux memory blocks.
+ */
+enum virtio_mem_bbm_bb_state {
+ /* Unplugged, not added to Linux. Can be reused later. */
+ VIRTIO_MEM_BBM_BB_UNUSED = 0,
+ /* Plugged, not added to Linux. Error on add_memory(). */
+ VIRTIO_MEM_BBM_BB_PLUGGED,
+ /* Plugged and added to Linux. */
+ VIRTIO_MEM_BBM_BB_ADDED,
+ /* All online parts are fake-offline, ready to remove. */
+ VIRTIO_MEM_BBM_BB_FAKE_OFFLINE,
+ VIRTIO_MEM_BBM_BB_COUNT
};
struct virtio_mem {
@@ -51,6 +105,7 @@ struct virtio_mem {
/* Workqueue that processes the plug/unplug requests. */
struct work_struct wq;
+ atomic_t wq_active;
atomic_t config_changed;
/* Virtqueue for guest->host requests. */
@@ -70,27 +125,13 @@ struct virtio_mem {
/* The device block size (for communicating with the device). */
uint64_t device_block_size;
- /* The translated node id. NUMA_NO_NODE in case not specified. */
+ /* The determined node id for all memory of the device. */
int nid;
/* Physical start address of the memory region. */
uint64_t addr;
/* Maximum region size in bytes. */
uint64_t region_size;
- /* The subblock size. */
- uint64_t subblock_size;
- /* The number of subblocks per memory block. */
- uint32_t nb_sb_per_mb;
-
- /* Id of the first memory block of this device. */
- unsigned long first_mb_id;
- /* Id of the last memory block of this device. */
- unsigned long last_mb_id;
- /* Id of the last usable memory block of this device. */
- unsigned long last_usable_mb_id;
- /* Id of the next memory bock to prepare when needed. */
- unsigned long next_mb_id;
-
/* The parent resource for all memory added via this device. */
struct resource *parent_resource;
/*
@@ -99,31 +140,79 @@ struct virtio_mem {
*/
const char *resource_name;
- /* Summary of all memory block states. */
- unsigned long nb_mb_state[VIRTIO_MEM_MB_STATE_COUNT];
-#define VIRTIO_MEM_NB_OFFLINE_THRESHOLD 10
-
- /*
- * One byte state per memory block.
- *
- * Allocated via vmalloc(). When preparing new blocks, resized
- * (alloc+copy+free) when needed (crossing pages with the next mb).
- * (when crossing pages).
- *
- * With 128MB memory blocks, we have states for 512GB of memory in one
- * page.
- */
- uint8_t *mb_state;
-
/*
- * $nb_sb_per_mb bit per memory block. Handled similar to mb_state.
- *
- * With 4MB subblocks, we manage 128GB of memory in one page.
+ * We don't want to add too much memory if it's not getting onlined,
+ * to avoid running OOM. Besides this threshold, we allow having at
+ * least two offline blocks at a time (whichever is bigger).
*/
- unsigned long *sb_bitmap;
+#define VIRTIO_MEM_DEFAULT_OFFLINE_THRESHOLD (1024 * 1024 * 1024)
+ atomic64_t offline_size;
+ uint64_t offline_threshold;
+
+ /* If set, the driver is in SBM, otherwise in BBM. */
+ bool in_sbm;
+
+ union {
+ struct {
+ /* Id of the first memory block of this device. */
+ unsigned long first_mb_id;
+ /* Id of the last usable memory block of this device. */
+ unsigned long last_usable_mb_id;
+ /* Id of the next memory block to prepare when needed. */
+ unsigned long next_mb_id;
+
+ /* The subblock size. */
+ uint64_t sb_size;
+ /* The number of subblocks per Linux memory block. */
+ uint32_t sbs_per_mb;
+
+ /* Summary of all memory block states. */
+ unsigned long mb_count[VIRTIO_MEM_SBM_MB_COUNT];
+
+ /*
+ * One byte state per memory block. Allocated via
+ * vmalloc(). Resized (alloc+copy+free) on demand.
+ *
+ * With 128 MiB memory blocks, we have states for 512
+ * GiB of memory in one 4 KiB page.
+ */
+ uint8_t *mb_states;
+
+ /*
+ * Bitmap: one bit per subblock. Allocated similar to
+ * sbm.mb_states.
+ *
+ * A set bit means the corresponding subblock is
+ * plugged, otherwise it's unblocked.
+ *
+ * With 4 MiB subblocks, we manage 128 GiB of memory
+ * in one 4 KiB page.
+ */
+ unsigned long *sb_states;
+ } sbm;
+
+ struct {
+ /* Id of the first big block of this device. */
+ unsigned long first_bb_id;
+ /* Id of the last usable big block of this device. */
+ unsigned long last_usable_bb_id;
+ /* Id of the next big block to prepare when needed. */
+ unsigned long next_bb_id;
+
+ /* Summary of all big block states. */
+ unsigned long bb_count[VIRTIO_MEM_BBM_BB_COUNT];
+
+ /* One byte state per big block. See sbm.mb_states. */
+ uint8_t *bb_states;
+
+ /* The block size used for plugging/adding/removing. */
+ uint64_t bb_size;
+ } bbm;
+ };
/*
- * Mutex that protects the nb_mb_state, mb_state, and sb_bitmap.
+ * Mutex that protects the sbm.mb_count, sbm.mb_states,
+ * sbm.sb_states, bbm.bb_count, and bbm.bb_states
*
* When this lock is held the pointers can't change, ONLINE and
* OFFLINE blocks can't change the state and no subblocks will get
@@ -160,6 +249,11 @@ static DEFINE_MUTEX(virtio_mem_mutex);
static LIST_HEAD(virtio_mem_devices);
static void virtio_mem_online_page_cb(struct page *page, unsigned int order);
+static void virtio_mem_fake_offline_going_offline(unsigned long pfn,
+ unsigned long nr_pages);
+static void virtio_mem_fake_offline_cancel_offline(unsigned long pfn,
+ unsigned long nr_pages);
+static void virtio_mem_retry(struct virtio_mem *vm);
/*
* Register a virtio-mem device so it will be considered for the online_page
@@ -213,6 +307,24 @@ static unsigned long virtio_mem_mb_id_to_phys(unsigned long mb_id)
}
/*
+ * Calculate the big block id of a given address.
+ */
+static unsigned long virtio_mem_phys_to_bb_id(struct virtio_mem *vm,
+ uint64_t addr)
+{
+ return addr / vm->bbm.bb_size;
+}
+
+/*
+ * Calculate the physical start address of a given big block id.
+ */
+static uint64_t virtio_mem_bb_id_to_phys(struct virtio_mem *vm,
+ unsigned long bb_id)
+{
+ return bb_id * vm->bbm.bb_size;
+}
+
+/*
* Calculate the subblock id of a given address.
*/
static unsigned long virtio_mem_phys_to_sb_id(struct virtio_mem *vm,
@@ -221,89 +333,164 @@ static unsigned long virtio_mem_phys_to_sb_id(struct virtio_mem *vm,
const unsigned long mb_id = virtio_mem_phys_to_mb_id(addr);
const unsigned long mb_addr = virtio_mem_mb_id_to_phys(mb_id);
- return (addr - mb_addr) / vm->subblock_size;
+ return (addr - mb_addr) / vm->sbm.sb_size;
+}
+
+/*
+ * Set the state of a big block, taking care of the state counter.
+ */
+static void virtio_mem_bbm_set_bb_state(struct virtio_mem *vm,
+ unsigned long bb_id,
+ enum virtio_mem_bbm_bb_state state)
+{
+ const unsigned long idx = bb_id - vm->bbm.first_bb_id;
+ enum virtio_mem_bbm_bb_state old_state;
+
+ old_state = vm->bbm.bb_states[idx];
+ vm->bbm.bb_states[idx] = state;
+
+ BUG_ON(vm->bbm.bb_count[old_state] == 0);
+ vm->bbm.bb_count[old_state]--;
+ vm->bbm.bb_count[state]++;
+}
+
+/*
+ * Get the state of a big block.
+ */
+static enum virtio_mem_bbm_bb_state virtio_mem_bbm_get_bb_state(struct virtio_mem *vm,
+ unsigned long bb_id)
+{
+ return vm->bbm.bb_states[bb_id - vm->bbm.first_bb_id];
+}
+
+/*
+ * Prepare the big block state array for the next big block.
+ */
+static int virtio_mem_bbm_bb_states_prepare_next_bb(struct virtio_mem *vm)
+{
+ unsigned long old_bytes = vm->bbm.next_bb_id - vm->bbm.first_bb_id;
+ unsigned long new_bytes = old_bytes + 1;
+ int old_pages = PFN_UP(old_bytes);
+ int new_pages = PFN_UP(new_bytes);
+ uint8_t *new_array;
+
+ if (vm->bbm.bb_states && old_pages == new_pages)
+ return 0;
+
+ new_array = vzalloc(new_pages * PAGE_SIZE);
+ if (!new_array)
+ return -ENOMEM;
+
+ mutex_lock(&vm->hotplug_mutex);
+ if (vm->bbm.bb_states)
+ memcpy(new_array, vm->bbm.bb_states, old_pages * PAGE_SIZE);
+ vfree(vm->bbm.bb_states);
+ vm->bbm.bb_states = new_array;
+ mutex_unlock(&vm->hotplug_mutex);
+
+ return 0;
}
+#define virtio_mem_bbm_for_each_bb(_vm, _bb_id, _state) \
+ for (_bb_id = _vm->bbm.first_bb_id; \
+ _bb_id < _vm->bbm.next_bb_id && _vm->bbm.bb_count[_state]; \
+ _bb_id++) \
+ if (virtio_mem_bbm_get_bb_state(_vm, _bb_id) == _state)
+
+#define virtio_mem_bbm_for_each_bb_rev(_vm, _bb_id, _state) \
+ for (_bb_id = _vm->bbm.next_bb_id - 1; \
+ _bb_id >= vm->bbm.first_bb_id && _vm->bbm.bb_count[_state]; \
+ _bb_id--) \
+ if (virtio_mem_bbm_get_bb_state(_vm, _bb_id) == _state)
+
/*
* Set the state of a memory block, taking care of the state counter.
*/
-static void virtio_mem_mb_set_state(struct virtio_mem *vm, unsigned long mb_id,
- enum virtio_mem_mb_state state)
+static void virtio_mem_sbm_set_mb_state(struct virtio_mem *vm,
+ unsigned long mb_id, uint8_t state)
{
- const unsigned long idx = mb_id - vm->first_mb_id;
- enum virtio_mem_mb_state old_state;
+ const unsigned long idx = mb_id - vm->sbm.first_mb_id;
+ uint8_t old_state;
- old_state = vm->mb_state[idx];
- vm->mb_state[idx] = state;
+ old_state = vm->sbm.mb_states[idx];
+ vm->sbm.mb_states[idx] = state;
- BUG_ON(vm->nb_mb_state[old_state] == 0);
- vm->nb_mb_state[old_state]--;
- vm->nb_mb_state[state]++;
+ BUG_ON(vm->sbm.mb_count[old_state] == 0);
+ vm->sbm.mb_count[old_state]--;
+ vm->sbm.mb_count[state]++;
}
/*
* Get the state of a memory block.
*/
-static enum virtio_mem_mb_state virtio_mem_mb_get_state(struct virtio_mem *vm,
- unsigned long mb_id)
+static uint8_t virtio_mem_sbm_get_mb_state(struct virtio_mem *vm,
+ unsigned long mb_id)
{
- const unsigned long idx = mb_id - vm->first_mb_id;
+ const unsigned long idx = mb_id - vm->sbm.first_mb_id;
- return vm->mb_state[idx];
+ return vm->sbm.mb_states[idx];
}
/*
* Prepare the state array for the next memory block.
*/
-static int virtio_mem_mb_state_prepare_next_mb(struct virtio_mem *vm)
+static int virtio_mem_sbm_mb_states_prepare_next_mb(struct virtio_mem *vm)
{
- unsigned long old_bytes = vm->next_mb_id - vm->first_mb_id + 1;
- unsigned long new_bytes = vm->next_mb_id - vm->first_mb_id + 2;
- int old_pages = PFN_UP(old_bytes);
- int new_pages = PFN_UP(new_bytes);
- uint8_t *new_mb_state;
+ int old_pages = PFN_UP(vm->sbm.next_mb_id - vm->sbm.first_mb_id);
+ int new_pages = PFN_UP(vm->sbm.next_mb_id - vm->sbm.first_mb_id + 1);
+ uint8_t *new_array;
- if (vm->mb_state && old_pages == new_pages)
+ if (vm->sbm.mb_states && old_pages == new_pages)
return 0;
- new_mb_state = vzalloc(new_pages * PAGE_SIZE);
- if (!new_mb_state)
+ new_array = vzalloc(new_pages * PAGE_SIZE);
+ if (!new_array)
return -ENOMEM;
mutex_lock(&vm->hotplug_mutex);
- if (vm->mb_state)
- memcpy(new_mb_state, vm->mb_state, old_pages * PAGE_SIZE);
- vfree(vm->mb_state);
- vm->mb_state = new_mb_state;
+ if (vm->sbm.mb_states)
+ memcpy(new_array, vm->sbm.mb_states, old_pages * PAGE_SIZE);
+ vfree(vm->sbm.mb_states);
+ vm->sbm.mb_states = new_array;
mutex_unlock(&vm->hotplug_mutex);
return 0;
}
-#define virtio_mem_for_each_mb_state(_vm, _mb_id, _state) \
- for (_mb_id = _vm->first_mb_id; \
- _mb_id < _vm->next_mb_id && _vm->nb_mb_state[_state]; \
+#define virtio_mem_sbm_for_each_mb(_vm, _mb_id, _state) \
+ for (_mb_id = _vm->sbm.first_mb_id; \
+ _mb_id < _vm->sbm.next_mb_id && _vm->sbm.mb_count[_state]; \
_mb_id++) \
- if (virtio_mem_mb_get_state(_vm, _mb_id) == _state)
+ if (virtio_mem_sbm_get_mb_state(_vm, _mb_id) == _state)
-#define virtio_mem_for_each_mb_state_rev(_vm, _mb_id, _state) \
- for (_mb_id = _vm->next_mb_id - 1; \
- _mb_id >= _vm->first_mb_id && _vm->nb_mb_state[_state]; \
+#define virtio_mem_sbm_for_each_mb_rev(_vm, _mb_id, _state) \
+ for (_mb_id = _vm->sbm.next_mb_id - 1; \
+ _mb_id >= _vm->sbm.first_mb_id && _vm->sbm.mb_count[_state]; \
_mb_id--) \
- if (virtio_mem_mb_get_state(_vm, _mb_id) == _state)
+ if (virtio_mem_sbm_get_mb_state(_vm, _mb_id) == _state)
+
+/*
+ * Calculate the bit number in the subblock bitmap for the given subblock
+ * inside the given memory block.
+ */
+static int virtio_mem_sbm_sb_state_bit_nr(struct virtio_mem *vm,
+ unsigned long mb_id, int sb_id)
+{
+ return (mb_id - vm->sbm.first_mb_id) * vm->sbm.sbs_per_mb + sb_id;
+}
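For example (numbers assumed): with sbm.sbs_per_mb = 32, subblock 5 of the fourth tracked memory block (mb_id - sbm.first_mb_id = 3) maps to bit 3 * 32 + 5 = 101 in sbm.sb_states.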
/*
* Mark all selected subblocks plugged.
*
* Will not modify the state of the memory block.
*/
-static void virtio_mem_mb_set_sb_plugged(struct virtio_mem *vm,
- unsigned long mb_id, int sb_id,
- int count)
+static void virtio_mem_sbm_set_sb_plugged(struct virtio_mem *vm,
+ unsigned long mb_id, int sb_id,
+ int count)
{
- const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb + sb_id;
+ const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, sb_id);
- __bitmap_set(vm->sb_bitmap, bit, count);
+ __bitmap_set(vm->sbm.sb_states, bit, count);
}
/*
@@ -311,105 +498,114 @@ static void virtio_mem_mb_set_sb_plugged(struct virtio_mem *vm,
*
* Will not modify the state of the memory block.
*/
-static void virtio_mem_mb_set_sb_unplugged(struct virtio_mem *vm,
- unsigned long mb_id, int sb_id,
- int count)
+static void virtio_mem_sbm_set_sb_unplugged(struct virtio_mem *vm,
+ unsigned long mb_id, int sb_id,
+ int count)
{
- const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb + sb_id;
+ const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, sb_id);
- __bitmap_clear(vm->sb_bitmap, bit, count);
+ __bitmap_clear(vm->sbm.sb_states, bit, count);
}
/*
* Test if all selected subblocks are plugged.
*/
-static bool virtio_mem_mb_test_sb_plugged(struct virtio_mem *vm,
- unsigned long mb_id, int sb_id,
- int count)
+static bool virtio_mem_sbm_test_sb_plugged(struct virtio_mem *vm,
+ unsigned long mb_id, int sb_id,
+ int count)
{
- const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb + sb_id;
+ const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, sb_id);
if (count == 1)
- return test_bit(bit, vm->sb_bitmap);
+ return test_bit(bit, vm->sbm.sb_states);
/* TODO: Helper similar to bitmap_set() */
- return find_next_zero_bit(vm->sb_bitmap, bit + count, bit) >=
+ return find_next_zero_bit(vm->sbm.sb_states, bit + count, bit) >=
bit + count;
}
/*
* Test if all selected subblocks are unplugged.
*/
-static bool virtio_mem_mb_test_sb_unplugged(struct virtio_mem *vm,
- unsigned long mb_id, int sb_id,
- int count)
+static bool virtio_mem_sbm_test_sb_unplugged(struct virtio_mem *vm,
+ unsigned long mb_id, int sb_id,
+ int count)
{
- const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb + sb_id;
+ const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, sb_id);
/* TODO: Helper similar to bitmap_set() */
- return find_next_bit(vm->sb_bitmap, bit + count, bit) >= bit + count;
+ return find_next_bit(vm->sbm.sb_states, bit + count, bit) >=
+ bit + count;
}
/*
- * Find the first unplugged subblock. Returns vm->nb_sb_per_mb in case there is
+ * Find the first unplugged subblock. Returns vm->sbm.sbs_per_mb in case there is
* none.
*/
-static int virtio_mem_mb_first_unplugged_sb(struct virtio_mem *vm,
+static int virtio_mem_sbm_first_unplugged_sb(struct virtio_mem *vm,
unsigned long mb_id)
{
- const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb;
+ const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, 0);
- return find_next_zero_bit(vm->sb_bitmap, bit + vm->nb_sb_per_mb, bit) -
- bit;
+ return find_next_zero_bit(vm->sbm.sb_states,
+ bit + vm->sbm.sbs_per_mb, bit) - bit;
}
/*
* Prepare the subblock bitmap for the next memory block.
*/
-static int virtio_mem_sb_bitmap_prepare_next_mb(struct virtio_mem *vm)
+static int virtio_mem_sbm_sb_states_prepare_next_mb(struct virtio_mem *vm)
{
- const unsigned long old_nb_mb = vm->next_mb_id - vm->first_mb_id;
- const unsigned long old_nb_bits = old_nb_mb * vm->nb_sb_per_mb;
- const unsigned long new_nb_bits = (old_nb_mb + 1) * vm->nb_sb_per_mb;
+ const unsigned long old_nb_mb = vm->sbm.next_mb_id - vm->sbm.first_mb_id;
+ const unsigned long old_nb_bits = old_nb_mb * vm->sbm.sbs_per_mb;
+ const unsigned long new_nb_bits = (old_nb_mb + 1) * vm->sbm.sbs_per_mb;
int old_pages = PFN_UP(BITS_TO_LONGS(old_nb_bits) * sizeof(long));
int new_pages = PFN_UP(BITS_TO_LONGS(new_nb_bits) * sizeof(long));
- unsigned long *new_sb_bitmap, *old_sb_bitmap;
+ unsigned long *new_bitmap, *old_bitmap;
- if (vm->sb_bitmap && old_pages == new_pages)
+ if (vm->sbm.sb_states && old_pages == new_pages)
return 0;
- new_sb_bitmap = vzalloc(new_pages * PAGE_SIZE);
- if (!new_sb_bitmap)
+ new_bitmap = vzalloc(new_pages * PAGE_SIZE);
+ if (!new_bitmap)
return -ENOMEM;
mutex_lock(&vm->hotplug_mutex);
- if (new_sb_bitmap)
- memcpy(new_sb_bitmap, vm->sb_bitmap, old_pages * PAGE_SIZE);
+ if (new_bitmap)
+ memcpy(new_bitmap, vm->sbm.sb_states, old_pages * PAGE_SIZE);
- old_sb_bitmap = vm->sb_bitmap;
- vm->sb_bitmap = new_sb_bitmap;
+ old_bitmap = vm->sbm.sb_states;
+ vm->sbm.sb_states = new_bitmap;
mutex_unlock(&vm->hotplug_mutex);
- vfree(old_sb_bitmap);
+ vfree(old_bitmap);
return 0;
}
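
The resize above follows a deliberate pattern: allocate the larger bitmap
outside the lock, copy and swap pointers under vm->hotplug_mutex, and free the
old map only after dropping the lock. A compact userspace sketch of that
pattern, with a pthread mutex and calloc()/free() standing in for the hotplug
mutex and vzalloc()/vfree():

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long *states;
static size_t states_bytes;

static int grow_states(size_t new_bytes)
{
        unsigned long *new_map, *old_map;

        if (states && new_bytes <= states_bytes)
                return 0;
        new_map = calloc(1, new_bytes);         /* allocate outside the lock */
        if (!new_map)
                return -1;

        pthread_mutex_lock(&lock);
        if (states)
                memcpy(new_map, states, states_bytes);
        old_map = states;
        states = new_map;                       /* swap under the lock */
        states_bytes = new_bytes;
        pthread_mutex_unlock(&lock);

        free(old_map);                          /* free outside the lock */
        return 0;
}

int main(void)
{
        return grow_states(64) || grow_states(128);
}
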
/*
- * Try to add a memory block to Linux. This will usually only fail
- * if out of memory.
+ * Test if we could add memory without creating too much offline memory -
+ * to avoid running OOM if onlining of the added memory is deferred.
+ */
+static bool virtio_mem_could_add_memory(struct virtio_mem *vm, uint64_t size)
+{
+ if (WARN_ON_ONCE(size > vm->offline_threshold))
+ return false;
+
+ return atomic64_read(&vm->offline_size) + size <= vm->offline_threshold;
+}
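
vm->offline_size tracks how much device-added memory is still offline; the
check above simply refuses requests that would push it past
vm->offline_threshold. A sketch of the accounting with C11 atomics (threshold
value illustrative):

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static _Atomic int64_t offline_size;
static const int64_t offline_threshold = 256LL << 20; /* e.g., two 128 MiB blocks */

static bool could_add_memory(uint64_t size)
{
        if (size > (uint64_t)offline_threshold)
                return false;   /* a single request larger than the budget */
        return atomic_load(&offline_size) + (int64_t)size <= offline_threshold;
}

int main(void)
{
        atomic_fetch_add(&offline_size, 128LL << 20);   /* one block pending */
        assert(could_add_memory(128ULL << 20));         /* a second still fits */
        assert(!could_add_memory(256ULL << 20));        /* would exceed budget */
        return 0;
}

Callers add to offline_size before add_memory() and subtract it again on
failure or once the memory is onlined, mirroring virtio_mem_add_memory() below.
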
+
+/*
+ * Try adding memory to Linux. Will usually only fail if out of memory.
*
* Must not be called with the vm->hotplug_mutex held (possible deadlock with
* onlining code).
*
- * Will not modify the state of the memory block.
+ * Will not modify the state of memory blocks in virtio-mem.
*/
-static int virtio_mem_mb_add(struct virtio_mem *vm, unsigned long mb_id)
+static int virtio_mem_add_memory(struct virtio_mem *vm, uint64_t addr,
+ uint64_t size)
{
- const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id);
- int nid = vm->nid;
-
- if (nid == NUMA_NO_NODE)
- nid = memory_add_physaddr_to_nid(addr);
+ int rc;
/*
* When force-unloading the driver and we still have memory added to
@@ -422,53 +618,155 @@ static int virtio_mem_mb_add(struct virtio_mem *vm, unsigned long mb_id)
return -ENOMEM;
}
- dev_dbg(&vm->vdev->dev, "adding memory block: %lu\n", mb_id);
- return add_memory_driver_managed(nid, addr, memory_block_size_bytes(),
- vm->resource_name,
- MEMHP_MERGE_RESOURCE);
+ dev_dbg(&vm->vdev->dev, "adding memory: 0x%llx - 0x%llx\n", addr,
+ addr + size - 1);
+ /* Memory might get onlined immediately. */
+ atomic64_add(size, &vm->offline_size);
+ rc = add_memory_driver_managed(vm->nid, addr, size, vm->resource_name,
+ MEMHP_MERGE_RESOURCE);
+ if (rc) {
+ atomic64_sub(size, &vm->offline_size);
+ dev_warn(&vm->vdev->dev, "adding memory failed: %d\n", rc);
+ /*
+ * TODO: Linux MM does not properly clean up yet in all cases
+ * where adding of memory failed - especially on -ENOMEM.
+ */
+ }
+ return rc;
+}
+
+/*
+ * See virtio_mem_add_memory(): Try adding a single Linux memory block.
+ */
+static int virtio_mem_sbm_add_mb(struct virtio_mem *vm, unsigned long mb_id)
+{
+ const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id);
+ const uint64_t size = memory_block_size_bytes();
+
+ return virtio_mem_add_memory(vm, addr, size);
+}
+
+/*
+ * See virtio_mem_add_memory(): Try adding a big block.
+ */
+static int virtio_mem_bbm_add_bb(struct virtio_mem *vm, unsigned long bb_id)
+{
+ const uint64_t addr = virtio_mem_bb_id_to_phys(vm, bb_id);
+ const uint64_t size = vm->bbm.bb_size;
+
+ return virtio_mem_add_memory(vm, addr, size);
}
/*
- * Try to remove a memory block from Linux. Will only fail if the memory block
- * is not offline.
+ * Try removing memory from Linux. Will only fail if memory blocks aren't
+ * offline.
*
* Must not be called with the vm->hotplug_mutex held (possible deadlock with
* onlining code).
*
- * Will not modify the state of the memory block.
+ * Will not modify the state of memory blocks in virtio-mem.
*/
-static int virtio_mem_mb_remove(struct virtio_mem *vm, unsigned long mb_id)
+static int virtio_mem_remove_memory(struct virtio_mem *vm, uint64_t addr,
+ uint64_t size)
+{
+ int rc;
+
+ dev_dbg(&vm->vdev->dev, "removing memory: 0x%llx - 0x%llx\n", addr,
+ addr + size - 1);
+ rc = remove_memory(vm->nid, addr, size);
+ if (!rc) {
+ atomic64_sub(size, &vm->offline_size);
+ /*
+ * We might have freed up memory we can now unplug, retry
+ * immediately instead of waiting.
+ */
+ virtio_mem_retry(vm);
+ } else {
+ dev_dbg(&vm->vdev->dev, "removing memory failed: %d\n", rc);
+ }
+ return rc;
+}
+
+/*
+ * See virtio_mem_remove_memory(): Try removing a single Linux memory block.
+ */
+static int virtio_mem_sbm_remove_mb(struct virtio_mem *vm, unsigned long mb_id)
{
const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id);
- int nid = vm->nid;
+ const uint64_t size = memory_block_size_bytes();
- if (nid == NUMA_NO_NODE)
- nid = memory_add_physaddr_to_nid(addr);
+ return virtio_mem_remove_memory(vm, addr, size);
+}
- dev_dbg(&vm->vdev->dev, "removing memory block: %lu\n", mb_id);
- return remove_memory(nid, addr, memory_block_size_bytes());
+/*
+ * See virtio_mem_remove_memory(): Try to remove all Linux memory blocks covered
+ * by the big block.
+ */
+static int virtio_mem_bbm_remove_bb(struct virtio_mem *vm, unsigned long bb_id)
+{
+ const uint64_t addr = virtio_mem_bb_id_to_phys(vm, bb_id);
+ const uint64_t size = vm->bbm.bb_size;
+
+ return virtio_mem_remove_memory(vm, addr, size);
}
/*
- * Try to offline and remove a memory block from Linux.
+ * Try offlining and removing memory from Linux.
*
* Must not be called with the vm->hotplug_mutex held (possible deadlock with
* onlining code).
*
- * Will not modify the state of the memory block.
+ * Will not modify the state of memory blocks in virtio-mem.
*/
-static int virtio_mem_mb_offline_and_remove(struct virtio_mem *vm,
- unsigned long mb_id)
+static int virtio_mem_offline_and_remove_memory(struct virtio_mem *vm,
+ uint64_t addr,
+ uint64_t size)
+{
+ int rc;
+
+ dev_dbg(&vm->vdev->dev,
+ "offlining and removing memory: 0x%llx - 0x%llx\n", addr,
+ addr + size - 1);
+
+ rc = offline_and_remove_memory(vm->nid, addr, size);
+ if (!rc) {
+ atomic64_sub(size, &vm->offline_size);
+ /*
+ * We might have freed up memory we can now unplug, retry
+ * immediately instead of waiting.
+ */
+ virtio_mem_retry(vm);
+ } else {
+ dev_dbg(&vm->vdev->dev,
+ "offlining and removing memory failed: %d\n", rc);
+ }
+ return rc;
+}
+
+/*
+ * See virtio_mem_offline_and_remove_memory(): Try offlining and removing
+ * a single Linux memory block.
+ */
+static int virtio_mem_sbm_offline_and_remove_mb(struct virtio_mem *vm,
+ unsigned long mb_id)
{
const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id);
- int nid = vm->nid;
+ const uint64_t size = memory_block_size_bytes();
+
+ return virtio_mem_offline_and_remove_memory(vm, addr, size);
+}
- if (nid == NUMA_NO_NODE)
- nid = memory_add_physaddr_to_nid(addr);
+/*
+ * See virtio_mem_offline_and_remove_memory(): Try to offline and remove
+ * all Linux memory blocks covered by the big block.
+ */
+static int virtio_mem_bbm_offline_and_remove_bb(struct virtio_mem *vm,
+ unsigned long bb_id)
+{
+ const uint64_t addr = virtio_mem_bb_id_to_phys(vm, bb_id);
+ const uint64_t size = vm->bbm.bb_size;
- dev_dbg(&vm->vdev->dev, "offlining and removing memory block: %lu\n",
- mb_id);
- return offline_and_remove_memory(nid, addr, memory_block_size_bytes());
+ return virtio_mem_offline_and_remove_memory(vm, addr, size);
}
/*
@@ -499,31 +797,28 @@ static int virtio_mem_translate_node_id(struct virtio_mem *vm, uint16_t node_id)
* Test if a virtio-mem device overlaps with the given range. Can be called
* from (notifier) callbacks lockless.
*/
-static bool virtio_mem_overlaps_range(struct virtio_mem *vm,
- unsigned long start, unsigned long size)
+static bool virtio_mem_overlaps_range(struct virtio_mem *vm, uint64_t start,
+ uint64_t size)
{
- unsigned long dev_start = virtio_mem_mb_id_to_phys(vm->first_mb_id);
- unsigned long dev_end = virtio_mem_mb_id_to_phys(vm->last_mb_id) +
- memory_block_size_bytes();
-
- return start < dev_end && dev_start < start + size;
+ return start < vm->addr + vm->region_size && vm->addr < start + size;
}
/*
- * Test if a virtio-mem device owns a memory block. Can be called from
+ * Test if a virtio-mem device contains a given range. Can be called from
* (notifier) callbacks lockless.
*/
-static bool virtio_mem_owned_mb(struct virtio_mem *vm, unsigned long mb_id)
+static bool virtio_mem_contains_range(struct virtio_mem *vm, uint64_t start,
+ uint64_t size)
{
- return mb_id >= vm->first_mb_id && mb_id <= vm->last_mb_id;
+ return start >= vm->addr && start + size <= vm->addr + vm->region_size;
}
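
Both predicates are the standard half-open interval tests: [a, a + n) and
[b, b + m) overlap iff a < b + m and b < a + n, while containment requires both
endpoints to fall inside the device range. A few sanity cases with invented
addresses:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static const uint64_t dev_addr = 0x100000000ULL;        /* hypothetical: 4 GiB */
static const uint64_t dev_size = 0x40000000ULL;         /* hypothetical: 1 GiB */

static bool overlaps(uint64_t start, uint64_t size)
{
        return start < dev_addr + dev_size && dev_addr < start + size;
}

static bool contains(uint64_t start, uint64_t size)
{
        return start >= dev_addr && start + size <= dev_addr + dev_size;
}

int main(void)
{
        assert(overlaps(dev_addr - 4096, 8192));        /* straddles the start */
        assert(!contains(dev_addr - 4096, 8192));       /* ...but not contained */
        assert(contains(dev_addr, dev_size));           /* the full region */
        return 0;
}
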
-static int virtio_mem_notify_going_online(struct virtio_mem *vm,
- unsigned long mb_id)
+static int virtio_mem_sbm_notify_going_online(struct virtio_mem *vm,
+ unsigned long mb_id)
{
- switch (virtio_mem_mb_get_state(vm, mb_id)) {
- case VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL:
- case VIRTIO_MEM_MB_STATE_OFFLINE:
+ switch (virtio_mem_sbm_get_mb_state(vm, mb_id)) {
+ case VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL:
+ case VIRTIO_MEM_SBM_MB_OFFLINE:
return NOTIFY_OK;
default:
break;
@@ -533,108 +828,100 @@ static int virtio_mem_notify_going_online(struct virtio_mem *vm,
return NOTIFY_BAD;
}
-static void virtio_mem_notify_offline(struct virtio_mem *vm,
- unsigned long mb_id)
+static void virtio_mem_sbm_notify_offline(struct virtio_mem *vm,
+ unsigned long mb_id)
{
- switch (virtio_mem_mb_get_state(vm, mb_id)) {
- case VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL:
- virtio_mem_mb_set_state(vm, mb_id,
- VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL);
+ switch (virtio_mem_sbm_get_mb_state(vm, mb_id)) {
+ case VIRTIO_MEM_SBM_MB_ONLINE_PARTIAL:
+ virtio_mem_sbm_set_mb_state(vm, mb_id,
+ VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL);
break;
- case VIRTIO_MEM_MB_STATE_ONLINE:
- virtio_mem_mb_set_state(vm, mb_id,
- VIRTIO_MEM_MB_STATE_OFFLINE);
+ case VIRTIO_MEM_SBM_MB_ONLINE:
+ virtio_mem_sbm_set_mb_state(vm, mb_id,
+ VIRTIO_MEM_SBM_MB_OFFLINE);
break;
default:
BUG();
break;
}
-
- /*
- * Trigger the workqueue, maybe we can now unplug memory. Also,
- * when we offline and remove a memory block, this will re-trigger
- * us immediately - which is often nice because the removal of
- * the memory block (e.g., memmap) might have freed up memory
- * on other memory blocks we manage.
- */
- virtio_mem_retry(vm);
}
-static void virtio_mem_notify_online(struct virtio_mem *vm, unsigned long mb_id)
+static void virtio_mem_sbm_notify_online(struct virtio_mem *vm,
+ unsigned long mb_id)
{
- unsigned long nb_offline;
-
- switch (virtio_mem_mb_get_state(vm, mb_id)) {
- case VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL:
- virtio_mem_mb_set_state(vm, mb_id,
- VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL);
+ switch (virtio_mem_sbm_get_mb_state(vm, mb_id)) {
+ case VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL:
+ virtio_mem_sbm_set_mb_state(vm, mb_id,
+ VIRTIO_MEM_SBM_MB_ONLINE_PARTIAL);
break;
- case VIRTIO_MEM_MB_STATE_OFFLINE:
- virtio_mem_mb_set_state(vm, mb_id, VIRTIO_MEM_MB_STATE_ONLINE);
+ case VIRTIO_MEM_SBM_MB_OFFLINE:
+ virtio_mem_sbm_set_mb_state(vm, mb_id,
+ VIRTIO_MEM_SBM_MB_ONLINE);
break;
default:
BUG();
break;
}
- nb_offline = vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE] +
- vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL];
-
- /* see if we can add new blocks now that we onlined one block */
- if (nb_offline == VIRTIO_MEM_NB_OFFLINE_THRESHOLD - 1)
- virtio_mem_retry(vm);
}
-static void virtio_mem_notify_going_offline(struct virtio_mem *vm,
- unsigned long mb_id)
+static void virtio_mem_sbm_notify_going_offline(struct virtio_mem *vm,
+ unsigned long mb_id)
{
- const unsigned long nr_pages = PFN_DOWN(vm->subblock_size);
- struct page *page;
+ const unsigned long nr_pages = PFN_DOWN(vm->sbm.sb_size);
unsigned long pfn;
- int sb_id, i;
+ int sb_id;
- for (sb_id = 0; sb_id < vm->nb_sb_per_mb; sb_id++) {
- if (virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id, 1))
+ for (sb_id = 0; sb_id < vm->sbm.sbs_per_mb; sb_id++) {
+ if (virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id, 1))
continue;
- /*
- * Drop our reference to the pages so the memory can get
- * offlined and add the unplugged pages to the managed
- * page counters (so offlining code can correctly subtract
- * them again).
- */
pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
- sb_id * vm->subblock_size);
- adjust_managed_page_count(pfn_to_page(pfn), nr_pages);
- for (i = 0; i < nr_pages; i++) {
- page = pfn_to_page(pfn + i);
- if (WARN_ON(!page_ref_dec_and_test(page)))
- dump_page(page, "unplugged page referenced");
- }
+ sb_id * vm->sbm.sb_size);
+ virtio_mem_fake_offline_going_offline(pfn, nr_pages);
}
}
-static void virtio_mem_notify_cancel_offline(struct virtio_mem *vm,
- unsigned long mb_id)
+static void virtio_mem_sbm_notify_cancel_offline(struct virtio_mem *vm,
+ unsigned long mb_id)
{
- const unsigned long nr_pages = PFN_DOWN(vm->subblock_size);
+ const unsigned long nr_pages = PFN_DOWN(vm->sbm.sb_size);
unsigned long pfn;
- int sb_id, i;
+ int sb_id;
- for (sb_id = 0; sb_id < vm->nb_sb_per_mb; sb_id++) {
- if (virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id, 1))
+ for (sb_id = 0; sb_id < vm->sbm.sbs_per_mb; sb_id++) {
+ if (virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id, 1))
continue;
- /*
- * Get the reference we dropped when going offline and
- * subtract the unplugged pages from the managed page
- * counters.
- */
pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
- sb_id * vm->subblock_size);
- adjust_managed_page_count(pfn_to_page(pfn), -nr_pages);
- for (i = 0; i < nr_pages; i++)
- page_ref_inc(pfn_to_page(pfn + i));
+ sb_id * vm->sbm.sb_size);
+ virtio_mem_fake_offline_cancel_offline(pfn, nr_pages);
}
}
+static void virtio_mem_bbm_notify_going_offline(struct virtio_mem *vm,
+ unsigned long bb_id,
+ unsigned long pfn,
+ unsigned long nr_pages)
+{
+ /*
+ * When marked as "fake-offline", all online memory of this device block
+ * is allocated by us. Otherwise, we don't have any memory allocated.
+ */
+ if (virtio_mem_bbm_get_bb_state(vm, bb_id) !=
+ VIRTIO_MEM_BBM_BB_FAKE_OFFLINE)
+ return;
+ virtio_mem_fake_offline_going_offline(pfn, nr_pages);
+}
+
+static void virtio_mem_bbm_notify_cancel_offline(struct virtio_mem *vm,
+ unsigned long bb_id,
+ unsigned long pfn,
+ unsigned long nr_pages)
+{
+ if (virtio_mem_bbm_get_bb_state(vm, bb_id) !=
+ VIRTIO_MEM_BBM_BB_FAKE_OFFLINE)
+ return;
+ virtio_mem_fake_offline_cancel_offline(pfn, nr_pages);
+}
+
/*
* This callback will either be called synchronously from add_memory() or
* asynchronously (e.g., triggered via user space). We have to be careful
@@ -648,20 +935,33 @@ static int virtio_mem_memory_notifier_cb(struct notifier_block *nb,
struct memory_notify *mhp = arg;
const unsigned long start = PFN_PHYS(mhp->start_pfn);
const unsigned long size = PFN_PHYS(mhp->nr_pages);
- const unsigned long mb_id = virtio_mem_phys_to_mb_id(start);
int rc = NOTIFY_OK;
+ unsigned long id;
if (!virtio_mem_overlaps_range(vm, start, size))
return NOTIFY_DONE;
- /*
- * Memory is onlined/offlined in memory block granularity. We cannot
- * cross virtio-mem device boundaries and memory block boundaries. Bail
- * out if this ever changes.
- */
- if (WARN_ON_ONCE(size != memory_block_size_bytes() ||
- !IS_ALIGNED(start, memory_block_size_bytes())))
- return NOTIFY_BAD;
+ if (vm->in_sbm) {
+ id = virtio_mem_phys_to_mb_id(start);
+ /*
+ * In SBM, we add memory in separate memory blocks - we expect
+ * it to be onlined/offlined in the same granularity. Bail out
+ * if this ever changes.
+ */
+ if (WARN_ON_ONCE(size != memory_block_size_bytes() ||
+ !IS_ALIGNED(start, memory_block_size_bytes())))
+ return NOTIFY_BAD;
+ } else {
+ id = virtio_mem_phys_to_bb_id(vm, start);
+ /*
+ * In BBM, we only care about onlining/offlining happening
+ * within a single big block, we don't care about the
+ * actual granularity as we don't track individual Linux
+ * memory blocks.
+ */
+ if (WARN_ON_ONCE(id != virtio_mem_phys_to_bb_id(vm, start + size - 1)))
+ return NOTIFY_BAD;
+ }
/*
* Avoid circular locking lockdep warnings. We lock the mutex
@@ -680,7 +980,12 @@ static int virtio_mem_memory_notifier_cb(struct notifier_block *nb,
break;
}
vm->hotplug_active = true;
- virtio_mem_notify_going_offline(vm, mb_id);
+ if (vm->in_sbm)
+ virtio_mem_sbm_notify_going_offline(vm, id);
+ else
+ virtio_mem_bbm_notify_going_offline(vm, id,
+ mhp->start_pfn,
+ mhp->nr_pages);
break;
case MEM_GOING_ONLINE:
mutex_lock(&vm->hotplug_mutex);
@@ -690,22 +995,51 @@ static int virtio_mem_memory_notifier_cb(struct notifier_block *nb,
break;
}
vm->hotplug_active = true;
- rc = virtio_mem_notify_going_online(vm, mb_id);
+ if (vm->in_sbm)
+ rc = virtio_mem_sbm_notify_going_online(vm, id);
break;
case MEM_OFFLINE:
- virtio_mem_notify_offline(vm, mb_id);
+ if (vm->in_sbm)
+ virtio_mem_sbm_notify_offline(vm, id);
+
+ atomic64_add(size, &vm->offline_size);
+ /*
+ * Trigger the workqueue. Now that we have some offline memory,
+ * maybe we can handle pending unplug requests.
+ */
+ if (!unplug_online)
+ virtio_mem_retry(vm);
+
vm->hotplug_active = false;
mutex_unlock(&vm->hotplug_mutex);
break;
case MEM_ONLINE:
- virtio_mem_notify_online(vm, mb_id);
+ if (vm->in_sbm)
+ virtio_mem_sbm_notify_online(vm, id);
+
+ atomic64_sub(size, &vm->offline_size);
+ /*
+ * Start adding more memory once we onlined half of our
+ * threshold. Don't trigger if it's possibly due to our action
+ * (e.g., us adding memory which gets onlined immediately from
+ * the core).
+ */
+ if (!atomic_read(&vm->wq_active) &&
+ virtio_mem_could_add_memory(vm, vm->offline_threshold / 2))
+ virtio_mem_retry(vm);
+
vm->hotplug_active = false;
mutex_unlock(&vm->hotplug_mutex);
break;
case MEM_CANCEL_OFFLINE:
if (!vm->hotplug_active)
break;
- virtio_mem_notify_cancel_offline(vm, mb_id);
+ if (vm->in_sbm)
+ virtio_mem_sbm_notify_cancel_offline(vm, id);
+ else
+ virtio_mem_bbm_notify_cancel_offline(vm, id,
+ mhp->start_pfn,
+ mhp->nr_pages);
vm->hotplug_active = false;
mutex_unlock(&vm->hotplug_mutex);
break;
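
For readers unfamiliar with the hotplug notifier machinery this callback plugs
into: a minimal, hypothetical module skeleton showing how such a memory
notifier is registered and which events it sees. It only logs; returning
NOTIFY_BAD from a GOING_* event is what vetoes the transition:

#include <linux/memory.h>
#include <linux/module.h>
#include <linux/notifier.h>

static int demo_memory_notifier_cb(struct notifier_block *nb,
                                   unsigned long action, void *arg)
{
        struct memory_notify *mhp = arg;

        switch (action) {
        case MEM_GOING_OFFLINE:
                pr_info("going offline: pfn 0x%lx, %lx pages\n",
                        mhp->start_pfn, mhp->nr_pages);
                break;
        case MEM_GOING_ONLINE:
        case MEM_ONLINE:
        case MEM_OFFLINE:
        case MEM_CANCEL_OFFLINE:
        default:
                break;
        }
        /* Returning NOTIFY_BAD from a GOING_* event vetoes the transition. */
        return NOTIFY_OK;
}

static struct notifier_block demo_memory_nb = {
        .notifier_call = demo_memory_notifier_cb,
};

static int __init demo_init(void)
{
        return register_memory_notifier(&demo_memory_nb);
}
module_init(demo_init);

static void __exit demo_exit(void)
{
        unregister_memory_notifier(&demo_memory_nb);
}
module_exit(demo_exit);

MODULE_LICENSE("GPL");
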
@@ -729,7 +1063,7 @@ static int virtio_mem_memory_notifier_cb(struct notifier_block *nb,
* (via generic_online_page()) using PageDirty().
*/
static void virtio_mem_set_fake_offline(unsigned long pfn,
- unsigned int nr_pages, bool onlined)
+ unsigned long nr_pages, bool onlined)
{
for (; nr_pages--; pfn++) {
struct page *page = pfn_to_page(pfn);
@@ -748,7 +1082,7 @@ static void virtio_mem_set_fake_offline(unsigned long pfn,
* (via generic_online_page()), clear PageDirty().
*/
static void virtio_mem_clear_fake_offline(unsigned long pfn,
- unsigned int nr_pages, bool onlined)
+ unsigned long nr_pages, bool onlined)
{
for (; nr_pages--; pfn++) {
struct page *page = pfn_to_page(pfn);
@@ -763,16 +1097,17 @@ static void virtio_mem_clear_fake_offline(unsigned long pfn,
* Release a range of fake-offline pages to the buddy, effectively
* fake-onlining them.
*/
-static void virtio_mem_fake_online(unsigned long pfn, unsigned int nr_pages)
+static void virtio_mem_fake_online(unsigned long pfn, unsigned long nr_pages)
{
- const int order = MAX_ORDER - 1;
- int i;
+ const unsigned long max_nr_pages = MAX_ORDER_NR_PAGES;
+ unsigned long i;
/*
- * We are always called with subblock granularity, which is at least
- * aligned to MAX_ORDER - 1.
+ * We are always called at least with MAX_ORDER_NR_PAGES
+ * granularity/alignment (e.g., the way subblocks work). All pages
+ * inside such a block are alike.
*/
- for (i = 0; i < nr_pages; i += 1 << order) {
+ for (i = 0; i < nr_pages; i += max_nr_pages) {
struct page *page = pfn_to_page(pfn + i);
/*
@@ -782,42 +1117,128 @@ static void virtio_mem_fake_online(unsigned long pfn, unsigned int nr_pages)
* alike.
*/
if (PageDirty(page)) {
- virtio_mem_clear_fake_offline(pfn + i, 1 << order,
+ virtio_mem_clear_fake_offline(pfn + i, max_nr_pages,
false);
- generic_online_page(page, order);
+ generic_online_page(page, MAX_ORDER - 1);
} else {
- virtio_mem_clear_fake_offline(pfn + i, 1 << order,
+ virtio_mem_clear_fake_offline(pfn + i, max_nr_pages,
true);
- free_contig_range(pfn + i, 1 << order);
- adjust_managed_page_count(page, 1 << order);
+ free_contig_range(pfn + i, max_nr_pages);
+ adjust_managed_page_count(page, max_nr_pages);
}
}
}
+/*
+ * Try to allocate a range, marking pages fake-offline, effectively
+ * fake-offlining them.
+ */
+static int virtio_mem_fake_offline(unsigned long pfn, unsigned long nr_pages)
+{
+ const bool is_movable = zone_idx(page_zone(pfn_to_page(pfn))) ==
+ ZONE_MOVABLE;
+ int rc, retry_count;
+
+ /*
+ * TODO: We want an alloc_contig_range() mode that tries to allocate
+ * harder (e.g., dealing with temporarily pinned pages, PCP), especially
+ * with ZONE_MOVABLE. So for now, retry a couple of times with
+ * ZONE_MOVABLE before giving up - because that zone is supposed to give
+ * some guarantees.
+ */
+ for (retry_count = 0; retry_count < 5; retry_count++) {
+ rc = alloc_contig_range(pfn, pfn + nr_pages, MIGRATE_MOVABLE,
+ GFP_KERNEL);
+ if (rc == -ENOMEM)
+ /* whoops, out of memory */
+ return rc;
+ else if (rc && !is_movable)
+ break;
+ else if (rc)
+ continue;
+
+ virtio_mem_set_fake_offline(pfn, nr_pages, true);
+ adjust_managed_page_count(pfn_to_page(pfn), -nr_pages);
+ return 0;
+ }
+
+ return -EBUSY;
+}
+
+/*
+ * Handle fake-offline pages when memory is going offline - such that the
+ * pages can be skipped by mm-core when offlining.
+ */
+static void virtio_mem_fake_offline_going_offline(unsigned long pfn,
+ unsigned long nr_pages)
+{
+ struct page *page;
+ unsigned long i;
+
+ /*
+ * Drop our reference to the pages so the memory can get offlined
+ * and add the unplugged pages to the managed page counters (so
+ * offlining code can correctly subtract them again).
+ */
+ adjust_managed_page_count(pfn_to_page(pfn), nr_pages);
+ for (i = 0; i < nr_pages; i++) {
+ page = pfn_to_page(pfn + i);
+ if (WARN_ON(!page_ref_dec_and_test(page)))
+ dump_page(page, "fake-offline page referenced");
+ }
+}
+
+/*
+ * Handle fake-offline pages when memory offlining is canceled - to undo
+ * what we did in virtio_mem_fake_offline_going_offline().
+ */
+static void virtio_mem_fake_offline_cancel_offline(unsigned long pfn,
+ unsigned long nr_pages)
+{
+ unsigned long i;
+
+ /*
+ * Get the reference we dropped when going offline and subtract the
+ * unplugged pages from the managed page counters.
+ */
+ adjust_managed_page_count(pfn_to_page(pfn), -nr_pages);
+ for (i = 0; i < nr_pages; i++)
+ page_ref_inc(pfn_to_page(pfn + i));
+}
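
The two helpers above must remain exact inverses: every page reference dropped
and every managed-page adjustment made when going offline is undone on cancel.
A toy model of that symmetry (plain counters, not kernel code):

#include <assert.h>

static long managed_pages, page_refs;

static void going_offline(long nr)  { managed_pages += nr; page_refs -= nr; }
static void cancel_offline(long nr) { managed_pages -= nr; page_refs += nr; }

int main(void)
{
        going_offline(512);
        cancel_offline(512);
        /* Cancel must restore exactly what going-offline changed. */
        assert(managed_pages == 0 && page_refs == 0);
        return 0;
}
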
+
static void virtio_mem_online_page_cb(struct page *page, unsigned int order)
{
const unsigned long addr = page_to_phys(page);
- const unsigned long mb_id = virtio_mem_phys_to_mb_id(addr);
+ unsigned long id, sb_id;
struct virtio_mem *vm;
- int sb_id;
+ bool do_online;
- /*
- * We exploit here that subblocks have at least MAX_ORDER - 1
- * size/alignment and that this callback is is called with such a
- * size/alignment. So we cannot cross subblocks and therefore
- * also not memory blocks.
- */
rcu_read_lock();
list_for_each_entry_rcu(vm, &virtio_mem_devices, next) {
- if (!virtio_mem_owned_mb(vm, mb_id))
+ if (!virtio_mem_contains_range(vm, addr, PFN_PHYS(1 << order)))
continue;
- sb_id = virtio_mem_phys_to_sb_id(vm, addr);
- /*
- * If plugged, online the pages, otherwise, set them fake
- * offline (PageOffline).
- */
- if (virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id, 1))
+ if (vm->in_sbm) {
+ /*
+ * We exploit here that subblocks have at least
+ * MAX_ORDER_NR_PAGES size/alignment - so we cannot
+ * cross subblocks within one call.
+ */
+ id = virtio_mem_phys_to_mb_id(addr);
+ sb_id = virtio_mem_phys_to_sb_id(vm, addr);
+ do_online = virtio_mem_sbm_test_sb_plugged(vm, id,
+ sb_id, 1);
+ } else {
+ /*
+ * If the whole block is marked fake offline, keep
+ * everything that way.
+ */
+ id = virtio_mem_phys_to_bb_id(vm, addr);
+ do_online = virtio_mem_bbm_get_bb_state(vm, id) !=
+ VIRTIO_MEM_BBM_BB_FAKE_OFFLINE;
+ }
+ if (do_online)
generic_online_page(page, order);
else
virtio_mem_set_fake_offline(PFN_DOWN(addr), 1 << order,
@@ -870,23 +1291,33 @@ static int virtio_mem_send_plug_request(struct virtio_mem *vm, uint64_t addr,
.u.plug.addr = cpu_to_virtio64(vm->vdev, addr),
.u.plug.nb_blocks = cpu_to_virtio16(vm->vdev, nb_vm_blocks),
};
+ int rc = -ENOMEM;
if (atomic_read(&vm->config_changed))
return -EAGAIN;
+ dev_dbg(&vm->vdev->dev, "plugging memory: 0x%llx - 0x%llx\n", addr,
+ addr + size - 1);
+
switch (virtio_mem_send_request(vm, &req)) {
case VIRTIO_MEM_RESP_ACK:
vm->plugged_size += size;
return 0;
case VIRTIO_MEM_RESP_NACK:
- return -EAGAIN;
+ rc = -EAGAIN;
+ break;
case VIRTIO_MEM_RESP_BUSY:
- return -ETXTBSY;
+ rc = -ETXTBSY;
+ break;
case VIRTIO_MEM_RESP_ERROR:
- return -EINVAL;
+ rc = -EINVAL;
+ break;
default:
- return -ENOMEM;
+ break;
}
+
+ dev_dbg(&vm->vdev->dev, "plugging memory failed: %d\n", rc);
+ return rc;
}
static int virtio_mem_send_unplug_request(struct virtio_mem *vm, uint64_t addr,
@@ -898,21 +1329,30 @@ static int virtio_mem_send_unplug_request(struct virtio_mem *vm, uint64_t addr,
.u.unplug.addr = cpu_to_virtio64(vm->vdev, addr),
.u.unplug.nb_blocks = cpu_to_virtio16(vm->vdev, nb_vm_blocks),
};
+ int rc = -ENOMEM;
if (atomic_read(&vm->config_changed))
return -EAGAIN;
+ dev_dbg(&vm->vdev->dev, "unplugging memory: 0x%llx - 0x%llx\n", addr,
+ addr + size - 1);
+
switch (virtio_mem_send_request(vm, &req)) {
case VIRTIO_MEM_RESP_ACK:
vm->plugged_size -= size;
return 0;
case VIRTIO_MEM_RESP_BUSY:
- return -ETXTBSY;
+ rc = -ETXTBSY;
+ break;
case VIRTIO_MEM_RESP_ERROR:
- return -EINVAL;
+ rc = -EINVAL;
+ break;
default:
- return -ENOMEM;
+ break;
}
+
+ dev_dbg(&vm->vdev->dev, "unplugging memory failed: %d\n", rc);
+ return rc;
}
static int virtio_mem_send_unplug_all_request(struct virtio_mem *vm)
@@ -920,6 +1360,9 @@ static int virtio_mem_send_unplug_all_request(struct virtio_mem *vm)
const struct virtio_mem_req req = {
.type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_UNPLUG_ALL),
};
+ int rc = -ENOMEM;
+
+ dev_dbg(&vm->vdev->dev, "unplugging all memory");
switch (virtio_mem_send_request(vm, &req)) {
case VIRTIO_MEM_RESP_ACK:
@@ -929,30 +1372,31 @@ static int virtio_mem_send_unplug_all_request(struct virtio_mem *vm)
atomic_set(&vm->config_changed, 1);
return 0;
case VIRTIO_MEM_RESP_BUSY:
- return -ETXTBSY;
+ rc = -ETXTBSY;
+ break;
default:
- return -ENOMEM;
+ break;
}
+
+ dev_dbg(&vm->vdev->dev, "unplugging all memory failed: %d\n", rc);
+ return rc;
}
/*
* Plug selected subblocks. Updates the plugged state, but not the state
* of the memory block.
*/
-static int virtio_mem_mb_plug_sb(struct virtio_mem *vm, unsigned long mb_id,
- int sb_id, int count)
+static int virtio_mem_sbm_plug_sb(struct virtio_mem *vm, unsigned long mb_id,
+ int sb_id, int count)
{
const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id) +
- sb_id * vm->subblock_size;
- const uint64_t size = count * vm->subblock_size;
+ sb_id * vm->sbm.sb_size;
+ const uint64_t size = count * vm->sbm.sb_size;
int rc;
- dev_dbg(&vm->vdev->dev, "plugging memory block: %lu : %i - %i\n", mb_id,
- sb_id, sb_id + count - 1);
-
rc = virtio_mem_send_plug_request(vm, addr, size);
if (!rc)
- virtio_mem_mb_set_sb_plugged(vm, mb_id, sb_id, count);
+ virtio_mem_sbm_set_sb_plugged(vm, mb_id, sb_id, count);
return rc;
}
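
To make the addr/size math concrete, assuming virtio_mem_mb_id_to_phys() maps
an id to mb_id * memory_block_size_bytes(), as defined earlier in this file:
with 128 MiB memory blocks and 4 MiB subblocks, plugging count = 3 subblocks
starting at sb_id = 2 issues a single request with
addr = mb_id * 128 MiB + 2 * 4 MiB and size = 3 * 4 MiB = 12 MiB.
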
@@ -960,24 +1404,47 @@ static int virtio_mem_mb_plug_sb(struct virtio_mem *vm, unsigned long mb_id,
* Unplug selected subblocks. Updates the plugged state, but not the state
* of the memory block.
*/
-static int virtio_mem_mb_unplug_sb(struct virtio_mem *vm, unsigned long mb_id,
- int sb_id, int count)
+static int virtio_mem_sbm_unplug_sb(struct virtio_mem *vm, unsigned long mb_id,
+ int sb_id, int count)
{
const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id) +
- sb_id * vm->subblock_size;
- const uint64_t size = count * vm->subblock_size;
+ sb_id * vm->sbm.sb_size;
+ const uint64_t size = count * vm->sbm.sb_size;
int rc;
- dev_dbg(&vm->vdev->dev, "unplugging memory block: %lu : %i - %i\n",
- mb_id, sb_id, sb_id + count - 1);
-
rc = virtio_mem_send_unplug_request(vm, addr, size);
if (!rc)
- virtio_mem_mb_set_sb_unplugged(vm, mb_id, sb_id, count);
+ virtio_mem_sbm_set_sb_unplugged(vm, mb_id, sb_id, count);
return rc;
}
/*
+ * Request to unplug a big block.
+ *
+ * Will not modify the state of the big block.
+ */
+static int virtio_mem_bbm_unplug_bb(struct virtio_mem *vm, unsigned long bb_id)
+{
+ const uint64_t addr = virtio_mem_bb_id_to_phys(vm, bb_id);
+ const uint64_t size = vm->bbm.bb_size;
+
+ return virtio_mem_send_unplug_request(vm, addr, size);
+}
+
+/*
+ * Request to plug a big block.
+ *
+ * Will not modify the state of the big block.
+ */
+static int virtio_mem_bbm_plug_bb(struct virtio_mem *vm, unsigned long bb_id)
+{
+ const uint64_t addr = virtio_mem_bb_id_to_phys(vm, bb_id);
+ const uint64_t size = vm->bbm.bb_size;
+
+ return virtio_mem_send_plug_request(vm, addr, size);
+}
+
+/*
* Unplug the desired number of plugged subblocks of an offline or not-added
* memory block. Will fail if any subblock cannot get unplugged (instead of
* skipping it).
@@ -986,29 +1453,29 @@ static int virtio_mem_mb_unplug_sb(struct virtio_mem *vm, unsigned long mb_id,
*
* Note: can fail after some subblocks were unplugged.
*/
-static int virtio_mem_mb_unplug_any_sb(struct virtio_mem *vm,
- unsigned long mb_id, uint64_t *nb_sb)
+static int virtio_mem_sbm_unplug_any_sb(struct virtio_mem *vm,
+ unsigned long mb_id, uint64_t *nb_sb)
{
int sb_id, count;
int rc;
- sb_id = vm->nb_sb_per_mb - 1;
+ sb_id = vm->sbm.sbs_per_mb - 1;
while (*nb_sb) {
/* Find the next candidate subblock */
while (sb_id >= 0 &&
- virtio_mem_mb_test_sb_unplugged(vm, mb_id, sb_id, 1))
+ virtio_mem_sbm_test_sb_unplugged(vm, mb_id, sb_id, 1))
sb_id--;
if (sb_id < 0)
break;
/* Try to unplug multiple subblocks at a time */
count = 1;
while (count < *nb_sb && sb_id > 0 &&
- virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id - 1, 1)) {
+ virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id - 1, 1)) {
count++;
sb_id--;
}
- rc = virtio_mem_mb_unplug_sb(vm, mb_id, sb_id, count);
+ rc = virtio_mem_sbm_unplug_sb(vm, mb_id, sb_id, count);
if (rc)
return rc;
*nb_sb -= count;
@@ -1025,63 +1492,50 @@ static int virtio_mem_mb_unplug_any_sb(struct virtio_mem *vm,
*
* Note: can fail after some subblocks were unplugged.
*/
-static int virtio_mem_mb_unplug(struct virtio_mem *vm, unsigned long mb_id)
+static int virtio_mem_sbm_unplug_mb(struct virtio_mem *vm, unsigned long mb_id)
{
- uint64_t nb_sb = vm->nb_sb_per_mb;
+ uint64_t nb_sb = vm->sbm.sbs_per_mb;
- return virtio_mem_mb_unplug_any_sb(vm, mb_id, &nb_sb);
+ return virtio_mem_sbm_unplug_any_sb(vm, mb_id, &nb_sb);
}
/*
* Prepare tracking data for the next memory block.
*/
-static int virtio_mem_prepare_next_mb(struct virtio_mem *vm,
- unsigned long *mb_id)
+static int virtio_mem_sbm_prepare_next_mb(struct virtio_mem *vm,
+ unsigned long *mb_id)
{
int rc;
- if (vm->next_mb_id > vm->last_usable_mb_id)
+ if (vm->sbm.next_mb_id > vm->sbm.last_usable_mb_id)
return -ENOSPC;
/* Resize the state array if required. */
- rc = virtio_mem_mb_state_prepare_next_mb(vm);
+ rc = virtio_mem_sbm_mb_states_prepare_next_mb(vm);
if (rc)
return rc;
/* Resize the subblock bitmap if required. */
- rc = virtio_mem_sb_bitmap_prepare_next_mb(vm);
+ rc = virtio_mem_sbm_sb_states_prepare_next_mb(vm);
if (rc)
return rc;
- vm->nb_mb_state[VIRTIO_MEM_MB_STATE_UNUSED]++;
- *mb_id = vm->next_mb_id++;
+ vm->sbm.mb_count[VIRTIO_MEM_SBM_MB_UNUSED]++;
+ *mb_id = vm->sbm.next_mb_id++;
return 0;
}
/*
- * Don't add too many blocks that are not onlined yet to avoid running OOM.
- */
-static bool virtio_mem_too_many_mb_offline(struct virtio_mem *vm)
-{
- unsigned long nb_offline;
-
- nb_offline = vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE] +
- vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL];
- return nb_offline >= VIRTIO_MEM_NB_OFFLINE_THRESHOLD;
-}
-
-/*
* Try to plug the desired number of subblocks and add the memory block
* to Linux.
*
* Will modify the state of the memory block.
*/
-static int virtio_mem_mb_plug_and_add(struct virtio_mem *vm,
- unsigned long mb_id,
- uint64_t *nb_sb)
+static int virtio_mem_sbm_plug_and_add_mb(struct virtio_mem *vm,
+ unsigned long mb_id, uint64_t *nb_sb)
{
- const int count = min_t(int, *nb_sb, vm->nb_sb_per_mb);
- int rc, rc2;
+ const int count = min_t(int, *nb_sb, vm->sbm.sbs_per_mb);
+ int rc;
if (WARN_ON_ONCE(!count))
return -EINVAL;
@@ -1090,7 +1544,7 @@ static int virtio_mem_mb_plug_and_add(struct virtio_mem *vm,
* Plug the requested number of subblocks before adding it to linux,
* so that onlining will directly online all plugged subblocks.
*/
- rc = virtio_mem_mb_plug_sb(vm, mb_id, 0, count);
+ rc = virtio_mem_sbm_plug_sb(vm, mb_id, 0, count);
if (rc)
return rc;
@@ -1098,29 +1552,21 @@ static int virtio_mem_mb_plug_and_add(struct virtio_mem *vm,
* Mark the block properly offline before adding it to Linux,
* so the memory notifiers will find the block in the right state.
*/
- if (count == vm->nb_sb_per_mb)
- virtio_mem_mb_set_state(vm, mb_id,
- VIRTIO_MEM_MB_STATE_OFFLINE);
+ if (count == vm->sbm.sbs_per_mb)
+ virtio_mem_sbm_set_mb_state(vm, mb_id,
+ VIRTIO_MEM_SBM_MB_OFFLINE);
else
- virtio_mem_mb_set_state(vm, mb_id,
- VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL);
+ virtio_mem_sbm_set_mb_state(vm, mb_id,
+ VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL);
/* Add the memory block to linux - if that fails, try to unplug. */
- rc = virtio_mem_mb_add(vm, mb_id);
+ rc = virtio_mem_sbm_add_mb(vm, mb_id);
if (rc) {
- enum virtio_mem_mb_state new_state = VIRTIO_MEM_MB_STATE_UNUSED;
+ int new_state = VIRTIO_MEM_SBM_MB_UNUSED;
- dev_err(&vm->vdev->dev,
- "adding memory block %lu failed with %d\n", mb_id, rc);
- rc2 = virtio_mem_mb_unplug_sb(vm, mb_id, 0, count);
-
- /*
- * TODO: Linux MM does not properly clean up yet in all cases
- * where adding of memory failed - especially on -ENOMEM.
- */
- if (rc2)
- new_state = VIRTIO_MEM_MB_STATE_PLUGGED;
- virtio_mem_mb_set_state(vm, mb_id, new_state);
+ if (virtio_mem_sbm_unplug_sb(vm, mb_id, 0, count))
+ new_state = VIRTIO_MEM_SBM_MB_PLUGGED;
+ virtio_mem_sbm_set_mb_state(vm, mb_id, new_state);
return rc;
}
@@ -1136,8 +1582,9 @@ static int virtio_mem_mb_plug_and_add(struct virtio_mem *vm,
*
* Note: Can fail after some subblocks were successfully plugged.
*/
-static int virtio_mem_mb_plug_any_sb(struct virtio_mem *vm, unsigned long mb_id,
- uint64_t *nb_sb, bool online)
+static int virtio_mem_sbm_plug_any_sb(struct virtio_mem *vm,
+ unsigned long mb_id, uint64_t *nb_sb,
+ bool online)
{
unsigned long pfn, nr_pages;
int sb_id, count;
@@ -1147,17 +1594,16 @@ static int virtio_mem_mb_plug_any_sb(struct virtio_mem *vm, unsigned long mb_id,
return -EINVAL;
while (*nb_sb) {
- sb_id = virtio_mem_mb_first_unplugged_sb(vm, mb_id);
- if (sb_id >= vm->nb_sb_per_mb)
+ sb_id = virtio_mem_sbm_first_unplugged_sb(vm, mb_id);
+ if (sb_id >= vm->sbm.sbs_per_mb)
break;
count = 1;
while (count < *nb_sb &&
- sb_id + count < vm->nb_sb_per_mb &&
- !virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id + count,
- 1))
+ sb_id + count < vm->sbm.sbs_per_mb &&
+ !virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id + count, 1))
count++;
- rc = virtio_mem_mb_plug_sb(vm, mb_id, sb_id, count);
+ rc = virtio_mem_sbm_plug_sb(vm, mb_id, sb_id, count);
if (rc)
return rc;
*nb_sb -= count;
@@ -1166,29 +1612,26 @@ static int virtio_mem_mb_plug_any_sb(struct virtio_mem *vm, unsigned long mb_id,
/* fake-online the pages if the memory block is online */
pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
- sb_id * vm->subblock_size);
- nr_pages = PFN_DOWN(count * vm->subblock_size);
+ sb_id * vm->sbm.sb_size);
+ nr_pages = PFN_DOWN(count * vm->sbm.sb_size);
virtio_mem_fake_online(pfn, nr_pages);
}
- if (virtio_mem_mb_test_sb_plugged(vm, mb_id, 0, vm->nb_sb_per_mb)) {
+ if (virtio_mem_sbm_test_sb_plugged(vm, mb_id, 0, vm->sbm.sbs_per_mb)) {
if (online)
- virtio_mem_mb_set_state(vm, mb_id,
- VIRTIO_MEM_MB_STATE_ONLINE);
+ virtio_mem_sbm_set_mb_state(vm, mb_id,
+ VIRTIO_MEM_SBM_MB_ONLINE);
else
- virtio_mem_mb_set_state(vm, mb_id,
- VIRTIO_MEM_MB_STATE_OFFLINE);
+ virtio_mem_sbm_set_mb_state(vm, mb_id,
+ VIRTIO_MEM_SBM_MB_OFFLINE);
}
return 0;
}
-/*
- * Try to plug the requested amount of memory.
- */
-static int virtio_mem_plug_request(struct virtio_mem *vm, uint64_t diff)
+static int virtio_mem_sbm_plug_request(struct virtio_mem *vm, uint64_t diff)
{
- uint64_t nb_sb = diff / vm->subblock_size;
+ uint64_t nb_sb = diff / vm->sbm.sb_size;
unsigned long mb_id;
int rc;
@@ -1199,18 +1642,18 @@ static int virtio_mem_plug_request(struct virtio_mem *vm, uint64_t diff)
mutex_lock(&vm->hotplug_mutex);
/* Try to plug subblocks of partially plugged online blocks. */
- virtio_mem_for_each_mb_state(vm, mb_id,
- VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL) {
- rc = virtio_mem_mb_plug_any_sb(vm, mb_id, &nb_sb, true);
+ virtio_mem_sbm_for_each_mb(vm, mb_id,
+ VIRTIO_MEM_SBM_MB_ONLINE_PARTIAL) {
+ rc = virtio_mem_sbm_plug_any_sb(vm, mb_id, &nb_sb, true);
if (rc || !nb_sb)
goto out_unlock;
cond_resched();
}
/* Try to plug subblocks of partially plugged offline blocks. */
- virtio_mem_for_each_mb_state(vm, mb_id,
- VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL) {
- rc = virtio_mem_mb_plug_any_sb(vm, mb_id, &nb_sb, false);
+ virtio_mem_sbm_for_each_mb(vm, mb_id,
+ VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL) {
+ rc = virtio_mem_sbm_plug_any_sb(vm, mb_id, &nb_sb, false);
if (rc || !nb_sb)
goto out_unlock;
cond_resched();
@@ -1223,11 +1666,11 @@ static int virtio_mem_plug_request(struct virtio_mem *vm, uint64_t diff)
mutex_unlock(&vm->hotplug_mutex);
/* Try to plug and add unused blocks */
- virtio_mem_for_each_mb_state(vm, mb_id, VIRTIO_MEM_MB_STATE_UNUSED) {
- if (virtio_mem_too_many_mb_offline(vm))
+ virtio_mem_sbm_for_each_mb(vm, mb_id, VIRTIO_MEM_SBM_MB_UNUSED) {
+ if (!virtio_mem_could_add_memory(vm, memory_block_size_bytes()))
return -ENOSPC;
- rc = virtio_mem_mb_plug_and_add(vm, mb_id, &nb_sb);
+ rc = virtio_mem_sbm_plug_and_add_mb(vm, mb_id, &nb_sb);
if (rc || !nb_sb)
return rc;
cond_resched();
@@ -1235,13 +1678,13 @@ static int virtio_mem_plug_request(struct virtio_mem *vm, uint64_t diff)
/* Try to prepare, plug and add new blocks */
while (nb_sb) {
- if (virtio_mem_too_many_mb_offline(vm))
+ if (!virtio_mem_could_add_memory(vm, memory_block_size_bytes()))
return -ENOSPC;
- rc = virtio_mem_prepare_next_mb(vm, &mb_id);
+ rc = virtio_mem_sbm_prepare_next_mb(vm, &mb_id);
if (rc)
return rc;
- rc = virtio_mem_mb_plug_and_add(vm, mb_id, &nb_sb);
+ rc = virtio_mem_sbm_plug_and_add_mb(vm, mb_id, &nb_sb);
if (rc)
return rc;
cond_resched();
@@ -1254,6 +1697,112 @@ out_unlock:
}
/*
+ * Plug a big block and add it to Linux.
+ *
+ * Will modify the state of the big block.
+ */
+static int virtio_mem_bbm_plug_and_add_bb(struct virtio_mem *vm,
+ unsigned long bb_id)
+{
+ int rc;
+
+ if (WARN_ON_ONCE(virtio_mem_bbm_get_bb_state(vm, bb_id) !=
+ VIRTIO_MEM_BBM_BB_UNUSED))
+ return -EINVAL;
+
+ rc = virtio_mem_bbm_plug_bb(vm, bb_id);
+ if (rc)
+ return rc;
+ virtio_mem_bbm_set_bb_state(vm, bb_id, VIRTIO_MEM_BBM_BB_ADDED);
+
+ rc = virtio_mem_bbm_add_bb(vm, bb_id);
+ if (rc) {
+ if (!virtio_mem_bbm_unplug_bb(vm, bb_id))
+ virtio_mem_bbm_set_bb_state(vm, bb_id,
+ VIRTIO_MEM_BBM_BB_UNUSED);
+ else
+ /* Retry from the main loop. */
+ virtio_mem_bbm_set_bb_state(vm, bb_id,
+ VIRTIO_MEM_BBM_BB_PLUGGED);
+ return rc;
+ }
+ return 0;
+}
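
The error handling above is a two-step-with-rollback shape: plug, record
progress, try to add; on failure either roll back fully (UNUSED) or park the
block for the main loop to retry (PLUGGED). A distilled, runnable sketch of
just that control flow, with hypothetical stubs in place of the hypervisor and
add_memory() calls:

#include <assert.h>

enum bb_state { BB_UNUSED, BB_PLUGGED, BB_ADDED };

/* Hypothetical stubs: the hypervisor ACKs, but adding to Linux fails. */
static int plug(void)   { return 0; }
static int add(void)    { return -12; }         /* pretend -ENOMEM */
static int unplug(void) { return 0; }

static int plug_and_add(enum bb_state *state)
{
        int rc = plug();

        if (rc)
                return rc;                      /* nothing to roll back yet */
        *state = BB_ADDED;

        rc = add();
        if (rc) {
                if (!unplug())
                        *state = BB_UNUSED;     /* full rollback */
                else
                        *state = BB_PLUGGED;    /* park it; main loop retries */
        }
        return rc;
}

int main(void)
{
        enum bb_state state = BB_UNUSED;

        assert(plug_and_add(&state) == -12 && state == BB_UNUSED);
        return 0;
}
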
+
+/*
+ * Prepare tracking data for the next big block.
+ */
+static int virtio_mem_bbm_prepare_next_bb(struct virtio_mem *vm,
+ unsigned long *bb_id)
+{
+ int rc;
+
+ if (vm->bbm.next_bb_id > vm->bbm.last_usable_bb_id)
+ return -ENOSPC;
+
+ /* Resize the big block state array if required. */
+ rc = virtio_mem_bbm_bb_states_prepare_next_bb(vm);
+ if (rc)
+ return rc;
+
+ vm->bbm.bb_count[VIRTIO_MEM_BBM_BB_UNUSED]++;
+ *bb_id = vm->bbm.next_bb_id;
+ vm->bbm.next_bb_id++;
+ return 0;
+}
+
+static int virtio_mem_bbm_plug_request(struct virtio_mem *vm, uint64_t diff)
+{
+ uint64_t nb_bb = diff / vm->bbm.bb_size;
+ unsigned long bb_id;
+ int rc;
+
+ if (!nb_bb)
+ return 0;
+
+ /* Try to plug and add unused big blocks */
+ virtio_mem_bbm_for_each_bb(vm, bb_id, VIRTIO_MEM_BBM_BB_UNUSED) {
+ if (!virtio_mem_could_add_memory(vm, vm->bbm.bb_size))
+ return -ENOSPC;
+
+ rc = virtio_mem_bbm_plug_and_add_bb(vm, bb_id);
+ if (!rc)
+ nb_bb--;
+ if (rc || !nb_bb)
+ return rc;
+ cond_resched();
+ }
+
+ /* Try to prepare, plug and add new big blocks */
+ while (nb_bb) {
+ if (!virtio_mem_could_add_memory(vm, vm->bbm.bb_size))
+ return -ENOSPC;
+
+ rc = virtio_mem_bbm_prepare_next_bb(vm, &bb_id);
+ if (rc)
+ return rc;
+ rc = virtio_mem_bbm_plug_and_add_bb(vm, bb_id);
+ if (!rc)
+ nb_bb--;
+ if (rc)
+ return rc;
+ cond_resched();
+ }
+
+ return 0;
+}
+
+/*
+ * Try to plug the requested amount of memory.
+ */
+static int virtio_mem_plug_request(struct virtio_mem *vm, uint64_t diff)
+{
+ if (vm->in_sbm)
+ return virtio_mem_sbm_plug_request(vm, diff);
+ return virtio_mem_bbm_plug_request(vm, diff);
+}
+
+/*
* Unplug the desired number of plugged subblocks of an offline memory block.
* Will fail if any subblock cannot get unplugged (instead of skipping it).
*
@@ -1262,33 +1811,33 @@ out_unlock:
*
* Note: Can fail after some subblocks were successfully unplugged.
*/
-static int virtio_mem_mb_unplug_any_sb_offline(struct virtio_mem *vm,
- unsigned long mb_id,
- uint64_t *nb_sb)
+static int virtio_mem_sbm_unplug_any_sb_offline(struct virtio_mem *vm,
+ unsigned long mb_id,
+ uint64_t *nb_sb)
{
int rc;
- rc = virtio_mem_mb_unplug_any_sb(vm, mb_id, nb_sb);
+ rc = virtio_mem_sbm_unplug_any_sb(vm, mb_id, nb_sb);
/* some subblocks might have been unplugged even on failure */
- if (!virtio_mem_mb_test_sb_plugged(vm, mb_id, 0, vm->nb_sb_per_mb))
- virtio_mem_mb_set_state(vm, mb_id,
- VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL);
+ if (!virtio_mem_sbm_test_sb_plugged(vm, mb_id, 0, vm->sbm.sbs_per_mb))
+ virtio_mem_sbm_set_mb_state(vm, mb_id,
+ VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL);
if (rc)
return rc;
- if (virtio_mem_mb_test_sb_unplugged(vm, mb_id, 0, vm->nb_sb_per_mb)) {
+ if (virtio_mem_sbm_test_sb_unplugged(vm, mb_id, 0, vm->sbm.sbs_per_mb)) {
/*
* Remove the block from Linux - this should never fail.
* Hinder the block from getting onlined by marking it
* unplugged. Temporarily drop the mutex, so
* any pending GOING_ONLINE requests can be serviced/rejected.
*/
- virtio_mem_mb_set_state(vm, mb_id,
- VIRTIO_MEM_MB_STATE_UNUSED);
+ virtio_mem_sbm_set_mb_state(vm, mb_id,
+ VIRTIO_MEM_SBM_MB_UNUSED);
mutex_unlock(&vm->hotplug_mutex);
- rc = virtio_mem_mb_remove(vm, mb_id);
+ rc = virtio_mem_sbm_remove_mb(vm, mb_id);
BUG_ON(rc);
mutex_lock(&vm->hotplug_mutex);
}
@@ -1300,38 +1849,31 @@ static int virtio_mem_mb_unplug_any_sb_offline(struct virtio_mem *vm,
*
* Will modify the state of the memory block.
*/
-static int virtio_mem_mb_unplug_sb_online(struct virtio_mem *vm,
- unsigned long mb_id, int sb_id,
- int count)
+static int virtio_mem_sbm_unplug_sb_online(struct virtio_mem *vm,
+ unsigned long mb_id, int sb_id,
+ int count)
{
- const unsigned long nr_pages = PFN_DOWN(vm->subblock_size) * count;
+ const unsigned long nr_pages = PFN_DOWN(vm->sbm.sb_size) * count;
unsigned long start_pfn;
int rc;
start_pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
- sb_id * vm->subblock_size);
- rc = alloc_contig_range(start_pfn, start_pfn + nr_pages,
- MIGRATE_MOVABLE, GFP_KERNEL);
- if (rc == -ENOMEM)
- /* whoops, out of memory */
- return rc;
- if (rc)
- return -EBUSY;
+ sb_id * vm->sbm.sb_size);
- /* Mark it as fake-offline before unplugging it */
- virtio_mem_set_fake_offline(start_pfn, nr_pages, true);
- adjust_managed_page_count(pfn_to_page(start_pfn), -nr_pages);
+ rc = virtio_mem_fake_offline(start_pfn, nr_pages);
+ if (rc)
+ return rc;
/* Try to unplug the allocated memory */
- rc = virtio_mem_mb_unplug_sb(vm, mb_id, sb_id, count);
+ rc = virtio_mem_sbm_unplug_sb(vm, mb_id, sb_id, count);
if (rc) {
/* Return the memory to the buddy. */
virtio_mem_fake_online(start_pfn, nr_pages);
return rc;
}
- virtio_mem_mb_set_state(vm, mb_id,
- VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL);
+ virtio_mem_sbm_set_mb_state(vm, mb_id,
+ VIRTIO_MEM_SBM_MB_ONLINE_PARTIAL);
return 0;
}
@@ -1345,34 +1887,34 @@ static int virtio_mem_mb_unplug_sb_online(struct virtio_mem *vm,
* Note: Can fail after some subblocks were successfully unplugged. Can
* return 0 even if subblocks were busy and could not get unplugged.
*/
-static int virtio_mem_mb_unplug_any_sb_online(struct virtio_mem *vm,
- unsigned long mb_id,
- uint64_t *nb_sb)
+static int virtio_mem_sbm_unplug_any_sb_online(struct virtio_mem *vm,
+ unsigned long mb_id,
+ uint64_t *nb_sb)
{
int rc, sb_id;
/* If possible, try to unplug the complete block in one shot. */
- if (*nb_sb >= vm->nb_sb_per_mb &&
- virtio_mem_mb_test_sb_plugged(vm, mb_id, 0, vm->nb_sb_per_mb)) {
- rc = virtio_mem_mb_unplug_sb_online(vm, mb_id, 0,
- vm->nb_sb_per_mb);
+ if (*nb_sb >= vm->sbm.sbs_per_mb &&
+ virtio_mem_sbm_test_sb_plugged(vm, mb_id, 0, vm->sbm.sbs_per_mb)) {
+ rc = virtio_mem_sbm_unplug_sb_online(vm, mb_id, 0,
+ vm->sbm.sbs_per_mb);
if (!rc) {
- *nb_sb -= vm->nb_sb_per_mb;
+ *nb_sb -= vm->sbm.sbs_per_mb;
goto unplugged;
} else if (rc != -EBUSY)
return rc;
}
/* Fallback to single subblocks. */
- for (sb_id = vm->nb_sb_per_mb - 1; sb_id >= 0 && *nb_sb; sb_id--) {
+ for (sb_id = vm->sbm.sbs_per_mb - 1; sb_id >= 0 && *nb_sb; sb_id--) {
/* Find the next candidate subblock */
while (sb_id >= 0 &&
- !virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id, 1))
+ !virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id, 1))
sb_id--;
if (sb_id < 0)
break;
- rc = virtio_mem_mb_unplug_sb_online(vm, mb_id, sb_id, 1);
+ rc = virtio_mem_sbm_unplug_sb_online(vm, mb_id, sb_id, 1);
if (rc == -EBUSY)
continue;
else if (rc)
@@ -1386,24 +1928,21 @@ unplugged:
* remove it. This will usually not fail, as no memory is in use
* anymore - however some other notifiers might NACK the request.
*/
- if (virtio_mem_mb_test_sb_unplugged(vm, mb_id, 0, vm->nb_sb_per_mb)) {
+ if (virtio_mem_sbm_test_sb_unplugged(vm, mb_id, 0, vm->sbm.sbs_per_mb)) {
mutex_unlock(&vm->hotplug_mutex);
- rc = virtio_mem_mb_offline_and_remove(vm, mb_id);
+ rc = virtio_mem_sbm_offline_and_remove_mb(vm, mb_id);
mutex_lock(&vm->hotplug_mutex);
if (!rc)
- virtio_mem_mb_set_state(vm, mb_id,
- VIRTIO_MEM_MB_STATE_UNUSED);
+ virtio_mem_sbm_set_mb_state(vm, mb_id,
+ VIRTIO_MEM_SBM_MB_UNUSED);
}
return 0;
}
-/*
- * Try to unplug the requested amount of memory.
- */
-static int virtio_mem_unplug_request(struct virtio_mem *vm, uint64_t diff)
+static int virtio_mem_sbm_unplug_request(struct virtio_mem *vm, uint64_t diff)
{
- uint64_t nb_sb = diff / vm->subblock_size;
+ uint64_t nb_sb = diff / vm->sbm.sb_size;
unsigned long mb_id;
int rc;
@@ -1418,20 +1957,17 @@ static int virtio_mem_unplug_request(struct virtio_mem *vm, uint64_t diff)
mutex_lock(&vm->hotplug_mutex);
/* Try to unplug subblocks of partially plugged offline blocks. */
- virtio_mem_for_each_mb_state_rev(vm, mb_id,
- VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL) {
- rc = virtio_mem_mb_unplug_any_sb_offline(vm, mb_id,
- &nb_sb);
+ virtio_mem_sbm_for_each_mb_rev(vm, mb_id,
+ VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL) {
+ rc = virtio_mem_sbm_unplug_any_sb_offline(vm, mb_id, &nb_sb);
if (rc || !nb_sb)
goto out_unlock;
cond_resched();
}
/* Try to unplug subblocks of plugged offline blocks. */
- virtio_mem_for_each_mb_state_rev(vm, mb_id,
- VIRTIO_MEM_MB_STATE_OFFLINE) {
- rc = virtio_mem_mb_unplug_any_sb_offline(vm, mb_id,
- &nb_sb);
+ virtio_mem_sbm_for_each_mb_rev(vm, mb_id, VIRTIO_MEM_SBM_MB_OFFLINE) {
+ rc = virtio_mem_sbm_unplug_any_sb_offline(vm, mb_id, &nb_sb);
if (rc || !nb_sb)
goto out_unlock;
cond_resched();
@@ -1443,10 +1979,9 @@ static int virtio_mem_unplug_request(struct virtio_mem *vm, uint64_t diff)
}
/* Try to unplug subblocks of partially plugged online blocks. */
- virtio_mem_for_each_mb_state_rev(vm, mb_id,
- VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL) {
- rc = virtio_mem_mb_unplug_any_sb_online(vm, mb_id,
- &nb_sb);
+ virtio_mem_sbm_for_each_mb_rev(vm, mb_id,
+ VIRTIO_MEM_SBM_MB_ONLINE_PARTIAL) {
+ rc = virtio_mem_sbm_unplug_any_sb_online(vm, mb_id, &nb_sb);
if (rc || !nb_sb)
goto out_unlock;
mutex_unlock(&vm->hotplug_mutex);
@@ -1455,10 +1990,8 @@ static int virtio_mem_unplug_request(struct virtio_mem *vm, uint64_t diff)
}
/* Try to unplug subblocks of plugged online blocks. */
- virtio_mem_for_each_mb_state_rev(vm, mb_id,
- VIRTIO_MEM_MB_STATE_ONLINE) {
- rc = virtio_mem_mb_unplug_any_sb_online(vm, mb_id,
- &nb_sb);
+ virtio_mem_sbm_for_each_mb_rev(vm, mb_id, VIRTIO_MEM_SBM_MB_ONLINE) {
+ rc = virtio_mem_sbm_unplug_any_sb_online(vm, mb_id, &nb_sb);
if (rc || !nb_sb)
goto out_unlock;
mutex_unlock(&vm->hotplug_mutex);
@@ -1474,19 +2007,211 @@ out_unlock:
}
/*
+ * Try to offline and remove a big block from Linux and unplug it. Will fail
+ * with -EBUSY if some memory is busy and cannot get unplugged.
+ *
+ * Will modify the state of the memory block. Might temporarily drop the
+ * hotplug_mutex.
+ */
+static int virtio_mem_bbm_offline_remove_and_unplug_bb(struct virtio_mem *vm,
+ unsigned long bb_id)
+{
+ const unsigned long start_pfn = PFN_DOWN(virtio_mem_bb_id_to_phys(vm, bb_id));
+ const unsigned long nr_pages = PFN_DOWN(vm->bbm.bb_size);
+ unsigned long end_pfn = start_pfn + nr_pages;
+ unsigned long pfn;
+ struct page *page;
+ int rc;
+
+ if (WARN_ON_ONCE(virtio_mem_bbm_get_bb_state(vm, bb_id) !=
+ VIRTIO_MEM_BBM_BB_ADDED))
+ return -EINVAL;
+
+ if (bbm_safe_unplug) {
+ /*
+ * Start by fake-offlining all memory. Once we marked the device
+ * block as fake-offline, all newly onlined memory will
+ * automatically be kept fake-offline. Protect from concurrent
+ * onlining/offlining until we have a consistent state.
+ */
+ mutex_lock(&vm->hotplug_mutex);
+ virtio_mem_bbm_set_bb_state(vm, bb_id,
+ VIRTIO_MEM_BBM_BB_FAKE_OFFLINE);
+
+ for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
+ page = pfn_to_online_page(pfn);
+ if (!page)
+ continue;
+
+ rc = virtio_mem_fake_offline(pfn, PAGES_PER_SECTION);
+ if (rc) {
+ end_pfn = pfn;
+ goto rollback_safe_unplug;
+ }
+ }
+ mutex_unlock(&vm->hotplug_mutex);
+ }
+
+ rc = virtio_mem_bbm_offline_and_remove_bb(vm, bb_id);
+ if (rc) {
+ if (bbm_safe_unplug) {
+ mutex_lock(&vm->hotplug_mutex);
+ goto rollback_safe_unplug;
+ }
+ return rc;
+ }
+
+ rc = virtio_mem_bbm_unplug_bb(vm, bb_id);
+ if (rc)
+ virtio_mem_bbm_set_bb_state(vm, bb_id,
+ VIRTIO_MEM_BBM_BB_PLUGGED);
+ else
+ virtio_mem_bbm_set_bb_state(vm, bb_id,
+ VIRTIO_MEM_BBM_BB_UNUSED);
+ return rc;
+
+rollback_safe_unplug:
+ for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
+ page = pfn_to_online_page(pfn);
+ if (!page)
+ continue;
+ virtio_mem_fake_online(pfn, PAGES_PER_SECTION);
+ }
+ virtio_mem_bbm_set_bb_state(vm, bb_id, VIRTIO_MEM_BBM_BB_ADDED);
+ mutex_unlock(&vm->hotplug_mutex);
+ return rc;
+}
+
+/*
+ * Try to remove a big block from Linux and unplug it. Will fail with
+ * -EBUSY if some memory is online.
+ *
+ * Will modify the state of the memory block.
+ */
+static int virtio_mem_bbm_remove_and_unplug_bb(struct virtio_mem *vm,
+ unsigned long bb_id)
+{
+ int rc;
+
+ if (WARN_ON_ONCE(virtio_mem_bbm_get_bb_state(vm, bb_id) !=
+ VIRTIO_MEM_BBM_BB_ADDED))
+ return -EINVAL;
+
+ rc = virtio_mem_bbm_remove_bb(vm, bb_id);
+ if (rc)
+ return -EBUSY;
+
+ rc = virtio_mem_bbm_unplug_bb(vm, bb_id);
+ if (rc)
+ virtio_mem_bbm_set_bb_state(vm, bb_id,
+ VIRTIO_MEM_BBM_BB_PLUGGED);
+ else
+ virtio_mem_bbm_set_bb_state(vm, bb_id,
+ VIRTIO_MEM_BBM_BB_UNUSED);
+ return rc;
+}
+
+/*
+ * Test if a big block is completely offline.
+ */
+static bool virtio_mem_bbm_bb_is_offline(struct virtio_mem *vm,
+ unsigned long bb_id)
+{
+ const unsigned long start_pfn = PFN_DOWN(virtio_mem_bb_id_to_phys(vm, bb_id));
+ const unsigned long nr_pages = PFN_DOWN(vm->bbm.bb_size);
+ unsigned long pfn;
+
+ for (pfn = start_pfn; pfn < start_pfn + nr_pages;
+ pfn += PAGES_PER_SECTION) {
+ if (pfn_to_online_page(pfn))
+ return false;
+ }
+
+ return true;
+}
+
+static int virtio_mem_bbm_unplug_request(struct virtio_mem *vm, uint64_t diff)
+{
+ uint64_t nb_bb = diff / vm->bbm.bb_size;
+ uint64_t bb_id;
+ int rc;
+
+ if (!nb_bb)
+ return 0;
+
+ /* Try to unplug completely offline big blocks first. */
+ virtio_mem_bbm_for_each_bb_rev(vm, bb_id, VIRTIO_MEM_BBM_BB_ADDED) {
+ cond_resched();
+ /*
+ * As we're holding no locks, this check is racy as memory
+ * can get onlined in the meantime - but we'll fail gracefully.
+ */
+ if (!virtio_mem_bbm_bb_is_offline(vm, bb_id))
+ continue;
+ rc = virtio_mem_bbm_remove_and_unplug_bb(vm, bb_id);
+ if (rc == -EBUSY)
+ continue;
+ if (!rc)
+ nb_bb--;
+ if (rc || !nb_bb)
+ return rc;
+ }
+
+ if (!unplug_online)
+ return 0;
+
+ /* Try to unplug any big blocks. */
+ virtio_mem_bbm_for_each_bb_rev(vm, bb_id, VIRTIO_MEM_BBM_BB_ADDED) {
+ cond_resched();
+ rc = virtio_mem_bbm_offline_remove_and_unplug_bb(vm, bb_id);
+ if (rc == -EBUSY)
+ continue;
+ if (!rc)
+ nb_bb--;
+ if (rc || !nb_bb)
+ return rc;
+ }
+
+ return nb_bb ? -EBUSY : 0;
+}
+
+/*
+ * Try to unplug the requested amount of memory.
+ */
+static int virtio_mem_unplug_request(struct virtio_mem *vm, uint64_t diff)
+{
+ if (vm->in_sbm)
+ return virtio_mem_sbm_unplug_request(vm, diff);
+ return virtio_mem_bbm_unplug_request(vm, diff);
+}
+
+/*
* Try to unplug all blocks that couldn't be unplugged before, for example,
* because the hypervisor was busy.
*/
static int virtio_mem_unplug_pending_mb(struct virtio_mem *vm)
{
- unsigned long mb_id;
+ unsigned long id;
int rc;
- virtio_mem_for_each_mb_state(vm, mb_id, VIRTIO_MEM_MB_STATE_PLUGGED) {
- rc = virtio_mem_mb_unplug(vm, mb_id);
+ if (!vm->in_sbm) {
+ virtio_mem_bbm_for_each_bb(vm, id,
+ VIRTIO_MEM_BBM_BB_PLUGGED) {
+ rc = virtio_mem_bbm_unplug_bb(vm, id);
+ if (rc)
+ return rc;
+ virtio_mem_bbm_set_bb_state(vm, id,
+ VIRTIO_MEM_BBM_BB_UNUSED);
+ }
+ return 0;
+ }
+
+ virtio_mem_sbm_for_each_mb(vm, id, VIRTIO_MEM_SBM_MB_PLUGGED) {
+ rc = virtio_mem_sbm_unplug_mb(vm, id);
if (rc)
return rc;
- virtio_mem_mb_set_state(vm, mb_id, VIRTIO_MEM_MB_STATE_UNUSED);
+ virtio_mem_sbm_set_mb_state(vm, id,
+ VIRTIO_MEM_SBM_MB_UNUSED);
}
return 0;
@@ -1511,7 +2236,13 @@ static void virtio_mem_refresh_config(struct virtio_mem *vm)
usable_region_size, &usable_region_size);
end_addr = vm->addr + usable_region_size;
end_addr = min(end_addr, phys_limit);
- vm->last_usable_mb_id = virtio_mem_phys_to_mb_id(end_addr) - 1;
+
+ if (vm->in_sbm)
+ vm->sbm.last_usable_mb_id =
+ virtio_mem_phys_to_mb_id(end_addr) - 1;
+ else
+ vm->bbm.last_usable_bb_id =
+ virtio_mem_phys_to_bb_id(vm, end_addr) - 1;
/* see if there is a request to change the size */
virtio_cread_le(vm->vdev, struct virtio_mem_config, requested_size,
@@ -1535,6 +2266,7 @@ static void virtio_mem_run_wq(struct work_struct *work)
if (vm->broken)
return;
+ atomic_set(&vm->wq_active, 1);
retry:
rc = 0;
@@ -1595,6 +2327,8 @@ retry:
"unknown error, marking device broken: %d\n", rc);
vm->broken = true;
}
+
+ atomic_set(&vm->wq_active, 0);
}
static enum hrtimer_restart virtio_mem_timer_expired(struct hrtimer *timer)
@@ -1631,6 +2365,7 @@ static int virtio_mem_init_vq(struct virtio_mem *vm)
static int virtio_mem_init(struct virtio_mem *vm)
{
const uint64_t phys_limit = 1UL << MAX_PHYSMEM_BITS;
+ uint64_t sb_size, addr;
uint16_t node_id;
if (!vm->vdev->config->get) {
@@ -1659,15 +2394,9 @@ static int virtio_mem_init(struct virtio_mem *vm)
virtio_cread_le(vm->vdev, struct virtio_mem_config, region_size,
&vm->region_size);
- /*
- * We always hotplug memory in memory block granularity. This way,
- * we have to wait for exactly one memory block to online.
- */
- if (vm->device_block_size > memory_block_size_bytes()) {
- dev_err(&vm->vdev->dev,
- "The block size is not supported (too big).\n");
- return -EINVAL;
- }
+ /* Determine the nid for the device based on the lowest address. */
+ if (vm->nid == NUMA_NO_NODE)
+ vm->nid = memory_add_physaddr_to_nid(vm->addr);
/* bad device setup - warn only */
if (!IS_ALIGNED(vm->addr, memory_block_size_bytes()))
@@ -1681,23 +2410,57 @@ static int virtio_mem_init(struct virtio_mem *vm)
"Some memory is not addressable. This can make some memory unusable.\n");
/*
- * Calculate the subblock size:
- * - At least MAX_ORDER - 1 / pageblock_order.
- * - At least the device block size.
- * In the worst case, a single subblock per memory block.
+ * We want subblocks to span at least MAX_ORDER_NR_PAGES and
+ * pageblock_nr_pages pages. This:
+ * - Simplifies our page onlining code (virtio_mem_online_page_cb)
+ * and fake page onlining code (virtio_mem_fake_online).
+ * - Is required for now for alloc_contig_range() to work reliably -
+ * it doesn't properly handle smaller granularity on ZONE_NORMAL.
*/
- vm->subblock_size = PAGE_SIZE * 1ul << max_t(uint32_t, MAX_ORDER - 1,
- pageblock_order);
- vm->subblock_size = max_t(uint64_t, vm->device_block_size,
- vm->subblock_size);
- vm->nb_sb_per_mb = memory_block_size_bytes() / vm->subblock_size;
-
- /* Round up to the next full memory block */
- vm->first_mb_id = virtio_mem_phys_to_mb_id(vm->addr - 1 +
- memory_block_size_bytes());
- vm->next_mb_id = vm->first_mb_id;
- vm->last_mb_id = virtio_mem_phys_to_mb_id(vm->addr +
- vm->region_size) - 1;
+ sb_size = max_t(uint64_t, MAX_ORDER_NR_PAGES,
+ pageblock_nr_pages) * PAGE_SIZE;
+ sb_size = max_t(uint64_t, vm->device_block_size, sb_size);
+
+ if (sb_size < memory_block_size_bytes() && !force_bbm) {
+ /* SBM: At least two subblocks per Linux memory block. */
+ vm->in_sbm = true;
+ vm->sbm.sb_size = sb_size;
+ vm->sbm.sbs_per_mb = memory_block_size_bytes() /
+ vm->sbm.sb_size;
+
+ /* Round up to the next full memory block */
+ addr = vm->addr + memory_block_size_bytes() - 1;
+ vm->sbm.first_mb_id = virtio_mem_phys_to_mb_id(addr);
+ vm->sbm.next_mb_id = vm->sbm.first_mb_id;
+ } else {
+ /* BBM: At least one Linux memory block. */
+ vm->bbm.bb_size = max_t(uint64_t, vm->device_block_size,
+ memory_block_size_bytes());
+
+ if (bbm_block_size) {
+ if (!is_power_of_2(bbm_block_size)) {
+ dev_warn(&vm->vdev->dev,
+ "bbm_block_size is not a power of 2");
+ } else if (bbm_block_size < vm->bbm.bb_size) {
+ dev_warn(&vm->vdev->dev,
+ "bbm_block_size is too small");
+ } else {
+ vm->bbm.bb_size = bbm_block_size;
+ }
+ }
+
+ /* Round up to the next aligned big block */
+ addr = vm->addr + vm->bbm.bb_size - 1;
+ vm->bbm.first_bb_id = virtio_mem_phys_to_bb_id(vm, addr);
+ vm->bbm.next_bb_id = vm->bbm.first_bb_id;
+ }
+
+ /* Prepare the offline threshold - make sure we can add two blocks. */
+ vm->offline_threshold = max_t(uint64_t, 2 * memory_block_size_bytes(),
+ VIRTIO_MEM_DEFAULT_OFFLINE_THRESHOLD);
+ /* In BBM, we also want at least two big blocks. */
+ vm->offline_threshold = max_t(uint64_t, 2 * vm->bbm.bb_size,
+ vm->offline_threshold);
dev_info(&vm->vdev->dev, "start address: 0x%llx", vm->addr);
dev_info(&vm->vdev->dev, "region size: 0x%llx", vm->region_size);
@@ -1705,9 +2468,13 @@ static int virtio_mem_init(struct virtio_mem *vm)
(unsigned long long)vm->device_block_size);
dev_info(&vm->vdev->dev, "memory block size: 0x%lx",
memory_block_size_bytes());
- dev_info(&vm->vdev->dev, "subblock size: 0x%llx",
- (unsigned long long)vm->subblock_size);
- if (vm->nid != NUMA_NO_NODE)
+ if (vm->in_sbm)
+ dev_info(&vm->vdev->dev, "subblock size: 0x%llx",
+ (unsigned long long)vm->sbm.sb_size);
+ else
+ dev_info(&vm->vdev->dev, "big block size: 0x%llx",
+ (unsigned long long)vm->bbm.bb_size);
+ if (vm->nid != NUMA_NO_NODE && IS_ENABLED(CONFIG_NUMA))
dev_info(&vm->vdev->dev, "nid: %d", vm->nid);
return 0;
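
    For illustration, the sizing decision above can be worked through with
    typical x86-64 defaults (4 KiB pages, MAX_ORDER_NR_PAGES = 1024, 2 MiB
    pageblocks, 128 MiB Linux memory blocks) and an assumed 2 MiB device
    block size; none of these constants come from the patch itself. A
    minimal userspace sketch:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE          4096ULL
    #define MAX_ORDER_NR_PAGES 1024ULL          /* assumes MAX_ORDER == 11 */
    #define PAGEBLOCK_NR_PAGES 512ULL           /* 2 MiB pageblocks */
    #define MEMORY_BLOCK_SIZE  (128ULL << 20)   /* 128 MiB memory blocks */

    int main(void)
    {
        uint64_t device_block_size = 2ULL << 20; /* assumed 2 MiB */
        uint64_t sb_size;
        bool in_sbm;

        /* Subblocks span at least MAX_ORDER_NR_PAGES / pageblock pages. */
        sb_size = (MAX_ORDER_NR_PAGES > PAGEBLOCK_NR_PAGES ?
                   MAX_ORDER_NR_PAGES : PAGEBLOCK_NR_PAGES) * PAGE_SIZE;
        if (device_block_size > sb_size)
            sb_size = device_block_size;

        in_sbm = sb_size < MEMORY_BLOCK_SIZE;    /* force_bbm ignored here */
        if (in_sbm)
            printf("SBM: sb_size=%llu MiB, %llu subblocks per memory block\n",
                   (unsigned long long)(sb_size >> 20),
                   (unsigned long long)(MEMORY_BLOCK_SIZE / sb_size));
        else
            printf("BBM: bb_size=%llu MiB\n",
                   (unsigned long long)(sb_size >> 20));
        return 0;
    }

    With these numbers sb_size comes out at 4 MiB, well below the 128 MiB
    memory block, so the device runs in SBM with 32 subblocks per memory
    block; a device block size of, say, 256 MiB would flip it into BBM.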
@@ -1753,6 +2520,20 @@ static void virtio_mem_delete_resource(struct virtio_mem *vm)
vm->parent_resource = NULL;
}
+static int virtio_mem_range_has_system_ram(struct resource *res, void *arg)
+{
+ return 1;
+}
+
+static bool virtio_mem_has_memory_added(struct virtio_mem *vm)
+{
+ const unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
+
+ return walk_iomem_res_desc(IORES_DESC_NONE, flags, vm->addr,
+ vm->addr + vm->region_size, NULL,
+ virtio_mem_range_has_system_ram) == 1;
+}
+
static int virtio_mem_probe(struct virtio_device *vdev)
{
struct virtio_mem *vm;
@@ -1849,21 +2630,24 @@ static void virtio_mem_remove(struct virtio_device *vdev)
cancel_work_sync(&vm->wq);
hrtimer_cancel(&vm->retry_timer);
- /*
- * After we unregistered our callbacks, user space can online partially
- * plugged offline blocks. Make sure to remove them.
- */
- virtio_mem_for_each_mb_state(vm, mb_id,
- VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL) {
- rc = virtio_mem_mb_remove(vm, mb_id);
- BUG_ON(rc);
- virtio_mem_mb_set_state(vm, mb_id, VIRTIO_MEM_MB_STATE_UNUSED);
+ if (vm->in_sbm) {
+ /*
+ * After we unregistered our callbacks, user space can online
+ * partially plugged offline blocks. Make sure to remove them.
+ */
+ virtio_mem_sbm_for_each_mb(vm, mb_id,
+ VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL) {
+ rc = virtio_mem_sbm_remove_mb(vm, mb_id);
+ BUG_ON(rc);
+ virtio_mem_sbm_set_mb_state(vm, mb_id,
+ VIRTIO_MEM_SBM_MB_UNUSED);
+ }
+ /*
+ * After we unregistered our callbacks, user space can no longer
+ * offline partially plugged online memory blocks. No need to
+ * worry about them.
+ */
}
- /*
- * After we unregistered our callbacks, user space can no longer
- * offline partially plugged online memory blocks. No need to worry
- * about them.
- */
/* unregister callbacks */
unregister_virtio_mem_device(vm);
@@ -1874,10 +2658,7 @@ static void virtio_mem_remove(struct virtio_device *vdev)
* the system. And there is no way to stop the driver/device from going
* away. Warn at least.
*/
- if (vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE] ||
- vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL] ||
- vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE] ||
- vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL]) {
+ if (virtio_mem_has_memory_added(vm)) {
dev_warn(&vdev->dev, "device still has system memory added\n");
} else {
virtio_mem_delete_resource(vm);
@@ -1885,8 +2666,12 @@ static void virtio_mem_remove(struct virtio_device *vdev)
}
/* remove all tracking data - no locking needed */
- vfree(vm->mb_state);
- vfree(vm->sb_bitmap);
+ if (vm->in_sbm) {
+ vfree(vm->sbm.mb_states);
+ vfree(vm->sbm.sb_states);
+ } else {
+ vfree(vm->bbm.bb_states);
+ }
/* reset the device and cleanup the queues */
vdev->config->reset(vdev);
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index becc77697960..71e16b53e9c1 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -1608,7 +1608,6 @@ static struct virtqueue *vring_create_virtqueue_packed(
vq->num_added = 0;
vq->packed_ring = true;
vq->use_dma_api = vring_use_dma_api(vdev);
- list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
vq->in_use = false;
vq->last_add_time_valid = false;
@@ -1669,6 +1668,7 @@ static struct virtqueue *vring_create_virtqueue_packed(
cpu_to_le16(vq->packed.event_flags_shadow);
}
+ list_add_tail(&vq->vq.list, &vdev->vqs);
return &vq->vq;
err_desc_extra:
@@ -1676,9 +1676,9 @@ err_desc_extra:
err_desc_state:
kfree(vq);
err_vq:
- vring_free_queue(vdev, event_size_in_bytes, device, ring_dma_addr);
+ vring_free_queue(vdev, event_size_in_bytes, device, device_event_dma_addr);
err_device:
- vring_free_queue(vdev, event_size_in_bytes, driver, ring_dma_addr);
+ vring_free_queue(vdev, event_size_in_bytes, driver, driver_event_dma_addr);
err_driver:
vring_free_queue(vdev, ring_size_in_bytes, ring, ring_dma_addr);
err_ring:
@@ -2085,7 +2085,6 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
vq->last_used_idx = 0;
vq->num_added = 0;
vq->use_dma_api = vring_use_dma_api(vdev);
- list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
vq->in_use = false;
vq->last_add_time_valid = false;
@@ -2127,6 +2126,7 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
memset(vq->split.desc_state, 0, vring.num *
sizeof(struct vring_desc_state_split));
+ list_add_tail(&vq->vq.list, &vdev->vqs);
return &vq->vq;
}
EXPORT_SYMBOL_GPL(__vring_new_virtqueue);
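
    Both virtio_ring fixes above follow the same discipline: the virtqueue
    is linked into vdev->vqs only once it is fully initialised, and each
    error label frees exactly what its matching allocation created
    (device_event_dma_addr and driver_event_dma_addr rather than
    ring_dma_addr). A minimal userspace sketch of that create-then-publish
    pattern, with hypothetical names:

    #include <stdlib.h>

    struct node {
        struct node *next;
        int ready;          /* set only once fully initialised */
    };

    /* Link the node into the shared list only after it is complete. */
    static int publish(struct node **head, struct node *n)
    {
        if (!n->ready)
            return -1;      /* never publish half-built objects */
        n->next = *head;
        *head = n;
        return 0;
    }

    static struct node *create(struct node **head)
    {
        struct node *n = calloc(1, sizeof(*n));

        if (!n)
            return NULL;
        /* ... initialise fields; on failure, unwind in reverse order ... */
        n->ready = 1;
        if (publish(head, n)) {  /* publishing is the very last step */
            free(n);             /* free what this function allocated */
            return NULL;
        }
        return n;
    }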
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index fd7968635e6d..7ff941e71b79 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -386,6 +386,7 @@ config ARM_SBSA_WATCHDOG
config ARMADA_37XX_WATCHDOG
tristate "Armada 37xx watchdog"
depends on ARCH_MVEBU || COMPILE_TEST
+ depends on HAS_IOMEM
select MFD_SYSCON
select WATCHDOG_CORE
help
@@ -599,7 +600,7 @@ config K3_RTI_WATCHDOG
config ORION_WATCHDOG
tristate "Orion watchdog"
- depends on ARCH_ORION5X || ARCH_DOVE || MACH_DOVE || ARCH_MVEBU || (COMPILE_TEST && !ARCH_EBSA110)
+ depends on ARCH_ORION5X || ARCH_DOVE || MACH_DOVE || ARCH_MVEBU || COMPILE_TEST
depends on ARM
select WATCHDOG_CORE
help
@@ -631,7 +632,7 @@ config SUNXI_WATCHDOG
config COH901327_WATCHDOG
bool "ST-Ericsson COH 901 327 watchdog"
- depends on ARCH_U300 || (ARM && COMPILE_TEST)
+ depends on ARCH_U300 || (ARM && COMMON_CLK && COMPILE_TEST)
default y if MACH_U300
select WATCHDOG_CORE
help
@@ -789,6 +790,7 @@ config MOXART_WDT
config SIRFSOC_WATCHDOG
tristate "SiRFSOC watchdog"
+ depends on HAS_IOMEM
depends on ARCH_SIRF || COMPILE_TEST
select WATCHDOG_CORE
default y
@@ -1696,16 +1698,6 @@ config WDT_MTX1
Hardware driver for the MTX-1 boards. This is a watchdog timer that
will reboot the machine after a 100 second timer has expired.
-config PNX833X_WDT
- tristate "PNX833x Hardware Watchdog"
- depends on SOC_PNX8335
- depends on BROKEN
- help
- Hardware driver for the PNX833x's watchdog. This is a
- watchdog timer that will reboot the machine after a programmable
- timer has expired and no process has written to /dev/watchdog during
- that time.
-
config SIBYTE_WDOG
tristate "Sibyte SoC hardware watchdog"
depends on CPU_SB1 || (MIPS && COMPILE_TEST)
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 071a2e50be98..5c74ee19d441 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -161,7 +161,6 @@ obj-$(CONFIG_RC32434_WDT) += rc32434_wdt.o
obj-$(CONFIG_INDYDOG) += indydog.o
obj-$(CONFIG_JZ4740_WDT) += jz4740_wdt.o
obj-$(CONFIG_WDT_MTX1) += mtx-1_wdt.o
-obj-$(CONFIG_PNX833X_WDT) += pnx833x_wdt.o
obj-$(CONFIG_SIBYTE_WDOG) += sb_wdog.o
obj-$(CONFIG_AR7_WDT) += ar7_wdt.o
obj-$(CONFIG_TXX9_WDT) += txx9wdt.o
diff --git a/drivers/watchdog/geodewdt.c b/drivers/watchdog/geodewdt.c
index 83418924e30a..0b699c783d57 100644
--- a/drivers/watchdog/geodewdt.c
+++ b/drivers/watchdog/geodewdt.c
@@ -150,8 +150,6 @@ static long geodewdt_ioctl(struct file *file, unsigned int cmd,
case WDIOC_GETSUPPORT:
return copy_to_user(argp, &ident,
sizeof(ident)) ? -EFAULT : 0;
- break;
-
case WDIOC_GETSTATUS:
case WDIOC_GETBOOTSTATUS:
return put_user(0, p);
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 7d34bcf1c45b..cbd1498ff015 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -21,8 +21,9 @@
#include <linux/types.h>
#include <linux/watchdog.h>
#include <asm/nmi.h>
+#include <linux/crash_dump.h>
-#define HPWDT_VERSION "2.0.3"
+#define HPWDT_VERSION "2.0.4"
#define SECS_TO_TICKS(secs) ((secs) * 1000 / 128)
#define TICKS_TO_SECS(ticks) ((ticks) * 128 / 1000)
#define HPWDT_MAX_TICKS 65535
@@ -334,6 +335,11 @@ static int hpwdt_init_one(struct pci_dev *dev,
watchdog_set_nowayout(&hpwdt_dev, nowayout);
watchdog_init_timeout(&hpwdt_dev, soft_margin, NULL);
+ if (is_kdump_kernel()) {
+ pretimeout = 0;
+ kdumptimeout = 0;
+ }
+
if (pretimeout && hpwdt_dev.timeout <= PRETIMEOUT_SEC) {
dev_warn(&dev->dev, "timeout <= pretimeout. Setting pretimeout to zero\n");
pretimeout = 0;
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index a370a185a41c..bf31d7b67a69 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -40,8 +40,6 @@
* Includes, defines, variables, module parameters, ...
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
/* Module and version information */
#define DRV_NAME "iTCO_wdt"
#define DRV_VERSION "1.11"
@@ -279,7 +277,7 @@ static int iTCO_wdt_start(struct watchdog_device *wd_dev)
/* disable chipset's NO_REBOOT bit */
if (p->update_no_reboot_bit(p->no_reboot_priv, false)) {
spin_unlock(&p->io_lock);
- pr_err("failed to reset NO_REBOOT flag, reboot disabled by hardware/BIOS\n");
+ dev_err(wd_dev->parent, "failed to reset NO_REBOOT flag, reboot disabled by hardware/BIOS\n");
return -EIO;
}
@@ -510,7 +508,7 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
/* Check chipset's NO_REBOOT bit */
if (p->update_no_reboot_bit(p->no_reboot_priv, false) &&
iTCO_vendor_check_noreboot_on()) {
- pr_info("unable to reset NO_REBOOT flag, device disabled by hardware/BIOS\n");
+ dev_info(dev, "unable to reset NO_REBOOT flag, device disabled by hardware/BIOS\n");
return -ENODEV; /* Cannot reset NO_REBOOT bit */
}
@@ -530,12 +528,12 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
if (!devm_request_region(dev, p->tco_res->start,
resource_size(p->tco_res),
pdev->name)) {
- pr_err("I/O address 0x%04llx already in use, device disabled\n",
+ dev_err(dev, "I/O address 0x%04llx already in use, device disabled\n",
(u64)TCOBASE(p));
return -EBUSY;
}
- pr_info("Found a %s TCO device (Version=%d, TCOBASE=0x%04llx)\n",
+ dev_info(dev, "Found a %s TCO device (Version=%d, TCOBASE=0x%04llx)\n",
pdata->name, pdata->version, (u64)TCOBASE(p));
/* Clear out the (probably old) status */
@@ -558,7 +556,7 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
break;
}
- p->wddev.info = &ident,
+ p->wddev.info = &ident,
p->wddev.ops = &iTCO_wdt_ops,
p->wddev.bootstatus = 0;
p->wddev.timeout = WATCHDOG_TIMEOUT;
@@ -575,7 +573,7 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
if not reset to the default */
if (iTCO_wdt_set_timeout(&p->wddev, heartbeat)) {
iTCO_wdt_set_timeout(&p->wddev, WATCHDOG_TIMEOUT);
- pr_info("timeout value out of range, using %d\n",
+ dev_info(dev, "timeout value out of range, using %d\n",
WATCHDOG_TIMEOUT);
}
@@ -583,11 +581,11 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
watchdog_stop_on_unregister(&p->wddev);
ret = devm_watchdog_register_device(dev, &p->wddev);
if (ret != 0) {
- pr_err("cannot register watchdog device (err=%d)\n", ret);
+ dev_err(dev, "cannot register watchdog device (err=%d)\n", ret);
return ret;
}
- pr_info("initialized. heartbeat=%d sec (nowayout=%d)\n",
+ dev_info(dev, "initialized. heartbeat=%d sec (nowayout=%d)\n",
heartbeat, nowayout);
return 0;
@@ -651,21 +649,7 @@ static struct platform_driver iTCO_wdt_driver = {
},
};
-static int __init iTCO_wdt_init_module(void)
-{
- pr_info("Intel TCO WatchDog Timer Driver v%s\n", DRV_VERSION);
-
- return platform_driver_register(&iTCO_wdt_driver);
-}
-
-static void __exit iTCO_wdt_cleanup_module(void)
-{
- platform_driver_unregister(&iTCO_wdt_driver);
- pr_info("Watchdog Module Unloaded\n");
-}
-
-module_init(iTCO_wdt_init_module);
-module_exit(iTCO_wdt_cleanup_module);
+module_platform_driver(iTCO_wdt_driver);
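
    module_platform_driver() stands in for the hand-rolled init/exit pair
    removed above (only the version banner printouts are lost); it expands
    to roughly the following, with the function names generated from the
    driver name:

    static int __init iTCO_wdt_driver_init(void)
    {
        return platform_driver_register(&iTCO_wdt_driver);
    }
    module_init(iTCO_wdt_driver_init);

    static void __exit iTCO_wdt_driver_exit(void)
    {
        platform_driver_unregister(&iTCO_wdt_driver);
    }
    module_exit(iTCO_wdt_driver_exit);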
MODULE_AUTHOR("Wim Van Sebroeck <wim@iguana.be>");
MODULE_DESCRIPTION("Intel TCO WatchDog Timer Driver");
diff --git a/drivers/watchdog/mpc8xxx_wdt.c b/drivers/watchdog/mpc8xxx_wdt.c
index 3fc457bc16db..2f7ded32e878 100644
--- a/drivers/watchdog/mpc8xxx_wdt.c
+++ b/drivers/watchdog/mpc8xxx_wdt.c
@@ -175,8 +175,8 @@ static int mpc8xxx_wdt_probe(struct platform_device *ofdev)
spin_lock_init(&ddata->lock);
- ddata->wdd.info = &mpc8xxx_wdt_info,
- ddata->wdd.ops = &mpc8xxx_wdt_ops,
+ ddata->wdd.info = &mpc8xxx_wdt_info;
+ ddata->wdd.ops = &mpc8xxx_wdt_ops;
ddata->wdd.timeout = WATCHDOG_TIMEOUT;
watchdog_init_timeout(&ddata->wdd, timeout, dev);
diff --git a/drivers/watchdog/pnx833x_wdt.c b/drivers/watchdog/pnx833x_wdt.c
deleted file mode 100644
index 4097d076aab8..000000000000
--- a/drivers/watchdog/pnx833x_wdt.c
+++ /dev/null
@@ -1,277 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * PNX833x Hardware Watchdog Driver
- * Copyright 2008 NXP Semiconductors
- * Daniel Laird <daniel.j.laird@nxp.com>
- * Andre McCurdy <andre.mccurdy@nxp.com>
- *
- * Heavily based upon - IndyDog 0.3
- * A Hardware Watchdog Device for SGI IP22
- *
- * (c) Copyright 2002 Guido Guenther <agx@sigxcpu.org>, All Rights Reserved.
- *
- * based on softdog.c by Alan Cox <alan@redhat.com>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/miscdevice.h>
-#include <linux/watchdog.h>
-#include <linux/notifier.h>
-#include <linux/reboot.h>
-#include <linux/init.h>
-#include <asm/mach-pnx833x/pnx833x.h>
-
-#define WATCHDOG_TIMEOUT 30 /* 30 sec Maximum timeout */
-#define WATCHDOG_COUNT_FREQUENCY 68000000U /* Watchdog counts at 68MHZ. */
-#define PNX_WATCHDOG_TIMEOUT (WATCHDOG_TIMEOUT * WATCHDOG_COUNT_FREQUENCY)
-#define PNX_TIMEOUT_VALUE 2040000000U
-
-/** CONFIG block */
-#define PNX833X_CONFIG (0x07000U)
-#define PNX833X_CONFIG_CPU_WATCHDOG (0x54)
-#define PNX833X_CONFIG_CPU_WATCHDOG_COMPARE (0x58)
-#define PNX833X_CONFIG_CPU_COUNTERS_CONTROL (0x1c)
-
-/** RESET block */
-#define PNX833X_RESET (0x08000U)
-#define PNX833X_RESET_CONFIG (0x08)
-
-static int pnx833x_wdt_alive;
-
-/* Set default timeout in MHZ.*/
-static int pnx833x_wdt_timeout = PNX_WATCHDOG_TIMEOUT;
-module_param(pnx833x_wdt_timeout, int, 0);
-MODULE_PARM_DESC(timeout, "Watchdog timeout in Mhz. (68Mhz clock), default="
- __MODULE_STRING(PNX_TIMEOUT_VALUE) "(30 seconds).");
-
-static bool nowayout = WATCHDOG_NOWAYOUT;
-module_param(nowayout, bool, 0);
-MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
- __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-
-#define START_DEFAULT 1
-static int start_enabled = START_DEFAULT;
-module_param(start_enabled, int, 0);
-MODULE_PARM_DESC(start_enabled, "Watchdog is started on module insertion "
- "(default=" __MODULE_STRING(START_DEFAULT) ")");
-
-static void pnx833x_wdt_start(void)
-{
- /* Enable watchdog causing reset. */
- PNX833X_REG(PNX833X_RESET + PNX833X_RESET_CONFIG) |= 0x1;
- /* Set timeout.*/
- PNX833X_REG(PNX833X_CONFIG +
- PNX833X_CONFIG_CPU_WATCHDOG_COMPARE) = pnx833x_wdt_timeout;
- /* Enable watchdog. */
- PNX833X_REG(PNX833X_CONFIG +
- PNX833X_CONFIG_CPU_COUNTERS_CONTROL) |= 0x1;
-
- pr_info("Started watchdog timer\n");
-}
-
-static void pnx833x_wdt_stop(void)
-{
- /* Disable watchdog causing reset. */
- PNX833X_REG(PNX833X_RESET + PNX833X_CONFIG) &= 0xFFFFFFFE;
- /* Disable watchdog.*/
- PNX833X_REG(PNX833X_CONFIG +
- PNX833X_CONFIG_CPU_COUNTERS_CONTROL) &= 0xFFFFFFFE;
-
- pr_info("Stopped watchdog timer\n");
-}
-
-static void pnx833x_wdt_ping(void)
-{
- PNX833X_REG(PNX833X_CONFIG +
- PNX833X_CONFIG_CPU_WATCHDOG_COMPARE) = pnx833x_wdt_timeout;
-}
-
-/*
- * Allow only one person to hold it open
- */
-static int pnx833x_wdt_open(struct inode *inode, struct file *file)
-{
- if (test_and_set_bit(0, &pnx833x_wdt_alive))
- return -EBUSY;
-
- if (nowayout)
- __module_get(THIS_MODULE);
-
- /* Activate timer */
- if (!start_enabled)
- pnx833x_wdt_start();
-
- pnx833x_wdt_ping();
-
- pr_info("Started watchdog timer\n");
-
- return stream_open(inode, file);
-}
-
-static int pnx833x_wdt_release(struct inode *inode, struct file *file)
-{
- /* Shut off the timer.
- * Lock it in if it's a module and we defined ...NOWAYOUT */
- if (!nowayout)
- pnx833x_wdt_stop(); /* Turn the WDT off */
-
- clear_bit(0, &pnx833x_wdt_alive);
- return 0;
-}
-
-static ssize_t pnx833x_wdt_write(struct file *file, const char *data, size_t len, loff_t *ppos)
-{
- /* Refresh the timer. */
- if (len)
- pnx833x_wdt_ping();
-
- return len;
-}
-
-static long pnx833x_wdt_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- int options, new_timeout = 0;
- uint32_t timeout, timeout_left = 0;
-
- static const struct watchdog_info ident = {
- .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT,
- .firmware_version = 0,
- .identity = "Hardware Watchdog for PNX833x",
- };
-
- switch (cmd) {
- default:
- return -ENOTTY;
-
- case WDIOC_GETSUPPORT:
- if (copy_to_user((struct watchdog_info *)arg,
- &ident, sizeof(ident)))
- return -EFAULT;
- return 0;
-
- case WDIOC_GETSTATUS:
- case WDIOC_GETBOOTSTATUS:
- return put_user(0, (int *)arg);
-
- case WDIOC_SETOPTIONS:
- if (get_user(options, (int *)arg))
- return -EFAULT;
-
- if (options & WDIOS_DISABLECARD)
- pnx833x_wdt_stop();
-
- if (options & WDIOS_ENABLECARD)
- pnx833x_wdt_start();
-
- return 0;
-
- case WDIOC_KEEPALIVE:
- pnx833x_wdt_ping();
- return 0;
-
- case WDIOC_SETTIMEOUT:
- {
- if (get_user(new_timeout, (int *)arg))
- return -EFAULT;
-
- pnx833x_wdt_timeout = new_timeout;
- PNX833X_REG(PNX833X_CONFIG +
- PNX833X_CONFIG_CPU_WATCHDOG_COMPARE) = new_timeout;
- return put_user(new_timeout, (int *)arg);
- }
-
- case WDIOC_GETTIMEOUT:
- timeout = PNX833X_REG(PNX833X_CONFIG +
- PNX833X_CONFIG_CPU_WATCHDOG_COMPARE);
- return put_user(timeout, (int *)arg);
-
- case WDIOC_GETTIMELEFT:
- timeout_left = PNX833X_REG(PNX833X_CONFIG +
- PNX833X_CONFIG_CPU_WATCHDOG);
- return put_user(timeout_left, (int *)arg);
-
- }
-}
-
-static int pnx833x_wdt_notify_sys(struct notifier_block *this,
- unsigned long code, void *unused)
-{
- if (code == SYS_DOWN || code == SYS_HALT)
- pnx833x_wdt_stop(); /* Turn the WDT off */
-
- return NOTIFY_DONE;
-}
-
-static const struct file_operations pnx833x_wdt_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .write = pnx833x_wdt_write,
- .unlocked_ioctl = pnx833x_wdt_ioctl,
- .compat_ioctl = compat_ptr_ioctl,
- .open = pnx833x_wdt_open,
- .release = pnx833x_wdt_release,
-};
-
-static struct miscdevice pnx833x_wdt_miscdev = {
- .minor = WATCHDOG_MINOR,
- .name = "watchdog",
- .fops = &pnx833x_wdt_fops,
-};
-
-static struct notifier_block pnx833x_wdt_notifier = {
- .notifier_call = pnx833x_wdt_notify_sys,
-};
-
-static int __init watchdog_init(void)
-{
- int ret, cause;
-
- /* Lets check the reason for the reset.*/
- cause = PNX833X_REG(PNX833X_RESET);
- /*If bit 31 is set then watchdog was cause of reset.*/
- if (cause & 0x80000000) {
- pr_info("The system was previously reset due to the watchdog firing - please investigate...\n");
- }
-
- ret = register_reboot_notifier(&pnx833x_wdt_notifier);
- if (ret) {
- pr_err("cannot register reboot notifier (err=%d)\n", ret);
- return ret;
- }
-
- ret = misc_register(&pnx833x_wdt_miscdev);
- if (ret) {
- pr_err("cannot register miscdev on minor=%d (err=%d)\n",
- WATCHDOG_MINOR, ret);
- unregister_reboot_notifier(&pnx833x_wdt_notifier);
- return ret;
- }
-
- pr_info("Hardware Watchdog Timer for PNX833x: Version 0.1\n");
-
- if (start_enabled)
- pnx833x_wdt_start();
-
- return 0;
-}
-
-static void __exit watchdog_exit(void)
-{
- misc_deregister(&pnx833x_wdt_miscdev);
- unregister_reboot_notifier(&pnx833x_wdt_notifier);
-}
-
-module_init(watchdog_init);
-module_exit(watchdog_exit);
-
-MODULE_AUTHOR("Daniel Laird/Andre McCurdy");
-MODULE_DESCRIPTION("Hardware Watchdog Device for PNX833x");
-MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c
index ab7465d186fd..7cf0f2ec649b 100644
--- a/drivers/watchdog/qcom-wdt.c
+++ b/drivers/watchdog/qcom-wdt.c
@@ -148,10 +148,17 @@ static int qcom_wdt_restart(struct watchdog_device *wdd, unsigned long action,
*/
wmb();
- msleep(150);
+ mdelay(150);
return 0;
}
+static int qcom_wdt_is_running(struct watchdog_device *wdd)
+{
+ struct qcom_wdt *wdt = to_qcom_wdt(wdd);
+
+ return (readl(wdt_addr(wdt, WDT_EN)) & QCOM_WDT_ENABLE);
+}
+
static const struct watchdog_ops qcom_wdt_ops = {
.start = qcom_wdt_start,
.stop = qcom_wdt_stop,
@@ -294,6 +301,17 @@ static int qcom_wdt_probe(struct platform_device *pdev)
wdt->wdd.timeout = min(wdt->wdd.max_timeout, 30U);
watchdog_init_timeout(&wdt->wdd, 0, dev);
+ /*
+ * If the WDT is already running, call WDT start, which
+ * stops the WDT and sets our timeouts (the bootloader
+ * might have used different ones), and set the running
+ * bit to inform the WDT subsystem to ping the WDT.
+ */
+ if (qcom_wdt_is_running(&wdt->wdd)) {
+ qcom_wdt_start(&wdt->wdd);
+ set_bit(WDOG_HW_RUNNING, &wdt->wdd.status);
+ }
+
ret = devm_watchdog_register_device(dev, &wdt->wdd);
if (ret)
return ret;
diff --git a/drivers/watchdog/rti_wdt.c b/drivers/watchdog/rti_wdt.c
index 836319cbaca9..359302f71f7e 100644
--- a/drivers/watchdog/rti_wdt.c
+++ b/drivers/watchdog/rti_wdt.c
@@ -227,8 +227,10 @@ static int rti_wdt_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
- if (ret)
+ if (ret) {
+ pm_runtime_put_noidle(dev);
return dev_err_probe(dev, ret, "runtime pm failed\n");
+ }
platform_set_drvdata(pdev, wdt);
diff --git a/drivers/watchdog/sbc_fitpc2_wdt.c b/drivers/watchdog/sbc_fitpc2_wdt.c
index 04483d6453d6..13db71e16583 100644
--- a/drivers/watchdog/sbc_fitpc2_wdt.c
+++ b/drivers/watchdog/sbc_fitpc2_wdt.c
@@ -78,7 +78,7 @@ static int fitpc2_wdt_open(struct inode *inode, struct file *file)
return stream_open(inode, file);
}
-static ssize_t fitpc2_wdt_write(struct file *file, const char *data,
+static ssize_t fitpc2_wdt_write(struct file *file, const char __user *data,
size_t len, loff_t *ppos)
{
size_t i;
@@ -125,16 +125,16 @@ static long fitpc2_wdt_ioctl(struct file *file, unsigned int cmd,
switch (cmd) {
case WDIOC_GETSUPPORT:
- ret = copy_to_user((struct watchdog_info *)arg, &ident,
+ ret = copy_to_user((struct watchdog_info __user *)arg, &ident,
sizeof(ident)) ? -EFAULT : 0;
break;
case WDIOC_GETSTATUS:
- ret = put_user(0, (int *)arg);
+ ret = put_user(0, (int __user *)arg);
break;
case WDIOC_GETBOOTSTATUS:
- ret = put_user(0, (int *)arg);
+ ret = put_user(0, (int __user *)arg);
break;
case WDIOC_KEEPALIVE:
@@ -143,7 +143,7 @@ static long fitpc2_wdt_ioctl(struct file *file, unsigned int cmd,
break;
case WDIOC_SETTIMEOUT:
- ret = get_user(time, (int *)arg);
+ ret = get_user(time, (int __user *)arg);
if (ret)
break;
@@ -157,7 +157,7 @@ static long fitpc2_wdt_ioctl(struct file *file, unsigned int cmd,
fallthrough;
case WDIOC_GETTIMEOUT:
- ret = put_user(margin, (int *)arg);
+ ret = put_user(margin, (int __user *)arg);
break;
}
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
index 190d26e2e75f..958dc32a708f 100644
--- a/drivers/watchdog/sp805_wdt.c
+++ b/drivers/watchdog/sp805_wdt.c
@@ -291,6 +291,7 @@ sp805_wdt_probe(struct amba_device *adev, const struct amba_id *id)
set_bit(WDOG_HW_RUNNING, &wdt->wdd.status);
}
+ watchdog_stop_on_reboot(&wdt->wdd);
ret = watchdog_register_device(&wdt->wdd);
if (ret)
goto err;
diff --git a/drivers/watchdog/sprd_wdt.c b/drivers/watchdog/sprd_wdt.c
index 65cb55f3916f..4e689b6ff141 100644
--- a/drivers/watchdog/sprd_wdt.c
+++ b/drivers/watchdog/sprd_wdt.c
@@ -6,6 +6,7 @@
#include <linux/bitops.h>
#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
@@ -53,7 +54,7 @@
#define SPRD_WDT_CNT_HIGH_SHIFT 16
#define SPRD_WDT_LOW_VALUE_MASK GENMASK(15, 0)
-#define SPRD_WDT_LOAD_TIMEOUT 1000
+#define SPRD_WDT_LOAD_TIMEOUT 11
struct sprd_wdt {
void __iomem *base;
@@ -108,6 +109,23 @@ static int sprd_wdt_load_value(struct sprd_wdt *wdt, u32 timeout,
u32 tmr_step = timeout * SPRD_WDT_CNT_STEP;
u32 prtmr_step = pretimeout * SPRD_WDT_CNT_STEP;
+ /*
+ * Check the busy bit to make sure the previous loading operation is
+ * done. According to the specification, the busy bit is set
+ * after a new loading operation and lasts for 2 or 3 RTC clock
+ * cycles (about 60us~92us).
+ */
+ do {
+ val = readl_relaxed(wdt->base + SPRD_WDT_INT_RAW);
+ if (!(val & SPRD_WDT_LD_BUSY_BIT))
+ break;
+
+ usleep_range(10, 100);
+ } while (delay_cnt++ < SPRD_WDT_LOAD_TIMEOUT);
+
+ if (delay_cnt >= SPRD_WDT_LOAD_TIMEOUT)
+ return -EBUSY;
+
sprd_wdt_unlock(wdt->base);
writel_relaxed((tmr_step >> SPRD_WDT_CNT_HIGH_SHIFT) &
SPRD_WDT_LOW_VALUE_MASK, wdt->base + SPRD_WDT_LOAD_HIGH);
@@ -120,20 +138,6 @@ static int sprd_wdt_load_value(struct sprd_wdt *wdt, u32 timeout,
wdt->base + SPRD_WDT_IRQ_LOAD_LOW);
sprd_wdt_lock(wdt->base);
- /*
- * Waiting the load value operation done,
- * it needs two or three RTC clock cycles.
- */
- do {
- val = readl_relaxed(wdt->base + SPRD_WDT_INT_RAW);
- if (!(val & SPRD_WDT_LD_BUSY_BIT))
- break;
-
- cpu_relax();
- } while (delay_cnt++ < SPRD_WDT_LOAD_TIMEOUT);
-
- if (delay_cnt >= SPRD_WDT_LOAD_TIMEOUT)
- return -EBUSY;
return 0;
}
@@ -345,15 +349,10 @@ static int __maybe_unused sprd_wdt_pm_resume(struct device *dev)
if (ret)
return ret;
- if (watchdog_active(&wdt->wdd)) {
+ if (watchdog_active(&wdt->wdd))
ret = sprd_wdt_start(&wdt->wdd);
- if (ret) {
- sprd_wdt_disable(wdt);
- return ret;
- }
- }
- return 0;
+ return ret;
}
static const struct dev_pm_ops sprd_wdt_pm_ops = {
diff --git a/drivers/watchdog/stm32_iwdg.c b/drivers/watchdog/stm32_iwdg.c
index 25188d6bbe15..a3436c296c97 100644
--- a/drivers/watchdog/stm32_iwdg.c
+++ b/drivers/watchdog/stm32_iwdg.c
@@ -162,18 +162,15 @@ static int stm32_iwdg_clk_init(struct platform_device *pdev,
u32 ret;
wdt->clk_lsi = devm_clk_get(dev, "lsi");
- if (IS_ERR(wdt->clk_lsi)) {
- dev_err(dev, "Unable to get lsi clock\n");
- return PTR_ERR(wdt->clk_lsi);
- }
+ if (IS_ERR(wdt->clk_lsi))
+ return dev_err_probe(dev, PTR_ERR(wdt->clk_lsi), "Unable to get lsi clock\n");
/* optional peripheral clock */
if (wdt->data->has_pclk) {
wdt->clk_pclk = devm_clk_get(dev, "pclk");
- if (IS_ERR(wdt->clk_pclk)) {
- dev_err(dev, "Unable to get pclk clock\n");
- return PTR_ERR(wdt->clk_pclk);
- }
+ if (IS_ERR(wdt->clk_pclk))
+ return dev_err_probe(dev, PTR_ERR(wdt->clk_pclk),
+ "Unable to get pclk clock\n");
ret = clk_prepare_enable(wdt->clk_pclk);
if (ret) {
diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c
index 423844757812..0e9a99559609 100644
--- a/drivers/watchdog/watchdog_core.c
+++ b/drivers/watchdog/watchdog_core.c
@@ -267,15 +267,19 @@ static int __watchdog_register_device(struct watchdog_device *wdd)
}
if (test_bit(WDOG_STOP_ON_REBOOT, &wdd->status)) {
- wdd->reboot_nb.notifier_call = watchdog_reboot_notifier;
-
- ret = register_reboot_notifier(&wdd->reboot_nb);
- if (ret) {
- pr_err("watchdog%d: Cannot register reboot notifier (%d)\n",
- wdd->id, ret);
- watchdog_dev_unregister(wdd);
- ida_simple_remove(&watchdog_ida, id);
- return ret;
+ if (!wdd->ops->stop)
+ pr_warn("watchdog%d: stop_on_reboot not supported\n", wdd->id);
+ else {
+ wdd->reboot_nb.notifier_call = watchdog_reboot_notifier;
+
+ ret = register_reboot_notifier(&wdd->reboot_nb);
+ if (ret) {
+ pr_err("watchdog%d: Cannot register reboot notifier (%d)\n",
+ wdd->id, ret);
+ watchdog_dev_unregister(wdd);
+ ida_simple_remove(&watchdog_ida, id);
+ return ret;
+ }
}
}
diff --git a/drivers/watchdog/wdat_wdt.c b/drivers/watchdog/wdat_wdt.c
index 3065dd670a18..cec7917790e5 100644
--- a/drivers/watchdog/wdat_wdt.c
+++ b/drivers/watchdog/wdat_wdt.c
@@ -34,9 +34,9 @@ struct wdat_instruction {
* @period: How long is one watchdog period in ms
* @stopped_in_sleep: Is this watchdog stopped by the firmware in S1-S5
* @stopped: Was the watchdog stopped by the driver in suspend
- * @actions: An array of instruction lists indexed by an action number from
- * the WDAT table. There can be %NULL entries for not implemented
- * actions.
+ * @instructions: An array of instruction lists indexed by an action number from
+ * the WDAT table. There can be %NULL entries for not implemented
+ * actions.
*/
struct wdat_wdt {
struct platform_device *pdev;
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index babdca808861..c3621b9f4012 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -21,7 +21,7 @@ obj-$(CONFIG_XEN_GNTDEV) += xen-gntdev.o
obj-$(CONFIG_XEN_GRANT_DEV_ALLOC) += xen-gntalloc.o
obj-$(CONFIG_XENFS) += xenfs/
obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o
-obj-$(CONFIG_XEN_PVHVM) += platform-pci.o
+obj-$(CONFIG_XEN_PVHVM_GUEST) += platform-pci.o
obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o
obj-$(CONFIG_XEN_MCE_LOG) += mcelog.o
obj-$(CONFIG_XEN_PCIDEV_BACKEND) += xen-pciback/
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 6038c4c35db5..e850f79351cb 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -95,7 +95,8 @@ struct irq_info {
struct list_head list;
struct list_head eoi_list;
short refcnt;
- short spurious_cnt;
+ u8 spurious_cnt;
+ u8 is_accounted;
enum xen_irq_type type; /* type */
unsigned irq;
evtchn_port_t evtchn; /* event channel */
@@ -161,6 +162,9 @@ static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};
/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};
+/* Event channel distribution data */
+static atomic_t channels_on_cpu[NR_CPUS];
+
static int **evtchn_to_irq;
#ifdef CONFIG_X86
static unsigned long *pirq_eoi_map;
@@ -257,6 +261,32 @@ static void set_info_for_irq(unsigned int irq, struct irq_info *info)
irq_set_chip_data(irq, info);
}
+/* Per CPU channel accounting */
+static void channels_on_cpu_dec(struct irq_info *info)
+{
+ if (!info->is_accounted)
+ return;
+
+ info->is_accounted = 0;
+
+ if (WARN_ON_ONCE(info->cpu >= nr_cpu_ids))
+ return;
+
+ WARN_ON_ONCE(!atomic_add_unless(&channels_on_cpu[info->cpu], -1, 0));
+}
+
+static void channels_on_cpu_inc(struct irq_info *info)
+{
+ if (WARN_ON_ONCE(info->cpu >= nr_cpu_ids))
+ return;
+
+ if (WARN_ON_ONCE(!atomic_add_unless(&channels_on_cpu[info->cpu], 1,
+ INT_MAX)))
+ return;
+
+ info->is_accounted = 1;
+}
+
/* Constructors for packed IRQ information. */
static int xen_irq_info_common_setup(struct irq_info *info,
unsigned irq,
@@ -339,6 +369,7 @@ static void xen_irq_info_cleanup(struct irq_info *info)
{
set_evtchn_to_irq(info->evtchn, -1);
info->evtchn = 0;
+ channels_on_cpu_dec(info);
}
/*
@@ -433,18 +464,25 @@ static bool pirq_needs_eoi_flag(unsigned irq)
return info->u.pirq.flags & PIRQ_NEEDS_EOI;
}
-static void bind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int cpu)
+static void bind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int cpu,
+ bool force_affinity)
{
int irq = get_evtchn_to_irq(evtchn);
struct irq_info *info = info_for_irq(irq);
BUG_ON(irq == -1);
-#ifdef CONFIG_SMP
- cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(cpu));
-#endif
+
+ if (IS_ENABLED(CONFIG_SMP) && force_affinity) {
+ cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(cpu));
+ cpumask_copy(irq_get_effective_affinity_mask(irq),
+ cpumask_of(cpu));
+ }
+
xen_evtchn_port_bind_to_cpu(evtchn, cpu, info->cpu);
+ channels_on_cpu_dec(info);
info->cpu = cpu;
+ channels_on_cpu_inc(info);
}
/**
@@ -523,8 +561,10 @@ static void xen_irq_lateeoi_locked(struct irq_info *info, bool spurious)
return;
if (spurious) {
- if ((1 << info->spurious_cnt) < (HZ << 2))
- info->spurious_cnt++;
+ if ((1 << info->spurious_cnt) < (HZ << 2)) {
+ if (info->spurious_cnt != 0xFF)
+ info->spurious_cnt++;
+ }
if (info->spurious_cnt > 1) {
delay = 1 << (info->spurious_cnt - 2);
if (delay > HZ)
@@ -615,11 +655,6 @@ static void xen_irq_init(unsigned irq)
{
struct irq_info *info;
-#ifdef CONFIG_SMP
- /* By default all event channels notify CPU#0. */
- cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(0));
-#endif
-
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (info == NULL)
panic("Unable to allocate metadata for IRQ%d\n", irq);
@@ -628,6 +663,11 @@ static void xen_irq_init(unsigned irq)
info->refcnt = -1;
set_info_for_irq(irq, info);
+ /*
+ * Interrupt affinity setting can be immediate. No point
+ * in delaying it until an interrupt is handled.
+ */
+ irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
INIT_LIST_HEAD(&info->eoi_list);
list_add_tail(&info->list, &xen_irq_list_head);
@@ -739,18 +779,7 @@ static void eoi_pirq(struct irq_data *data)
if (!VALID_EVTCHN(evtchn))
return;
- if (unlikely(irqd_is_setaffinity_pending(data)) &&
- likely(!irqd_irq_disabled(data))) {
- int masked = test_and_set_mask(evtchn);
-
- clear_evtchn(evtchn);
-
- irq_move_masked_irq(data);
-
- if (!masked)
- unmask_evtchn(evtchn);
- } else
- clear_evtchn(evtchn);
+ clear_evtchn(evtchn);
if (pirq_needs_eoi(data->irq)) {
rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
@@ -794,7 +823,7 @@ static unsigned int __startup_pirq(unsigned int irq)
goto err;
info->evtchn = evtchn;
- bind_evtchn_to_cpu(evtchn, 0);
+ bind_evtchn_to_cpu(evtchn, 0, false);
rc = xen_evtchn_port_setup(evtchn);
if (rc)
@@ -1113,8 +1142,14 @@ static int bind_evtchn_to_irq_chip(evtchn_port_t evtchn, struct irq_chip *chip)
irq = ret;
goto out;
}
- /* New interdomain events are bound to VCPU 0. */
- bind_evtchn_to_cpu(evtchn, 0);
+ /*
+ * New interdomain events are initially bound to vCPU0. This
+ * is required to set up the event channel in the first
+ * place and is also important for UP guests because the
+ * affinity setting is not invoked on them, so nothing would
+ * bind the channel.
+ */
+ bind_evtchn_to_cpu(evtchn, 0, false);
} else {
struct irq_info *info = info_for_irq(irq);
WARN_ON(info == NULL || info->type != IRQT_EVTCHN);
@@ -1132,12 +1167,6 @@ int bind_evtchn_to_irq(evtchn_port_t evtchn)
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);
-int bind_evtchn_to_irq_lateeoi(evtchn_port_t evtchn)
-{
- return bind_evtchn_to_irq_chip(evtchn, &xen_lateeoi_chip);
-}
-EXPORT_SYMBOL_GPL(bind_evtchn_to_irq_lateeoi);
-
static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
struct evtchn_bind_ipi bind_ipi;
@@ -1168,7 +1197,11 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
irq = ret;
goto out;
}
- bind_evtchn_to_cpu(evtchn, cpu);
+ /*
+ * Force the affinity mask to the target CPU so proc shows
+ * the correct target.
+ */
+ bind_evtchn_to_cpu(evtchn, cpu, true);
} else {
struct irq_info *info = info_for_irq(irq);
WARN_ON(info == NULL || info->type != IRQT_IPI);
@@ -1281,7 +1314,11 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
goto out;
}
- bind_evtchn_to_cpu(evtchn, cpu);
+ /*
+ * Force the affinity mask for percpu interrupts so proc
+ * shows the correct target.
+ */
+ bind_evtchn_to_cpu(evtchn, cpu, percpu);
} else {
struct irq_info *info = info_for_irq(irq);
WARN_ON(info == NULL || info->type != IRQT_VIRQ);
@@ -1646,9 +1683,7 @@ void rebind_evtchn_irq(evtchn_port_t evtchn, int irq)
mutex_unlock(&irq_mapping_update_lock);
- bind_evtchn_to_cpu(evtchn, info->cpu);
- /* This will be deferred until interrupt is processed */
- irq_set_affinity(irq, cpumask_of(info->cpu));
+ bind_evtchn_to_cpu(evtchn, info->cpu, false);
/* Unmask the event channel. */
enable_irq(irq);
@@ -1682,7 +1717,7 @@ static int xen_rebind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int tcpu)
* it, but don't do the xenlinux-level rebind in that case.
*/
if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
- bind_evtchn_to_cpu(evtchn, tcpu);
+ bind_evtchn_to_cpu(evtchn, tcpu, false);
if (!masked)
unmask_evtchn(evtchn);
@@ -1690,27 +1725,47 @@ static int xen_rebind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int tcpu)
return 0;
}
+/*
+ * Find the CPU within the @dest mask which has the fewest channels
+ * assigned. This is not precise, as the per-CPU counts can be modified
+ * concurrently.
+ */
+static unsigned int select_target_cpu(const struct cpumask *dest)
+{
+ unsigned int cpu, best_cpu = UINT_MAX, minch = UINT_MAX;
+
+ for_each_cpu_and(cpu, dest, cpu_online_mask) {
+ unsigned int curch = atomic_read(&channels_on_cpu[cpu]);
+
+ if (curch < minch) {
+ minch = curch;
+ best_cpu = cpu;
+ }
+ }
+
+ /*
+ * Catch the unlikely case that dest contains no online CPUs. Can't
+ * recurse.
+ */
+ if (best_cpu == UINT_MAX)
+ return select_target_cpu(cpu_online_mask);
+
+ return best_cpu;
+}
+
static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
bool force)
{
- unsigned tcpu = cpumask_first_and(dest, cpu_online_mask);
- int ret = xen_rebind_evtchn_to_cpu(evtchn_from_irq(data->irq), tcpu);
+ unsigned int tcpu = select_target_cpu(dest);
+ int ret;
+ ret = xen_rebind_evtchn_to_cpu(evtchn_from_irq(data->irq), tcpu);
if (!ret)
irq_data_update_effective_affinity(data, cpumask_of(tcpu));
return ret;
}
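
    select_target_cpu() above picks the allowed online CPU with the fewest
    event channels; since the per-CPU counts can change concurrently, the
    pick is a best-effort hint rather than an invariant. A runnable
    userspace sketch of the same scan, with made-up counts:

    #include <limits.h>
    #include <stdio.h>

    #define NR_CPUS_DEMO 4

    int main(void)
    {
        int channels[NR_CPUS_DEMO] = { 3, 1, 4, 1 }; /* per-CPU channel counts */
        int allowed[NR_CPUS_DEMO]  = { 1, 0, 1, 1 }; /* dest & online mask */
        int cpu, best = -1, minch = INT_MAX;

        for (cpu = 0; cpu < NR_CPUS_DEMO; cpu++) {
            if (!allowed[cpu] || channels[cpu] >= minch)
                continue;
            minch = channels[cpu];   /* new least-loaded candidate */
            best = cpu;
        }
        printf("target cpu: %d\n", best);   /* prints 3 */
        return 0;
    }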
-/* To be called with desc->lock held. */
-int xen_set_affinity_evtchn(struct irq_desc *desc, unsigned int tcpu)
-{
- struct irq_data *d = irq_desc_get_irq_data(desc);
-
- return set_affinity_irq(d, cpumask_of(tcpu), false);
-}
-EXPORT_SYMBOL_GPL(xen_set_affinity_evtchn);
-
static void enable_dynirq(struct irq_data *data)
{
evtchn_port_t evtchn = evtchn_from_irq(data->irq);
@@ -1734,18 +1789,7 @@ static void ack_dynirq(struct irq_data *data)
if (!VALID_EVTCHN(evtchn))
return;
- if (unlikely(irqd_is_setaffinity_pending(data)) &&
- likely(!irqd_irq_disabled(data))) {
- int masked = test_and_set_mask(evtchn);
-
- clear_evtchn(evtchn);
-
- irq_move_masked_irq(data);
-
- if (!masked)
- unmask_evtchn(evtchn);
- } else
- clear_evtchn(evtchn);
+ clear_evtchn(evtchn);
}
static void mask_ack_dynirq(struct irq_data *data)
@@ -1830,7 +1874,8 @@ static void restore_cpu_virqs(unsigned int cpu)
/* Record the new mapping. */
(void)xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
- bind_evtchn_to_cpu(evtchn, cpu);
+ /* The affinity mask is still valid */
+ bind_evtchn_to_cpu(evtchn, cpu, false);
}
}
@@ -1855,7 +1900,8 @@ static void restore_cpu_ipis(unsigned int cpu)
/* Record the new mapping. */
(void)xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi);
- bind_evtchn_to_cpu(evtchn, cpu);
+ /* The affinity mask is still valid */
+ bind_evtchn_to_cpu(evtchn, cpu, false);
}
}
@@ -1938,8 +1984,12 @@ void xen_irq_resume(void)
xen_evtchn_resume();
/* No IRQ <-> event-channel mappings. */
- list_for_each_entry(info, &xen_irq_list_head, list)
- info->evtchn = 0; /* zap event-channel binding */
+ list_for_each_entry(info, &xen_irq_list_head, list) {
+ /* Zap event-channel binding */
+ info->evtchn = 0;
+ /* Adjust accounting */
+ channels_on_cpu_dec(info);
+ }
clear_evtchn_to_irq_all();
@@ -2010,16 +2060,6 @@ static struct irq_chip xen_percpu_chip __read_mostly = {
.irq_ack = ack_dynirq,
};
-int xen_set_callback_via(uint64_t via)
-{
- struct xen_hvm_param a;
- a.domid = DOMID_SELF;
- a.index = HVM_PARAM_CALLBACK_IRQ;
- a.value = via;
- return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
-}
-EXPORT_SYMBOL_GPL(xen_set_callback_via);
-
#ifdef CONFIG_XEN_PVHVM
/* Vector callbacks are better than PCI interrupts to receive event
* channel notifications because we can receive vector callbacks on any
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index 5dc016d68f83..a7a85719a8c8 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -421,36 +421,6 @@ static void evtchn_unbind_from_user(struct per_user_data *u,
del_evtchn(u, evtchn);
}
-static DEFINE_PER_CPU(int, bind_last_selected_cpu);
-
-static void evtchn_bind_interdom_next_vcpu(evtchn_port_t evtchn)
-{
- unsigned int selected_cpu, irq;
- struct irq_desc *desc;
- unsigned long flags;
-
- irq = irq_from_evtchn(evtchn);
- desc = irq_to_desc(irq);
-
- if (!desc)
- return;
-
- raw_spin_lock_irqsave(&desc->lock, flags);
- selected_cpu = this_cpu_read(bind_last_selected_cpu);
- selected_cpu = cpumask_next_and(selected_cpu,
- desc->irq_common_data.affinity, cpu_online_mask);
-
- if (unlikely(selected_cpu >= nr_cpu_ids))
- selected_cpu = cpumask_first_and(desc->irq_common_data.affinity,
- cpu_online_mask);
-
- this_cpu_write(bind_last_selected_cpu, selected_cpu);
-
- /* unmask expects irqs to be disabled */
- xen_set_affinity_evtchn(desc, selected_cpu);
- raw_spin_unlock_irqrestore(&desc->lock, flags);
-}
-
static long evtchn_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
@@ -508,10 +478,8 @@ static long evtchn_ioctl(struct file *file,
break;
rc = evtchn_bind_to_user(u, bind_interdomain.local_port);
- if (rc == 0) {
+ if (rc == 0)
rc = bind_interdomain.local_port;
- evtchn_bind_interdom_next_vcpu(rc);
- }
break;
}
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index cd046684e0d1..374d36de7f5a 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -179,6 +179,7 @@ static int poweroff_nb(struct notifier_block *cb, unsigned long code, void *unus
case SYS_HALT:
case SYS_POWER_OFF:
shutting_down = SHUTDOWN_POWEROFF;
+ break;
default:
break;
}
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index dd911e1ff782..18f0ed8b1f93 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -132,6 +132,13 @@ static int platform_pci_probe(struct pci_dev *pdev,
dev_warn(&pdev->dev, "request_irq failed err=%d\n", ret);
goto out;
}
+ /*
+ * It doesn't strictly *have* to run on CPU0 but it sure
+ * as hell better process the event channel ports delivered
+ * to CPU0.
+ */
+ irq_set_affinity(pdev->irq, cpumask_of(0));
+
callback_via = get_callback_via(pdev);
ret = xen_set_callback_via(callback_via);
if (ret) {
@@ -149,7 +156,6 @@ static int platform_pci_probe(struct pci_dev *pdev,
ret = gnttab_init();
if (ret)
goto grant_out;
- xenbus_probe(NULL);
return 0;
grant_out:
gnttab_free_auto_xlat_frames();
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index b0c73c58f987..720a7b7abd46 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -717,14 +717,15 @@ static long privcmd_ioctl_restrict(struct file *file, void __user *udata)
return 0;
}
-static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
+static long privcmd_ioctl_mmap_resource(struct file *file,
+ struct privcmd_mmap_resource __user *udata)
{
struct privcmd_data *data = file->private_data;
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
struct privcmd_mmap_resource kdata;
xen_pfn_t *pfns = NULL;
- struct xen_mem_acquire_resource xdata;
+ struct xen_mem_acquire_resource xdata = { };
int rc;
if (copy_from_user(&kdata, udata, sizeof(kdata)))
@@ -734,6 +735,22 @@ static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
return -EPERM;
+ /* Both fields must be set or unset */
+ if (!!kdata.addr != !!kdata.num)
+ return -EINVAL;
+
+ xdata.domid = kdata.dom;
+ xdata.type = kdata.type;
+ xdata.id = kdata.id;
+
+ if (!kdata.addr && !kdata.num) {
+ /* Query the size of the resource. */
+ rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &xdata);
+ if (rc)
+ return rc;
+ return __put_user(xdata.nr_frames, &udata->num);
+ }
+
mmap_write_lock(mm);
vma = find_vma(mm, kdata.addr);
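
    The new query mode hinges on the "!!kdata.addr != !!kdata.num" check,
    which rejects a half-specified request while letting the all-zero
    query case through. A small sketch of that idiom, with hypothetical
    values:

    #include <assert.h>

    /* "!!a != !!b" normalises both values to 0/1, so it is true exactly
     * when one of the pair is set and the other is not. */
    static int mixed(unsigned long addr, unsigned long num)
    {
        return !!addr != !!num;
    }

    int main(void)
    {
        assert(!mixed(0, 0));        /* query mode: both unset is fine */
        assert(!mixed(0x7f0000, 8)); /* map mode: both set is fine */
        assert(mixed(0x7f0000, 0));  /* half-specified: rejected */
        return 0;
    }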
@@ -768,10 +785,6 @@ static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
} else
vma->vm_private_data = PRIV_VMA_LOCKED;
- memset(&xdata, 0, sizeof(xdata));
- xdata.domid = kdata.dom;
- xdata.type = kdata.type;
- xdata.id = kdata.id;
xdata.frame = kdata.idx;
xdata.nr_frames = kdata.num;
set_xen_guest_handle(xdata.frame_list, pfns);
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
index 4b99ec3dec58..e7c692cfb2cf 100644
--- a/drivers/xen/xen-pciback/xenbus.c
+++ b/drivers/xen/xen-pciback/xenbus.c
@@ -689,7 +689,7 @@ static int xen_pcibk_xenbus_probe(struct xenbus_device *dev,
/* watch the backend node for backend configuration information */
err = xenbus_watch_path(dev, dev->nodename, &pdev->be_watch,
- xen_pcibk_be_watch);
+ NULL, xen_pcibk_be_watch);
if (err)
goto out;
diff --git a/drivers/xen/xenbus/xenbus.h b/drivers/xen/xenbus/xenbus.h
index 5f5b8a7d5b80..dc1537335414 100644
--- a/drivers/xen/xenbus/xenbus.h
+++ b/drivers/xen/xenbus/xenbus.h
@@ -44,6 +44,8 @@ struct xen_bus_type {
int (*get_bus_id)(char bus_id[XEN_BUS_ID_SIZE], const char *nodename);
int (*probe)(struct xen_bus_type *bus, const char *type,
const char *dir);
+ bool (*otherend_will_handle)(struct xenbus_watch *watch,
+ const char *path, const char *token);
void (*otherend_changed)(struct xenbus_watch *watch, const char *path,
const char *token);
struct bus_type bus;
@@ -113,6 +115,7 @@ int xenbus_probe_node(struct xen_bus_type *bus,
const char *type,
const char *nodename);
int xenbus_probe_devices(struct xen_bus_type *bus);
+void xenbus_probe(void);
void xenbus_dev_changed(const char *node, struct xen_bus_type *bus);
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index fd80e318b99c..0cd728961fce 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -127,18 +127,22 @@ EXPORT_SYMBOL_GPL(xenbus_strstate);
*/
int xenbus_watch_path(struct xenbus_device *dev, const char *path,
struct xenbus_watch *watch,
+ bool (*will_handle)(struct xenbus_watch *,
+ const char *, const char *),
void (*callback)(struct xenbus_watch *,
const char *, const char *))
{
int err;
watch->node = path;
+ watch->will_handle = will_handle;
watch->callback = callback;
err = register_xenbus_watch(watch);
if (err) {
watch->node = NULL;
+ watch->will_handle = NULL;
watch->callback = NULL;
xenbus_dev_fatal(dev, err, "adding watch on %s", path);
}
@@ -165,6 +169,8 @@ EXPORT_SYMBOL_GPL(xenbus_watch_path);
*/
int xenbus_watch_pathfmt(struct xenbus_device *dev,
struct xenbus_watch *watch,
+ bool (*will_handle)(struct xenbus_watch *,
+ const char *, const char *),
void (*callback)(struct xenbus_watch *,
const char *, const char *),
const char *pathfmt, ...)
@@ -181,7 +187,7 @@ int xenbus_watch_pathfmt(struct xenbus_device *dev,
xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
return -ENOMEM;
}
- err = xenbus_watch_path(dev, path, watch, callback);
+ err = xenbus_watch_path(dev, path, watch, will_handle, callback);
if (err)
kfree(path);
diff --git a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c
index eb5151fc8efa..e5fda0256feb 100644
--- a/drivers/xen/xenbus/xenbus_comms.c
+++ b/drivers/xen/xenbus/xenbus_comms.c
@@ -57,16 +57,8 @@ DEFINE_MUTEX(xs_response_mutex);
static int xenbus_irq;
static struct task_struct *xenbus_task;
-static DECLARE_WORK(probe_work, xenbus_probe);
-
-
static irqreturn_t wake_waiting(int irq, void *unused)
{
- if (unlikely(xenstored_ready == 0)) {
- xenstored_ready = 1;
- schedule_work(&probe_work);
- }
-
wake_up(&xb_waitq);
return IRQ_HANDLED;
}
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 38725d97d909..18ffd0551b54 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -136,6 +136,7 @@ static int watch_otherend(struct xenbus_device *dev)
container_of(dev->dev.bus, struct xen_bus_type, bus);
return xenbus_watch_pathfmt(dev, &dev->otherend_watch,
+ bus->otherend_will_handle,
bus->otherend_changed,
"%s/%s", dev->otherend, "state");
}
@@ -682,29 +683,107 @@ void unregister_xenstore_notifier(struct notifier_block *nb)
}
EXPORT_SYMBOL_GPL(unregister_xenstore_notifier);
-void xenbus_probe(struct work_struct *unused)
+void xenbus_probe(void)
{
xenstored_ready = 1;
+ /*
+ * In the HVM case, xenbus_init() deferred its call to
+ * xs_init() in case callbacks were not operational yet.
+ * So do it now.
+ */
+ if (xen_store_domain_type == XS_HVM)
+ xs_init();
+
/* Notify others that xenstore is up */
blocking_notifier_call_chain(&xenstore_chain, 0, NULL);
}
-EXPORT_SYMBOL_GPL(xenbus_probe);
-static int __init xenbus_probe_initcall(void)
+/*
+ * Returns true when XenStore init must be deferred in order to
+ * allow the PCI platform device to be initialised, before we
+ * can actually have event channel interrupts working.
+ */
+static bool xs_hvm_defer_init_for_callback(void)
{
- if (!xen_domain())
- return -ENODEV;
+#ifdef CONFIG_XEN_PVHVM
+ return xen_store_domain_type == XS_HVM &&
+ !xen_have_vector_callback;
+#else
+ return false;
+#endif
+}
- if (xen_initial_domain() || xen_hvm_domain())
- return 0;
+static int xenbus_probe_thread(void *unused)
+{
+ DEFINE_WAIT(w);
- xenbus_probe(NULL);
+ /*
+ * We actually just want to wait for *any* trigger of xb_waitq,
+ * and run xenbus_probe() the moment it occurs.
+ */
+ prepare_to_wait(&xb_waitq, &w, TASK_INTERRUPTIBLE);
+ schedule();
+ finish_wait(&xb_waitq, &w);
+
+ DPRINTK("probing");
+ xenbus_probe();
return 0;
}
+static int __init xenbus_probe_initcall(void)
+{
+ /*
+ * Probe XenBus here in the XS_PV case, and also XS_HVM unless we
+ * need to wait for the platform PCI device to come up.
+ */
+ if (xen_store_domain_type == XS_PV ||
+ (xen_store_domain_type == XS_HVM &&
+ !xs_hvm_defer_init_for_callback()))
+ xenbus_probe();
+
+ /*
+ * For XS_LOCAL, spawn a thread which will wait for xenstored
+ * or a xenstore-stubdom to be started, then probe. It will be
+ * triggered when communication starts happening, by waiting
+ * on xb_waitq.
+ */
+ if (xen_store_domain_type == XS_LOCAL) {
+ struct task_struct *probe_task;
+
+ probe_task = kthread_run(xenbus_probe_thread, NULL,
+ "xenbus_probe");
+ if (IS_ERR(probe_task))
+ return PTR_ERR(probe_task);
+ }
+ return 0;
+}
device_initcall(xenbus_probe_initcall);
+int xen_set_callback_via(uint64_t via)
+{
+ struct xen_hvm_param a;
+ int ret;
+
+ a.domid = DOMID_SELF;
+ a.index = HVM_PARAM_CALLBACK_IRQ;
+ a.value = via;
+
+ ret = HYPERVISOR_hvm_op(HVMOP_set_param, &a);
+ if (ret)
+ return ret;
+
+ /*
+ * If xenbus_probe_initcall() deferred the xenbus_probe()
+ * due to the callback not functioning yet, we can do it now.
+ */
+ if (!xenstored_ready && xs_hvm_defer_init_for_callback())
+ xenbus_probe();
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(xen_set_callback_via);
+
/* Set up event channel for xenstored which is run as a local process
* (this is normally used only in dom0)
*/
@@ -817,11 +896,17 @@ static int __init xenbus_init(void)
break;
}
- /* Initialize the interface to xenstore. */
- err = xs_init();
- if (err) {
- pr_warn("Error initializing xenstore comms: %i\n", err);
- goto out_error;
+ /*
+ * HVM domains may not have a functional callback yet. In that
+ * case let xs_init() be called from xenbus_probe(), which will
+ * get invoked at an appropriate time.
+ */
+ if (xen_store_domain_type != XS_HVM) {
+ err = xs_init();
+ if (err) {
+ pr_warn("Error initializing xenstore comms: %i\n", err);
+ goto out_error;
+ }
}
if ((xen_store_domain_type != XS_LOCAL) &&
diff --git a/drivers/xen/xenbus/xenbus_probe_backend.c b/drivers/xen/xenbus/xenbus_probe_backend.c
index 2ba699897e6d..5abded97e1a7 100644
--- a/drivers/xen/xenbus/xenbus_probe_backend.c
+++ b/drivers/xen/xenbus/xenbus_probe_backend.c
@@ -180,6 +180,12 @@ static int xenbus_probe_backend(struct xen_bus_type *bus, const char *type,
return err;
}
+static bool frontend_will_handle(struct xenbus_watch *watch,
+ const char *path, const char *token)
+{
+ return watch->nr_pending == 0;
+}
+
static void frontend_changed(struct xenbus_watch *watch,
const char *path, const char *token)
{
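
    frontend_will_handle() above lets the backend bus cap each watch at a
    single queued event: xs_watch_msg() only queues an event when
    will_handle() agrees, and nr_pending tracks what is in flight, so a
    frontend firing watch storms cannot grow the event list without bound.
    A toy model of that policy, with invented numbers:

    #include <stdio.h>

    struct watch { int nr_pending; };

    static int will_handle(struct watch *w)
    {
        return w->nr_pending == 0;   /* queue only if nothing is pending */
    }

    int main(void)
    {
        struct watch w = { 0 };
        int fired, queued = 0;

        for (fired = 0; fired < 1000; fired++) {
            if (will_handle(&w)) {
                w.nr_pending++;      /* xenwatch would decrement on dispatch */
                queued++;
            }
        }
        printf("queued %d of 1000 events\n", queued);  /* prints 1 */
        return 0;
    }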
@@ -191,6 +197,7 @@ static struct xen_bus_type xenbus_backend = {
.levels = 3, /* backend/type/<frontend>/<id> */
.get_bus_id = backend_bus_id,
.probe = xenbus_probe_backend,
+ .otherend_will_handle = frontend_will_handle,
.otherend_changed = frontend_changed,
.bus = {
.name = "xen-backend",
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index 3a06eb699f33..12e02eb01f59 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -705,9 +705,13 @@ int xs_watch_msg(struct xs_watch_event *event)
spin_lock(&watches_lock);
event->handle = find_watch(event->token);
- if (event->handle != NULL) {
+ if (event->handle != NULL &&
+ (!event->handle->will_handle ||
+ event->handle->will_handle(event->handle,
+ event->path, event->token))) {
spin_lock(&watch_events_lock);
list_add_tail(&event->list, &watch_events);
+ event->handle->nr_pending++;
wake_up(&watch_events_waitq);
spin_unlock(&watch_events_lock);
} else
@@ -765,6 +769,8 @@ int register_xenbus_watch(struct xenbus_watch *watch)
sprintf(token, "%lX", (long)watch);
+ watch->nr_pending = 0;
+
down_read(&xs_watch_rwsem);
spin_lock(&watches_lock);
@@ -814,11 +820,14 @@ void unregister_xenbus_watch(struct xenbus_watch *watch)
/* Cancel pending watch events. */
spin_lock(&watch_events_lock);
- list_for_each_entry_safe(event, tmp, &watch_events, list) {
- if (event->handle != watch)
- continue;
- list_del(&event->list);
- kfree(event);
+ if (watch->nr_pending) {
+ list_for_each_entry_safe(event, tmp, &watch_events, list) {
+ if (event->handle != watch)
+ continue;
+ list_del(&event->list);
+ kfree(event);
+ }
+ watch->nr_pending = 0;
}
spin_unlock(&watch_events_lock);
@@ -865,7 +874,6 @@ void xs_suspend_cancel(void)
static int xenwatch_thread(void *unused)
{
- struct list_head *ent;
struct xs_watch_event *event;
xenwatch_pid = current->pid;
@@ -880,13 +888,15 @@ static int xenwatch_thread(void *unused)
mutex_lock(&xenwatch_mutex);
spin_lock(&watch_events_lock);
- ent = watch_events.next;
- if (ent != &watch_events)
- list_del(ent);
+ event = list_first_entry_or_null(&watch_events,
+ struct xs_watch_event, list);
+ if (event) {
+ list_del(&event->list);
+ event->handle->nr_pending--;
+ }
spin_unlock(&watch_events_lock);
- if (ent != &watch_events) {
- event = list_entry(ent, struct xs_watch_event, list);
+ if (event) {
event->handle->callback(event->handle, event->path,
event->token);
kfree(event);